author    Ben Noordhuis <info@bnoordhuis.nl>  2014-03-31 14:38:28 +0200
committer Fedor Indutny <fedor@indutny.com>   2014-04-02 00:05:24 +0400
commit    67e078094b53861a5aa7e9354e33487d0bd4f73b (patch)
tree      09a706adee1ddb59c1507ee3320de9cb6896135b /deps/v8/src
parent    f984555d47298cfb01b3e55c2861066379306fc3 (diff)
download  node-new-67e078094b53861a5aa7e9354e33487d0bd4f73b.tar.gz
deps: upgrade v8 to 3.25.30
Diffstat (limited to 'deps/v8/src')
-rw-r--r--deps/v8/src/accessors.cc41
-rw-r--r--deps/v8/src/accessors.h2
-rw-r--r--deps/v8/src/allocation-tracker.cc173
-rw-r--r--deps/v8/src/allocation-tracker.h56
-rw-r--r--deps/v8/src/api.cc806
-rw-r--r--deps/v8/src/api.h3
-rw-r--r--deps/v8/src/arm/OWNERS1
-rw-r--r--deps/v8/src/arm/assembler-arm-inl.h82
-rw-r--r--deps/v8/src/arm/assembler-arm.cc438
-rw-r--r--deps/v8/src/arm/assembler-arm.h120
-rw-r--r--deps/v8/src/arm/builtins-arm.cc178
-rw-r--r--deps/v8/src/arm/code-stubs-arm.cc516
-rw-r--r--deps/v8/src/arm/code-stubs-arm.h2
-rw-r--r--deps/v8/src/arm/constants-arm.h2
-rw-r--r--deps/v8/src/arm/debug-arm.cc12
-rw-r--r--deps/v8/src/arm/deoptimizer-arm.cc33
-rw-r--r--deps/v8/src/arm/disasm-arm.cc12
-rw-r--r--deps/v8/src/arm/full-codegen-arm.cc494
-rw-r--r--deps/v8/src/arm/ic-arm.cc26
-rw-r--r--deps/v8/src/arm/lithium-arm.cc278
-rw-r--r--deps/v8/src/arm/lithium-arm.h234
-rw-r--r--deps/v8/src/arm/lithium-codegen-arm.cc740
-rw-r--r--deps/v8/src/arm/lithium-codegen-arm.h23
-rw-r--r--deps/v8/src/arm/macro-assembler-arm.cc167
-rw-r--r--deps/v8/src/arm/macro-assembler-arm.h87
-rw-r--r--deps/v8/src/arm/simulator-arm.cc10
-rw-r--r--deps/v8/src/arm/simulator-arm.h4
-rw-r--r--deps/v8/src/arm/stub-cache-arm.cc190
-rw-r--r--deps/v8/src/arm64/OWNERS1
-rw-r--r--deps/v8/src/arm64/assembler-arm64-inl.h1229
-rw-r--r--deps/v8/src/arm64/assembler-arm64.cc2813
-rw-r--r--deps/v8/src/arm64/assembler-arm64.h2233
-rw-r--r--deps/v8/src/arm64/builtins-arm64.cc1562
-rw-r--r--deps/v8/src/arm64/code-stubs-arm64.cc5743
-rw-r--r--deps/v8/src/arm64/code-stubs-arm64.h500
-rw-r--r--deps/v8/src/arm64/codegen-arm64.cc615
-rw-r--r--deps/v8/src/arm64/codegen-arm64.h71
-rw-r--r--deps/v8/src/arm64/constants-arm64.h1271
-rw-r--r--deps/v8/src/arm64/cpu-arm64.cc199
-rw-r--r--deps/v8/src/arm64/cpu-arm64.h107
-rw-r--r--deps/v8/src/arm64/debug-arm64.cc393
-rw-r--r--deps/v8/src/arm64/decoder-arm64-inl.h671
-rw-r--r--deps/v8/src/arm64/decoder-arm64.cc109
-rw-r--r--deps/v8/src/arm64/decoder-arm64.h210
-rw-r--r--deps/v8/src/arm64/deoptimizer-arm64.cc388
-rw-r--r--deps/v8/src/arm64/disasm-arm64.cc1856
-rw-r--r--deps/v8/src/arm64/disasm-arm64.h115
-rw-r--r--deps/v8/src/arm64/frames-arm64.cc65
-rw-r--r--deps/v8/src/arm64/frames-arm64.h133
-rw-r--r--deps/v8/src/arm64/full-codegen-arm64.cc5015
-rw-r--r--deps/v8/src/arm64/ic-arm64.cc1407
-rw-r--r--deps/v8/src/arm64/instructions-arm64.cc333
-rw-r--r--deps/v8/src/arm64/instructions-arm64.h501
-rw-r--r--deps/v8/src/arm64/instrument-arm64.cc618
-rw-r--r--deps/v8/src/arm64/instrument-arm64.h107
-rw-r--r--deps/v8/src/arm64/lithium-arm64.cc2576
-rw-r--r--deps/v8/src/arm64/lithium-arm64.h3100
-rw-r--r--deps/v8/src/arm64/lithium-codegen-arm64.cc5901
-rw-r--r--deps/v8/src/arm64/lithium-codegen-arm64.h490
-rw-r--r--deps/v8/src/arm64/lithium-gap-resolver-arm64.cc334
-rw-r--r--deps/v8/src/arm64/lithium-gap-resolver-arm64.h90
-rw-r--r--deps/v8/src/arm64/macro-assembler-arm64-inl.h1677
-rw-r--r--deps/v8/src/arm64/macro-assembler-arm64.cc5184
-rw-r--r--deps/v8/src/arm64/macro-assembler-arm64.h2310
-rw-r--r--deps/v8/src/arm64/regexp-macro-assembler-arm64.cc1728
-rw-r--r--deps/v8/src/arm64/regexp-macro-assembler-arm64.h315
-rw-r--r--deps/v8/src/arm64/simulator-arm64.cc3645
-rw-r--r--deps/v8/src/arm64/simulator-arm64.h908
-rw-r--r--deps/v8/src/arm64/stub-cache-arm64.cc1496
-rw-r--r--deps/v8/src/arm64/utils-arm64.cc112
-rw-r--r--deps/v8/src/arm64/utils-arm64.h135
-rw-r--r--deps/v8/src/array-iterator.js10
-rw-r--r--deps/v8/src/array.js14
-rw-r--r--deps/v8/src/assembler.cc95
-rw-r--r--deps/v8/src/assembler.h87
-rw-r--r--deps/v8/src/assert-scope.cc21
-rw-r--r--deps/v8/src/assert-scope.h129
-rw-r--r--deps/v8/src/ast.cc49
-rw-r--r--deps/v8/src/ast.h90
-rw-r--r--deps/v8/src/atomicops.h33
-rw-r--r--deps/v8/src/atomicops_internals_arm64_gcc.h372
-rw-r--r--deps/v8/src/atomicops_internals_arm_gcc.h237
-rw-r--r--deps/v8/src/atomicops_internals_atomicword_compat.h122
-rw-r--r--deps/v8/src/atomicops_internals_mac.h (renamed from deps/v8/src/atomicops_internals_x86_macosx.h)104
-rw-r--r--deps/v8/src/atomicops_internals_tsan.h194
-rw-r--r--deps/v8/src/atomicops_internals_x86_msvc.h14
-rw-r--r--deps/v8/src/bootstrapper.cc190
-rw-r--r--deps/v8/src/bootstrapper.h1
-rw-r--r--deps/v8/src/builtins.cc503
-rw-r--r--deps/v8/src/builtins.h6
-rw-r--r--deps/v8/src/char-predicates.h21
-rw-r--r--deps/v8/src/checks.cc37
-rw-r--r--deps/v8/src/checks.h26
-rw-r--r--deps/v8/src/circular-queue.h1
-rw-r--r--deps/v8/src/code-stubs-hydrogen.cc251
-rw-r--r--deps/v8/src/code-stubs.cc23
-rw-r--r--deps/v8/src/code-stubs.h205
-rw-r--r--deps/v8/src/codegen.cc10
-rw-r--r--deps/v8/src/codegen.h2
-rw-r--r--deps/v8/src/collection.js173
-rw-r--r--deps/v8/src/compilation-cache.cc10
-rw-r--r--deps/v8/src/compilation-cache.h9
-rw-r--r--deps/v8/src/compiler.cc129
-rw-r--r--deps/v8/src/compiler.h94
-rw-r--r--deps/v8/src/contexts.cc14
-rw-r--r--deps/v8/src/contexts.h81
-rw-r--r--deps/v8/src/conversions-inl.h2
-rw-r--r--deps/v8/src/counters.cc8
-rw-r--r--deps/v8/src/d8-debug.cc2
-rw-r--r--deps/v8/src/d8-debug.h1
-rw-r--r--deps/v8/src/d8.cc82
-rw-r--r--deps/v8/src/d8.h4
-rw-r--r--deps/v8/src/date.cc1
-rw-r--r--deps/v8/src/date.h15
-rw-r--r--deps/v8/src/date.js23
-rw-r--r--deps/v8/src/dateparser.h2
-rw-r--r--deps/v8/src/debug.cc47
-rw-r--r--deps/v8/src/deoptimizer.cc170
-rw-r--r--deps/v8/src/deoptimizer.h4
-rw-r--r--deps/v8/src/disassembler.cc2
-rw-r--r--deps/v8/src/elements-kind.cc21
-rw-r--r--deps/v8/src/elements-kind.h17
-rw-r--r--deps/v8/src/elements.cc695
-rw-r--r--deps/v8/src/elements.h55
-rw-r--r--deps/v8/src/execution.cc56
-rw-r--r--deps/v8/src/execution.h9
-rw-r--r--deps/v8/src/extensions/externalize-string-extension.cc4
-rw-r--r--deps/v8/src/factory.cc150
-rw-r--r--deps/v8/src/factory.h66
-rw-r--r--deps/v8/src/feedback-slots.h110
-rw-r--r--deps/v8/src/flag-definitions.h79
-rw-r--r--deps/v8/src/frames-inl.h7
-rw-r--r--deps/v8/src/frames.cc8
-rw-r--r--deps/v8/src/frames.h21
-rw-r--r--deps/v8/src/full-codegen.cc65
-rw-r--r--deps/v8/src/full-codegen.h43
-rw-r--r--deps/v8/src/func-name-inferrer.cc11
-rw-r--r--deps/v8/src/func-name-inferrer.h4
-rw-r--r--deps/v8/src/global-handles.cc10
-rw-r--r--deps/v8/src/global-handles.h3
-rw-r--r--deps/v8/src/globals.h56
-rw-r--r--deps/v8/src/handles-inl.h3
-rw-r--r--deps/v8/src/handles.cc39
-rw-r--r--deps/v8/src/harmony-array.js4
-rw-r--r--deps/v8/src/harmony-math.js104
-rw-r--r--deps/v8/src/heap-inl.h84
-rw-r--r--deps/v8/src/heap-profiler.cc5
-rw-r--r--deps/v8/src/heap-snapshot-generator.cc294
-rw-r--r--deps/v8/src/heap-snapshot-generator.h62
-rw-r--r--deps/v8/src/heap.cc576
-rw-r--r--deps/v8/src/heap.h203
-rw-r--r--deps/v8/src/hydrogen-bce.cc47
-rw-r--r--deps/v8/src/hydrogen-check-elimination.cc296
-rw-r--r--deps/v8/src/hydrogen-flow-engine.h19
-rw-r--r--deps/v8/src/hydrogen-gvn.cc510
-rw-r--r--deps/v8/src/hydrogen-gvn.h94
-rw-r--r--deps/v8/src/hydrogen-instructions.cc577
-rw-r--r--deps/v8/src/hydrogen-instructions.h636
-rw-r--r--deps/v8/src/hydrogen-load-elimination.cc92
-rw-r--r--deps/v8/src/hydrogen-minus-zero.cc91
-rw-r--r--deps/v8/src/hydrogen-range-analysis.cc99
-rw-r--r--deps/v8/src/hydrogen-range-analysis.h15
-rw-r--r--deps/v8/src/hydrogen-representation-changes.cc10
-rw-r--r--deps/v8/src/hydrogen-store-elimination.cc139
-rw-r--r--deps/v8/src/hydrogen-store-elimination.h (renamed from deps/v8/src/hydrogen-minus-zero.h)27
-rw-r--r--deps/v8/src/hydrogen.cc1267
-rw-r--r--deps/v8/src/hydrogen.h191
-rw-r--r--deps/v8/src/i18n.cc80
-rw-r--r--deps/v8/src/ia32/assembler-ia32-inl.h27
-rw-r--r--deps/v8/src/ia32/assembler-ia32.cc30
-rw-r--r--deps/v8/src/ia32/assembler-ia32.h35
-rw-r--r--deps/v8/src/ia32/builtins-ia32.cc133
-rw-r--r--deps/v8/src/ia32/code-stubs-ia32.cc500
-rw-r--r--deps/v8/src/ia32/code-stubs-ia32.h3
-rw-r--r--deps/v8/src/ia32/debug-ia32.cc16
-rw-r--r--deps/v8/src/ia32/deoptimizer-ia32.cc35
-rw-r--r--deps/v8/src/ia32/disasm-ia32.cc63
-rw-r--r--deps/v8/src/ia32/frames-ia32.h2
-rw-r--r--deps/v8/src/ia32/full-codegen-ia32.cc426
-rw-r--r--deps/v8/src/ia32/ic-ia32.cc26
-rw-r--r--deps/v8/src/ia32/lithium-codegen-ia32.cc825
-rw-r--r--deps/v8/src/ia32/lithium-codegen-ia32.h12
-rw-r--r--deps/v8/src/ia32/lithium-gap-resolver-ia32.cc4
-rw-r--r--deps/v8/src/ia32/lithium-ia32.cc403
-rw-r--r--deps/v8/src/ia32/lithium-ia32.h248
-rw-r--r--deps/v8/src/ia32/macro-assembler-ia32.cc165
-rw-r--r--deps/v8/src/ia32/macro-assembler-ia32.h30
-rw-r--r--deps/v8/src/ia32/regexp-macro-assembler-ia32.cc2
-rw-r--r--deps/v8/src/ia32/stub-cache-ia32.cc168
-rw-r--r--deps/v8/src/ic-inl.h61
-rw-r--r--deps/v8/src/ic.cc402
-rw-r--r--deps/v8/src/ic.h115
-rw-r--r--deps/v8/src/icu_util.cc58
-rw-r--r--deps/v8/src/icu_util.h2
-rw-r--r--deps/v8/src/incremental-marking.cc22
-rw-r--r--deps/v8/src/incremental-marking.h6
-rw-r--r--deps/v8/src/interpreter-irregexp.cc22
-rw-r--r--deps/v8/src/isolate.cc189
-rw-r--r--deps/v8/src/isolate.h216
-rw-r--r--deps/v8/src/json-parser.h8
-rw-r--r--deps/v8/src/json-stringifier.h56
-rw-r--r--deps/v8/src/json.js22
-rw-r--r--deps/v8/src/jsregexp.cc20
-rw-r--r--deps/v8/src/libplatform/default-platform.h4
-rw-r--r--deps/v8/src/lithium-allocator-inl.h2
-rw-r--r--deps/v8/src/lithium-allocator.cc2
-rw-r--r--deps/v8/src/lithium-allocator.h4
-rw-r--r--deps/v8/src/lithium-codegen.cc19
-rw-r--r--deps/v8/src/lithium-codegen.h2
-rw-r--r--deps/v8/src/lithium.cc48
-rw-r--r--deps/v8/src/lithium.h157
-rw-r--r--deps/v8/src/liveedit.cc154
-rw-r--r--deps/v8/src/log.cc30
-rw-r--r--deps/v8/src/log.h9
-rw-r--r--deps/v8/src/macro-assembler.h9
-rw-r--r--deps/v8/src/macros.py1
-rw-r--r--deps/v8/src/mark-compact-inl.h5
-rw-r--r--deps/v8/src/mark-compact.cc261
-rw-r--r--deps/v8/src/mark-compact.h23
-rw-r--r--deps/v8/src/messages.cc6
-rw-r--r--deps/v8/src/messages.h1
-rw-r--r--deps/v8/src/messages.js49
-rw-r--r--deps/v8/src/mips/assembler-mips-inl.h28
-rw-r--r--deps/v8/src/mips/assembler-mips.cc24
-rw-r--r--deps/v8/src/mips/assembler-mips.h30
-rw-r--r--deps/v8/src/mips/builtins-mips.cc129
-rw-r--r--deps/v8/src/mips/code-stubs-mips.cc474
-rw-r--r--deps/v8/src/mips/code-stubs-mips.h2
-rw-r--r--deps/v8/src/mips/debug-mips.cc10
-rw-r--r--deps/v8/src/mips/deoptimizer-mips.cc33
-rw-r--r--deps/v8/src/mips/frames-mips.h2
-rw-r--r--deps/v8/src/mips/full-codegen-mips.cc393
-rw-r--r--deps/v8/src/mips/ic-mips.cc34
-rw-r--r--deps/v8/src/mips/lithium-codegen-mips.cc688
-rw-r--r--deps/v8/src/mips/lithium-codegen-mips.h12
-rw-r--r--deps/v8/src/mips/lithium-mips.cc252
-rw-r--r--deps/v8/src/mips/lithium-mips.h243
-rw-r--r--deps/v8/src/mips/macro-assembler-mips.cc109
-rw-r--r--deps/v8/src/mips/macro-assembler-mips.h15
-rw-r--r--deps/v8/src/mips/simulator-mips.cc10
-rw-r--r--deps/v8/src/mips/simulator-mips.h4
-rw-r--r--deps/v8/src/mips/stub-cache-mips.cc185
-rw-r--r--deps/v8/src/mirror-debugger.js111
-rw-r--r--deps/v8/src/object-observe.js153
-rw-r--r--deps/v8/src/objects-debug.cc25
-rw-r--r--deps/v8/src/objects-inl.h689
-rw-r--r--deps/v8/src/objects-printer.cc68
-rw-r--r--deps/v8/src/objects-visiting-inl.h33
-rw-r--r--deps/v8/src/objects-visiting.h2
-rw-r--r--deps/v8/src/objects.cc1885
-rw-r--r--deps/v8/src/objects.h669
-rw-r--r--deps/v8/src/optimizing-compiler-thread.cc10
-rw-r--r--deps/v8/src/parser.cc2291
-rw-r--r--deps/v8/src/parser.h425
-rw-r--r--deps/v8/src/platform-cygwin.cc4
-rw-r--r--deps/v8/src/platform-freebsd.cc4
-rw-r--r--deps/v8/src/platform-linux.cc7
-rw-r--r--deps/v8/src/platform-macos.cc4
-rw-r--r--deps/v8/src/platform-openbsd.cc4
-rw-r--r--deps/v8/src/platform-posix.cc43
-rw-r--r--deps/v8/src/platform-qnx.cc4
-rw-r--r--deps/v8/src/platform-solaris.cc4
-rw-r--r--deps/v8/src/platform-win32.cc272
-rw-r--r--deps/v8/src/platform.h13
-rw-r--r--deps/v8/src/preparse-data-format.h2
-rw-r--r--deps/v8/src/preparse-data.cc104
-rw-r--r--deps/v8/src/preparse-data.h235
-rw-r--r--deps/v8/src/preparser.cc941
-rw-r--r--deps/v8/src/preparser.h2088
-rw-r--r--deps/v8/src/profile-generator-inl.h2
-rw-r--r--deps/v8/src/promise.js154
-rw-r--r--deps/v8/src/property-details-inl.h51
-rw-r--r--deps/v8/src/property-details.h18
-rw-r--r--deps/v8/src/property.h87
-rw-r--r--deps/v8/src/regexp-macro-assembler-tracer.cc5
-rw-r--r--deps/v8/src/regexp-macro-assembler.h1
-rw-r--r--deps/v8/src/runtime.cc1363
-rw-r--r--deps/v8/src/runtime.h260
-rw-r--r--deps/v8/src/runtime.js14
-rw-r--r--deps/v8/src/sampler.cc58
-rw-r--r--deps/v8/src/scanner.cc144
-rw-r--r--deps/v8/src/scanner.h211
-rw-r--r--deps/v8/src/scopeinfo.cc8
-rw-r--r--deps/v8/src/scopes.cc65
-rw-r--r--deps/v8/src/scopes.h40
-rw-r--r--deps/v8/src/serialize.cc64
-rw-r--r--deps/v8/src/serialize.h1
-rw-r--r--deps/v8/src/simulator.h2
-rw-r--r--deps/v8/src/spaces.cc78
-rw-r--r--deps/v8/src/spaces.h81
-rw-r--r--deps/v8/src/store-buffer.cc31
-rw-r--r--deps/v8/src/stub-cache.cc99
-rw-r--r--deps/v8/src/stub-cache.h81
-rw-r--r--deps/v8/src/sweeper-thread.cc1
-rw-r--r--deps/v8/src/symbol.js85
-rw-r--r--deps/v8/src/token.h2
-rw-r--r--deps/v8/src/transitions-inl.h1
-rw-r--r--deps/v8/src/type-info.cc124
-rw-r--r--deps/v8/src/type-info.h22
-rw-r--r--deps/v8/src/typedarray.js24
-rw-r--r--deps/v8/src/types.cc115
-rw-r--r--deps/v8/src/types.h205
-rw-r--r--deps/v8/src/typing.cc22
-rw-r--r--deps/v8/src/unicode.cc40
-rw-r--r--deps/v8/src/unicode.h3
-rw-r--r--deps/v8/src/unique.h4
-rw-r--r--deps/v8/src/uri.h9
-rw-r--r--deps/v8/src/utils.cc14
-rw-r--r--deps/v8/src/utils.h103
-rw-r--r--deps/v8/src/v8.cc33
-rw-r--r--deps/v8/src/v8.h2
-rw-r--r--deps/v8/src/v8globals.h7
-rw-r--r--deps/v8/src/v8natives.js40
-rw-r--r--deps/v8/src/variables.cc4
-rw-r--r--deps/v8/src/variables.h2
-rw-r--r--deps/v8/src/version.cc6
-rw-r--r--deps/v8/src/vm-state-inl.h3
-rw-r--r--deps/v8/src/weak_collection.js206
-rw-r--r--deps/v8/src/win32-headers.h1
-rw-r--r--deps/v8/src/x64/assembler-x64-inl.h23
-rw-r--r--deps/v8/src/x64/assembler-x64.cc329
-rw-r--r--deps/v8/src/x64/assembler-x64.h761
-rw-r--r--deps/v8/src/x64/builtins-x64.cc340
-rw-r--r--deps/v8/src/x64/code-stubs-x64.cc867
-rw-r--r--deps/v8/src/x64/code-stubs-x64.h22
-rw-r--r--deps/v8/src/x64/codegen-x64.cc36
-rw-r--r--deps/v8/src/x64/debug-x64.cc32
-rw-r--r--deps/v8/src/x64/deoptimizer-x64.cc86
-rw-r--r--deps/v8/src/x64/disasm-x64.cc60
-rw-r--r--deps/v8/src/x64/frames-x64.h2
-rw-r--r--deps/v8/src/x64/full-codegen-x64.cc751
-rw-r--r--deps/v8/src/x64/ic-x64.cc112
-rw-r--r--deps/v8/src/x64/lithium-codegen-x64.cc1097
-rw-r--r--deps/v8/src/x64/lithium-codegen-x64.h7
-rw-r--r--deps/v8/src/x64/lithium-gap-resolver-x64.cc12
-rw-r--r--deps/v8/src/x64/lithium-x64.cc435
-rw-r--r--deps/v8/src/x64/lithium-x64.h259
-rw-r--r--deps/v8/src/x64/macro-assembler-x64.cc790
-rw-r--r--deps/v8/src/x64/macro-assembler-x64.h43
-rw-r--r--deps/v8/src/x64/regexp-macro-assembler-x64.cc222
-rw-r--r--deps/v8/src/x64/stub-cache-x64.cc248
-rw-r--r--deps/v8/src/zone-allocator.h14
-rw-r--r--deps/v8/src/zone-inl.h29
-rw-r--r--deps/v8/src/zone.cc6
-rw-r--r--deps/v8/src/zone.h12
345 files changed, 83351 insertions, 18915 deletions
diff --git a/deps/v8/src/accessors.cc b/deps/v8/src/accessors.cc
index 47b0a85633..35cff1af7d 100644
--- a/deps/v8/src/accessors.cc
+++ b/deps/v8/src/accessors.cc
@@ -119,9 +119,7 @@ bool Accessors::IsJSObjectFieldAccessor(typename T::TypeHandle type,
CheckForName(name, isolate->heap()->byte_length_string(),
JSTypedArray::kByteLengthOffset, object_offset) ||
CheckForName(name, isolate->heap()->byte_offset_string(),
- JSTypedArray::kByteOffsetOffset, object_offset) ||
- CheckForName(name, isolate->heap()->buffer_string(),
- JSTypedArray::kBufferOffset, object_offset);
+ JSTypedArray::kByteOffsetOffset, object_offset);
case JS_ARRAY_BUFFER_TYPE:
return
CheckForName(name, isolate->heap()->byte_length_string(),
@@ -131,9 +129,7 @@ bool Accessors::IsJSObjectFieldAccessor(typename T::TypeHandle type,
CheckForName(name, isolate->heap()->byte_length_string(),
JSDataView::kByteLengthOffset, object_offset) ||
CheckForName(name, isolate->heap()->byte_offset_string(),
- JSDataView::kByteOffsetOffset, object_offset) ||
- CheckForName(name, isolate->heap()->buffer_string(),
- JSDataView::kBufferOffset, object_offset);
+ JSDataView::kByteOffsetOffset, object_offset);
default:
return false;
}
@@ -213,7 +209,9 @@ MaybeObject* Accessors::ArraySetLength(Isolate* isolate,
if (has_exception) return Failure::Exception();
if (uint32_v->Number() == number_v->Number()) {
- return array_handle->SetElementsLength(*uint32_v);
+ Handle<Object> result = JSArray::SetElementsLength(array_handle, uint32_v);
+ RETURN_IF_EMPTY_HANDLE(isolate, result);
+ return *result;
}
return isolate->Throw(
*isolate->factory()->NewRangeError("invalid_array_length",
@@ -351,26 +349,6 @@ const AccessorDescriptor Accessors::ScriptColumnOffset = {
//
-// Accessors::ScriptData
-//
-
-
-MaybeObject* Accessors::ScriptGetData(Isolate* isolate,
- Object* object,
- void*) {
- Object* script = JSValue::cast(object)->value();
- return Script::cast(script)->data();
-}
-
-
-const AccessorDescriptor Accessors::ScriptData = {
- ScriptGetData,
- IllegalSetter,
- 0
-};
-
-
-//
// Accessors::ScriptType
//
@@ -620,10 +598,7 @@ MaybeObject* Accessors::FunctionSetPrototype(Isolate* isolate,
}
Handle<Object> old_value;
- bool is_observed =
- FLAG_harmony_observation &&
- *function == *object &&
- function->map()->is_observed();
+ bool is_observed = *function == *object && function->map()->is_observed();
if (is_observed) {
if (function->has_prototype())
old_value = handle(function->prototype(), isolate);
@@ -911,10 +886,10 @@ MaybeObject* Accessors::FunctionGetCaller(Isolate* isolate,
if (caller->shared()->bound()) {
return isolate->heap()->null_value();
}
- // Censor if the caller is not a classic mode function.
+ // Censor if the caller is not a sloppy mode function.
// Change from ES5, which used to throw, see:
// https://bugs.ecmascript.org/show_bug.cgi?id=310
- if (!caller->shared()->is_classic_mode()) {
+ if (caller->shared()->strict_mode() == STRICT) {
return isolate->heap()->null_value();
}
diff --git a/deps/v8/src/accessors.h b/deps/v8/src/accessors.h
index d157aeaadf..83a847222b 100644
--- a/deps/v8/src/accessors.h
+++ b/deps/v8/src/accessors.h
@@ -49,7 +49,6 @@ namespace internal {
V(ScriptId) \
V(ScriptLineOffset) \
V(ScriptColumnOffset) \
- V(ScriptData) \
V(ScriptType) \
V(ScriptCompilationType) \
V(ScriptLineEnds) \
@@ -128,7 +127,6 @@ class Accessors : public AllStatic {
static MaybeObject* ScriptGetColumnOffset(Isolate* isolate,
Object* object,
void*);
- static MaybeObject* ScriptGetData(Isolate* isolate, Object* object, void*);
static MaybeObject* ScriptGetType(Isolate* isolate, Object* object, void*);
static MaybeObject* ScriptGetCompilationType(Isolate* isolate,
Object* object,
diff --git a/deps/v8/src/allocation-tracker.cc b/deps/v8/src/allocation-tracker.cc
index 5ec6484601..a9103a84a3 100644
--- a/deps/v8/src/allocation-tracker.cc
+++ b/deps/v8/src/allocation-tracker.cc
@@ -36,9 +36,9 @@ namespace v8 {
namespace internal {
AllocationTraceNode::AllocationTraceNode(
- AllocationTraceTree* tree, SnapshotObjectId shared_function_info_id)
+ AllocationTraceTree* tree, unsigned function_info_index)
: tree_(tree),
- function_id_(shared_function_info_id),
+ function_info_index_(function_info_index),
total_size_(0),
allocation_count_(0),
id_(tree->next_node_id()) {
@@ -50,19 +50,21 @@ AllocationTraceNode::~AllocationTraceNode() {
}
-AllocationTraceNode* AllocationTraceNode::FindChild(SnapshotObjectId id) {
+AllocationTraceNode* AllocationTraceNode::FindChild(
+ unsigned function_info_index) {
for (int i = 0; i < children_.length(); i++) {
AllocationTraceNode* node = children_[i];
- if (node->function_id() == id) return node;
+ if (node->function_info_index() == function_info_index) return node;
}
return NULL;
}
-AllocationTraceNode* AllocationTraceNode::FindOrAddChild(SnapshotObjectId id) {
- AllocationTraceNode* child = FindChild(id);
+AllocationTraceNode* AllocationTraceNode::FindOrAddChild(
+ unsigned function_info_index) {
+ AllocationTraceNode* child = FindChild(function_info_index);
if (child == NULL) {
- child = new AllocationTraceNode(tree_, id);
+ child = new AllocationTraceNode(tree_, function_info_index);
children_.Add(child);
}
return child;
@@ -78,17 +80,11 @@ void AllocationTraceNode::AddAllocation(unsigned size) {
void AllocationTraceNode::Print(int indent, AllocationTracker* tracker) {
OS::Print("%10u %10u %*c", total_size_, allocation_count_, indent, ' ');
if (tracker != NULL) {
- const char* name = "<unknown function>";
- if (function_id_ != 0) {
- AllocationTracker::FunctionInfo* info =
- tracker->GetFunctionInfo(function_id_);
- if (info != NULL) {
- name = info->name;
- }
- }
- OS::Print("%s #%u", name, id_);
+ AllocationTracker::FunctionInfo* info =
+ tracker->function_info_list()[function_info_index_];
+ OS::Print("%s #%u", info->name, id_);
} else {
- OS::Print("%u #%u", function_id_, id_);
+ OS::Print("%u #%u", function_info_index_, id_);
}
OS::Print("\n");
indent += 2;
@@ -109,9 +105,9 @@ AllocationTraceTree::~AllocationTraceTree() {
AllocationTraceNode* AllocationTraceTree::AddPathFromEnd(
- const Vector<SnapshotObjectId>& path) {
+ const Vector<unsigned>& path) {
AllocationTraceNode* node = root();
- for (SnapshotObjectId* entry = path.start() + path.length() - 1;
+ for (unsigned* entry = path.start() + path.length() - 1;
entry != path.start() - 1;
--entry) {
node = node->FindOrAddChild(*entry);
@@ -126,6 +122,7 @@ void AllocationTraceTree::Print(AllocationTracker* tracker) {
root()->Print(0, tracker);
}
+
void AllocationTracker::DeleteUnresolvedLocation(
UnresolvedLocation** location) {
delete *location;
@@ -134,6 +131,7 @@ void AllocationTracker::DeleteUnresolvedLocation(
AllocationTracker::FunctionInfo::FunctionInfo()
: name(""),
+ function_id(0),
script_name(""),
script_id(0),
line(-1),
@@ -141,26 +139,103 @@ AllocationTracker::FunctionInfo::FunctionInfo()
}
+void AddressToTraceMap::AddRange(Address start, int size,
+ unsigned trace_node_id) {
+ Address end = start + size;
+ RemoveRange(start, end);
+
+ RangeStack new_range(start, trace_node_id);
+ ranges_.insert(RangeMap::value_type(end, new_range));
+}
+
+
+unsigned AddressToTraceMap::GetTraceNodeId(Address addr) {
+ RangeMap::const_iterator it = ranges_.upper_bound(addr);
+ if (it == ranges_.end()) return 0;
+ if (it->second.start <= addr) {
+ return it->second.trace_node_id;
+ }
+ return 0;
+}
+
+
+void AddressToTraceMap::MoveObject(Address from, Address to, int size) {
+ unsigned trace_node_id = GetTraceNodeId(from);
+ if (trace_node_id == 0) return;
+ RemoveRange(from, from + size);
+ AddRange(to, size, trace_node_id);
+}
+
+
+void AddressToTraceMap::Clear() {
+ ranges_.clear();
+}
+
+
+void AddressToTraceMap::Print() {
+ PrintF("[AddressToTraceMap (%" V8PRIuPTR "): \n", ranges_.size());
+ for (RangeMap::iterator it = ranges_.begin(); it != ranges_.end(); ++it) {
+ PrintF("[%p - %p] => %u\n", it->second.start, it->first,
+ it->second.trace_node_id);
+ }
+ PrintF("]\n");
+}
+
+
+void AddressToTraceMap::RemoveRange(Address start, Address end) {
+ RangeMap::iterator it = ranges_.upper_bound(start);
+ if (it == ranges_.end()) return;
+
+ RangeStack prev_range(0, 0);
+
+ RangeMap::iterator to_remove_begin = it;
+ if (it->second.start < start) {
+ prev_range = it->second;
+ }
+ do {
+ if (it->first > end) {
+ if (it->second.start < end) {
+ it->second.start = end;
+ }
+ break;
+ }
+ ++it;
+ }
+ while (it != ranges_.end());
+
+ ranges_.erase(to_remove_begin, it);
+
+ if (prev_range.start != 0) {
+ ranges_.insert(RangeMap::value_type(start, prev_range));
+ }
+}
+
+
static bool AddressesMatch(void* key1, void* key2) {
return key1 == key2;
}
+void AllocationTracker::DeleteFunctionInfo(FunctionInfo** info) {
+ delete *info;
+}
+
+
AllocationTracker::AllocationTracker(
HeapObjectsMap* ids, StringsStorage* names)
: ids_(ids),
names_(names),
- id_to_function_info_(AddressesMatch) {
+ id_to_function_info_index_(AddressesMatch),
+ info_index_for_other_state_(0) {
+ FunctionInfo* info = new FunctionInfo();
+ info->name = "(root)";
+ function_info_list_.Add(info);
}
AllocationTracker::~AllocationTracker() {
unresolved_locations_.Iterate(DeleteUnresolvedLocation);
- for (HashMap::Entry* p = id_to_function_info_.Start();
- p != NULL;
- p = id_to_function_info_.Next(p)) {
- delete reinterpret_cast<AllocationTracker::FunctionInfo* >(p->value);
- }
+ function_info_list_.Iterate(&DeleteFunctionInfo);
}
@@ -193,13 +268,20 @@ void AllocationTracker::AllocationEvent(Address addr, int size) {
SharedFunctionInfo* shared = frame->function()->shared();
SnapshotObjectId id = ids_->FindOrAddEntry(
shared->address(), shared->Size(), false);
- allocation_trace_buffer_[length++] = id;
- AddFunctionInfo(shared, id);
+ allocation_trace_buffer_[length++] = AddFunctionInfo(shared, id);
it.Advance();
}
+ if (length == 0) {
+ unsigned index = functionInfoIndexForVMState(isolate->current_vm_state());
+ if (index != 0) {
+ allocation_trace_buffer_[length++] = index;
+ }
+ }
AllocationTraceNode* top_node = trace_tree_.AddPathFromEnd(
- Vector<SnapshotObjectId>(allocation_trace_buffer_, length));
+ Vector<unsigned>(allocation_trace_buffer_, length));
top_node->AddAllocation(size);
+
+ address_to_trace_.AddRange(addr, size, top_node->id());
}
@@ -209,24 +291,14 @@ static uint32_t SnapshotObjectIdHash(SnapshotObjectId id) {
}
-AllocationTracker::FunctionInfo* AllocationTracker::GetFunctionInfo(
- SnapshotObjectId id) {
- HashMap::Entry* entry = id_to_function_info_.Lookup(
- reinterpret_cast<void*>(id), SnapshotObjectIdHash(id), false);
- if (entry == NULL) {
- return NULL;
- }
- return reinterpret_cast<FunctionInfo*>(entry->value);
-}
-
-
-void AllocationTracker::AddFunctionInfo(SharedFunctionInfo* shared,
- SnapshotObjectId id) {
- HashMap::Entry* entry = id_to_function_info_.Lookup(
+unsigned AllocationTracker::AddFunctionInfo(SharedFunctionInfo* shared,
+ SnapshotObjectId id) {
+ HashMap::Entry* entry = id_to_function_info_index_.Lookup(
reinterpret_cast<void*>(id), SnapshotObjectIdHash(id), true);
if (entry->value == NULL) {
FunctionInfo* info = new FunctionInfo();
info->name = names_->GetFunctionName(shared->DebugName());
+ info->function_id = id;
if (shared->script()->IsScript()) {
Script* script = Script::cast(shared->script());
if (script->name()->IsName()) {
@@ -241,8 +313,22 @@ void AllocationTracker::AddFunctionInfo(SharedFunctionInfo* shared,
shared->start_position(),
info));
}
- entry->value = info;
+ entry->value = reinterpret_cast<void*>(function_info_list_.length());
+ function_info_list_.Add(info);
+ }
+ return static_cast<unsigned>(reinterpret_cast<intptr_t>((entry->value)));
+}
+
+
+unsigned AllocationTracker::functionInfoIndexForVMState(StateTag state) {
+ if (state != OTHER) return 0;
+ if (info_index_for_other_state_ == 0) {
+ FunctionInfo* info = new FunctionInfo();
+ info->name = "(V8 API)";
+ info_index_for_other_state_ = function_info_list_.length();
+ function_info_list_.Add(info);
}
+ return info_index_for_other_state_;
}
@@ -267,6 +353,7 @@ AllocationTracker::UnresolvedLocation::~UnresolvedLocation() {
void AllocationTracker::UnresolvedLocation::Resolve() {
if (script_.is_null()) return;
+ HandleScope scope(script_->GetIsolate());
info_->line = GetScriptLineNumber(script_, start_position_);
info_->column = GetScriptColumnNumber(script_, start_position_);
}
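
Editor's note: the reworked AllocationEvent above fills allocation_trace_buffer_ innermost-frame-first and then hands it to AllocationTraceTree::AddPathFromEnd, which walks the buffer backwards so the trace tree grows outermost-frame-first from the root. A minimal standalone sketch of that bottom-up insertion (the Node type and helper names here are illustrative, not V8's classes):

```cpp
#include <cstdio>
#include <vector>

// Stand-in for AllocationTraceNode: identified by a function-info index,
// with children created on demand.
struct Node {
  explicit Node(unsigned index) : function_info_index(index), allocated(0) {}
  ~Node() {
    for (size_t i = 0; i < children.size(); ++i) delete children[i];
  }
  Node* FindOrAddChild(unsigned index) {
    for (size_t i = 0; i < children.size(); ++i) {
      if (children[i]->function_info_index == index) return children[i];
    }
    children.push_back(new Node(index));
    return children.back();
  }
  unsigned function_info_index;
  unsigned allocated;
  std::vector<Node*> children;
};

Node* AddPathFromEnd(Node* root, const std::vector<unsigned>& path) {
  Node* node = root;
  // Walk the buffer backwards: the stack iterator filled it innermost-first,
  // so the trace tree is built outermost-first starting at the root.
  for (size_t i = path.size(); i-- > 0;) {
    node = node->FindOrAddChild(path[i]);
  }
  return node;  // leaf representing the full call path
}

int main() {
  Node root(0);
  unsigned frames[] = {3, 2, 1};  // innermost (3) ... outermost (1)
  std::vector<unsigned> path(frames, frames + 3);
  Node* top = AddPathFromEnd(&root, path);
  top->allocated += 64;  // attribute a 64-byte allocation to this call path
  std::printf("leaf index %u, bytes %u\n", top->function_info_index,
              top->allocated);
  return 0;
}
```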
diff --git a/deps/v8/src/allocation-tracker.h b/deps/v8/src/allocation-tracker.h
index 1a5dc9e123..b876d7d14e 100644
--- a/deps/v8/src/allocation-tracker.h
+++ b/deps/v8/src/allocation-tracker.h
@@ -28,6 +28,8 @@
#ifndef V8_ALLOCATION_TRACKER_H_
#define V8_ALLOCATION_TRACKER_H_
+#include <map>
+
namespace v8 {
namespace internal {
@@ -38,13 +40,13 @@ class AllocationTraceTree;
class AllocationTraceNode {
public:
AllocationTraceNode(AllocationTraceTree* tree,
- SnapshotObjectId shared_function_info_id);
+ unsigned function_info_index);
~AllocationTraceNode();
- AllocationTraceNode* FindChild(SnapshotObjectId shared_function_info_id);
- AllocationTraceNode* FindOrAddChild(SnapshotObjectId shared_function_info_id);
+ AllocationTraceNode* FindChild(unsigned function_info_index);
+ AllocationTraceNode* FindOrAddChild(unsigned function_info_index);
void AddAllocation(unsigned size);
- SnapshotObjectId function_id() const { return function_id_; }
+ unsigned function_info_index() const { return function_info_index_; }
unsigned allocation_size() const { return total_size_; }
unsigned allocation_count() const { return allocation_count_; }
unsigned id() const { return id_; }
@@ -54,7 +56,7 @@ class AllocationTraceNode {
private:
AllocationTraceTree* tree_;
- SnapshotObjectId function_id_;
+ unsigned function_info_index_;
unsigned total_size_;
unsigned allocation_count_;
unsigned id_;
@@ -68,7 +70,7 @@ class AllocationTraceTree {
public:
AllocationTraceTree();
~AllocationTraceTree();
- AllocationTraceNode* AddPathFromEnd(const Vector<SnapshotObjectId>& path);
+ AllocationTraceNode* AddPathFromEnd(const Vector<unsigned>& path);
AllocationTraceNode* root() { return &root_; }
unsigned next_node_id() { return next_node_id_++; }
void Print(AllocationTracker* tracker);
@@ -81,11 +83,36 @@ class AllocationTraceTree {
};
+class AddressToTraceMap {
+ public:
+ void AddRange(Address addr, int size, unsigned node_id);
+ unsigned GetTraceNodeId(Address addr);
+ void MoveObject(Address from, Address to, int size);
+ void Clear();
+ size_t size() { return ranges_.size(); }
+ void Print();
+
+ private:
+ struct RangeStack {
+ RangeStack(Address start, unsigned node_id)
+ : start(start), trace_node_id(node_id) {}
+ Address start;
+ unsigned trace_node_id;
+ };
+ // [start, end) -> trace
+ typedef std::map<Address, RangeStack> RangeMap;
+
+ void RemoveRange(Address start, Address end);
+
+ RangeMap ranges_;
+};
+
class AllocationTracker {
public:
struct FunctionInfo {
FunctionInfo();
const char* name;
+ SnapshotObjectId function_id;
const char* script_name;
int script_id;
int line;
@@ -99,11 +126,15 @@ class AllocationTracker {
void AllocationEvent(Address addr, int size);
AllocationTraceTree* trace_tree() { return &trace_tree_; }
- HashMap* id_to_function_info() { return &id_to_function_info_; }
- FunctionInfo* GetFunctionInfo(SnapshotObjectId id);
+ const List<FunctionInfo*>& function_info_list() const {
+ return function_info_list_;
+ }
+ AddressToTraceMap* address_to_trace() { return &address_to_trace_; }
private:
- void AddFunctionInfo(SharedFunctionInfo* info, SnapshotObjectId id);
+ unsigned AddFunctionInfo(SharedFunctionInfo* info, SnapshotObjectId id);
+ static void DeleteFunctionInfo(FunctionInfo** info);
+ unsigned functionInfoIndexForVMState(StateTag state);
class UnresolvedLocation {
public:
@@ -125,9 +156,12 @@ class AllocationTracker {
HeapObjectsMap* ids_;
StringsStorage* names_;
AllocationTraceTree trace_tree_;
- SnapshotObjectId allocation_trace_buffer_[kMaxAllocationTraceLength];
- HashMap id_to_function_info_;
+ unsigned allocation_trace_buffer_[kMaxAllocationTraceLength];
+ List<FunctionInfo*> function_info_list_;
+ HashMap id_to_function_info_index_;
List<UnresolvedLocation*> unresolved_locations_;
+ unsigned info_index_for_other_state_;
+ AddressToTraceMap address_to_trace_;
DISALLOW_COPY_AND_ASSIGN(AllocationTracker);
};
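
Editor's note: AddressToTraceMap, declared above, keys its std::map on the exclusive end address of each [start, end) range, which is why GetTraceNodeId in the .cc hunk uses upper_bound: the first entry whose key (range end) is strictly greater than the queried address is the only candidate range. A hedged, self-contained sketch of that lookup idea (illustrative types, not the V8 ones):

```cpp
#include <cstdint>
#include <iostream>
#include <map>

typedef uintptr_t Address;  // stand-in for V8's internal Address type

struct Range {
  Range(Address s, unsigned id) : start(s), trace_node_id(id) {}
  Address start;           // inclusive start of the [start, end) range
  unsigned trace_node_id;
};

// Keyed by the *exclusive end* of each range, mirroring RangeMap above.
typedef std::map<Address, Range> RangeMap;

unsigned Lookup(const RangeMap& ranges, Address addr) {
  // upper_bound: first entry whose end is strictly greater than addr.
  // If addr lies inside any recorded range, it can only be this one.
  RangeMap::const_iterator it = ranges.upper_bound(addr);
  if (it == ranges.end()) return 0;
  if (it->second.start <= addr) return it->second.trace_node_id;
  return 0;  // addr falls in the gap before this range
}

int main() {
  RangeMap ranges;
  // A 0x20-byte object allocated at 0x1000, attributed to trace node 7.
  ranges.insert(RangeMap::value_type(0x1020, Range(0x1000, 7)));
  std::cout << Lookup(ranges, 0x1008) << "\n";  // 7: inside the range
  std::cout << Lookup(ranges, 0x1020) << "\n";  // 0: end is exclusive
  return 0;
}
```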
diff --git a/deps/v8/src/api.cc b/deps/v8/src/api.cc
index 54a3e9145d..5dcf592296 100644
--- a/deps/v8/src/api.cc
+++ b/deps/v8/src/api.cc
@@ -95,11 +95,6 @@ namespace v8 {
(isolate)->handle_scope_implementer(); \
handle_scope_implementer->DecrementCallDepth(); \
if (has_pending_exception) { \
- if (handle_scope_implementer->CallDepthIsZero() && \
- (isolate)->is_out_of_memory()) { \
- if (!(isolate)->ignore_out_of_memory()) \
- i::V8::FatalProcessOutOfMemory(NULL); \
- } \
bool call_depth_is_zero = handle_scope_implementer->CallDepthIsZero(); \
(isolate)->OptionalRescheduleException(call_depth_is_zero); \
do_callback \
@@ -560,8 +555,8 @@ void V8::MakeWeak(i::Object** object,
}
-void V8::ClearWeak(i::Object** obj) {
- i::GlobalHandles::ClearWeakness(obj);
+void* V8::ClearWeak(i::Object** obj) {
+ return i::GlobalHandles::ClearWeakness(obj);
}
@@ -1611,111 +1606,86 @@ ScriptData* ScriptData::New(const char* data, int length) {
}
-// --- S c r i p t ---
+// --- S c r i p t s ---
-Local<Script> Script::New(v8::Handle<String> source,
- v8::ScriptOrigin* origin,
- v8::ScriptData* pre_data,
- v8::Handle<String> script_data) {
- i::Handle<i::String> str = Utils::OpenHandle(*source);
- i::Isolate* isolate = str->GetIsolate();
- ON_BAILOUT(isolate, "v8::Script::New()", return Local<Script>());
- LOG_API(isolate, "Script::New");
- ENTER_V8(isolate);
- i::SharedFunctionInfo* raw_result = NULL;
- { i::HandleScope scope(isolate);
- i::Handle<i::Object> name_obj;
- int line_offset = 0;
- int column_offset = 0;
- bool is_shared_cross_origin = false;
- if (origin != NULL) {
- if (!origin->ResourceName().IsEmpty()) {
- name_obj = Utils::OpenHandle(*origin->ResourceName());
- }
- if (!origin->ResourceLineOffset().IsEmpty()) {
- line_offset = static_cast<int>(origin->ResourceLineOffset()->Value());
- }
- if (!origin->ResourceColumnOffset().IsEmpty()) {
- column_offset =
- static_cast<int>(origin->ResourceColumnOffset()->Value());
- }
- if (!origin->ResourceIsSharedCrossOrigin().IsEmpty()) {
- v8::Isolate* v8_isolate = reinterpret_cast<v8::Isolate*>(isolate);
- is_shared_cross_origin =
- origin->ResourceIsSharedCrossOrigin() == v8::True(v8_isolate);
- }
- }
- EXCEPTION_PREAMBLE(isolate);
- i::ScriptDataImpl* pre_data_impl =
- static_cast<i::ScriptDataImpl*>(pre_data);
- // We assert that the pre-data is sane, even though we can actually
- // handle it if it turns out not to be in release mode.
- ASSERT(pre_data_impl == NULL || pre_data_impl->SanityCheck());
- // If the pre-data isn't sane we simply ignore it
- if (pre_data_impl != NULL && !pre_data_impl->SanityCheck()) {
- pre_data_impl = NULL;
- }
- i::Handle<i::SharedFunctionInfo> result =
- i::Compiler::CompileScript(str,
- name_obj,
- line_offset,
- column_offset,
- is_shared_cross_origin,
- isolate->global_context(),
- NULL,
- pre_data_impl,
- Utils::OpenHandle(*script_data, true),
- i::NOT_NATIVES_CODE);
- has_pending_exception = result.is_null();
- EXCEPTION_BAILOUT_CHECK(isolate, Local<Script>());
- raw_result = *result;
+// Internally, UnboundScript is a SharedFunctionInfo, and Script is a
+// JSFunction.
+
+ScriptCompiler::CachedData::CachedData(const uint8_t* data_, int length_,
+ BufferPolicy buffer_policy_)
+ : data(data_), length(length_), buffer_policy(buffer_policy_) {}
+
+
+ScriptCompiler::CachedData::~CachedData() {
+ if (buffer_policy == BufferOwned) {
+ delete[] data;
}
- i::Handle<i::SharedFunctionInfo> result(raw_result, isolate);
- return ToApiHandle<Script>(result);
}
-Local<Script> Script::New(v8::Handle<String> source,
- v8::Handle<Value> file_name) {
- ScriptOrigin origin(file_name);
- return New(source, &origin);
+Local<Script> UnboundScript::BindToCurrentContext() {
+ i::Handle<i::HeapObject> obj =
+ i::Handle<i::HeapObject>::cast(Utils::OpenHandle(this));
+ i::Handle<i::SharedFunctionInfo>
+ function_info(i::SharedFunctionInfo::cast(*obj), obj->GetIsolate());
+ i::Handle<i::JSFunction> function =
+ obj->GetIsolate()->factory()->NewFunctionFromSharedFunctionInfo(
+ function_info, obj->GetIsolate()->global_context());
+ return ToApiHandle<Script>(function);
}
-Local<Script> Script::Compile(v8::Handle<String> source,
- v8::ScriptOrigin* origin,
- v8::ScriptData* pre_data,
- v8::Handle<String> script_data) {
- i::Handle<i::String> str = Utils::OpenHandle(*source);
- i::Isolate* isolate = str->GetIsolate();
- ON_BAILOUT(isolate, "v8::Script::Compile()", return Local<Script>());
- LOG_API(isolate, "Script::Compile");
- ENTER_V8(isolate);
- Local<Script> generic = New(source, origin, pre_data, script_data);
- if (generic.IsEmpty())
- return generic;
- i::Handle<i::Object> obj = Utils::OpenHandle(*generic);
- i::Handle<i::SharedFunctionInfo> function =
- i::Handle<i::SharedFunctionInfo>(i::SharedFunctionInfo::cast(*obj));
- i::Handle<i::JSFunction> result =
- isolate->factory()->NewFunctionFromSharedFunctionInfo(
- function,
- isolate->global_context());
- return ToApiHandle<Script>(result);
+int UnboundScript::GetId() {
+ i::Handle<i::HeapObject> obj =
+ i::Handle<i::HeapObject>::cast(Utils::OpenHandle(this));
+ i::Isolate* isolate = obj->GetIsolate();
+ ON_BAILOUT(isolate, "v8::UnboundScript::GetId()", return -1);
+ LOG_API(isolate, "v8::UnboundScript::GetId");
+ {
+ i::HandleScope scope(isolate);
+ i::Handle<i::SharedFunctionInfo> function_info(
+ i::SharedFunctionInfo::cast(*obj));
+ i::Handle<i::Script> script(i::Script::cast(function_info->script()));
+ return script->id()->value();
+ }
}
-Local<Script> Script::Compile(v8::Handle<String> source,
- v8::Handle<Value> file_name,
- v8::Handle<String> script_data) {
- ScriptOrigin origin(file_name);
- return Compile(source, &origin, 0, script_data);
+int UnboundScript::GetLineNumber(int code_pos) {
+ i::Handle<i::HeapObject> obj =
+ i::Handle<i::HeapObject>::cast(Utils::OpenHandle(this));
+ i::Isolate* isolate = obj->GetIsolate();
+ ON_BAILOUT(isolate, "v8::UnboundScript::GetLineNumber()", return -1);
+ LOG_API(isolate, "UnboundScript::GetLineNumber");
+ if (obj->IsScript()) {
+ i::Handle<i::Script> script(i::Script::cast(*obj));
+ return i::GetScriptLineNumber(script, code_pos);
+ } else {
+ return -1;
+ }
+}
+
+
+Handle<Value> UnboundScript::GetScriptName() {
+ i::Handle<i::HeapObject> obj =
+ i::Handle<i::HeapObject>::cast(Utils::OpenHandle(this));
+ i::Isolate* isolate = obj->GetIsolate();
+ ON_BAILOUT(isolate, "v8::UnboundScript::GetName()",
+ return Handle<String>());
+ LOG_API(isolate, "UnboundScript::GetName");
+ if (obj->IsScript()) {
+ i::Object* name = i::Script::cast(*obj)->name();
+ return Utils::ToLocal(i::Handle<i::Object>(name, isolate));
+ } else {
+ return Handle<String>();
+ }
}
Local<Value> Script::Run() {
- // If execution is terminating, Compile(script)->Run() requires this check.
+ // If execution is terminating, Compile(..)->Run() requires this
+ // check.
if (this == NULL) return Local<Value>();
i::Handle<i::HeapObject> obj =
i::Handle<i::HeapObject>::cast(Utils::OpenHandle(this));
@@ -1728,15 +1698,8 @@ Local<Value> Script::Run() {
i::Object* raw_result = NULL;
{
i::HandleScope scope(isolate);
- i::Handle<i::JSFunction> fun;
- if (obj->IsSharedFunctionInfo()) {
- i::Handle<i::SharedFunctionInfo>
- function_info(i::SharedFunctionInfo::cast(*obj), isolate);
- fun = isolate->factory()->NewFunctionFromSharedFunctionInfo(
- function_info, isolate->global_context());
- } else {
- fun = i::Handle<i::JSFunction>(i::JSFunction::cast(*obj), isolate);
- }
+ i::Handle<i::JSFunction> fun =
+ i::Handle<i::JSFunction>(i::JSFunction::cast(*obj), isolate);
EXCEPTION_PREAMBLE(isolate);
i::Handle<i::Object> receiver(
isolate->context()->global_proxy(), isolate);
@@ -1750,78 +1713,149 @@ Local<Value> Script::Run() {
}
-static i::Handle<i::SharedFunctionInfo> OpenScript(Script* script) {
- i::Handle<i::Object> obj = Utils::OpenHandle(script);
- i::Handle<i::SharedFunctionInfo> result;
- if (obj->IsSharedFunctionInfo()) {
- result =
- i::Handle<i::SharedFunctionInfo>(i::SharedFunctionInfo::cast(*obj));
- } else {
- result =
- i::Handle<i::SharedFunctionInfo>(i::JSFunction::cast(*obj)->shared());
+Local<UnboundScript> Script::GetUnboundScript() {
+ i::Handle<i::Object> obj = Utils::OpenHandle(this);
+ return ToApiHandle<UnboundScript>(
+ i::Handle<i::SharedFunctionInfo>(i::JSFunction::cast(*obj)->shared()));
+}
+
+
+Local<UnboundScript> ScriptCompiler::CompileUnbound(
+ Isolate* v8_isolate,
+ Source* source,
+ CompileOptions options) {
+ i::ScriptDataImpl* script_data_impl = NULL;
+ i::CachedDataMode cached_data_mode = i::NO_CACHED_DATA;
+ if (options & kProduceDataToCache) {
+ cached_data_mode = i::PRODUCE_CACHED_DATA;
+ ASSERT(source->cached_data == NULL);
+ if (source->cached_data) {
+ // Asked to produce cached data even though there is some already -> not
+ // good. In release mode, try to do the right thing: Just regenerate the
+ // data.
+ delete source->cached_data;
+ source->cached_data = NULL;
+ }
+ } else if (source->cached_data) {
+ // FIXME(marja): Make compiler use CachedData directly. Aligning needs to be
+ // taken care of.
+ script_data_impl = static_cast<i::ScriptDataImpl*>(ScriptData::New(
+ reinterpret_cast<const char*>(source->cached_data->data),
+ source->cached_data->length));
+ // We assert that the pre-data is sane, even though we can actually
+ // handle it if it turns out not to be in release mode.
+ ASSERT(script_data_impl->SanityCheck());
+ if (script_data_impl->SanityCheck()) {
+ cached_data_mode = i::CONSUME_CACHED_DATA;
+ } else {
+ // If the pre-data isn't sane we simply ignore it.
+ delete script_data_impl;
+ script_data_impl = NULL;
+ delete source->cached_data;
+ source->cached_data = NULL;
+ }
}
- return result;
-}
-
-int Script::GetId() {
- i::Handle<i::HeapObject> obj =
- i::Handle<i::HeapObject>::cast(Utils::OpenHandle(this));
- i::Isolate* isolate = obj->GetIsolate();
- ON_BAILOUT(isolate, "v8::Script::Id()", return -1);
- LOG_API(isolate, "Script::Id");
- {
- i::HandleScope scope(isolate);
- i::Handle<i::SharedFunctionInfo> function_info = OpenScript(this);
- i::Handle<i::Script> script(i::Script::cast(function_info->script()));
- return script->id()->value();
+ i::Handle<i::String> str = Utils::OpenHandle(*(source->source_string));
+ i::Isolate* isolate = reinterpret_cast<i::Isolate*>(v8_isolate);
+ ON_BAILOUT(isolate, "v8::ScriptCompiler::CompileUnbound()",
+ return Local<UnboundScript>());
+ LOG_API(isolate, "ScriptCompiler::CompileUnbound");
+ ENTER_V8(isolate);
+ i::SharedFunctionInfo* raw_result = NULL;
+ { i::HandleScope scope(isolate);
+ i::Handle<i::Object> name_obj;
+ int line_offset = 0;
+ int column_offset = 0;
+ bool is_shared_cross_origin = false;
+ if (!source->resource_name.IsEmpty()) {
+ name_obj = Utils::OpenHandle(*(source->resource_name));
+ }
+ if (!source->resource_line_offset.IsEmpty()) {
+ line_offset = static_cast<int>(source->resource_line_offset->Value());
+ }
+ if (!source->resource_column_offset.IsEmpty()) {
+ column_offset =
+ static_cast<int>(source->resource_column_offset->Value());
+ }
+ if (!source->resource_is_shared_cross_origin.IsEmpty()) {
+ v8::Isolate* v8_isolate = reinterpret_cast<v8::Isolate*>(isolate);
+ is_shared_cross_origin =
+ source->resource_is_shared_cross_origin == v8::True(v8_isolate);
+ }
+ EXCEPTION_PREAMBLE(isolate);
+ i::Handle<i::SharedFunctionInfo> result =
+ i::Compiler::CompileScript(str,
+ name_obj,
+ line_offset,
+ column_offset,
+ is_shared_cross_origin,
+ isolate->global_context(),
+ NULL,
+ &script_data_impl,
+ cached_data_mode,
+ i::NOT_NATIVES_CODE);
+ has_pending_exception = result.is_null();
+ EXCEPTION_BAILOUT_CHECK(isolate, Local<UnboundScript>());
+ raw_result = *result;
+ if ((options & kProduceDataToCache) && script_data_impl != NULL) {
+ // script_data_impl now contains the data that was generated. source will
+ // take the ownership.
+ source->cached_data = new CachedData(
+ reinterpret_cast<const uint8_t*>(script_data_impl->Data()),
+ script_data_impl->Length(), CachedData::BufferOwned);
+ script_data_impl->owns_store_ = false;
+ }
+ delete script_data_impl;
}
+ i::Handle<i::SharedFunctionInfo> result(raw_result, isolate);
+ return ToApiHandle<UnboundScript>(result);
}
-int Script::GetLineNumber(int code_pos) {
- i::Handle<i::HeapObject> obj =
- i::Handle<i::HeapObject>::cast(Utils::OpenHandle(this));
- i::Isolate* isolate = obj->GetIsolate();
- ON_BAILOUT(isolate, "v8::Script::GetLineNumber()", return -1);
- LOG_API(isolate, "Script::GetLineNumber");
- if (obj->IsScript()) {
- i::Handle<i::Script> script = i::Handle<i::Script>(i::Script::cast(*obj));
- return i::GetScriptLineNumber(script, code_pos);
- } else {
- return -1;
- }
+Local<Script> ScriptCompiler::Compile(
+ Isolate* v8_isolate,
+ Source* source,
+ CompileOptions options) {
+ i::Isolate* isolate = reinterpret_cast<i::Isolate*>(v8_isolate);
+ ON_BAILOUT(isolate, "v8::ScriptCompiler::Compile()",
+ return Local<Script>());
+ LOG_API(isolate, "ScriptCompiler::CompiletBound()");
+ ENTER_V8(isolate);
+ Local<UnboundScript> generic =
+ CompileUnbound(v8_isolate, source, options);
+ if (generic.IsEmpty()) return Local<Script>();
+ return generic->BindToCurrentContext();
}
-Handle<Value> Script::GetScriptName() {
- i::Handle<i::HeapObject> obj =
- i::Handle<i::HeapObject>::cast(Utils::OpenHandle(this));
- i::Isolate* isolate = obj->GetIsolate();
- ON_BAILOUT(isolate, "v8::Script::GetName()", return Handle<String>());
- LOG_API(isolate, "Script::GetName");
- if (obj->IsScript()) {
- i::Object* name = i::Script::cast(*obj)->name();
- return Utils::ToLocal(i::Handle<i::Object>(name, isolate));
- } else {
- return Handle<String>();
+Local<Script> Script::Compile(v8::Handle<String> source,
+ v8::ScriptOrigin* origin,
+ ScriptData* script_data) {
+ i::Handle<i::String> str = Utils::OpenHandle(*source);
+ ScriptCompiler::CachedData* cached_data = NULL;
+ if (script_data) {
+ cached_data = new ScriptCompiler::CachedData(
+ reinterpret_cast<const uint8_t*>(script_data->Data()),
+ script_data->Length());
+ }
+ if (origin) {
+ ScriptCompiler::Source script_source(source, *origin, cached_data);
+ return ScriptCompiler::Compile(
+ reinterpret_cast<v8::Isolate*>(str->GetIsolate()),
+ &script_source);
}
+ ScriptCompiler::Source script_source(source, cached_data);
+ return ScriptCompiler::Compile(
+ reinterpret_cast<v8::Isolate*>(str->GetIsolate()),
+ &script_source);
}
-void Script::SetData(v8::Handle<String> data) {
- i::Handle<i::HeapObject> obj =
- i::Handle<i::HeapObject>::cast(Utils::OpenHandle(this));
- i::Isolate* isolate = obj->GetIsolate();
- ON_BAILOUT(isolate, "v8::Script::SetData()", return);
- LOG_API(isolate, "Script::SetData");
- {
- i::HandleScope scope(isolate);
- i::Handle<i::SharedFunctionInfo> function_info = OpenScript(this);
- i::Handle<i::Object> raw_data = Utils::OpenHandle(*data);
- i::Handle<i::Script> script(i::Script::cast(function_info->script()));
- script->set_data(*raw_data);
- }
+Local<Script> Script::Compile(v8::Handle<String> source,
+ v8::Handle<String> file_name) {
+ ScriptOrigin origin(file_name);
+ return Compile(source, &origin);
}
@@ -1980,21 +2014,6 @@ v8::Handle<Value> Message::GetScriptResourceName() const {
}
-v8::Handle<Value> Message::GetScriptData() const {
- i::Isolate* isolate = Utils::OpenHandle(this)->GetIsolate();
- ENTER_V8(isolate);
- EscapableHandleScope scope(reinterpret_cast<Isolate*>(isolate));
- i::Handle<i::JSMessageObject> message =
- i::Handle<i::JSMessageObject>::cast(Utils::OpenHandle(this));
- // Return this.script.data.
- i::Handle<i::JSValue> script =
- i::Handle<i::JSValue>::cast(i::Handle<i::Object>(message->script(),
- isolate));
- i::Handle<i::Object> data(i::Script::cast(script->value())->data(), isolate);
- return scope.Escape(Utils::ToLocal(data));
-}
-
-
v8::Handle<v8::StackTrace> Message::GetStackTrace() const {
i::Isolate* isolate = Utils::OpenHandle(this)->GetIsolate();
ENTER_V8(isolate);
@@ -2153,9 +2172,10 @@ Local<StackFrame> StackTrace::GetFrame(uint32_t index) const {
ENTER_V8(isolate);
EscapableHandleScope scope(reinterpret_cast<Isolate*>(isolate));
i::Handle<i::JSArray> self = Utils::OpenHandle(this);
- i::Object* raw_object = self->GetElementNoExceptionThrown(isolate, index);
- i::Handle<i::JSObject> obj(i::JSObject::cast(raw_object));
- return scope.Escape(Utils::StackFrameToLocal(obj));
+ i::Handle<i::Object> obj =
+ i::Object::GetElementNoExceptionThrown(isolate, self, index);
+ i::Handle<i::JSObject> jsobj = i::Handle<i::JSObject>::cast(obj);
+ return scope.Escape(Utils::StackFrameToLocal(jsobj));
}
@@ -2686,6 +2706,20 @@ void v8::Array::CheckCast(Value* that) {
}
+void v8::Promise::CheckCast(Value* that) {
+ Utils::ApiCheck(that->IsPromise(),
+ "v8::Promise::Cast()",
+ "Could not convert to promise");
+}
+
+
+void v8::Promise::Resolver::CheckCast(Value* that) {
+ Utils::ApiCheck(that->IsPromise(),
+ "v8::Promise::Resolver::Cast()",
+ "Could not convert to promise resolver");
+}
+
+
void v8::ArrayBuffer::CheckCast(Value* that) {
i::Handle<i::Object> obj = Utils::OpenHandle(that);
Utils::ApiCheck(obj->IsJSArrayBuffer(),
@@ -3023,7 +3057,7 @@ bool v8::Object::Set(v8::Handle<Value> key, v8::Handle<Value> value,
key_obj,
value_obj,
static_cast<PropertyAttributes>(attribs),
- i::kNonStrictMode);
+ i::SLOPPY);
has_pending_exception = obj.is_null();
EXCEPTION_BAILOUT_CHECK(isolate, false);
return true;
@@ -3043,7 +3077,7 @@ bool v8::Object::Set(uint32_t index, v8::Handle<Value> value) {
index,
value_obj,
NONE,
- i::kNonStrictMode);
+ i::SLOPPY);
has_pending_exception = obj.is_null();
EXCEPTION_BAILOUT_CHECK(isolate, false);
return true;
@@ -3148,7 +3182,8 @@ PropertyAttribute v8::Object::GetPropertyAttributes(v8::Handle<Value> key) {
EXCEPTION_BAILOUT_CHECK(isolate, static_cast<PropertyAttribute>(NONE));
}
i::Handle<i::Name> key_name = i::Handle<i::Name>::cast(key_obj);
- PropertyAttributes result = self->GetPropertyAttribute(*key_name);
+ PropertyAttributes result =
+ i::JSReceiver::GetPropertyAttribute(self, key_name);
if (result == ABSENT) return static_cast<PropertyAttribute>(NONE);
return static_cast<PropertyAttribute>(result);
}
@@ -3422,6 +3457,27 @@ bool Object::SetDeclaredAccessor(Local<String> name,
}
+void Object::SetAccessorProperty(Local<String> name,
+ Local<Function> getter,
+ Handle<Function> setter,
+ PropertyAttribute attribute,
+ AccessControl settings) {
+ i::Isolate* isolate = Utils::OpenHandle(this)->GetIsolate();
+ ON_BAILOUT(isolate, "v8::Object::SetAccessorProperty()", return);
+ ENTER_V8(isolate);
+ i::HandleScope scope(isolate);
+ i::Handle<i::Object> getter_i = v8::Utils::OpenHandle(*getter);
+ i::Handle<i::Object> setter_i = v8::Utils::OpenHandle(*setter, true);
+ if (setter_i.is_null()) setter_i = isolate->factory()->null_value();
+ i::JSObject::DefineAccessor(v8::Utils::OpenHandle(this),
+ v8::Utils::OpenHandle(*name),
+ getter_i,
+ setter_i,
+ static_cast<PropertyAttributes>(attribute),
+ settings);
+}
+
+
bool v8::Object::HasOwnProperty(Handle<String> key) {
i::Isolate* isolate = Utils::OpenHandle(this)->GetIsolate();
ON_BAILOUT(isolate, "v8::Object::HasOwnProperty()",
@@ -3675,7 +3731,7 @@ void PrepareExternalArrayElements(i::Handle<i::JSObject> object,
isolate->factory()->NewExternalArray(length, array_type, data);
i::Handle<i::Map> external_array_map =
- isolate->factory()->GetElementsTransitionMap(
+ i::JSObject::GetElementsTransitionMap(
object,
GetElementsKindFromExternalArrayType(array_type));
@@ -4056,7 +4112,9 @@ bool Function::IsBuiltin() const {
int Function::ScriptId() const {
i::Handle<i::JSFunction> func = Utils::OpenHandle(this);
- if (!func->shared()->script()->IsScript()) return v8::Script::kNoScriptId;
+ if (!func->shared()->script()->IsScript()) {
+ return v8::UnboundScript::kNoScriptId;
+ }
i::Handle<i::Script> script(i::Script::cast(func->shared()->script()));
return script->id()->value();
}
@@ -5054,8 +5112,8 @@ int v8::V8::ContextDisposedNotification() {
}
-bool v8::V8::InitializeICU() {
- return i::InitializeICU();
+bool v8::V8::InitializeICU(const char* icu_data_file) {
+ return i::InitializeICU(icu_data_file);
}
@@ -5172,12 +5230,6 @@ Handle<Value> v8::Context::GetSecurityToken() {
}
-bool Context::HasOutOfMemoryException() {
- i::Handle<i::Context> env = Utils::OpenHandle(this);
- return env->has_out_of_memory();
-}
-
-
v8::Isolate* Context::GetIsolate() {
i::Handle<i::Context> env = Utils::OpenHandle(this);
return reinterpret_cast<Isolate*>(env->GetIsolate());
@@ -5351,6 +5403,8 @@ inline Local<String> NewString(Isolate* v8_isolate,
if (length == -1) length = StringLength(data);
i::Handle<i::String> result = NewString(
isolate->factory(), type, i::Vector<const Char>(data, length));
+ // We do not expect this to fail. Change this if it does.
+ CHECK(!result.is_null());
if (type == String::kUndetectableString) {
result->MarkAsUndetectable();
}
@@ -5408,6 +5462,8 @@ Local<String> v8::String::Concat(Handle<String> left, Handle<String> right) {
i::Handle<i::String> right_string = Utils::OpenHandle(*right);
i::Handle<i::String> result = isolate->factory()->NewConsString(left_string,
right_string);
+ // We do not expect this to fail. Change this if it does.
+ CHECK(!result.is_null());
return Utils::ToLocal(result);
}
@@ -5415,14 +5471,22 @@ Local<String> v8::String::Concat(Handle<String> left, Handle<String> right) {
static i::Handle<i::String> NewExternalStringHandle(
i::Isolate* isolate,
v8::String::ExternalStringResource* resource) {
- return isolate->factory()->NewExternalStringFromTwoByte(resource);
+ i::Handle<i::String> result =
+ isolate->factory()->NewExternalStringFromTwoByte(resource);
+ // We do not expect this to fail. Change this if it does.
+ CHECK(!result.is_null());
+ return result;
}
static i::Handle<i::String> NewExternalAsciiStringHandle(
i::Isolate* isolate,
v8::String::ExternalAsciiStringResource* resource) {
- return isolate->factory()->NewExternalStringFromAscii(resource);
+ i::Handle<i::String> result =
+ isolate->factory()->NewExternalStringFromAscii(resource);
+ // We do not expect this to fail. Change this if it does.
+ CHECK(!result.is_null());
+ return result;
}
@@ -5653,30 +5717,18 @@ void v8::Date::DateTimeConfigurationChangeNotification(Isolate* isolate) {
i_isolate->date_cache()->ResetDateCache();
- i::HandleScope scope(i_isolate);
- // Get the function ResetDateCache (defined in date.js).
- i::Handle<i::String> func_name_str =
- i_isolate->factory()->InternalizeOneByteString(
- STATIC_ASCII_VECTOR("ResetDateCache"));
- i::MaybeObject* result =
- i_isolate->js_builtins_object()->GetProperty(*func_name_str);
- i::Object* object_func;
- if (!result->ToObject(&object_func)) {
+ if (!i_isolate->eternal_handles()->Exists(
+ i::EternalHandles::DATE_CACHE_VERSION)) {
return;
}
-
- if (object_func->IsJSFunction()) {
- i::Handle<i::JSFunction> func =
- i::Handle<i::JSFunction>(i::JSFunction::cast(object_func));
-
- // Call ResetDateCache(0 but expect no exceptions:
- bool caught_exception = false;
- i::Execution::TryCall(func,
- i_isolate->js_builtins_object(),
- 0,
- NULL,
- &caught_exception);
- }
+ i::Handle<i::FixedArray> date_cache_version =
+ i::Handle<i::FixedArray>::cast(i_isolate->eternal_handles()->GetSingleton(
+ i::EternalHandles::DATE_CACHE_VERSION));
+ ASSERT_EQ(1, date_cache_version->length());
+ CHECK(date_cache_version->get(0)->IsSmi());
+ date_cache_version->set(
+ 0,
+ i::Smi::FromInt(i::Smi::cast(date_cache_version->get(0))->value() + 1));
}
@@ -5778,6 +5830,130 @@ Local<Object> Array::CloneElementAt(uint32_t index) {
}
+bool Value::IsPromise() const {
+ i::Handle<i::Object> val = Utils::OpenHandle(this);
+ if (!val->IsJSObject()) return false;
+ i::Handle<i::JSObject> obj = i::Handle<i::JSObject>::cast(val);
+ i::Isolate* isolate = obj->GetIsolate();
+ LOG_API(isolate, "IsPromise");
+ ENTER_V8(isolate);
+ EXCEPTION_PREAMBLE(isolate);
+ i::Handle<i::Object> argv[] = { obj };
+ i::Handle<i::Object> b = i::Execution::Call(
+ isolate,
+ handle(
+ isolate->context()->global_object()->native_context()->is_promise()),
+ isolate->factory()->undefined_value(),
+ ARRAY_SIZE(argv), argv,
+ &has_pending_exception,
+ false);
+ EXCEPTION_BAILOUT_CHECK(isolate, false);
+ return b->BooleanValue();
+}
+
+
+Local<Promise::Resolver> Promise::Resolver::New(Isolate* v8_isolate) {
+ i::Isolate* isolate = reinterpret_cast<i::Isolate*>(v8_isolate);
+ LOG_API(isolate, "Promise::Resolver::New");
+ ENTER_V8(isolate);
+ EXCEPTION_PREAMBLE(isolate);
+ i::Handle<i::Object> result = i::Execution::Call(
+ isolate,
+ handle(isolate->context()->global_object()->native_context()->
+ promise_create()),
+ isolate->factory()->undefined_value(),
+ 0, NULL,
+ &has_pending_exception,
+ false);
+ EXCEPTION_BAILOUT_CHECK(isolate, Local<Promise::Resolver>());
+ return Local<Promise::Resolver>::Cast(Utils::ToLocal(result));
+}
+
+
+Local<Promise> Promise::Resolver::GetPromise() {
+ i::Handle<i::JSObject> promise = Utils::OpenHandle(this);
+ return Local<Promise>::Cast(Utils::ToLocal(promise));
+}
+
+
+void Promise::Resolver::Resolve(Handle<Value> value) {
+ i::Handle<i::JSObject> promise = Utils::OpenHandle(this);
+ i::Isolate* isolate = promise->GetIsolate();
+ LOG_API(isolate, "Promise::Resolver::Resolve");
+ ENTER_V8(isolate);
+ EXCEPTION_PREAMBLE(isolate);
+ i::Handle<i::Object> argv[] = { promise, Utils::OpenHandle(*value) };
+ i::Execution::Call(
+ isolate,
+ handle(isolate->context()->global_object()->native_context()->
+ promise_resolve()),
+ isolate->factory()->undefined_value(),
+ ARRAY_SIZE(argv), argv,
+ &has_pending_exception,
+ false);
+ EXCEPTION_BAILOUT_CHECK(isolate, /* void */ ;);
+}
+
+
+void Promise::Resolver::Reject(Handle<Value> value) {
+ i::Handle<i::JSObject> promise = Utils::OpenHandle(this);
+ i::Isolate* isolate = promise->GetIsolate();
+ LOG_API(isolate, "Promise::Resolver::Reject");
+ ENTER_V8(isolate);
+ EXCEPTION_PREAMBLE(isolate);
+ i::Handle<i::Object> argv[] = { promise, Utils::OpenHandle(*value) };
+ i::Execution::Call(
+ isolate,
+ handle(isolate->context()->global_object()->native_context()->
+ promise_reject()),
+ isolate->factory()->undefined_value(),
+ ARRAY_SIZE(argv), argv,
+ &has_pending_exception,
+ false);
+ EXCEPTION_BAILOUT_CHECK(isolate, /* void */ ;);
+}
+
+
+Local<Promise> Promise::Chain(Handle<Function> handler) {
+ i::Handle<i::JSObject> promise = Utils::OpenHandle(this);
+ i::Isolate* isolate = promise->GetIsolate();
+ LOG_API(isolate, "Promise::Chain");
+ ENTER_V8(isolate);
+ EXCEPTION_PREAMBLE(isolate);
+ i::Handle<i::Object> argv[] = { Utils::OpenHandle(*handler) };
+ i::Handle<i::Object> result = i::Execution::Call(
+ isolate,
+ handle(isolate->context()->global_object()->native_context()->
+ promise_chain()),
+ promise,
+ ARRAY_SIZE(argv), argv,
+ &has_pending_exception,
+ false);
+ EXCEPTION_BAILOUT_CHECK(isolate, Local<Promise>());
+ return Local<Promise>::Cast(Utils::ToLocal(result));
+}
+
+
+Local<Promise> Promise::Catch(Handle<Function> handler) {
+ i::Handle<i::JSObject> promise = Utils::OpenHandle(this);
+ i::Isolate* isolate = promise->GetIsolate();
+ LOG_API(isolate, "Promise::Catch");
+ ENTER_V8(isolate);
+ EXCEPTION_PREAMBLE(isolate);
+ i::Handle<i::Object> argv[] = { Utils::OpenHandle(*handler) };
+ i::Handle<i::Object> result = i::Execution::Call(
+ isolate,
+ handle(isolate->context()->global_object()->native_context()->
+ promise_catch()),
+ promise,
+ ARRAY_SIZE(argv), argv,
+ &has_pending_exception,
+ false);
+ EXCEPTION_BAILOUT_CHECK(isolate, Local<Promise>());
+ return Local<Promise>::Cast(Utils::ToLocal(result));
+}
+
+
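A minimal embedder-side sketch (not part of the patch) of the Promise API added above; an entered context is assumed, and on_fulfilled/on_rejected are assumed v8::Function handles obtained elsewhere:

  v8::HandleScope handle_scope(isolate);
  v8::Local<v8::Promise::Resolver> resolver =
      v8::Promise::Resolver::New(isolate);
  v8::Local<v8::Promise> promise = resolver->GetPromise();

  // Chain/Catch attach handlers via the promise machinery in promise.js.
  promise = promise->Chain(on_fulfilled);
  promise = promise->Catch(on_rejected);

  // Settle the promise from C++; Value::IsPromise(), also added above,
  // reports true for the returned handles.
  resolver->Resolve(v8::Integer::New(isolate, 42));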
bool v8::ArrayBuffer::IsExternal() const {
return Utils::OpenHandle(this)->is_external();
}
@@ -5842,8 +6018,15 @@ Local<ArrayBuffer> v8::ArrayBuffer::New(Isolate* isolate, void* data,
Local<ArrayBuffer> v8::ArrayBufferView::Buffer() {
i::Handle<i::JSArrayBufferView> obj = Utils::OpenHandle(this);
- ASSERT(obj->buffer()->IsJSArrayBuffer());
- i::Handle<i::JSArrayBuffer> buffer(i::JSArrayBuffer::cast(obj->buffer()));
+ i::Handle<i::JSArrayBuffer> buffer;
+ if (obj->IsJSDataView()) {
+ i::Handle<i::JSDataView> data_view(i::JSDataView::cast(*obj));
+ ASSERT(data_view->buffer()->IsJSArrayBuffer());
+ buffer = i::handle(i::JSArrayBuffer::cast(data_view->buffer()));
+ } else {
+ ASSERT(obj->IsJSTypedArray());
+ buffer = i::JSTypedArray::cast(*obj)->GetBuffer();
+ }
return Utils::ToLocal(buffer);
}
@@ -5914,7 +6097,9 @@ i::Handle<i::JSTypedArray> NewTypedArray(
isolate->factory()->NewExternalArray(
static_cast<int>(length), array_type,
static_cast<uint8_t*>(buffer->backing_store()) + byte_offset);
- obj->set_elements(*elements);
+ i::Handle<i::Map> map =
+ i::JSObject::GetElementsTransitionMap(obj, elements_kind);
+ obj->set_map_and_elements(*map, *elements);
return obj;
}
@@ -5954,40 +6139,84 @@ Local<DataView> DataView::New(Handle<ArrayBuffer> array_buffer,
}
-Local<Symbol> v8::Symbol::New(Isolate* isolate, const char* data, int length) {
+Local<Symbol> v8::Symbol::New(Isolate* isolate, Local<String> name) {
i::Isolate* i_isolate = reinterpret_cast<i::Isolate*>(isolate);
EnsureInitializedForIsolate(i_isolate, "v8::Symbol::New()");
LOG_API(i_isolate, "Symbol::New()");
ENTER_V8(i_isolate);
i::Handle<i::Symbol> result = i_isolate->factory()->NewSymbol();
- if (data != NULL) {
- if (length == -1) length = i::StrLength(data);
- i::Handle<i::String> name = i_isolate->factory()->NewStringFromUtf8(
- i::Vector<const char>(data, length));
- result->set_name(*name);
- }
+ if (!name.IsEmpty()) result->set_name(*Utils::OpenHandle(*name));
return Utils::ToLocal(result);
}
-Local<Private> v8::Private::New(
- Isolate* isolate, const char* data, int length) {
+Local<Symbol> v8::Symbol::For(Isolate* isolate, Local<String> name) {
+ i::Isolate* i_isolate = reinterpret_cast<i::Isolate*>(isolate);
+ i::Handle<i::String> i_name = Utils::OpenHandle(*name);
+ i::Handle<i::JSObject> registry = i_isolate->GetSymbolRegistry();
+ i::Handle<i::String> part = i_isolate->factory()->for_string();
+ i::Handle<i::JSObject> symbols =
+ i::Handle<i::JSObject>::cast(i::JSObject::GetProperty(registry, part));
+ i::Handle<i::Object> symbol = i::JSObject::GetProperty(symbols, i_name);
+ if (!symbol->IsSymbol()) {
+ ASSERT(symbol->IsUndefined());
+ symbol = i_isolate->factory()->NewSymbol();
+ i::Handle<i::Symbol>::cast(symbol)->set_name(*i_name);
+ i::JSObject::SetProperty(symbols, i_name, symbol, NONE, i::STRICT);
+ }
+ return Utils::ToLocal(i::Handle<i::Symbol>::cast(symbol));
+}
+
+
+Local<Symbol> v8::Symbol::ForApi(Isolate* isolate, Local<String> name) {
+ i::Isolate* i_isolate = reinterpret_cast<i::Isolate*>(isolate);
+ i::Handle<i::String> i_name = Utils::OpenHandle(*name);
+ i::Handle<i::JSObject> registry = i_isolate->GetSymbolRegistry();
+ i::Handle<i::String> part = i_isolate->factory()->for_api_string();
+ i::Handle<i::JSObject> symbols =
+ i::Handle<i::JSObject>::cast(i::JSObject::GetProperty(registry, part));
+ i::Handle<i::Object> symbol = i::JSObject::GetProperty(symbols, i_name);
+ if (!symbol->IsSymbol()) {
+ ASSERT(symbol->IsUndefined());
+ symbol = i_isolate->factory()->NewSymbol();
+ i::Handle<i::Symbol>::cast(symbol)->set_name(*i_name);
+ i::JSObject::SetProperty(symbols, i_name, symbol, NONE, i::STRICT);
+ }
+ return Utils::ToLocal(i::Handle<i::Symbol>::cast(symbol));
+}
+
+
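A short sketch (not part of the patch) of the reworked Symbol constructors above; isolate setup assumed:

  v8::Local<v8::String> name = v8::String::NewFromUtf8(isolate, "my-symbol");

  // Symbol::New now takes a Local<String> name instead of raw char data.
  v8::Local<v8::Symbol> fresh = v8::Symbol::New(isolate, name);

  // Symbol::For consults the per-isolate registry: the same name always
  // yields the same symbol. ForApi uses a separate registry partition.
  v8::Local<v8::Symbol> shared = v8::Symbol::For(isolate, name);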
+Local<Private> v8::Private::New(Isolate* isolate, Local<String> name) {
i::Isolate* i_isolate = reinterpret_cast<i::Isolate*>(isolate);
EnsureInitializedForIsolate(i_isolate, "v8::Private::New()");
LOG_API(i_isolate, "Private::New()");
ENTER_V8(i_isolate);
i::Handle<i::Symbol> symbol = i_isolate->factory()->NewPrivateSymbol();
- if (data != NULL) {
- if (length == -1) length = i::StrLength(data);
- i::Handle<i::String> name = i_isolate->factory()->NewStringFromUtf8(
- i::Vector<const char>(data, length));
- symbol->set_name(*name);
- }
+ if (!name.IsEmpty()) symbol->set_name(*Utils::OpenHandle(*name));
Local<Symbol> result = Utils::ToLocal(symbol);
return v8::Handle<Private>(reinterpret_cast<Private*>(*result));
}
+Local<Private> v8::Private::ForApi(Isolate* isolate, Local<String> name) {
+ i::Isolate* i_isolate = reinterpret_cast<i::Isolate*>(isolate);
+ i::Handle<i::String> i_name = Utils::OpenHandle(*name);
+ i::Handle<i::JSObject> registry = i_isolate->GetSymbolRegistry();
+ i::Handle<i::String> part = i_isolate->factory()->private_api_string();
+ i::Handle<i::JSObject> privates =
+ i::Handle<i::JSObject>::cast(i::JSObject::GetProperty(registry, part));
+ i::Handle<i::Object> symbol = i::JSObject::GetProperty(privates, i_name);
+ if (!symbol->IsSymbol()) {
+ ASSERT(symbol->IsUndefined());
+ symbol = i_isolate->factory()->NewPrivateSymbol();
+ i::Handle<i::Symbol>::cast(symbol)->set_name(*i_name);
+ i::JSObject::SetProperty(privates, i_name, symbol, NONE, i::STRICT);
+ }
+ Local<Symbol> result = Utils::ToLocal(i::Handle<i::Symbol>::cast(symbol));
+ return v8::Handle<Private>(reinterpret_cast<Private*>(*result));
+}
+
+
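The private-symbol counterparts follow the same pattern; a sketch with the same assumptions and an arbitrary illustrative key name:

  v8::Local<v8::String> key = v8::String::NewFromUtf8(isolate, "embedder-key");
  v8::Local<v8::Private> fresh_key = v8::Private::New(isolate, key);
  // ForApi returns the same private symbol for the same name in this isolate.
  v8::Local<v8::Private> shared_key = v8::Private::ForApi(isolate, key);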
Local<Number> v8::Number::New(Isolate* isolate, double value) {
i::Isolate* internal_isolate = reinterpret_cast<i::Isolate*>(isolate);
ASSERT(internal_isolate->IsInitialized());
@@ -6027,11 +6256,6 @@ Local<Integer> v8::Integer::NewFromUnsigned(Isolate* isolate, uint32_t value) {
}
-void V8::IgnoreOutOfMemoryException() {
- EnterIsolateIfNeeded()->set_ignore_out_of_memory(true);
-}
-
-
bool V8::AddMessageListener(MessageCallback that, Handle<Value> data) {
i::Isolate* isolate = i::Isolate::Current();
EnsureInitializedForIsolate(isolate, "v8::V8::AddMessageListener()");
@@ -6280,6 +6504,25 @@ void V8::AddCallCompletedCallback(CallCompletedCallback callback) {
}
+void V8::RunMicrotasks(Isolate* isolate) {
+ i::Isolate* i_isolate = reinterpret_cast<i::Isolate*>(isolate);
+ i::HandleScope scope(i_isolate);
+ i::V8::RunMicrotasks(i_isolate);
+}
+
+
+void V8::EnqueueMicrotask(Isolate* isolate, Handle<Function> microtask) {
+ i::Isolate* i_isolate = reinterpret_cast<i::Isolate*>(isolate);
+ ENTER_V8(i_isolate);
+ i::Execution::EnqueueMicrotask(i_isolate, Utils::OpenHandle(*microtask));
+}
+
+
+void V8::SetAutorunMicrotasks(Isolate* isolate, bool autorun) {
+ reinterpret_cast<i::Isolate*>(isolate)->set_autorun_microtasks(autorun);
+}
+
+
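A sketch (not part of the patch) of the new microtask controls; OnTick and Flush are hypothetical embedder code, and an entered context is assumed for GetFunction():

  void OnTick(const v8::FunctionCallbackInfo<v8::Value>& info) {
    // hypothetical microtask body
  }

  void Flush(v8::Isolate* isolate) {
    v8::HandleScope scope(isolate);
    // Take over microtask scheduling from V8 ...
    v8::V8::SetAutorunMicrotasks(isolate, false);
    // ... queue a JS function to run at the next checkpoint ...
    v8::Local<v8::Function> fn =
        v8::FunctionTemplate::New(isolate, OnTick)->GetFunction();
    v8::V8::EnqueueMicrotask(isolate, fn);
    // ... and drain the queue at a point the embedder chooses.
    v8::V8::RunMicrotasks(isolate);
  }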
void V8::RemoveCallCompletedCallback(CallCompletedCallback callback) {
i::V8::RemoveCallCompletedCallback(callback);
}
@@ -6369,6 +6612,47 @@ void Isolate::Exit() {
}
+Isolate::DisallowJavascriptExecutionScope::DisallowJavascriptExecutionScope(
+ Isolate* isolate,
+ Isolate::DisallowJavascriptExecutionScope::OnFailure on_failure)
+ : on_failure_(on_failure) {
+ i::Isolate* i_isolate = reinterpret_cast<i::Isolate*>(isolate);
+ if (on_failure_ == CRASH_ON_FAILURE) {
+ internal_ = reinterpret_cast<void*>(
+ new i::DisallowJavascriptExecution(i_isolate));
+ } else {
+ ASSERT_EQ(THROW_ON_FAILURE, on_failure);
+ internal_ = reinterpret_cast<void*>(
+ new i::ThrowOnJavascriptExecution(i_isolate));
+ }
+}
+
+
+Isolate::DisallowJavascriptExecutionScope::~DisallowJavascriptExecutionScope() {
+ if (on_failure_ == CRASH_ON_FAILURE) {
+ delete reinterpret_cast<i::DisallowJavascriptExecution*>(internal_);
+ } else {
+ delete reinterpret_cast<i::ThrowOnJavascriptExecution*>(internal_);
+ }
+}
+
+
+Isolate::AllowJavascriptExecutionScope::AllowJavascriptExecutionScope(
+ Isolate* isolate) {
+ i::Isolate* i_isolate = reinterpret_cast<i::Isolate*>(isolate);
+ internal_assert_ = reinterpret_cast<void*>(
+ new i::AllowJavascriptExecution(i_isolate));
+ internal_throws_ = reinterpret_cast<void*>(
+ new i::NoThrowOnJavascriptExecution(i_isolate));
+}
+
+
+Isolate::AllowJavascriptExecutionScope::~AllowJavascriptExecutionScope() {
+ delete reinterpret_cast<i::AllowJavascriptExecution*>(internal_assert_);
+ delete reinterpret_cast<i::NoThrowOnJavascriptExecution*>(internal_throws_);
+}
+
+
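A sketch of the new execution-scope guards; DoPureCppWork is a hypothetical embedder function:

  {
    // Any attempt to run JS inside this block aborts the process
    // (THROW_ON_FAILURE throws an exception instead).
    v8::Isolate::DisallowJavascriptExecutionScope no_js(
        isolate,
        v8::Isolate::DisallowJavascriptExecutionScope::CRASH_ON_FAILURE);
    DoPureCppWork();

    // A nested AllowJavascriptExecutionScope re-enables JS for a sub-region.
    v8::Isolate::AllowJavascriptExecutionScope allow_js(isolate);
  }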
void Isolate::GetHeapStatistics(HeapStatistics* heap_statistics) {
i::Isolate* isolate = reinterpret_cast<i::Isolate*>(this);
if (!isolate->IsInitialized()) {
@@ -6389,6 +6673,11 @@ void Isolate::GetHeapStatistics(HeapStatistics* heap_statistics) {
}
+void Isolate::SetEventLogger(LogEventCallback that) {
+ i::Isolate* isolate = reinterpret_cast<i::Isolate*>(this);
+ isolate->set_event_logger(that);
+}
+
String::Utf8Value::Utf8Value(v8::Handle<v8::Value> obj)
: str_(NULL), length_(0) {
i::Isolate* isolate = i::Isolate::Current();
@@ -6727,9 +7016,12 @@ Handle<String> CpuProfileNode::GetFunctionName() const {
return ToApiHandle<String>(
isolate->factory()->InternalizeUtf8String(entry->name()));
} else {
- return ToApiHandle<String>(isolate->factory()->NewConsString(
+ i::Handle<i::String> cons = isolate->factory()->NewConsString(
isolate->factory()->InternalizeUtf8String(entry->name_prefix()),
- isolate->factory()->InternalizeUtf8String(entry->name())));
+ isolate->factory()->InternalizeUtf8String(entry->name()));
+ // We do not expect this to fail. Change this if it does.
+ CHECK(!cons.is_null());
+ return ToApiHandle<String>(cons);
}
}
@@ -6845,19 +7137,29 @@ void CpuProfiler::SetSamplingInterval(int us) {
}
-void CpuProfiler::StartCpuProfiling(Handle<String> title, bool record_samples) {
+void CpuProfiler::StartProfiling(Handle<String> title, bool record_samples) {
reinterpret_cast<i::CpuProfiler*>(this)->StartProfiling(
*Utils::OpenHandle(*title), record_samples);
}
-const CpuProfile* CpuProfiler::StopCpuProfiling(Handle<String> title) {
- return reinterpret_cast<const CpuProfile*>(
+void CpuProfiler::StartCpuProfiling(Handle<String> title, bool record_samples) {
+ StartProfiling(title, record_samples);
+}
+
+
+CpuProfile* CpuProfiler::StopProfiling(Handle<String> title) {
+ return reinterpret_cast<CpuProfile*>(
reinterpret_cast<i::CpuProfiler*>(this)->StopProfiling(
*Utils::OpenHandle(*title)));
}
+const CpuProfile* CpuProfiler::StopCpuProfiling(Handle<String> title) {
+ return StopProfiling(title);
+}
+
+
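The profiling entry points are renamed, and the old names become thin forwarding shims; a sketch of the new spelling (isolate setup assumed):

  v8::CpuProfiler* profiler = isolate->GetCpuProfiler();
  v8::Handle<v8::String> title = v8::String::NewFromUtf8(isolate, "startup");

  profiler->StartProfiling(title, true /* record_samples */);
  // ... run the code of interest ...
  v8::CpuProfile* profile = profiler->StopProfiling(title);  // no longer const

  // StartCpuProfiling/StopCpuProfiling still compile but only forward here.
  profile->Delete();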
void CpuProfiler::SetIdle(bool is_idle) {
i::Isolate* isolate = reinterpret_cast<i::CpuProfiler*>(this)->isolate();
i::StateTag state = isolate->current_vm_state();
@@ -6939,6 +7241,13 @@ SnapshotObjectId HeapGraphNode::GetId() const {
int HeapGraphNode::GetSelfSize() const {
+ size_t size = ToInternal(this)->self_size();
+ CHECK(size <= static_cast<size_t>(internal::kMaxInt));
+ return static_cast<int>(size);
+}
+
+
+size_t HeapGraphNode::GetShallowSize() const {
return ToInternal(this)->self_size();
}
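GetSelfSize() keeps its int return type (and now CHECKs that the value fits); the new GetShallowSize() exposes the full size_t. A one-line sketch, with node an assumed const v8::HeapGraphNode* taken from a heap snapshot:

  size_t self_size = node->GetShallowSize();  // preferred; avoids int truncation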
@@ -7015,9 +7324,6 @@ void HeapSnapshot::Serialize(OutputStream* stream,
Utils::ApiCheck(format == kJSON,
"v8::HeapSnapshot::Serialize",
"Unknown serialization format");
- Utils::ApiCheck(stream->GetOutputEncoding() == OutputStream::kAscii,
- "v8::HeapSnapshot::Serialize",
- "Unsupported output encoding");
Utils::ApiCheck(stream->GetChunkSize() > 0,
"v8::HeapSnapshot::Serialize",
"Invalid stream chunk size");
diff --git a/deps/v8/src/api.h b/deps/v8/src/api.h
index 9fc99d9d2a..128087c895 100644
--- a/deps/v8/src/api.h
+++ b/deps/v8/src/api.h
@@ -183,7 +183,8 @@ class RegisteredExtension {
V(DataView, JSDataView) \
V(String, String) \
V(Symbol, Symbol) \
- V(Script, Object) \
+ V(Script, JSFunction) \
+ V(UnboundScript, SharedFunctionInfo) \
V(Function, JSFunction) \
V(Message, JSObject) \
V(Context, Context) \
diff --git a/deps/v8/src/arm/OWNERS b/deps/v8/src/arm/OWNERS
new file mode 100644
index 0000000000..906a5ce641
--- /dev/null
+++ b/deps/v8/src/arm/OWNERS
@@ -0,0 +1 @@
+rmcilroy@chromium.org
diff --git a/deps/v8/src/arm/assembler-arm-inl.h b/deps/v8/src/arm/assembler-arm-inl.h
index 3399958ee3..d966380c1e 100644
--- a/deps/v8/src/arm/assembler-arm-inl.h
+++ b/deps/v8/src/arm/assembler-arm-inl.h
@@ -101,7 +101,7 @@ void RelocInfo::apply(intptr_t delta) {
Address RelocInfo::target_address() {
ASSERT(IsCodeTarget(rmode_) || IsRuntimeEntry(rmode_));
- return Assembler::target_address_at(pc_);
+ return Assembler::target_address_at(pc_, host_);
}
@@ -109,7 +109,28 @@ Address RelocInfo::target_address_address() {
ASSERT(IsCodeTarget(rmode_) || IsRuntimeEntry(rmode_)
|| rmode_ == EMBEDDED_OBJECT
|| rmode_ == EXTERNAL_REFERENCE);
- return Assembler::target_pointer_address_at(pc_);
+ if (FLAG_enable_ool_constant_pool ||
+ Assembler::IsMovW(Memory::int32_at(pc_))) {
+ // We return the PC for ool constant pool since this function is used by the
+    // serializer, which expects the address to reside within the code object.
+ return reinterpret_cast<Address>(pc_);
+ } else {
+ ASSERT(Assembler::IsLdrPcImmediateOffset(Memory::int32_at(pc_)));
+ return Assembler::target_pointer_address_at(pc_);
+ }
+}
+
+
+Address RelocInfo::constant_pool_entry_address() {
+ ASSERT(IsInConstantPool());
+ if (FLAG_enable_ool_constant_pool) {
+ ASSERT(Assembler::IsLdrPpImmediateOffset(Memory::int32_at(pc_)));
+ return Assembler::target_constant_pool_address_at(pc_,
+ host_->constant_pool());
+ } else {
+ ASSERT(Assembler::IsLdrPcImmediateOffset(Memory::int32_at(pc_)));
+ return Assembler::target_pointer_address_at(pc_);
+ }
}
@@ -120,7 +141,7 @@ int RelocInfo::target_address_size() {
void RelocInfo::set_target_address(Address target, WriteBarrierMode mode) {
ASSERT(IsCodeTarget(rmode_) || IsRuntimeEntry(rmode_));
- Assembler::set_target_address_at(pc_, target);
+ Assembler::set_target_address_at(pc_, host_, target);
if (mode == UPDATE_WRITE_BARRIER && host() != NULL && IsCodeTarget(rmode_)) {
Object* target_code = Code::GetCodeFromTargetAddress(target);
host()->GetHeap()->incremental_marking()->RecordWriteIntoCode(
@@ -131,21 +152,22 @@ void RelocInfo::set_target_address(Address target, WriteBarrierMode mode) {
Object* RelocInfo::target_object() {
ASSERT(IsCodeTarget(rmode_) || rmode_ == EMBEDDED_OBJECT);
- return reinterpret_cast<Object*>(Assembler::target_address_at(pc_));
+ return reinterpret_cast<Object*>(Assembler::target_address_at(pc_, host_));
}
Handle<Object> RelocInfo::target_object_handle(Assembler* origin) {
ASSERT(IsCodeTarget(rmode_) || rmode_ == EMBEDDED_OBJECT);
return Handle<Object>(reinterpret_cast<Object**>(
- Assembler::target_address_at(pc_)));
+ Assembler::target_address_at(pc_, host_)));
}
void RelocInfo::set_target_object(Object* target, WriteBarrierMode mode) {
ASSERT(IsCodeTarget(rmode_) || rmode_ == EMBEDDED_OBJECT);
ASSERT(!target->IsConsString());
- Assembler::set_target_address_at(pc_, reinterpret_cast<Address>(target));
+ Assembler::set_target_address_at(pc_, host_,
+ reinterpret_cast<Address>(target));
if (mode == UPDATE_WRITE_BARRIER &&
host() != NULL &&
target->IsHeapObject()) {
@@ -157,7 +179,7 @@ void RelocInfo::set_target_object(Object* target, WriteBarrierMode mode) {
Address RelocInfo::target_reference() {
ASSERT(rmode_ == EXTERNAL_REFERENCE);
- return Assembler::target_address_at(pc_);
+ return Assembler::target_address_at(pc_, host_);
}
@@ -268,7 +290,7 @@ void RelocInfo::WipeOut() {
IsCodeTarget(rmode_) ||
IsRuntimeEntry(rmode_) ||
IsExternalReference(rmode_));
- Assembler::set_target_address_at(pc_, NULL);
+ Assembler::set_target_address_at(pc_, host_, NULL);
}
@@ -402,7 +424,18 @@ Address Assembler::target_pointer_address_at(Address pc) {
}
-Address Assembler::target_address_at(Address pc) {
+Address Assembler::target_constant_pool_address_at(
+ Address pc, ConstantPoolArray* constant_pool) {
+ ASSERT(constant_pool != NULL);
+ ASSERT(IsLdrPpImmediateOffset(Memory::int32_at(pc)));
+ Instr instr = Memory::int32_at(pc);
+ return reinterpret_cast<Address>(constant_pool) +
+ GetLdrRegisterImmediateOffset(instr);
+}
+
+
+Address Assembler::target_address_at(Address pc,
+ ConstantPoolArray* constant_pool) {
if (IsMovW(Memory::int32_at(pc))) {
ASSERT(IsMovT(Memory::int32_at(pc + kInstrSize)));
Instruction* instr = Instruction::At(pc);
@@ -410,9 +443,14 @@ Address Assembler::target_address_at(Address pc) {
return reinterpret_cast<Address>(
(next_instr->ImmedMovwMovtValue() << 16) |
instr->ImmedMovwMovtValue());
+ } else if (FLAG_enable_ool_constant_pool) {
+ ASSERT(IsLdrPpImmediateOffset(Memory::int32_at(pc)));
+ return Memory::Address_at(
+ target_constant_pool_address_at(pc, constant_pool));
+ } else {
+ ASSERT(IsLdrPcImmediateOffset(Memory::int32_at(pc)));
+ return Memory::Address_at(target_pointer_address_at(pc));
}
- ASSERT(IsLdrPcImmediateOffset(Memory::int32_at(pc)));
- return Memory::Address_at(target_pointer_address_at(pc));
}
@@ -430,7 +468,8 @@ Address Assembler::target_address_from_return_address(Address pc) {
// @ return address
Address candidate = pc - 2 * Assembler::kInstrSize;
Instr candidate_instr(Memory::int32_at(candidate));
- if (IsLdrPcImmediateOffset(candidate_instr)) {
+ if (IsLdrPcImmediateOffset(candidate_instr) |
+ IsLdrPpImmediateOffset(candidate_instr)) {
return candidate;
}
candidate = pc - 3 * Assembler::kInstrSize;
@@ -441,7 +480,8 @@ Address Assembler::target_address_from_return_address(Address pc) {
Address Assembler::return_address_from_call_start(Address pc) {
- if (IsLdrPcImmediateOffset(Memory::int32_at(pc))) {
+ if (IsLdrPcImmediateOffset(Memory::int32_at(pc)) |
+ IsLdrPpImmediateOffset(Memory::int32_at(pc))) {
return pc + kInstrSize * 2;
} else {
ASSERT(IsMovW(Memory::int32_at(pc)));
@@ -452,8 +492,12 @@ Address Assembler::return_address_from_call_start(Address pc) {
void Assembler::deserialization_set_special_target_at(
- Address constant_pool_entry, Address target) {
- Memory::Address_at(constant_pool_entry) = target;
+ Address constant_pool_entry, Code* code, Address target) {
+ if (FLAG_enable_ool_constant_pool) {
+ set_target_address_at(constant_pool_entry, code, target);
+ } else {
+ Memory::Address_at(constant_pool_entry) = target;
+ }
}
@@ -463,7 +507,9 @@ static Instr EncodeMovwImmediate(uint32_t immediate) {
}
-void Assembler::set_target_address_at(Address pc, Address target) {
+void Assembler::set_target_address_at(Address pc,
+ ConstantPoolArray* constant_pool,
+ Address target) {
if (IsMovW(Memory::int32_at(pc))) {
ASSERT(IsMovT(Memory::int32_at(pc + kInstrSize)));
uint32_t* instr_ptr = reinterpret_cast<uint32_t*>(pc);
@@ -479,6 +525,10 @@ void Assembler::set_target_address_at(Address pc, Address target) {
ASSERT(IsMovW(Memory::int32_at(pc)));
ASSERT(IsMovT(Memory::int32_at(pc + kInstrSize)));
CPU::FlushICache(pc, 2 * kInstrSize);
+ } else if (FLAG_enable_ool_constant_pool) {
+ ASSERT(IsLdrPpImmediateOffset(Memory::int32_at(pc)));
+ Memory::Address_at(
+ target_constant_pool_address_at(pc, constant_pool)) = target;
} else {
ASSERT(IsLdrPcImmediateOffset(Memory::int32_at(pc)));
Memory::Address_at(target_pointer_address_at(pc)) = target;
diff --git a/deps/v8/src/arm/assembler-arm.cc b/deps/v8/src/arm/assembler-arm.cc
index 35279e557c..297cdcc039 100644
--- a/deps/v8/src/arm/assembler-arm.cc
+++ b/deps/v8/src/arm/assembler-arm.cc
@@ -293,10 +293,20 @@ const int RelocInfo::kApplyMask = 0;
bool RelocInfo::IsCodedSpecially() {
- // The deserializer needs to know whether a pointer is specially coded. Being
- // specially coded on ARM means that it is a movw/movt instruction. We don't
- // generate those yet.
- return false;
+ // The deserializer needs to know whether a pointer is specially coded.  Being
+  // specially coded on ARM means that it is a movw/movt instruction, or an
+  // out-of-line constant pool entry.  These only occur if
+ // FLAG_enable_ool_constant_pool is true.
+ return FLAG_enable_ool_constant_pool;
+}
+
+
+bool RelocInfo::IsInConstantPool() {
+ if (FLAG_enable_ool_constant_pool) {
+ return Assembler::IsLdrPpImmediateOffset(Memory::int32_at(pc_));
+ } else {
+ return Assembler::IsLdrPcImmediateOffset(Memory::int32_at(pc_));
+ }
}
@@ -344,12 +354,17 @@ Operand::Operand(Handle<Object> handle) {
Operand::Operand(Register rm, ShiftOp shift_op, int shift_imm) {
ASSERT(is_uint5(shift_imm));
- ASSERT(shift_op != ROR || shift_imm != 0); // use RRX if you mean it
+
rm_ = rm;
rs_ = no_reg;
shift_op_ = shift_op;
shift_imm_ = shift_imm & 31;
- if (shift_op == RRX) {
+
+ if ((shift_op == ROR) && (shift_imm == 0)) {
+    // ROR #0 is functionally equivalent to LSL #0 and this allows us to encode
+ // RRX as ROR #0 (See below).
+ shift_op = LSL;
+ } else if (shift_op == RRX) {
// encoded as ROR with shift_imm == 0
ASSERT(shift_imm == 0);
shift_op_ = ROR;
@@ -475,9 +490,15 @@ const Instr kMovLrPc = al | MOV | kRegister_pc_Code | kRegister_lr_Code * B12;
// ldr rd, [pc, #offset]
const Instr kLdrPCMask = 15 * B24 | 7 * B20 | 15 * B16;
const Instr kLdrPCPattern = 5 * B24 | L | kRegister_pc_Code * B16;
+// ldr rd, [pp, #offset]
+const Instr kLdrPpMask = 15 * B24 | 7 * B20 | 15 * B16;
+const Instr kLdrPpPattern = 5 * B24 | L | kRegister_r8_Code * B16;
// vldr dd, [pc, #offset]
const Instr kVldrDPCMask = 15 * B24 | 3 * B20 | 15 * B16 | 15 * B8;
const Instr kVldrDPCPattern = 13 * B24 | L | kRegister_pc_Code * B16 | 11 * B8;
+// vldr dd, [pp, #offset]
+const Instr kVldrDPpMask = 15 * B24 | 3 * B20 | 15 * B16 | 15 * B8;
+const Instr kVldrDPpPattern = 13 * B24 | L | kRegister_r8_Code * B16 | 11 * B8;
// blxcc rm
const Instr kBlxRegMask =
15 * B24 | 15 * B20 | 15 * B16 | 15 * B12 | 15 * B8 | 15 * B4;
@@ -515,6 +536,7 @@ const Instr kLdrStrOffsetMask = 0x00000fff;
Assembler::Assembler(Isolate* isolate, void* buffer, int buffer_size)
: AssemblerBase(isolate, buffer, buffer_size),
recorded_ast_id_(TypeFeedbackId::None()),
+ constant_pool_builder_(),
positions_recorder_(this) {
reloc_info_writer.Reposition(buffer_ + buffer_size_, pc_);
num_pending_32_bit_reloc_info_ = 0;
@@ -525,6 +547,8 @@ Assembler::Assembler(Isolate* isolate, void* buffer, int buffer_size)
first_const_pool_32_use_ = -1;
first_const_pool_64_use_ = -1;
last_bound_pos_ = 0;
+ constant_pool_available_ = !FLAG_enable_ool_constant_pool;
+ constant_pool_full_ = false;
ClearRecordedAstId();
}
@@ -535,11 +559,12 @@ Assembler::~Assembler() {
void Assembler::GetCode(CodeDesc* desc) {
- // Emit constant pool if necessary.
- CheckConstPool(true, false);
- ASSERT(num_pending_32_bit_reloc_info_ == 0);
- ASSERT(num_pending_64_bit_reloc_info_ == 0);
-
+ if (!FLAG_enable_ool_constant_pool) {
+ // Emit constant pool if necessary.
+ CheckConstPool(true, false);
+ ASSERT(num_pending_32_bit_reloc_info_ == 0);
+ ASSERT(num_pending_64_bit_reloc_info_ == 0);
+ }
// Set up code descriptor.
desc->buffer = buffer_;
desc->buffer_size = buffer_size_;
@@ -722,6 +747,13 @@ bool Assembler::IsLdrPcImmediateOffset(Instr instr) {
}
+bool Assembler::IsLdrPpImmediateOffset(Instr instr) {
+ // Check the instruction is indeed a
+ // ldr<cond> <Rd>, [pp +/- offset_12].
+ return (instr & kLdrPpMask) == kLdrPpPattern;
+}
+
+
bool Assembler::IsVldrDPcImmediateOffset(Instr instr) {
// Check the instruction is indeed a
// vldr<cond> <Dd>, [pc +/- offset_10].
@@ -729,6 +761,13 @@ bool Assembler::IsVldrDPcImmediateOffset(Instr instr) {
}
+bool Assembler::IsVldrDPpImmediateOffset(Instr instr) {
+ // Check the instruction is indeed a
+ // vldr<cond> <Dd>, [pp +/- offset_10].
+ return (instr & kVldrDPpMask) == kVldrDPpPattern;
+}
+
+
bool Assembler::IsTstImmediate(Instr instr) {
return (instr & (B27 | B26 | I | kOpCodeMask | S | kRdMask)) ==
(I | TST | S);
@@ -1054,14 +1093,24 @@ bool Operand::must_output_reloc_info(const Assembler* assembler) const {
}
-static bool use_movw_movt(const Operand& x, const Assembler* assembler) {
- if (Assembler::use_immediate_embedded_pointer_loads(assembler)) {
+static bool use_mov_immediate_load(const Operand& x,
+ const Assembler* assembler) {
+ if (assembler != NULL && !assembler->can_use_constant_pool()) {
+ // If there is no constant pool available, we must use an mov immediate.
+ // TODO(rmcilroy): enable ARMv6 support.
+ ASSERT(CpuFeatures::IsSupported(ARMv7));
return true;
- }
- if (x.must_output_reloc_info(assembler)) {
+ } else if (CpuFeatures::IsSupported(MOVW_MOVT_IMMEDIATE_LOADS) &&
+ (assembler == NULL || !assembler->predictable_code_size())) {
+ // Prefer movw / movt to constant pool if it is more efficient on the CPU.
+ return true;
+ } else if (x.must_output_reloc_info(assembler)) {
+ // Prefer constant pool if data is likely to be patched.
return false;
+ } else {
+ // Otherwise, use immediate load if movw / movt is available.
+ return CpuFeatures::IsSupported(ARMv7);
}
- return CpuFeatures::IsSupported(ARMv7);
}
@@ -1075,7 +1124,7 @@ bool Operand::is_single_instruction(const Assembler* assembler,
// constant pool is required. For a mov instruction not setting the
// condition code additional instruction conventions can be used.
if ((instr & ~kCondMask) == 13*B21) { // mov, S not set
- return !use_movw_movt(*this, assembler);
+ return !use_mov_immediate_load(*this, assembler);
} else {
    // If this is not a mov or mvn instruction there will always be an additional
    // instruction - either mov or ldr. The mov might actually be two
@@ -1091,26 +1140,33 @@ bool Operand::is_single_instruction(const Assembler* assembler,
}
-void Assembler::move_32_bit_immediate(Condition cond,
- Register rd,
- SBit s,
- const Operand& x) {
- if (rd.code() != pc.code() && s == LeaveCC) {
- if (use_movw_movt(x, this)) {
- if (x.must_output_reloc_info(this)) {
- RecordRelocInfo(x.rmode_, x.imm32_, DONT_USE_CONSTANT_POOL);
- // Make sure the movw/movt doesn't get separated.
- BlockConstPoolFor(2);
- }
- emit(cond | 0x30*B20 | rd.code()*B12 |
- EncodeMovwImmediate(x.imm32_ & 0xffff));
- movt(rd, static_cast<uint32_t>(x.imm32_) >> 16, cond);
- return;
- }
+void Assembler::move_32_bit_immediate(Register rd,
+ const Operand& x,
+ Condition cond) {
+ RelocInfo rinfo(pc_, x.rmode_, x.imm32_, NULL);
+ if (x.must_output_reloc_info(this)) {
+ RecordRelocInfo(rinfo);
}
- RecordRelocInfo(x.rmode_, x.imm32_, USE_CONSTANT_POOL);
- ldr(rd, MemOperand(pc, 0), cond);
+ if (use_mov_immediate_load(x, this)) {
+ Register target = rd.code() == pc.code() ? ip : rd;
+ // TODO(rmcilroy): add ARMv6 support for immediate loads.
+ ASSERT(CpuFeatures::IsSupported(ARMv7));
+ if (!FLAG_enable_ool_constant_pool && x.must_output_reloc_info(this)) {
+ // Make sure the movw/movt doesn't get separated.
+ BlockConstPoolFor(2);
+ }
+ emit(cond | 0x30*B20 | target.code()*B12 |
+ EncodeMovwImmediate(x.imm32_ & 0xffff));
+ movt(target, static_cast<uint32_t>(x.imm32_) >> 16, cond);
+ if (target.code() != rd.code()) {
+ mov(rd, target, LeaveCC, cond);
+ }
+ } else {
+ ASSERT(can_use_constant_pool());
+ ConstantPoolAddEntry(rinfo);
+ ldr(rd, MemOperand(FLAG_enable_ool_constant_pool ? pp : pc, 0), cond);
+ }
}
@@ -1133,20 +1189,9 @@ void Assembler::addrmod1(Instr instr,
CHECK(!rn.is(ip)); // rn should never be ip, or will be trashed
Condition cond = Instruction::ConditionField(instr);
if ((instr & ~kCondMask) == 13*B21) { // mov, S not set
- move_32_bit_immediate(cond, rd, LeaveCC, x);
+ move_32_bit_immediate(rd, x, cond);
} else {
- if ((instr & kMovMvnMask) == kMovMvnPattern) {
- // Moves need to use a constant pool entry.
- RecordRelocInfo(x.rmode_, x.imm32_, USE_CONSTANT_POOL);
- ldr(ip, MemOperand(pc, 0), cond);
- } else if (x.must_output_reloc_info(this)) {
- // Otherwise, use most efficient form of fetching from constant pool.
- move_32_bit_immediate(cond, ip, LeaveCC, x);
- } else {
- // If this is not a mov or mvn instruction we may still be able to
- // avoid a constant pool entry by using mvn or movw.
- mov(ip, x, LeaveCC, cond);
- }
+ mov(ip, x, LeaveCC, cond);
addrmod1(instr, rn, rd, Operand(ip));
}
return;
@@ -1748,7 +1793,9 @@ void Assembler::uxtb(Register dst,
(src.shift_imm_ == 8) ||
(src.shift_imm_ == 16) ||
(src.shift_imm_ == 24));
- ASSERT(src.shift_op() == ROR);
+ // Operand maps ROR #0 to LSL #0.
+ ASSERT((src.shift_op() == ROR) ||
+ ((src.shift_op() == LSL) && (src.shift_imm_ == 0)));
emit(cond | 0x6E*B20 | 0xF*B16 | dst.code()*B12 |
((src.shift_imm_ >> 1)&0xC)*B8 | 7*B4 | src.rm().code());
}
@@ -1770,7 +1817,9 @@ void Assembler::uxtab(Register dst,
(src2.shift_imm_ == 8) ||
(src2.shift_imm_ == 16) ||
(src2.shift_imm_ == 24));
- ASSERT(src2.shift_op() == ROR);
+ // Operand maps ROR #0 to LSL #0.
+ ASSERT((src2.shift_op() == ROR) ||
+ ((src2.shift_op() == LSL) && (src2.shift_imm_ == 0)));
emit(cond | 0x6E*B20 | src1.code()*B16 | dst.code()*B12 |
((src2.shift_imm_ >> 1) &0xC)*B8 | 7*B4 | src2.rm().code());
}
@@ -1790,7 +1839,9 @@ void Assembler::uxtb16(Register dst,
(src.shift_imm_ == 8) ||
(src.shift_imm_ == 16) ||
(src.shift_imm_ == 24));
- ASSERT(src.shift_op() == ROR);
+ // Operand maps ROR #0 to LSL #0.
+ ASSERT((src.shift_op() == ROR) ||
+ ((src.shift_op() == LSL) && (src.shift_imm_ == 0)));
emit(cond | 0x6C*B20 | 0xF*B16 | dst.code()*B12 |
((src.shift_imm_ >> 1)&0xC)*B8 | 7*B4 | src.rm().code());
}
@@ -1814,8 +1865,7 @@ void Assembler::msr(SRegisterFieldMask fields, const Operand& src,
if (src.must_output_reloc_info(this) ||
!fits_shifter(src.imm32_, &rotate_imm, &immed_8, NULL)) {
// Immediate operand cannot be encoded, load it first to register ip.
- RecordRelocInfo(src.rmode_, src.imm32_);
- ldr(ip, MemOperand(pc, 0), cond);
+ move_32_bit_immediate(ip, src);
msr(fields, Operand(ip), cond);
return;
}
@@ -2422,7 +2472,7 @@ void Assembler::vmov(const DwVfpRegister dst,
int vd, d;
dst.split_code(&vd, &d);
emit(al | 0x1D*B23 | d*B22 | 0x3*B20 | vd*B12 | 0x5*B9 | B8 | enc);
- } else if (FLAG_enable_vldr_imm) {
+ } else if (FLAG_enable_vldr_imm && can_use_constant_pool()) {
// TODO(jfb) Temporarily turned off until we have constant blinding or
// some equivalent mitigation: an attacker can otherwise control
// generated data which also happens to be executable, a Very Bad
@@ -2438,8 +2488,9 @@ void Assembler::vmov(const DwVfpRegister dst,
// The code could also randomize the order of values, though
// that's tricky because vldr has a limited reach. Furthermore
// it breaks load locality.
- RecordRelocInfo(imm);
- vldr(dst, MemOperand(pc, 0));
+ RelocInfo rinfo(pc_, imm);
+ ConstantPoolAddEntry(rinfo);
+ vldr(dst, MemOperand(FLAG_enable_ool_constant_pool ? pp : pc, 0));
} else {
// Synthesise the double from ARM immediates.
uint32_t lo, hi;
@@ -3169,6 +3220,7 @@ void Assembler::GrowBuffer() {
ASSERT(rinfo.rmode() == RelocInfo::NONE64);
rinfo.set_pc(rinfo.pc() + pc_delta);
}
+ constant_pool_builder_.Relocate(pc_delta);
}
@@ -3204,28 +3256,16 @@ void Assembler::emit_code_stub_address(Code* stub) {
}
-void Assembler::RecordRelocInfo(RelocInfo::Mode rmode, intptr_t data,
- UseConstantPoolMode mode) {
- // We do not try to reuse pool constants.
+void Assembler::RecordRelocInfo(RelocInfo::Mode rmode, intptr_t data) {
RelocInfo rinfo(pc_, rmode, data, NULL);
- if (((rmode >= RelocInfo::JS_RETURN) &&
- (rmode <= RelocInfo::DEBUG_BREAK_SLOT)) ||
- (rmode == RelocInfo::CONST_POOL) ||
- mode == DONT_USE_CONSTANT_POOL) {
- // Adjust code for new modes.
- ASSERT(RelocInfo::IsDebugBreakSlot(rmode)
- || RelocInfo::IsJSReturn(rmode)
- || RelocInfo::IsComment(rmode)
- || RelocInfo::IsPosition(rmode)
- || RelocInfo::IsConstPool(rmode)
- || mode == DONT_USE_CONSTANT_POOL);
- // These modes do not need an entry in the constant pool.
- } else {
- RecordRelocInfoConstantPoolEntryHelper(rinfo);
- }
+ RecordRelocInfo(rinfo);
+}
+
+
+void Assembler::RecordRelocInfo(const RelocInfo& rinfo) {
if (!RelocInfo::IsNone(rinfo.rmode())) {
// Don't record external references unless the heap will be serialized.
- if (rmode == RelocInfo::EXTERNAL_REFERENCE) {
+ if (rinfo.rmode() == RelocInfo::EXTERNAL_REFERENCE) {
#ifdef DEBUG
if (!Serializer::enabled()) {
Serializer::TooLateToEnableNow();
@@ -3236,9 +3276,9 @@ void Assembler::RecordRelocInfo(RelocInfo::Mode rmode, intptr_t data,
}
}
ASSERT(buffer_space() >= kMaxRelocSize); // too late to grow buffer here
- if (rmode == RelocInfo::CODE_TARGET_WITH_ID) {
- RelocInfo reloc_info_with_ast_id(pc_,
- rmode,
+ if (rinfo.rmode() == RelocInfo::CODE_TARGET_WITH_ID) {
+ RelocInfo reloc_info_with_ast_id(rinfo.pc(),
+ rinfo.rmode(),
RecordedAstId().ToInt(),
NULL);
ClearRecordedAstId();
@@ -3250,34 +3290,38 @@ void Assembler::RecordRelocInfo(RelocInfo::Mode rmode, intptr_t data,
}
-void Assembler::RecordRelocInfo(double data) {
- // We do not try to reuse pool constants.
- RelocInfo rinfo(pc_, data);
- RecordRelocInfoConstantPoolEntryHelper(rinfo);
-}
-
-
-void Assembler::RecordRelocInfoConstantPoolEntryHelper(const RelocInfo& rinfo) {
- if (rinfo.rmode() == RelocInfo::NONE64) {
- ASSERT(num_pending_64_bit_reloc_info_ < kMaxNumPending64RelocInfo);
- if (num_pending_64_bit_reloc_info_ == 0) {
- first_const_pool_64_use_ = pc_offset();
- }
- pending_64_bit_reloc_info_[num_pending_64_bit_reloc_info_++] = rinfo;
+void Assembler::ConstantPoolAddEntry(const RelocInfo& rinfo) {
+ if (FLAG_enable_ool_constant_pool) {
+ constant_pool_builder_.AddEntry(this, rinfo);
} else {
- ASSERT(num_pending_32_bit_reloc_info_ < kMaxNumPending32RelocInfo);
- if (num_pending_32_bit_reloc_info_ == 0) {
- first_const_pool_32_use_ = pc_offset();
+ if (rinfo.rmode() == RelocInfo::NONE64) {
+ ASSERT(num_pending_64_bit_reloc_info_ < kMaxNumPending64RelocInfo);
+ if (num_pending_64_bit_reloc_info_ == 0) {
+ first_const_pool_64_use_ = pc_offset();
+ }
+ pending_64_bit_reloc_info_[num_pending_64_bit_reloc_info_++] = rinfo;
+ } else {
+ ASSERT(num_pending_32_bit_reloc_info_ < kMaxNumPending32RelocInfo);
+ if (num_pending_32_bit_reloc_info_ == 0) {
+ first_const_pool_32_use_ = pc_offset();
+ }
+ pending_32_bit_reloc_info_[num_pending_32_bit_reloc_info_++] = rinfo;
}
- pending_32_bit_reloc_info_[num_pending_32_bit_reloc_info_++] = rinfo;
+ // Make sure the constant pool is not emitted in place of the next
+ // instruction for which we just recorded relocation info.
+ BlockConstPoolFor(1);
}
- // Make sure the constant pool is not emitted in place of the next
- // instruction for which we just recorded relocation info.
- BlockConstPoolFor(1);
}
void Assembler::BlockConstPoolFor(int instructions) {
+ if (FLAG_enable_ool_constant_pool) {
+ // Should be a no-op if using an out-of-line constant pool.
+ ASSERT(num_pending_32_bit_reloc_info_ == 0);
+ ASSERT(num_pending_64_bit_reloc_info_ == 0);
+ return;
+ }
+
int pc_limit = pc_offset() + instructions * kInstrSize;
if (no_const_pool_before_ < pc_limit) {
// Max pool start (if we need a jump and an alignment).
@@ -3299,6 +3343,13 @@ void Assembler::BlockConstPoolFor(int instructions) {
void Assembler::CheckConstPool(bool force_emit, bool require_jump) {
+ if (FLAG_enable_ool_constant_pool) {
+ // Should be a no-op if using an out-of-line constant pool.
+ ASSERT(num_pending_32_bit_reloc_info_ == 0);
+ ASSERT(num_pending_64_bit_reloc_info_ == 0);
+ return;
+ }
+
  // Some short sequences of instructions mustn't be broken up by constant pool
  // emission; such sequences are protected by calls to BlockConstPoolFor and
// BlockConstPoolScope.
@@ -3496,6 +3547,195 @@ void Assembler::CheckConstPool(bool force_emit, bool require_jump) {
}
+MaybeObject* Assembler::AllocateConstantPool(Heap* heap) {
+ ASSERT(FLAG_enable_ool_constant_pool);
+ return constant_pool_builder_.Allocate(heap);
+}
+
+
+void Assembler::PopulateConstantPool(ConstantPoolArray* constant_pool) {
+ ASSERT(FLAG_enable_ool_constant_pool);
+ constant_pool_builder_.Populate(this, constant_pool);
+}
+
+
+ConstantPoolBuilder::ConstantPoolBuilder()
+ : entries_(),
+ merged_indexes_(),
+ count_of_64bit_(0),
+ count_of_code_ptr_(0),
+ count_of_heap_ptr_(0),
+ count_of_32bit_(0) { }
+
+
+bool ConstantPoolBuilder::IsEmpty() {
+ return entries_.size() == 0;
+}
+
+
+bool ConstantPoolBuilder::Is64BitEntry(RelocInfo::Mode rmode) {
+ return rmode == RelocInfo::NONE64;
+}
+
+
+bool ConstantPoolBuilder::Is32BitEntry(RelocInfo::Mode rmode) {
+ return !RelocInfo::IsGCRelocMode(rmode) && rmode != RelocInfo::NONE64;
+}
+
+
+bool ConstantPoolBuilder::IsCodePtrEntry(RelocInfo::Mode rmode) {
+ return RelocInfo::IsCodeTarget(rmode);
+}
+
+
+bool ConstantPoolBuilder::IsHeapPtrEntry(RelocInfo::Mode rmode) {
+ return RelocInfo::IsGCRelocMode(rmode) && !RelocInfo::IsCodeTarget(rmode);
+}
+
+
+void ConstantPoolBuilder::AddEntry(Assembler* assm,
+ const RelocInfo& rinfo) {
+ RelocInfo::Mode rmode = rinfo.rmode();
+ ASSERT(rmode != RelocInfo::COMMENT &&
+ rmode != RelocInfo::POSITION &&
+ rmode != RelocInfo::STATEMENT_POSITION &&
+ rmode != RelocInfo::CONST_POOL);
+
+
+ // Try to merge entries which won't be patched.
+ int merged_index = -1;
+ if (RelocInfo::IsNone(rmode) ||
+ (!Serializer::enabled() && (rmode >= RelocInfo::CELL))) {
+ size_t i;
+ std::vector<RelocInfo>::const_iterator it;
+ for (it = entries_.begin(), i = 0; it != entries_.end(); it++, i++) {
+ if (RelocInfo::IsEqual(rinfo, *it)) {
+ merged_index = i;
+ break;
+ }
+ }
+ }
+
+ entries_.push_back(rinfo);
+ merged_indexes_.push_back(merged_index);
+
+ if (merged_index == -1) {
+ // Not merged, so update the appropriate count.
+ if (Is64BitEntry(rmode)) {
+ count_of_64bit_++;
+ } else if (Is32BitEntry(rmode)) {
+ count_of_32bit_++;
+ } else if (IsCodePtrEntry(rmode)) {
+ count_of_code_ptr_++;
+ } else {
+ ASSERT(IsHeapPtrEntry(rmode));
+ count_of_heap_ptr_++;
+ }
+ }
+
+ // Check if we still have room for another entry given Arm's ldr and vldr
+ // immediate offset range.
+ if (!(is_uint12(ConstantPoolArray::SizeFor(count_of_64bit_,
+ count_of_code_ptr_,
+ count_of_heap_ptr_,
+ count_of_32bit_))) &&
+ is_uint10(ConstantPoolArray::SizeFor(count_of_64bit_, 0, 0, 0))) {
+ assm->set_constant_pool_full();
+ }
+}
+
+
+void ConstantPoolBuilder::Relocate(int pc_delta) {
+ for (std::vector<RelocInfo>::iterator rinfo = entries_.begin();
+ rinfo != entries_.end(); rinfo++) {
+ ASSERT(rinfo->rmode() != RelocInfo::JS_RETURN);
+ rinfo->set_pc(rinfo->pc() + pc_delta);
+ }
+}
+
+
+MaybeObject* ConstantPoolBuilder::Allocate(Heap* heap) {
+ if (IsEmpty()) {
+ return heap->empty_constant_pool_array();
+ } else {
+ return heap->AllocateConstantPoolArray(count_of_64bit_, count_of_code_ptr_,
+ count_of_heap_ptr_, count_of_32bit_);
+ }
+}
+
+
+void ConstantPoolBuilder::Populate(Assembler* assm,
+ ConstantPoolArray* constant_pool) {
+ ASSERT(constant_pool->count_of_int64_entries() == count_of_64bit_);
+ ASSERT(constant_pool->count_of_code_ptr_entries() == count_of_code_ptr_);
+ ASSERT(constant_pool->count_of_heap_ptr_entries() == count_of_heap_ptr_);
+ ASSERT(constant_pool->count_of_int32_entries() == count_of_32bit_);
+ ASSERT(entries_.size() == merged_indexes_.size());
+
+ int index_64bit = 0;
+ int index_code_ptr = count_of_64bit_;
+ int index_heap_ptr = count_of_64bit_ + count_of_code_ptr_;
+ int index_32bit = count_of_64bit_ + count_of_code_ptr_ + count_of_heap_ptr_;
+
+ size_t i;
+ std::vector<RelocInfo>::const_iterator rinfo;
+ for (rinfo = entries_.begin(), i = 0; rinfo != entries_.end(); rinfo++, i++) {
+ RelocInfo::Mode rmode = rinfo->rmode();
+
+ // Update constant pool if necessary and get the entry's offset.
+ int offset;
+ if (merged_indexes_[i] == -1) {
+ if (Is64BitEntry(rmode)) {
+ offset = constant_pool->OffsetOfElementAt(index_64bit) - kHeapObjectTag;
+ constant_pool->set(index_64bit++, rinfo->data64());
+ } else if (Is32BitEntry(rmode)) {
+ offset = constant_pool->OffsetOfElementAt(index_32bit) - kHeapObjectTag;
+ constant_pool->set(index_32bit++, static_cast<int32_t>(rinfo->data()));
+ } else if (IsCodePtrEntry(rmode)) {
+ offset = constant_pool->OffsetOfElementAt(index_code_ptr) -
+ kHeapObjectTag;
+ constant_pool->set(index_code_ptr++,
+ reinterpret_cast<Object *>(rinfo->data()));
+ } else {
+ ASSERT(IsHeapPtrEntry(rmode));
+ offset = constant_pool->OffsetOfElementAt(index_heap_ptr) -
+ kHeapObjectTag;
+ constant_pool->set(index_heap_ptr++,
+ reinterpret_cast<Object *>(rinfo->data()));
+ }
+ merged_indexes_[i] = offset; // Stash offset for merged entries.
+ } else {
+ size_t merged_index = static_cast<size_t>(merged_indexes_[i]);
+ ASSERT(merged_index < merged_indexes_.size() && merged_index < i);
+ offset = merged_indexes_[merged_index];
+ }
+
+ // Patch vldr/ldr instruction with correct offset.
+ Instr instr = assm->instr_at(rinfo->pc());
+ if (Is64BitEntry(rmode)) {
+ // Instruction to patch must be 'vldr rd, [pp, #0]'.
+ ASSERT((Assembler::IsVldrDPpImmediateOffset(instr) &&
+ Assembler::GetVldrDRegisterImmediateOffset(instr) == 0));
+ ASSERT(is_uint10(offset));
+ assm->instr_at_put(rinfo->pc(),
+ Assembler::SetVldrDRegisterImmediateOffset(instr, offset));
+ } else {
+ // Instruction to patch must be 'ldr rd, [pp, #0]'.
+ ASSERT((Assembler::IsLdrPpImmediateOffset(instr) &&
+ Assembler::GetLdrRegisterImmediateOffset(instr) == 0));
+ ASSERT(is_uint12(offset));
+ assm->instr_at_put(rinfo->pc(),
+ Assembler::SetLdrRegisterImmediateOffset(instr, offset));
+ }
+ }
+
+ ASSERT((index_64bit == count_of_64bit_) &&
+ (index_code_ptr == (index_64bit + count_of_code_ptr_)) &&
+ (index_heap_ptr == (index_code_ptr + count_of_heap_ptr_)) &&
+ (index_32bit == (index_heap_ptr + count_of_32bit_)));
+}
+
+
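A V8-internal sketch (not embedder API, and not part of this patch) of how the new builder is driven, using only the methods added above; the surrounding Assembler/Heap plumbing, namespace v8::internal, and error handling are assumed:

  MaybeObject* MakeOutOfLineConstantPool(Assembler* assm, Heap* heap) {
    // During assembly, move_32_bit_immediate()/vmov() funnel entries
    // through ConstantPoolAddEntry() into the builder.
    MaybeObject* maybe_pool = assm->AllocateConstantPool(heap);
    ConstantPoolArray* pool;
    if (!maybe_pool->To(&pool)) return maybe_pool;  // allocation failure
    // Copies each entry into the array and patches the ldr/vldr [pp, #0]
    // placeholders with their final offsets.
    assm->PopulateConstantPool(pool);
    return pool;
  }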
} } // namespace v8::internal
#endif // V8_TARGET_ARCH_ARM
diff --git a/deps/v8/src/arm/assembler-arm.h b/deps/v8/src/arm/assembler-arm.h
index ccb5104206..727b054211 100644
--- a/deps/v8/src/arm/assembler-arm.h
+++ b/deps/v8/src/arm/assembler-arm.h
@@ -39,7 +39,10 @@
#ifndef V8_ARM_ASSEMBLER_ARM_H_
#define V8_ARM_ASSEMBLER_ARM_H_
+
#include <stdio.h>
+#include <vector>
+
#include "assembler.h"
#include "constants-arm.h"
#include "serialize.h"
@@ -376,8 +379,9 @@ struct QwNeonRegister {
}
void split_code(int* vm, int* m) const {
ASSERT(is_valid());
- *m = (code_ & 0x10) >> 4;
- *vm = code_ & 0x0F;
+ int encoded_code = code_ << 1;
+ *m = (encoded_code & 0x10) >> 4;
+ *vm = encoded_code & 0x0F;
}
int code_;
@@ -702,9 +706,42 @@ class NeonListOperand BASE_EMBEDDED {
NeonListType type_;
};
+
+// Class used to build a constant pool.
+class ConstantPoolBuilder BASE_EMBEDDED {
+ public:
+ explicit ConstantPoolBuilder();
+ void AddEntry(Assembler* assm, const RelocInfo& rinfo);
+ void Relocate(int pc_delta);
+ bool IsEmpty();
+ MaybeObject* Allocate(Heap* heap);
+ void Populate(Assembler* assm, ConstantPoolArray* constant_pool);
+
+ inline int count_of_64bit() const { return count_of_64bit_; }
+ inline int count_of_code_ptr() const { return count_of_code_ptr_; }
+ inline int count_of_heap_ptr() const { return count_of_heap_ptr_; }
+ inline int count_of_32bit() const { return count_of_32bit_; }
+
+ private:
+ bool Is64BitEntry(RelocInfo::Mode rmode);
+ bool Is32BitEntry(RelocInfo::Mode rmode);
+ bool IsCodePtrEntry(RelocInfo::Mode rmode);
+ bool IsHeapPtrEntry(RelocInfo::Mode rmode);
+
+ std::vector<RelocInfo> entries_;
+ std::vector<int> merged_indexes_;
+ int count_of_64bit_;
+ int count_of_code_ptr_;
+ int count_of_heap_ptr_;
+ int count_of_32bit_;
+};
+
+
extern const Instr kMovLrPc;
extern const Instr kLdrPCMask;
extern const Instr kLdrPCPattern;
+extern const Instr kLdrPpMask;
+extern const Instr kLdrPpPattern;
extern const Instr kBlxRegMask;
extern const Instr kBlxRegPattern;
extern const Instr kBlxIp;
@@ -780,9 +817,27 @@ class Assembler : public AssemblerBase {
// the branch/call instruction at pc, or the object in a mov.
INLINE(static Address target_pointer_address_at(Address pc));
+ // Return the address in the constant pool of the code target address used by
+ // the branch/call instruction at pc, or the object in a mov.
+ INLINE(static Address target_constant_pool_address_at(
+ Address pc, ConstantPoolArray* constant_pool));
+
// Read/Modify the code target address in the branch/call instruction at pc.
- INLINE(static Address target_address_at(Address pc));
- INLINE(static void set_target_address_at(Address pc, Address target));
+ INLINE(static Address target_address_at(Address pc,
+ ConstantPoolArray* constant_pool));
+ INLINE(static void set_target_address_at(Address pc,
+ ConstantPoolArray* constant_pool,
+ Address target));
+ INLINE(static Address target_address_at(Address pc, Code* code)) {
+ ConstantPoolArray* constant_pool = code ? code->constant_pool() : NULL;
+ return target_address_at(pc, constant_pool);
+ }
+ INLINE(static void set_target_address_at(Address pc,
+ Code* code,
+ Address target)) {
+ ConstantPoolArray* constant_pool = code ? code->constant_pool() : NULL;
+ set_target_address_at(pc, constant_pool, target);
+ }
// Return the code target address at a call site from the return address
// of that call in the instruction stream.
@@ -795,7 +850,7 @@ class Assembler : public AssemblerBase {
// This sets the branch destination (which is in the constant pool on ARM).
// This is for calls and branches within generated code.
inline static void deserialization_set_special_target_at(
- Address constant_pool_entry, Address target);
+ Address constant_pool_entry, Code* code, Address target);
// Here we are patching the address in the constant pool, not the actual call
// instruction. The address in the constant pool is the same size as a
@@ -1292,12 +1347,6 @@ class Assembler : public AssemblerBase {
// Jump unconditionally to given label.
void jmp(Label* L) { b(L, al); }
- static bool use_immediate_embedded_pointer_loads(
- const Assembler* assembler) {
- return CpuFeatures::IsSupported(MOVW_MOVT_IMMEDIATE_LOADS) &&
- (assembler == NULL || !assembler->predictable_code_size());
- }
-
// Check the code size generated from label to here.
int SizeOfCodeGeneratedSince(Label* label) {
return pc_offset() - label->pos();
@@ -1401,6 +1450,8 @@ class Assembler : public AssemblerBase {
static int GetBranchOffset(Instr instr);
static bool IsLdrRegisterImmediate(Instr instr);
static bool IsVldrDRegisterImmediate(Instr instr);
+ static bool IsLdrPpImmediateOffset(Instr instr);
+ static bool IsVldrDPpImmediateOffset(Instr instr);
static int GetLdrRegisterImmediateOffset(Instr instr);
static int GetVldrDRegisterImmediateOffset(Instr instr);
static Instr SetLdrRegisterImmediateOffset(Instr instr, int offset);
@@ -1446,6 +1497,20 @@ class Assembler : public AssemblerBase {
// Check if is time to emit a constant pool.
void CheckConstPool(bool force_emit, bool require_jump);
+ // Allocate a constant pool of the correct size for the generated code.
+ MaybeObject* AllocateConstantPool(Heap* heap);
+
+ // Generate the constant pool for the generated code.
+ void PopulateConstantPool(ConstantPoolArray* constant_pool);
+
+ bool can_use_constant_pool() const {
+ return is_constant_pool_available() && !constant_pool_full_;
+ }
+
+ void set_constant_pool_full() {
+ constant_pool_full_ = true;
+ }
+
protected:
// Relocation for a type-recording IC has the AST id added to it. This
// member variable is a way to pass the information from the call site to
@@ -1499,6 +1564,14 @@ class Assembler : public AssemblerBase {
(pc_offset() < no_const_pool_before_);
}
+ bool is_constant_pool_available() const {
+ return constant_pool_available_;
+ }
+
+ void set_constant_pool_available(bool available) {
+ constant_pool_available_ = available;
+ }
+
private:
int next_buffer_check_; // pc offset of next buffer check
@@ -1556,19 +1629,27 @@ class Assembler : public AssemblerBase {
// Number of pending reloc info entries in the 64 bits buffer.
int num_pending_64_bit_reloc_info_;
+ ConstantPoolBuilder constant_pool_builder_;
+
// The bound position, before this we cannot do instruction elimination.
int last_bound_pos_;
+ // Indicates whether the constant pool can be accessed, which is only possible
+ // if the pp register points to the current code object's constant pool.
+ bool constant_pool_available_;
+ // Indicates whether the constant pool is too full to accept new entries due
+  // to the ldr instruction's limited immediate offset range.
+ bool constant_pool_full_;
+
// Code emission
inline void CheckBuffer();
void GrowBuffer();
inline void emit(Instr x);
// 32-bit immediate values
- void move_32_bit_immediate(Condition cond,
- Register rd,
- SBit s,
- const Operand& x);
+ void move_32_bit_immediate(Register rd,
+ const Operand& x,
+ Condition cond = al);
// Instruction generation
void addrmod1(Instr instr, Register rn, Register rd, const Operand& x);
@@ -1588,14 +1669,15 @@ class Assembler : public AssemblerBase {
};
// Record reloc info for current pc_
- void RecordRelocInfo(RelocInfo::Mode rmode, intptr_t data = 0,
- UseConstantPoolMode mode = USE_CONSTANT_POOL);
- void RecordRelocInfo(double data);
- void RecordRelocInfoConstantPoolEntryHelper(const RelocInfo& rinfo);
+ void RecordRelocInfo(RelocInfo::Mode rmode, intptr_t data = 0);
+ void RecordRelocInfo(const RelocInfo& rinfo);
+ void ConstantPoolAddEntry(const RelocInfo& rinfo);
friend class RelocInfo;
friend class CodePatcher;
friend class BlockConstPoolScope;
+ friend class FrameAndConstantPoolScope;
+ friend class ConstantPoolUnavailableScope;
PositionsRecorder positions_recorder_;
friend class PositionsRecorder;
diff --git a/deps/v8/src/arm/builtins-arm.cc b/deps/v8/src/arm/builtins-arm.cc
index 7898086c07..f138146417 100644
--- a/deps/v8/src/arm/builtins-arm.cc
+++ b/deps/v8/src/arm/builtins-arm.cc
@@ -155,10 +155,7 @@ void Builtins::Generate_ArrayCode(MacroAssembler* masm) {
// Run the native code for the Array function called as a normal function.
// tail call a stub
- Handle<Object> undefined_sentinel(
- masm->isolate()->heap()->undefined_value(),
- masm->isolate());
- __ mov(r2, Operand(undefined_sentinel));
+ __ LoadRoot(r2, Heap::kUndefinedValueRootIndex);
ArrayConstructorStub stub(masm->isolate());
__ TailCallStub(&stub);
}
@@ -262,7 +259,7 @@ void Builtins::Generate_StringConstructCode(MacroAssembler* masm) {
__ push(function); // Preserve the function.
__ IncrementCounter(counters->string_ctor_conversions(), 1, r3, r4);
{
- FrameScope scope(masm, StackFrame::INTERNAL);
+ FrameAndConstantPoolScope scope(masm, StackFrame::INTERNAL);
__ push(r0);
__ InvokeBuiltin(Builtins::TO_STRING, CALL_FUNCTION);
}
@@ -282,7 +279,7 @@ void Builtins::Generate_StringConstructCode(MacroAssembler* masm) {
__ bind(&gc_required);
__ IncrementCounter(counters->string_ctor_gc_required(), 1, r3, r4);
{
- FrameScope scope(masm, StackFrame::INTERNAL);
+ FrameAndConstantPoolScope scope(masm, StackFrame::INTERNAL);
__ push(argument);
__ CallRuntime(Runtime::kNewStringWrapper, 1);
}
@@ -292,7 +289,7 @@ void Builtins::Generate_StringConstructCode(MacroAssembler* masm) {
static void CallRuntimePassFunction(
MacroAssembler* masm, Runtime::FunctionId function_id) {
- FrameScope scope(masm, StackFrame::INTERNAL);
+ FrameAndConstantPoolScope scope(masm, StackFrame::INTERNAL);
// Push a copy of the function onto the stack.
__ push(r1);
// Push function as parameter to the runtime call.
@@ -329,7 +326,7 @@ void Builtins::Generate_InOptimizationQueue(MacroAssembler* masm) {
__ cmp(sp, Operand(ip));
__ b(hs, &ok);
- CallRuntimePassFunction(masm, Runtime::kTryInstallOptimizedCode);
+ CallRuntimePassFunction(masm, Runtime::kHiddenTryInstallOptimizedCode);
GenerateTailCallToReturnedCode(masm);
__ bind(&ok);
@@ -339,10 +336,12 @@ void Builtins::Generate_InOptimizationQueue(MacroAssembler* masm) {
static void Generate_JSConstructStubHelper(MacroAssembler* masm,
bool is_api_function,
- bool count_constructions) {
+ bool count_constructions,
+ bool create_memento) {
// ----------- S t a t e -------------
// -- r0 : number of arguments
// -- r1 : constructor function
+ // -- r2 : allocation site or undefined
// -- lr : return address
// -- sp[...]: constructor arguments
// -----------------------------------
@@ -350,11 +349,22 @@ static void Generate_JSConstructStubHelper(MacroAssembler* masm,
// Should never count constructions for api objects.
ASSERT(!is_api_function || !count_constructions);
+ // Should never create mementos for api functions.
+ ASSERT(!is_api_function || !create_memento);
+
+ // Should never create mementos before slack tracking is finished.
+ ASSERT(!count_constructions || !create_memento);
+
Isolate* isolate = masm->isolate();
// Enter a construct frame.
{
- FrameScope scope(masm, StackFrame::CONSTRUCT);
+ FrameAndConstantPoolScope scope(masm, StackFrame::CONSTRUCT);
+
+ if (create_memento) {
+ __ AssertUndefinedOrAllocationSite(r2, r3);
+ __ push(r2);
+ }
// Preserve the two incoming parameters on the stack.
__ SmiTag(r0);
@@ -405,7 +415,7 @@ static void Generate_JSConstructStubHelper(MacroAssembler* masm,
__ Push(r2, r1); // r1 = constructor
// The call will replace the stub, so the countdown is only done once.
- __ CallRuntime(Runtime::kFinalizeInstanceSize, 1);
+ __ CallRuntime(Runtime::kHiddenFinalizeInstanceSize, 1);
__ pop(r2);
__ pop(r1);
@@ -417,13 +427,17 @@ static void Generate_JSConstructStubHelper(MacroAssembler* masm,
// r1: constructor function
// r2: initial map
__ ldrb(r3, FieldMemOperand(r2, Map::kInstanceSizeOffset));
+ if (create_memento) {
+ __ add(r3, r3, Operand(AllocationMemento::kSize / kPointerSize));
+ }
+
__ Allocate(r3, r4, r5, r6, &rt_call, SIZE_IN_WORDS);
// Allocated the JSObject, now initialize the fields. Map is set to
// initial map and properties and elements are set to empty fixed array.
// r1: constructor function
// r2: initial map
- // r3: object size
+ // r3: object size (not including memento if create_memento)
// r4: JSObject (not tagged)
__ LoadRoot(r6, Heap::kEmptyFixedArrayRootIndex);
__ mov(r5, r4);
@@ -437,12 +451,13 @@ static void Generate_JSConstructStubHelper(MacroAssembler* masm,
// Fill all the in-object properties with the appropriate filler.
// r1: constructor function
// r2: initial map
- // r3: object size (in words)
+ // r3: object size (in words, including memento if create_memento)
// r4: JSObject (not tagged)
// r5: First in-object property of JSObject (not tagged)
ASSERT_EQ(3 * kPointerSize, JSObject::kHeaderSize);
- __ LoadRoot(r6, Heap::kUndefinedValueRootIndex);
+
if (count_constructions) {
+ __ LoadRoot(r6, Heap::kUndefinedValueRootIndex);
__ ldr(r0, FieldMemOperand(r2, Map::kInstanceSizesOffset));
__ Ubfx(r0, r0, Map::kPreAllocatedPropertyFieldsByte * kBitsPerByte,
kBitsPerByte);
@@ -456,9 +471,28 @@ static void Generate_JSConstructStubHelper(MacroAssembler* masm,
__ InitializeFieldsWithFiller(r5, r0, r6);
// To allow for truncation.
__ LoadRoot(r6, Heap::kOnePointerFillerMapRootIndex);
+ __ add(r0, r4, Operand(r3, LSL, kPointerSizeLog2)); // End of object.
+ __ InitializeFieldsWithFiller(r5, r0, r6);
+ } else if (create_memento) {
+ __ sub(r6, r3, Operand(AllocationMemento::kSize / kPointerSize));
+ __ add(r0, r4, Operand(r6, LSL, kPointerSizeLog2)); // End of object.
+ __ LoadRoot(r6, Heap::kUndefinedValueRootIndex);
+ __ InitializeFieldsWithFiller(r5, r0, r6);
+
+ // Fill in memento fields.
+ // r5: points to the allocated but uninitialized memento.
+ __ LoadRoot(r6, Heap::kAllocationMementoMapRootIndex);
+ ASSERT_EQ(0 * kPointerSize, AllocationMemento::kMapOffset);
+ __ str(r6, MemOperand(r5, kPointerSize, PostIndex));
+ // Load the AllocationSite
+ __ ldr(r6, MemOperand(sp, 2 * kPointerSize));
+ ASSERT_EQ(1 * kPointerSize, AllocationMemento::kAllocationSiteOffset);
+ __ str(r6, MemOperand(r5, kPointerSize, PostIndex));
+ } else {
+ __ LoadRoot(r6, Heap::kUndefinedValueRootIndex);
+ __ add(r0, r4, Operand(r3, LSL, kPointerSizeLog2)); // End of object.
+ __ InitializeFieldsWithFiller(r5, r0, r6);
}
- __ add(r0, r4, Operand(r3, LSL, kPointerSizeLog2)); // End of object.
- __ InitializeFieldsWithFiller(r5, r0, r6);
// Add the object tag to make the JSObject real, so that we can continue
// and jump into the continuation code at any time from now on. Any
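For reference, a minimal standalone sketch of the size bookkeeping used by the memento path in this hunk; it is not part of the patch, and the constant values are assumptions for 32-bit ARM (kPointerSize = 4, AllocationMemento::kSize = two pointers).

    // Sketch: total allocation size kept in r3, and where the memento begins.
    struct AllocationPlan {
      int total_size_in_words;   // what the stub passes to Allocate()
      int memento_offset_words;  // first memento word, relative to the object
    };

    AllocationPlan PlanConstructAllocation(int instance_size_in_words,
                                           bool create_memento) {
      const int kPointerSize = 4;                           // assumed: 32-bit ARM
      const int kAllocationMementoSize = 2 * kPointerSize;  // assumed: map + site
      AllocationPlan plan;
      plan.total_size_in_words = instance_size_in_words;
      if (create_memento) {
        plan.total_size_in_words += kAllocationMementoSize / kPointerSize;
      }
      // The memento is written directly after the in-object fields, which is
      // why the filler loop stops kSize / kPointerSize words early.
      plan.memento_offset_words = instance_size_in_words;
      return plan;
    }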
@@ -556,13 +590,47 @@ static void Generate_JSConstructStubHelper(MacroAssembler* masm,
// Allocate the new receiver object using the runtime call.
// r1: constructor function
__ bind(&rt_call);
+ if (create_memento) {
+ // Get the cell or allocation site.
+ __ ldr(r2, MemOperand(sp, 2 * kPointerSize));
+ __ push(r2);
+ }
+
__ push(r1); // argument for Runtime_NewObject
- __ CallRuntime(Runtime::kNewObject, 1);
+ if (create_memento) {
+ __ CallRuntime(Runtime::kHiddenNewObjectWithAllocationSite, 2);
+ } else {
+ __ CallRuntime(Runtime::kHiddenNewObject, 1);
+ }
__ mov(r4, r0);
  + // If we ended up using the runtime, and we want a memento, then the
  + // runtime call made it for us, and we shouldn't do the create-count
  + // increment.
+ Label count_incremented;
+ if (create_memento) {
+ __ jmp(&count_incremented);
+ }
+
// Receiver for constructor call allocated.
// r4: JSObject
__ bind(&allocated);
+
+ if (create_memento) {
+ __ ldr(r2, MemOperand(sp, kPointerSize * 2));
+ __ LoadRoot(r5, Heap::kUndefinedValueRootIndex);
+ __ cmp(r2, r5);
+ __ b(eq, &count_incremented);
+ // r2 is an AllocationSite. We are creating a memento from it, so we
+ // need to increment the memento create count.
+ __ ldr(r3, FieldMemOperand(r2,
+ AllocationSite::kPretenureCreateCountOffset));
+ __ add(r3, r3, Operand(Smi::FromInt(1)));
+ __ str(r3, FieldMemOperand(r2,
+ AllocationSite::kPretenureCreateCountOffset));
+ __ bind(&count_incremented);
+ }
+
__ push(r4);
__ push(r4);
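A hedged sketch of the create-count bump performed above: the counter is stored as a Smi, so adding the Smi encoding of 1 to the raw word increments the logical count. The tag size is an assumption for 32-bit ARM.

    #include <stdint.h>

    const int kSmiTagSize = 1;  // assumed: 31-bit smis on 32-bit ARM

    int32_t SmiFromInt(int value) { return value << kSmiTagSize; }
    int SmiToInt(int32_t smi) { return smi >> kSmiTagSize; }

    // Equivalent of: ldr r3, [site + create_count]; add r3, r3, Smi(1); str r3.
    int32_t IncrementPretenureCreateCount(int32_t smi_count) {
      return smi_count + SmiFromInt(1);
    }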
@@ -665,17 +733,17 @@ static void Generate_JSConstructStubHelper(MacroAssembler* masm,
void Builtins::Generate_JSConstructStubCountdown(MacroAssembler* masm) {
- Generate_JSConstructStubHelper(masm, false, true);
+ Generate_JSConstructStubHelper(masm, false, true, false);
}
void Builtins::Generate_JSConstructStubGeneric(MacroAssembler* masm) {
- Generate_JSConstructStubHelper(masm, false, false);
+ Generate_JSConstructStubHelper(masm, false, false, FLAG_pretenuring_call_new);
}
void Builtins::Generate_JSConstructStubApi(MacroAssembler* masm) {
- Generate_JSConstructStubHelper(masm, true, false);
+ Generate_JSConstructStubHelper(masm, true, false, false);
}
@@ -738,9 +806,7 @@ static void Generate_JSEntryTrampolineHelper(MacroAssembler* masm,
__ mov(r0, Operand(r3));
if (is_construct) {
// No type feedback cell is available
- Handle<Object> undefined_sentinel(
- masm->isolate()->heap()->undefined_value(), masm->isolate());
- __ mov(r2, Operand(undefined_sentinel));
+ __ LoadRoot(r2, Heap::kUndefinedValueRootIndex);
CallConstructStub stub(NO_CALL_FUNCTION_FLAGS);
__ CallStub(&stub);
} else {
@@ -768,13 +834,13 @@ void Builtins::Generate_JSConstructEntryTrampoline(MacroAssembler* masm) {
void Builtins::Generate_CompileUnoptimized(MacroAssembler* masm) {
- CallRuntimePassFunction(masm, Runtime::kCompileUnoptimized);
+ CallRuntimePassFunction(masm, Runtime::kHiddenCompileUnoptimized);
GenerateTailCallToReturnedCode(masm);
}
static void CallCompileOptimized(MacroAssembler* masm, bool concurrent) {
- FrameScope scope(masm, StackFrame::INTERNAL);
+ FrameAndConstantPoolScope scope(masm, StackFrame::INTERNAL);
// Push a copy of the function onto the stack.
__ push(r1);
// Push function as parameter to the runtime call.
@@ -782,7 +848,7 @@ static void CallCompileOptimized(MacroAssembler* masm, bool concurrent) {
// Whether to compile in a background thread.
__ Push(masm->isolate()->factory()->ToBoolean(concurrent));
- __ CallRuntime(Runtime::kCompileOptimized, 2);
+ __ CallRuntime(Runtime::kHiddenCompileOptimized, 2);
// Restore receiver.
__ pop(r1);
}
@@ -870,14 +936,14 @@ void Builtins::Generate_MarkCodeAsExecutedTwice(MacroAssembler* masm) {
static void Generate_NotifyStubFailureHelper(MacroAssembler* masm,
SaveFPRegsMode save_doubles) {
{
- FrameScope scope(masm, StackFrame::INTERNAL);
+ FrameAndConstantPoolScope scope(masm, StackFrame::INTERNAL);
// Preserve registers across notification, this is important for compiled
// stubs that tail call the runtime on deopts passing their parameters in
// registers.
__ stm(db_w, sp, kJSCallerSaved | kCalleeSaved);
// Pass the function and deoptimization type to the runtime system.
- __ CallRuntime(Runtime::kNotifyStubFailure, 0, save_doubles);
+ __ CallRuntime(Runtime::kHiddenNotifyStubFailure, 0, save_doubles);
__ ldm(ia_w, sp, kJSCallerSaved | kCalleeSaved);
}
@@ -899,11 +965,11 @@ void Builtins::Generate_NotifyStubFailureSaveDoubles(MacroAssembler* masm) {
static void Generate_NotifyDeoptimizedHelper(MacroAssembler* masm,
Deoptimizer::BailoutType type) {
{
- FrameScope scope(masm, StackFrame::INTERNAL);
+ FrameAndConstantPoolScope scope(masm, StackFrame::INTERNAL);
// Pass the function and deoptimization type to the runtime system.
__ mov(r0, Operand(Smi::FromInt(static_cast<int>(type))));
__ push(r0);
- __ CallRuntime(Runtime::kNotifyDeoptimized, 1);
+ __ CallRuntime(Runtime::kHiddenNotifyDeoptimized, 1);
}
// Get the full codegen state from the stack and untag it -> r6.
@@ -947,7 +1013,7 @@ void Builtins::Generate_OnStackReplacement(MacroAssembler* masm) {
// Lookup the function in the JavaScript frame.
__ ldr(r0, MemOperand(fp, JavaScriptFrameConstants::kFunctionOffset));
{
- FrameScope scope(masm, StackFrame::INTERNAL);
+ FrameAndConstantPoolScope scope(masm, StackFrame::INTERNAL);
// Pass function as argument.
__ push(r0);
__ CallRuntime(Runtime::kCompileForOnStackReplacement, 1);
@@ -963,20 +1029,26 @@ void Builtins::Generate_OnStackReplacement(MacroAssembler* masm) {
// Load deoptimization data from the code object.
// <deopt_data> = <code>[#deoptimization_data_offset]
- __ ldr(r1, MemOperand(r0, Code::kDeoptimizationDataOffset - kHeapObjectTag));
+ __ ldr(r1, FieldMemOperand(r0, Code::kDeoptimizationDataOffset));
- // Load the OSR entrypoint offset from the deoptimization data.
- // <osr_offset> = <deopt_data>[#header_size + #osr_pc_offset]
- __ ldr(r1, MemOperand(r1, FixedArray::OffsetOfElementAt(
- DeoptimizationInputData::kOsrPcOffsetIndex) - kHeapObjectTag));
+ { ConstantPoolUnavailableScope constant_pool_unavailable(masm);
+ if (FLAG_enable_ool_constant_pool) {
+ __ ldr(pp, FieldMemOperand(r0, Code::kConstantPoolOffset));
+ }
- // Compute the target address = code_obj + header_size + osr_offset
- // <entry_addr> = <code_obj> + #header_size + <osr_offset>
- __ add(r0, r0, Operand::SmiUntag(r1));
- __ add(lr, r0, Operand(Code::kHeaderSize - kHeapObjectTag));
+ // Load the OSR entrypoint offset from the deoptimization data.
+ // <osr_offset> = <deopt_data>[#header_size + #osr_pc_offset]
+ __ ldr(r1, FieldMemOperand(r1, FixedArray::OffsetOfElementAt(
+ DeoptimizationInputData::kOsrPcOffsetIndex)));
- // And "return" to the OSR entry point of the function.
- __ Ret();
+ // Compute the target address = code_obj + header_size + osr_offset
+ // <entry_addr> = <code_obj> + #header_size + <osr_offset>
+ __ add(r0, r0, Operand::SmiUntag(r1));
+ __ add(lr, r0, Operand(Code::kHeaderSize - kHeapObjectTag));
+
+ // And "return" to the OSR entry point of the function.
+ __ Ret();
+ }
}
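A small sketch of the OSR entry computation in this hunk, with the tag and header constants treated as assumptions rather than the real V8 values:

    #include <stdint.h>

    // <entry_addr> = <code_obj> + #header_size + <osr_offset>, minus the heap tag.
    uintptr_t OsrEntryAddress(uintptr_t tagged_code_object,
                              int32_t smi_osr_pc_offset,
                              int code_header_size) {  // stands in for Code::kHeaderSize
      const int kSmiTagSize = 1;     // assumed: 31-bit smis on ARM
      const int kHeapObjectTag = 1;  // assumed heap-object tag
      int osr_pc_offset = smi_osr_pc_offset >> kSmiTagSize;  // SmiUntag
      return tagged_code_object - kHeapObjectTag + code_header_size + osr_pc_offset;
    }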
@@ -987,8 +1059,8 @@ void Builtins::Generate_OsrAfterStackCheck(MacroAssembler* masm) {
__ cmp(sp, Operand(ip));
__ b(hs, &ok);
{
- FrameScope scope(masm, StackFrame::INTERNAL);
- __ CallRuntime(Runtime::kStackGuard, 0);
+ FrameAndConstantPoolScope scope(masm, StackFrame::INTERNAL);
+ __ CallRuntime(Runtime::kHiddenStackGuard, 0);
}
__ Jump(masm->isolate()->builtins()->OnStackReplacement(),
RelocInfo::CODE_TARGET);
@@ -1039,7 +1111,7 @@ void Builtins::Generate_FunctionCall(MacroAssembler* masm) {
__ tst(r3, Operand(1 << (SharedFunctionInfo::kNative + kSmiTagSize)));
__ b(ne, &shift_arguments);
- // Compute the receiver in non-strict mode.
+ // Compute the receiver in sloppy mode.
__ add(r2, sp, Operand(r0, LSL, kPointerSizeLog2));
__ ldr(r2, MemOperand(r2, -kPointerSize));
// r0: actual number of arguments
@@ -1062,7 +1134,7 @@ void Builtins::Generate_FunctionCall(MacroAssembler* masm) {
{
// Enter an internal frame in order to preserve argument count.
- FrameScope scope(masm, StackFrame::INTERNAL);
+ FrameAndConstantPoolScope scope(masm, StackFrame::INTERNAL);
__ SmiTag(r0);
__ push(r0);
@@ -1189,7 +1261,7 @@ void Builtins::Generate_FunctionApply(MacroAssembler* masm) {
const int kFunctionOffset = 4 * kPointerSize;
{
- FrameScope frame_scope(masm, StackFrame::INTERNAL);
+ FrameAndConstantPoolScope frame_scope(masm, StackFrame::INTERNAL);
__ ldr(r0, MemOperand(fp, kFunctionOffset)); // get the function
__ push(r0);
@@ -1247,7 +1319,7 @@ void Builtins::Generate_FunctionApply(MacroAssembler* masm) {
__ tst(r2, Operand(1 << (SharedFunctionInfo::kNative + kSmiTagSize)));
__ b(ne, &push_receiver);
- // Compute the receiver in non-strict mode.
+ // Compute the receiver in sloppy mode.
__ JumpIfSmi(r0, &call_to_object);
__ LoadRoot(r1, Heap::kNullValueRootIndex);
__ cmp(r0, r1);
@@ -1354,8 +1426,14 @@ static void LeaveArgumentsAdaptorFrame(MacroAssembler* masm) {
// then tear down the parameters.
__ ldr(r1, MemOperand(fp, -(StandardFrameConstants::kFixedFrameSizeFromFp +
kPointerSize)));
- __ mov(sp, fp);
- __ ldm(ia_w, sp, fp.bit() | lr.bit());
+
+ if (FLAG_enable_ool_constant_pool) {
+ __ add(sp, fp, Operand(StandardFrameConstants::kConstantPoolOffset));
+ __ ldm(ia_w, sp, pp.bit() | fp.bit() | lr.bit());
+ } else {
  + __ mov(sp, fp);
+ __ ldm(ia_w, sp, fp.bit() | lr.bit());
+ }
__ add(sp, sp, Operand::PointerOffsetFromSmiKey(r1));
__ add(sp, sp, Operand(kPointerSize)); // adjust for receiver
}
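A sketch of the final stack adjustment above, assuming 32-bit ARM pointer size and a one-bit smi tag:

    #include <stdint.h>

    // Drop the (smi-tagged) argument count worth of slots, plus the receiver.
    uintptr_t SpAfterLeavingAdaptorFrame(uintptr_t sp, int32_t smi_argument_count) {
      const int kSmiTagSize = 1;   // assumed: 31-bit smis
      const int kPointerSize = 4;  // 32-bit ARM
      int argc = smi_argument_count >> kSmiTagSize;
      return sp + argc * kPointerSize + kPointerSize;  // arguments + receiver
    }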
diff --git a/deps/v8/src/arm/code-stubs-arm.cc b/deps/v8/src/arm/code-stubs-arm.cc
index 44de7aabc3..832296b273 100644
--- a/deps/v8/src/arm/code-stubs-arm.cc
+++ b/deps/v8/src/arm/code-stubs-arm.cc
@@ -45,7 +45,7 @@ void FastNewClosureStub::InitializeInterfaceDescriptor(
descriptor->register_param_count_ = 1;
descriptor->register_params_ = registers;
descriptor->deoptimization_handler_ =
- Runtime::FunctionForId(Runtime::kNewClosureFromStubFailure)->entry;
+ Runtime::FunctionForId(Runtime::kHiddenNewClosureFromStubFailure)->entry;
}
@@ -76,7 +76,7 @@ void NumberToStringStub::InitializeInterfaceDescriptor(
descriptor->register_param_count_ = 1;
descriptor->register_params_ = registers;
descriptor->deoptimization_handler_ =
- Runtime::FunctionForId(Runtime::kNumberToString)->entry;
+ Runtime::FunctionForId(Runtime::kHiddenNumberToString)->entry;
}
@@ -87,7 +87,8 @@ void FastCloneShallowArrayStub::InitializeInterfaceDescriptor(
descriptor->register_param_count_ = 3;
descriptor->register_params_ = registers;
descriptor->deoptimization_handler_ =
- Runtime::FunctionForId(Runtime::kCreateArrayLiteralStubBailout)->entry;
+ Runtime::FunctionForId(
+ Runtime::kHiddenCreateArrayLiteralStubBailout)->entry;
}
@@ -98,15 +99,15 @@ void FastCloneShallowObjectStub::InitializeInterfaceDescriptor(
descriptor->register_param_count_ = 4;
descriptor->register_params_ = registers;
descriptor->deoptimization_handler_ =
- Runtime::FunctionForId(Runtime::kCreateObjectLiteral)->entry;
+ Runtime::FunctionForId(Runtime::kHiddenCreateObjectLiteral)->entry;
}
void CreateAllocationSiteStub::InitializeInterfaceDescriptor(
Isolate* isolate,
CodeStubInterfaceDescriptor* descriptor) {
- static Register registers[] = { r2 };
- descriptor->register_param_count_ = 1;
+ static Register registers[] = { r2, r3 };
+ descriptor->register_param_count_ = 2;
descriptor->register_params_ = registers;
descriptor->deoptimization_handler_ = NULL;
}
@@ -141,7 +142,7 @@ void RegExpConstructResultStub::InitializeInterfaceDescriptor(
descriptor->register_param_count_ = 3;
descriptor->register_params_ = registers;
descriptor->deoptimization_handler_ =
- Runtime::FunctionForId(Runtime::kRegExpConstructResult)->entry;
+ Runtime::FunctionForId(Runtime::kHiddenRegExpConstructResult)->entry;
}
@@ -165,6 +166,26 @@ void KeyedLoadFieldStub::InitializeInterfaceDescriptor(
}
+void StringLengthStub::InitializeInterfaceDescriptor(
+ Isolate* isolate,
+ CodeStubInterfaceDescriptor* descriptor) {
+ static Register registers[] = { r0, r2 };
+ descriptor->register_param_count_ = 2;
+ descriptor->register_params_ = registers;
+ descriptor->deoptimization_handler_ = NULL;
+}
+
+
+void KeyedStringLengthStub::InitializeInterfaceDescriptor(
+ Isolate* isolate,
+ CodeStubInterfaceDescriptor* descriptor) {
+ static Register registers[] = { r1, r0 };
+ descriptor->register_param_count_ = 2;
+ descriptor->register_params_ = registers;
+ descriptor->deoptimization_handler_ = NULL;
+}
+
+
void KeyedStoreFastElementStub::InitializeInterfaceDescriptor(
Isolate* isolate,
CodeStubInterfaceDescriptor* descriptor) {
@@ -226,7 +247,7 @@ static void InitializeArrayConstructorDescriptor(
descriptor->hint_stack_parameter_count_ = constant_stack_parameter_count;
descriptor->function_mode_ = JS_FUNCTION_STUB_MODE;
descriptor->deoptimization_handler_ =
- Runtime::FunctionForId(Runtime::kArrayConstructor)->entry;
+ Runtime::FunctionForId(Runtime::kHiddenArrayConstructor)->entry;
}
@@ -254,7 +275,7 @@ static void InitializeInternalArrayConstructorDescriptor(
descriptor->hint_stack_parameter_count_ = constant_stack_parameter_count;
descriptor->function_mode_ = JS_FUNCTION_STUB_MODE;
descriptor->deoptimization_handler_ =
- Runtime::FunctionForId(Runtime::kInternalArrayConstructor)->entry;
+ Runtime::FunctionForId(Runtime::kHiddenInternalArrayConstructor)->entry;
}
@@ -365,7 +386,7 @@ void StringAddStub::InitializeInterfaceDescriptor(
descriptor->register_param_count_ = 2;
descriptor->register_params_ = registers;
descriptor->deoptimization_handler_ =
- Runtime::FunctionForId(Runtime::kStringAdd)->entry;
+ Runtime::FunctionForId(Runtime::kHiddenStringAdd)->entry;
}
@@ -490,7 +511,7 @@ void HydrogenCodeStub::GenerateLightweightMiss(MacroAssembler* masm) {
int param_count = descriptor->register_param_count_;
{
// Call the runtime system in a fresh internal frame.
- FrameScope scope(masm, StackFrame::INTERNAL);
+ FrameAndConstantPoolScope scope(masm, StackFrame::INTERNAL);
ASSERT(descriptor->register_param_count_ == 0 ||
r0.is(descriptor->register_params_[param_count - 1]));
// Push arguments
@@ -602,6 +623,7 @@ void DoubleToIStub::Generate(MacroAssembler* masm) {
Label out_of_range, only_low, negate, done;
Register input_reg = source();
Register result_reg = destination();
+ ASSERT(is_truncating());
int double_offset = offset();
// Account for saved regs if input is sp.
@@ -1480,22 +1502,9 @@ void CEntryStub::GenerateAheadOfTime(Isolate* isolate) {
}
-static void JumpIfOOM(MacroAssembler* masm,
- Register value,
- Register scratch,
- Label* oom_label) {
- STATIC_ASSERT(Failure::OUT_OF_MEMORY_EXCEPTION == 3);
- STATIC_ASSERT(kFailureTag == 3);
- __ and_(scratch, value, Operand(0xf));
- __ cmp(scratch, Operand(0xf));
- __ b(eq, oom_label);
-}
-
-
void CEntryStub::GenerateCore(MacroAssembler* masm,
Label* throw_normal_exception,
Label* throw_termination_exception,
- Label* throw_out_of_memory_exception,
bool do_gc,
bool always_allocate) {
// r0: result parameter for PerformGC, if any
@@ -1554,9 +1563,9 @@ void CEntryStub::GenerateCore(MacroAssembler* masm,
{
// Prevent literal pool emission before return address.
Assembler::BlockConstPoolScope block_const_pool(masm);
- masm->add(lr, pc, Operand(4));
+ __ add(lr, pc, Operand(4));
__ str(lr, MemOperand(sp, 0));
- masm->Jump(r5);
+ __ Call(r5);
}
__ VFPEnsureFPSCRState(r2);
@@ -1593,26 +1602,21 @@ void CEntryStub::GenerateCore(MacroAssembler* masm,
__ tst(r0, Operand(((1 << kFailureTypeTagSize) - 1) << kFailureTagSize));
__ b(eq, &retry);
- // Special handling of out of memory exceptions.
- JumpIfOOM(masm, r0, ip, throw_out_of_memory_exception);
-
// Retrieve the pending exception.
__ mov(ip, Operand(ExternalReference(Isolate::kPendingExceptionAddress,
isolate)));
__ ldr(r0, MemOperand(ip));
- // See if we just retrieved an OOM exception.
- JumpIfOOM(masm, r0, ip, throw_out_of_memory_exception);
-
// Clear the pending exception.
- __ mov(r3, Operand(isolate->factory()->the_hole_value()));
+ __ LoadRoot(r3, Heap::kTheHoleValueRootIndex);
__ mov(ip, Operand(ExternalReference(Isolate::kPendingExceptionAddress,
isolate)));
__ str(r3, MemOperand(ip));
// Special handling of termination exceptions which are uncatchable
// by javascript code.
- __ cmp(r0, Operand(isolate->factory()->termination_exception()));
+ __ LoadRoot(r3, Heap::kTerminationExceptionRootIndex);
+ __ cmp(r0, r3);
__ b(eq, throw_termination_exception);
// Handle normal exception.
@@ -1644,7 +1648,7 @@ void CEntryStub::Generate(MacroAssembler* masm) {
__ sub(r6, r6, Operand(kPointerSize));
// Enter the exit frame that transitions from JavaScript to C++.
- FrameScope scope(masm, StackFrame::MANUAL);
+ FrameAndConstantPoolScope scope(masm, StackFrame::MANUAL);
__ EnterExitFrame(save_doubles_);
// Set up argc and the builtin function in callee-saved registers.
@@ -1657,13 +1661,11 @@ void CEntryStub::Generate(MacroAssembler* masm) {
Label throw_normal_exception;
Label throw_termination_exception;
- Label throw_out_of_memory_exception;
// Call into the runtime system.
GenerateCore(masm,
&throw_normal_exception,
&throw_termination_exception,
- &throw_out_of_memory_exception,
false,
false);
@@ -1671,7 +1673,6 @@ void CEntryStub::Generate(MacroAssembler* masm) {
GenerateCore(masm,
&throw_normal_exception,
&throw_termination_exception,
- &throw_out_of_memory_exception,
true,
false);
@@ -1681,29 +1682,14 @@ void CEntryStub::Generate(MacroAssembler* masm) {
GenerateCore(masm,
&throw_normal_exception,
&throw_termination_exception,
- &throw_out_of_memory_exception,
true,
true);
- __ bind(&throw_out_of_memory_exception);
- // Set external caught exception to false.
- Isolate* isolate = masm->isolate();
- ExternalReference external_caught(Isolate::kExternalCaughtExceptionAddress,
- isolate);
- __ mov(r0, Operand(false, RelocInfo::NONE32));
- __ mov(r2, Operand(external_caught));
- __ str(r0, MemOperand(r2));
-
- // Set pending exception and r0 to out of memory exception.
- Label already_have_failure;
- JumpIfOOM(masm, r0, ip, &already_have_failure);
- Failure* out_of_memory = Failure::OutOfMemoryException(0x1);
- __ mov(r0, Operand(reinterpret_cast<int32_t>(out_of_memory)));
- __ bind(&already_have_failure);
- __ mov(r2, Operand(ExternalReference(Isolate::kPendingExceptionAddress,
- isolate)));
- __ str(r0, MemOperand(r2));
- // Fall through to the next label.
+ { FrameScope scope(masm, StackFrame::MANUAL);
+ __ PrepareCallCFunction(0, r0);
+ __ CallCFunction(
+ ExternalReference::out_of_memory_function(masm->isolate()), 0, 0);
+ }
__ bind(&throw_termination_exception);
__ ThrowUncatchable(r0);
@@ -1755,7 +1741,7 @@ void JSEntryStub::GenerateBody(MacroAssembler* masm, bool is_construct) {
Isolate* isolate = masm->isolate();
int marker = is_construct ? StackFrame::ENTRY_CONSTRUCT : StackFrame::ENTRY;
if (FLAG_enable_ool_constant_pool) {
- __ mov(r8, Operand(Smi::FromInt(marker)));
+ __ mov(r8, Operand(isolate->factory()->empty_constant_pool_array()));
}
__ mov(r7, Operand(Smi::FromInt(marker)));
__ mov(r6, Operand(Smi::FromInt(marker)));
@@ -1843,16 +1829,10 @@ void JSEntryStub::GenerateBody(MacroAssembler* masm, bool is_construct) {
__ mov(ip, Operand(entry));
}
__ ldr(ip, MemOperand(ip)); // deref address
+ __ add(ip, ip, Operand(Code::kHeaderSize - kHeapObjectTag));
- // Branch and link to JSEntryTrampoline. We don't use the double underscore
- // macro for the add instruction because we don't want the coverage tool
- // inserting instructions here after we read the pc. We block literal pool
- // emission for the same reason.
- {
- Assembler::BlockConstPoolScope block_const_pool(masm);
- __ mov(lr, Operand(pc));
- masm->add(pc, ip, Operand(Code::kHeaderSize - kHeapObjectTag));
- }
+ // Branch and link to JSEntryTrampoline.
+ __ Call(ip);
// Unlink this frame from the handler chain.
__ PopTryHandler();
@@ -1897,8 +1877,7 @@ void JSEntryStub::GenerateBody(MacroAssembler* masm, bool is_construct) {
// * function: r1 or at sp.
//
// An inlined call site may have been generated before calling this stub.
-// In this case the offset to the inline site to patch is passed on the stack,
-// in the safepoint slot for register r4.
+// In this case the offset to the inline site to patch is passed in r5.
// (See LCodeGen::DoInstanceOfKnownGlobal)
void InstanceofStub::Generate(MacroAssembler* masm) {
// Call site inlining and patching implies arguments in registers.
@@ -1957,14 +1936,14 @@ void InstanceofStub::Generate(MacroAssembler* masm) {
ASSERT(HasArgsInRegisters());
// Patch the (relocated) inlined map check.
- // The offset was stored in r4 safepoint slot.
- // (See LCodeGen::DoDeferredLInstanceOfKnownGlobal)
- __ LoadFromSafepointRegisterSlot(scratch, r4);
- __ sub(inline_site, lr, scratch);
- // Get the map location in scratch and patch it.
- __ GetRelocatedValueLocation(inline_site, scratch);
- __ ldr(scratch, MemOperand(scratch));
- __ str(map, FieldMemOperand(scratch, Cell::kValueOffset));
+ // The offset was stored in r5
+ // (See LCodeGen::DoDeferredLInstanceOfKnownGlobal).
+ const Register offset = r5;
+ __ sub(inline_site, lr, offset);
+ // Get the map location in r5 and patch it.
+ __ GetRelocatedValueLocation(inline_site, offset);
+ __ ldr(offset, MemOperand(offset));
+ __ str(map, FieldMemOperand(offset, Cell::kValueOffset));
}
// Register mapping: r3 is object map and r4 is function prototype.
@@ -2057,7 +2036,7 @@ void InstanceofStub::Generate(MacroAssembler* masm) {
__ InvokeBuiltin(Builtins::INSTANCE_OF, JUMP_FUNCTION);
} else {
{
- FrameScope scope(masm, StackFrame::INTERNAL);
+ FrameAndConstantPoolScope scope(masm, StackFrame::INTERNAL);
__ Push(r0, r1);
__ InvokeBuiltin(Builtins::INSTANCE_OF, CALL_FUNCTION);
}
@@ -2099,108 +2078,6 @@ void FunctionPrototypeStub::Generate(MacroAssembler* masm) {
}
-void StringLengthStub::Generate(MacroAssembler* masm) {
- Label miss;
- Register receiver;
- if (kind() == Code::KEYED_LOAD_IC) {
- // ----------- S t a t e -------------
- // -- lr : return address
- // -- r0 : key
- // -- r1 : receiver
- // -----------------------------------
- __ cmp(r0, Operand(masm->isolate()->factory()->length_string()));
- __ b(ne, &miss);
- receiver = r1;
- } else {
- ASSERT(kind() == Code::LOAD_IC);
- // ----------- S t a t e -------------
- // -- r2 : name
- // -- lr : return address
- // -- r0 : receiver
- // -- sp[0] : receiver
- // -----------------------------------
- receiver = r0;
- }
-
- StubCompiler::GenerateLoadStringLength(masm, receiver, r3, r4, &miss);
-
- __ bind(&miss);
- StubCompiler::TailCallBuiltin(
- masm, BaseLoadStoreStubCompiler::MissBuiltin(kind()));
-}
-
-
-void StoreArrayLengthStub::Generate(MacroAssembler* masm) {
- // This accepts as a receiver anything JSArray::SetElementsLength accepts
- // (currently anything except for external arrays which means anything with
- // elements of FixedArray type). Value must be a number, but only smis are
- // accepted as the most common case.
- Label miss;
-
- Register receiver;
- Register value;
- if (kind() == Code::KEYED_STORE_IC) {
- // ----------- S t a t e -------------
- // -- lr : return address
- // -- r0 : value
- // -- r1 : key
- // -- r2 : receiver
- // -----------------------------------
- __ cmp(r1, Operand(masm->isolate()->factory()->length_string()));
- __ b(ne, &miss);
- receiver = r2;
- value = r0;
- } else {
- ASSERT(kind() == Code::STORE_IC);
- // ----------- S t a t e -------------
- // -- lr : return address
- // -- r0 : value
- // -- r1 : receiver
- // -- r2 : key
- // -----------------------------------
- receiver = r1;
- value = r0;
- }
- Register scratch = r3;
-
- // Check that the receiver isn't a smi.
- __ JumpIfSmi(receiver, &miss);
-
- // Check that the object is a JS array.
- __ CompareObjectType(receiver, scratch, scratch, JS_ARRAY_TYPE);
- __ b(ne, &miss);
-
- // Check that elements are FixedArray.
- // We rely on StoreIC_ArrayLength below to deal with all types of
- // fast elements (including COW).
- __ ldr(scratch, FieldMemOperand(receiver, JSArray::kElementsOffset));
- __ CompareObjectType(scratch, scratch, scratch, FIXED_ARRAY_TYPE);
- __ b(ne, &miss);
-
- // Check that the array has fast properties, otherwise the length
- // property might have been redefined.
- __ ldr(scratch, FieldMemOperand(receiver, JSArray::kPropertiesOffset));
- __ ldr(scratch, FieldMemOperand(scratch, FixedArray::kMapOffset));
- __ CompareRoot(scratch, Heap::kHashTableMapRootIndex);
- __ b(eq, &miss);
-
- // Check that value is a smi.
- __ JumpIfNotSmi(value, &miss);
-
- // Prepare tail call to StoreIC_ArrayLength.
- __ Push(receiver, value);
-
- ExternalReference ref =
- ExternalReference(IC_Utility(IC::kStoreIC_ArrayLength), masm->isolate());
- __ TailCallExternalReference(ref, 2, 1);
-
- __ bind(&miss);
-
- StubCompiler::TailCallBuiltin(
- masm, BaseLoadStoreStubCompiler::MissBuiltin(kind()));
-}
-
-
Register InstanceofStub::left() { return r0; }
@@ -2258,7 +2135,7 @@ void ArgumentsAccessStub::GenerateReadElement(MacroAssembler* masm) {
}
-void ArgumentsAccessStub::GenerateNewNonStrictSlow(MacroAssembler* masm) {
+void ArgumentsAccessStub::GenerateNewSloppySlow(MacroAssembler* masm) {
// sp[0] : number of parameters
// sp[4] : receiver displacement
// sp[8] : function
@@ -2278,11 +2155,11 @@ void ArgumentsAccessStub::GenerateNewNonStrictSlow(MacroAssembler* masm) {
__ str(r3, MemOperand(sp, 1 * kPointerSize));
__ bind(&runtime);
- __ TailCallRuntime(Runtime::kNewArgumentsFast, 3, 1);
+ __ TailCallRuntime(Runtime::kHiddenNewArgumentsFast, 3, 1);
}
-void ArgumentsAccessStub::GenerateNewNonStrictFast(MacroAssembler* masm) {
+void ArgumentsAccessStub::GenerateNewSloppyFast(MacroAssembler* masm) {
// Stack layout:
// sp[0] : number of parameters (tagged)
// sp[4] : address of receiver argument
@@ -2336,7 +2213,7 @@ void ArgumentsAccessStub::GenerateNewNonStrictFast(MacroAssembler* masm) {
__ add(r9, r9, Operand(FixedArray::kHeaderSize));
// 3. Arguments object.
- __ add(r9, r9, Operand(Heap::kArgumentsObjectSize));
+ __ add(r9, r9, Operand(Heap::kSloppyArgumentsObjectSize));
// Do the allocation of all three objects in one go.
__ Allocate(r9, r0, r3, r4, &runtime, TAG_OBJECT);
@@ -2345,7 +2222,7 @@ void ArgumentsAccessStub::GenerateNewNonStrictFast(MacroAssembler* masm) {
// r2 = argument count (tagged)
// Get the arguments boilerplate from the current native context into r4.
const int kNormalOffset =
- Context::SlotOffset(Context::ARGUMENTS_BOILERPLATE_INDEX);
+ Context::SlotOffset(Context::SLOPPY_ARGUMENTS_BOILERPLATE_INDEX);
const int kAliasedOffset =
Context::SlotOffset(Context::ALIASED_ARGUMENTS_BOILERPLATE_INDEX);
@@ -2381,7 +2258,7 @@ void ArgumentsAccessStub::GenerateNewNonStrictFast(MacroAssembler* masm) {
// Set up the elements pointer in the allocated arguments object.
// If we allocated a parameter map, r4 will point there, otherwise
// it will point to the backing store.
- __ add(r4, r0, Operand(Heap::kArgumentsObjectSize));
+ __ add(r4, r0, Operand(Heap::kSloppyArgumentsObjectSize));
__ str(r4, FieldMemOperand(r0, JSObject::kElementsOffset));
// r0 = address of new object (tagged)
@@ -2396,7 +2273,7 @@ void ArgumentsAccessStub::GenerateNewNonStrictFast(MacroAssembler* masm) {
__ mov(r3, r4, LeaveCC, eq);
__ b(eq, &skip_parameter_map);
- __ LoadRoot(r6, Heap::kNonStrictArgumentsElementsMapRootIndex);
+ __ LoadRoot(r6, Heap::kSloppyArgumentsElementsMapRootIndex);
__ str(r6, FieldMemOperand(r4, FixedArray::kMapOffset));
__ add(r6, r1, Operand(Smi::FromInt(2)));
__ str(r6, FieldMemOperand(r4, FixedArray::kLengthOffset));
@@ -2426,7 +2303,7 @@ void ArgumentsAccessStub::GenerateNewNonStrictFast(MacroAssembler* masm) {
// r1 = mapping index (tagged)
// r3 = address of backing store (tagged)
// r4 = address of parameter map (tagged), which is also the address of new
- // object + Heap::kArgumentsObjectSize (tagged)
+ // object + Heap::kSloppyArgumentsObjectSize (tagged)
// r0 = temporary scratch (a.o., for address calculation)
// r5 = the hole value
__ jmp(&parameters_test);
@@ -2444,7 +2321,7 @@ void ArgumentsAccessStub::GenerateNewNonStrictFast(MacroAssembler* masm) {
__ b(ne, &parameters_loop);
// Restore r0 = new object (tagged)
- __ sub(r0, r4, Operand(Heap::kArgumentsObjectSize));
+ __ sub(r0, r4, Operand(Heap::kSloppyArgumentsObjectSize));
__ bind(&skip_parameter_map);
// r0 = address of new object (tagged)
@@ -2482,7 +2359,7 @@ void ArgumentsAccessStub::GenerateNewNonStrictFast(MacroAssembler* masm) {
// r2 = argument count (tagged)
__ bind(&runtime);
__ str(r2, MemOperand(sp, 0 * kPointerSize)); // Patch argument count.
- __ TailCallRuntime(Runtime::kNewArgumentsFast, 3, 1);
+ __ TailCallRuntime(Runtime::kHiddenNewArgumentsFast, 3, 1);
}
@@ -2517,7 +2394,7 @@ void ArgumentsAccessStub::GenerateNewStrict(MacroAssembler* masm) {
__ b(eq, &add_arguments_object);
__ add(r1, r1, Operand(FixedArray::kHeaderSize / kPointerSize));
__ bind(&add_arguments_object);
- __ add(r1, r1, Operand(Heap::kArgumentsObjectSizeStrict / kPointerSize));
+ __ add(r1, r1, Operand(Heap::kStrictArgumentsObjectSize / kPointerSize));
// Do the allocation of both objects in one go.
__ Allocate(r1, r0, r2, r3, &runtime,
@@ -2527,7 +2404,7 @@ void ArgumentsAccessStub::GenerateNewStrict(MacroAssembler* masm) {
__ ldr(r4, MemOperand(cp, Context::SlotOffset(Context::GLOBAL_OBJECT_INDEX)));
__ ldr(r4, FieldMemOperand(r4, GlobalObject::kNativeContextOffset));
__ ldr(r4, MemOperand(r4, Context::SlotOffset(
- Context::STRICT_MODE_ARGUMENTS_BOILERPLATE_INDEX)));
+ Context::STRICT_ARGUMENTS_BOILERPLATE_INDEX)));
// Copy the JS object part.
__ CopyFields(r0, r4, d0, JSObject::kHeaderSize / kPointerSize);
@@ -2548,7 +2425,7 @@ void ArgumentsAccessStub::GenerateNewStrict(MacroAssembler* masm) {
// Set up the elements pointer in the allocated arguments object and
// initialize the header in the elements fixed array.
- __ add(r4, r0, Operand(Heap::kArgumentsObjectSizeStrict));
+ __ add(r4, r0, Operand(Heap::kStrictArgumentsObjectSize));
__ str(r4, FieldMemOperand(r0, JSObject::kElementsOffset));
__ LoadRoot(r3, Heap::kFixedArrayMapRootIndex);
__ str(r3, FieldMemOperand(r4, FixedArray::kMapOffset));
@@ -2576,7 +2453,7 @@ void ArgumentsAccessStub::GenerateNewStrict(MacroAssembler* masm) {
// Do the runtime call to allocate the arguments object.
__ bind(&runtime);
- __ TailCallRuntime(Runtime::kNewStrictArgumentsFast, 3, 1);
+ __ TailCallRuntime(Runtime::kHiddenNewStrictArgumentsFast, 3, 1);
}
@@ -2585,7 +2462,7 @@ void RegExpExecStub::Generate(MacroAssembler* masm) {
// time or if regexp entry in generated code is turned off runtime switch or
// at compilation.
#ifdef V8_INTERPRETED_REGEXP
- __ TailCallRuntime(Runtime::kRegExpExec, 4, 1);
+ __ TailCallRuntime(Runtime::kHiddenRegExpExec, 4, 1);
#else // V8_INTERPRETED_REGEXP
// Stack frame on entry.
@@ -2960,7 +2837,7 @@ void RegExpExecStub::Generate(MacroAssembler* masm) {
// Do the runtime call to execute the regexp.
__ bind(&runtime);
- __ TailCallRuntime(Runtime::kRegExpExec, 4, 1);
+ __ TailCallRuntime(Runtime::kHiddenRegExpExec, 4, 1);
// Deferred code for string handling.
// (6) Not a long external string? If yes, go to (8).
@@ -3004,82 +2881,97 @@ void RegExpExecStub::Generate(MacroAssembler* masm) {
static void GenerateRecordCallTarget(MacroAssembler* masm) {
- // Cache the called function in a global property cell. Cache states
+ // Cache the called function in a feedback vector slot. Cache states
// are uninitialized, monomorphic (indicated by a JSFunction), and
// megamorphic.
// r0 : number of arguments to the construct function
// r1 : the function to call
- // r2 : cache cell for call target
+ // r2 : Feedback vector
+ // r3 : slot in feedback vector (Smi)
Label initialize, done, miss, megamorphic, not_array_function;
- ASSERT_EQ(*TypeFeedbackCells::MegamorphicSentinel(masm->isolate()),
- masm->isolate()->heap()->undefined_value());
- ASSERT_EQ(*TypeFeedbackCells::UninitializedSentinel(masm->isolate()),
- masm->isolate()->heap()->the_hole_value());
+ ASSERT_EQ(*TypeFeedbackInfo::MegamorphicSentinel(masm->isolate()),
+ masm->isolate()->heap()->megamorphic_symbol());
+ ASSERT_EQ(*TypeFeedbackInfo::UninitializedSentinel(masm->isolate()),
+ masm->isolate()->heap()->uninitialized_symbol());
- // Load the cache state into r3.
- __ ldr(r3, FieldMemOperand(r2, Cell::kValueOffset));
+ // Load the cache state into r4.
+ __ add(r4, r2, Operand::PointerOffsetFromSmiKey(r3));
+ __ ldr(r4, FieldMemOperand(r4, FixedArray::kHeaderSize));
// A monomorphic cache hit or an already megamorphic state: invoke the
// function without changing the state.
- __ cmp(r3, r1);
+ __ cmp(r4, r1);
__ b(eq, &done);
- // If we came here, we need to see if we are the array function.
- // If we didn't have a matching function, and we didn't find the megamorph
- // sentinel, then we have in the cell either some other function or an
- // AllocationSite. Do a map check on the object in ecx.
- __ ldr(r5, FieldMemOperand(r3, 0));
- __ CompareRoot(r5, Heap::kAllocationSiteMapRootIndex);
- __ b(ne, &miss);
+ if (!FLAG_pretenuring_call_new) {
+ // If we came here, we need to see if we are the array function.
  +    // If we didn't have a matching function, and we didn't find the
  +    // megamorphic sentinel, then we have in the slot either some other
  +    // function or an AllocationSite. Do a map check on the object in r4.
+ __ ldr(r5, FieldMemOperand(r4, 0));
+ __ CompareRoot(r5, Heap::kAllocationSiteMapRootIndex);
+ __ b(ne, &miss);
- // Make sure the function is the Array() function
- __ LoadArrayFunction(r3);
- __ cmp(r1, r3);
- __ b(ne, &megamorphic);
- __ jmp(&done);
+ // Make sure the function is the Array() function
+ __ LoadGlobalFunction(Context::ARRAY_FUNCTION_INDEX, r4);
+ __ cmp(r1, r4);
+ __ b(ne, &megamorphic);
+ __ jmp(&done);
+ }
__ bind(&miss);
// A monomorphic miss (i.e, here the cache is not uninitialized) goes
// megamorphic.
- __ CompareRoot(r3, Heap::kTheHoleValueRootIndex);
+ __ CompareRoot(r4, Heap::kUninitializedSymbolRootIndex);
__ b(eq, &initialize);
// MegamorphicSentinel is an immortal immovable object (undefined) so no
// write-barrier is needed.
__ bind(&megamorphic);
- __ LoadRoot(ip, Heap::kUndefinedValueRootIndex);
- __ str(ip, FieldMemOperand(r2, Cell::kValueOffset));
+ __ add(r4, r2, Operand::PointerOffsetFromSmiKey(r3));
+ __ LoadRoot(ip, Heap::kMegamorphicSymbolRootIndex);
+ __ str(ip, FieldMemOperand(r4, FixedArray::kHeaderSize));
__ jmp(&done);
- // An uninitialized cache is patched with the function or sentinel to
- // indicate the ElementsKind if function is the Array constructor.
+ // An uninitialized cache is patched with the function
__ bind(&initialize);
- // Make sure the function is the Array() function
- __ LoadArrayFunction(r3);
- __ cmp(r1, r3);
- __ b(ne, &not_array_function);
- // The target function is the Array constructor,
- // Create an AllocationSite if we don't already have it, store it in the cell
- {
- FrameScope scope(masm, StackFrame::INTERNAL);
+ if (!FLAG_pretenuring_call_new) {
+ // Make sure the function is the Array() function
+ __ LoadGlobalFunction(Context::ARRAY_FUNCTION_INDEX, r4);
+ __ cmp(r1, r4);
+ __ b(ne, &not_array_function);
+
+ // The target function is the Array constructor,
+ // Create an AllocationSite if we don't already have it, store it in the
+ // slot.
+ {
+ FrameAndConstantPoolScope scope(masm, StackFrame::INTERNAL);
+
+ // Arguments register must be smi-tagged to call out.
+ __ SmiTag(r0);
+ __ Push(r3, r2, r1, r0);
- // Arguments register must be smi-tagged to call out.
- __ SmiTag(r0);
- __ Push(r2, r1, r0);
+ CreateAllocationSiteStub create_stub;
+ __ CallStub(&create_stub);
- CreateAllocationSiteStub create_stub;
- __ CallStub(&create_stub);
+ __ Pop(r3, r2, r1, r0);
+ __ SmiUntag(r0);
+ }
+ __ b(&done);
- __ Pop(r2, r1, r0);
- __ SmiUntag(r0);
+ __ bind(&not_array_function);
}
- __ b(&done);
- __ bind(&not_array_function);
- __ str(r1, FieldMemOperand(r2, Cell::kValueOffset));
- // No need for a write barrier here - cells are rescanned.
+ __ add(r4, r2, Operand::PointerOffsetFromSmiKey(r3));
+ __ add(r4, r4, Operand(FixedArray::kHeaderSize - kHeapObjectTag));
+ __ str(r1, MemOperand(r4, 0));
+
+ __ Push(r4, r2, r1);
+ __ RecordWrite(r2, r4, r1, kLRHasNotBeenSaved, kDontSaveFPRegs,
+ EMIT_REMEMBERED_SET, OMIT_SMI_CHECK);
+ __ Pop(r4, r2, r1);
__ bind(&done);
}
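A sketch of the feedback-vector slot addressing used throughout this function (the combination of Operand::PointerOffsetFromSmiKey and FixedArray::kHeaderSize); the header size and tag values are assumptions for 32-bit ARM:

    #include <stdint.h>

    // Address of feedback slot r3 (a Smi) inside the tagged vector in r2.
    uintptr_t FeedbackSlotAddress(uintptr_t tagged_vector, int32_t smi_slot) {
      const int kSmiTagSize = 1;                // assumed: 31-bit smis
      const int kHeapObjectTag = 1;             // assumed heap-object tag
      const int kPointerSizeLog2 = 2;           // 32-bit ARM
      const int kFixedArrayHeaderSize = 2 * 4;  // assumed: map + length words
      int slot = smi_slot >> kSmiTagSize;
      return tagged_vector - kHeapObjectTag + kFixedArrayHeaderSize +
             (static_cast<uintptr_t>(slot) << kPointerSizeLog2);
    }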
@@ -3087,7 +2979,9 @@ static void GenerateRecordCallTarget(MacroAssembler* masm) {
void CallFunctionStub::Generate(MacroAssembler* masm) {
// r1 : the function to call
- // r2 : cache cell for call target
+ // r2 : feedback vector
+ // r3 : (only if r2 is not the megamorphic symbol) slot in feedback
+ // vector (Smi)
Label slow, non_function, wrap, cont;
if (NeedsChecks()) {
@@ -3096,11 +2990,15 @@ void CallFunctionStub::Generate(MacroAssembler* masm) {
__ JumpIfSmi(r1, &non_function);
// Goto slow case if we do not have a function.
- __ CompareObjectType(r1, r3, r3, JS_FUNCTION_TYPE);
+ __ CompareObjectType(r1, r4, r4, JS_FUNCTION_TYPE);
__ b(ne, &slow);
if (RecordCallTarget()) {
GenerateRecordCallTarget(masm);
+ // Type information was updated. Because we may call Array, which
  +      // expects either undefined or an AllocationSite in r2, we need
  +      // to set r2 to undefined.
+ __ LoadRoot(r2, Heap::kUndefinedValueRootIndex);
}
}
@@ -3122,7 +3020,7 @@ void CallFunctionStub::Generate(MacroAssembler* masm) {
__ b(ne, &cont);
}
- // Compute the receiver in non-strict mode.
+ // Compute the receiver in sloppy mode.
__ ldr(r3, MemOperand(sp, argc_ * kPointerSize));
if (NeedsChecks()) {
@@ -3143,14 +3041,15 @@ void CallFunctionStub::Generate(MacroAssembler* masm) {
if (RecordCallTarget()) {
// If there is a call target cache, mark it megamorphic in the
// non-function case. MegamorphicSentinel is an immortal immovable
- // object (undefined) so no write barrier is needed.
- ASSERT_EQ(*TypeFeedbackCells::MegamorphicSentinel(masm->isolate()),
- masm->isolate()->heap()->undefined_value());
- __ LoadRoot(ip, Heap::kUndefinedValueRootIndex);
- __ str(ip, FieldMemOperand(r2, Cell::kValueOffset));
+ // object (megamorphic symbol) so no write barrier is needed.
+ ASSERT_EQ(*TypeFeedbackInfo::MegamorphicSentinel(masm->isolate()),
+ masm->isolate()->heap()->megamorphic_symbol());
+ __ add(r5, r2, Operand::PointerOffsetFromSmiKey(r3));
+ __ LoadRoot(ip, Heap::kMegamorphicSymbolRootIndex);
+ __ str(ip, FieldMemOperand(r5, FixedArray::kHeaderSize));
}
// Check for function proxy.
- __ cmp(r3, Operand(JS_FUNCTION_PROXY_TYPE));
+ __ cmp(r4, Operand(JS_FUNCTION_PROXY_TYPE));
__ b(ne, &non_function);
__ push(r1); // put proxy as additional argument
__ mov(r0, Operand(argc_ + 1, RelocInfo::NONE32));
@@ -3176,7 +3075,7 @@ void CallFunctionStub::Generate(MacroAssembler* masm) {
if (CallAsMethod()) {
__ bind(&wrap);
// Wrap the receiver and patch it back onto the stack.
- { FrameScope frame_scope(masm, StackFrame::INTERNAL);
+ { FrameAndConstantPoolScope frame_scope(masm, StackFrame::INTERNAL);
__ Push(r1, r3);
__ InvokeBuiltin(Builtins::TO_OBJECT, CALL_FUNCTION);
__ pop(r1);
@@ -3190,21 +3089,42 @@ void CallFunctionStub::Generate(MacroAssembler* masm) {
void CallConstructStub::Generate(MacroAssembler* masm) {
// r0 : number of arguments
// r1 : the function to call
- // r2 : cache cell for call target
+ // r2 : feedback vector
+ // r3 : (only if r2 is not the megamorphic symbol) slot in feedback
+ // vector (Smi)
Label slow, non_function_call;
// Check that the function is not a smi.
__ JumpIfSmi(r1, &non_function_call);
// Check that the function is a JSFunction.
- __ CompareObjectType(r1, r3, r3, JS_FUNCTION_TYPE);
+ __ CompareObjectType(r1, r4, r4, JS_FUNCTION_TYPE);
__ b(ne, &slow);
if (RecordCallTarget()) {
GenerateRecordCallTarget(masm);
+
+ __ add(r5, r2, Operand::PointerOffsetFromSmiKey(r3));
+ if (FLAG_pretenuring_call_new) {
+ // Put the AllocationSite from the feedback vector into r2.
+ // By adding kPointerSize we encode that we know the AllocationSite
+ // entry is at the feedback vector slot given by r3 + 1.
+ __ ldr(r2, FieldMemOperand(r5, FixedArray::kHeaderSize + kPointerSize));
+ } else {
+ Label feedback_register_initialized;
+ // Put the AllocationSite from the feedback vector into r2, or undefined.
+ __ ldr(r2, FieldMemOperand(r5, FixedArray::kHeaderSize));
+ __ ldr(r5, FieldMemOperand(r2, AllocationSite::kMapOffset));
+ __ CompareRoot(r5, Heap::kAllocationSiteMapRootIndex);
+ __ b(eq, &feedback_register_initialized);
+ __ LoadRoot(r2, Heap::kUndefinedValueRootIndex);
+ __ bind(&feedback_register_initialized);
+ }
+
+ __ AssertUndefinedOrAllocationSite(r2, r5);
}
// Jump to the function-specific construct stub.
- Register jmp_reg = r3;
+ Register jmp_reg = r4;
__ ldr(jmp_reg, FieldMemOperand(r1, JSFunction::kSharedFunctionInfoOffset));
__ ldr(jmp_reg, FieldMemOperand(jmp_reg,
SharedFunctionInfo::kConstructStubOffset));
@@ -3212,10 +3132,10 @@ void CallConstructStub::Generate(MacroAssembler* masm) {
// r0: number of arguments
// r1: called object
- // r3: object type
+ // r4: object type
Label do_call;
__ bind(&slow);
- __ cmp(r3, Operand(JS_FUNCTION_PROXY_TYPE));
+ __ cmp(r4, Operand(JS_FUNCTION_PROXY_TYPE));
__ b(ne, &non_function_call);
__ GetBuiltinFunction(r1, Builtins::CALL_FUNCTION_PROXY_AS_CONSTRUCTOR);
__ jmp(&do_call);
@@ -3290,7 +3210,7 @@ void StringCharCodeAtGenerator::GenerateSlow(
} else {
ASSERT(index_flags_ == STRING_INDEX_IS_ARRAY_INDEX);
// NumberToSmi discards numbers that are not exact integers.
- __ CallRuntime(Runtime::kNumberToSmi, 1);
+ __ CallRuntime(Runtime::kHiddenNumberToSmi, 1);
}
// Save the conversion result before the pop instructions below
// have a chance to overwrite it.
@@ -3312,7 +3232,7 @@ void StringCharCodeAtGenerator::GenerateSlow(
call_helper.BeforeCall(masm);
__ SmiTag(index_);
__ Push(object_, index_);
- __ CallRuntime(Runtime::kStringCharCodeAt, 2);
+ __ CallRuntime(Runtime::kHiddenStringCharCodeAt, 2);
__ Move(result_, r0);
call_helper.AfterCall(masm);
__ jmp(&exit_);
@@ -3760,7 +3680,7 @@ void SubStringStub::Generate(MacroAssembler* masm) {
// Just jump to runtime to create the sub string.
__ bind(&runtime);
- __ TailCallRuntime(Runtime::kSubString, 3, 1);
+ __ TailCallRuntime(Runtime::kHiddenSubString, 3, 1);
__ bind(&single_char);
// r0: original string
@@ -3918,7 +3838,7 @@ void StringCompareStub::Generate(MacroAssembler* masm) {
// Call the runtime; it returns -1 (less), 0 (equal), or 1 (greater)
// tagged as a small integer.
__ bind(&runtime);
- __ TailCallRuntime(Runtime::kStringCompare, 2, 1);
+ __ TailCallRuntime(Runtime::kHiddenStringCompare, 2, 1);
}
@@ -4405,7 +4325,7 @@ void ICCompareStub::GenerateStrings(MacroAssembler* masm) {
if (equality) {
__ TailCallRuntime(Runtime::kStringEquals, 2, 1);
} else {
- __ TailCallRuntime(Runtime::kStringCompare, 2, 1);
+ __ TailCallRuntime(Runtime::kHiddenStringCompare, 2, 1);
}
__ bind(&miss);
@@ -4459,7 +4379,7 @@ void ICCompareStub::GenerateMiss(MacroAssembler* masm) {
ExternalReference miss =
ExternalReference(IC_Utility(IC::kCompareIC_Miss), masm->isolate());
- FrameScope scope(masm, StackFrame::INTERNAL);
+ FrameAndConstantPoolScope scope(masm, StackFrame::INTERNAL);
__ Push(r1, r0);
__ Push(lr, r1, r0);
__ mov(ip, Operand(Smi::FromInt(op_)));
@@ -4824,7 +4744,7 @@ void RecordWriteStub::GenerateIncremental(MacroAssembler* masm, Mode mode) {
// remembered set.
CheckNeedsToInformIncrementalMarker(
masm, kUpdateRememberedSetOnNoNeedToInformIncrementalMarker, mode);
- InformIncrementalMarker(masm, mode);
+ InformIncrementalMarker(masm);
regs_.Restore(masm);
__ RememberedSetHelper(object_,
address_,
@@ -4837,13 +4757,13 @@ void RecordWriteStub::GenerateIncremental(MacroAssembler* masm, Mode mode) {
CheckNeedsToInformIncrementalMarker(
masm, kReturnOnNoNeedToInformIncrementalMarker, mode);
- InformIncrementalMarker(masm, mode);
+ InformIncrementalMarker(masm);
regs_.Restore(masm);
__ Ret();
}
-void RecordWriteStub::InformIncrementalMarker(MacroAssembler* masm, Mode mode) {
+void RecordWriteStub::InformIncrementalMarker(MacroAssembler* masm) {
regs_.SaveCallerSaveRegisters(masm, save_fp_regs_mode_);
int argument_count = 3;
__ PrepareCallCFunction(argument_count, regs_.scratch0());
@@ -4857,18 +4777,10 @@ void RecordWriteStub::InformIncrementalMarker(MacroAssembler* masm, Mode mode) {
__ mov(r2, Operand(ExternalReference::isolate_address(masm->isolate())));
AllowExternalCallThatCantCauseGC scope(masm);
- if (mode == INCREMENTAL_COMPACTION) {
- __ CallCFunction(
- ExternalReference::incremental_evacuation_record_write_function(
- masm->isolate()),
- argument_count);
- } else {
- ASSERT(mode == INCREMENTAL);
- __ CallCFunction(
- ExternalReference::incremental_marking_record_write_function(
- masm->isolate()),
- argument_count);
- }
+ __ CallCFunction(
+ ExternalReference::incremental_marking_record_write_function(
+ masm->isolate()),
+ argument_count);
regs_.RestoreCallerSaveRegisters(masm, save_fp_regs_mode_);
}
@@ -5175,7 +5087,7 @@ static void CreateArrayDispatchOneArgument(MacroAssembler* masm,
__ TailCallStub(&stub);
} else if (mode == DONT_OVERRIDE) {
// We are going to create a holey array, but our kind is non-holey.
- // Fix kind and retry (only if we have an allocation site in the cell).
+ // Fix kind and retry (only if we have an allocation site in the slot).
__ add(r3, r3, Operand(1));
if (FLAG_debug_code) {
@@ -5283,44 +5195,31 @@ void ArrayConstructorStub::Generate(MacroAssembler* masm) {
// ----------- S t a t e -------------
// -- r0 : argc (only if argument_count_ == ANY)
// -- r1 : constructor
- // -- r2 : type info cell
+ // -- r2 : AllocationSite or undefined
// -- sp[0] : return address
// -- sp[4] : last argument
// -----------------------------------
+
if (FLAG_debug_code) {
// The array construct code is only set for the global and natives
// builtin Array functions which always have maps.
// Initial map for the builtin Array function should be a map.
- __ ldr(r3, FieldMemOperand(r1, JSFunction::kPrototypeOrInitialMapOffset));
+ __ ldr(r4, FieldMemOperand(r1, JSFunction::kPrototypeOrInitialMapOffset));
// Will both indicate a NULL and a Smi.
- __ tst(r3, Operand(kSmiTagMask));
+ __ tst(r4, Operand(kSmiTagMask));
__ Assert(ne, kUnexpectedInitialMapForArrayFunction);
- __ CompareObjectType(r3, r3, r4, MAP_TYPE);
+ __ CompareObjectType(r4, r4, r5, MAP_TYPE);
__ Assert(eq, kUnexpectedInitialMapForArrayFunction);
- // We should either have undefined in ebx or a valid cell
- Label okay_here;
- Handle<Map> cell_map = masm->isolate()->factory()->cell_map();
- __ CompareRoot(r2, Heap::kUndefinedValueRootIndex);
- __ b(eq, &okay_here);
- __ ldr(r3, FieldMemOperand(r2, 0));
- __ cmp(r3, Operand(cell_map));
- __ Assert(eq, kExpectedPropertyCellInRegisterEbx);
- __ bind(&okay_here);
+ // We should either have undefined in r2 or a valid AllocationSite
+ __ AssertUndefinedOrAllocationSite(r2, r4);
}
Label no_info;
// Get the elements kind and case on that.
__ CompareRoot(r2, Heap::kUndefinedValueRootIndex);
__ b(eq, &no_info);
- __ ldr(r2, FieldMemOperand(r2, Cell::kValueOffset));
-
- // If the type cell is undefined, or contains anything other than an
- // AllocationSite, call an array constructor that doesn't use AllocationSites.
- __ ldr(r4, FieldMemOperand(r2, 0));
- __ CompareRoot(r4, Heap::kAllocationSiteMapRootIndex);
- __ b(ne, &no_info);
__ ldr(r3, FieldMemOperand(r2, AllocationSite::kTransitionInfoOffset));
__ SmiUntag(r3);
@@ -5429,7 +5328,7 @@ void CallApiFunctionStub::Generate(MacroAssembler* masm) {
Register context = cp;
int argc = ArgumentBits::decode(bit_field_);
- bool restore_context = RestoreContextBits::decode(bit_field_);
+ bool is_store = IsStoreBits::decode(bit_field_);
bool call_data_undefined = CallDataUndefinedBits::decode(bit_field_);
typedef FunctionCallbackArguments FCA;
@@ -5478,7 +5377,7 @@ void CallApiFunctionStub::Generate(MacroAssembler* masm) {
// it's not controlled by GC.
const int kApiStackSpace = 4;
- FrameScope frame_scope(masm, StackFrame::MANUAL);
+ FrameAndConstantPoolScope frame_scope(masm, StackFrame::MANUAL);
__ EnterExitFrame(false, kApiStackSpace);
ASSERT(!api_function_address.is(r0) && !scratch.is(r0));
@@ -5507,15 +5406,20 @@ void CallApiFunctionStub::Generate(MacroAssembler* masm) {
AllowExternalCallThatCantCauseGC scope(masm);
MemOperand context_restore_operand(
fp, (2 + FCA::kContextSaveIndex) * kPointerSize);
- MemOperand return_value_operand(fp,
- (2 + FCA::kReturnValueOffset) * kPointerSize);
  + // Stores return the first JS argument.
+ int return_value_offset = 0;
+ if (is_store) {
+ return_value_offset = 2 + FCA::kArgsLength;
+ } else {
+ return_value_offset = 2 + FCA::kReturnValueOffset;
+ }
+ MemOperand return_value_operand(fp, return_value_offset * kPointerSize);
__ CallApiFunctionAndReturn(api_function_address,
thunk_ref,
kStackUnwindSpace,
return_value_operand,
- restore_context ?
- &context_restore_operand : NULL);
+ &context_restore_operand);
}
@@ -5533,7 +5437,7 @@ void CallApiGetterStub::Generate(MacroAssembler* masm) {
__ add(r1, r0, Operand(1 * kPointerSize)); // r1 = PCA
const int kApiStackSpace = 1;
- FrameScope frame_scope(masm, StackFrame::MANUAL);
+ FrameAndConstantPoolScope frame_scope(masm, StackFrame::MANUAL);
__ EnterExitFrame(false, kApiStackSpace);
// Create PropertyAccessorInfo instance on the stack above the exit frame with
diff --git a/deps/v8/src/arm/code-stubs-arm.h b/deps/v8/src/arm/code-stubs-arm.h
index 7a371f1694..ef78802bef 100644
--- a/deps/v8/src/arm/code-stubs-arm.h
+++ b/deps/v8/src/arm/code-stubs-arm.h
@@ -324,7 +324,7 @@ class RecordWriteStub: public PlatformCodeStub {
MacroAssembler* masm,
OnNoNeedToInformIncrementalMarker on_no_need,
Mode mode);
- void InformIncrementalMarker(MacroAssembler* masm, Mode mode);
+ void InformIncrementalMarker(MacroAssembler* masm);
Major MajorKey() { return RecordWrite; }
diff --git a/deps/v8/src/arm/constants-arm.h b/deps/v8/src/arm/constants-arm.h
index 78bb66c49f..14f4705cbd 100644
--- a/deps/v8/src/arm/constants-arm.h
+++ b/deps/v8/src/arm/constants-arm.h
@@ -343,7 +343,7 @@ enum NeonSize {
Neon8 = 0x0,
Neon16 = 0x1,
Neon32 = 0x2,
- Neon64 = 0x4
+ Neon64 = 0x3
};
// -----------------------------------------------------------------------------
diff --git a/deps/v8/src/arm/debug-arm.cc b/deps/v8/src/arm/debug-arm.cc
index efd11069b3..12258ccad9 100644
--- a/deps/v8/src/arm/debug-arm.cc
+++ b/deps/v8/src/arm/debug-arm.cc
@@ -117,7 +117,7 @@ static void Generate_DebugBreakCallHelper(MacroAssembler* masm,
RegList object_regs,
RegList non_object_regs) {
{
- FrameScope scope(masm, StackFrame::INTERNAL);
+ FrameAndConstantPoolScope scope(masm, StackFrame::INTERNAL);
// Store the registers containing live values on the expression stack to
// make sure that these are correctly updated during GC. Non object values
@@ -265,9 +265,10 @@ void Debug::GenerateCallFunctionStubRecordDebugBreak(MacroAssembler* masm) {
// Register state for CallFunctionStub (from code-stubs-arm.cc).
// ----------- S t a t e -------------
// -- r1 : function
- // -- r2 : cache cell for call target
+ // -- r2 : feedback array
+ // -- r3 : slot in feedback array
// -----------------------------------
- Generate_DebugBreakCallHelper(masm, r1.bit() | r2.bit(), 0);
+ Generate_DebugBreakCallHelper(masm, r1.bit() | r2.bit() | r3.bit(), 0);
}
@@ -286,9 +287,10 @@ void Debug::GenerateCallConstructStubRecordDebugBreak(MacroAssembler* masm) {
// ----------- S t a t e -------------
// -- r0 : number of arguments (not smi)
// -- r1 : constructor function
- // -- r2 : cache cell for call target
+ // -- r2 : feedback array
+ // -- r3 : feedback slot (smi)
// -----------------------------------
- Generate_DebugBreakCallHelper(masm, r1.bit() | r2.bit(), r0.bit());
+ Generate_DebugBreakCallHelper(masm, r1.bit() | r2.bit() | r3.bit(), r0.bit());
}
diff --git a/deps/v8/src/arm/deoptimizer-arm.cc b/deps/v8/src/arm/deoptimizer-arm.cc
index 6031499dbd..ef3ea275cc 100644
--- a/deps/v8/src/arm/deoptimizer-arm.cc
+++ b/deps/v8/src/arm/deoptimizer-arm.cc
@@ -50,13 +50,36 @@ void Deoptimizer::PatchCodeForDeoptimization(Isolate* isolate, Code* code) {
// code patching below, and is not needed any more.
code->InvalidateRelocation();
- // For each LLazyBailout instruction insert a call to the corresponding
- // deoptimization entry.
+ if (FLAG_zap_code_space) {
+ // Fail hard and early if we enter this code object again.
+ byte* pointer = code->FindCodeAgeSequence();
+ if (pointer != NULL) {
+ pointer += kNoCodeAgeSequenceLength * Assembler::kInstrSize;
+ } else {
+ pointer = code->instruction_start();
+ }
+ CodePatcher patcher(pointer, 1);
+ patcher.masm()->bkpt(0);
+
+ DeoptimizationInputData* data =
+ DeoptimizationInputData::cast(code->deoptimization_data());
+ int osr_offset = data->OsrPcOffset()->value();
+ if (osr_offset > 0) {
+ CodePatcher osr_patcher(code->instruction_start() + osr_offset, 1);
+ osr_patcher.masm()->bkpt(0);
+ }
+ }
+
DeoptimizationInputData* deopt_data =
DeoptimizationInputData::cast(code->deoptimization_data());
+ SharedFunctionInfo* shared =
+ SharedFunctionInfo::cast(deopt_data->SharedFunctionInfo());
+ shared->EvictFromOptimizedCodeMap(code, "deoptimized code");
#ifdef DEBUG
Address prev_call_address = NULL;
#endif
+ // For each LLazyBailout instruction insert a call to the corresponding
+ // deoptimization entry.
for (int i = 0; i < deopt_data->DeoptCount(); i++) {
if (deopt_data->Pc(i)->value() == -1) continue;
Address call_address = code_start_address + deopt_data->Pc(i)->value();
@@ -350,6 +373,12 @@ void FrameDescription::SetCallerFp(unsigned offset, intptr_t value) {
}
+void FrameDescription::SetCallerConstantPool(unsigned offset, intptr_t value) {
+ ASSERT(FLAG_enable_ool_constant_pool);
+ SetFrameSlot(offset, value);
+}
+
+
#undef __
} } // namespace v8::internal
diff --git a/deps/v8/src/arm/disasm-arm.cc b/deps/v8/src/arm/disasm-arm.cc
index 49e4126b32..aa8ee22b73 100644
--- a/deps/v8/src/arm/disasm-arm.cc
+++ b/deps/v8/src/arm/disasm-arm.cc
@@ -1061,7 +1061,7 @@ void Decoder::DecodeType3(Instruction* instr) {
if (instr->Bits(19, 16) == 0xF) {
switch (instr->Bits(11, 10)) {
case 0:
- Format(instr, "uxtb16'cond 'rd, 'rm, ror #0");
+ Format(instr, "uxtb16'cond 'rd, 'rm");
break;
case 1:
Format(instr, "uxtb16'cond 'rd, 'rm, ror #8");
@@ -1085,7 +1085,7 @@ void Decoder::DecodeType3(Instruction* instr) {
if (instr->Bits(19, 16) == 0xF) {
switch (instr->Bits(11, 10)) {
case 0:
- Format(instr, "uxtb'cond 'rd, 'rm, ror #0");
+ Format(instr, "uxtb'cond 'rd, 'rm");
break;
case 1:
Format(instr, "uxtb'cond 'rd, 'rm, ror #8");
@@ -1100,7 +1100,7 @@ void Decoder::DecodeType3(Instruction* instr) {
} else {
switch (instr->Bits(11, 10)) {
case 0:
- Format(instr, "uxtab'cond 'rd, 'rn, 'rm, ror #0");
+ Format(instr, "uxtab'cond 'rd, 'rn, 'rm");
break;
case 1:
Format(instr, "uxtab'cond 'rd, 'rn, 'rm, ror #8");
@@ -1566,7 +1566,8 @@ void Decoder::DecodeSpecialCondition(Instruction* instr) {
if ((instr->Bits(18, 16) == 0) && (instr->Bits(11, 6) == 0x28) &&
(instr->Bit(4) == 1)) {
// vmovl signed
- int Vd = (instr->Bit(22) << 4) | instr->VdValue();
+ if ((instr->VdValue() & 1) != 0) Unknown(instr);
+ int Vd = (instr->Bit(22) << 3) | (instr->VdValue() >> 1);
int Vm = (instr->Bit(5) << 4) | instr->VmValue();
int imm3 = instr->Bits(21, 19);
out_buffer_pos_ += OS::SNPrintF(out_buffer_ + out_buffer_pos_,
@@ -1579,7 +1580,8 @@ void Decoder::DecodeSpecialCondition(Instruction* instr) {
if ((instr->Bits(18, 16) == 0) && (instr->Bits(11, 6) == 0x28) &&
(instr->Bit(4) == 1)) {
// vmovl unsigned
- int Vd = (instr->Bit(22) << 4) | instr->VdValue();
+ if ((instr->VdValue() & 1) != 0) Unknown(instr);
+ int Vd = (instr->Bit(22) << 3) | (instr->VdValue() >> 1);
int Vm = (instr->Bit(5) << 4) | instr->VmValue();
int imm3 = instr->Bits(21, 19);
out_buffer_pos_ += OS::SNPrintF(out_buffer_ + out_buffer_pos_,
diff --git a/deps/v8/src/arm/full-codegen-arm.cc b/deps/v8/src/arm/full-codegen-arm.cc
index 813e9492df..b5ec2d5fdf 100644
--- a/deps/v8/src/arm/full-codegen-arm.cc
+++ b/deps/v8/src/arm/full-codegen-arm.cc
@@ -111,6 +111,25 @@ class JumpPatchSite BASE_EMBEDDED {
};
+static void EmitStackCheck(MacroAssembler* masm_,
+ Register stack_limit_scratch,
+ int pointers = 0,
+ Register scratch = sp) {
+ Isolate* isolate = masm_->isolate();
+ Label ok;
+ ASSERT(scratch.is(sp) == (pointers == 0));
+ if (pointers != 0) {
+ __ sub(scratch, sp, Operand(pointers * kPointerSize));
+ }
+ __ LoadRoot(stack_limit_scratch, Heap::kStackLimitRootIndex);
+ __ cmp(scratch, Operand(stack_limit_scratch));
+ __ b(hs, &ok);
+ PredictableCodeSizeScope predictable(masm_, 2 * Assembler::kInstrSize);
+ __ Call(isolate->builtins()->StackCheck(), RelocInfo::CODE_TARGET);
+ __ bind(&ok);
+}
+
+
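The EmitStackCheck helper above compares either sp itself or sp minus the frame about to be allocated against the stack-limit root, and only calls the StackCheck builtin when that probe falls below the limit. A minimal sketch of the comparison it encodes (standalone C++, assuming the 4-byte kPointerSize used on ARM; not the generated code itself):

    #include <cstdint>

    // Mirrors the sub/cmp/b(hs) sequence above: true means no builtin call.
    static bool StackHasRoom(uintptr_t sp, uintptr_t stack_limit, int pointers) {
      const uintptr_t kPointerSize = 4;
      uintptr_t probe = sp - pointers * kPointerSize;  // probe == sp when pointers == 0
      return probe >= stack_limit;                     // b(hs, &ok): unsigned higher-or-same
    }

    int main() {
      return StackHasRoom(0x8000, 0x4000, 128) ? 0 : 1;
    }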
// Generate code for a JS function. On entry to the function the receiver
// and arguments have been pushed on the stack left to right. The actual
// argument count matches the formal parameter count expected by the
@@ -130,6 +149,9 @@ void FullCodeGenerator::Generate() {
CompilationInfo* info = info_;
handler_table_ =
isolate()->factory()->NewFixedArray(function()->handler_count(), TENURED);
+
+ InitializeFeedbackVector();
+
profiling_counter_ = isolate()->factory()->NewCell(
Handle<Smi>(Smi::FromInt(FLAG_interrupt_budget), isolate()));
SetFunctionPosition(function());
@@ -144,10 +166,10 @@ void FullCodeGenerator::Generate() {
}
#endif
- // Classic mode functions and builtins need to replace the receiver with the
+ // Sloppy mode functions and builtins need to replace the receiver with the
// global proxy when called as functions (without an explicit receiver
// object).
- if (info->is_classic_mode() && !info->is_native()) {
+ if (info->strict_mode() == SLOPPY && !info->is_native()) {
Label ok;
int receiver_offset = info->scope()->num_parameters() * kPointerSize;
__ ldr(r2, MemOperand(sp, receiver_offset));
@@ -170,27 +192,34 @@ void FullCodeGenerator::Generate() {
info->set_prologue_offset(masm_->pc_offset());
__ Prologue(BUILD_FUNCTION_FRAME);
info->AddNoFrameRange(0, masm_->pc_offset());
- __ LoadConstantPoolPointerRegister();
{ Comment cmnt(masm_, "[ Allocate locals");
int locals_count = info->scope()->num_stack_slots();
// Generators allocate locals, if any, in context slots.
ASSERT(!info->function()->is_generator() || locals_count == 0);
if (locals_count > 0) {
- // Emit a loop to initialize stack cells for locals when optimizing for
- // size. Otherwise, unroll the loop for maximum performance.
+ if (locals_count >= 128) {
+ EmitStackCheck(masm_, r2, locals_count, r9);
+ }
__ LoadRoot(r9, Heap::kUndefinedValueRootIndex);
- if (FLAG_optimize_for_size && locals_count > 4) {
- Label loop;
- __ mov(r2, Operand(locals_count));
- __ bind(&loop);
- __ sub(r2, r2, Operand(1), SetCC);
- __ push(r9);
- __ b(&loop, ne);
- } else {
- for (int i = 0; i < locals_count; i++) {
+ int kMaxPushes = FLAG_optimize_for_size ? 4 : 32;
+ if (locals_count >= kMaxPushes) {
+ int loop_iterations = locals_count / kMaxPushes;
+ __ mov(r2, Operand(loop_iterations));
+ Label loop_header;
+ __ bind(&loop_header);
+ // Do pushes.
+ for (int i = 0; i < kMaxPushes; i++) {
__ push(r9);
}
+ // Continue loop if not done.
+ __ sub(r2, r2, Operand(1), SetCC);
+ __ b(&loop_header, ne);
+ }
+ int remaining = locals_count % kMaxPushes;
+ // Emit the remaining pushes.
+ for (int i = 0; i < remaining; i++) {
+ __ push(r9);
}
}
}
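The rewritten locals prologue above stack-checks once when 128 or more locals are needed, then emits locals_count / kMaxPushes groups of unrolled pushes inside a loop and the locals_count % kMaxPushes leftovers inline. A small worked example of that split (plain C++; the counts are illustrative only):

    #include <cstdio>

    int main() {
      const int locals_count = 100;
      const int kMaxPushes = 32;  // 4 when FLAG_optimize_for_size is set
      int loop_iterations = locals_count / kMaxPushes;  // 3 groups of 32 pushes
      int remaining = locals_count % kMaxPushes;        // 4 pushes emitted inline
      std::printf("%d * %d + %d = %d locals initialized\n",
                  loop_iterations, kMaxPushes, remaining,
                  loop_iterations * kMaxPushes + remaining);
      return 0;
    }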
@@ -205,13 +234,13 @@ void FullCodeGenerator::Generate() {
if (FLAG_harmony_scoping && info->scope()->is_global_scope()) {
__ push(r1);
__ Push(info->scope()->GetScopeInfo());
- __ CallRuntime(Runtime::kNewGlobalContext, 2);
+ __ CallRuntime(Runtime::kHiddenNewGlobalContext, 2);
} else if (heap_slots <= FastNewContextStub::kMaximumSlots) {
FastNewContextStub stub(heap_slots);
__ CallStub(&stub);
} else {
__ push(r1);
- __ CallRuntime(Runtime::kNewFunctionContext, 1);
+ __ CallRuntime(Runtime::kHiddenNewFunctionContext, 1);
}
function_in_register = false;
// Context is returned in r0. It replaces the context passed to us.
@@ -261,12 +290,12 @@ void FullCodeGenerator::Generate() {
// The stub will rewrite receiver and parameter count if the previous
// stack frame was an arguments adapter frame.
ArgumentsAccessStub::Type type;
- if (!is_classic_mode()) {
+ if (strict_mode() == STRICT) {
type = ArgumentsAccessStub::NEW_STRICT;
} else if (function()->has_duplicate_parameters()) {
- type = ArgumentsAccessStub::NEW_NON_STRICT_SLOW;
+ type = ArgumentsAccessStub::NEW_SLOPPY_SLOW;
} else {
- type = ArgumentsAccessStub::NEW_NON_STRICT_FAST;
+ type = ArgumentsAccessStub::NEW_SLOPPY_FAST;
}
ArgumentsAccessStub stub(type);
__ CallStub(&stub);
@@ -292,7 +321,7 @@ void FullCodeGenerator::Generate() {
if (scope()->is_function_scope() && scope()->function() != NULL) {
VariableDeclaration* function = scope()->function();
ASSERT(function->proxy()->var()->mode() == CONST ||
- function->proxy()->var()->mode() == CONST_HARMONY);
+ function->proxy()->var()->mode() == CONST_LEGACY);
ASSERT(function->proxy()->var()->location() != Variable::UNALLOCATED);
VisitVariableDeclaration(function);
}
@@ -301,13 +330,7 @@ void FullCodeGenerator::Generate() {
{ Comment cmnt(masm_, "[ Stack check");
PrepareForBailoutForId(BailoutId::Declarations(), NO_REGISTERS);
- Label ok;
- __ LoadRoot(ip, Heap::kStackLimitRootIndex);
- __ cmp(sp, Operand(ip));
- __ b(hs, &ok);
- PredictableCodeSizeScope predictable(masm_, 2 * Assembler::kInstrSize);
- __ Call(isolate()->builtins()->StackCheck(), RelocInfo::CODE_TARGET);
- __ bind(&ok);
+ EmitStackCheck(masm_, ip);
}
{ Comment cmnt(masm_, "[ Body");
@@ -668,7 +691,7 @@ void FullCodeGenerator::DoTest(Expression* condition,
Label* if_false,
Label* fall_through) {
Handle<Code> ic = ToBooleanStub::GetUninitialized(isolate());
- CallIC(ic, NOT_CONTEXTUAL, condition->test_id());
+ CallIC(ic, condition->test_id());
__ tst(result_register(), result_register());
Split(ne, if_true, if_false, fall_through);
}
@@ -789,7 +812,7 @@ void FullCodeGenerator::VisitVariableDeclaration(
VariableProxy* proxy = declaration->proxy();
VariableMode mode = declaration->mode();
Variable* variable = proxy->var();
- bool hole_init = mode == CONST || mode == CONST_HARMONY || mode == LET;
+ bool hole_init = mode == LET || mode == CONST || mode == CONST_LEGACY;
switch (variable->location()) {
case Variable::UNALLOCATED:
globals_->Add(variable->name(), zone());
@@ -838,7 +861,7 @@ void FullCodeGenerator::VisitVariableDeclaration(
__ mov(r0, Operand(Smi::FromInt(0))); // Indicates no initial value.
__ Push(cp, r2, r1, r0);
}
- __ CallRuntime(Runtime::kDeclareContextSlot, 4);
+ __ CallRuntime(Runtime::kHiddenDeclareContextSlot, 4);
break;
}
}
@@ -894,7 +917,7 @@ void FullCodeGenerator::VisitFunctionDeclaration(
__ Push(cp, r2, r1);
// Push initial value for function declaration.
VisitForStackValue(declaration->fun());
- __ CallRuntime(Runtime::kDeclareContextSlot, 4);
+ __ CallRuntime(Runtime::kHiddenDeclareContextSlot, 4);
break;
}
}
@@ -966,7 +989,7 @@ void FullCodeGenerator::DeclareGlobals(Handle<FixedArray> pairs) {
__ mov(r1, Operand(pairs));
__ mov(r0, Operand(Smi::FromInt(DeclareGlobalsFlags())));
__ Push(cp, r1, r0);
- __ CallRuntime(Runtime::kDeclareGlobals, 3);
+ __ CallRuntime(Runtime::kHiddenDeclareGlobals, 3);
// Return value is ignored.
}
@@ -974,7 +997,7 @@ void FullCodeGenerator::DeclareGlobals(Handle<FixedArray> pairs) {
void FullCodeGenerator::DeclareModules(Handle<FixedArray> descriptions) {
// Call the runtime to declare the modules.
__ Push(descriptions);
- __ CallRuntime(Runtime::kDeclareModules, 1);
+ __ CallRuntime(Runtime::kHiddenDeclareModules, 1);
// Return value is ignored.
}
@@ -1029,7 +1052,7 @@ void FullCodeGenerator::VisitSwitchStatement(SwitchStatement* stmt) {
// Record position before stub call for type feedback.
SetSourcePosition(clause->position());
Handle<Code> ic = CompareIC::GetUninitialized(isolate(), Token::EQ_STRICT);
- CallIC(ic, NOT_CONTEXTUAL, clause->CompareId());
+ CallIC(ic, clause->CompareId());
patch_site.EmitPatchInfo();
Label skip;
@@ -1074,6 +1097,7 @@ void FullCodeGenerator::VisitSwitchStatement(SwitchStatement* stmt) {
void FullCodeGenerator::VisitForInStatement(ForInStatement* stmt) {
Comment cmnt(masm_, "[ ForInStatement");
+ int slot = stmt->ForInFeedbackSlot();
SetStatementPosition(stmt);
Label loop, exit;
@@ -1163,13 +1187,13 @@ void FullCodeGenerator::VisitForInStatement(ForInStatement* stmt) {
Label non_proxy;
__ bind(&fixed_array);
- Handle<Cell> cell = isolate()->factory()->NewCell(
- Handle<Object>(Smi::FromInt(TypeFeedbackCells::kForInFastCaseMarker),
- isolate()));
- RecordTypeFeedbackCell(stmt->ForInFeedbackId(), cell);
- __ Move(r1, cell);
- __ mov(r2, Operand(Smi::FromInt(TypeFeedbackCells::kForInSlowCaseMarker)));
- __ str(r2, FieldMemOperand(r1, Cell::kValueOffset));
+ Handle<Object> feedback = Handle<Object>(
+ Smi::FromInt(TypeFeedbackInfo::kForInFastCaseMarker),
+ isolate());
+ StoreFeedbackVectorSlot(slot, feedback);
+ __ Move(r1, FeedbackVector());
+ __ mov(r2, Operand(Smi::FromInt(TypeFeedbackInfo::kForInSlowCaseMarker)));
+ __ str(r2, FieldMemOperand(r1, FixedArray::OffsetOfElementAt(slot)));
__ mov(r1, Operand(Smi::FromInt(1))); // Smi indicates slow check
__ ldr(r2, MemOperand(sp, 0 * kPointerSize)); // Get enumerated object
@@ -1327,7 +1351,7 @@ void FullCodeGenerator::EmitNewClosure(Handle<SharedFunctionInfo> info,
!pretenure &&
scope()->is_function_scope() &&
info->num_literals() == 0) {
- FastNewClosureStub stub(info->language_mode(), info->is_generator());
+ FastNewClosureStub stub(info->strict_mode(), info->is_generator());
__ mov(r2, Operand(info));
__ CallStub(&stub);
} else {
@@ -1335,7 +1359,7 @@ void FullCodeGenerator::EmitNewClosure(Handle<SharedFunctionInfo> info,
__ LoadRoot(r1, pretenure ? Heap::kTrueValueRootIndex
: Heap::kFalseValueRootIndex);
__ Push(cp, r0, r1);
- __ CallRuntime(Runtime::kNewClosure, 3);
+ __ CallRuntime(Runtime::kHiddenNewClosure, 3);
}
context()->Plug(r0);
}
@@ -1357,7 +1381,7 @@ void FullCodeGenerator::EmitLoadGlobalCheckExtensions(Variable* var,
Scope* s = scope();
while (s != NULL) {
if (s->num_heap_slots() > 0) {
- if (s->calls_non_strict_eval()) {
+ if (s->calls_sloppy_eval()) {
// Check that extension is NULL.
__ ldr(temp, ContextOperand(current, Context::EXTENSION_INDEX));
__ tst(temp, temp);
@@ -1370,7 +1394,7 @@ void FullCodeGenerator::EmitLoadGlobalCheckExtensions(Variable* var,
}
// If no outer scope calls eval, we do not need to check more
// context extensions.
- if (!s->outer_scope_calls_non_strict_eval() || s->is_eval_scope()) break;
+ if (!s->outer_scope_calls_sloppy_eval() || s->is_eval_scope()) break;
s = s->outer_scope();
}
@@ -1413,7 +1437,7 @@ MemOperand FullCodeGenerator::ContextSlotOperandCheckExtensions(Variable* var,
for (Scope* s = scope(); s != var->scope(); s = s->outer_scope()) {
if (s->num_heap_slots() > 0) {
- if (s->calls_non_strict_eval()) {
+ if (s->calls_sloppy_eval()) {
// Check that extension is NULL.
__ ldr(temp, ContextOperand(context, Context::EXTENSION_INDEX));
__ tst(temp, temp);
@@ -1451,17 +1475,16 @@ void FullCodeGenerator::EmitDynamicLookupFastCase(Variable* var,
} else if (var->mode() == DYNAMIC_LOCAL) {
Variable* local = var->local_if_not_shadowed();
__ ldr(r0, ContextSlotOperandCheckExtensions(local, slow));
- if (local->mode() == LET ||
- local->mode() == CONST ||
- local->mode() == CONST_HARMONY) {
+ if (local->mode() == LET || local->mode() == CONST ||
+ local->mode() == CONST_LEGACY) {
__ CompareRoot(r0, Heap::kTheHoleValueRootIndex);
- if (local->mode() == CONST) {
+ if (local->mode() == CONST_LEGACY) {
__ LoadRoot(r0, Heap::kUndefinedValueRootIndex, eq);
- } else { // LET || CONST_HARMONY
+ } else { // LET || CONST
__ b(ne, done);
__ mov(r0, Operand(var->name()));
__ push(r0);
- __ CallRuntime(Runtime::kThrowReferenceError, 1);
+ __ CallRuntime(Runtime::kHiddenThrowReferenceError, 1);
}
}
__ jmp(done);
@@ -1478,7 +1501,7 @@ void FullCodeGenerator::EmitVariableLoad(VariableProxy* proxy) {
// variables.
switch (var->location()) {
case Variable::UNALLOCATED: {
- Comment cmnt(masm_, "Global variable");
+ Comment cmnt(masm_, "[ Global variable");
// Use inline caching. Variable name is passed in r2 and the global
// object (receiver) in r0.
__ ldr(r0, GlobalObjectOperand());
@@ -1491,9 +1514,8 @@ void FullCodeGenerator::EmitVariableLoad(VariableProxy* proxy) {
case Variable::PARAMETER:
case Variable::LOCAL:
case Variable::CONTEXT: {
- Comment cmnt(masm_, var->IsContextSlot()
- ? "Context variable"
- : "Stack variable");
+ Comment cmnt(masm_, var->IsContextSlot() ? "[ Context variable"
+ : "[ Stack variable");
if (var->binding_needs_init()) {
// var->scope() may be NULL when the proxy is located in eval code and
// refers to a potential outside binding. Currently those bindings are
@@ -1525,7 +1547,7 @@ void FullCodeGenerator::EmitVariableLoad(VariableProxy* proxy) {
// Check that we always have valid source position.
ASSERT(var->initializer_position() != RelocInfo::kNoPosition);
ASSERT(proxy->position() != RelocInfo::kNoPosition);
- skip_init_check = var->mode() != CONST &&
+ skip_init_check = var->mode() != CONST_LEGACY &&
var->initializer_position() < proxy->position();
}
@@ -1533,18 +1555,18 @@ void FullCodeGenerator::EmitVariableLoad(VariableProxy* proxy) {
// Let and const need a read barrier.
GetVar(r0, var);
__ CompareRoot(r0, Heap::kTheHoleValueRootIndex);
- if (var->mode() == LET || var->mode() == CONST_HARMONY) {
+ if (var->mode() == LET || var->mode() == CONST) {
// Throw a reference error when using an uninitialized let/const
// binding in harmony mode.
Label done;
__ b(ne, &done);
__ mov(r0, Operand(var->name()));
__ push(r0);
- __ CallRuntime(Runtime::kThrowReferenceError, 1);
+ __ CallRuntime(Runtime::kHiddenThrowReferenceError, 1);
__ bind(&done);
} else {
// Uninitialized const bindings outside of harmony mode are unholed.
- ASSERT(var->mode() == CONST);
+ ASSERT(var->mode() == CONST_LEGACY);
__ LoadRoot(r0, Heap::kUndefinedValueRootIndex, eq);
}
context()->Plug(r0);
@@ -1556,15 +1578,15 @@ void FullCodeGenerator::EmitVariableLoad(VariableProxy* proxy) {
}
case Variable::LOOKUP: {
+ Comment cmnt(masm_, "[ Lookup variable");
Label done, slow;
// Generate code for loading from variables potentially shadowed
// by eval-introduced variables.
EmitDynamicLookupFastCase(var, NOT_INSIDE_TYPEOF, &slow, &done);
__ bind(&slow);
- Comment cmnt(masm_, "Lookup variable");
__ mov(r1, Operand(var->name()));
__ Push(cp, r1); // Context and name.
- __ CallRuntime(Runtime::kLoadContextSlot, 2);
+ __ CallRuntime(Runtime::kHiddenLoadContextSlot, 2);
__ bind(&done);
context()->Plug(r0);
}
@@ -1597,7 +1619,7 @@ void FullCodeGenerator::VisitRegExpLiteral(RegExpLiteral* expr) {
__ mov(r2, Operand(expr->pattern()));
__ mov(r1, Operand(expr->flags()));
__ Push(r4, r3, r2, r1);
- __ CallRuntime(Runtime::kMaterializeRegExpLiteral, 4);
+ __ CallRuntime(Runtime::kHiddenMaterializeRegExpLiteral, 4);
__ mov(r5, r0);
__ bind(&materialized);
@@ -1609,7 +1631,7 @@ void FullCodeGenerator::VisitRegExpLiteral(RegExpLiteral* expr) {
__ bind(&runtime_allocate);
__ mov(r0, Operand(Smi::FromInt(size)));
__ Push(r5, r0);
- __ CallRuntime(Runtime::kAllocateInNewSpace, 1);
+ __ CallRuntime(Runtime::kHiddenAllocateInNewSpace, 1);
__ pop(r5);
__ bind(&allocated);
@@ -1649,12 +1671,11 @@ void FullCodeGenerator::VisitObjectLiteral(ObjectLiteral* expr) {
: ObjectLiteral::kNoFlags;
__ mov(r0, Operand(Smi::FromInt(flags)));
int properties_count = constant_properties->length() / 2;
- if ((FLAG_track_double_fields && expr->may_store_doubles()) ||
- expr->depth() > 1 || Serializer::enabled() ||
+ if (expr->may_store_doubles() || expr->depth() > 1 || Serializer::enabled() ||
flags != ObjectLiteral::kFastElements ||
properties_count > FastCloneShallowObjectStub::kMaximumClonedProperties) {
__ Push(r3, r2, r1, r0);
- __ CallRuntime(Runtime::kCreateObjectLiteral, 4);
+ __ CallRuntime(Runtime::kHiddenCreateObjectLiteral, 4);
} else {
FastCloneShallowObjectStub stub(properties_count);
__ CallStub(&stub);
@@ -1692,7 +1713,7 @@ void FullCodeGenerator::VisitObjectLiteral(ObjectLiteral* expr) {
VisitForAccumulatorValue(value);
__ mov(r2, Operand(key->value()));
__ ldr(r1, MemOperand(sp));
- CallStoreIC(NOT_CONTEXTUAL, key->LiteralFeedbackId());
+ CallStoreIC(key->LiteralFeedbackId());
PrepareForBailoutForId(key->id(), NO_REGISTERS);
} else {
VisitForEffect(value);
@@ -1805,7 +1826,7 @@ void FullCodeGenerator::VisitArrayLiteral(ArrayLiteral* expr) {
length > FastCloneShallowArrayStub::kMaximumClonedLength) {
__ mov(r0, Operand(Smi::FromInt(flags)));
__ Push(r3, r2, r1, r0);
- __ CallRuntime(Runtime::kCreateArrayLiteral, 4);
+ __ CallRuntime(Runtime::kHiddenCreateArrayLiteral, 4);
} else {
ASSERT(IsFastSmiOrObjectElementsKind(constant_elements_kind) ||
FLAG_smi_only_arrays);
@@ -1865,13 +1886,9 @@ void FullCodeGenerator::VisitArrayLiteral(ArrayLiteral* expr) {
void FullCodeGenerator::VisitAssignment(Assignment* expr) {
+ ASSERT(expr->target()->IsValidLeftHandSide());
+
Comment cmnt(masm_, "[ Assignment");
- // Invalid left-hand sides are rewritten to have a 'throw ReferenceError'
- // on the left-hand side.
- if (!expr->target()->IsValidLeftHandSide()) {
- VisitForEffect(expr->target());
- return;
- }
// Left-hand side can only be a property, a global or a (parameter or local)
// slot.
@@ -2010,7 +2027,7 @@ void FullCodeGenerator::VisitYield(Yield* expr) {
__ cmp(sp, r1);
__ b(eq, &post_runtime);
__ push(r0); // generator object
- __ CallRuntime(Runtime::kSuspendJSGeneratorObject, 1);
+ __ CallRuntime(Runtime::kHiddenSuspendJSGeneratorObject, 1);
__ ldr(cp, MemOperand(fp, StandardFrameConstants::kContextOffset));
__ bind(&post_runtime);
__ pop(result_register());
@@ -2076,7 +2093,7 @@ void FullCodeGenerator::VisitYield(Yield* expr) {
__ mov(r1, cp);
__ RecordWriteField(r0, JSGeneratorObject::kContextOffset, r1, r2,
kLRHasBeenSaved, kDontSaveFPRegs);
- __ CallRuntime(Runtime::kSuspendJSGeneratorObject, 1);
+ __ CallRuntime(Runtime::kHiddenSuspendJSGeneratorObject, 1);
__ ldr(cp, MemOperand(fp, StandardFrameConstants::kContextOffset));
__ pop(r0); // result
EmitReturnSequence();
@@ -2094,7 +2111,7 @@ void FullCodeGenerator::VisitYield(Yield* expr) {
__ ldr(r1, MemOperand(sp, kPointerSize));
__ ldr(r0, MemOperand(sp, 2 * kPointerSize));
Handle<Code> ic = isolate()->builtins()->KeyedLoadIC_Initialize();
- CallIC(ic, NOT_CONTEXTUAL, TypeFeedbackId::None());
+ CallIC(ic, TypeFeedbackId::None());
__ mov(r1, r0);
__ str(r1, MemOperand(sp, 2 * kPointerSize));
CallFunctionStub stub(1, CALL_AS_METHOD);
@@ -2128,7 +2145,7 @@ void FullCodeGenerator::EmitGeneratorResume(Expression *generator,
Expression *value,
JSGeneratorObject::ResumeMode resume_mode) {
// The value stays in r0, and is ultimately read by the resumed generator, as
- // if the CallRuntime(Runtime::kSuspendJSGeneratorObject) returned it. Or it
+ // if CallRuntime(Runtime::kHiddenSuspendJSGeneratorObject) returned it. Or it
// is read to throw the value when the resumed generator is already closed.
// r1 will hold the generator object until the activation has been resumed.
VisitForStackValue(generator);
@@ -2192,12 +2209,21 @@ void FullCodeGenerator::EmitGeneratorResume(Expression *generator,
__ cmp(r3, Operand(0));
__ b(ne, &slow_resume);
__ ldr(r3, FieldMemOperand(r4, JSFunction::kCodeEntryOffset));
- __ ldr(r2, FieldMemOperand(r1, JSGeneratorObject::kContinuationOffset));
- __ SmiUntag(r2);
- __ add(r3, r3, r2);
- __ mov(r2, Operand(Smi::FromInt(JSGeneratorObject::kGeneratorExecuting)));
- __ str(r2, FieldMemOperand(r1, JSGeneratorObject::kContinuationOffset));
- __ Jump(r3);
+
+ { ConstantPoolUnavailableScope constant_pool_unavailable(masm_);
+ if (FLAG_enable_ool_constant_pool) {
+ // Load the new code object's constant pool pointer.
+ __ ldr(pp,
+ MemOperand(r3, Code::kConstantPoolOffset - Code::kHeaderSize));
+ }
+
+ __ ldr(r2, FieldMemOperand(r1, JSGeneratorObject::kContinuationOffset));
+ __ SmiUntag(r2);
+ __ add(r3, r3, r2);
+ __ mov(r2, Operand(Smi::FromInt(JSGeneratorObject::kGeneratorExecuting)));
+ __ str(r2, FieldMemOperand(r1, JSGeneratorObject::kContinuationOffset));
+ __ Jump(r3);
+ }
__ bind(&slow_resume);
}
@@ -2213,7 +2239,7 @@ void FullCodeGenerator::EmitGeneratorResume(Expression *generator,
ASSERT(!result_register().is(r1));
__ Push(r1, result_register());
__ Push(Smi::FromInt(resume_mode));
- __ CallRuntime(Runtime::kResumeJSGeneratorObject, 3);
+ __ CallRuntime(Runtime::kHiddenResumeJSGeneratorObject, 3);
// Not reached: the runtime call returns elsewhere.
__ stop("not-reached");
@@ -2228,14 +2254,14 @@ void FullCodeGenerator::EmitGeneratorResume(Expression *generator,
} else {
// Throw the provided value.
__ push(r0);
- __ CallRuntime(Runtime::kThrow, 1);
+ __ CallRuntime(Runtime::kHiddenThrow, 1);
}
__ jmp(&done);
// Throw error if we attempt to operate on a running generator.
__ bind(&wrong_state);
__ push(r1);
- __ CallRuntime(Runtime::kThrowGeneratorStateError, 1);
+ __ CallRuntime(Runtime::kHiddenThrowGeneratorStateError, 1);
__ bind(&done);
context()->Plug(result_register());
@@ -2253,7 +2279,7 @@ void FullCodeGenerator::EmitCreateIteratorResult(bool done) {
__ bind(&gc_required);
__ Push(Smi::FromInt(map->instance_size()));
- __ CallRuntime(Runtime::kAllocateInNewSpace, 1);
+ __ CallRuntime(Runtime::kHiddenAllocateInNewSpace, 1);
__ ldr(context_register(),
MemOperand(fp, StandardFrameConstants::kContextOffset));
@@ -2291,7 +2317,7 @@ void FullCodeGenerator::EmitKeyedPropertyLoad(Property* prop) {
SetSourcePosition(prop->position());
// Call keyed load IC. It has arguments key and receiver in r0 and r1.
Handle<Code> ic = isolate()->builtins()->KeyedLoadIC_Initialize();
- CallIC(ic, NOT_CONTEXTUAL, prop->PropertyFeedbackId());
+ CallIC(ic, prop->PropertyFeedbackId());
}
@@ -2318,8 +2344,7 @@ void FullCodeGenerator::EmitInlineSmiBinaryOp(BinaryOperation* expr,
__ bind(&stub_call);
BinaryOpICStub stub(op, mode);
- CallIC(stub.GetCode(isolate()), NOT_CONTEXTUAL,
- expr->BinaryOperationFeedbackId());
+ CallIC(stub.GetCode(isolate()), expr->BinaryOperationFeedbackId());
patch_site.EmitPatchInfo();
__ jmp(&done);
@@ -2396,20 +2421,14 @@ void FullCodeGenerator::EmitBinaryOp(BinaryOperation* expr,
__ pop(r1);
BinaryOpICStub stub(op, mode);
JumpPatchSite patch_site(masm_); // unbound, signals no inlined smi code.
- CallIC(stub.GetCode(isolate()), NOT_CONTEXTUAL,
- expr->BinaryOperationFeedbackId());
+ CallIC(stub.GetCode(isolate()), expr->BinaryOperationFeedbackId());
patch_site.EmitPatchInfo();
context()->Plug(r0);
}
void FullCodeGenerator::EmitAssignment(Expression* expr) {
- // Invalid left-hand sides are rewritten by the parser to have a 'throw
- // ReferenceError' on the left-hand side.
- if (!expr->IsValidLeftHandSide()) {
- VisitForEffect(expr);
- return;
- }
+ ASSERT(expr->IsValidLeftHandSide());
// Left-hand side can only be a property, a global or a (parameter or local)
// slot.
@@ -2435,7 +2454,7 @@ void FullCodeGenerator::EmitAssignment(Expression* expr) {
__ mov(r1, r0);
__ pop(r0); // Restore value.
__ mov(r2, Operand(prop->key()->AsLiteral()->value()));
- CallStoreIC(NOT_CONTEXTUAL);
+ CallStoreIC();
break;
}
case KEYED_PROPERTY: {
@@ -2444,7 +2463,7 @@ void FullCodeGenerator::EmitAssignment(Expression* expr) {
VisitForAccumulatorValue(prop->key());
__ mov(r1, r0);
__ Pop(r0, r2); // r0 = restored value.
- Handle<Code> ic = is_classic_mode()
+ Handle<Code> ic = strict_mode() == SLOPPY
? isolate()->builtins()->KeyedStoreIC_Initialize()
: isolate()->builtins()->KeyedStoreIC_Initialize_Strict();
CallIC(ic);
@@ -2455,41 +2474,59 @@ void FullCodeGenerator::EmitAssignment(Expression* expr) {
}
-void FullCodeGenerator::EmitVariableAssignment(Variable* var,
- Token::Value op) {
+void FullCodeGenerator::EmitStoreToStackLocalOrContextSlot(
+ Variable* var, MemOperand location) {
+ __ str(result_register(), location);
+ if (var->IsContextSlot()) {
+ // RecordWrite may destroy all its register arguments.
+ __ mov(r3, result_register());
+ int offset = Context::SlotOffset(var->index());
+ __ RecordWriteContextSlot(
+ r1, offset, r3, r2, kLRHasBeenSaved, kDontSaveFPRegs);
+ }
+}
+
+
+void FullCodeGenerator::EmitCallStoreContextSlot(
+ Handle<String> name, StrictMode strict_mode) {
+ __ push(r0); // Value.
+ __ mov(r1, Operand(name));
+ __ mov(r0, Operand(Smi::FromInt(strict_mode)));
+ __ Push(cp, r1, r0); // Context, name, strict mode.
+ __ CallRuntime(Runtime::kHiddenStoreContextSlot, 4);
+}
+
+
+void FullCodeGenerator::EmitVariableAssignment(Variable* var, Token::Value op) {
if (var->IsUnallocated()) {
// Global var, const, or let.
__ mov(r2, Operand(var->name()));
__ ldr(r1, GlobalObjectOperand());
- CallStoreIC(CONTEXTUAL);
- } else if (op == Token::INIT_CONST) {
+ CallStoreIC();
+
+ } else if (op == Token::INIT_CONST_LEGACY) {
// Const initializers need a write barrier.
ASSERT(!var->IsParameter()); // No const parameters.
- if (var->IsStackLocal()) {
- __ ldr(r1, StackOperand(var));
- __ CompareRoot(r1, Heap::kTheHoleValueRootIndex);
- __ str(result_register(), StackOperand(var), eq);
- } else {
- ASSERT(var->IsContextSlot() || var->IsLookupSlot());
- // Like var declarations, const declarations are hoisted to function
- // scope. However, unlike var initializers, const initializers are
- // able to drill a hole to that function context, even from inside a
- // 'with' context. We thus bypass the normal static scope lookup for
- // var->IsContextSlot().
+ if (var->IsLookupSlot()) {
__ push(r0);
__ mov(r0, Operand(var->name()));
__ Push(cp, r0); // Context and name.
- __ CallRuntime(Runtime::kInitializeConstContextSlot, 3);
+ __ CallRuntime(Runtime::kHiddenInitializeConstContextSlot, 3);
+ } else {
+ ASSERT(var->IsStackAllocated() || var->IsContextSlot());
+ Label skip;
+ MemOperand location = VarOperand(var, r1);
+ __ ldr(r2, location);
+ __ CompareRoot(r2, Heap::kTheHoleValueRootIndex);
+ __ b(ne, &skip);
+ EmitStoreToStackLocalOrContextSlot(var, location);
+ __ bind(&skip);
}
} else if (var->mode() == LET && op != Token::INIT_LET) {
// Non-initializing assignment to let variable needs a write barrier.
if (var->IsLookupSlot()) {
- __ push(r0); // Value.
- __ mov(r1, Operand(var->name()));
- __ mov(r0, Operand(Smi::FromInt(language_mode())));
- __ Push(cp, r1, r0); // Context, name, strict mode.
- __ CallRuntime(Runtime::kStoreContextSlot, 4);
+ EmitCallStoreContextSlot(var->name(), strict_mode());
} else {
ASSERT(var->IsStackAllocated() || var->IsContextSlot());
Label assign;
@@ -2499,23 +2536,19 @@ void FullCodeGenerator::EmitVariableAssignment(Variable* var,
__ b(ne, &assign);
__ mov(r3, Operand(var->name()));
__ push(r3);
- __ CallRuntime(Runtime::kThrowReferenceError, 1);
+ __ CallRuntime(Runtime::kHiddenThrowReferenceError, 1);
// Perform the assignment.
__ bind(&assign);
- __ str(result_register(), location);
- if (var->IsContextSlot()) {
- // RecordWrite may destroy all its register arguments.
- __ mov(r3, result_register());
- int offset = Context::SlotOffset(var->index());
- __ RecordWriteContextSlot(
- r1, offset, r3, r2, kLRHasBeenSaved, kDontSaveFPRegs);
- }
+ EmitStoreToStackLocalOrContextSlot(var, location);
}
- } else if (!var->is_const_mode() || op == Token::INIT_CONST_HARMONY) {
+ } else if (!var->is_const_mode() || op == Token::INIT_CONST) {
// Assignment to var or initializing assignment to let/const
// in harmony mode.
- if (var->IsStackAllocated() || var->IsContextSlot()) {
+ if (var->IsLookupSlot()) {
+ EmitCallStoreContextSlot(var->name(), strict_mode());
+ } else {
+ ASSERT((var->IsStackAllocated() || var->IsContextSlot()));
MemOperand location = VarOperand(var, r1);
if (generate_debug_code_ && op == Token::INIT_LET) {
// Check for an uninitialized let binding.
@@ -2523,21 +2556,7 @@ void FullCodeGenerator::EmitVariableAssignment(Variable* var,
__ CompareRoot(r2, Heap::kTheHoleValueRootIndex);
__ Check(eq, kLetBindingReInitialization);
}
- // Perform the assignment.
- __ str(r0, location);
- if (var->IsContextSlot()) {
- __ mov(r3, r0);
- int offset = Context::SlotOffset(var->index());
- __ RecordWriteContextSlot(
- r1, offset, r3, r2, kLRHasBeenSaved, kDontSaveFPRegs);
- }
- } else {
- ASSERT(var->IsLookupSlot());
- __ push(r0); // Value.
- __ mov(r1, Operand(var->name()));
- __ mov(r0, Operand(Smi::FromInt(language_mode())));
- __ Push(cp, r1, r0); // Context, name, strict mode.
- __ CallRuntime(Runtime::kStoreContextSlot, 4);
+ EmitStoreToStackLocalOrContextSlot(var, location);
}
}
// Non-initializing assignments to consts are ignored.
@@ -2555,7 +2574,7 @@ void FullCodeGenerator::EmitNamedPropertyAssignment(Assignment* expr) {
__ mov(r2, Operand(prop->key()->AsLiteral()->value()));
__ pop(r1);
- CallStoreIC(NOT_CONTEXTUAL, expr->AssignmentFeedbackId());
+ CallStoreIC(expr->AssignmentFeedbackId());
PrepareForBailoutForId(expr->AssignmentId(), TOS_REG);
context()->Plug(r0);
@@ -2569,10 +2588,10 @@ void FullCodeGenerator::EmitKeyedPropertyAssignment(Assignment* expr) {
SetSourcePosition(expr->position());
__ Pop(r2, r1); // r1 = key.
- Handle<Code> ic = is_classic_mode()
+ Handle<Code> ic = strict_mode() == SLOPPY
? isolate()->builtins()->KeyedStoreIC_Initialize()
: isolate()->builtins()->KeyedStoreIC_Initialize_Strict();
- CallIC(ic, NOT_CONTEXTUAL, expr->AssignmentFeedbackId());
+ CallIC(ic, expr->AssignmentFeedbackId());
PrepareForBailoutForId(expr->AssignmentId(), TOS_REG);
context()->Plug(r0);
@@ -2599,12 +2618,10 @@ void FullCodeGenerator::VisitProperty(Property* expr) {
void FullCodeGenerator::CallIC(Handle<Code> code,
- ContextualMode mode,
TypeFeedbackId ast_id) {
ic_total_count_++;
// All calls must have a predictable size in full-codegen code to ensure that
// the debugger can patch them correctly.
- ASSERT(mode != CONTEXTUAL || ast_id.IsNone());
__ Call(code, RelocInfo::CODE_TARGET, ast_id, al,
NEVER_INLINE_TARGET_ADDRESS);
}
@@ -2624,7 +2641,7 @@ void FullCodeGenerator::EmitCallWithIC(Call* expr) {
PrepareForBailout(callee, NO_REGISTERS);
}
// Push undefined as receiver. This is patched in the method prologue if it
- // is a classic mode method.
+ // is a sloppy mode method.
__ Push(isolate()->factory()->undefined_value());
flags = NO_CALL_FUNCTION_FLAGS;
} else {
@@ -2716,15 +2733,15 @@ void FullCodeGenerator::EmitCallWithStub(Call* expr) {
SetSourcePosition(expr->position());
Handle<Object> uninitialized =
- TypeFeedbackCells::UninitializedSentinel(isolate());
- Handle<Cell> cell = isolate()->factory()->NewCell(uninitialized);
- RecordTypeFeedbackCell(expr->CallFeedbackId(), cell);
- __ mov(r2, Operand(cell));
+ TypeFeedbackInfo::UninitializedSentinel(isolate());
+ StoreFeedbackVectorSlot(expr->CallFeedbackSlot(), uninitialized);
+ __ Move(r2, FeedbackVector());
+ __ mov(r3, Operand(Smi::FromInt(expr->CallFeedbackSlot())));
// Record call targets in unoptimized code.
CallFunctionStub stub(arg_count, RECORD_CALL_TARGET);
__ ldr(r1, MemOperand(sp, (arg_count + 1) * kPointerSize));
- __ CallStub(&stub, expr->CallFeedbackId());
+ __ CallStub(&stub);
RecordJSReturnSite(expr);
// Restore context register.
__ ldr(cp, MemOperand(fp, StandardFrameConstants::kContextOffset));
@@ -2744,15 +2761,15 @@ void FullCodeGenerator::EmitResolvePossiblyDirectEval(int arg_count) {
int receiver_offset = 2 + info_->scope()->num_parameters();
__ ldr(r3, MemOperand(fp, receiver_offset * kPointerSize));
- // r2: the language mode.
- __ mov(r2, Operand(Smi::FromInt(language_mode())));
+ // r2: strict mode.
+ __ mov(r2, Operand(Smi::FromInt(strict_mode())));
// r1: the start position of the scope the call resides in.
__ mov(r1, Operand(Smi::FromInt(scope()->start_position())));
// Do the runtime call.
__ Push(r4, r3, r2, r1);
- __ CallRuntime(Runtime::kResolvePossiblyDirectEval, 5);
+ __ CallRuntime(Runtime::kHiddenResolvePossiblyDirectEval, 5);
}
@@ -2768,8 +2785,8 @@ void FullCodeGenerator::VisitCall(Call* expr) {
Call::CallType call_type = expr->GetCallType(isolate());
if (call_type == Call::POSSIBLY_EVAL_CALL) {
- // In a call to eval, we first call %ResolvePossiblyDirectEval to
- // resolve the function we need to call and the receiver of the
+ // In a call to eval, we first call RuntimeHidden_ResolvePossiblyDirectEval
+ // to resolve the function we need to call and the receiver of the
// call. Then we call the resolved function using the given
// arguments.
ZoneList<Expression*>* args = expr->arguments();
@@ -2826,7 +2843,7 @@ void FullCodeGenerator::VisitCall(Call* expr) {
ASSERT(!context_register().is(r2));
__ mov(r2, Operand(proxy->name()));
__ Push(context_register(), r2);
- __ CallRuntime(Runtime::kLoadContextSlot, 2);
+ __ CallRuntime(Runtime::kHiddenLoadContextSlot, 2);
__ Push(r0, r1); // Function, receiver.
// If fast case code has been generated, emit code to push the
@@ -2905,10 +2922,17 @@ void FullCodeGenerator::VisitCallNew(CallNew* expr) {
// Record call targets in unoptimized code.
Handle<Object> uninitialized =
- TypeFeedbackCells::UninitializedSentinel(isolate());
- Handle<Cell> cell = isolate()->factory()->NewCell(uninitialized);
- RecordTypeFeedbackCell(expr->CallNewFeedbackId(), cell);
- __ mov(r2, Operand(cell));
+ TypeFeedbackInfo::UninitializedSentinel(isolate());
+ StoreFeedbackVectorSlot(expr->CallNewFeedbackSlot(), uninitialized);
+ if (FLAG_pretenuring_call_new) {
+ StoreFeedbackVectorSlot(expr->AllocationSiteFeedbackSlot(),
+ isolate()->factory()->NewAllocationSite());
+ ASSERT(expr->AllocationSiteFeedbackSlot() ==
+ expr->CallNewFeedbackSlot() + 1);
+ }
+
+ __ Move(r2, FeedbackVector());
+ __ mov(r3, Operand(Smi::FromInt(expr->CallNewFeedbackSlot())));
CallConstructStub stub(RECORD_CALL_TARGET);
__ Call(stub.GetCode(isolate()), RelocInfo::CONSTRUCT_CALL);
@@ -3380,7 +3404,7 @@ void FullCodeGenerator::EmitLog(CallRuntime* expr) {
if (CodeGenerator::ShouldGenerateLog(isolate(), args->at(0))) {
VisitForStackValue(args->at(1));
VisitForStackValue(args->at(2));
- __ CallRuntime(Runtime::kLog, 2);
+ __ CallRuntime(Runtime::kHiddenLog, 2);
}
// Finally, we're expected to leave a value on the top of the stack.
@@ -3474,7 +3498,7 @@ void FullCodeGenerator::EmitDateField(CallRuntime* expr) {
}
__ bind(&not_date_object);
- __ CallRuntime(Runtime::kThrowNotDateError, 0);
+ __ CallRuntime(Runtime::kHiddenThrowNotDateError, 0);
__ bind(&done);
context()->Plug(r0);
}
@@ -3843,7 +3867,7 @@ void FullCodeGenerator::EmitGetFromCache(CallRuntime* expr) {
__ bind(&not_found);
// Call runtime to perform the lookup.
__ Push(cache, key);
- __ CallRuntime(Runtime::kGetFromCache, 2);
+ __ CallRuntime(Runtime::kHiddenGetFromCache, 2);
__ bind(&done);
context()->Plug(r0);
@@ -4120,8 +4144,8 @@ void FullCodeGenerator::EmitFastAsciiArrayJoin(CallRuntime* expr) {
void FullCodeGenerator::VisitCallRuntime(CallRuntime* expr) {
- Handle<String> name = expr->name();
- if (name->length() > 0 && name->Get(0) == '_') {
+ if (expr->function() != NULL &&
+ expr->function()->intrinsic_type == Runtime::INLINE) {
Comment cmnt(masm_, "[ InlineRuntimeCall");
EmitInlineRuntimeCall(expr);
return;
@@ -4185,9 +4209,7 @@ void FullCodeGenerator::VisitUnaryOperation(UnaryOperation* expr) {
if (property != NULL) {
VisitForStackValue(property->obj());
VisitForStackValue(property->key());
- StrictModeFlag strict_mode_flag = (language_mode() == CLASSIC_MODE)
- ? kNonStrictMode : kStrictMode;
- __ mov(r1, Operand(Smi::FromInt(strict_mode_flag)));
+ __ mov(r1, Operand(Smi::FromInt(strict_mode())));
__ push(r1);
__ InvokeBuiltin(Builtins::DELETE, CALL_FUNCTION);
context()->Plug(r0);
@@ -4195,11 +4217,11 @@ void FullCodeGenerator::VisitUnaryOperation(UnaryOperation* expr) {
Variable* var = proxy->var();
// Delete of an unqualified identifier is disallowed in strict mode
// but "delete this" is allowed.
- ASSERT(language_mode() == CLASSIC_MODE || var->is_this());
+ ASSERT(strict_mode() == SLOPPY || var->is_this());
if (var->IsUnallocated()) {
__ ldr(r2, GlobalObjectOperand());
__ mov(r1, Operand(var->name()));
- __ mov(r0, Operand(Smi::FromInt(kNonStrictMode)));
+ __ mov(r0, Operand(Smi::FromInt(SLOPPY)));
__ Push(r2, r1, r0);
__ InvokeBuiltin(Builtins::DELETE, CALL_FUNCTION);
context()->Plug(r0);
@@ -4213,7 +4235,7 @@ void FullCodeGenerator::VisitUnaryOperation(UnaryOperation* expr) {
ASSERT(!context_register().is(r2));
__ mov(r2, Operand(var->name()));
__ Push(context_register(), r2);
- __ CallRuntime(Runtime::kDeleteContextSlot, 2);
+ __ CallRuntime(Runtime::kHiddenDeleteContextSlot, 2);
context()->Plug(r0);
}
} else {
@@ -4288,16 +4310,11 @@ void FullCodeGenerator::VisitUnaryOperation(UnaryOperation* expr) {
void FullCodeGenerator::VisitCountOperation(CountOperation* expr) {
+ ASSERT(expr->expression()->IsValidLeftHandSide());
+
Comment cmnt(masm_, "[ CountOperation");
SetSourcePosition(expr->position());
- // Invalid left-hand sides are rewritten to have a 'throw ReferenceError'
- // as the left-hand side.
- if (!expr->expression()->IsValidLeftHandSide()) {
- VisitForEffect(expr->expression());
- return;
- }
-
// Expression can only be a property, a global or a (parameter or local)
// slot.
enum LhsKind { VARIABLE, NAMED_PROPERTY, KEYED_PROPERTY };
@@ -4411,9 +4428,7 @@ void FullCodeGenerator::VisitCountOperation(CountOperation* expr) {
SetSourcePosition(expr->position());
BinaryOpICStub stub(Token::ADD, NO_OVERWRITE);
- CallIC(stub.GetCode(isolate()),
- NOT_CONTEXTUAL,
- expr->CountBinOpFeedbackId());
+ CallIC(stub.GetCode(isolate()), expr->CountBinOpFeedbackId());
patch_site.EmitPatchInfo();
__ bind(&done);
@@ -4442,7 +4457,7 @@ void FullCodeGenerator::VisitCountOperation(CountOperation* expr) {
case NAMED_PROPERTY: {
__ mov(r2, Operand(prop->key()->AsLiteral()->value()));
__ pop(r1);
- CallStoreIC(NOT_CONTEXTUAL, expr->CountStoreFeedbackId());
+ CallStoreIC(expr->CountStoreFeedbackId());
PrepareForBailoutForId(expr->AssignmentId(), TOS_REG);
if (expr->is_postfix()) {
if (!context()->IsEffect()) {
@@ -4455,10 +4470,10 @@ void FullCodeGenerator::VisitCountOperation(CountOperation* expr) {
}
case KEYED_PROPERTY: {
__ Pop(r2, r1); // r1 = key. r2 = receiver.
- Handle<Code> ic = is_classic_mode()
+ Handle<Code> ic = strict_mode() == SLOPPY
? isolate()->builtins()->KeyedStoreIC_Initialize()
: isolate()->builtins()->KeyedStoreIC_Initialize_Strict();
- CallIC(ic, NOT_CONTEXTUAL, expr->CountStoreFeedbackId());
+ CallIC(ic, expr->CountStoreFeedbackId());
PrepareForBailoutForId(expr->AssignmentId(), TOS_REG);
if (expr->is_postfix()) {
if (!context()->IsEffect()) {
@@ -4478,7 +4493,7 @@ void FullCodeGenerator::VisitForTypeofValue(Expression* expr) {
ASSERT(!context()->IsTest());
VariableProxy* proxy = expr->AsVariableProxy();
if (proxy != NULL && proxy->var()->IsUnallocated()) {
- Comment cmnt(masm_, "Global variable");
+ Comment cmnt(masm_, "[ Global variable");
__ ldr(r0, GlobalObjectOperand());
__ mov(r2, Operand(proxy->name()));
// Use a regular load, not a contextual load, to avoid a reference
@@ -4487,6 +4502,7 @@ void FullCodeGenerator::VisitForTypeofValue(Expression* expr) {
PrepareForBailout(expr, TOS_REG);
context()->Plug(r0);
} else if (proxy != NULL && proxy->var()->IsLookupSlot()) {
+ Comment cmnt(masm_, "[ Lookup slot");
Label done, slow;
// Generate code for loading from variables potentially shadowed
@@ -4496,7 +4512,7 @@ void FullCodeGenerator::VisitForTypeofValue(Expression* expr) {
__ bind(&slow);
__ mov(r0, Operand(proxy->name()));
__ Push(cp, r0);
- __ CallRuntime(Runtime::kLoadContextSlotNoReferenceError, 2);
+ __ CallRuntime(Runtime::kHiddenLoadContextSlotNoReferenceError, 2);
PrepareForBailout(expr, TOS_REG);
__ bind(&done);
@@ -4648,7 +4664,7 @@ void FullCodeGenerator::VisitCompareOperation(CompareOperation* expr) {
// Record position and call the compare IC.
SetSourcePosition(expr->position());
Handle<Code> ic = CompareIC::GetUninitialized(isolate(), op);
- CallIC(ic, NOT_CONTEXTUAL, expr->CompareOperationFeedbackId());
+ CallIC(ic, expr->CompareOperationFeedbackId());
patch_site.EmitPatchInfo();
PrepareForBailoutBeforeSplit(expr, true, if_true, if_false);
__ cmp(r0, Operand::Zero());
@@ -4683,7 +4699,7 @@ void FullCodeGenerator::EmitLiteralCompareNil(CompareOperation* expr,
Split(eq, if_true, if_false, fall_through);
} else {
Handle<Code> ic = CompareNilICStub::GetUninitialized(isolate(), nil);
- CallIC(ic, NOT_CONTEXTUAL, expr->CompareOperationFeedbackId());
+ CallIC(ic, expr->CompareOperationFeedbackId());
__ cmp(r0, Operand(0));
Split(ne, if_true, if_false, fall_through);
}
@@ -4839,7 +4855,18 @@ FullCodeGenerator::NestedStatement* FullCodeGenerator::TryFinally::Exit(
#undef __
-static const int32_t kBranchBeforeInterrupt = 0x5a000004;
+static Address GetInterruptImmediateLoadAddress(Address pc) {
+ Address load_address = pc - 2 * Assembler::kInstrSize;
+ if (!FLAG_enable_ool_constant_pool) {
+ ASSERT(Assembler::IsLdrPcImmediateOffset(Memory::int32_at(load_address)));
+ } else if (Assembler::IsMovT(Memory::int32_at(load_address))) {
+ load_address -= Assembler::kInstrSize;
+ ASSERT(Assembler::IsMovW(Memory::int32_at(load_address)));
+ } else {
+ ASSERT(Assembler::IsLdrPpImmediateOffset(Memory::int32_at(load_address)));
+ }
+ return load_address;
+}
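With pc taken from the back edge table (one instruction past blx ip, as the ASSERT in GetBackEdgeState below checks), the load sits two instructions back in the ldr case and three in the movw/movt case, and the conditional branch immediately precedes the load. A worked check of the ldr-case arithmetic used by PatchAt (standalone C++, assuming the 4-byte kInstrSize):

    #include <cassert>
    #include <cstdint>

    int main() {
      const intptr_t kInstrSize = 4;
      intptr_t pc = 0x1000;                                 // back edge table pc
      intptr_t load_address = pc - 2 * kInstrSize;          // ldr ip, [pc/pp, #off]
      intptr_t branch_address = load_address - kInstrSize;  // bpl ok
      intptr_t branch_offset = pc + kInstrSize - branch_address;
      assert(branch_offset == 4 * kInstrSize);  // matches the old fixed 4-instruction jump
      // In the movw/movt case the load is one instruction earlier and the
      // recomputed offset becomes 5 * kInstrSize.
      return 0;
    }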
void BackEdgeTable::PatchAt(Code* unoptimized_code,
@@ -4847,37 +4874,42 @@ void BackEdgeTable::PatchAt(Code* unoptimized_code,
BackEdgeState target_state,
Code* replacement_code) {
static const int kInstrSize = Assembler::kInstrSize;
- Address branch_address = pc - 3 * kInstrSize;
+ Address pc_immediate_load_address = GetInterruptImmediateLoadAddress(pc);
+ Address branch_address = pc_immediate_load_address - kInstrSize;
CodePatcher patcher(branch_address, 1);
-
switch (target_state) {
case INTERRUPT:
+ {
// <decrement profiling counter>
- // 2a 00 00 01 bpl ok
- // e5 9f c? ?? ldr ip, [pc, <interrupt stub address>]
- // e1 2f ff 3c blx ip
+ // bpl ok
+ // ; load interrupt stub address into ip - either of:
+ // ldr ip, [pc/pp, <constant pool offset>] | movw ip, <immed low>
+ // | movt ip, <immed high>
+ // blx ip
// ok-label
- patcher.masm()->b(4 * kInstrSize, pl); // Jump offset is 4 instructions.
- ASSERT_EQ(kBranchBeforeInterrupt, Memory::int32_at(branch_address));
+
+      // Calculate the branch offset to the ok-label - this is the difference between
+ // the branch address and |pc| (which points at <blx ip>) plus one instr.
+ int branch_offset = pc + kInstrSize - branch_address;
+ patcher.masm()->b(branch_offset, pl);
break;
+ }
case ON_STACK_REPLACEMENT:
case OSR_AFTER_STACK_CHECK:
// <decrement profiling counter>
- // e1 a0 00 00 mov r0, r0 (NOP)
- // e5 9f c? ?? ldr ip, [pc, <on-stack replacement address>]
- // e1 2f ff 3c blx ip
+ // mov r0, r0 (NOP)
+ // ; load on-stack replacement address into ip - either of:
+ // ldr ip, [pc/pp, <constant pool offset>] | movw ip, <immed low>
+ // | movt ip, <immed high>
+ // blx ip
// ok-label
patcher.masm()->nop();
break;
}
- Address pc_immediate_load_address = pc - 2 * kInstrSize;
// Replace the call address.
- uint32_t interrupt_address_offset =
- Memory::uint16_at(pc_immediate_load_address) & 0xfff;
- Address interrupt_address_pointer = pc + interrupt_address_offset;
- Memory::uint32_at(interrupt_address_pointer) =
- reinterpret_cast<uint32_t>(replacement_code->entry());
+ Assembler::set_target_address_at(pc_immediate_load_address, unoptimized_code,
+ replacement_code->entry());
unoptimized_code->GetHeap()->incremental_marking()->RecordCodeTargetPatch(
unoptimized_code, pc_immediate_load_address, replacement_code);
@@ -4891,34 +4923,26 @@ BackEdgeTable::BackEdgeState BackEdgeTable::GetBackEdgeState(
static const int kInstrSize = Assembler::kInstrSize;
ASSERT(Memory::int32_at(pc - kInstrSize) == kBlxIp);
- Address branch_address = pc - 3 * kInstrSize;
- Address pc_immediate_load_address = pc - 2 * kInstrSize;
- uint32_t interrupt_address_offset =
- Memory::uint16_at(pc_immediate_load_address) & 0xfff;
- Address interrupt_address_pointer = pc + interrupt_address_offset;
-
- if (Memory::int32_at(branch_address) == kBranchBeforeInterrupt) {
- ASSERT(Memory::uint32_at(interrupt_address_pointer) ==
- reinterpret_cast<uint32_t>(
- isolate->builtins()->InterruptCheck()->entry()));
- ASSERT(Assembler::IsLdrPcImmediateOffset(
- Assembler::instr_at(pc_immediate_load_address)));
+ Address pc_immediate_load_address = GetInterruptImmediateLoadAddress(pc);
+ Address branch_address = pc_immediate_load_address - kInstrSize;
+ Address interrupt_address = Assembler::target_address_at(
+ pc_immediate_load_address, unoptimized_code);
+
+ if (Assembler::IsBranch(Assembler::instr_at(branch_address))) {
+ ASSERT(interrupt_address ==
+ isolate->builtins()->InterruptCheck()->entry());
return INTERRUPT;
}
ASSERT(Assembler::IsNop(Assembler::instr_at(branch_address)));
- ASSERT(Assembler::IsLdrPcImmediateOffset(
- Assembler::instr_at(pc_immediate_load_address)));
- if (Memory::uint32_at(interrupt_address_pointer) ==
- reinterpret_cast<uint32_t>(
- isolate->builtins()->OnStackReplacement()->entry())) {
+ if (interrupt_address ==
+ isolate->builtins()->OnStackReplacement()->entry()) {
return ON_STACK_REPLACEMENT;
}
- ASSERT(Memory::uint32_at(interrupt_address_pointer) ==
- reinterpret_cast<uint32_t>(
- isolate->builtins()->OsrAfterStackCheck()->entry()));
+ ASSERT(interrupt_address ==
+ isolate->builtins()->OsrAfterStackCheck()->entry());
return OSR_AFTER_STACK_CHECK;
}
diff --git a/deps/v8/src/arm/ic-arm.cc b/deps/v8/src/arm/ic-arm.cc
index d324a8c6b3..3d57105afe 100644
--- a/deps/v8/src/arm/ic-arm.cc
+++ b/deps/v8/src/arm/ic-arm.cc
@@ -333,8 +333,7 @@ static void GenerateKeyNameCheck(MacroAssembler* masm,
}
-void LoadIC::GenerateMegamorphic(MacroAssembler* masm,
- ExtraICState extra_state) {
+void LoadIC::GenerateMegamorphic(MacroAssembler* masm) {
// ----------- S t a t e -------------
// -- r2 : name
// -- lr : return address
@@ -342,9 +341,7 @@ void LoadIC::GenerateMegamorphic(MacroAssembler* masm,
// -----------------------------------
// Probe the stub cache.
- Code::Flags flags = Code::ComputeFlags(
- Code::HANDLER, MONOMORPHIC, extra_state,
- Code::NORMAL, Code::LOAD_IC);
+ Code::Flags flags = Code::ComputeHandlerFlags(Code::LOAD_IC);
masm->isolate()->stub_cache()->GenerateProbe(
masm, flags, r0, r2, r3, r4, r5, r6);
@@ -430,7 +427,7 @@ static MemOperand GenerateMappedArgumentsLookup(MacroAssembler* masm,
__ b(ne, slow_case);
// Load the elements into scratch1 and check its map.
- Handle<Map> arguments_map(heap->non_strict_arguments_elements_map());
+ Handle<Map> arguments_map(heap->sloppy_arguments_elements_map());
__ ldr(scratch1, FieldMemOperand(object, JSObject::kElementsOffset));
__ CheckMap(scratch1, scratch2, arguments_map, slow_case, DONT_DO_SMI_CHECK);
@@ -492,7 +489,7 @@ static MemOperand GenerateUnmappedArgumentsLookup(MacroAssembler* masm,
}
-void KeyedLoadIC::GenerateNonStrictArguments(MacroAssembler* masm) {
+void KeyedLoadIC::GenerateSloppyArguments(MacroAssembler* masm) {
// ---------- S t a t e --------------
// -- lr : return address
// -- r0 : key
@@ -518,7 +515,7 @@ void KeyedLoadIC::GenerateNonStrictArguments(MacroAssembler* masm) {
}
-void KeyedStoreIC::GenerateNonStrictArguments(MacroAssembler* masm) {
+void KeyedStoreIC::GenerateSloppyArguments(MacroAssembler* masm) {
// ---------- S t a t e --------------
// -- r0 : value
// -- r1 : key
@@ -879,7 +876,7 @@ void KeyedStoreIC::GenerateSlow(MacroAssembler* masm) {
void KeyedStoreIC::GenerateRuntimeSetProperty(MacroAssembler* masm,
- StrictModeFlag strict_mode) {
+ StrictMode strict_mode) {
// ---------- S t a t e --------------
// -- r0 : value
// -- r1 : key
@@ -1063,7 +1060,7 @@ static void KeyedStoreGenerateGenericHelper(
void KeyedStoreIC::GenerateGeneric(MacroAssembler* masm,
- StrictModeFlag strict_mode) {
+ StrictMode strict_mode) {
// ---------- S t a t e --------------
// -- r0 : value
// -- r1 : key
@@ -1162,8 +1159,7 @@ void KeyedStoreIC::GenerateGeneric(MacroAssembler* masm,
}
-void StoreIC::GenerateMegamorphic(MacroAssembler* masm,
- ExtraICState extra_ic_state) {
+void StoreIC::GenerateMegamorphic(MacroAssembler* masm) {
// ----------- S t a t e -------------
// -- r0 : value
// -- r1 : receiver
@@ -1172,9 +1168,7 @@ void StoreIC::GenerateMegamorphic(MacroAssembler* masm,
// -----------------------------------
// Get the receiver from the stack and probe the stub cache.
- Code::Flags flags = Code::ComputeFlags(
- Code::HANDLER, MONOMORPHIC, extra_ic_state,
- Code::NORMAL, Code::STORE_IC);
+ Code::Flags flags = Code::ComputeHandlerFlags(Code::STORE_IC);
masm->isolate()->stub_cache()->GenerateProbe(
masm, flags, r1, r2, r3, r4, r5, r6);
@@ -1225,7 +1219,7 @@ void StoreIC::GenerateNormal(MacroAssembler* masm) {
void StoreIC::GenerateRuntimeSetProperty(MacroAssembler* masm,
- StrictModeFlag strict_mode) {
+ StrictMode strict_mode) {
// ----------- S t a t e -------------
// -- r0 : value
// -- r1 : receiver
diff --git a/deps/v8/src/arm/lithium-arm.cc b/deps/v8/src/arm/lithium-arm.cc
index fcd7d3c9a3..55705b8073 100644
--- a/deps/v8/src/arm/lithium-arm.cc
+++ b/deps/v8/src/arm/lithium-arm.cc
@@ -831,7 +831,6 @@ void LChunkBuilder::DoBasicBlock(HBasicBlock* block, HBasicBlock* next_block) {
void LChunkBuilder::VisitInstruction(HInstruction* current) {
HInstruction* old_current = current_instruction_;
current_instruction_ = current;
- if (current->has_position()) position_ = current->position();
LInstruction* instr = NULL;
if (current->CanReplaceWithDummyUses()) {
@@ -1110,6 +1109,7 @@ LInstruction* LChunkBuilder::DoUnaryMathOperation(HUnaryMathOperation* instr) {
case kMathExp: return DoMathExp(instr);
case kMathSqrt: return DoMathSqrt(instr);
case kMathPowHalf: return DoMathPowHalf(instr);
+ case kMathClz32: return DoMathClz32(instr);
default:
UNREACHABLE();
return NULL;
@@ -1151,6 +1151,13 @@ LInstruction* LChunkBuilder::DoMathLog(HUnaryMathOperation* instr) {
}
+LInstruction* LChunkBuilder::DoMathClz32(HUnaryMathOperation* instr) {
+ LOperand* input = UseRegisterAtStart(instr->value());
+ LMathClz32* result = new(zone()) LMathClz32(input);
+ return DefineAsRegister(result);
+}
+
+
LInstruction* LChunkBuilder::DoMathExp(HUnaryMathOperation* instr) {
ASSERT(instr->representation().IsDouble());
ASSERT(instr->value()->representation().IsDouble());
@@ -1242,21 +1249,62 @@ LInstruction* LChunkBuilder::DoBitwise(HBitwise* instr) {
}
+LInstruction* LChunkBuilder::DoDivByPowerOf2I(HDiv* instr) {
+ ASSERT(instr->representation().IsSmiOrInteger32());
+ ASSERT(instr->left()->representation().Equals(instr->representation()));
+ ASSERT(instr->right()->representation().Equals(instr->representation()));
+ LOperand* dividend = UseRegister(instr->left());
+ int32_t divisor = instr->right()->GetInteger32Constant();
+ LInstruction* result = DefineAsRegister(new(zone()) LDivByPowerOf2I(
+ dividend, divisor));
+ if ((instr->CheckFlag(HValue::kBailoutOnMinusZero) && divisor < 0) ||
+ (instr->CheckFlag(HValue::kCanOverflow) && divisor == -1) ||
+ (!instr->CheckFlag(HInstruction::kAllUsesTruncatingToInt32) &&
+ divisor != 1 && divisor != -1)) {
+ result = AssignEnvironment(result);
+ }
+ return result;
+}
+
+
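The three AssignEnvironment conditions in DoDivByPowerOf2I above correspond to concrete int32 corner cases; a small illustration (plain C++; the JS results in the comments are the motivation and are not computed here):

    #include <climits>
    #include <cstdio>

    int main() {
      // kBailoutOnMinusZero with divisor < 0: JS 0 / -8 is -0, which has no
      // int32 representation.
      // kCanOverflow with divisor == -1: INT_MIN / -1 is 2^31 and overflows int32.
      // Non-truncating uses with |divisor| > 1: JS 7 / 4 is 1.75, so the fast
      // path must deopt whenever the remainder is non-zero.
      std::printf("INT_MIN = %d, 7 %% 4 = %d\n", INT_MIN, 7 % 4);
      return 0;
    }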
+LInstruction* LChunkBuilder::DoDivByConstI(HDiv* instr) {
+ ASSERT(instr->representation().IsInteger32());
+ ASSERT(instr->left()->representation().Equals(instr->representation()));
+ ASSERT(instr->right()->representation().Equals(instr->representation()));
+ LOperand* dividend = UseRegister(instr->left());
+ int32_t divisor = instr->right()->GetInteger32Constant();
+ LInstruction* result = DefineAsRegister(new(zone()) LDivByConstI(
+ dividend, divisor));
+ if (divisor == 0 ||
+ (instr->CheckFlag(HValue::kBailoutOnMinusZero) && divisor < 0) ||
+ !instr->CheckFlag(HInstruction::kAllUsesTruncatingToInt32)) {
+ result = AssignEnvironment(result);
+ }
+ return result;
+}
+
+
+LInstruction* LChunkBuilder::DoDivI(HBinaryOperation* instr) {
+ ASSERT(instr->representation().IsSmiOrInteger32());
+ ASSERT(instr->left()->representation().Equals(instr->representation()));
+ ASSERT(instr->right()->representation().Equals(instr->representation()));
+ LOperand* dividend = UseRegister(instr->left());
+ LOperand* divisor = UseRegister(instr->right());
+ LOperand* temp = CpuFeatures::IsSupported(SUDIV) ? NULL : FixedTemp(d4);
+ LDivI* div = new(zone()) LDivI(dividend, divisor, temp);
+ return AssignEnvironment(DefineAsRegister(div));
+}
+
+
LInstruction* LChunkBuilder::DoDiv(HDiv* instr) {
if (instr->representation().IsSmiOrInteger32()) {
- ASSERT(instr->left()->representation().Equals(instr->representation()));
- ASSERT(instr->right()->representation().Equals(instr->representation()));
if (instr->RightIsPowerOf2()) {
- ASSERT(!instr->CheckFlag(HValue::kCanBeDivByZero));
- LOperand* value = UseRegisterAtStart(instr->left());
- LDivI* div = new(zone()) LDivI(value, UseConstant(instr->right()), NULL);
- return AssignEnvironment(DefineAsRegister(div));
+ return DoDivByPowerOf2I(instr);
+ } else if (instr->right()->IsConstant()) {
+ return DoDivByConstI(instr);
+ } else {
+ return DoDivI(instr);
}
- LOperand* dividend = UseRegister(instr->left());
- LOperand* divisor = UseRegister(instr->right());
- LOperand* temp = CpuFeatures::IsSupported(SUDIV) ? NULL : FixedTemp(d4);
- LDivI* div = new(zone()) LDivI(dividend, divisor, temp);
- return AssignEnvironment(DefineAsRegister(div));
} else if (instr->representation().IsDouble()) {
return DoArithmeticD(Token::DIV, instr);
} else {
@@ -1265,97 +1313,106 @@ LInstruction* LChunkBuilder::DoDiv(HDiv* instr) {
}
-bool LChunkBuilder::HasMagicNumberForDivisor(int32_t divisor) {
- uint32_t divisor_abs = abs(divisor);
- // Dividing by 0, 1, and powers of 2 is easy.
- // Note that IsPowerOf2(0) returns true;
- ASSERT(IsPowerOf2(0) == true);
- if (IsPowerOf2(divisor_abs)) return true;
+LInstruction* LChunkBuilder::DoFlooringDivByPowerOf2I(HMathFloorOfDiv* instr) {
+ LOperand* dividend = UseRegisterAtStart(instr->left());
+ int32_t divisor = instr->right()->GetInteger32Constant();
+ LInstruction* result = DefineAsRegister(new(zone()) LFlooringDivByPowerOf2I(
+ dividend, divisor));
+ if ((instr->CheckFlag(HValue::kBailoutOnMinusZero) && divisor < 0) ||
+ (instr->CheckFlag(HValue::kLeftCanBeMinInt) && divisor == -1)) {
+ result = AssignEnvironment(result);
+ }
+ return result;
+}
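Flooring division by a positive power of two can be lowered to an arithmetic right shift, which is what separates this path from the truncating LDivByPowerOf2I above; a quick check of the difference on a negative dividend (plain C++; right-shifting a negative value is implementation-defined in the standard but is an arithmetic shift on the targets involved):

    #include <cassert>

    int main() {
      int a = -7;
      assert((a >> 2) == -2);  // arithmetic shift floors: floor(-7 / 4) == -2
      assert((a / 4) == -1);   // C/C++ division truncates toward zero instead
      return 0;
    }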
- // We have magic numbers for a few specific divisors.
- // Details and proofs can be found in:
- // - Hacker's Delight, Henry S. Warren, Jr.
- // - The PowerPC Compiler Writer’s Guide
- // and probably many others.
- //
- // We handle
- // <divisor with magic numbers> * <power of 2>
- // but not
- // <divisor with magic numbers> * <other divisor with magic numbers>
- int32_t power_of_2_factor =
- CompilerIntrinsics::CountTrailingZeros(divisor_abs);
- DivMagicNumbers magic_numbers =
- DivMagicNumberFor(divisor_abs >> power_of_2_factor);
- if (magic_numbers.M != InvalidDivMagicNumber.M) return true;
- return false;
+LInstruction* LChunkBuilder::DoFlooringDivByConstI(HMathFloorOfDiv* instr) {
+ ASSERT(instr->representation().IsInteger32());
+ ASSERT(instr->left()->representation().Equals(instr->representation()));
+ ASSERT(instr->right()->representation().Equals(instr->representation()));
+ LOperand* dividend = UseRegister(instr->left());
+ int32_t divisor = instr->right()->GetInteger32Constant();
+ LOperand* temp =
+ ((divisor > 0 && !instr->CheckFlag(HValue::kLeftCanBeNegative)) ||
+ (divisor < 0 && !instr->CheckFlag(HValue::kLeftCanBePositive))) ?
+ NULL : TempRegister();
+ LInstruction* result = DefineAsRegister(
+ new(zone()) LFlooringDivByConstI(dividend, divisor, temp));
+ if (divisor == 0 ||
+ (instr->CheckFlag(HValue::kBailoutOnMinusZero) && divisor < 0)) {
+ result = AssignEnvironment(result);
+ }
+ return result;
}
LInstruction* LChunkBuilder::DoMathFloorOfDiv(HMathFloorOfDiv* instr) {
- // LMathFloorOfDiv can only handle a subset of divisors, so fall
- // back to a flooring division in all other cases.
- HValue* right = instr->right();
- if (!right->IsInteger32Constant() ||
- (!CpuFeatures::IsSupported(SUDIV) &&
- !HasMagicNumberForDivisor(HConstant::cast(right)->Integer32Value()))) {
- LOperand* dividend = UseRegister(instr->left());
- LOperand* divisor = UseRegister(right);
- LOperand* temp = CpuFeatures::IsSupported(SUDIV) ? NULL : FixedTemp(d4);
- LDivI* div = new(zone()) LDivI(dividend, divisor, temp);
- return AssignEnvironment(DefineAsRegister(div));
+ if (instr->RightIsPowerOf2()) {
+ return DoFlooringDivByPowerOf2I(instr);
+ } else if (instr->right()->IsConstant()) {
+ return DoFlooringDivByConstI(instr);
+ } else {
+ return DoDivI(instr);
+ }
+}
+
+
+LInstruction* LChunkBuilder::DoModByPowerOf2I(HMod* instr) {
+ ASSERT(instr->representation().IsSmiOrInteger32());
+ ASSERT(instr->left()->representation().Equals(instr->representation()));
+ ASSERT(instr->right()->representation().Equals(instr->representation()));
+ LOperand* dividend = UseRegisterAtStart(instr->left());
+ int32_t divisor = instr->right()->GetInteger32Constant();
+ LInstruction* result = DefineSameAsFirst(new(zone()) LModByPowerOf2I(
+ dividend, divisor));
+ if (instr->CheckFlag(HValue::kBailoutOnMinusZero)) {
+ result = AssignEnvironment(result);
}
+ return result;
+}
+
+LInstruction* LChunkBuilder::DoModByConstI(HMod* instr) {
+ ASSERT(instr->representation().IsSmiOrInteger32());
+ ASSERT(instr->left()->representation().Equals(instr->representation()));
+ ASSERT(instr->right()->representation().Equals(instr->representation()));
LOperand* dividend = UseRegister(instr->left());
- LOperand* divisor = CpuFeatures::IsSupported(SUDIV)
- ? UseRegister(right)
- : UseOrConstant(right);
- LOperand* remainder = TempRegister();
- return AssignEnvironment(DefineAsRegister(
- new(zone()) LMathFloorOfDiv(dividend, divisor, remainder)));
+ int32_t divisor = instr->right()->GetInteger32Constant();
+ LInstruction* result = DefineAsRegister(new(zone()) LModByConstI(
+ dividend, divisor));
+ if (divisor == 0 || instr->CheckFlag(HValue::kBailoutOnMinusZero)) {
+ result = AssignEnvironment(result);
+ }
+ return result;
+}
+
+
+LInstruction* LChunkBuilder::DoModI(HMod* instr) {
+ ASSERT(instr->representation().IsSmiOrInteger32());
+ ASSERT(instr->left()->representation().Equals(instr->representation()));
+ ASSERT(instr->right()->representation().Equals(instr->representation()));
+ LOperand* dividend = UseRegister(instr->left());
+ LOperand* divisor = UseRegister(instr->right());
+ LOperand* temp = CpuFeatures::IsSupported(SUDIV) ? NULL : FixedTemp(d10);
+ LOperand* temp2 = CpuFeatures::IsSupported(SUDIV) ? NULL : FixedTemp(d11);
+ LInstruction* result = DefineAsRegister(new(zone()) LModI(
+ dividend, divisor, temp, temp2));
+ if (instr->CheckFlag(HValue::kCanBeDivByZero) ||
+ instr->CheckFlag(HValue::kBailoutOnMinusZero)) {
+ result = AssignEnvironment(result);
+ }
+ return result;
}
LInstruction* LChunkBuilder::DoMod(HMod* instr) {
- HValue* left = instr->left();
- HValue* right = instr->right();
if (instr->representation().IsSmiOrInteger32()) {
- ASSERT(instr->left()->representation().Equals(instr->representation()));
- ASSERT(instr->right()->representation().Equals(instr->representation()));
if (instr->RightIsPowerOf2()) {
- ASSERT(!right->CanBeZero());
- LModI* mod = new(zone()) LModI(UseRegisterAtStart(left),
- UseConstant(right));
- LInstruction* result = DefineAsRegister(mod);
- return (left->CanBeNegative() &&
- instr->CheckFlag(HValue::kBailoutOnMinusZero))
- ? AssignEnvironment(result)
- : result;
- } else if (CpuFeatures::IsSupported(SUDIV)) {
- LModI* mod = new(zone()) LModI(UseRegister(left),
- UseRegister(right));
- LInstruction* result = DefineAsRegister(mod);
- return (right->CanBeZero() ||
- (left->RangeCanInclude(kMinInt) &&
- right->RangeCanInclude(-1) &&
- instr->CheckFlag(HValue::kBailoutOnMinusZero)) ||
- (left->CanBeNegative() &&
- instr->CanBeZero() &&
- instr->CheckFlag(HValue::kBailoutOnMinusZero)))
- ? AssignEnvironment(result)
- : result;
+ return DoModByPowerOf2I(instr);
+ } else if (instr->right()->IsConstant()) {
+ return DoModByConstI(instr);
} else {
- LModI* mod = new(zone()) LModI(UseRegister(left),
- UseRegister(right),
- FixedTemp(d10),
- FixedTemp(d11));
- LInstruction* result = DefineAsRegister(mod);
- return (right->CanBeZero() ||
- (left->CanBeNegative() &&
- instr->CanBeZero() &&
- instr->CheckFlag(HValue::kBailoutOnMinusZero)))
- ? AssignEnvironment(result)
- : result;
+ return DoModI(instr);
}
} else if (instr->representation().IsDouble()) {
return DoArithmeticD(Token::MOD, instr);
@@ -1846,25 +1903,27 @@ LInstruction* LChunkBuilder::DoChange(HChange* instr) {
if (to.IsTagged()) {
HValue* val = instr->value();
LOperand* value = UseRegisterAtStart(val);
- if (val->CheckFlag(HInstruction::kUint32)) {
- LNumberTagU* result = new(zone()) LNumberTagU(value);
- return AssignEnvironment(AssignPointerMap(DefineAsRegister(result)));
- } else if (val->HasRange() && val->range()->IsInSmiRange()) {
+ if (!instr->CheckFlag(HValue::kCanOverflow)) {
return DefineAsRegister(new(zone()) LSmiTag(value));
+ } else if (val->CheckFlag(HInstruction::kUint32)) {
+ LOperand* temp1 = TempRegister();
+ LOperand* temp2 = TempRegister();
+ LNumberTagU* result = new(zone()) LNumberTagU(value, temp1, temp2);
+ return AssignEnvironment(AssignPointerMap(DefineAsRegister(result)));
} else {
- LNumberTagI* result = new(zone()) LNumberTagI(value);
+ LOperand* temp1 = TempRegister();
+ LOperand* temp2 = TempRegister();
+ LNumberTagI* result = new(zone()) LNumberTagI(value, temp1, temp2);
return AssignEnvironment(AssignPointerMap(DefineAsRegister(result)));
}
} else if (to.IsSmi()) {
HValue* val = instr->value();
LOperand* value = UseRegister(val);
- LInstruction* result = val->CheckFlag(HInstruction::kUint32)
- ? DefineAsRegister(new(zone()) LUint32ToSmi(value))
- : DefineAsRegister(new(zone()) LInteger32ToSmi(value));
- if (val->HasRange() && val->range()->IsInSmiRange()) {
- return result;
+ LInstruction* result = DefineAsRegister(new(zone()) LSmiTag(value));
+ if (instr->CheckFlag(HValue::kCanOverflow)) {
+ result = AssignEnvironment(result);
}
- return AssignEnvironment(result);
+ return result;
} else {
ASSERT(to.IsDouble());
if (instr->value()->CheckFlag(HInstruction::kUint32)) {
@@ -1939,6 +1998,20 @@ LInstruction* LChunkBuilder::DoClampToUint8(HClampToUint8* instr) {
}
+LInstruction* LChunkBuilder::DoDoubleBits(HDoubleBits* instr) {
+ HValue* value = instr->value();
+ ASSERT(value->representation().IsDouble());
+ return DefineAsRegister(new(zone()) LDoubleBits(UseRegister(value)));
+}
+
+
+LInstruction* LChunkBuilder::DoConstructDouble(HConstructDouble* instr) {
+ LOperand* lo = UseRegister(instr->lo());
+ LOperand* hi = UseRegister(instr->hi());
+ return DefineAsRegister(new(zone()) LConstructDouble(hi, lo));
+}
+
+
LInstruction* LChunkBuilder::DoReturn(HReturn* instr) {
LOperand* context = info()->IsStub()
? UseFixed(instr->context(), cp)
@@ -2195,11 +2268,9 @@ LInstruction* LChunkBuilder::DoStoreNamedField(HStoreNamedField* instr) {
}
LOperand* val;
- if (needs_write_barrier ||
- (FLAG_track_fields && instr->field_representation().IsSmi())) {
+ if (needs_write_barrier || instr->field_representation().IsSmi()) {
val = UseTempRegister(instr->value());
- } else if (FLAG_track_double_fields &&
- instr->field_representation().IsDouble()) {
+ } else if (instr->field_representation().IsDouble()) {
val = UseRegisterAtStart(instr->value());
} else {
val = UseRegister(instr->value());
@@ -2209,8 +2280,7 @@ LInstruction* LChunkBuilder::DoStoreNamedField(HStoreNamedField* instr) {
LOperand* temp = needs_write_barrier_for_map ? TempRegister() : NULL;
LStoreNamedField* result = new(zone()) LStoreNamedField(obj, val, temp);
- if (FLAG_track_heap_object_fields &&
- instr->field_representation().IsHeapObject()) {
+ if (instr->field_representation().IsHeapObject()) {
if (!instr->value()->type().IsHeapObject()) {
return AssignEnvironment(result);
}
diff --git a/deps/v8/src/arm/lithium-arm.h b/deps/v8/src/arm/lithium-arm.h
index 29a176628e..34eb510177 100644
--- a/deps/v8/src/arm/lithium-arm.h
+++ b/deps/v8/src/arm/lithium-arm.h
@@ -80,17 +80,23 @@ class LCodeGen;
V(ConstantI) \
V(ConstantS) \
V(ConstantT) \
+ V(ConstructDouble) \
V(Context) \
V(DateField) \
V(DebugBreak) \
V(DeclareGlobals) \
V(Deoptimize) \
+ V(DivByConstI) \
+ V(DivByPowerOf2I) \
V(DivI) \
+ V(DoubleBits) \
V(DoubleToI) \
V(DoubleToSmi) \
V(Drop) \
V(Dummy) \
V(DummyUse) \
+ V(FlooringDivByConstI) \
+ V(FlooringDivByPowerOf2I) \
V(ForInCacheArray) \
V(ForInPrepareMap) \
V(FunctionLiteral) \
@@ -103,7 +109,6 @@ class LCodeGen;
V(InstanceOfKnownGlobal) \
V(InstructionGap) \
V(Integer32ToDouble) \
- V(Integer32ToSmi) \
V(InvokeFunction) \
V(IsConstructCallAndBranch) \
V(IsObjectAndBranch) \
@@ -124,14 +129,16 @@ class LCodeGen;
V(LoadNamedGeneric) \
V(MapEnumLength) \
V(MathAbs) \
+ V(MathClz32) \
V(MathExp) \
V(MathFloor) \
- V(MathFloorOfDiv) \
V(MathLog) \
V(MathMinMax) \
V(MathPowHalf) \
V(MathRound) \
V(MathSqrt) \
+ V(ModByConstI) \
+ V(ModByPowerOf2I) \
V(ModI) \
V(MulI) \
V(MultiplyAddD) \
@@ -173,7 +180,6 @@ class LCodeGen;
V(Typeof) \
V(TypeofIsAndBranch) \
V(Uint32ToDouble) \
- V(Uint32ToSmi) \
V(UnknownOSRValue) \
V(WrapReceiver)
@@ -614,12 +620,45 @@ class LArgumentsElements V8_FINAL : public LTemplateInstruction<1, 0, 0> {
};
+class LModByPowerOf2I V8_FINAL : public LTemplateInstruction<1, 1, 0> {
+ public:
+ LModByPowerOf2I(LOperand* dividend, int32_t divisor) {
+ inputs_[0] = dividend;
+ divisor_ = divisor;
+ }
+
+ LOperand* dividend() { return inputs_[0]; }
+ int32_t divisor() const { return divisor_; }
+
+ DECLARE_CONCRETE_INSTRUCTION(ModByPowerOf2I, "mod-by-power-of-2-i")
+ DECLARE_HYDROGEN_ACCESSOR(Mod)
+
+ private:
+ int32_t divisor_;
+};
+
+
+class LModByConstI V8_FINAL : public LTemplateInstruction<1, 1, 0> {
+ public:
+ LModByConstI(LOperand* dividend, int32_t divisor) {
+ inputs_[0] = dividend;
+ divisor_ = divisor;
+ }
+
+ LOperand* dividend() { return inputs_[0]; }
+ int32_t divisor() const { return divisor_; }
+
+ DECLARE_CONCRETE_INSTRUCTION(ModByConstI, "mod-by-const-i")
+ DECLARE_HYDROGEN_ACCESSOR(Mod)
+
+ private:
+ int32_t divisor_;
+};
+
+
class LModI V8_FINAL : public LTemplateInstruction<1, 2, 2> {
public:
- LModI(LOperand* left,
- LOperand* right,
- LOperand* temp = NULL,
- LOperand* temp2 = NULL) {
+ LModI(LOperand* left, LOperand* right, LOperand* temp, LOperand* temp2) {
inputs_[0] = left;
inputs_[1] = right;
temps_[0] = temp;
@@ -636,6 +675,42 @@ class LModI V8_FINAL : public LTemplateInstruction<1, 2, 2> {
};
+class LDivByPowerOf2I V8_FINAL : public LTemplateInstruction<1, 1, 0> {
+ public:
+ LDivByPowerOf2I(LOperand* dividend, int32_t divisor) {
+ inputs_[0] = dividend;
+ divisor_ = divisor;
+ }
+
+ LOperand* dividend() { return inputs_[0]; }
+ int32_t divisor() const { return divisor_; }
+
+ DECLARE_CONCRETE_INSTRUCTION(DivByPowerOf2I, "div-by-power-of-2-i")
+ DECLARE_HYDROGEN_ACCESSOR(Div)
+
+ private:
+ int32_t divisor_;
+};
+
+
+class LDivByConstI V8_FINAL : public LTemplateInstruction<1, 1, 0> {
+ public:
+ LDivByConstI(LOperand* dividend, int32_t divisor) {
+ inputs_[0] = dividend;
+ divisor_ = divisor;
+ }
+
+ LOperand* dividend() { return inputs_[0]; }
+ int32_t divisor() const { return divisor_; }
+
+ DECLARE_CONCRETE_INSTRUCTION(DivByConstI, "div-by-const-i")
+ DECLARE_HYDROGEN_ACCESSOR(Div)
+
+ private:
+ int32_t divisor_;
+};
+
+
class LDivI V8_FINAL : public LTemplateInstruction<1, 2, 1> {
public:
LDivI(LOperand* left, LOperand* right, LOperand* temp) {
@@ -648,29 +723,47 @@ class LDivI V8_FINAL : public LTemplateInstruction<1, 2, 1> {
LOperand* right() { return inputs_[1]; }
LOperand* temp() { return temps_[0]; }
- bool is_flooring() { return hydrogen_value()->IsMathFloorOfDiv(); }
-
DECLARE_CONCRETE_INSTRUCTION(DivI, "div-i")
- DECLARE_HYDROGEN_ACCESSOR(Div)
+ DECLARE_HYDROGEN_ACCESSOR(BinaryOperation)
};
-class LMathFloorOfDiv V8_FINAL : public LTemplateInstruction<1, 2, 1> {
+class LFlooringDivByPowerOf2I V8_FINAL : public LTemplateInstruction<1, 1, 0> {
public:
- LMathFloorOfDiv(LOperand* left,
- LOperand* right,
- LOperand* temp = NULL) {
- inputs_[0] = left;
- inputs_[1] = right;
+ LFlooringDivByPowerOf2I(LOperand* dividend, int32_t divisor) {
+ inputs_[0] = dividend;
+ divisor_ = divisor;
+ }
+
+ LOperand* dividend() { return inputs_[0]; }
+ int32_t divisor() { return divisor_; }
+
+ DECLARE_CONCRETE_INSTRUCTION(FlooringDivByPowerOf2I,
+ "flooring-div-by-power-of-2-i")
+ DECLARE_HYDROGEN_ACCESSOR(MathFloorOfDiv)
+
+ private:
+ int32_t divisor_;
+};
+
+
+class LFlooringDivByConstI V8_FINAL : public LTemplateInstruction<1, 1, 2> {
+ public:
+ LFlooringDivByConstI(LOperand* dividend, int32_t divisor, LOperand* temp) {
+ inputs_[0] = dividend;
+ divisor_ = divisor;
temps_[0] = temp;
}
- LOperand* left() { return inputs_[0]; }
- LOperand* right() { return inputs_[1]; }
+ LOperand* dividend() { return inputs_[0]; }
+ int32_t divisor() const { return divisor_; }
LOperand* temp() { return temps_[0]; }
- DECLARE_CONCRETE_INSTRUCTION(MathFloorOfDiv, "math-floor-of-div")
+ DECLARE_CONCRETE_INSTRUCTION(FlooringDivByConstI, "flooring-div-by-const-i")
DECLARE_HYDROGEN_ACCESSOR(MathFloorOfDiv)
+
+ private:
+ int32_t divisor_;
};
@@ -809,6 +902,18 @@ class LMathLog V8_FINAL : public LTemplateInstruction<1, 1, 0> {
};
+class LMathClz32 V8_FINAL : public LTemplateInstruction<1, 1, 0> {
+ public:
+ explicit LMathClz32(LOperand* value) {
+ inputs_[0] = value;
+ }
+
+ LOperand* value() { return inputs_[0]; }
+
+ DECLARE_CONCRETE_INSTRUCTION(MathClz32, "math-clz32")
+};
+
+
class LMathExp V8_FINAL : public LTemplateInstruction<1, 1, 3> {
public:
LMathExp(LOperand* value,
@@ -1885,19 +1990,6 @@ class LInteger32ToDouble V8_FINAL : public LTemplateInstruction<1, 1, 0> {
};
-class LInteger32ToSmi V8_FINAL : public LTemplateInstruction<1, 1, 0> {
- public:
- explicit LInteger32ToSmi(LOperand* value) {
- inputs_[0] = value;
- }
-
- LOperand* value() { return inputs_[0]; }
-
- DECLARE_CONCRETE_INSTRUCTION(Integer32ToSmi, "int32-to-smi")
- DECLARE_HYDROGEN_ACCESSOR(Change)
-};
-
-
class LUint32ToDouble V8_FINAL : public LTemplateInstruction<1, 1, 0> {
public:
explicit LUint32ToDouble(LOperand* value) {
@@ -1910,38 +2002,33 @@ class LUint32ToDouble V8_FINAL : public LTemplateInstruction<1, 1, 0> {
};
-class LUint32ToSmi V8_FINAL : public LTemplateInstruction<1, 1, 0> {
- public:
- explicit LUint32ToSmi(LOperand* value) {
- inputs_[0] = value;
- }
-
- LOperand* value() { return inputs_[0]; }
-
- DECLARE_CONCRETE_INSTRUCTION(Uint32ToSmi, "uint32-to-smi")
- DECLARE_HYDROGEN_ACCESSOR(Change)
-};
-
-
-class LNumberTagI V8_FINAL : public LTemplateInstruction<1, 1, 0> {
+class LNumberTagI V8_FINAL : public LTemplateInstruction<1, 1, 2> {
public:
- explicit LNumberTagI(LOperand* value) {
+ LNumberTagI(LOperand* value, LOperand* temp1, LOperand* temp2) {
inputs_[0] = value;
+ temps_[0] = temp1;
+ temps_[1] = temp2;
}
LOperand* value() { return inputs_[0]; }
+ LOperand* temp1() { return temps_[0]; }
+ LOperand* temp2() { return temps_[1]; }
DECLARE_CONCRETE_INSTRUCTION(NumberTagI, "number-tag-i")
};
-class LNumberTagU V8_FINAL : public LTemplateInstruction<1, 1, 0> {
+class LNumberTagU V8_FINAL : public LTemplateInstruction<1, 1, 2> {
public:
- explicit LNumberTagU(LOperand* value) {
+ LNumberTagU(LOperand* value, LOperand* temp1, LOperand* temp2) {
inputs_[0] = value;
+ temps_[0] = temp1;
+ temps_[1] = temp2;
}
LOperand* value() { return inputs_[0]; }
+ LOperand* temp1() { return temps_[0]; }
+ LOperand* temp2() { return temps_[1]; }
DECLARE_CONCRETE_INSTRUCTION(NumberTagU, "number-tag-u")
};
@@ -2026,6 +2113,7 @@ class LSmiTag V8_FINAL : public LTemplateInstruction<1, 1, 0> {
LOperand* value() { return inputs_[0]; }
DECLARE_CONCRETE_INSTRUCTION(SmiTag, "smi-tag")
+ DECLARE_HYDROGEN_ACCESSOR(Change)
};
@@ -2101,7 +2189,7 @@ class LStoreNamedGeneric V8_FINAL : public LTemplateInstruction<0, 3, 0> {
virtual void PrintDataTo(StringStream* stream) V8_OVERRIDE;
Handle<Object> name() const { return hydrogen()->name(); }
- StrictModeFlag strict_mode_flag() { return hydrogen()->strict_mode_flag(); }
+ StrictMode strict_mode() { return hydrogen()->strict_mode(); }
};
@@ -2164,7 +2252,7 @@ class LStoreKeyedGeneric V8_FINAL : public LTemplateInstruction<0, 4, 0> {
virtual void PrintDataTo(StringStream* stream) V8_OVERRIDE;
- StrictModeFlag strict_mode_flag() { return hydrogen()->strict_mode_flag(); }
+ StrictMode strict_mode() { return hydrogen()->strict_mode(); }
};
@@ -2365,6 +2453,33 @@ class LClampTToUint8 V8_FINAL : public LTemplateInstruction<1, 1, 1> {
};
+class LDoubleBits V8_FINAL : public LTemplateInstruction<1, 1, 0> {
+ public:
+ explicit LDoubleBits(LOperand* value) {
+ inputs_[0] = value;
+ }
+
+ LOperand* value() { return inputs_[0]; }
+
+ DECLARE_CONCRETE_INSTRUCTION(DoubleBits, "double-bits")
+ DECLARE_HYDROGEN_ACCESSOR(DoubleBits)
+};
+
+
+class LConstructDouble V8_FINAL : public LTemplateInstruction<1, 2, 0> {
+ public:
+ LConstructDouble(LOperand* hi, LOperand* lo) {
+ inputs_[0] = hi;
+ inputs_[1] = lo;
+ }
+
+ LOperand* hi() { return inputs_[0]; }
+ LOperand* lo() { return inputs_[1]; }
+
+ DECLARE_CONCRETE_INSTRUCTION(ConstructDouble, "construct-double")
+};
+
+
class LAllocate V8_FINAL : public LTemplateInstruction<1, 2, 2> {
public:
LAllocate(LOperand* context,
@@ -2579,10 +2694,7 @@ class LChunkBuilder V8_FINAL : public LChunkBuilderBase {
current_instruction_(NULL),
current_block_(NULL),
next_block_(NULL),
- allocator_(allocator),
- position_(RelocInfo::kNoPosition),
- instruction_pending_deoptimization_environment_(NULL),
- pending_deoptimization_ast_id_(BailoutId::None()) { }
+ allocator_(allocator) { }
// Build the sequence for the graph.
LPlatformChunk* Build();
@@ -2607,6 +2719,15 @@ class LChunkBuilder V8_FINAL : public LChunkBuilderBase {
LInstruction* DoMathExp(HUnaryMathOperation* instr);
LInstruction* DoMathSqrt(HUnaryMathOperation* instr);
LInstruction* DoMathPowHalf(HUnaryMathOperation* instr);
+ LInstruction* DoMathClz32(HUnaryMathOperation* instr);
+ LInstruction* DoDivByPowerOf2I(HDiv* instr);
+ LInstruction* DoDivByConstI(HDiv* instr);
+ LInstruction* DoDivI(HBinaryOperation* instr);
+ LInstruction* DoModByPowerOf2I(HMod* instr);
+ LInstruction* DoModByConstI(HMod* instr);
+ LInstruction* DoModI(HMod* instr);
+ LInstruction* DoFlooringDivByPowerOf2I(HMathFloorOfDiv* instr);
+ LInstruction* DoFlooringDivByConstI(HMathFloorOfDiv* instr);
private:
enum Status {
@@ -2717,9 +2838,6 @@ class LChunkBuilder V8_FINAL : public LChunkBuilderBase {
HBasicBlock* current_block_;
HBasicBlock* next_block_;
LAllocator* allocator_;
- int position_;
- LInstruction* instruction_pending_deoptimization_environment_;
- BailoutId pending_deoptimization_ast_id_;
DISALLOW_COPY_AND_ASSIGN(LChunkBuilder);
};
diff --git a/deps/v8/src/arm/lithium-codegen-arm.cc b/deps/v8/src/arm/lithium-codegen-arm.cc
index cfcc56da29..7152ba21cc 100644
--- a/deps/v8/src/arm/lithium-codegen-arm.cc
+++ b/deps/v8/src/arm/lithium-codegen-arm.cc
@@ -84,7 +84,7 @@ void LCodeGen::FinishCode(Handle<Code> code) {
ASSERT(is_done());
code->set_stack_slots(GetStackSlotCount());
code->set_safepoint_table_offset(safepoints_.GetCodeOffset());
- RegisterDependentCodeForEmbeddedMaps(code);
+ if (code->is_optimized_code()) RegisterWeakObjectsInOptimizedCode(code);
PopulateDeoptimizationData(code);
info()->CommitDependencies(code);
}
@@ -147,11 +147,11 @@ bool LCodeGen::GeneratePrologue() {
// fp: Caller's frame pointer.
// lr: Caller's pc.
- // Classic mode functions and builtins need to replace the receiver with the
+ // Sloppy mode functions and builtins need to replace the receiver with the
// global proxy when called as functions (without an explicit receiver
// object).
if (info_->this_has_uses() &&
- info_->is_classic_mode() &&
+ info_->strict_mode() == SLOPPY &&
!info_->is_native()) {
Label ok;
int receiver_offset = info_->scope()->num_parameters() * kPointerSize;
@@ -173,7 +173,6 @@ bool LCodeGen::GeneratePrologue() {
__ Prologue(info()->IsStub() ? BUILD_STUB_FRAME : BUILD_FUNCTION_FRAME);
frame_is_built_ = true;
info_->AddNoFrameRange(0, masm_->pc_offset());
- __ LoadConstantPoolPointerRegister();
}
// Reserve space for the stack slots needed by the code.
@@ -212,7 +211,7 @@ bool LCodeGen::GeneratePrologue() {
__ CallStub(&stub);
} else {
__ push(r1);
- __ CallRuntime(Runtime::kNewFunctionContext, 1);
+ __ CallRuntime(Runtime::kHiddenNewFunctionContext, 1);
}
RecordSafepoint(Safepoint::kNoLazyDeopt);
// Context is returned in both r0 and cp. It replaces the context
@@ -270,6 +269,9 @@ void LCodeGen::GenerateOsrPrologue() {
void LCodeGen::GenerateBodyInstructionPre(LInstruction* instr) {
+ if (instr->IsCall()) {
+ EnsureSpaceForLazyDeopt(Deoptimizer::patch_size());
+ }
if (!instr->IsLazyBailout() && !instr->IsGap()) {
safepoints_.BumpLastLazySafepointIndex();
}
@@ -284,7 +286,8 @@ bool LCodeGen::GenerateDeferredCode() {
HValue* value =
instructions_->at(code->instruction_index())->hydrogen_value();
- RecordAndWritePosition(value->position());
+ RecordAndWritePosition(
+ chunk()->graph()->SourcePositionToScriptPosition(value->position()));
Comment(";;; <@%d,#%d> "
"-------------------- Deferred %s --------------------",
@@ -433,7 +436,7 @@ Register LCodeGen::EmitLoadRegister(LOperand* op, Register scratch) {
__ Move(scratch, literal);
}
return scratch;
- } else if (op->IsStackSlot() || op->IsArgument()) {
+ } else if (op->IsStackSlot()) {
__ ldr(scratch, ToMemOperand(op));
return scratch;
}
@@ -469,7 +472,7 @@ DwVfpRegister LCodeGen::EmitLoadDoubleRegister(LOperand* op,
} else if (r.IsTagged()) {
Abort(kUnsupportedTaggedImmediate);
}
- } else if (op->IsStackSlot() || op->IsArgument()) {
+ } else if (op->IsStackSlot()) {
// TODO(regis): Why is vldr not taking a MemOperand?
// __ vldr(dbl_scratch, ToMemOperand(op));
MemOperand mem_op = ToMemOperand(op);
@@ -689,10 +692,6 @@ void LCodeGen::AddToTranslation(LEnvironment* environment,
}
} else if (op->IsDoubleStackSlot()) {
translation->StoreDoubleStackSlot(op->index());
- } else if (op->IsArgument()) {
- ASSERT(is_tagged);
- int src_index = GetStackSlotCount() + op->index();
- translation->StoreStackSlot(src_index);
} else if (op->IsRegister()) {
Register reg = ToRegister(op);
if (is_tagged) {
@@ -913,6 +912,14 @@ void LCodeGen::PopulateDeoptimizationData(Handle<Code> code) {
translations_.CreateByteArray(isolate()->factory());
data->SetTranslationByteArray(*translations);
data->SetInlinedFunctionCount(Smi::FromInt(inlined_function_count_));
+ data->SetOptimizationId(Smi::FromInt(info_->optimization_id()));
+ if (info_->IsOptimizing()) {
+ // Reference to shared function info does not change between phases.
+ AllowDeferredHandleDereference allow_handle_dereference;
+ data->SetSharedFunctionInfo(*info_->shared_info());
+ } else {
+ data->SetSharedFunctionInfo(Smi::FromInt(0));
+ }
Handle<FixedArray> literals =
factory()->NewFixedArray(deoptimization_literals_.length(), TENURED);
@@ -1113,36 +1120,70 @@ void LCodeGen::DoUnknownOSRValue(LUnknownOSRValue* instr) {
}
-void LCodeGen::DoModI(LModI* instr) {
+void LCodeGen::DoModByPowerOf2I(LModByPowerOf2I* instr) {
+ Register dividend = ToRegister(instr->dividend());
+ int32_t divisor = instr->divisor();
+ ASSERT(dividend.is(ToRegister(instr->result())));
+
+ // Theoretically, a variation of the branch-free code for integer division by
+ // a power of 2 (calculating the remainder via an additional multiplication
+ // (which gets simplified to an 'and') and subtraction) should be faster, and
+ // this is exactly what GCC and clang emit. Nevertheless, benchmarks seem to
+ // indicate that positive dividends are heavily favored, so the branching
+ // version performs better.
HMod* hmod = instr->hydrogen();
- HValue* left = hmod->left();
- HValue* right = hmod->right();
- if (hmod->RightIsPowerOf2()) {
- // TODO(svenpanne) We should really do the strength reduction on the
- // Hydrogen level.
- Register left_reg = ToRegister(instr->left());
- Register result_reg = ToRegister(instr->result());
+ int32_t mask = divisor < 0 ? -(divisor + 1) : (divisor - 1);
+ Label dividend_is_not_negative, done;
+ if (hmod->CheckFlag(HValue::kLeftCanBeNegative)) {
+ __ cmp(dividend, Operand::Zero());
+ __ b(pl, &dividend_is_not_negative);
+ // Note that this is correct even for kMinInt operands.
+ __ rsb(dividend, dividend, Operand::Zero());
+ __ and_(dividend, dividend, Operand(mask));
+ __ rsb(dividend, dividend, Operand::Zero(), SetCC);
+ if (hmod->CheckFlag(HValue::kBailoutOnMinusZero)) {
+ DeoptimizeIf(eq, instr->environment());
+ }
+ __ b(&done);
+ }
- // Note: The code below even works when right contains kMinInt.
- int32_t divisor = Abs(right->GetInteger32Constant());
+ __ bind(&dividend_is_not_negative);
+ __ and_(dividend, dividend, Operand(mask));
+ __ bind(&done);
+}
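
The DoModByPowerOf2I sequence above encodes a simple scalar identity: when |divisor| is 2^k, the remainder is the low k bits of |dividend|, re-signed with the sign of the dividend, because JavaScript's % truncates toward zero. A minimal C++ sketch of the same computation; the function name is illustrative, and the unsigned negation mirrors the wrap-around of the rsb instruction so that a kMinInt dividend stays well defined:

    #include <cstdint>

    int32_t ModByPowerOf2(int32_t dividend, int32_t divisor) {
      // Same mask expression as the generated code; no overflow even for
      // divisor == kMinInt.
      int32_t mask = divisor < 0 ? -(divisor + 1) : divisor - 1;
      if (dividend >= 0) return dividend & mask;
      uint32_t magnitude = 0u - static_cast<uint32_t>(dividend);
      // A zero result here with a negative dividend is the "-0" case that the
      // kBailoutOnMinusZero deopt above guards against.
      return -static_cast<int32_t>(magnitude & static_cast<uint32_t>(mask));
    }
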
- Label left_is_not_negative, done;
- if (left->CanBeNegative()) {
- __ cmp(left_reg, Operand::Zero());
- __ b(pl, &left_is_not_negative);
- __ rsb(result_reg, left_reg, Operand::Zero());
- __ and_(result_reg, result_reg, Operand(divisor - 1));
- __ rsb(result_reg, result_reg, Operand::Zero(), SetCC);
- if (hmod->CheckFlag(HValue::kBailoutOnMinusZero)) {
- DeoptimizeIf(eq, instr->environment());
- }
- __ b(&done);
- }
- __ bind(&left_is_not_negative);
- __ and_(result_reg, left_reg, Operand(divisor - 1));
- __ bind(&done);
- } else if (CpuFeatures::IsSupported(SUDIV)) {
+void LCodeGen::DoModByConstI(LModByConstI* instr) {
+ Register dividend = ToRegister(instr->dividend());
+ int32_t divisor = instr->divisor();
+ Register result = ToRegister(instr->result());
+ ASSERT(!dividend.is(result));
+
+ if (divisor == 0) {
+ DeoptimizeIf(al, instr->environment());
+ return;
+ }
+
+ __ TruncatingDiv(result, dividend, Abs(divisor));
+ __ mov(ip, Operand(Abs(divisor)));
+ __ smull(result, ip, result, ip);
+ __ sub(result, dividend, result, SetCC);
+
+ // Check for negative zero.
+ HMod* hmod = instr->hydrogen();
+ if (hmod->CheckFlag(HValue::kBailoutOnMinusZero)) {
+ Label remainder_not_zero;
+ __ b(ne, &remainder_not_zero);
+ __ cmp(dividend, Operand::Zero());
+ DeoptimizeIf(lt, instr->environment());
+ __ bind(&remainder_not_zero);
+ }
+}
+
+
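
DoModByConstI above derives the remainder from a truncating quotient: q = trunc(dividend / |divisor|), then remainder = dividend - q * |divisor|. Using the divisor's magnitude is valid because a truncating remainder depends only on |divisor|. A scalar sketch, assuming divisor != 0 and divisor != kMinInt (the zero case deopts unconditionally above); plain '/' stands in for the TruncatingDiv macro:

    #include <cstdint>

    int32_t ModByConst(int32_t dividend, int32_t divisor) {
      int32_t abs_divisor = divisor < 0 ? -divisor : divisor;
      int32_t quotient = dividend / abs_divisor;  // stands in for __ TruncatingDiv
      // A zero remainder with dividend < 0 is the "-0" case checked above.
      return dividend - quotient * abs_divisor;
    }
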
+void LCodeGen::DoModI(LModI* instr) {
+ HMod* hmod = instr->hydrogen();
+ if (CpuFeatures::IsSupported(SUDIV)) {
CpuFeatureScope scope(masm(), SUDIV);
Register left_reg = ToRegister(instr->left());
@@ -1152,14 +1193,14 @@ void LCodeGen::DoModI(LModI* instr) {
Label done;
// Check for x % 0, sdiv might signal an exception. We have to deopt in this
// case because we can't return a NaN.
- if (right->CanBeZero()) {
+ if (hmod->CheckFlag(HValue::kCanBeDivByZero)) {
__ cmp(right_reg, Operand::Zero());
DeoptimizeIf(eq, instr->environment());
}
// Check for kMinInt % -1, sdiv will return kMinInt, which is not what we
// want. We have to deopt if we care about -0, because we can't return that.
- if (left->RangeCanInclude(kMinInt) && right->RangeCanInclude(-1)) {
+ if (hmod->CheckFlag(HValue::kCanOverflow)) {
Label no_overflow_possible;
__ cmp(left_reg, Operand(kMinInt));
__ b(ne, &no_overflow_possible);
@@ -1182,9 +1223,7 @@ void LCodeGen::DoModI(LModI* instr) {
__ mls(result_reg, result_reg, right_reg, left_reg);
// If we care about -0, test if the dividend is <0 and the result is 0.
- if (left->CanBeNegative() &&
- hmod->CanBeZero() &&
- hmod->CheckFlag(HValue::kBailoutOnMinusZero)) {
+ if (hmod->CheckFlag(HValue::kBailoutOnMinusZero)) {
__ cmp(result_reg, Operand::Zero());
__ b(ne, &done);
__ cmp(left_reg, Operand::Zero());
@@ -1211,7 +1250,7 @@ void LCodeGen::DoModI(LModI* instr) {
Label done;
// Check for x % 0, we have to deopt in this case because we can't return a
// NaN.
- if (right->CanBeZero()) {
+ if (hmod->CheckFlag(HValue::kCanBeDivByZero)) {
__ cmp(right_reg, Operand::Zero());
DeoptimizeIf(eq, instr->environment());
}
@@ -1240,9 +1279,7 @@ void LCodeGen::DoModI(LModI* instr) {
__ sub(result_reg, left_reg, scratch, SetCC);
// If we care about -0, test if the dividend is <0 and the result is 0.
- if (left->CanBeNegative() &&
- hmod->CanBeZero() &&
- hmod->CheckFlag(HValue::kBailoutOnMinusZero)) {
+ if (hmod->CheckFlag(HValue::kBailoutOnMinusZero)) {
__ b(ne, &done);
__ cmp(left_reg, Operand::Zero());
DeoptimizeIf(mi, instr->environment());
@@ -1252,165 +1289,94 @@ void LCodeGen::DoModI(LModI* instr) {
}
-void LCodeGen::EmitSignedIntegerDivisionByConstant(
- Register result,
- Register dividend,
- int32_t divisor,
- Register remainder,
- Register scratch,
- LEnvironment* environment) {
- ASSERT(!AreAliased(dividend, scratch, ip));
- ASSERT(LChunkBuilder::HasMagicNumberForDivisor(divisor));
+void LCodeGen::DoDivByPowerOf2I(LDivByPowerOf2I* instr) {
+ Register dividend = ToRegister(instr->dividend());
+ int32_t divisor = instr->divisor();
+ Register result = ToRegister(instr->result());
+ ASSERT(divisor == kMinInt || (divisor != 0 && IsPowerOf2(Abs(divisor))));
+ ASSERT(!result.is(dividend));
- uint32_t divisor_abs = abs(divisor);
+ // Check for (0 / -x) that will produce negative zero.
+ HDiv* hdiv = instr->hydrogen();
+ if (hdiv->CheckFlag(HValue::kBailoutOnMinusZero) && divisor < 0) {
+ __ cmp(dividend, Operand::Zero());
+ DeoptimizeIf(eq, instr->environment());
+ }
+ // Check for (kMinInt / -1).
+ if (hdiv->CheckFlag(HValue::kCanOverflow) && divisor == -1) {
+ __ cmp(dividend, Operand(kMinInt));
+ DeoptimizeIf(eq, instr->environment());
+ }
+ // Deoptimize if remainder will not be 0.
+ if (!hdiv->CheckFlag(HInstruction::kAllUsesTruncatingToInt32) &&
+ divisor != 1 && divisor != -1) {
+ int32_t mask = divisor < 0 ? -(divisor + 1) : (divisor - 1);
+ __ tst(dividend, Operand(mask));
+ DeoptimizeIf(ne, instr->environment());
+ }
- int32_t power_of_2_factor =
- CompilerIntrinsics::CountTrailingZeros(divisor_abs);
+ if (divisor == -1) { // Nice shortcut, not needed for correctness.
+ __ rsb(result, dividend, Operand(0));
+ return;
+ }
+ int32_t shift = WhichPowerOf2Abs(divisor);
+ if (shift == 0) {
+ __ mov(result, dividend);
+ } else if (shift == 1) {
+ __ add(result, dividend, Operand(dividend, LSR, 31));
+ } else {
+ __ mov(result, Operand(dividend, ASR, 31));
+ __ add(result, dividend, Operand(result, LSR, 32 - shift));
+ }
+ if (shift > 0) __ mov(result, Operand(result, ASR, shift));
+ if (divisor < 0) __ rsb(result, result, Operand(0));
+}
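
The shift sequence in DoDivByPowerOf2I above is the usual bias trick: an arithmetic right shift by k rounds toward minus infinity, so 2^k - 1 is added to negative dividends first to make the quotient round toward zero. A C++ sketch of the same arithmetic under the assumptions the lowering itself makes: |divisor| is a power of two, the kMinInt / -1 case has already deopted, and two's-complement wrap-around plus arithmetic shifts of negative values behave as on ARM. The remainder deopt above serves the non-truncating uses; the sketch only models the quotient.

    #include <cstdint>

    int32_t DivByPowerOf2(int32_t dividend, int32_t divisor) {
      uint32_t abs_divisor = divisor < 0 ? 0u - static_cast<uint32_t>(divisor)
                                         : static_cast<uint32_t>(divisor);
      int shift = 0;                                  // log2(|divisor|)
      while ((1u << shift) != abs_divisor) shift++;
      // 0 for non-negative dividends, 2^shift - 1 for negative ones.
      uint32_t bias = shift == 0
          ? 0u
          : static_cast<uint32_t>(dividend >> 31) >> (32 - shift);
      int32_t quotient =
          static_cast<int32_t>(static_cast<uint32_t>(dividend) + bias) >> shift;
      return divisor < 0 ? -quotient : quotient;
    }
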
- switch (divisor_abs) {
- case 0:
- DeoptimizeIf(al, environment);
- return;
- case 1:
- if (divisor > 0) {
- __ Move(result, dividend);
- } else {
- __ rsb(result, dividend, Operand::Zero(), SetCC);
- DeoptimizeIf(vs, environment);
- }
- // Compute the remainder.
- __ mov(remainder, Operand::Zero());
- return;
+void LCodeGen::DoDivByConstI(LDivByConstI* instr) {
+ Register dividend = ToRegister(instr->dividend());
+ int32_t divisor = instr->divisor();
+ Register result = ToRegister(instr->result());
+ ASSERT(!dividend.is(result));
- default:
- if (IsPowerOf2(divisor_abs)) {
- // Branch and condition free code for integer division by a power
- // of two.
- int32_t power = WhichPowerOf2(divisor_abs);
- if (power > 1) {
- __ mov(scratch, Operand(dividend, ASR, power - 1));
- }
- __ add(scratch, dividend, Operand(scratch, LSR, 32 - power));
- __ mov(result, Operand(scratch, ASR, power));
- // Negate if necessary.
- // We don't need to check for overflow because the case '-1' is
- // handled separately.
- if (divisor < 0) {
- ASSERT(divisor != -1);
- __ rsb(result, result, Operand::Zero());
- }
- // Compute the remainder.
- if (divisor > 0) {
- __ sub(remainder, dividend, Operand(result, LSL, power));
- } else {
- __ add(remainder, dividend, Operand(result, LSL, power));
- }
- return;
- } else {
- // Use magic numbers for a few specific divisors.
- // Details and proofs can be found in:
- // - Hacker's Delight, Henry S. Warren, Jr.
- // - The PowerPC Compiler Writer’s Guide
- // and probably many others.
- //
- // We handle
- // <divisor with magic numbers> * <power of 2>
- // but not
- // <divisor with magic numbers> * <other divisor with magic numbers>
- DivMagicNumbers magic_numbers =
- DivMagicNumberFor(divisor_abs >> power_of_2_factor);
- // Branch and condition free code for integer division by a power
- // of two.
- const int32_t M = magic_numbers.M;
- const int32_t s = magic_numbers.s + power_of_2_factor;
-
- __ mov(ip, Operand(M));
- __ smull(ip, scratch, dividend, ip);
- if (M < 0) {
- __ add(scratch, scratch, Operand(dividend));
- }
- if (s > 0) {
- __ mov(scratch, Operand(scratch, ASR, s));
- }
- __ add(result, scratch, Operand(dividend, LSR, 31));
- if (divisor < 0) __ rsb(result, result, Operand::Zero());
- // Compute the remainder.
- __ mov(ip, Operand(divisor));
- // This sequence could be replaced with 'mls' when
- // it gets implemented.
- __ mul(scratch, result, ip);
- __ sub(remainder, dividend, scratch);
- }
+ if (divisor == 0) {
+ DeoptimizeIf(al, instr->environment());
+ return;
}
-}
-
-void LCodeGen::DoDivI(LDivI* instr) {
- if (!instr->is_flooring() && instr->hydrogen()->RightIsPowerOf2()) {
- const Register dividend = ToRegister(instr->left());
- const Register result = ToRegister(instr->result());
- int32_t divisor = instr->hydrogen()->right()->GetInteger32Constant();
- int32_t test_value = 0;
- int32_t power = 0;
-
- if (divisor > 0) {
- test_value = divisor - 1;
- power = WhichPowerOf2(divisor);
- } else {
- // Check for (0 / -x) that will produce negative zero.
- if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) {
- __ cmp(dividend, Operand::Zero());
- DeoptimizeIf(eq, instr->environment());
- }
- // Check for (kMinInt / -1).
- if (divisor == -1 && instr->hydrogen()->CheckFlag(HValue::kCanOverflow)) {
- __ cmp(dividend, Operand(kMinInt));
- DeoptimizeIf(eq, instr->environment());
- }
- test_value = - divisor - 1;
- power = WhichPowerOf2(-divisor);
- }
+ // Check for (0 / -x) that will produce negative zero.
+ HDiv* hdiv = instr->hydrogen();
+ if (hdiv->CheckFlag(HValue::kBailoutOnMinusZero) && divisor < 0) {
+ __ cmp(dividend, Operand::Zero());
+ DeoptimizeIf(eq, instr->environment());
+ }
- if (test_value != 0) {
- if (instr->hydrogen()->CheckFlag(
- HInstruction::kAllUsesTruncatingToInt32)) {
- __ sub(result, dividend, Operand::Zero(), SetCC);
- __ rsb(result, result, Operand::Zero(), LeaveCC, lt);
- __ mov(result, Operand(result, ASR, power));
- if (divisor > 0) __ rsb(result, result, Operand::Zero(), LeaveCC, lt);
- if (divisor < 0) __ rsb(result, result, Operand::Zero(), LeaveCC, gt);
- return; // Don't fall through to "__ rsb" below.
- } else {
- // Deoptimize if remainder is not 0.
- __ tst(dividend, Operand(test_value));
- DeoptimizeIf(ne, instr->environment());
- __ mov(result, Operand(dividend, ASR, power));
- if (divisor < 0) __ rsb(result, result, Operand(0));
- }
- } else {
- if (divisor < 0) {
- __ rsb(result, dividend, Operand(0));
- } else {
- __ Move(result, dividend);
- }
- }
+ __ TruncatingDiv(result, dividend, Abs(divisor));
+ if (divisor < 0) __ rsb(result, result, Operand::Zero());
- return;
+ if (!hdiv->CheckFlag(HInstruction::kAllUsesTruncatingToInt32)) {
+ __ mov(ip, Operand(divisor));
+ __ smull(scratch0(), ip, result, ip);
+ __ sub(scratch0(), scratch0(), dividend, SetCC);
+ DeoptimizeIf(ne, instr->environment());
}
+}
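
DoDivByConstI (like DoModByConstI and DoFlooringDivByConstI) now relies on the TruncatingDiv macro instead of the removed EmitSignedIntegerDivisionByConstant helper; both implement the multiply-by-precomputed-reciprocal scheme from Hacker's Delight that the deleted comment cites. A scalar sketch of the runtime part of that scheme, taking the multiplier/shift pair as given (its derivation is not shown in this diff); the constants in the example comment are the standard pair for divisor 3 and are only there to make the sketch checkable:

    #include <cstdint>

    int32_t TruncatingDivByMagic(int32_t dividend, int32_t divisor,
                                 int32_t multiplier, int shift) {
      int64_t product = static_cast<int64_t>(dividend) * multiplier;
      int32_t quotient = static_cast<int32_t>(product >> 32);   // high 32 bits
      if (divisor > 0 && multiplier < 0) quotient += dividend;  // sign fix-ups
      if (divisor < 0 && multiplier > 0) quotient -= dividend;
      if (shift > 0) quotient >>= shift;                        // arithmetic shift
      // Add 1 for negative dividends so the result rounds toward zero.
      return quotient +
             static_cast<int32_t>(static_cast<uint32_t>(dividend) >> 31);
    }

    // Example: TruncatingDivByMagic(-7, 3, 0x55555556, 0) == -2, matching -7 / 3.
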
- const Register left = ToRegister(instr->left());
- const Register right = ToRegister(instr->right());
- const Register result = ToRegister(instr->result());
+
+void LCodeGen::DoDivI(LDivI* instr) {
+ HBinaryOperation* hdiv = instr->hydrogen();
+ Register left = ToRegister(instr->left());
+ Register right = ToRegister(instr->right());
+ Register result = ToRegister(instr->result());
// Check for x / 0.
- if (instr->hydrogen_value()->CheckFlag(HValue::kCanBeDivByZero)) {
+ if (hdiv->CheckFlag(HValue::kCanBeDivByZero)) {
__ cmp(right, Operand::Zero());
DeoptimizeIf(eq, instr->environment());
}
// Check for (0 / -x) that will produce negative zero.
- if (instr->hydrogen_value()->CheckFlag(HValue::kBailoutOnMinusZero)) {
+ if (hdiv->CheckFlag(HValue::kBailoutOnMinusZero)) {
Label positive;
if (!instr->hydrogen_value()->CheckFlag(HValue::kCanBeDivByZero)) {
// Do the test only if it hadn't be done above.
@@ -1423,10 +1389,9 @@ void LCodeGen::DoDivI(LDivI* instr) {
}
// Check for (kMinInt / -1).
- if (instr->hydrogen_value()->CheckFlag(HValue::kCanOverflow) &&
+ if (hdiv->CheckFlag(HValue::kCanOverflow) &&
(!CpuFeatures::IsSupported(SUDIV) ||
- !instr->hydrogen_value()->CheckFlag(
- HValue::kAllUsesTruncatingToInt32))) {
+ !hdiv->CheckFlag(HValue::kAllUsesTruncatingToInt32))) {
// We don't need to check for overflow when truncating with sdiv
// support because, on ARM, sdiv kMinInt, -1 -> kMinInt.
__ cmp(left, Operand(kMinInt));
@@ -1437,18 +1402,9 @@ void LCodeGen::DoDivI(LDivI* instr) {
if (CpuFeatures::IsSupported(SUDIV)) {
CpuFeatureScope scope(masm(), SUDIV);
__ sdiv(result, left, right);
-
- if (!instr->hydrogen_value()->CheckFlag(
- HInstruction::kAllUsesTruncatingToInt32)) {
- // Compute remainder and deopt if it's not zero.
- const Register remainder = scratch0();
- __ mls(remainder, result, right, left);
- __ cmp(remainder, Operand::Zero());
- DeoptimizeIf(ne, instr->environment());
- }
} else {
- const DoubleRegister vleft = ToDoubleRegister(instr->temp());
- const DoubleRegister vright = double_scratch0();
+ DoubleRegister vleft = ToDoubleRegister(instr->temp());
+ DoubleRegister vright = double_scratch0();
__ vmov(double_scratch0().low(), left);
__ vcvt_f64_s32(vleft, double_scratch0().low());
__ vmov(double_scratch0().low(), right);
@@ -1456,15 +1412,23 @@ void LCodeGen::DoDivI(LDivI* instr) {
__ vdiv(vleft, vleft, vright); // vleft now contains the result.
__ vcvt_s32_f64(double_scratch0().low(), vleft);
__ vmov(result, double_scratch0().low());
+ }
- if (!instr->hydrogen_value()->CheckFlag(
- HInstruction::kAllUsesTruncatingToInt32)) {
- // Deopt if exact conversion to integer was not possible.
- // Use vright as scratch register.
- __ vcvt_f64_s32(double_scratch0(), double_scratch0().low());
- __ VFPCompareAndSetFlags(vleft, double_scratch0());
- DeoptimizeIf(ne, instr->environment());
- }
+ if (hdiv->IsMathFloorOfDiv()) {
+ Label done;
+ Register remainder = scratch0();
+ __ mls(remainder, result, right, left);
+ __ cmp(remainder, Operand::Zero());
+ __ b(eq, &done);
+ __ eor(remainder, remainder, Operand(right));
+ __ add(result, result, Operand(remainder, ASR, 31));
+ __ bind(&done);
+ } else if (!hdiv->CheckFlag(HValue::kAllUsesTruncatingToInt32)) {
+ // Compute remainder and deopt if it's not zero.
+ Register remainder = scratch0();
+ __ mls(remainder, result, right, left);
+ __ cmp(remainder, Operand::Zero());
+ DeoptimizeIf(ne, instr->environment());
}
}
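
The new IsMathFloorOfDiv branch in DoDivI above turns the truncating sdiv result into a flooring one: subtract one exactly when there is a nonzero remainder whose sign differs from the divisor's, written as adding (remainder ^ divisor) >> 31. A scalar sketch, assuming arithmetic shifts of negative values and that the divide-by-zero and kMinInt / -1 cases were already handled by the deopts above:

    #include <cstdint>

    int32_t FlooringDiv(int32_t dividend, int32_t divisor) {
      int32_t quotient = dividend / divisor;              // truncating, like sdiv
      int32_t remainder = dividend - quotient * divisor;  // like the mls above
      // (remainder ^ divisor) >> 31 is -1 when the signs differ, 0 otherwise.
      if (remainder != 0) quotient += (remainder ^ divisor) >> 31;
      return quotient;
    }
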
@@ -1493,71 +1457,84 @@ void LCodeGen::DoMultiplySubD(LMultiplySubD* instr) {
}
-void LCodeGen::DoMathFloorOfDiv(LMathFloorOfDiv* instr) {
- const Register result = ToRegister(instr->result());
- const Register left = ToRegister(instr->left());
- const Register remainder = ToRegister(instr->temp());
- const Register scratch = scratch0();
+void LCodeGen::DoFlooringDivByPowerOf2I(LFlooringDivByPowerOf2I* instr) {
+ Register dividend = ToRegister(instr->dividend());
+ Register result = ToRegister(instr->result());
+ int32_t divisor = instr->divisor();
+
+ // If the divisor is positive, things are easy: There can be no deopts and we
+ // can simply do an arithmetic right shift.
+ if (divisor == 1) return;
+ int32_t shift = WhichPowerOf2Abs(divisor);
+ if (divisor > 1) {
+ __ mov(result, Operand(dividend, ASR, shift));
+ return;
+ }
- if (!CpuFeatures::IsSupported(SUDIV)) {
- // If the CPU doesn't support sdiv instruction, we only optimize when we
- // have magic numbers for the divisor. The standard integer division routine
- // is usually slower than transitionning to VFP.
- ASSERT(instr->right()->IsConstantOperand());
- int32_t divisor = ToInteger32(LConstantOperand::cast(instr->right()));
- ASSERT(LChunkBuilder::HasMagicNumberForDivisor(divisor));
- if (divisor < 0) {
- __ cmp(left, Operand::Zero());
- DeoptimizeIf(eq, instr->environment());
+ // If the divisor is negative, we have to negate and handle edge cases.
+ __ rsb(result, dividend, Operand::Zero(), SetCC);
+ if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) {
+ DeoptimizeIf(eq, instr->environment());
+ }
+ if (instr->hydrogen()->CheckFlag(HValue::kLeftCanBeMinInt)) {
+ // Note that we could emit branch-free code, but that would need one more
+ // register.
+ if (divisor == -1) {
+ DeoptimizeIf(vs, instr->environment());
+ __ mov(result, Operand(dividend, ASR, shift));
+ } else {
+ __ mov(result, Operand(kMinInt / divisor), LeaveCC, vs);
+ __ mov(result, Operand(dividend, ASR, shift), LeaveCC, vc);
}
- EmitSignedIntegerDivisionByConstant(result,
- left,
- divisor,
- remainder,
- scratch,
- instr->environment());
- // We performed a truncating division. Correct the result if necessary.
- __ cmp(remainder, Operand::Zero());
- __ teq(remainder, Operand(divisor), ne);
- __ sub(result, result, Operand(1), LeaveCC, mi);
} else {
- CpuFeatureScope scope(masm(), SUDIV);
- const Register right = ToRegister(instr->right());
-
- // Check for x / 0.
- __ cmp(right, Operand::Zero());
- DeoptimizeIf(eq, instr->environment());
+ __ mov(result, Operand(dividend, ASR, shift));
+ }
+}
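
DoFlooringDivByPowerOf2I above exploits the fact that an arithmetic right shift by k is exactly Math.floor(x / 2^k), so a positive power-of-two divisor needs no fix-up at all; a negative divisor negates the dividend first, which is where the minus-zero and kMinInt deopts come from. A scalar sketch with an illustrative name, assuming those deopt cases (a zero dividend with a negative divisor, or kMinInt with a negative divisor) were rejected above:

    #include <cstdint>

    int32_t FlooringDivByPowerOf2(int32_t dividend, int32_t divisor) {
      uint32_t abs_divisor = divisor < 0 ? 0u - static_cast<uint32_t>(divisor)
                                         : static_cast<uint32_t>(divisor);
      int shift = 0;                                 // log2(|divisor|)
      while ((1u << shift) != abs_divisor) shift++;
      if (divisor > 0) return dividend >> shift;     // arithmetic shift == floor
      return -dividend >> shift;                     // floor(x / -2^k) == floor(-x / 2^k)
    }
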
- // Check for (kMinInt / -1).
- if (instr->hydrogen()->CheckFlag(HValue::kCanOverflow)) {
- __ cmp(left, Operand(kMinInt));
- __ cmp(right, Operand(-1), eq);
- DeoptimizeIf(eq, instr->environment());
- }
- // Check for (0 / -x) that will produce negative zero.
- if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) {
- __ cmp(right, Operand::Zero());
- __ cmp(left, Operand::Zero(), mi);
- // "right" can't be null because the code would have already been
- // deoptimized. The Z flag is set only if (right < 0) and (left == 0).
- // In this case we need to deoptimize to produce a -0.
- DeoptimizeIf(eq, instr->environment());
- }
+void LCodeGen::DoFlooringDivByConstI(LFlooringDivByConstI* instr) {
+ Register dividend = ToRegister(instr->dividend());
+ int32_t divisor = instr->divisor();
+ Register result = ToRegister(instr->result());
+ ASSERT(!dividend.is(result));
- Label done;
- __ sdiv(result, left, right);
- // If both operands have the same sign then we are done.
- __ eor(remainder, left, Operand(right), SetCC);
- __ b(pl, &done);
+ if (divisor == 0) {
+ DeoptimizeIf(al, instr->environment());
+ return;
+ }
- // Check if the result needs to be corrected.
- __ mls(remainder, result, right, left);
- __ cmp(remainder, Operand::Zero());
- __ sub(result, result, Operand(1), LeaveCC, ne);
+ // Check for (0 / -x) that will produce negative zero.
+ HMathFloorOfDiv* hdiv = instr->hydrogen();
+ if (hdiv->CheckFlag(HValue::kBailoutOnMinusZero) && divisor < 0) {
+ __ cmp(dividend, Operand::Zero());
+ DeoptimizeIf(eq, instr->environment());
+ }
- __ bind(&done);
+ // Easy case: We need no dynamic check for the dividend and the flooring
+ // division is the same as the truncating division.
+ if ((divisor > 0 && !hdiv->CheckFlag(HValue::kLeftCanBeNegative)) ||
+ (divisor < 0 && !hdiv->CheckFlag(HValue::kLeftCanBePositive))) {
+ __ TruncatingDiv(result, dividend, Abs(divisor));
+ if (divisor < 0) __ rsb(result, result, Operand::Zero());
+ return;
}
+
+ // In the general case we may need to adjust before and after the truncating
+ // division to get a flooring division.
+ Register temp = ToRegister(instr->temp());
+ ASSERT(!temp.is(dividend) && !temp.is(result));
+ Label needs_adjustment, done;
+ __ cmp(dividend, Operand::Zero());
+ __ b(divisor > 0 ? lt : gt, &needs_adjustment);
+ __ TruncatingDiv(result, dividend, Abs(divisor));
+ if (divisor < 0) __ rsb(result, result, Operand::Zero());
+ __ jmp(&done);
+ __ bind(&needs_adjustment);
+ __ add(temp, dividend, Operand(divisor > 0 ? 1 : -1));
+ __ TruncatingDiv(result, temp, Abs(divisor));
+ if (divisor < 0) __ rsb(result, result, Operand::Zero());
+ __ sub(result, result, Operand(1));
+ __ bind(&done);
}
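
The general case of DoFlooringDivByConstI above floors without ever computing a remainder: when the dividend's sign can conflict with the divisor's, it moves the dividend by +1 (positive divisor) or -1 (negative divisor), takes the truncating quotient, and subtracts one. A scalar sketch, with '/' by |divisor| standing in for TruncatingDiv and divisor != 0, divisor != kMinInt assumed:

    #include <cstdint>

    int32_t FlooringDivByConst(int32_t dividend, int32_t divisor) {
      int32_t abs_divisor = divisor < 0 ? -divisor : divisor;
      bool needs_adjustment =
          (divisor > 0 && dividend < 0) || (divisor < 0 && dividend > 0);
      if (!needs_adjustment) {
        // Easy case: truncating and flooring division coincide.
        int32_t quotient = dividend / abs_divisor;
        return divisor < 0 ? -quotient : quotient;
      }
      int32_t adjusted = dividend + (divisor > 0 ? 1 : -1);
      int32_t quotient = adjusted / abs_divisor;
      if (divisor < 0) quotient = -quotient;
      return quotient - 1;   // e.g. floor(-5 / 3): trunc(-4 / 3) - 1 == -2
    }
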
@@ -1676,7 +1653,7 @@ void LCodeGen::DoBitI(LBitI* instr) {
Register result = ToRegister(instr->result());
Operand right(no_reg);
- if (right_op->IsStackSlot() || right_op->IsArgument()) {
+ if (right_op->IsStackSlot()) {
right = Operand(EmitLoadRegister(right_op, ip));
} else {
ASSERT(right_op->IsRegister() || right_op->IsConstantOperand());
@@ -1799,7 +1776,7 @@ void LCodeGen::DoSubI(LSubI* instr) {
bool can_overflow = instr->hydrogen()->CheckFlag(HValue::kCanOverflow);
SBit set_cond = can_overflow ? SetCC : LeaveCC;
- if (right->IsStackSlot() || right->IsArgument()) {
+ if (right->IsStackSlot()) {
Register right_reg = EmitLoadRegister(right, ip);
__ sub(ToRegister(result), ToRegister(left), Operand(right_reg), set_cond);
} else {
@@ -1820,7 +1797,7 @@ void LCodeGen::DoRSubI(LRSubI* instr) {
bool can_overflow = instr->hydrogen()->CheckFlag(HValue::kCanOverflow);
SBit set_cond = can_overflow ? SetCC : LeaveCC;
- if (right->IsStackSlot() || right->IsArgument()) {
+ if (right->IsStackSlot()) {
Register right_reg = EmitLoadRegister(right, ip);
__ rsb(ToRegister(result), ToRegister(left), Operand(right_reg), set_cond);
} else {
@@ -1993,7 +1970,7 @@ void LCodeGen::DoAddI(LAddI* instr) {
bool can_overflow = instr->hydrogen()->CheckFlag(HValue::kCanOverflow);
SBit set_cond = can_overflow ? SetCC : LeaveCC;
- if (right->IsStackSlot() || right->IsArgument()) {
+ if (right->IsStackSlot()) {
Register right_reg = EmitLoadRegister(right, ip);
__ add(ToRegister(result), ToRegister(left), Operand(right_reg), set_cond);
} else {
@@ -2742,9 +2719,6 @@ void LCodeGen::DoInstanceOfKnownGlobal(LInstanceOfKnownGlobal* instr) {
Register temp = ToRegister(instr->temp());
Register result = ToRegister(instr->result());
- ASSERT(object.is(r0));
- ASSERT(result.is(r0));
-
// A Smi is not instance of anything.
__ JumpIfSmi(object, &false_result);
@@ -2802,9 +2776,6 @@ void LCodeGen::DoInstanceOfKnownGlobal(LInstanceOfKnownGlobal* instr) {
void LCodeGen::DoDeferredInstanceOfKnownGlobal(LInstanceOfKnownGlobal* instr,
Label* map_check) {
- Register result = ToRegister(instr->result());
- ASSERT(result.is(r0));
-
InstanceofStub::Flags flags = InstanceofStub::kNoFlags;
flags = static_cast<InstanceofStub::Flags>(
flags | InstanceofStub::kArgsInRegisters);
@@ -2817,37 +2788,32 @@ void LCodeGen::DoDeferredInstanceOfKnownGlobal(LInstanceOfKnownGlobal* instr,
PushSafepointRegistersScope scope(this, Safepoint::kWithRegisters);
LoadContextFromDeferred(instr->context());
- // Get the temp register reserved by the instruction. This needs to be r4 as
- // its slot of the pushing of safepoint registers is used to communicate the
- // offset to the location of the map check.
- Register temp = ToRegister(instr->temp());
- ASSERT(temp.is(r4));
__ Move(InstanceofStub::right(), instr->function());
- static const int kAdditionalDelta = 5;
+ static const int kAdditionalDelta = 4;
  // Make sure that code size is predictable, since we use specific constant
  // offsets in the code to find embedded values.
- PredictableCodeSizeScope predictable(masm_, 6 * Assembler::kInstrSize);
+ PredictableCodeSizeScope predictable(masm_, 5 * Assembler::kInstrSize);
int delta = masm_->InstructionsGeneratedSince(map_check) + kAdditionalDelta;
Label before_push_delta;
__ bind(&before_push_delta);
__ BlockConstPoolFor(kAdditionalDelta);
- __ mov(temp, Operand(delta * kPointerSize));
+ // r5 is used to communicate the offset to the location of the map check.
+ __ mov(r5, Operand(delta * kPointerSize));
// The mov above can generate one or two instructions. The delta was computed
// for two instructions, so we need to pad here in case of one instruction.
if (masm_->InstructionsGeneratedSince(&before_push_delta) != 2) {
ASSERT_EQ(1, masm_->InstructionsGeneratedSince(&before_push_delta));
__ nop();
}
- __ StoreToSafepointRegisterSlot(temp, temp);
CallCodeGeneric(stub.GetCode(isolate()),
RelocInfo::CODE_TARGET,
instr,
RECORD_SAFEPOINT_WITH_REGISTERS_AND_NO_ARGUMENTS);
LEnvironment* env = instr->GetDeferredLazyDeoptimizationEnvironment();
safepoints_.RecordLazyDeoptimizationIndex(env->deoptimization_index());
- // Put the result value into the result register slot and
+ // Put the result value (r0) into the result register slot and
// restore all registers.
- __ StoreToSafepointRegisterSlot(result, result);
+ __ StoreToSafepointRegisterSlot(r0, ToRegister(instr->result()));
}
@@ -3225,7 +3191,7 @@ void LCodeGen::DoLoadKeyedExternalArray(LLoadKeyed* instr) {
case FAST_ELEMENTS:
case FAST_SMI_ELEMENTS:
case DICTIONARY_ELEMENTS:
- case NON_STRICT_ARGUMENTS_ELEMENTS:
+ case SLOPPY_ARGUMENTS_ELEMENTS:
UNREACHABLE();
break;
}
@@ -3573,7 +3539,7 @@ void LCodeGen::DoDeclareGlobals(LDeclareGlobals* instr) {
__ push(scratch0());
__ mov(scratch0(), Operand(Smi::FromInt(instr->hydrogen()->flags())));
__ push(scratch0());
- CallRuntime(Runtime::kDeclareGlobals, 3, instr);
+ CallRuntime(Runtime::kHiddenDeclareGlobals, 3, instr);
}
@@ -3664,7 +3630,7 @@ void LCodeGen::DoDeferredMathAbsTaggedHeapNumber(LMathAbs* instr) {
// Slow case: Call the runtime system to do the number allocation.
__ bind(&slow);
- CallRuntimeFromDeferred(Runtime::kAllocateHeapNumber, 0, instr,
+ CallRuntimeFromDeferred(Runtime::kHiddenAllocateHeapNumber, 0, instr,
instr->context());
// Set the pointer to the new heap number in tmp.
if (!tmp1.is(r0)) __ mov(tmp1, Operand(r0));
@@ -3881,6 +3847,13 @@ void LCodeGen::DoMathLog(LMathLog* instr) {
}
+void LCodeGen::DoMathClz32(LMathClz32* instr) {
+ Register input = ToRegister(instr->value());
+ Register result = ToRegister(instr->result());
+ __ clz(result, input);
+}
+
+
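
DoMathClz32 above maps the operation onto a single ARM clz, which counts leading zero bits and returns 32 for an input of zero, matching Math.clz32(0) == 32. A portable scalar sketch (deliberately avoiding __builtin_clz, whose result is undefined for zero):

    #include <cstdint>

    int32_t Clz32(uint32_t value) {
      int32_t count = 0;
      for (uint32_t bit = 0x80000000u; bit != 0 && (value & bit) == 0; bit >>= 1) {
        count++;
      }
      return count;  // 32 when value == 0
    }
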
void LCodeGen::DoInvokeFunction(LInvokeFunction* instr) {
ASSERT(ToRegister(instr->context()).is(cp));
ASSERT(ToRegister(instr->function()).is(r1));
@@ -3964,8 +3937,7 @@ void LCodeGen::DoCallNew(LCallNew* instr) {
__ mov(r0, Operand(instr->arity()));
// No cell in r2 for construct type feedback in optimized code
- Handle<Object> undefined_value(isolate()->factory()->undefined_value());
- __ mov(r2, Operand(undefined_value));
+ __ LoadRoot(r2, Heap::kUndefinedValueRootIndex);
CallConstructStub stub(NO_CALL_FUNCTION_FLAGS);
CallCode(stub.GetCode(isolate()), RelocInfo::CONSTRUCT_CALL, instr);
}
@@ -3977,7 +3949,7 @@ void LCodeGen::DoCallNewArray(LCallNewArray* instr) {
ASSERT(ToRegister(instr->result()).is(r0));
__ mov(r0, Operand(instr->arity()));
- __ mov(r2, Operand(factory()->undefined_value()));
+ __ LoadRoot(r2, Heap::kUndefinedValueRootIndex);
ElementsKind kind = instr->hydrogen()->elements_kind();
AllocationSiteOverrideMode override_mode =
(AllocationSite::GetMode(kind) == TRACK_ALLOCATION_SITE)
@@ -4057,12 +4029,21 @@ void LCodeGen::DoStoreNamedField(LStoreNamedField* instr) {
}
Handle<Map> transition = instr->transition();
+ SmiCheck check_needed =
+ instr->hydrogen()->value()->IsHeapObject()
+ ? OMIT_SMI_CHECK : INLINE_SMI_CHECK;
- if (FLAG_track_heap_object_fields && representation.IsHeapObject()) {
+ ASSERT(!(representation.IsSmi() &&
+ instr->value()->IsConstantOperand() &&
+ !IsSmi(LConstantOperand::cast(instr->value()))));
+ if (representation.IsHeapObject()) {
Register value = ToRegister(instr->value());
if (!instr->hydrogen()->value()->type().IsHeapObject()) {
__ SmiTst(value);
DeoptimizeIf(eq, instr->environment());
+
+ // We know that value is a smi now, so we can omit the check below.
+ check_needed = OMIT_SMI_CHECK;
}
} else if (representation.IsDouble()) {
ASSERT(transition.is_null());
@@ -4092,9 +4073,6 @@ void LCodeGen::DoStoreNamedField(LStoreNamedField* instr) {
// Do the store.
Register value = ToRegister(instr->value());
- SmiCheck check_needed =
- instr->hydrogen()->value()->IsHeapObject()
- ? OMIT_SMI_CHECK : INLINE_SMI_CHECK;
if (access.IsInobject()) {
MemOperand operand = FieldMemOperand(object, offset);
__ Store(value, operand, representation);
@@ -4136,8 +4114,7 @@ void LCodeGen::DoStoreNamedGeneric(LStoreNamedGeneric* instr) {
// Name is always in r2.
__ mov(r2, Operand(instr->name()));
- Handle<Code> ic = StoreIC::initialize_stub(isolate(),
- instr->strict_mode_flag());
+ Handle<Code> ic = StoreIC::initialize_stub(isolate(), instr->strict_mode());
CallCode(ic, RelocInfo::CODE_TARGET, instr, NEVER_INLINE_TARGET_ADDRESS);
}
@@ -4258,7 +4235,7 @@ void LCodeGen::DoStoreKeyedExternalArray(LStoreKeyed* instr) {
case FAST_HOLEY_ELEMENTS:
case FAST_HOLEY_SMI_ELEMENTS:
case DICTIONARY_ELEMENTS:
- case NON_STRICT_ARGUMENTS_ELEMENTS:
+ case SLOPPY_ARGUMENTS_ELEMENTS:
UNREACHABLE();
break;
}
@@ -4374,7 +4351,7 @@ void LCodeGen::DoStoreKeyedGeneric(LStoreKeyedGeneric* instr) {
ASSERT(ToRegister(instr->key()).is(r1));
ASSERT(ToRegister(instr->value()).is(r0));
- Handle<Code> ic = (instr->strict_mode_flag() == kStrictMode)
+ Handle<Code> ic = instr->strict_mode() == STRICT
? isolate()->builtins()->KeyedStoreIC_Initialize_Strict()
: isolate()->builtins()->KeyedStoreIC_Initialize();
CallCode(ic, RelocInfo::CODE_TARGET, instr, NEVER_INLINE_TARGET_ADDRESS);
@@ -4486,7 +4463,7 @@ void LCodeGen::DoDeferredStringCharCodeAt(LStringCharCodeAt* instr) {
__ SmiTag(index);
__ push(index);
}
- CallRuntimeFromDeferred(Runtime::kStringCharCodeAt, 2, instr,
+ CallRuntimeFromDeferred(Runtime::kHiddenStringCharCodeAt, 2, instr,
instr->context());
__ AssertSmi(r0);
__ SmiUntag(r0);
@@ -4561,20 +4538,6 @@ void LCodeGen::DoInteger32ToDouble(LInteger32ToDouble* instr) {
}
-void LCodeGen::DoInteger32ToSmi(LInteger32ToSmi* instr) {
- LOperand* input = instr->value();
- LOperand* output = instr->result();
- ASSERT(output->IsRegister());
- if (!instr->hydrogen()->value()->HasRange() ||
- !instr->hydrogen()->value()->range()->IsInSmiRange()) {
- __ SmiTag(ToRegister(output), ToRegister(input), SetCC);
- DeoptimizeIf(vs, instr->environment());
- } else {
- __ SmiTag(ToRegister(output), ToRegister(input));
- }
-}
-
-
void LCodeGen::DoUint32ToDouble(LUint32ToDouble* instr) {
LOperand* input = instr->value();
LOperand* output = instr->result();
@@ -4585,27 +4548,17 @@ void LCodeGen::DoUint32ToDouble(LUint32ToDouble* instr) {
}
-void LCodeGen::DoUint32ToSmi(LUint32ToSmi* instr) {
- LOperand* input = instr->value();
- LOperand* output = instr->result();
- if (!instr->hydrogen()->value()->HasRange() ||
- !instr->hydrogen()->value()->range()->IsInSmiRange()) {
- __ tst(ToRegister(input), Operand(0xc0000000));
- DeoptimizeIf(ne, instr->environment());
- }
- __ SmiTag(ToRegister(output), ToRegister(input));
-}
-
-
void LCodeGen::DoNumberTagI(LNumberTagI* instr) {
class DeferredNumberTagI V8_FINAL : public LDeferredCode {
public:
DeferredNumberTagI(LCodeGen* codegen, LNumberTagI* instr)
: LDeferredCode(codegen), instr_(instr) { }
virtual void Generate() V8_OVERRIDE {
- codegen()->DoDeferredNumberTagI(instr_,
- instr_->value(),
- SIGNED_INT32);
+ codegen()->DoDeferredNumberTagIU(instr_,
+ instr_->value(),
+ instr_->temp1(),
+ instr_->temp2(),
+ SIGNED_INT32);
}
virtual LInstruction* instr() V8_OVERRIDE { return instr_; }
private:
@@ -4628,9 +4581,11 @@ void LCodeGen::DoNumberTagU(LNumberTagU* instr) {
DeferredNumberTagU(LCodeGen* codegen, LNumberTagU* instr)
: LDeferredCode(codegen), instr_(instr) { }
virtual void Generate() V8_OVERRIDE {
- codegen()->DoDeferredNumberTagI(instr_,
- instr_->value(),
- UNSIGNED_INT32);
+ codegen()->DoDeferredNumberTagIU(instr_,
+ instr_->value(),
+ instr_->temp1(),
+ instr_->temp2(),
+ UNSIGNED_INT32);
}
virtual LInstruction* instr() V8_OVERRIDE { return instr_; }
private:
@@ -4648,18 +4603,19 @@ void LCodeGen::DoNumberTagU(LNumberTagU* instr) {
}
-void LCodeGen::DoDeferredNumberTagI(LInstruction* instr,
- LOperand* value,
- IntegerSignedness signedness) {
- Label slow;
+void LCodeGen::DoDeferredNumberTagIU(LInstruction* instr,
+ LOperand* value,
+ LOperand* temp1,
+ LOperand* temp2,
+ IntegerSignedness signedness) {
+ Label done, slow;
Register src = ToRegister(value);
Register dst = ToRegister(instr->result());
+ Register tmp1 = scratch0();
+ Register tmp2 = ToRegister(temp1);
+ Register tmp3 = ToRegister(temp2);
LowDwVfpRegister dbl_scratch = double_scratch0();
- // Preserve the value of all registers.
- PushSafepointRegistersScope scope(this, Safepoint::kWithRegisters);
-
- Label done;
if (signedness == SIGNED_INT32) {
// There was overflow, so bits 30 and 31 of the original integer
// disagree. Try to allocate a heap number in new space and store
@@ -4676,38 +4632,40 @@ void LCodeGen::DoDeferredNumberTagI(LInstruction* instr,
}
if (FLAG_inline_new) {
- __ LoadRoot(scratch0(), Heap::kHeapNumberMapRootIndex);
- __ AllocateHeapNumber(r5, r3, r4, scratch0(), &slow, DONT_TAG_RESULT);
- __ Move(dst, r5);
+ __ LoadRoot(tmp3, Heap::kHeapNumberMapRootIndex);
+ __ AllocateHeapNumber(dst, tmp1, tmp2, tmp3, &slow, DONT_TAG_RESULT);
__ b(&done);
}
// Slow case: Call the runtime system to do the number allocation.
__ bind(&slow);
+ {
+ // TODO(3095996): Put a valid pointer value in the stack slot where the
+ // result register is stored, as this register is in the pointer map, but
+ // contains an integer value.
+ __ mov(dst, Operand::Zero());
- // TODO(3095996): Put a valid pointer value in the stack slot where the result
- // register is stored, as this register is in the pointer map, but contains an
- // integer value.
- __ mov(ip, Operand::Zero());
- __ StoreToSafepointRegisterSlot(ip, dst);
- // NumberTagI and NumberTagD use the context from the frame, rather than
- // the environment's HContext or HInlinedContext value.
- // They only call Runtime::kAllocateHeapNumber.
- // The corresponding HChange instructions are added in a phase that does
- // not have easy access to the local context.
- __ ldr(cp, MemOperand(fp, StandardFrameConstants::kContextOffset));
- __ CallRuntimeSaveDoubles(Runtime::kAllocateHeapNumber);
- RecordSafepointWithRegisters(
- instr->pointer_map(), 0, Safepoint::kNoLazyDeopt);
- __ Move(dst, r0);
- __ sub(dst, dst, Operand(kHeapObjectTag));
+ // Preserve the value of all registers.
+ PushSafepointRegistersScope scope(this, Safepoint::kWithRegisters);
+
+ // NumberTagI and NumberTagD use the context from the frame, rather than
+ // the environment's HContext or HInlinedContext value.
+ // They only call Runtime::kHiddenAllocateHeapNumber.
+ // The corresponding HChange instructions are added in a phase that does
+ // not have easy access to the local context.
+ __ ldr(cp, MemOperand(fp, StandardFrameConstants::kContextOffset));
+ __ CallRuntimeSaveDoubles(Runtime::kHiddenAllocateHeapNumber);
+ RecordSafepointWithRegisters(
+ instr->pointer_map(), 0, Safepoint::kNoLazyDeopt);
+ __ sub(r0, r0, Operand(kHeapObjectTag));
+ __ StoreToSafepointRegisterSlot(r0, dst);
+ }
// Done. Put the value in dbl_scratch into the value of the allocated heap
// number.
__ bind(&done);
__ vstr(dbl_scratch, dst, HeapNumber::kValueOffset);
__ add(dst, dst, Operand(kHeapObjectTag));
- __ StoreToSafepointRegisterSlot(dst, dst);
}
@@ -4756,11 +4714,11 @@ void LCodeGen::DoDeferredNumberTagD(LNumberTagD* instr) {
PushSafepointRegistersScope scope(this, Safepoint::kWithRegisters);
// NumberTagI and NumberTagD use the context from the frame, rather than
// the environment's HContext or HInlinedContext value.
- // They only call Runtime::kAllocateHeapNumber.
+ // They only call Runtime::kHiddenAllocateHeapNumber.
// The corresponding HChange instructions are added in a phase that does
// not have easy access to the local context.
__ ldr(cp, MemOperand(fp, StandardFrameConstants::kContextOffset));
- __ CallRuntimeSaveDoubles(Runtime::kAllocateHeapNumber);
+ __ CallRuntimeSaveDoubles(Runtime::kHiddenAllocateHeapNumber);
RecordSafepointWithRegisters(
instr->pointer_map(), 0, Safepoint::kNoLazyDeopt);
__ sub(r0, r0, Operand(kHeapObjectTag));
@@ -4769,8 +4727,21 @@ void LCodeGen::DoDeferredNumberTagD(LNumberTagD* instr) {
void LCodeGen::DoSmiTag(LSmiTag* instr) {
- ASSERT(!instr->hydrogen_value()->CheckFlag(HValue::kCanOverflow));
- __ SmiTag(ToRegister(instr->result()), ToRegister(instr->value()));
+ HChange* hchange = instr->hydrogen();
+ Register input = ToRegister(instr->value());
+ Register output = ToRegister(instr->result());
+ if (hchange->CheckFlag(HValue::kCanOverflow) &&
+ hchange->value()->CheckFlag(HValue::kUint32)) {
+ __ tst(input, Operand(0xc0000000));
+ DeoptimizeIf(ne, instr->environment());
+ }
+ if (hchange->CheckFlag(HValue::kCanOverflow) &&
+ !hchange->value()->CheckFlag(HValue::kUint32)) {
+ __ SmiTag(output, input, SetCC);
+ DeoptimizeIf(vs, instr->environment());
+ } else {
+ __ SmiTag(output, input);
+ }
}
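
For reference, both deoptimization checks in DoSmiTag reduce to bit tests on the 32-bit smi layout (tagged value == payload << 1, 31-bit payload): a uint32 input only fits if bits 30 and 31 are clear, and a signed input overflows the tagging shift exactly when bits 30 and 31 disagree. A minimal host-side C++ sketch of the same checks, with exceptions standing in for a deopt (illustrative only):

#include <cassert>
#include <cstdint>
#include <stdexcept>

// Sketch of the checks DoSmiTag emits, assuming the 32-bit smi layout
// (tagged value == payload << 1, so the payload must fit in 31 bits).
int32_t SmiTag32(int32_t value, bool value_is_uint32) {
  if (value_is_uint32) {
    // Mirrors: tst input, #0xc0000000; DeoptimizeIf(ne).
    if (static_cast<uint32_t>(value) & 0xc0000000u)
      throw std::runtime_error("deopt: uint32 outside smi range");
  } else if (((value >> 30) & 1) != ((value >> 31) & 1)) {
    // Mirrors: SmiTag(output, input, SetCC); DeoptimizeIf(vs).
    // Doubling a 32-bit signed value overflows iff bits 30 and 31 differ.
    throw std::runtime_error("deopt: int32 outside smi range");
  }
  return static_cast<int32_t>(static_cast<uint32_t>(value) << 1);
}

int main() {
  assert(SmiTag32(-1, false) == -2);                    // in range
  assert(SmiTag32((1 << 30) - 1, true) == 0x7ffffffe);  // largest smi
}
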
@@ -5220,6 +5191,26 @@ void LCodeGen::DoClampTToUint8(LClampTToUint8* instr) {
}
+void LCodeGen::DoDoubleBits(LDoubleBits* instr) {
+ DwVfpRegister value_reg = ToDoubleRegister(instr->value());
+ Register result_reg = ToRegister(instr->result());
+ if (instr->hydrogen()->bits() == HDoubleBits::HIGH) {
+ __ VmovHigh(result_reg, value_reg);
+ } else {
+ __ VmovLow(result_reg, value_reg);
+ }
+}
+
+
+void LCodeGen::DoConstructDouble(LConstructDouble* instr) {
+ Register hi_reg = ToRegister(instr->hi());
+ Register lo_reg = ToRegister(instr->lo());
+ DwVfpRegister result_reg = ToDoubleRegister(instr->result());
+ __ VmovHigh(result_reg, hi_reg);
+ __ VmovLow(result_reg, lo_reg);
+}
+
+
void LCodeGen::DoAllocate(LAllocate* instr) {
class DeferredAllocate V8_FINAL : public LDeferredCode {
public:
@@ -5328,7 +5319,7 @@ void LCodeGen::DoDeferredAllocate(LAllocate* instr) {
__ Push(Smi::FromInt(flags));
CallRuntimeFromDeferred(
- Runtime::kAllocateInTargetSpace, 2, instr, instr->context());
+ Runtime::kHiddenAllocateInTargetSpace, 2, instr, instr->context());
__ StoreToSafepointRegisterSlot(r0, result);
}
@@ -5362,7 +5353,7 @@ void LCodeGen::DoRegExpLiteral(LRegExpLiteral* instr) {
__ mov(r4, Operand(instr->hydrogen()->pattern()));
__ mov(r3, Operand(instr->hydrogen()->flags()));
__ Push(r6, r5, r4, r3);
- CallRuntime(Runtime::kMaterializeRegExpLiteral, 4, instr);
+ CallRuntime(Runtime::kHiddenMaterializeRegExpLiteral, 4, instr);
__ mov(r1, r0);
__ bind(&materialized);
@@ -5375,7 +5366,7 @@ void LCodeGen::DoRegExpLiteral(LRegExpLiteral* instr) {
__ bind(&runtime_allocate);
__ mov(r0, Operand(Smi::FromInt(size)));
__ Push(r1, r0);
- CallRuntime(Runtime::kAllocateInNewSpace, 1, instr);
+ CallRuntime(Runtime::kHiddenAllocateInNewSpace, 1, instr);
__ pop(r1);
__ bind(&allocated);
@@ -5390,7 +5381,7 @@ void LCodeGen::DoFunctionLiteral(LFunctionLiteral* instr) {
// space for nested functions that don't need literals cloning.
bool pretenure = instr->hydrogen()->pretenure();
if (!pretenure && instr->hydrogen()->has_no_literals()) {
- FastNewClosureStub stub(instr->hydrogen()->language_mode(),
+ FastNewClosureStub stub(instr->hydrogen()->strict_mode(),
instr->hydrogen()->is_generator());
__ mov(r2, Operand(instr->hydrogen()->shared_info()));
CallCode(stub.GetCode(isolate()), RelocInfo::CODE_TARGET, instr);
@@ -5399,7 +5390,7 @@ void LCodeGen::DoFunctionLiteral(LFunctionLiteral* instr) {
__ mov(r1, Operand(pretenure ? factory()->true_value()
: factory()->false_value()));
__ Push(cp, r2, r1);
- CallRuntime(Runtime::kNewClosure, 3, instr);
+ CallRuntime(Runtime::kHiddenNewClosure, 3, instr);
}
}
@@ -5548,7 +5539,7 @@ void LCodeGen::EnsureSpaceForLazyDeopt(int space_needed) {
void LCodeGen::DoLazyBailout(LLazyBailout* instr) {
- EnsureSpaceForLazyDeopt(Deoptimizer::patch_size());
+ last_lazy_deopt_pc_ = masm()->pc_offset();
ASSERT(instr->HasEnvironment());
LEnvironment* env = instr->environment();
RegisterEnvironmentForDeoptimization(env, Safepoint::kLazyDeopt);
@@ -5584,7 +5575,7 @@ void LCodeGen::DoDummyUse(LDummyUse* instr) {
void LCodeGen::DoDeferredStackCheck(LStackCheck* instr) {
PushSafepointRegistersScope scope(this, Safepoint::kWithRegisters);
LoadContextFromDeferred(instr->context());
- __ CallRuntimeSaveDoubles(Runtime::kStackGuard);
+ __ CallRuntimeSaveDoubles(Runtime::kHiddenStackGuard);
RecordSafepointWithLazyDeopt(
instr, RECORD_SAFEPOINT_WITH_REGISTERS_AND_NO_ARGUMENTS);
ASSERT(instr->HasEnvironment());
@@ -5622,10 +5613,7 @@ void LCodeGen::DoStackCheck(LStackCheck* instr) {
CallCode(isolate()->builtins()->StackCheck(),
RelocInfo::CODE_TARGET,
instr);
- EnsureSpaceForLazyDeopt(Deoptimizer::patch_size());
__ bind(&done);
- RegisterEnvironmentForDeoptimization(env, Safepoint::kLazyDeopt);
- safepoints_.RecordLazyDeoptimizationIndex(env->deoptimization_index());
} else {
ASSERT(instr->hydrogen()->is_backwards_branch());
// Perform stack overflow check if this goto needs it before jumping.
diff --git a/deps/v8/src/arm/lithium-codegen-arm.h b/deps/v8/src/arm/lithium-codegen-arm.h
index d58c18f6c9..21da500d01 100644
--- a/deps/v8/src/arm/lithium-codegen-arm.h
+++ b/deps/v8/src/arm/lithium-codegen-arm.h
@@ -126,9 +126,11 @@ class LCodeGen: public LCodeGenBase {
void DoDeferredNumberTagD(LNumberTagD* instr);
enum IntegerSignedness { SIGNED_INT32, UNSIGNED_INT32 };
- void DoDeferredNumberTagI(LInstruction* instr,
- LOperand* value,
- IntegerSignedness signedness);
+ void DoDeferredNumberTagIU(LInstruction* instr,
+ LOperand* value,
+ LOperand* temp1,
+ LOperand* temp2,
+ IntegerSignedness signedness);
void DoDeferredTaggedToI(LTaggedToI* instr);
void DoDeferredMathAbsTaggedHeapNumber(LMathAbs* instr);
@@ -162,9 +164,7 @@ class LCodeGen: public LCodeGenBase {
#undef DECLARE_DO
private:
- StrictModeFlag strict_mode_flag() const {
- return info()->is_classic_mode() ? kNonStrictMode : kStrictMode;
- }
+ StrictMode strict_mode() const { return info()->strict_mode(); }
Scope* scope() const { return scope_; }
@@ -348,17 +348,6 @@ class LCodeGen: public LCodeGenBase {
int* offset,
AllocationSiteMode mode);
- // Emit optimized code for integer division.
- // Inputs are signed.
- // All registers are clobbered.
- // If 'remainder' is no_reg, it is not computed.
- void EmitSignedIntegerDivisionByConstant(Register result,
- Register dividend,
- int32_t divisor,
- Register remainder,
- Register scratch,
- LEnvironment* environment);
-
void EnsureSpaceForLazyDeopt(int space_needed) V8_OVERRIDE;
void DoLoadKeyedExternalArray(LLoadKeyed* instr);
void DoLoadKeyedFixedDoubleArray(LLoadKeyed* instr);
diff --git a/deps/v8/src/arm/macro-assembler-arm.cc b/deps/v8/src/arm/macro-assembler-arm.cc
index 77c514ff54..2bfe09f768 100644
--- a/deps/v8/src/arm/macro-assembler-arm.cc
+++ b/deps/v8/src/arm/macro-assembler-arm.cc
@@ -133,6 +133,12 @@ void MacroAssembler::Call(Address target,
set_predictable_code_size(true);
}
+#ifdef DEBUG
+ // Check the expected size before generating code to ensure we assume the same
+  // constant pool availability (e.g., whether the constant pool is full).
+ int expected_size = CallSize(target, rmode, cond);
+#endif
+
// Call sequence on V7 or later may be :
// movw ip, #... @ call address low 16
// movt ip, #... @ call address high 16
@@ -153,7 +159,7 @@ void MacroAssembler::Call(Address target,
mov(ip, Operand(reinterpret_cast<int32_t>(target), rmode));
blx(ip, cond);
- ASSERT_EQ(CallSize(target, rmode, cond), SizeOfCodeGeneratedSince(&start));
+ ASSERT_EQ(expected_size, SizeOfCodeGeneratedSince(&start));
if (mode == NEVER_INLINE_TARGET_ADDRESS) {
set_predictable_code_size(old_predictable_code_size);
}
@@ -888,6 +894,16 @@ void MacroAssembler::VmovLow(DwVfpRegister dst, Register src) {
}
+void MacroAssembler::LoadConstantPoolPointerRegister() {
+ if (FLAG_enable_ool_constant_pool) {
+ int constant_pool_offset = Code::kConstantPoolOffset - Code::kHeaderSize -
+ pc_offset() - Instruction::kPCReadOffset;
+ ASSERT(ImmediateFitsAddrMode2Instruction(constant_pool_offset));
+ ldr(pp, MemOperand(pc, constant_pool_offset));
+ }
+}
+
+
void MacroAssembler::Prologue(PrologueFrameMode frame_mode) {
if (frame_mode == BUILD_STUB_FRAME) {
PushFixedFrame();
@@ -912,22 +928,20 @@ void MacroAssembler::Prologue(PrologueFrameMode frame_mode) {
add(fp, sp, Operand(StandardFrameConstants::kFixedFrameSizeFromFp));
}
}
-}
-
-
-void MacroAssembler::LoadConstantPoolPointerRegister() {
if (FLAG_enable_ool_constant_pool) {
- int constant_pool_offset =
- Code::kConstantPoolOffset - Code::kHeaderSize - pc_offset() - 8;
- ASSERT(ImmediateFitsAddrMode2Instruction(constant_pool_offset));
- ldr(pp, MemOperand(pc, constant_pool_offset));
+ LoadConstantPoolPointerRegister();
+ set_constant_pool_available(true);
}
}
-void MacroAssembler::EnterFrame(StackFrame::Type type) {
+void MacroAssembler::EnterFrame(StackFrame::Type type,
+ bool load_constant_pool) {
// r0-r3: preserved
PushFixedFrame();
+ if (FLAG_enable_ool_constant_pool && load_constant_pool) {
+ LoadConstantPoolPointerRegister();
+ }
mov(ip, Operand(Smi::FromInt(type)));
push(ip);
mov(ip, Operand(CodeObject()));
@@ -975,6 +989,7 @@ void MacroAssembler::EnterExitFrame(bool save_doubles, int stack_space) {
}
if (FLAG_enable_ool_constant_pool) {
str(pp, MemOperand(fp, ExitFrameConstants::kConstantPoolOffset));
+ LoadConstantPoolPointerRegister();
}
mov(ip, Operand(CodeObject()));
str(ip, MemOperand(fp, ExitFrameConstants::kCodeOffset));
@@ -1045,6 +1060,8 @@ int MacroAssembler::ActivationFrameAlignment() {
void MacroAssembler::LeaveExitFrame(bool save_doubles,
Register argument_count,
bool restore_context) {
+ ConstantPoolUnavailableScope constant_pool_unavailable(this);
+
// Optionally restore all double registers.
if (save_doubles) {
// Calculate the stack location of the saved doubles and restore them.
@@ -1059,7 +1076,6 @@ void MacroAssembler::LeaveExitFrame(bool save_doubles,
mov(ip, Operand(ExternalReference(Isolate::kCEntryFPAddress, isolate())));
str(r3, MemOperand(ip));
-
// Restore current context from top and clear it in debug mode.
if (restore_context) {
mov(ip, Operand(ExternalReference(Isolate::kContextAddress, isolate())));
@@ -1366,6 +1382,11 @@ void MacroAssembler::JumpToHandlerEntry() {
// Compute the handler entry address and jump to it. The handler table is
// a fixed array of (smi-tagged) code offsets.
// r0 = exception, r1 = code object, r2 = state.
+
+ ConstantPoolUnavailableScope constant_pool_unavailable(this);
+ if (FLAG_enable_ool_constant_pool) {
+ ldr(pp, FieldMemOperand(r1, Code::kConstantPoolOffset)); // Constant pool.
+ }
ldr(r3, FieldMemOperand(r1, Code::kHandlerTableOffset)); // Handler table.
add(r3, r3, Operand(FixedArray::kHeaderSize - kHeapObjectTag));
mov(r2, Operand(r2, LSR, StackHandler::kKindWidth)); // Handler index.
@@ -2411,7 +2432,7 @@ void MacroAssembler::CallApiFunctionAndReturn(
{
FrameScope frame(this, StackFrame::INTERNAL);
CallExternalReference(
- ExternalReference(Runtime::kPromoteScheduledException, isolate()),
+ ExternalReference(Runtime::kHiddenPromoteScheduledException, isolate()),
0);
}
jmp(&exception_handled);
@@ -2806,16 +2827,8 @@ void MacroAssembler::Check(Condition cond, BailoutReason reason) {
void MacroAssembler::Abort(BailoutReason reason) {
Label abort_start;
bind(&abort_start);
- // We want to pass the msg string like a smi to avoid GC
- // problems, however msg is not guaranteed to be aligned
- // properly. Instead, we pass an aligned pointer that is
- // a proper v8 smi, but also pass the alignment difference
- // from the real pointer as a smi.
- const char* msg = GetBailoutReason(reason);
- intptr_t p1 = reinterpret_cast<intptr_t>(msg);
- intptr_t p0 = (p1 & ~kSmiTagMask) + kSmiTag;
- ASSERT(reinterpret_cast<Object*>(p0)->IsSmi());
#ifdef DEBUG
+ const char* msg = GetBailoutReason(reason);
if (msg != NULL) {
RecordComment("Abort message: ");
RecordComment(msg);
@@ -2827,25 +2840,24 @@ void MacroAssembler::Abort(BailoutReason reason) {
}
#endif
- mov(r0, Operand(p0));
- push(r0);
- mov(r0, Operand(Smi::FromInt(p1 - p0)));
+ mov(r0, Operand(Smi::FromInt(reason)));
push(r0);
+
// Disable stub call restrictions to always allow calls to abort.
if (!has_frame_) {
// We don't actually want to generate a pile of code for this, so just
// claim there is a stack frame, without generating one.
FrameScope scope(this, StackFrame::NONE);
- CallRuntime(Runtime::kAbort, 2);
+ CallRuntime(Runtime::kAbort, 1);
} else {
- CallRuntime(Runtime::kAbort, 2);
+ CallRuntime(Runtime::kAbort, 1);
}
// will not return here
if (is_const_pool_blocked()) {
// If the calling code cares about the exact number of
// instructions generated, we insert padding here to keep the size
// of the Abort macro constant.
- static const int kExpectedAbortInstructions = 10;
+ static const int kExpectedAbortInstructions = 7;
int abort_instructions = InstructionsGeneratedSince(&abort_start);
ASSERT(abort_instructions <= kExpectedAbortInstructions);
while (abort_instructions++ < kExpectedAbortInstructions) {
@@ -2899,31 +2911,6 @@ void MacroAssembler::LoadTransitionedArrayMapConditional(
}
-void MacroAssembler::LoadInitialArrayMap(
- Register function_in, Register scratch,
- Register map_out, bool can_have_holes) {
- ASSERT(!function_in.is(map_out));
- Label done;
- ldr(map_out, FieldMemOperand(function_in,
- JSFunction::kPrototypeOrInitialMapOffset));
- if (!FLAG_smi_only_arrays) {
- ElementsKind kind = can_have_holes ? FAST_HOLEY_ELEMENTS : FAST_ELEMENTS;
- LoadTransitionedArrayMapConditional(FAST_SMI_ELEMENTS,
- kind,
- map_out,
- scratch,
- &done);
- } else if (can_have_holes) {
- LoadTransitionedArrayMapConditional(FAST_SMI_ELEMENTS,
- FAST_HOLEY_SMI_ELEMENTS,
- map_out,
- scratch,
- &done);
- }
- bind(&done);
-}
-
-
void MacroAssembler::LoadGlobalFunction(int index, Register function) {
// Load the global or builtins object from the current context.
ldr(function,
@@ -2936,19 +2923,6 @@ void MacroAssembler::LoadGlobalFunction(int index, Register function) {
}
-void MacroAssembler::LoadArrayFunction(Register function) {
- // Load the global or builtins object from the current context.
- ldr(function,
- MemOperand(cp, Context::SlotOffset(Context::GLOBAL_OBJECT_INDEX)));
- // Load the global context from the global or builtins object.
- ldr(function,
- FieldMemOperand(function, GlobalObject::kGlobalContextOffset));
- // Load the array function from the native context.
- ldr(function,
- MemOperand(function, Context::SlotOffset(Context::ARRAY_FUNCTION_INDEX)));
-}
-
-
void MacroAssembler::LoadGlobalFunctionInitialMap(Register function,
Register map,
Register scratch) {
@@ -3070,6 +3044,20 @@ void MacroAssembler::AssertName(Register object) {
}
+void MacroAssembler::AssertUndefinedOrAllocationSite(Register object,
+ Register scratch) {
+ if (emit_debug_code()) {
+ Label done_checking;
+ AssertNotSmi(object);
+ CompareRoot(object, Heap::kUndefinedValueRootIndex);
+ b(eq, &done_checking);
+ ldr(scratch, FieldMemOperand(object, HeapObject::kMapOffset));
+ CompareRoot(scratch, Heap::kAllocationSiteMapRootIndex);
+ Assert(eq, kExpectedUndefinedOrCell);
+ bind(&done_checking);
+ }
+}
+
void MacroAssembler::AssertIsRoot(Register reg, Heap::RootListIndex index) {
if (emit_debug_code()) {
@@ -3579,22 +3567,31 @@ void MacroAssembler::CallCFunctionHelper(Register function,
void MacroAssembler::GetRelocatedValueLocation(Register ldr_location,
- Register result) {
+ Register result) {
const uint32_t kLdrOffsetMask = (1 << 12) - 1;
- const int32_t kPCRegOffset = 2 * kPointerSize;
ldr(result, MemOperand(ldr_location));
if (emit_debug_code()) {
- // Check that the instruction is a ldr reg, [pc + offset] .
- and_(result, result, Operand(kLdrPCPattern));
- cmp(result, Operand(kLdrPCPattern));
- Check(eq, kTheInstructionToPatchShouldBeALoadFromPc);
+ // Check that the instruction is a ldr reg, [<pc or pp> + offset] .
+ if (FLAG_enable_ool_constant_pool) {
+ and_(result, result, Operand(kLdrPpPattern));
+ cmp(result, Operand(kLdrPpPattern));
+ Check(eq, kTheInstructionToPatchShouldBeALoadFromPp);
+ } else {
+ and_(result, result, Operand(kLdrPCPattern));
+ cmp(result, Operand(kLdrPCPattern));
+ Check(eq, kTheInstructionToPatchShouldBeALoadFromPc);
+ }
// Result was clobbered. Restore it.
ldr(result, MemOperand(ldr_location));
}
// Get the address of the constant.
and_(result, result, Operand(kLdrOffsetMask));
- add(result, ldr_location, Operand(result));
- add(result, result, Operand(kPCRegOffset));
+ if (FLAG_enable_ool_constant_pool) {
+ add(result, pp, Operand(result));
+ } else {
+ add(result, ldr_location, Operand(result));
+ add(result, result, Operand(Instruction::kPCReadOffset));
+ }
}
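
Both the new LoadConstantPoolPointerRegister above and the non-ool branch here fold in Instruction::kPCReadOffset, i.e. the ARM convention that reading pc yields the address of the current instruction plus 8. A small host-side sketch of that address arithmetic for a ldr rX, [pc, #offset]; the helper name is made up for illustration:

#include <cstdint>

// Sketch of the pc-relative computation above, assuming kPCReadOffset == 8.
uintptr_t LiteralAddress(uintptr_t ldr_location, uint32_t ldr_instruction) {
  const uint32_t kLdrOffsetMask = (1u << 12) - 1;  // low 12 bits: the immediate
  const uintptr_t kPCReadOffset = 8;               // pc reads as instruction + 8
  uint32_t offset = ldr_instruction & kLdrOffsetMask;
  // ldr rX, [pc, #offset] loads from (address of the ldr) + 8 + offset,
  // which is what the non-ool path computes with its two add instructions.
  return ldr_location + kPCReadOffset + offset;
}
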
@@ -3849,9 +3846,9 @@ void MacroAssembler::Throw(BailoutReason reason) {
// We don't actually want to generate a pile of code for this, so just
// claim there is a stack frame, without generating one.
FrameScope scope(this, StackFrame::NONE);
- CallRuntime(Runtime::kThrowMessage, 1);
+ CallRuntime(Runtime::kHiddenThrowMessage, 1);
} else {
- CallRuntime(Runtime::kThrowMessage, 1);
+ CallRuntime(Runtime::kHiddenThrowMessage, 1);
}
// will not return here
if (is_const_pool_blocked()) {
@@ -4079,6 +4076,26 @@ void CodePatcher::EmitCondition(Condition cond) {
}
+void MacroAssembler::TruncatingDiv(Register result,
+ Register dividend,
+ int32_t divisor) {
+ ASSERT(!dividend.is(result));
+ ASSERT(!dividend.is(ip));
+ ASSERT(!result.is(ip));
+ MultiplierAndShift ms(divisor);
+ mov(ip, Operand(ms.multiplier()));
+ smull(ip, result, dividend, ip);
+ if (divisor > 0 && ms.multiplier() < 0) {
+ add(result, result, Operand(dividend));
+ }
+ if (divisor < 0 && ms.multiplier() > 0) {
+ sub(result, result, Operand(dividend));
+ }
+ if (ms.shift() > 0) mov(result, Operand(result, ASR, ms.shift()));
+ add(result, result, Operand(dividend, LSR, 31));
+}
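
TruncatingDiv is the standard multiply-by-magic-constant trick: take the high half of a 32x32 signed multiply, correct by +/- the dividend depending on the signs of divisor and multiplier, shift arithmetically, then add the dividend's sign bit so the quotient truncates toward zero. A host-side sketch of the same sequence for divisor 7, using the well-known 32-bit magic pair (multiplier 0x92492493, shift 2); in the patch the pair comes from MultiplierAndShift, and the concrete constants here are only for illustration:

#include <cassert>
#include <cstdint>

// Mirrors the instruction sequence emitted by TruncatingDiv for divisor 7.
int32_t TruncatingDivBy7(int32_t dividend) {
  const int32_t multiplier = static_cast<int32_t>(0x92492493u);  // negative
  const int shift = 2;
  // smull: keep the high 32 bits of the 64-bit signed product.
  int32_t result = static_cast<int32_t>(
      (static_cast<int64_t>(dividend) * multiplier) >> 32);
  // divisor > 0 && multiplier < 0: add the dividend back.
  result += dividend;
  // ASR by the shift amount (>> of a negative value is arithmetic on the
  // usual toolchains, matching the emitted asr).
  if (shift > 0) result >>= shift;
  // Add the sign bit so negative quotients round toward zero.
  result += static_cast<uint32_t>(dividend) >> 31;
  return result;
}

int main() {
  const int32_t tests[] = {21, -21, 20, -20, 0, 2147483647, -2147483647};
  for (int32_t n : tests) assert(TruncatingDivBy7(n) == n / 7);
}
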
+
+
} } // namespace v8::internal
#endif // V8_TARGET_ARCH_ARM
diff --git a/deps/v8/src/arm/macro-assembler-arm.h b/deps/v8/src/arm/macro-assembler-arm.h
index 7861d42aab..6b6ecd32da 100644
--- a/deps/v8/src/arm/macro-assembler-arm.h
+++ b/deps/v8/src/arm/macro-assembler-arm.h
@@ -540,9 +540,6 @@ class MacroAssembler: public Assembler {
// Generates function and stub prologue code.
void Prologue(PrologueFrameMode frame_mode);
- // Loads the constant pool pointer (pp) register.
- void LoadConstantPoolPointerRegister();
-
// Enter exit frame.
// stack_space - extra stack space, used for alignment before call to C.
void EnterExitFrame(bool save_doubles, int stack_space = 0);
@@ -570,14 +567,7 @@ class MacroAssembler: public Assembler {
Register scratch,
Label* no_map_match);
- // Load the initial map for new Arrays from a JSFunction.
- void LoadInitialArrayMap(Register function_in,
- Register scratch,
- Register map_out,
- bool can_have_holes);
-
void LoadGlobalFunction(int index, Register function);
- void LoadArrayFunction(Register function);
// Load the initial map from the global function. The registers
// function and map can be the same, function is then overwritten.
@@ -1162,6 +1152,10 @@ class MacroAssembler: public Assembler {
}
+ // Emit code for a truncating division by a constant. The dividend register is
+ // unchanged and ip gets clobbered. Dividend and result must be different.
+ void TruncatingDiv(Register result, Register dividend, int32_t divisor);
+
// ---------------------------------------------------------------------------
// StatsCounter support
@@ -1296,6 +1290,10 @@ class MacroAssembler: public Assembler {
// Abort execution if argument is not a name, enabled via --debug-code.
void AssertName(Register object);
+ // Abort execution if argument is not undefined or an AllocationSite, enabled
+ // via --debug-code.
+ void AssertUndefinedOrAllocationSite(Register object, Register scratch);
+
// Abort execution if reg is not the root value with the given index,
// enabled via --debug-code.
void AssertIsRoot(Register reg, Heap::RootListIndex index);
@@ -1390,7 +1388,7 @@ class MacroAssembler: public Assembler {
}
// Activation support.
- void EnterFrame(StackFrame::Type type);
+ void EnterFrame(StackFrame::Type type, bool load_constant_pool = false);
// Returns the pc offset at which the frame ends.
int LeaveFrame(StackFrame::Type type);
@@ -1467,6 +1465,9 @@ class MacroAssembler: public Assembler {
MemOperand SafepointRegisterSlot(Register reg);
MemOperand SafepointRegistersAndDoublesSlot(Register reg);
+ // Loads the constant pool pointer (pp) register.
+ void LoadConstantPoolPointerRegister();
+
bool generating_stub_;
bool has_frame_;
// This handle will be patched with the code object on installation.
@@ -1516,6 +1517,70 @@ class CodePatcher {
};
+class FrameAndConstantPoolScope {
+ public:
+ FrameAndConstantPoolScope(MacroAssembler* masm, StackFrame::Type type)
+ : masm_(masm),
+ type_(type),
+ old_has_frame_(masm->has_frame()),
+ old_constant_pool_available_(masm->is_constant_pool_available()) {
+ masm->set_has_frame(true);
+ masm->set_constant_pool_available(true);
+ if (type_ != StackFrame::MANUAL && type_ != StackFrame::NONE) {
+ masm->EnterFrame(type, !old_constant_pool_available_);
+ }
+ }
+
+ ~FrameAndConstantPoolScope() {
+ masm_->LeaveFrame(type_);
+ masm_->set_has_frame(old_has_frame_);
+ masm_->set_constant_pool_available(old_constant_pool_available_);
+ }
+
+ // Normally we generate the leave-frame code when this object goes
+ // out of scope. Sometimes we may need to generate the code somewhere else
+ // in addition. Calling this will achieve that, but the object stays in
+ // scope, the MacroAssembler is still marked as being in a frame scope, and
+ // the code will be generated again when it goes out of scope.
+ void GenerateLeaveFrame() {
+ ASSERT(type_ != StackFrame::MANUAL && type_ != StackFrame::NONE);
+ masm_->LeaveFrame(type_);
+ }
+
+ private:
+ MacroAssembler* masm_;
+ StackFrame::Type type_;
+ bool old_has_frame_;
+ bool old_constant_pool_available_;
+
+ DISALLOW_IMPLICIT_CONSTRUCTORS(FrameAndConstantPoolScope);
+};
+
+
+// Class for scoping the unavailability of constant pool access.
+class ConstantPoolUnavailableScope {
+ public:
+ explicit ConstantPoolUnavailableScope(MacroAssembler* masm)
+ : masm_(masm),
+ old_constant_pool_available_(masm->is_constant_pool_available()) {
+ if (FLAG_enable_ool_constant_pool) {
+ masm_->set_constant_pool_available(false);
+ }
+ }
+ ~ConstantPoolUnavailableScope() {
+ if (FLAG_enable_ool_constant_pool) {
+ masm_->set_constant_pool_available(old_constant_pool_available_);
+ }
+ }
+
+ private:
+ MacroAssembler* masm_;
+  bool old_constant_pool_available_;
+
+ DISALLOW_IMPLICIT_CONSTRUCTORS(ConstantPoolUnavailableScope);
+};
+
+
// -----------------------------------------------------------------------------
// Static helper functions.
diff --git a/deps/v8/src/arm/simulator-arm.cc b/deps/v8/src/arm/simulator-arm.cc
index ac36687fca..8f7c1e8bb2 100644
--- a/deps/v8/src/arm/simulator-arm.cc
+++ b/deps/v8/src/arm/simulator-arm.cc
@@ -796,6 +796,10 @@ Simulator::Simulator(Isolate* isolate) : isolate_(isolate) {
}
+Simulator::~Simulator() {
+}
+
+
// When the generated code calls an external reference we need to catch that in
// the simulator. The external reference will be a function compiled for the
// host architecture. We need to call that function instead of trying to
@@ -3466,7 +3470,8 @@ void Simulator::DecodeSpecialCondition(Instruction* instr) {
if ((instr->Bits(18, 16) == 0) && (instr->Bits(11, 6) == 0x28) &&
(instr->Bit(4) == 1)) {
// vmovl signed
- int Vd = (instr->Bit(22) << 4) | instr->VdValue();
+ if ((instr->VdValue() & 1) != 0) UNIMPLEMENTED();
+ int Vd = (instr->Bit(22) << 3) | (instr->VdValue() >> 1);
int Vm = (instr->Bit(5) << 4) | instr->VmValue();
int imm3 = instr->Bits(21, 19);
if ((imm3 != 1) && (imm3 != 2) && (imm3 != 4)) UNIMPLEMENTED();
@@ -3489,7 +3494,8 @@ void Simulator::DecodeSpecialCondition(Instruction* instr) {
if ((instr->Bits(18, 16) == 0) && (instr->Bits(11, 6) == 0x28) &&
(instr->Bit(4) == 1)) {
// vmovl unsigned
- int Vd = (instr->Bit(22) << 4) | instr->VdValue();
+ if ((instr->VdValue() & 1) != 0) UNIMPLEMENTED();
+ int Vd = (instr->Bit(22) << 3) | (instr->VdValue() >> 1);
int Vm = (instr->Bit(5) << 4) | instr->VmValue();
int imm3 = instr->Bits(21, 19);
if ((imm3 != 1) && (imm3 != 2) && (imm3 != 4)) UNIMPLEMENTED();
diff --git a/deps/v8/src/arm/simulator-arm.h b/deps/v8/src/arm/simulator-arm.h
index 0af5162e93..24d7fe58c4 100644
--- a/deps/v8/src/arm/simulator-arm.h
+++ b/deps/v8/src/arm/simulator-arm.h
@@ -207,6 +207,10 @@ class Simulator {
void set_pc(int32_t value);
int32_t get_pc() const;
+ Address get_sp() {
+ return reinterpret_cast<Address>(static_cast<intptr_t>(get_register(sp)));
+ }
+
// Accessor to the internal simulator stack area.
uintptr_t StackLimit() const;
diff --git a/deps/v8/src/arm/stub-cache-arm.cc b/deps/v8/src/arm/stub-cache-arm.cc
index 694a4ed68f..c595e42745 100644
--- a/deps/v8/src/arm/stub-cache-arm.cc
+++ b/deps/v8/src/arm/stub-cache-arm.cc
@@ -322,7 +322,7 @@ void StubCompiler::GenerateFastPropertyLoad(MacroAssembler* masm,
bool inobject,
int index,
Representation representation) {
- ASSERT(!FLAG_track_double_fields || !representation.IsDouble());
+ ASSERT(!representation.IsDouble());
int offset = index * kPointerSize;
if (!inobject) {
// Calculate the offset into the properties array.
@@ -351,60 +351,6 @@ void StubCompiler::GenerateLoadArrayLength(MacroAssembler* masm,
}
-// Generate code to check if an object is a string. If the object is a
-// heap object, its map's instance type is left in the scratch1 register.
-// If this is not needed, scratch1 and scratch2 may be the same register.
-static void GenerateStringCheck(MacroAssembler* masm,
- Register receiver,
- Register scratch1,
- Register scratch2,
- Label* smi,
- Label* non_string_object) {
- // Check that the receiver isn't a smi.
- __ JumpIfSmi(receiver, smi);
-
- // Check that the object is a string.
- __ ldr(scratch1, FieldMemOperand(receiver, HeapObject::kMapOffset));
- __ ldrb(scratch1, FieldMemOperand(scratch1, Map::kInstanceTypeOffset));
- __ and_(scratch2, scratch1, Operand(kIsNotStringMask));
- // The cast is to resolve the overload for the argument of 0x0.
- __ cmp(scratch2, Operand(static_cast<int32_t>(kStringTag)));
- __ b(ne, non_string_object);
-}
-
-
-// Generate code to load the length from a string object and return the length.
-// If the receiver object is not a string or a wrapped string object the
-// execution continues at the miss label. The register containing the
-// receiver is potentially clobbered.
-void StubCompiler::GenerateLoadStringLength(MacroAssembler* masm,
- Register receiver,
- Register scratch1,
- Register scratch2,
- Label* miss) {
- Label check_wrapper;
-
- // Check if the object is a string leaving the instance type in the
- // scratch1 register.
- GenerateStringCheck(masm, receiver, scratch1, scratch2, miss, &check_wrapper);
-
- // Load length directly from the string.
- __ ldr(r0, FieldMemOperand(receiver, String::kLengthOffset));
- __ Ret();
-
- // Check if the object is a JSValue wrapper.
- __ bind(&check_wrapper);
- __ cmp(scratch1, Operand(JS_VALUE_TYPE));
- __ b(ne, miss);
-
- // Unwrap the value and check if the wrapped value is a string.
- __ ldr(scratch1, FieldMemOperand(receiver, JSValue::kValueOffset));
- GenerateStringCheck(masm, scratch1, scratch2, scratch2, miss, miss);
- __ ldr(r0, FieldMemOperand(scratch1, String::kLengthOffset));
- __ Ret();
-}
-
-
void StubCompiler::GenerateLoadFunctionPrototype(MacroAssembler* masm,
Register receiver,
Register scratch1,
@@ -481,11 +427,11 @@ void StoreStubCompiler::GenerateStoreTransition(MacroAssembler* masm,
__ Move(scratch1, constant);
__ cmp(value_reg, scratch1);
__ b(ne, miss_label);
- } else if (FLAG_track_fields && representation.IsSmi()) {
+ } else if (representation.IsSmi()) {
__ JumpIfNotSmi(value_reg, miss_label);
- } else if (FLAG_track_heap_object_fields && representation.IsHeapObject()) {
+ } else if (representation.IsHeapObject()) {
__ JumpIfSmi(value_reg, miss_label);
- } else if (FLAG_track_double_fields && representation.IsDouble()) {
+ } else if (representation.IsDouble()) {
Label do_store, heap_number;
__ LoadRoot(scratch3, Heap::kHeapNumberMapRootIndex);
__ AllocateHeapNumber(storage_reg, scratch1, scratch2, scratch3, slow);
@@ -559,15 +505,15 @@ void StoreStubCompiler::GenerateStoreTransition(MacroAssembler* masm,
if (index < 0) {
// Set the property straight into the object.
int offset = object->map()->instance_size() + (index * kPointerSize);
- if (FLAG_track_double_fields && representation.IsDouble()) {
+ if (representation.IsDouble()) {
__ str(storage_reg, FieldMemOperand(receiver_reg, offset));
} else {
__ str(value_reg, FieldMemOperand(receiver_reg, offset));
}
- if (!FLAG_track_fields || !representation.IsSmi()) {
+ if (!representation.IsSmi()) {
// Update the write barrier for the array address.
- if (!FLAG_track_double_fields || !representation.IsDouble()) {
+ if (!representation.IsDouble()) {
__ mov(storage_reg, value_reg);
}
__ RecordWriteField(receiver_reg,
@@ -585,15 +531,15 @@ void StoreStubCompiler::GenerateStoreTransition(MacroAssembler* masm,
// Get the properties array
__ ldr(scratch1,
FieldMemOperand(receiver_reg, JSObject::kPropertiesOffset));
- if (FLAG_track_double_fields && representation.IsDouble()) {
+ if (representation.IsDouble()) {
__ str(storage_reg, FieldMemOperand(scratch1, offset));
} else {
__ str(value_reg, FieldMemOperand(scratch1, offset));
}
- if (!FLAG_track_fields || !representation.IsSmi()) {
+ if (!representation.IsSmi()) {
// Update the write barrier for the array address.
- if (!FLAG_track_double_fields || !representation.IsDouble()) {
+ if (!representation.IsDouble()) {
__ mov(storage_reg, value_reg);
}
__ RecordWriteField(scratch1,
@@ -643,11 +589,11 @@ void StoreStubCompiler::GenerateStoreField(MacroAssembler* masm,
Representation representation = lookup->representation();
ASSERT(!representation.IsNone());
- if (FLAG_track_fields && representation.IsSmi()) {
+ if (representation.IsSmi()) {
__ JumpIfNotSmi(value_reg, miss_label);
- } else if (FLAG_track_heap_object_fields && representation.IsHeapObject()) {
+ } else if (representation.IsHeapObject()) {
__ JumpIfSmi(value_reg, miss_label);
- } else if (FLAG_track_double_fields && representation.IsDouble()) {
+ } else if (representation.IsDouble()) {
// Load the double storage.
if (index < 0) {
int offset = object->map()->instance_size() + (index * kPointerSize);
@@ -688,7 +634,7 @@ void StoreStubCompiler::GenerateStoreField(MacroAssembler* masm,
int offset = object->map()->instance_size() + (index * kPointerSize);
__ str(value_reg, FieldMemOperand(receiver_reg, offset));
- if (!FLAG_track_fields || !representation.IsSmi()) {
+ if (!representation.IsSmi()) {
// Skip updating write barrier if storing a smi.
__ JumpIfSmi(value_reg, &exit);
@@ -712,7 +658,7 @@ void StoreStubCompiler::GenerateStoreField(MacroAssembler* masm,
FieldMemOperand(receiver_reg, JSObject::kPropertiesOffset));
__ str(value_reg, FieldMemOperand(scratch1, offset));
- if (!FLAG_track_fields || !representation.IsSmi()) {
+ if (!representation.IsSmi()) {
// Skip updating write barrier if storing a smi.
__ JumpIfSmi(value_reg, &exit);
@@ -783,13 +729,14 @@ static void CompileCallLoadPropertyWithInterceptor(
// Generate call to api function.
-static void GenerateFastApiCall(MacroAssembler* masm,
- const CallOptimization& optimization,
- Handle<Map> receiver_map,
- Register receiver,
- Register scratch_in,
- int argc,
- Register* values) {
+void StubCompiler::GenerateFastApiCall(MacroAssembler* masm,
+ const CallOptimization& optimization,
+ Handle<Map> receiver_map,
+ Register receiver,
+ Register scratch_in,
+ bool is_store,
+ int argc,
+ Register* values) {
ASSERT(!receiver.is(scratch_in));
__ push(receiver);
// Write the arguments to stack frame.
@@ -854,7 +801,7 @@ static void GenerateFastApiCall(MacroAssembler* masm,
__ mov(api_function_address, Operand(ref));
// Jump to stub.
- CallApiFunctionStub stub(true, call_data_undefined, argc);
+ CallApiFunctionStub stub(is_store, call_data_undefined, argc);
__ TailCallStub(&stub);
}
@@ -878,9 +825,6 @@ Register StubCompiler::CheckPrototypes(Handle<HeapType> type,
Label* miss,
PrototypeCheckType check) {
Handle<Map> receiver_map(IC::TypeToMap(*type, isolate()));
- // Make sure that the type feedback oracle harvests the receiver map.
- // TODO(svenpanne) Remove this hack when all ICs are reworked.
- __ mov(scratch1, Operand(receiver_map));
// Make sure there's no overlap between holder and object registers.
ASSERT(!scratch1.is(object_reg) && !scratch1.is(holder_reg));
@@ -1076,15 +1020,6 @@ void LoadStubCompiler::GenerateLoadConstant(Handle<Object> value) {
void LoadStubCompiler::GenerateLoadCallback(
- const CallOptimization& call_optimization,
- Handle<Map> receiver_map) {
- GenerateFastApiCall(
- masm(), call_optimization, receiver_map,
- receiver(), scratch3(), 0, NULL);
-}
-
-
-void LoadStubCompiler::GenerateLoadCallback(
Register reg,
Handle<ExecutableAccessorInfo> callback) {
// Build AccessorInfo::args_ list on the stack and push property name below
@@ -1173,7 +1108,7 @@ void LoadStubCompiler::GenerateLoadInterceptor(
// Save necessary data before invoking an interceptor.
// Requires a frame to make GC aware of pushed pointers.
{
- FrameScope frame_scope(masm(), StackFrame::INTERNAL);
+ FrameAndConstantPoolScope frame_scope(masm(), StackFrame::INTERNAL);
if (must_preserve_receiver_reg) {
__ Push(receiver(), holder_reg, this->name());
} else {
@@ -1260,24 +1195,6 @@ Handle<Code> StoreStubCompiler::CompileStoreCallback(
}
-Handle<Code> StoreStubCompiler::CompileStoreCallback(
- Handle<JSObject> object,
- Handle<JSObject> holder,
- Handle<Name> name,
- const CallOptimization& call_optimization) {
- HandlerFrontend(IC::CurrentTypeOf(object, isolate()),
- receiver(), holder, name);
-
- Register values[] = { value() };
- GenerateFastApiCall(
- masm(), call_optimization, handle(object->map()),
- receiver(), scratch3(), 1, values);
-
- // Return the generated code.
- return GetCode(kind(), Code::FAST, name);
-}
-
-
#undef __
#define __ ACCESS_MASM(masm)
@@ -1285,20 +1202,16 @@ Handle<Code> StoreStubCompiler::CompileStoreCallback(
void StoreStubCompiler::GenerateStoreViaSetter(
MacroAssembler* masm,
Handle<HeapType> type,
+ Register receiver,
Handle<JSFunction> setter) {
// ----------- S t a t e -------------
- // -- r0 : value
- // -- r1 : receiver
- // -- r2 : name
// -- lr : return address
// -----------------------------------
{
- FrameScope scope(masm, StackFrame::INTERNAL);
- Register receiver = r1;
- Register value = r0;
+ FrameAndConstantPoolScope scope(masm, StackFrame::INTERNAL);
// Save value register, so we can restore it later.
- __ push(value);
+ __ push(value());
if (!setter.is_null()) {
// Call the JavaScript setter with receiver and value on the stack.
@@ -1308,7 +1221,7 @@ void StoreStubCompiler::GenerateStoreViaSetter(
FieldMemOperand(
receiver, JSGlobalObject::kGlobalReceiverOffset));
}
- __ Push(receiver, value);
+ __ Push(receiver, value());
ParameterCount actual(1);
ParameterCount expected(setter);
__ InvokeFunction(setter, expected, actual,
@@ -1336,21 +1249,6 @@ void StoreStubCompiler::GenerateStoreViaSetter(
Handle<Code> StoreStubCompiler::CompileStoreInterceptor(
Handle<JSObject> object,
Handle<Name> name) {
- Label miss;
-
- // Check that the map of the object hasn't changed.
- __ CheckMap(receiver(), scratch1(), Handle<Map>(object->map()), &miss,
- DO_SMI_CHECK);
-
- // Perform global security token check if needed.
- if (object->IsJSGlobalProxy()) {
- __ CheckAccessGlobalProxy(receiver(), scratch1(), &miss);
- }
-
- // Stub is never generated for non-global objects that require access
- // checks.
- ASSERT(object->IsJSGlobalProxy() || !object->IsAccessCheckNeeded());
-
__ Push(receiver(), this->name(), value());
// Do tail-call to the runtime system.
@@ -1358,10 +1256,6 @@ Handle<Code> StoreStubCompiler::CompileStoreInterceptor(
ExternalReference(IC_Utility(IC::kStoreInterceptorProperty), isolate());
__ TailCallExternalReference(store_ic_property, 3, 1);
- // Handle store cache miss.
- __ bind(&miss);
- TailCallBuiltin(masm(), MissBuiltin(kind()));
-
// Return the generated code.
return GetCode(kind(), Code::FAST, name);
}
@@ -1396,16 +1290,21 @@ Register* KeyedLoadStubCompiler::registers() {
}
+Register StoreStubCompiler::value() {
+ return r0;
+}
+
+
Register* StoreStubCompiler::registers() {
- // receiver, name, value, scratch1, scratch2, scratch3.
- static Register registers[] = { r1, r2, r0, r3, r4, r5 };
+ // receiver, name, scratch1, scratch2, scratch3.
+ static Register registers[] = { r1, r2, r3, r4, r5 };
return registers;
}
Register* KeyedStoreStubCompiler::registers() {
- // receiver, name, value, scratch1, scratch2, scratch3.
- static Register registers[] = { r2, r1, r0, r3, r4, r5 };
+ // receiver, name, scratch1, scratch2, scratch3.
+ static Register registers[] = { r2, r1, r3, r4, r5 };
return registers;
}
@@ -1424,7 +1323,7 @@ void LoadStubCompiler::GenerateLoadViaGetter(MacroAssembler* masm,
// -- lr : return address
// -----------------------------------
{
- FrameScope scope(masm, StackFrame::INTERNAL);
+ FrameAndConstantPoolScope scope(masm, StackFrame::INTERNAL);
if (!getter.is_null()) {
// Call the JavaScript getter with the receiver on the stack.
@@ -1537,6 +1436,17 @@ Handle<Code> BaseLoadStoreStubCompiler::CompilePolymorphicIC(
}
+void StoreStubCompiler::GenerateStoreArrayLength() {
+ // Prepare tail call to StoreIC_ArrayLength.
+ __ Push(receiver(), value());
+
+ ExternalReference ref =
+ ExternalReference(IC_Utility(IC::kStoreIC_ArrayLength),
+ masm()->isolate());
+ __ TailCallExternalReference(ref, 2, 1);
+}
+
+
Handle<Code> KeyedStoreStubCompiler::CompileStorePolymorphic(
MapHandleList* receiver_maps,
CodeHandleList* handler_stubs,
diff --git a/deps/v8/src/arm64/OWNERS b/deps/v8/src/arm64/OWNERS
new file mode 100644
index 0000000000..906a5ce641
--- /dev/null
+++ b/deps/v8/src/arm64/OWNERS
@@ -0,0 +1 @@
+rmcilroy@chromium.org
diff --git a/deps/v8/src/arm64/assembler-arm64-inl.h b/deps/v8/src/arm64/assembler-arm64-inl.h
new file mode 100644
index 0000000000..b56e3ed2a1
--- /dev/null
+++ b/deps/v8/src/arm64/assembler-arm64-inl.h
@@ -0,0 +1,1229 @@
+// Copyright 2013 the V8 project authors. All rights reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following
+// disclaimer in the documentation and/or other materials provided
+// with the distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived
+// from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+#ifndef V8_ARM64_ASSEMBLER_ARM64_INL_H_
+#define V8_ARM64_ASSEMBLER_ARM64_INL_H_
+
+#include "arm64/assembler-arm64.h"
+#include "cpu.h"
+#include "debug.h"
+
+
+namespace v8 {
+namespace internal {
+
+
+void RelocInfo::apply(intptr_t delta) {
+ UNIMPLEMENTED();
+}
+
+
+void RelocInfo::set_target_address(Address target, WriteBarrierMode mode) {
+ ASSERT(IsCodeTarget(rmode_) || IsRuntimeEntry(rmode_));
+ Assembler::set_target_address_at(pc_, host_, target);
+ if (mode == UPDATE_WRITE_BARRIER && host() != NULL && IsCodeTarget(rmode_)) {
+ Object* target_code = Code::GetCodeFromTargetAddress(target);
+ host()->GetHeap()->incremental_marking()->RecordWriteIntoCode(
+ host(), this, HeapObject::cast(target_code));
+ }
+}
+
+
+inline unsigned CPURegister::code() const {
+ ASSERT(IsValid());
+ return reg_code;
+}
+
+
+inline CPURegister::RegisterType CPURegister::type() const {
+ ASSERT(IsValidOrNone());
+ return reg_type;
+}
+
+
+inline RegList CPURegister::Bit() const {
+ ASSERT(reg_code < (sizeof(RegList) * kBitsPerByte));
+ return IsValid() ? 1UL << reg_code : 0;
+}
+
+
+inline unsigned CPURegister::SizeInBits() const {
+ ASSERT(IsValid());
+ return reg_size;
+}
+
+
+inline int CPURegister::SizeInBytes() const {
+ ASSERT(IsValid());
+ ASSERT(SizeInBits() % 8 == 0);
+ return reg_size / 8;
+}
+
+
+inline bool CPURegister::Is32Bits() const {
+ ASSERT(IsValid());
+ return reg_size == 32;
+}
+
+
+inline bool CPURegister::Is64Bits() const {
+ ASSERT(IsValid());
+ return reg_size == 64;
+}
+
+
+inline bool CPURegister::IsValid() const {
+ if (IsValidRegister() || IsValidFPRegister()) {
+ ASSERT(!IsNone());
+ return true;
+ } else {
+ ASSERT(IsNone());
+ return false;
+ }
+}
+
+
+inline bool CPURegister::IsValidRegister() const {
+ return IsRegister() &&
+ ((reg_size == kWRegSizeInBits) || (reg_size == kXRegSizeInBits)) &&
+ ((reg_code < kNumberOfRegisters) || (reg_code == kSPRegInternalCode));
+}
+
+
+inline bool CPURegister::IsValidFPRegister() const {
+ return IsFPRegister() &&
+ ((reg_size == kSRegSizeInBits) || (reg_size == kDRegSizeInBits)) &&
+ (reg_code < kNumberOfFPRegisters);
+}
+
+
+inline bool CPURegister::IsNone() const {
+ // kNoRegister types should always have size 0 and code 0.
+ ASSERT((reg_type != kNoRegister) || (reg_code == 0));
+ ASSERT((reg_type != kNoRegister) || (reg_size == 0));
+
+ return reg_type == kNoRegister;
+}
+
+
+inline bool CPURegister::Is(const CPURegister& other) const {
+ ASSERT(IsValidOrNone() && other.IsValidOrNone());
+ return (reg_code == other.reg_code) && (reg_size == other.reg_size) &&
+ (reg_type == other.reg_type);
+}
+
+
+inline bool CPURegister::IsRegister() const {
+ return reg_type == kRegister;
+}
+
+
+inline bool CPURegister::IsFPRegister() const {
+ return reg_type == kFPRegister;
+}
+
+
+inline bool CPURegister::IsSameSizeAndType(const CPURegister& other) const {
+ return (reg_size == other.reg_size) && (reg_type == other.reg_type);
+}
+
+
+inline bool CPURegister::IsValidOrNone() const {
+ return IsValid() || IsNone();
+}
+
+
+inline bool CPURegister::IsZero() const {
+ ASSERT(IsValid());
+ return IsRegister() && (reg_code == kZeroRegCode);
+}
+
+
+inline bool CPURegister::IsSP() const {
+ ASSERT(IsValid());
+ return IsRegister() && (reg_code == kSPRegInternalCode);
+}
+
+
+inline void CPURegList::Combine(const CPURegList& other) {
+ ASSERT(IsValid());
+ ASSERT(other.type() == type_);
+ ASSERT(other.RegisterSizeInBits() == size_);
+ list_ |= other.list();
+}
+
+
+inline void CPURegList::Remove(const CPURegList& other) {
+ ASSERT(IsValid());
+ if (other.type() == type_) {
+ list_ &= ~other.list();
+ }
+}
+
+
+inline void CPURegList::Combine(const CPURegister& other) {
+ ASSERT(other.type() == type_);
+ ASSERT(other.SizeInBits() == size_);
+ Combine(other.code());
+}
+
+
+inline void CPURegList::Remove(const CPURegister& other1,
+ const CPURegister& other2,
+ const CPURegister& other3,
+ const CPURegister& other4) {
+ if (!other1.IsNone() && (other1.type() == type_)) Remove(other1.code());
+ if (!other2.IsNone() && (other2.type() == type_)) Remove(other2.code());
+ if (!other3.IsNone() && (other3.type() == type_)) Remove(other3.code());
+ if (!other4.IsNone() && (other4.type() == type_)) Remove(other4.code());
+}
+
+
+inline void CPURegList::Combine(int code) {
+ ASSERT(IsValid());
+ ASSERT(CPURegister::Create(code, size_, type_).IsValid());
+ list_ |= (1UL << code);
+}
+
+
+inline void CPURegList::Remove(int code) {
+ ASSERT(IsValid());
+ ASSERT(CPURegister::Create(code, size_, type_).IsValid());
+ list_ &= ~(1UL << code);
+}
+
+
+inline Register Register::XRegFromCode(unsigned code) {
+ // This function returns the zero register when code = 31. The stack pointer
+ // can not be returned.
+ ASSERT(code < kNumberOfRegisters);
+ return Register::Create(code, kXRegSizeInBits);
+}
+
+
+inline Register Register::WRegFromCode(unsigned code) {
+ ASSERT(code < kNumberOfRegisters);
+ return Register::Create(code, kWRegSizeInBits);
+}
+
+
+inline FPRegister FPRegister::SRegFromCode(unsigned code) {
+ ASSERT(code < kNumberOfFPRegisters);
+ return FPRegister::Create(code, kSRegSizeInBits);
+}
+
+
+inline FPRegister FPRegister::DRegFromCode(unsigned code) {
+ ASSERT(code < kNumberOfFPRegisters);
+ return FPRegister::Create(code, kDRegSizeInBits);
+}
+
+
+inline Register CPURegister::W() const {
+ ASSERT(IsValidRegister());
+ return Register::WRegFromCode(reg_code);
+}
+
+
+inline Register CPURegister::X() const {
+ ASSERT(IsValidRegister());
+ return Register::XRegFromCode(reg_code);
+}
+
+
+inline FPRegister CPURegister::S() const {
+ ASSERT(IsValidFPRegister());
+ return FPRegister::SRegFromCode(reg_code);
+}
+
+
+inline FPRegister CPURegister::D() const {
+ ASSERT(IsValidFPRegister());
+ return FPRegister::DRegFromCode(reg_code);
+}
+
+
+// Operand.
+template<typename T>
+Operand::Operand(Handle<T> value) : reg_(NoReg) {
+ initialize_handle(value);
+}
+
+
+// Default initializer is for int types
+template<typename int_t>
+struct OperandInitializer {
+ static const bool kIsIntType = true;
+ static inline RelocInfo::Mode rmode_for(int_t) {
+ return sizeof(int_t) == 8 ? RelocInfo::NONE64 : RelocInfo::NONE32;
+ }
+ static inline int64_t immediate_for(int_t t) {
+ STATIC_ASSERT(sizeof(int_t) <= 8);
+ return t;
+ }
+};
+
+
+template<>
+struct OperandInitializer<Smi*> {
+ static const bool kIsIntType = false;
+ static inline RelocInfo::Mode rmode_for(Smi* t) {
+ return RelocInfo::NONE64;
+ }
+  static inline int64_t immediate_for(Smi* t) {
+ return reinterpret_cast<int64_t>(t);
+ }
+};
+
+
+template<>
+struct OperandInitializer<ExternalReference> {
+ static const bool kIsIntType = false;
+ static inline RelocInfo::Mode rmode_for(ExternalReference t) {
+ return RelocInfo::EXTERNAL_REFERENCE;
+ }
+  static inline int64_t immediate_for(ExternalReference t) {
+ return reinterpret_cast<int64_t>(t.address());
+ }
+};
+
+
+template<typename T>
+Operand::Operand(T t)
+ : immediate_(OperandInitializer<T>::immediate_for(t)),
+ reg_(NoReg),
+ rmode_(OperandInitializer<T>::rmode_for(t)) {}
+
+
+template<typename T>
+Operand::Operand(T t, RelocInfo::Mode rmode)
+ : immediate_(OperandInitializer<T>::immediate_for(t)),
+ reg_(NoReg),
+ rmode_(rmode) {
+ STATIC_ASSERT(OperandInitializer<T>::kIsIntType);
+}
+
+
+Operand::Operand(Register reg, Shift shift, unsigned shift_amount)
+ : reg_(reg),
+ shift_(shift),
+ extend_(NO_EXTEND),
+ shift_amount_(shift_amount),
+ rmode_(reg.Is64Bits() ? RelocInfo::NONE64 : RelocInfo::NONE32) {
+ ASSERT(reg.Is64Bits() || (shift_amount < kWRegSizeInBits));
+ ASSERT(reg.Is32Bits() || (shift_amount < kXRegSizeInBits));
+ ASSERT(!reg.IsSP());
+}
+
+
+Operand::Operand(Register reg, Extend extend, unsigned shift_amount)
+ : reg_(reg),
+ shift_(NO_SHIFT),
+ extend_(extend),
+ shift_amount_(shift_amount),
+ rmode_(reg.Is64Bits() ? RelocInfo::NONE64 : RelocInfo::NONE32) {
+ ASSERT(reg.IsValid());
+ ASSERT(shift_amount <= 4);
+ ASSERT(!reg.IsSP());
+
+ // Extend modes SXTX and UXTX require a 64-bit register.
+ ASSERT(reg.Is64Bits() || ((extend != SXTX) && (extend != UXTX)));
+}
+
+
+bool Operand::IsImmediate() const {
+ return reg_.Is(NoReg);
+}
+
+
+bool Operand::IsShiftedRegister() const {
+ return reg_.IsValid() && (shift_ != NO_SHIFT);
+}
+
+
+bool Operand::IsExtendedRegister() const {
+ return reg_.IsValid() && (extend_ != NO_EXTEND);
+}
+
+
+bool Operand::IsZero() const {
+ if (IsImmediate()) {
+ return immediate() == 0;
+ } else {
+ return reg().IsZero();
+ }
+}
+
+
+Operand Operand::ToExtendedRegister() const {
+ ASSERT(IsShiftedRegister());
+ ASSERT((shift_ == LSL) && (shift_amount_ <= 4));
+ return Operand(reg_, reg_.Is64Bits() ? UXTX : UXTW, shift_amount_);
+}
+
+
+int64_t Operand::immediate() const {
+ ASSERT(IsImmediate());
+ return immediate_;
+}
+
+
+Register Operand::reg() const {
+ ASSERT(IsShiftedRegister() || IsExtendedRegister());
+ return reg_;
+}
+
+
+Shift Operand::shift() const {
+ ASSERT(IsShiftedRegister());
+ return shift_;
+}
+
+
+Extend Operand::extend() const {
+ ASSERT(IsExtendedRegister());
+ return extend_;
+}
+
+
+unsigned Operand::shift_amount() const {
+ ASSERT(IsShiftedRegister() || IsExtendedRegister());
+ return shift_amount_;
+}
+
+
+Operand Operand::UntagSmi(Register smi) {
+ ASSERT(smi.Is64Bits());
+ return Operand(smi, ASR, kSmiShift);
+}
+
+
+Operand Operand::UntagSmiAndScale(Register smi, int scale) {
+ ASSERT(smi.Is64Bits());
+ ASSERT((scale >= 0) && (scale <= (64 - kSmiValueSize)));
+ if (scale > kSmiShift) {
+ return Operand(smi, LSL, scale - kSmiShift);
+ } else if (scale < kSmiShift) {
+ return Operand(smi, ASR, kSmiShift - scale);
+ }
+ return Operand(smi);
+}
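+
Since the 64-bit smi layout used here keeps the 32-bit payload in the upper half of the word (kSmiShift == 32), a single shift of the tagged register untags and scales at once: ASR by (32 - scale), or LSL by (scale - 32), both yield value << scale. A small host-side check of that identity; the function name is made up:

#include <cassert>
#include <cstdint>

// Host-side check of the single-shift trick above, assuming the payload
// lives in the upper 32 bits of the tagged word (kSmiShift == 32).
int64_t UntagSmiAndScaleHost(int64_t tagged_smi, int scale) {
  const int kSmiShift = 32;
  if (scale > kSmiShift) return tagged_smi << (scale - kSmiShift);
  if (scale < kSmiShift) return tagged_smi >> (kSmiShift - scale);  // ASR
  return tagged_smi;
}

int main() {
  const int64_t value = -42;
  const int64_t tagged =
      static_cast<int64_t>(static_cast<uint64_t>(value) << 32);  // tag
  assert(UntagSmiAndScaleHost(tagged, 3) == value * 8);  // untag + scale by 8
  assert(UntagSmiAndScaleHost(tagged, 0) == value);      // plain untag
}
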
+
+
+MemOperand::MemOperand(Register base, ptrdiff_t offset, AddrMode addrmode)
+ : base_(base), regoffset_(NoReg), offset_(offset), addrmode_(addrmode),
+ shift_(NO_SHIFT), extend_(NO_EXTEND), shift_amount_(0) {
+ ASSERT(base.Is64Bits() && !base.IsZero());
+}
+
+
+MemOperand::MemOperand(Register base,
+ Register regoffset,
+ Extend extend,
+ unsigned shift_amount)
+ : base_(base), regoffset_(regoffset), offset_(0), addrmode_(Offset),
+ shift_(NO_SHIFT), extend_(extend), shift_amount_(shift_amount) {
+ ASSERT(base.Is64Bits() && !base.IsZero());
+ ASSERT(!regoffset.IsSP());
+ ASSERT((extend == UXTW) || (extend == SXTW) || (extend == SXTX));
+
+ // SXTX extend mode requires a 64-bit offset register.
+ ASSERT(regoffset.Is64Bits() || (extend != SXTX));
+}
+
+
+MemOperand::MemOperand(Register base,
+ Register regoffset,
+ Shift shift,
+ unsigned shift_amount)
+ : base_(base), regoffset_(regoffset), offset_(0), addrmode_(Offset),
+ shift_(shift), extend_(NO_EXTEND), shift_amount_(shift_amount) {
+ ASSERT(base.Is64Bits() && !base.IsZero());
+ ASSERT(regoffset.Is64Bits() && !regoffset.IsSP());
+ ASSERT(shift == LSL);
+}
+
+
+MemOperand::MemOperand(Register base, const Operand& offset, AddrMode addrmode)
+ : base_(base), addrmode_(addrmode) {
+ ASSERT(base.Is64Bits() && !base.IsZero());
+
+ if (offset.IsImmediate()) {
+ offset_ = offset.immediate();
+
+ regoffset_ = NoReg;
+ } else if (offset.IsShiftedRegister()) {
+ ASSERT(addrmode == Offset);
+
+ regoffset_ = offset.reg();
+ shift_= offset.shift();
+ shift_amount_ = offset.shift_amount();
+
+ extend_ = NO_EXTEND;
+ offset_ = 0;
+
+ // These assertions match those in the shifted-register constructor.
+ ASSERT(regoffset_.Is64Bits() && !regoffset_.IsSP());
+ ASSERT(shift_ == LSL);
+ } else {
+ ASSERT(offset.IsExtendedRegister());
+ ASSERT(addrmode == Offset);
+
+ regoffset_ = offset.reg();
+ extend_ = offset.extend();
+ shift_amount_ = offset.shift_amount();
+
+ shift_= NO_SHIFT;
+ offset_ = 0;
+
+ // These assertions match those in the extended-register constructor.
+ ASSERT(!regoffset_.IsSP());
+ ASSERT((extend_ == UXTW) || (extend_ == SXTW) || (extend_ == SXTX));
+ ASSERT((regoffset_.Is64Bits() || (extend_ != SXTX)));
+ }
+}
+
+bool MemOperand::IsImmediateOffset() const {
+ return (addrmode_ == Offset) && regoffset_.Is(NoReg);
+}
+
+
+bool MemOperand::IsRegisterOffset() const {
+ return (addrmode_ == Offset) && !regoffset_.Is(NoReg);
+}
+
+
+bool MemOperand::IsPreIndex() const {
+ return addrmode_ == PreIndex;
+}
+
+
+bool MemOperand::IsPostIndex() const {
+ return addrmode_ == PostIndex;
+}
+
+Operand MemOperand::OffsetAsOperand() const {
+ if (IsImmediateOffset()) {
+ return offset();
+ } else {
+ ASSERT(IsRegisterOffset());
+ if (extend() == NO_EXTEND) {
+ return Operand(regoffset(), shift(), shift_amount());
+ } else {
+ return Operand(regoffset(), extend(), shift_amount());
+ }
+ }
+}
+
+
+void Assembler::Unreachable() {
+#ifdef USE_SIMULATOR
+ debug("UNREACHABLE", __LINE__, BREAK);
+#else
+ // Crash by branching to 0. lr now points near the fault.
+ Emit(BLR | Rn(xzr));
+#endif
+}
+
+
+Address Assembler::target_pointer_address_at(Address pc) {
+ Instruction* instr = reinterpret_cast<Instruction*>(pc);
+ ASSERT(instr->IsLdrLiteralX());
+ return reinterpret_cast<Address>(instr->ImmPCOffsetTarget());
+}
+
+
+// Read/Modify the code target address in the branch/call instruction at pc.
+Address Assembler::target_address_at(Address pc,
+ ConstantPoolArray* constant_pool) {
+ return Memory::Address_at(target_pointer_address_at(pc));
+}
+
+
+Address Assembler::target_address_at(Address pc, Code* code) {
+ ConstantPoolArray* constant_pool = code ? code->constant_pool() : NULL;
+ return target_address_at(pc, constant_pool);
+}
+
+
+Address Assembler::target_address_from_return_address(Address pc) {
+ // Returns the address of the call target from the return address that will
+ // be returned to after a call.
+ // Call sequence on ARM64 is:
+ // ldr ip0, #... @ load from literal pool
+ // blr ip0
+ Address candidate = pc - 2 * kInstructionSize;
+ Instruction* instr = reinterpret_cast<Instruction*>(candidate);
+ USE(instr);
+ ASSERT(instr->IsLdrLiteralX());
+ return candidate;
+}
+
+
+Address Assembler::return_address_from_call_start(Address pc) {
+ // The call, generated by MacroAssembler::Call, is one of two possible
+ // sequences:
+ //
+ // Without relocation:
+ // movz temp, #(target & 0x000000000000ffff)
+ // movk temp, #(target & 0x00000000ffff0000)
+ // movk temp, #(target & 0x0000ffff00000000)
+ // blr temp
+ //
+ // With relocation:
+ // ldr temp, =target
+ // blr temp
+ //
+ // The return address is immediately after the blr instruction in both cases,
+ // so it can be found by adding the call size to the address at the start of
+ // the call sequence.
+ STATIC_ASSERT(Assembler::kCallSizeWithoutRelocation == 4 * kInstructionSize);
+ STATIC_ASSERT(Assembler::kCallSizeWithRelocation == 2 * kInstructionSize);
+
+ Instruction* instr = reinterpret_cast<Instruction*>(pc);
+ if (instr->IsMovz()) {
+ // Verify the instruction sequence.
+ ASSERT(instr->following(1)->IsMovk());
+ ASSERT(instr->following(2)->IsMovk());
+ ASSERT(instr->following(3)->IsBranchAndLinkToRegister());
+ return pc + Assembler::kCallSizeWithoutRelocation;
+ } else {
+ // Verify the instruction sequence.
+ ASSERT(instr->IsLdrLiteralX());
+ ASSERT(instr->following(1)->IsBranchAndLinkToRegister());
+ return pc + Assembler::kCallSizeWithRelocation;
+ }
+}
+
+
+void Assembler::deserialization_set_special_target_at(
+ Address constant_pool_entry, Code* code, Address target) {
+ Memory::Address_at(constant_pool_entry) = target;
+}
+
+
+void Assembler::set_target_address_at(Address pc,
+ ConstantPoolArray* constant_pool,
+ Address target) {
+ Memory::Address_at(target_pointer_address_at(pc)) = target;
+ // Intuitively, we would think it is necessary to always flush the
+ // instruction cache after patching a target address in the code as follows:
+ // CPU::FlushICache(pc, sizeof(target));
+  // However, on ARM, no instruction is actually patched in the case of
+ // embedded constants of the form:
+ // ldr ip, [pc, #...]
+ // since the instruction accessing this address in the constant pool remains
+ // unchanged, a flush is not required.
+}
+
+
+void Assembler::set_target_address_at(Address pc,
+ Code* code,
+ Address target) {
+ ConstantPoolArray* constant_pool = code ? code->constant_pool() : NULL;
+ set_target_address_at(pc, constant_pool, target);
+}
+
+
+int RelocInfo::target_address_size() {
+ return kPointerSize;
+}
+
+
+Address RelocInfo::target_address() {
+ ASSERT(IsCodeTarget(rmode_) || IsRuntimeEntry(rmode_));
+ return Assembler::target_address_at(pc_, host_);
+}
+
+
+Address RelocInfo::target_address_address() {
+ ASSERT(IsCodeTarget(rmode_) || IsRuntimeEntry(rmode_)
+ || rmode_ == EMBEDDED_OBJECT
+ || rmode_ == EXTERNAL_REFERENCE);
+ return Assembler::target_pointer_address_at(pc_);
+}
+
+
+Address RelocInfo::constant_pool_entry_address() {
+ ASSERT(IsInConstantPool());
+ return Assembler::target_pointer_address_at(pc_);
+}
+
+
+Object* RelocInfo::target_object() {
+ ASSERT(IsCodeTarget(rmode_) || rmode_ == EMBEDDED_OBJECT);
+ return reinterpret_cast<Object*>(Assembler::target_address_at(pc_, host_));
+}
+
+
+Handle<Object> RelocInfo::target_object_handle(Assembler* origin) {
+ ASSERT(IsCodeTarget(rmode_) || rmode_ == EMBEDDED_OBJECT);
+ return Handle<Object>(reinterpret_cast<Object**>(
+ Assembler::target_address_at(pc_, host_)));
+}
+
+
+void RelocInfo::set_target_object(Object* target, WriteBarrierMode mode) {
+ ASSERT(IsCodeTarget(rmode_) || rmode_ == EMBEDDED_OBJECT);
+ ASSERT(!target->IsConsString());
+ Assembler::set_target_address_at(pc_, host_,
+ reinterpret_cast<Address>(target));
+ if (mode == UPDATE_WRITE_BARRIER &&
+ host() != NULL &&
+ target->IsHeapObject()) {
+ host()->GetHeap()->incremental_marking()->RecordWrite(
+ host(), &Memory::Object_at(pc_), HeapObject::cast(target));
+ }
+}
+
+
+Address RelocInfo::target_reference() {
+ ASSERT(rmode_ == EXTERNAL_REFERENCE);
+ return Assembler::target_address_at(pc_, host_);
+}
+
+
+Address RelocInfo::target_runtime_entry(Assembler* origin) {
+ ASSERT(IsRuntimeEntry(rmode_));
+ return target_address();
+}
+
+
+void RelocInfo::set_target_runtime_entry(Address target,
+ WriteBarrierMode mode) {
+ ASSERT(IsRuntimeEntry(rmode_));
+ if (target_address() != target) set_target_address(target, mode);
+}
+
+
+Handle<Cell> RelocInfo::target_cell_handle() {
+ UNIMPLEMENTED();
+ Cell *null_cell = NULL;
+ return Handle<Cell>(null_cell);
+}
+
+
+Cell* RelocInfo::target_cell() {
+ ASSERT(rmode_ == RelocInfo::CELL);
+ return Cell::FromValueAddress(Memory::Address_at(pc_));
+}
+
+
+void RelocInfo::set_target_cell(Cell* cell, WriteBarrierMode mode) {
+ UNIMPLEMENTED();
+}
+
+
+static const int kCodeAgeSequenceSize = 5 * kInstructionSize;
+static const int kCodeAgeStubEntryOffset = 3 * kInstructionSize;
+
+
+Handle<Object> RelocInfo::code_age_stub_handle(Assembler* origin) {
+ UNREACHABLE(); // This should never be reached on ARM64.
+ return Handle<Object>();
+}
+
+
+Code* RelocInfo::code_age_stub() {
+ ASSERT(rmode_ == RelocInfo::CODE_AGE_SEQUENCE);
+ ASSERT(!Code::IsYoungSequence(pc_));
+ // Read the stub entry point from the code age sequence.
+ Address stub_entry_address = pc_ + kCodeAgeStubEntryOffset;
+ return Code::GetCodeFromTargetAddress(Memory::Address_at(stub_entry_address));
+}
+
+
+void RelocInfo::set_code_age_stub(Code* stub) {
+ ASSERT(rmode_ == RelocInfo::CODE_AGE_SEQUENCE);
+ ASSERT(!Code::IsYoungSequence(pc_));
+ // Overwrite the stub entry point in the code age sequence. This is loaded as
+ // a literal so there is no need to call FlushICache here.
+ Address stub_entry_address = pc_ + kCodeAgeStubEntryOffset;
+ Memory::Address_at(stub_entry_address) = stub->instruction_start();
+}
+
+
+Address RelocInfo::call_address() {
+ ASSERT((IsJSReturn(rmode()) && IsPatchedReturnSequence()) ||
+ (IsDebugBreakSlot(rmode()) && IsPatchedDebugBreakSlotSequence()));
+ // For the above sequences the Relocinfo points to the load literal loading
+ // the call address.
+ return Assembler::target_address_at(pc_, host_);
+}
+
+
+void RelocInfo::set_call_address(Address target) {
+ ASSERT((IsJSReturn(rmode()) && IsPatchedReturnSequence()) ||
+ (IsDebugBreakSlot(rmode()) && IsPatchedDebugBreakSlotSequence()));
+ Assembler::set_target_address_at(pc_, host_, target);
+ if (host() != NULL) {
+ Object* target_code = Code::GetCodeFromTargetAddress(target);
+ host()->GetHeap()->incremental_marking()->RecordWriteIntoCode(
+ host(), this, HeapObject::cast(target_code));
+ }
+}
+
+
+void RelocInfo::WipeOut() {
+ ASSERT(IsEmbeddedObject(rmode_) ||
+ IsCodeTarget(rmode_) ||
+ IsRuntimeEntry(rmode_) ||
+ IsExternalReference(rmode_));
+ Assembler::set_target_address_at(pc_, host_, NULL);
+}
+
+
+bool RelocInfo::IsPatchedReturnSequence() {
+ // The sequence must be:
+ // ldr ip0, [pc, #offset]
+ // blr ip0
+ // See arm64/debug-arm64.cc BreakLocationIterator::SetDebugBreakAtReturn().
+ Instruction* i1 = reinterpret_cast<Instruction*>(pc_);
+ Instruction* i2 = i1->following();
+ return i1->IsLdrLiteralX() && (i1->Rt() == ip0.code()) &&
+ i2->IsBranchAndLinkToRegister() && (i2->Rn() == ip0.code());
+}
+
+
+bool RelocInfo::IsPatchedDebugBreakSlotSequence() {
+ Instruction* current_instr = reinterpret_cast<Instruction*>(pc_);
+ return !current_instr->IsNop(Assembler::DEBUG_BREAK_NOP);
+}
+
+
+void RelocInfo::Visit(Isolate* isolate, ObjectVisitor* visitor) {
+ RelocInfo::Mode mode = rmode();
+ if (mode == RelocInfo::EMBEDDED_OBJECT) {
+ visitor->VisitEmbeddedPointer(this);
+ } else if (RelocInfo::IsCodeTarget(mode)) {
+ visitor->VisitCodeTarget(this);
+ } else if (mode == RelocInfo::CELL) {
+ visitor->VisitCell(this);
+ } else if (mode == RelocInfo::EXTERNAL_REFERENCE) {
+ visitor->VisitExternalReference(this);
+#ifdef ENABLE_DEBUGGER_SUPPORT
+ } else if (((RelocInfo::IsJSReturn(mode) &&
+ IsPatchedReturnSequence()) ||
+ (RelocInfo::IsDebugBreakSlot(mode) &&
+ IsPatchedDebugBreakSlotSequence())) &&
+ isolate->debug()->has_break_points()) {
+ visitor->VisitDebugTarget(this);
+#endif
+ } else if (RelocInfo::IsRuntimeEntry(mode)) {
+ visitor->VisitRuntimeEntry(this);
+ }
+}
+
+
+template<typename StaticVisitor>
+void RelocInfo::Visit(Heap* heap) {
+ RelocInfo::Mode mode = rmode();
+ if (mode == RelocInfo::EMBEDDED_OBJECT) {
+ StaticVisitor::VisitEmbeddedPointer(heap, this);
+ } else if (RelocInfo::IsCodeTarget(mode)) {
+ StaticVisitor::VisitCodeTarget(heap, this);
+ } else if (mode == RelocInfo::CELL) {
+ StaticVisitor::VisitCell(heap, this);
+ } else if (mode == RelocInfo::EXTERNAL_REFERENCE) {
+ StaticVisitor::VisitExternalReference(this);
+#ifdef ENABLE_DEBUGGER_SUPPORT
+ } else if (heap->isolate()->debug()->has_break_points() &&
+ ((RelocInfo::IsJSReturn(mode) &&
+ IsPatchedReturnSequence()) ||
+ (RelocInfo::IsDebugBreakSlot(mode) &&
+ IsPatchedDebugBreakSlotSequence()))) {
+ StaticVisitor::VisitDebugTarget(heap, this);
+#endif
+ } else if (RelocInfo::IsRuntimeEntry(mode)) {
+ StaticVisitor::VisitRuntimeEntry(this);
+ }
+}
+
+
+LoadStoreOp Assembler::LoadOpFor(const CPURegister& rt) {
+ ASSERT(rt.IsValid());
+ if (rt.IsRegister()) {
+ return rt.Is64Bits() ? LDR_x : LDR_w;
+ } else {
+ ASSERT(rt.IsFPRegister());
+ return rt.Is64Bits() ? LDR_d : LDR_s;
+ }
+}
+
+
+LoadStorePairOp Assembler::LoadPairOpFor(const CPURegister& rt,
+ const CPURegister& rt2) {
+ ASSERT(AreSameSizeAndType(rt, rt2));
+ USE(rt2);
+ if (rt.IsRegister()) {
+ return rt.Is64Bits() ? LDP_x : LDP_w;
+ } else {
+ ASSERT(rt.IsFPRegister());
+ return rt.Is64Bits() ? LDP_d : LDP_s;
+ }
+}
+
+
+LoadStoreOp Assembler::StoreOpFor(const CPURegister& rt) {
+ ASSERT(rt.IsValid());
+ if (rt.IsRegister()) {
+ return rt.Is64Bits() ? STR_x : STR_w;
+ } else {
+ ASSERT(rt.IsFPRegister());
+ return rt.Is64Bits() ? STR_d : STR_s;
+ }
+}
+
+
+LoadStorePairOp Assembler::StorePairOpFor(const CPURegister& rt,
+ const CPURegister& rt2) {
+ ASSERT(AreSameSizeAndType(rt, rt2));
+ USE(rt2);
+ if (rt.IsRegister()) {
+ return rt.Is64Bits() ? STP_x : STP_w;
+ } else {
+ ASSERT(rt.IsFPRegister());
+ return rt.Is64Bits() ? STP_d : STP_s;
+ }
+}
+
+
+LoadStorePairNonTemporalOp Assembler::LoadPairNonTemporalOpFor(
+ const CPURegister& rt, const CPURegister& rt2) {
+ ASSERT(AreSameSizeAndType(rt, rt2));
+ USE(rt2);
+ if (rt.IsRegister()) {
+ return rt.Is64Bits() ? LDNP_x : LDNP_w;
+ } else {
+ ASSERT(rt.IsFPRegister());
+ return rt.Is64Bits() ? LDNP_d : LDNP_s;
+ }
+}
+
+
+LoadStorePairNonTemporalOp Assembler::StorePairNonTemporalOpFor(
+ const CPURegister& rt, const CPURegister& rt2) {
+ ASSERT(AreSameSizeAndType(rt, rt2));
+ USE(rt2);
+ if (rt.IsRegister()) {
+ return rt.Is64Bits() ? STNP_x : STNP_w;
+ } else {
+ ASSERT(rt.IsFPRegister());
+ return rt.Is64Bits() ? STNP_d : STNP_s;
+ }
+}
+
+
+int Assembler::LinkAndGetInstructionOffsetTo(Label* label) {
+ ASSERT(kStartOfLabelLinkChain == 0);
+ int offset = LinkAndGetByteOffsetTo(label);
+ ASSERT(IsAligned(offset, kInstructionSize));
+ return offset >> kInstructionSizeLog2;
+}
+
+
+Instr Assembler::Flags(FlagsUpdate S) {
+ if (S == SetFlags) {
+ return 1 << FlagsUpdate_offset;
+ } else if (S == LeaveFlags) {
+ return 0 << FlagsUpdate_offset;
+ }
+ UNREACHABLE();
+ return 0;
+}
+
+
+Instr Assembler::Cond(Condition cond) {
+ return cond << Condition_offset;
+}
+
+
+Instr Assembler::ImmPCRelAddress(int imm21) {
+ CHECK(is_int21(imm21));
+ Instr imm = static_cast<Instr>(truncate_to_int21(imm21));
+ Instr immhi = (imm >> ImmPCRelLo_width) << ImmPCRelHi_offset;
+ Instr immlo = imm << ImmPCRelLo_offset;
+ return (immhi & ImmPCRelHi_mask) | (immlo & ImmPCRelLo_mask);
+}
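+
+
+// Illustration of the split above: a 21-bit PC-relative offset such as
+// 0x12345 is divided into its low two bits (ImmPCRelLo_width), giving
+// immlo = 0x1, and the remaining high bits, giving immhi = 0x48D1; the two
+// values land in the separate immlo/immhi fields of the ADR encoding.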
+
+
+Instr Assembler::ImmUncondBranch(int imm26) {
+ CHECK(is_int26(imm26));
+ return truncate_to_int26(imm26) << ImmUncondBranch_offset;
+}
+
+
+Instr Assembler::ImmCondBranch(int imm19) {
+ CHECK(is_int19(imm19));
+ return truncate_to_int19(imm19) << ImmCondBranch_offset;
+}
+
+
+Instr Assembler::ImmCmpBranch(int imm19) {
+ CHECK(is_int19(imm19));
+ return truncate_to_int19(imm19) << ImmCmpBranch_offset;
+}
+
+
+Instr Assembler::ImmTestBranch(int imm14) {
+ CHECK(is_int14(imm14));
+ return truncate_to_int14(imm14) << ImmTestBranch_offset;
+}
+
+
+Instr Assembler::ImmTestBranchBit(unsigned bit_pos) {
+ ASSERT(is_uint6(bit_pos));
+ // Subtract five from the shift offset, as we need bit 5 from bit_pos.
+ unsigned b5 = bit_pos << (ImmTestBranchBit5_offset - 5);
+ unsigned b40 = bit_pos << ImmTestBranchBit40_offset;
+ b5 &= ImmTestBranchBit5_mask;
+ b40 &= ImmTestBranchBit40_mask;
+ return b5 | b40;
+}
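+
+
+// Worked example for the bit position split above: with bit_pos = 37
+// (0b100101), the b5 field receives the top bit (1) and the b40 field
+// receives the low five bits (0b00101), matching the split b5:b40 immediate
+// of TBZ/TBNZ.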
+
+
+Instr Assembler::SF(Register rd) {
+ return rd.Is64Bits() ? SixtyFourBits : ThirtyTwoBits;
+}
+
+
+Instr Assembler::ImmAddSub(int64_t imm) {
+ ASSERT(IsImmAddSub(imm));
+ if (is_uint12(imm)) { // No shift required.
+ return imm << ImmAddSub_offset;
+ } else {
+ return ((imm >> 12) << ImmAddSub_offset) | (1 << ShiftAddSub_offset);
+ }
+}
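+
+
+// Worked example for the add/sub immediate encoding above: 0x123 fits in
+// twelve bits and is encoded directly with no shift, while 0x123000 is
+// encoded as 0x123 with the ShiftAddSub bit set (i.e. LSL #12).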
+
+
+Instr Assembler::ImmS(unsigned imms, unsigned reg_size) {
+ ASSERT(((reg_size == kXRegSizeInBits) && is_uint6(imms)) ||
+ ((reg_size == kWRegSizeInBits) && is_uint5(imms)));
+ USE(reg_size);
+ return imms << ImmS_offset;
+}
+
+
+Instr Assembler::ImmR(unsigned immr, unsigned reg_size) {
+ ASSERT(((reg_size == kXRegSizeInBits) && is_uint6(immr)) ||
+ ((reg_size == kWRegSizeInBits) && is_uint5(immr)));
+ USE(reg_size);
+ ASSERT(is_uint6(immr));
+ return immr << ImmR_offset;
+}
+
+
+Instr Assembler::ImmSetBits(unsigned imms, unsigned reg_size) {
+ ASSERT((reg_size == kWRegSizeInBits) || (reg_size == kXRegSizeInBits));
+ ASSERT(is_uint6(imms));
+ ASSERT((reg_size == kXRegSizeInBits) || is_uint6(imms + 3));
+ USE(reg_size);
+ return imms << ImmSetBits_offset;
+}
+
+
+Instr Assembler::ImmRotate(unsigned immr, unsigned reg_size) {
+ ASSERT((reg_size == kWRegSizeInBits) || (reg_size == kXRegSizeInBits));
+ ASSERT(((reg_size == kXRegSizeInBits) && is_uint6(immr)) ||
+ ((reg_size == kWRegSizeInBits) && is_uint5(immr)));
+ USE(reg_size);
+ return immr << ImmRotate_offset;
+}
+
+
+Instr Assembler::ImmLLiteral(int imm19) {
+ CHECK(is_int19(imm19));
+ return truncate_to_int19(imm19) << ImmLLiteral_offset;
+}
+
+
+Instr Assembler::BitN(unsigned bitn, unsigned reg_size) {
+ ASSERT((reg_size == kWRegSizeInBits) || (reg_size == kXRegSizeInBits));
+ ASSERT((reg_size == kXRegSizeInBits) || (bitn == 0));
+ USE(reg_size);
+ return bitn << BitN_offset;
+}
+
+
+Instr Assembler::ShiftDP(Shift shift) {
+ ASSERT(shift == LSL || shift == LSR || shift == ASR || shift == ROR);
+ return shift << ShiftDP_offset;
+}
+
+
+Instr Assembler::ImmDPShift(unsigned amount) {
+ ASSERT(is_uint6(amount));
+ return amount << ImmDPShift_offset;
+}
+
+
+Instr Assembler::ExtendMode(Extend extend) {
+ return extend << ExtendMode_offset;
+}
+
+
+Instr Assembler::ImmExtendShift(unsigned left_shift) {
+ ASSERT(left_shift <= 4);
+ return left_shift << ImmExtendShift_offset;
+}
+
+
+Instr Assembler::ImmCondCmp(unsigned imm) {
+ ASSERT(is_uint5(imm));
+ return imm << ImmCondCmp_offset;
+}
+
+
+Instr Assembler::Nzcv(StatusFlags nzcv) {
+ return ((nzcv >> Flags_offset) & 0xf) << Nzcv_offset;
+}
+
+
+Instr Assembler::ImmLSUnsigned(int imm12) {
+ ASSERT(is_uint12(imm12));
+ return imm12 << ImmLSUnsigned_offset;
+}
+
+
+Instr Assembler::ImmLS(int imm9) {
+ ASSERT(is_int9(imm9));
+ return truncate_to_int9(imm9) << ImmLS_offset;
+}
+
+
+Instr Assembler::ImmLSPair(int imm7, LSDataSize size) {
+ ASSERT(((imm7 >> size) << size) == imm7);
+ int scaled_imm7 = imm7 >> size;
+ ASSERT(is_int7(scaled_imm7));
+ return truncate_to_int7(scaled_imm7) << ImmLSPair_offset;
+}
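+
+
+// Worked example for the scaled pair offset above: with size == 3 (a 64-bit
+// register pair, 8-byte units), a byte offset of 48 is encoded as
+// scaled_imm7 = 6; an offset that is not a multiple of 8 would fail the
+// alignment assert.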
+
+
+Instr Assembler::ImmShiftLS(unsigned shift_amount) {
+ ASSERT(is_uint1(shift_amount));
+ return shift_amount << ImmShiftLS_offset;
+}
+
+
+Instr Assembler::ImmException(int imm16) {
+ ASSERT(is_uint16(imm16));
+ return imm16 << ImmException_offset;
+}
+
+
+Instr Assembler::ImmSystemRegister(int imm15) {
+ ASSERT(is_uint15(imm15));
+ return imm15 << ImmSystemRegister_offset;
+}
+
+
+Instr Assembler::ImmHint(int imm7) {
+ ASSERT(is_uint7(imm7));
+ return imm7 << ImmHint_offset;
+}
+
+
+Instr Assembler::ImmBarrierDomain(int imm2) {
+ ASSERT(is_uint2(imm2));
+ return imm2 << ImmBarrierDomain_offset;
+}
+
+
+Instr Assembler::ImmBarrierType(int imm2) {
+ ASSERT(is_uint2(imm2));
+ return imm2 << ImmBarrierType_offset;
+}
+
+
+LSDataSize Assembler::CalcLSDataSize(LoadStoreOp op) {
+ ASSERT((SizeLS_offset + SizeLS_width) == (kInstructionSize * 8));
+ return static_cast<LSDataSize>(op >> SizeLS_offset);
+}
+
+
+Instr Assembler::ImmMoveWide(uint64_t imm) {
+ ASSERT(is_uint16(imm));
+ return imm << ImmMoveWide_offset;
+}
+
+
+Instr Assembler::ShiftMoveWide(int64_t shift) {
+ ASSERT(is_uint2(shift));
+ return shift << ShiftMoveWide_offset;
+}
+
+
+Instr Assembler::FPType(FPRegister fd) {
+ return fd.Is64Bits() ? FP64 : FP32;
+}
+
+
+Instr Assembler::FPScale(unsigned scale) {
+ ASSERT(is_uint6(scale));
+ return scale << FPScale_offset;
+}
+
+
+const Register& Assembler::AppropriateZeroRegFor(const CPURegister& reg) const {
+ return reg.Is64Bits() ? xzr : wzr;
+}
+
+
+void Assembler::LoadRelocated(const CPURegister& rt, const Operand& operand) {
+ LoadRelocatedValue(rt, operand, LDR_x_lit);
+}
+
+
+inline void Assembler::CheckBuffer() {
+ ASSERT(pc_ < (buffer_ + buffer_size_));
+ if (buffer_space() < kGap) {
+ GrowBuffer();
+ }
+ if (pc_offset() >= next_veneer_pool_check_) {
+ CheckVeneerPool(false, true);
+ }
+ if (pc_offset() >= next_constant_pool_check_) {
+ CheckConstPool(false, true);
+ }
+}
+
+
+TypeFeedbackId Assembler::RecordedAstId() {
+ ASSERT(!recorded_ast_id_.IsNone());
+ return recorded_ast_id_;
+}
+
+
+void Assembler::ClearRecordedAstId() {
+ recorded_ast_id_ = TypeFeedbackId::None();
+}
+
+
+} } // namespace v8::internal
+
+#endif // V8_ARM64_ASSEMBLER_ARM64_INL_H_
diff --git a/deps/v8/src/arm64/assembler-arm64.cc b/deps/v8/src/arm64/assembler-arm64.cc
new file mode 100644
index 0000000000..8bee92ccc2
--- /dev/null
+++ b/deps/v8/src/arm64/assembler-arm64.cc
@@ -0,0 +1,2813 @@
+// Copyright 2013 the V8 project authors. All rights reserved.
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following
+// disclaimer in the documentation and/or other materials provided
+// with the distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived
+// from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+#include "v8.h"
+
+#if V8_TARGET_ARCH_ARM64
+
+#define ARM64_DEFINE_REG_STATICS
+
+#include "arm64/assembler-arm64-inl.h"
+
+namespace v8 {
+namespace internal {
+
+
+// -----------------------------------------------------------------------------
+// CpuFeatures utilities (for V8 compatibility).
+
+ExternalReference ExternalReference::cpu_features() {
+ return ExternalReference(&CpuFeatures::supported_);
+}
+
+
+// -----------------------------------------------------------------------------
+// CPURegList utilities.
+
+CPURegister CPURegList::PopLowestIndex() {
+ ASSERT(IsValid());
+ if (IsEmpty()) {
+ return NoCPUReg;
+ }
+ int index = CountTrailingZeros(list_, kRegListSizeInBits);
+ ASSERT((1 << index) & list_);
+ Remove(index);
+ return CPURegister::Create(index, size_, type_);
+}
+
+
+CPURegister CPURegList::PopHighestIndex() {
+ ASSERT(IsValid());
+ if (IsEmpty()) {
+ return NoCPUReg;
+ }
+ int index = CountLeadingZeros(list_, kRegListSizeInBits);
+ index = kRegListSizeInBits - 1 - index;
+ ASSERT((1 << index) & list_);
+ Remove(index);
+ return CPURegister::Create(index, size_, type_);
+}
+
+
+void CPURegList::RemoveCalleeSaved() {
+ if (type() == CPURegister::kRegister) {
+ Remove(GetCalleeSaved(RegisterSizeInBits()));
+ } else if (type() == CPURegister::kFPRegister) {
+ Remove(GetCalleeSavedFP(RegisterSizeInBits()));
+ } else {
+ ASSERT(type() == CPURegister::kNoRegister);
+ ASSERT(IsEmpty());
+ // The list must already be empty, so do nothing.
+ }
+}
+
+
+CPURegList CPURegList::GetCalleeSaved(unsigned size) {
+ return CPURegList(CPURegister::kRegister, size, 19, 29);
+}
+
+
+CPURegList CPURegList::GetCalleeSavedFP(unsigned size) {
+ return CPURegList(CPURegister::kFPRegister, size, 8, 15);
+}
+
+
+CPURegList CPURegList::GetCallerSaved(unsigned size) {
+ // Registers x0-x18 and lr (x30) are caller-saved.
+ CPURegList list = CPURegList(CPURegister::kRegister, size, 0, 18);
+ list.Combine(lr);
+ return list;
+}
+
+
+CPURegList CPURegList::GetCallerSavedFP(unsigned size) {
+ // Registers d0-d7 and d16-d31 are caller-saved.
+ CPURegList list = CPURegList(CPURegister::kFPRegister, size, 0, 7);
+ list.Combine(CPURegList(CPURegister::kFPRegister, size, 16, 31));
+ return list;
+}
+
+
+// This function defines the list of registers which are associated with a
+// safepoint slot. Safepoint register slots are saved contiguously on the stack.
+// MacroAssembler::SafepointRegisterStackIndex handles mapping from register
+// code to index in the safepoint register slots. Any change here can affect
+// this mapping.
+CPURegList CPURegList::GetSafepointSavedRegisters() {
+ CPURegList list = CPURegList::GetCalleeSaved();
+ list.Combine(
+ CPURegList(CPURegister::kRegister, kXRegSizeInBits, kJSCallerSaved));
+
+ // Note that unfortunately we can't use symbolic names for registers and have
+ // to directly use register codes. This is because this function is used to
+ // initialize some static variables and we can't rely on register variables
+ // to be initialized due to static initialization order issues in C++.
+
+ // Drop ip0 and ip1 (i.e. x16 and x17), as they should not be expected to be
+ // preserved outside of the macro assembler.
+ list.Remove(16);
+ list.Remove(17);
+
+ // Add x18 to the safepoint list, as although it's not in kJSCallerSaved, it
+ // is a caller-saved register according to the procedure call standard.
+ list.Combine(18);
+
+ // Drop jssp as the stack pointer doesn't need to be included.
+ list.Remove(28);
+
+ // Add the link register (x30) to the safepoint list.
+ list.Combine(30);
+
+ return list;
+}
+
+
+// -----------------------------------------------------------------------------
+// Implementation of RelocInfo
+
+const int RelocInfo::kApplyMask = 0;
+
+
+bool RelocInfo::IsCodedSpecially() {
+ // The deserializer needs to know whether a pointer is specially coded. Being
+ // specially coded on ARM64 means that it is a movz/movk sequence. We don't
+ // generate those for relocatable pointers.
+ return false;
+}
+
+
+bool RelocInfo::IsInConstantPool() {
+ Instruction* instr = reinterpret_cast<Instruction*>(pc_);
+ return instr->IsLdrLiteralX();
+}
+
+
+void RelocInfo::PatchCode(byte* instructions, int instruction_count) {
+ // Patch the code at the current address with the supplied instructions.
+ Instr* pc = reinterpret_cast<Instr*>(pc_);
+ Instr* instr = reinterpret_cast<Instr*>(instructions);
+ for (int i = 0; i < instruction_count; i++) {
+ *(pc + i) = *(instr + i);
+ }
+
+ // Indicate that code has changed.
+ CPU::FlushICache(pc_, instruction_count * kInstructionSize);
+}
+
+
+// Patch the code at the current PC with a call to the target address.
+// Additional guard instructions can be added if required.
+void RelocInfo::PatchCodeWithCall(Address target, int guard_bytes) {
+ UNIMPLEMENTED();
+}
+
+
+Register GetAllocatableRegisterThatIsNotOneOf(Register reg1, Register reg2,
+ Register reg3, Register reg4) {
+ CPURegList regs(reg1, reg2, reg3, reg4);
+ for (int i = 0; i < Register::NumAllocatableRegisters(); i++) {
+ Register candidate = Register::FromAllocationIndex(i);
+ if (regs.IncludesAliasOf(candidate)) continue;
+ return candidate;
+ }
+ UNREACHABLE();
+ return NoReg;
+}
+
+
+bool AreAliased(const CPURegister& reg1, const CPURegister& reg2,
+ const CPURegister& reg3, const CPURegister& reg4,
+ const CPURegister& reg5, const CPURegister& reg6,
+ const CPURegister& reg7, const CPURegister& reg8) {
+ int number_of_valid_regs = 0;
+ int number_of_valid_fpregs = 0;
+
+ RegList unique_regs = 0;
+ RegList unique_fpregs = 0;
+
+ const CPURegister regs[] = {reg1, reg2, reg3, reg4, reg5, reg6, reg7, reg8};
+
+ for (unsigned i = 0; i < sizeof(regs) / sizeof(regs[0]); i++) {
+ if (regs[i].IsRegister()) {
+ number_of_valid_regs++;
+ unique_regs |= regs[i].Bit();
+ } else if (regs[i].IsFPRegister()) {
+ number_of_valid_fpregs++;
+ unique_fpregs |= regs[i].Bit();
+ } else {
+ ASSERT(!regs[i].IsValid());
+ }
+ }
+
+ int number_of_unique_regs =
+ CountSetBits(unique_regs, sizeof(unique_regs) * kBitsPerByte);
+ int number_of_unique_fpregs =
+ CountSetBits(unique_fpregs, sizeof(unique_fpregs) * kBitsPerByte);
+
+ ASSERT(number_of_valid_regs >= number_of_unique_regs);
+ ASSERT(number_of_valid_fpregs >= number_of_unique_fpregs);
+
+ return (number_of_valid_regs != number_of_unique_regs) ||
+ (number_of_valid_fpregs != number_of_unique_fpregs);
+}
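+
+
+// For example, AreAliased(x0, x1, x2) is false, AreAliased(x0, x1, x0) is
+// true, and AreAliased(x0, d0) is false: integer and FP registers are
+// tracked in separate lists, so only duplicates within the same bank count
+// as aliasing.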
+
+
+bool AreSameSizeAndType(const CPURegister& reg1, const CPURegister& reg2,
+ const CPURegister& reg3, const CPURegister& reg4,
+ const CPURegister& reg5, const CPURegister& reg6,
+ const CPURegister& reg7, const CPURegister& reg8) {
+ ASSERT(reg1.IsValid());
+ bool match = true;
+ match &= !reg2.IsValid() || reg2.IsSameSizeAndType(reg1);
+ match &= !reg3.IsValid() || reg3.IsSameSizeAndType(reg1);
+ match &= !reg4.IsValid() || reg4.IsSameSizeAndType(reg1);
+ match &= !reg5.IsValid() || reg5.IsSameSizeAndType(reg1);
+ match &= !reg6.IsValid() || reg6.IsSameSizeAndType(reg1);
+ match &= !reg7.IsValid() || reg7.IsSameSizeAndType(reg1);
+ match &= !reg8.IsValid() || reg8.IsSameSizeAndType(reg1);
+ return match;
+}
+
+
+void Operand::initialize_handle(Handle<Object> handle) {
+ AllowDeferredHandleDereference using_raw_address;
+
+ // Verify all Objects referred by code are NOT in new space.
+ Object* obj = *handle;
+ if (obj->IsHeapObject()) {
+ ASSERT(!HeapObject::cast(obj)->GetHeap()->InNewSpace(obj));
+ immediate_ = reinterpret_cast<intptr_t>(handle.location());
+ rmode_ = RelocInfo::EMBEDDED_OBJECT;
+ } else {
+ STATIC_ASSERT(sizeof(intptr_t) == sizeof(int64_t));
+ immediate_ = reinterpret_cast<intptr_t>(obj);
+ rmode_ = RelocInfo::NONE64;
+ }
+}
+
+
+bool Operand::NeedsRelocation() const {
+ if (rmode_ == RelocInfo::EXTERNAL_REFERENCE) {
+#ifdef DEBUG
+ if (!Serializer::enabled()) {
+ Serializer::TooLateToEnableNow();
+ }
+#endif
+ return Serializer::enabled();
+ }
+
+ return !RelocInfo::IsNone(rmode_);
+}
+
+
+// Assembler
+
+Assembler::Assembler(Isolate* isolate, void* buffer, int buffer_size)
+ : AssemblerBase(isolate, buffer, buffer_size),
+ recorded_ast_id_(TypeFeedbackId::None()),
+ unresolved_branches_(),
+ positions_recorder_(this) {
+ const_pool_blocked_nesting_ = 0;
+ veneer_pool_blocked_nesting_ = 0;
+ Reset();
+}
+
+
+Assembler::~Assembler() {
+ ASSERT(num_pending_reloc_info_ == 0);
+ ASSERT(const_pool_blocked_nesting_ == 0);
+ ASSERT(veneer_pool_blocked_nesting_ == 0);
+}
+
+
+void Assembler::Reset() {
+#ifdef DEBUG
+ ASSERT((pc_ >= buffer_) && (pc_ < buffer_ + buffer_size_));
+ ASSERT(const_pool_blocked_nesting_ == 0);
+ ASSERT(veneer_pool_blocked_nesting_ == 0);
+ ASSERT(unresolved_branches_.empty());
+ memset(buffer_, 0, pc_ - buffer_);
+#endif
+ pc_ = buffer_;
+ reloc_info_writer.Reposition(reinterpret_cast<byte*>(buffer_ + buffer_size_),
+ reinterpret_cast<byte*>(pc_));
+ num_pending_reloc_info_ = 0;
+ next_constant_pool_check_ = 0;
+ next_veneer_pool_check_ = kMaxInt;
+ no_const_pool_before_ = 0;
+ first_const_pool_use_ = -1;
+ ClearRecordedAstId();
+}
+
+
+void Assembler::GetCode(CodeDesc* desc) {
+ // Emit constant pool if necessary.
+ CheckConstPool(true, false);
+ ASSERT(num_pending_reloc_info_ == 0);
+
+ // Set up code descriptor.
+ if (desc) {
+ desc->buffer = reinterpret_cast<byte*>(buffer_);
+ desc->buffer_size = buffer_size_;
+ desc->instr_size = pc_offset();
+ desc->reloc_size = (reinterpret_cast<byte*>(buffer_) + buffer_size_) -
+ reloc_info_writer.pos();
+ desc->origin = this;
+ }
+}
+
+
+void Assembler::Align(int m) {
+ ASSERT(m >= 4 && IsPowerOf2(m));
+ while ((pc_offset() & (m - 1)) != 0) {
+ nop();
+ }
+}
+
+
+void Assembler::CheckLabelLinkChain(Label const * label) {
+#ifdef DEBUG
+ if (label->is_linked()) {
+ int linkoffset = label->pos();
+ bool end_of_chain = false;
+ while (!end_of_chain) {
+ Instruction * link = InstructionAt(linkoffset);
+ int linkpcoffset = link->ImmPCOffset();
+ int prevlinkoffset = linkoffset + linkpcoffset;
+
+ end_of_chain = (linkoffset == prevlinkoffset);
+ linkoffset = linkoffset + linkpcoffset;
+ }
+ }
+#endif
+}
+
+
+void Assembler::RemoveBranchFromLabelLinkChain(Instruction* branch,
+ Label* label,
+ Instruction* label_veneer) {
+ ASSERT(label->is_linked());
+
+ CheckLabelLinkChain(label);
+
+ Instruction* link = InstructionAt(label->pos());
+ Instruction* prev_link = link;
+ Instruction* next_link;
+ bool end_of_chain = false;
+
+ while (link != branch && !end_of_chain) {
+ next_link = link->ImmPCOffsetTarget();
+ end_of_chain = (link == next_link);
+ prev_link = link;
+ link = next_link;
+ }
+
+ ASSERT(branch == link);
+ next_link = branch->ImmPCOffsetTarget();
+
+ if (branch == prev_link) {
+ // The branch is the first instruction in the chain.
+ if (branch == next_link) {
+ // It is also the last instruction in the chain, so it is the only branch
+ // currently referring to this label.
+ label->Unuse();
+ } else {
+ label->link_to(reinterpret_cast<byte*>(next_link) - buffer_);
+ }
+
+ } else if (branch == next_link) {
+ // The branch is the last (but not also the first) instruction in the chain.
+ prev_link->SetImmPCOffsetTarget(prev_link);
+
+ } else {
+ // The branch is in the middle of the chain.
+ if (prev_link->IsTargetInImmPCOffsetRange(next_link)) {
+ prev_link->SetImmPCOffsetTarget(next_link);
+ } else if (label_veneer != NULL) {
+ // Use the veneer for all previous links in the chain.
+ prev_link->SetImmPCOffsetTarget(prev_link);
+
+ end_of_chain = false;
+ link = next_link;
+ while (!end_of_chain) {
+ next_link = link->ImmPCOffsetTarget();
+ end_of_chain = (link == next_link);
+ link->SetImmPCOffsetTarget(label_veneer);
+ link = next_link;
+ }
+ } else {
+ // The assert below will fire.
+ // Some other work could be attempted to fix up the chain, but it would be
+ // rather complicated. If we crash here, we may want to consider using an
+ // other mechanism than a chain of branches.
+ //
+ // Note that this situation currently should not happen, as we always call
+ // this function with a veneer to the target label.
+ // However this could happen with a MacroAssembler in the following state:
+ // [previous code]
+ // B(label);
+ // [20KB code]
+ // Tbz(label); // First tbz. Pointing to unconditional branch.
+ // [20KB code]
+ // Tbz(label); // Second tbz. Pointing to the first tbz.
+ // [more code]
+ // and this function is called to remove the first tbz from the label link
+ // chain. Since tbz has a range of +-32KB, the second tbz cannot point to
+ // the unconditional branch.
+ CHECK(prev_link->IsTargetInImmPCOffsetRange(next_link));
+ UNREACHABLE();
+ }
+ }
+
+ CheckLabelLinkChain(label);
+}
+
+
+void Assembler::bind(Label* label) {
+ // Bind label to the address at pc_. All instructions (most likely branches)
+ // that are linked to this label will be updated to point to the newly-bound
+ // label.
+
+ ASSERT(!label->is_near_linked());
+ ASSERT(!label->is_bound());
+
+ // If the label is linked, the link chain looks something like this:
+ //
+ // |--I----I-------I-------L
+ // |---------------------->| pc_offset
+ // |-------------->| linkoffset = label->pos()
+ // |<------| link->ImmPCOffset()
+ // |------>| prevlinkoffset = linkoffset + link->ImmPCOffset()
+ //
+ // On each iteration, the last link is updated and then removed from the
+ // chain until only one remains. At that point, the label is bound.
+ //
+ // If the label is not linked, no preparation is required before binding.
+ while (label->is_linked()) {
+ int linkoffset = label->pos();
+ Instruction* link = InstructionAt(linkoffset);
+ int prevlinkoffset = linkoffset + link->ImmPCOffset();
+
+ CheckLabelLinkChain(label);
+
+ ASSERT(linkoffset >= 0);
+ ASSERT(linkoffset < pc_offset());
+ ASSERT((linkoffset > prevlinkoffset) ||
+ (linkoffset - prevlinkoffset == kStartOfLabelLinkChain));
+ ASSERT(prevlinkoffset >= 0);
+
+ // Update the link to point to the label.
+ link->SetImmPCOffsetTarget(reinterpret_cast<Instruction*>(pc_));
+
+ // Link the label to the previous link in the chain.
+ if (linkoffset - prevlinkoffset == kStartOfLabelLinkChain) {
+ // We hit kStartOfLabelLinkChain, so the chain is fully processed.
+ label->Unuse();
+ } else {
+ // Update the label for the next iteration.
+ label->link_to(prevlinkoffset);
+ }
+ }
+ label->bind_to(pc_offset());
+
+ ASSERT(label->is_bound());
+ ASSERT(!label->is_linked());
+
+ DeleteUnresolvedBranchInfoForLabel(label);
+}
+
+
+int Assembler::LinkAndGetByteOffsetTo(Label* label) {
+ ASSERT(sizeof(*pc_) == 1);
+ CheckLabelLinkChain(label);
+
+ int offset;
+ if (label->is_bound()) {
+ // The label is bound, so it does not need to be updated. Referring
+ // instructions must link directly to the label as they will not be
+ // updated.
+ //
+ // In this case, label->pos() returns the offset of the label from the
+ // start of the buffer.
+ //
+ // Note that offset can be zero for self-referential instructions. (This
+ // could be useful for ADR, for example.)
+ offset = label->pos() - pc_offset();
+ ASSERT(offset <= 0);
+ } else {
+ if (label->is_linked()) {
+ // The label is linked, so the referring instruction should be added onto
+ // the end of the label's link chain.
+ //
+ // In this case, label->pos() returns the offset of the last linked
+ // instruction from the start of the buffer.
+ offset = label->pos() - pc_offset();
+ ASSERT(offset != kStartOfLabelLinkChain);
+ // Note that the offset here needs to be PC-relative only so that the
+ // first instruction in a buffer can link to an unbound label. Otherwise,
+ // the offset would be 0 for this case, and 0 is reserved for
+ // kStartOfLabelLinkChain.
+ } else {
+ // The label is unused, so it now becomes linked and the referring
+ // instruction is at the start of the new link chain.
+ offset = kStartOfLabelLinkChain;
+ }
+ // The instruction at pc is now the last link in the label's chain.
+ label->link_to(pc_offset());
+ }
+
+ return offset;
+}
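+
+
+// Illustration of the chain built above (hypothetical buffer offsets): if
+// branches referring to the same unbound label are emitted at offsets 0, 40
+// and 80, the three calls return kStartOfLabelLinkChain (0), -40 and -40
+// respectively, and label->pos() ends up at 80, the most recent link. bind()
+// later walks this chain backwards, retargeting each link to the bound
+// position until it reaches the kStartOfLabelLinkChain terminator.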
+
+
+void Assembler::DeleteUnresolvedBranchInfoForLabel(Label* label) {
+ if (unresolved_branches_.empty()) {
+ ASSERT(next_veneer_pool_check_ == kMaxInt);
+ return;
+ }
+
+ // Branches to this label will be resolved when the label is bound below.
+ std::multimap<int, FarBranchInfo>::iterator it_tmp, it;
+ it = unresolved_branches_.begin();
+ while (it != unresolved_branches_.end()) {
+ it_tmp = it++;
+ if (it_tmp->second.label_ == label) {
+ CHECK(it_tmp->first >= pc_offset());
+ unresolved_branches_.erase(it_tmp);
+ }
+ }
+ if (unresolved_branches_.empty()) {
+ next_veneer_pool_check_ = kMaxInt;
+ } else {
+ next_veneer_pool_check_ =
+ unresolved_branches_first_limit() - kVeneerDistanceCheckMargin;
+ }
+}
+
+
+void Assembler::StartBlockConstPool() {
+ if (const_pool_blocked_nesting_++ == 0) {
+ // Prevent constant pool checks happening by setting the next check to
+ // the biggest possible offset.
+ next_constant_pool_check_ = kMaxInt;
+ }
+}
+
+
+void Assembler::EndBlockConstPool() {
+ if (--const_pool_blocked_nesting_ == 0) {
+ // Check the constant pool hasn't been blocked for too long.
+ ASSERT((num_pending_reloc_info_ == 0) ||
+ (pc_offset() < (first_const_pool_use_ + kMaxDistToConstPool)));
+ // Two cases:
+ // * no_const_pool_before_ >= next_constant_pool_check_ and the emission is
+ // still blocked
+ // * no_const_pool_before_ < next_constant_pool_check_ and the next emit
+ // will trigger a check.
+ next_constant_pool_check_ = no_const_pool_before_;
+ }
+}
+
+
+bool Assembler::is_const_pool_blocked() const {
+ return (const_pool_blocked_nesting_ > 0) ||
+ (pc_offset() < no_const_pool_before_);
+}
+
+
+bool Assembler::IsConstantPoolAt(Instruction* instr) {
+ // The constant pool marker is made of two instructions. These instructions
+ // will never be emitted by the JIT, so checking for the first one is enough:
+ // 0: ldr xzr, #<size of pool>
+ bool result = instr->IsLdrLiteralX() && (instr->Rt() == xzr.code());
+
+ // It is still worth asserting the marker is complete.
+ // 4: blr xzr
+ ASSERT(!result || (instr->following()->IsBranchAndLinkToRegister() &&
+ instr->following()->Rn() == xzr.code()));
+
+ return result;
+}
+
+
+int Assembler::ConstantPoolSizeAt(Instruction* instr) {
+#ifdef USE_SIMULATOR
+ // Assembler::debug() embeds constants directly into the instruction stream.
+ // Although this is not a genuine constant pool, treat it like one to avoid
+ // disassembling the constants.
+ if ((instr->Mask(ExceptionMask) == HLT) &&
+ (instr->ImmException() == kImmExceptionIsDebug)) {
+ const char* message =
+ reinterpret_cast<const char*>(
+ instr->InstructionAtOffset(kDebugMessageOffset));
+ int size = kDebugMessageOffset + strlen(message) + 1;
+ return RoundUp(size, kInstructionSize) / kInstructionSize;
+ }
+ // Same for printf support, see MacroAssembler::CallPrintf().
+ if ((instr->Mask(ExceptionMask) == HLT) &&
+ (instr->ImmException() == kImmExceptionIsPrintf)) {
+ return kPrintfLength / kInstructionSize;
+ }
+#endif
+ if (IsConstantPoolAt(instr)) {
+ return instr->ImmLLiteral();
+ } else {
+ return -1;
+ }
+}
+
+
+void Assembler::ConstantPoolMarker(uint32_t size) {
+ ASSERT(is_const_pool_blocked());
+ // + 1 is for the crash guard.
+ Emit(LDR_x_lit | ImmLLiteral(2 * size + 1) | Rt(xzr));
+}
+
+
+void Assembler::EmitPoolGuard() {
+ // We must generate only one instruction as this is used in scopes that
+ // control the size of the code generated.
+ Emit(BLR | Rn(xzr));
+}
+
+
+void Assembler::ConstantPoolGuard() {
+#ifdef DEBUG
+ // Currently this is only used after a constant pool marker.
+ ASSERT(is_const_pool_blocked());
+ Instruction* instr = reinterpret_cast<Instruction*>(pc_);
+ ASSERT(instr->preceding()->IsLdrLiteralX() &&
+ instr->preceding()->Rt() == xzr.code());
+#endif
+ EmitPoolGuard();
+}
+
+
+void Assembler::StartBlockVeneerPool() {
+ ++veneer_pool_blocked_nesting_;
+}
+
+
+void Assembler::EndBlockVeneerPool() {
+ if (--veneer_pool_blocked_nesting_ == 0) {
+ // Check the veneer pool hasn't been blocked for too long.
+ ASSERT(unresolved_branches_.empty() ||
+ (pc_offset() < unresolved_branches_first_limit()));
+ }
+}
+
+
+void Assembler::br(const Register& xn) {
+ positions_recorder()->WriteRecordedPositions();
+ ASSERT(xn.Is64Bits());
+ Emit(BR | Rn(xn));
+}
+
+
+void Assembler::blr(const Register& xn) {
+ positions_recorder()->WriteRecordedPositions();
+ ASSERT(xn.Is64Bits());
+ // The pattern 'blr xzr' is used as a guard to detect when execution falls
+ // through the constant pool. It should not be emitted.
+ ASSERT(!xn.Is(xzr));
+ Emit(BLR | Rn(xn));
+}
+
+
+void Assembler::ret(const Register& xn) {
+ positions_recorder()->WriteRecordedPositions();
+ ASSERT(xn.Is64Bits());
+ Emit(RET | Rn(xn));
+}
+
+
+void Assembler::b(int imm26) {
+ Emit(B | ImmUncondBranch(imm26));
+}
+
+
+void Assembler::b(Label* label) {
+ positions_recorder()->WriteRecordedPositions();
+ b(LinkAndGetInstructionOffsetTo(label));
+}
+
+
+void Assembler::b(int imm19, Condition cond) {
+ Emit(B_cond | ImmCondBranch(imm19) | cond);
+}
+
+
+void Assembler::b(Label* label, Condition cond) {
+ positions_recorder()->WriteRecordedPositions();
+ b(LinkAndGetInstructionOffsetTo(label), cond);
+}
+
+
+void Assembler::bl(int imm26) {
+ positions_recorder()->WriteRecordedPositions();
+ Emit(BL | ImmUncondBranch(imm26));
+}
+
+
+void Assembler::bl(Label* label) {
+ positions_recorder()->WriteRecordedPositions();
+ bl(LinkAndGetInstructionOffsetTo(label));
+}
+
+
+void Assembler::cbz(const Register& rt,
+ int imm19) {
+ positions_recorder()->WriteRecordedPositions();
+ Emit(SF(rt) | CBZ | ImmCmpBranch(imm19) | Rt(rt));
+}
+
+
+void Assembler::cbz(const Register& rt,
+ Label* label) {
+ positions_recorder()->WriteRecordedPositions();
+ cbz(rt, LinkAndGetInstructionOffsetTo(label));
+}
+
+
+void Assembler::cbnz(const Register& rt,
+ int imm19) {
+ positions_recorder()->WriteRecordedPositions();
+ Emit(SF(rt) | CBNZ | ImmCmpBranch(imm19) | Rt(rt));
+}
+
+
+void Assembler::cbnz(const Register& rt,
+ Label* label) {
+ positions_recorder()->WriteRecordedPositions();
+ cbnz(rt, LinkAndGetInstructionOffsetTo(label));
+}
+
+
+void Assembler::tbz(const Register& rt,
+ unsigned bit_pos,
+ int imm14) {
+ positions_recorder()->WriteRecordedPositions();
+ ASSERT(rt.Is64Bits() || (rt.Is32Bits() && (bit_pos < kWRegSizeInBits)));
+ Emit(TBZ | ImmTestBranchBit(bit_pos) | ImmTestBranch(imm14) | Rt(rt));
+}
+
+
+void Assembler::tbz(const Register& rt,
+ unsigned bit_pos,
+ Label* label) {
+ positions_recorder()->WriteRecordedPositions();
+ tbz(rt, bit_pos, LinkAndGetInstructionOffsetTo(label));
+}
+
+
+void Assembler::tbnz(const Register& rt,
+ unsigned bit_pos,
+ int imm14) {
+ positions_recorder()->WriteRecordedPositions();
+ ASSERT(rt.Is64Bits() || (rt.Is32Bits() && (bit_pos < kWRegSizeInBits)));
+ Emit(TBNZ | ImmTestBranchBit(bit_pos) | ImmTestBranch(imm14) | Rt(rt));
+}
+
+
+void Assembler::tbnz(const Register& rt,
+ unsigned bit_pos,
+ Label* label) {
+ positions_recorder()->WriteRecordedPositions();
+ tbnz(rt, bit_pos, LinkAndGetInstructionOffsetTo(label));
+}
+
+
+void Assembler::adr(const Register& rd, int imm21) {
+ ASSERT(rd.Is64Bits());
+ Emit(ADR | ImmPCRelAddress(imm21) | Rd(rd));
+}
+
+
+void Assembler::adr(const Register& rd, Label* label) {
+ adr(rd, LinkAndGetByteOffsetTo(label));
+}
+
+
+void Assembler::add(const Register& rd,
+ const Register& rn,
+ const Operand& operand) {
+ AddSub(rd, rn, operand, LeaveFlags, ADD);
+}
+
+
+void Assembler::adds(const Register& rd,
+ const Register& rn,
+ const Operand& operand) {
+ AddSub(rd, rn, operand, SetFlags, ADD);
+}
+
+
+void Assembler::cmn(const Register& rn,
+ const Operand& operand) {
+ Register zr = AppropriateZeroRegFor(rn);
+ adds(zr, rn, operand);
+}
+
+
+void Assembler::sub(const Register& rd,
+ const Register& rn,
+ const Operand& operand) {
+ AddSub(rd, rn, operand, LeaveFlags, SUB);
+}
+
+
+void Assembler::subs(const Register& rd,
+ const Register& rn,
+ const Operand& operand) {
+ AddSub(rd, rn, operand, SetFlags, SUB);
+}
+
+
+void Assembler::cmp(const Register& rn, const Operand& operand) {
+ Register zr = AppropriateZeroRegFor(rn);
+ subs(zr, rn, operand);
+}
+
+
+void Assembler::neg(const Register& rd, const Operand& operand) {
+ Register zr = AppropriateZeroRegFor(rd);
+ sub(rd, zr, operand);
+}
+
+
+void Assembler::negs(const Register& rd, const Operand& operand) {
+ Register zr = AppropriateZeroRegFor(rd);
+ subs(rd, zr, operand);
+}
+
+
+void Assembler::adc(const Register& rd,
+ const Register& rn,
+ const Operand& operand) {
+ AddSubWithCarry(rd, rn, operand, LeaveFlags, ADC);
+}
+
+
+void Assembler::adcs(const Register& rd,
+ const Register& rn,
+ const Operand& operand) {
+ AddSubWithCarry(rd, rn, operand, SetFlags, ADC);
+}
+
+
+void Assembler::sbc(const Register& rd,
+ const Register& rn,
+ const Operand& operand) {
+ AddSubWithCarry(rd, rn, operand, LeaveFlags, SBC);
+}
+
+
+void Assembler::sbcs(const Register& rd,
+ const Register& rn,
+ const Operand& operand) {
+ AddSubWithCarry(rd, rn, operand, SetFlags, SBC);
+}
+
+
+void Assembler::ngc(const Register& rd, const Operand& operand) {
+ Register zr = AppropriateZeroRegFor(rd);
+ sbc(rd, zr, operand);
+}
+
+
+void Assembler::ngcs(const Register& rd, const Operand& operand) {
+ Register zr = AppropriateZeroRegFor(rd);
+ sbcs(rd, zr, operand);
+}
+
+
+// Logical instructions.
+void Assembler::and_(const Register& rd,
+ const Register& rn,
+ const Operand& operand) {
+ Logical(rd, rn, operand, AND);
+}
+
+
+void Assembler::ands(const Register& rd,
+ const Register& rn,
+ const Operand& operand) {
+ Logical(rd, rn, operand, ANDS);
+}
+
+
+void Assembler::tst(const Register& rn,
+ const Operand& operand) {
+ ands(AppropriateZeroRegFor(rn), rn, operand);
+}
+
+
+void Assembler::bic(const Register& rd,
+ const Register& rn,
+ const Operand& operand) {
+ Logical(rd, rn, operand, BIC);
+}
+
+
+void Assembler::bics(const Register& rd,
+ const Register& rn,
+ const Operand& operand) {
+ Logical(rd, rn, operand, BICS);
+}
+
+
+void Assembler::orr(const Register& rd,
+ const Register& rn,
+ const Operand& operand) {
+ Logical(rd, rn, operand, ORR);
+}
+
+
+void Assembler::orn(const Register& rd,
+ const Register& rn,
+ const Operand& operand) {
+ Logical(rd, rn, operand, ORN);
+}
+
+
+void Assembler::eor(const Register& rd,
+ const Register& rn,
+ const Operand& operand) {
+ Logical(rd, rn, operand, EOR);
+}
+
+
+void Assembler::eon(const Register& rd,
+ const Register& rn,
+ const Operand& operand) {
+ Logical(rd, rn, operand, EON);
+}
+
+
+void Assembler::lslv(const Register& rd,
+ const Register& rn,
+ const Register& rm) {
+ ASSERT(rd.SizeInBits() == rn.SizeInBits());
+ ASSERT(rd.SizeInBits() == rm.SizeInBits());
+ Emit(SF(rd) | LSLV | Rm(rm) | Rn(rn) | Rd(rd));
+}
+
+
+void Assembler::lsrv(const Register& rd,
+ const Register& rn,
+ const Register& rm) {
+ ASSERT(rd.SizeInBits() == rn.SizeInBits());
+ ASSERT(rd.SizeInBits() == rm.SizeInBits());
+ Emit(SF(rd) | LSRV | Rm(rm) | Rn(rn) | Rd(rd));
+}
+
+
+void Assembler::asrv(const Register& rd,
+ const Register& rn,
+ const Register& rm) {
+ ASSERT(rd.SizeInBits() == rn.SizeInBits());
+ ASSERT(rd.SizeInBits() == rm.SizeInBits());
+ Emit(SF(rd) | ASRV | Rm(rm) | Rn(rn) | Rd(rd));
+}
+
+
+void Assembler::rorv(const Register& rd,
+ const Register& rn,
+ const Register& rm) {
+ ASSERT(rd.SizeInBits() == rn.SizeInBits());
+ ASSERT(rd.SizeInBits() == rm.SizeInBits());
+ Emit(SF(rd) | RORV | Rm(rm) | Rn(rn) | Rd(rd));
+}
+
+
+// Bitfield operations.
+void Assembler::bfm(const Register& rd,
+ const Register& rn,
+ unsigned immr,
+ unsigned imms) {
+ ASSERT(rd.SizeInBits() == rn.SizeInBits());
+ Instr N = SF(rd) >> (kSFOffset - kBitfieldNOffset);
+ Emit(SF(rd) | BFM | N |
+ ImmR(immr, rd.SizeInBits()) |
+ ImmS(imms, rn.SizeInBits()) |
+ Rn(rn) | Rd(rd));
+}
+
+
+void Assembler::sbfm(const Register& rd,
+ const Register& rn,
+ unsigned immr,
+ unsigned imms) {
+ ASSERT(rd.Is64Bits() || rn.Is32Bits());
+ Instr N = SF(rd) >> (kSFOffset - kBitfieldNOffset);
+ Emit(SF(rd) | SBFM | N |
+ ImmR(immr, rd.SizeInBits()) |
+ ImmS(imms, rn.SizeInBits()) |
+ Rn(rn) | Rd(rd));
+}
+
+
+void Assembler::ubfm(const Register& rd,
+ const Register& rn,
+ unsigned immr,
+ unsigned imms) {
+ ASSERT(rd.SizeInBits() == rn.SizeInBits());
+ Instr N = SF(rd) >> (kSFOffset - kBitfieldNOffset);
+ Emit(SF(rd) | UBFM | N |
+ ImmR(immr, rd.SizeInBits()) |
+ ImmS(imms, rn.SizeInBits()) |
+ Rn(rn) | Rd(rd));
+}
+
+
+void Assembler::extr(const Register& rd,
+ const Register& rn,
+ const Register& rm,
+ unsigned lsb) {
+ ASSERT(rd.SizeInBits() == rn.SizeInBits());
+ ASSERT(rd.SizeInBits() == rm.SizeInBits());
+ Instr N = SF(rd) >> (kSFOffset - kBitfieldNOffset);
+ Emit(SF(rd) | EXTR | N | Rm(rm) |
+ ImmS(lsb, rn.SizeInBits()) | Rn(rn) | Rd(rd));
+}
+
+
+void Assembler::csel(const Register& rd,
+ const Register& rn,
+ const Register& rm,
+ Condition cond) {
+ ConditionalSelect(rd, rn, rm, cond, CSEL);
+}
+
+
+void Assembler::csinc(const Register& rd,
+ const Register& rn,
+ const Register& rm,
+ Condition cond) {
+ ConditionalSelect(rd, rn, rm, cond, CSINC);
+}
+
+
+void Assembler::csinv(const Register& rd,
+ const Register& rn,
+ const Register& rm,
+ Condition cond) {
+ ConditionalSelect(rd, rn, rm, cond, CSINV);
+}
+
+
+void Assembler::csneg(const Register& rd,
+ const Register& rn,
+ const Register& rm,
+ Condition cond) {
+ ConditionalSelect(rd, rn, rm, cond, CSNEG);
+}
+
+
+void Assembler::cset(const Register &rd, Condition cond) {
+ ASSERT((cond != al) && (cond != nv));
+ Register zr = AppropriateZeroRegFor(rd);
+ csinc(rd, zr, zr, InvertCondition(cond));
+}
+
+
+void Assembler::csetm(const Register &rd, Condition cond) {
+ ASSERT((cond != al) && (cond != nv));
+ Register zr = AppropriateZeroRegFor(rd);
+ csinv(rd, zr, zr, InvertCondition(cond));
+}
+
+
+void Assembler::cinc(const Register &rd, const Register &rn, Condition cond) {
+ ASSERT((cond != al) && (cond != nv));
+ csinc(rd, rn, rn, InvertCondition(cond));
+}
+
+
+void Assembler::cinv(const Register &rd, const Register &rn, Condition cond) {
+ ASSERT((cond != al) && (cond != nv));
+ csinv(rd, rn, rn, InvertCondition(cond));
+}
+
+
+void Assembler::cneg(const Register &rd, const Register &rn, Condition cond) {
+ ASSERT((cond != al) && (cond != nv));
+ csneg(rd, rn, rn, InvertCondition(cond));
+}
+
+
+void Assembler::ConditionalSelect(const Register& rd,
+ const Register& rn,
+ const Register& rm,
+ Condition cond,
+ ConditionalSelectOp op) {
+ ASSERT(rd.SizeInBits() == rn.SizeInBits());
+ ASSERT(rd.SizeInBits() == rm.SizeInBits());
+ Emit(SF(rd) | op | Rm(rm) | Cond(cond) | Rn(rn) | Rd(rd));
+}
+
+
+void Assembler::ccmn(const Register& rn,
+ const Operand& operand,
+ StatusFlags nzcv,
+ Condition cond) {
+ ConditionalCompare(rn, operand, nzcv, cond, CCMN);
+}
+
+
+void Assembler::ccmp(const Register& rn,
+ const Operand& operand,
+ StatusFlags nzcv,
+ Condition cond) {
+ ConditionalCompare(rn, operand, nzcv, cond, CCMP);
+}
+
+
+void Assembler::DataProcessing3Source(const Register& rd,
+ const Register& rn,
+ const Register& rm,
+ const Register& ra,
+ DataProcessing3SourceOp op) {
+ Emit(SF(rd) | op | Rm(rm) | Ra(ra) | Rn(rn) | Rd(rd));
+}
+
+
+void Assembler::mul(const Register& rd,
+ const Register& rn,
+ const Register& rm) {
+ ASSERT(AreSameSizeAndType(rd, rn, rm));
+ Register zr = AppropriateZeroRegFor(rn);
+ DataProcessing3Source(rd, rn, rm, zr, MADD);
+}
+
+
+void Assembler::madd(const Register& rd,
+ const Register& rn,
+ const Register& rm,
+ const Register& ra) {
+ ASSERT(AreSameSizeAndType(rd, rn, rm, ra));
+ DataProcessing3Source(rd, rn, rm, ra, MADD);
+}
+
+
+void Assembler::mneg(const Register& rd,
+ const Register& rn,
+ const Register& rm) {
+ ASSERT(AreSameSizeAndType(rd, rn, rm));
+ Register zr = AppropriateZeroRegFor(rn);
+ DataProcessing3Source(rd, rn, rm, zr, MSUB);
+}
+
+
+void Assembler::msub(const Register& rd,
+ const Register& rn,
+ const Register& rm,
+ const Register& ra) {
+ ASSERT(AreSameSizeAndType(rd, rn, rm, ra));
+ DataProcessing3Source(rd, rn, rm, ra, MSUB);
+}
+
+
+void Assembler::smaddl(const Register& rd,
+ const Register& rn,
+ const Register& rm,
+ const Register& ra) {
+ ASSERT(rd.Is64Bits() && ra.Is64Bits());
+ ASSERT(rn.Is32Bits() && rm.Is32Bits());
+ DataProcessing3Source(rd, rn, rm, ra, SMADDL_x);
+}
+
+
+void Assembler::smsubl(const Register& rd,
+ const Register& rn,
+ const Register& rm,
+ const Register& ra) {
+ ASSERT(rd.Is64Bits() && ra.Is64Bits());
+ ASSERT(rn.Is32Bits() && rm.Is32Bits());
+ DataProcessing3Source(rd, rn, rm, ra, SMSUBL_x);
+}
+
+
+void Assembler::umaddl(const Register& rd,
+ const Register& rn,
+ const Register& rm,
+ const Register& ra) {
+ ASSERT(rd.Is64Bits() && ra.Is64Bits());
+ ASSERT(rn.Is32Bits() && rm.Is32Bits());
+ DataProcessing3Source(rd, rn, rm, ra, UMADDL_x);
+}
+
+
+void Assembler::umsubl(const Register& rd,
+ const Register& rn,
+ const Register& rm,
+ const Register& ra) {
+ ASSERT(rd.Is64Bits() && ra.Is64Bits());
+ ASSERT(rn.Is32Bits() && rm.Is32Bits());
+ DataProcessing3Source(rd, rn, rm, ra, UMSUBL_x);
+}
+
+
+void Assembler::smull(const Register& rd,
+ const Register& rn,
+ const Register& rm) {
+ ASSERT(rd.Is64Bits());
+ ASSERT(rn.Is32Bits() && rm.Is32Bits());
+ DataProcessing3Source(rd, rn, rm, xzr, SMADDL_x);
+}
+
+
+void Assembler::smulh(const Register& rd,
+ const Register& rn,
+ const Register& rm) {
+ ASSERT(AreSameSizeAndType(rd, rn, rm));
+ DataProcessing3Source(rd, rn, rm, xzr, SMULH_x);
+}
+
+
+void Assembler::sdiv(const Register& rd,
+ const Register& rn,
+ const Register& rm) {
+ ASSERT(rd.SizeInBits() == rn.SizeInBits());
+ ASSERT(rd.SizeInBits() == rm.SizeInBits());
+ Emit(SF(rd) | SDIV | Rm(rm) | Rn(rn) | Rd(rd));
+}
+
+
+void Assembler::udiv(const Register& rd,
+ const Register& rn,
+ const Register& rm) {
+ ASSERT(rd.SizeInBits() == rn.SizeInBits());
+ ASSERT(rd.SizeInBits() == rm.SizeInBits());
+ Emit(SF(rd) | UDIV | Rm(rm) | Rn(rn) | Rd(rd));
+}
+
+
+void Assembler::rbit(const Register& rd,
+ const Register& rn) {
+ DataProcessing1Source(rd, rn, RBIT);
+}
+
+
+void Assembler::rev16(const Register& rd,
+ const Register& rn) {
+ DataProcessing1Source(rd, rn, REV16);
+}
+
+
+void Assembler::rev32(const Register& rd,
+ const Register& rn) {
+ ASSERT(rd.Is64Bits());
+ DataProcessing1Source(rd, rn, REV);
+}
+
+
+void Assembler::rev(const Register& rd,
+ const Register& rn) {
+ DataProcessing1Source(rd, rn, rd.Is64Bits() ? REV_x : REV_w);
+}
+
+
+void Assembler::clz(const Register& rd,
+ const Register& rn) {
+ DataProcessing1Source(rd, rn, CLZ);
+}
+
+
+void Assembler::cls(const Register& rd,
+ const Register& rn) {
+ DataProcessing1Source(rd, rn, CLS);
+}
+
+
+void Assembler::ldp(const CPURegister& rt,
+ const CPURegister& rt2,
+ const MemOperand& src) {
+ LoadStorePair(rt, rt2, src, LoadPairOpFor(rt, rt2));
+}
+
+
+void Assembler::stp(const CPURegister& rt,
+ const CPURegister& rt2,
+ const MemOperand& dst) {
+ LoadStorePair(rt, rt2, dst, StorePairOpFor(rt, rt2));
+}
+
+
+void Assembler::ldpsw(const Register& rt,
+ const Register& rt2,
+ const MemOperand& src) {
+ ASSERT(rt.Is64Bits());
+ LoadStorePair(rt, rt2, src, LDPSW_x);
+}
+
+
+void Assembler::LoadStorePair(const CPURegister& rt,
+ const CPURegister& rt2,
+ const MemOperand& addr,
+ LoadStorePairOp op) {
+ // 'rt' and 'rt2' can only be aliased for stores.
+ ASSERT(((op & LoadStorePairLBit) == 0) || !rt.Is(rt2));
+ ASSERT(AreSameSizeAndType(rt, rt2));
+
+ Instr memop = op | Rt(rt) | Rt2(rt2) | RnSP(addr.base()) |
+ ImmLSPair(addr.offset(), CalcLSPairDataSize(op));
+
+ Instr addrmodeop;
+ if (addr.IsImmediateOffset()) {
+ addrmodeop = LoadStorePairOffsetFixed;
+ } else {
+ // Pre-index and post-index modes.
+ ASSERT(!rt.Is(addr.base()));
+ ASSERT(!rt2.Is(addr.base()));
+ ASSERT(addr.offset() != 0);
+ if (addr.IsPreIndex()) {
+ addrmodeop = LoadStorePairPreIndexFixed;
+ } else {
+ ASSERT(addr.IsPostIndex());
+ addrmodeop = LoadStorePairPostIndexFixed;
+ }
+ }
+ Emit(addrmodeop | memop);
+}
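+
+
+// A sketch of typical call sites for the three addressing modes handled
+// above, assuming the MemOperand forms used elsewhere in this port
+// (illustrative only):
+//   stp(x1, x2, MemOperand(csp, 16));             // immediate offset
+//   stp(x1, x2, MemOperand(csp, -16, PreIndex));  // pre-index, base written back
+//   ldp(x1, x2, MemOperand(csp, 16, PostIndex));  // post-index, base written back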
+
+
+void Assembler::ldnp(const CPURegister& rt,
+ const CPURegister& rt2,
+ const MemOperand& src) {
+ LoadStorePairNonTemporal(rt, rt2, src,
+ LoadPairNonTemporalOpFor(rt, rt2));
+}
+
+
+void Assembler::stnp(const CPURegister& rt,
+ const CPURegister& rt2,
+ const MemOperand& dst) {
+ LoadStorePairNonTemporal(rt, rt2, dst,
+ StorePairNonTemporalOpFor(rt, rt2));
+}
+
+
+void Assembler::LoadStorePairNonTemporal(const CPURegister& rt,
+ const CPURegister& rt2,
+ const MemOperand& addr,
+ LoadStorePairNonTemporalOp op) {
+ ASSERT(!rt.Is(rt2));
+ ASSERT(AreSameSizeAndType(rt, rt2));
+ ASSERT(addr.IsImmediateOffset());
+
+ LSDataSize size = CalcLSPairDataSize(
+ static_cast<LoadStorePairOp>(op & LoadStorePairMask));
+ Emit(op | Rt(rt) | Rt2(rt2) | RnSP(addr.base()) |
+ ImmLSPair(addr.offset(), size));
+}
+
+
+// Memory instructions.
+void Assembler::ldrb(const Register& rt, const MemOperand& src) {
+ LoadStore(rt, src, LDRB_w);
+}
+
+
+void Assembler::strb(const Register& rt, const MemOperand& dst) {
+ LoadStore(rt, dst, STRB_w);
+}
+
+
+void Assembler::ldrsb(const Register& rt, const MemOperand& src) {
+ LoadStore(rt, src, rt.Is64Bits() ? LDRSB_x : LDRSB_w);
+}
+
+
+void Assembler::ldrh(const Register& rt, const MemOperand& src) {
+ LoadStore(rt, src, LDRH_w);
+}
+
+
+void Assembler::strh(const Register& rt, const MemOperand& dst) {
+ LoadStore(rt, dst, STRH_w);
+}
+
+
+void Assembler::ldrsh(const Register& rt, const MemOperand& src) {
+ LoadStore(rt, src, rt.Is64Bits() ? LDRSH_x : LDRSH_w);
+}
+
+
+void Assembler::ldr(const CPURegister& rt, const MemOperand& src) {
+ LoadStore(rt, src, LoadOpFor(rt));
+}
+
+
+void Assembler::str(const CPURegister& rt, const MemOperand& src) {
+ LoadStore(rt, src, StoreOpFor(rt));
+}
+
+
+void Assembler::ldrsw(const Register& rt, const MemOperand& src) {
+ ASSERT(rt.Is64Bits());
+ LoadStore(rt, src, LDRSW_x);
+}
+
+
+void Assembler::ldr(const Register& rt, uint64_t imm) {
+ // TODO(all): The constant pool may be garbage collected, so we cannot store
+ // arbitrary values in it. Manually move the value for now. Fix
+ // MacroAssembler::Fmov when this is implemented.
+ UNIMPLEMENTED();
+}
+
+
+void Assembler::ldr(const FPRegister& ft, double imm) {
+ // TODO(all): The constant pool may be garbage collected, so we cannot store
+ // arbitrary values in it. Manually move the value for now. Fix
+ // MacroAssembler::Fmov when this is implemented.
+ UNIMPLEMENTED();
+}
+
+
+void Assembler::ldr(const FPRegister& ft, float imm) {
+ // TODO(all): The constant pool may be garbage collected, so we cannot store
+ // arbitrary values in it. Manually move the value for now. Fix
+ // MacroAssembler::Fmov when this is implemented.
+ UNIMPLEMENTED();
+}
+
+
+void Assembler::mov(const Register& rd, const Register& rm) {
+ // Moves involving the stack pointer are encoded as add immediate with
+ // second operand of zero. Otherwise, orr with first operand zr is
+ // used.
+ if (rd.IsSP() || rm.IsSP()) {
+ add(rd, rm, 0);
+ } else {
+ orr(rd, AppropriateZeroRegFor(rd), rm);
+ }
+}
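+
+
+// For example, mov(csp, x16) is emitted as add csp, x16, #0 (orr cannot
+// encode the stack pointer), while mov(x0, x1) becomes orr x0, xzr, x1.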
+
+
+void Assembler::mvn(const Register& rd, const Operand& operand) {
+ orn(rd, AppropriateZeroRegFor(rd), operand);
+}
+
+
+void Assembler::mrs(const Register& rt, SystemRegister sysreg) {
+ ASSERT(rt.Is64Bits());
+ Emit(MRS | ImmSystemRegister(sysreg) | Rt(rt));
+}
+
+
+void Assembler::msr(SystemRegister sysreg, const Register& rt) {
+ ASSERT(rt.Is64Bits());
+ Emit(MSR | Rt(rt) | ImmSystemRegister(sysreg));
+}
+
+
+void Assembler::hint(SystemHint code) {
+ Emit(HINT | ImmHint(code) | Rt(xzr));
+}
+
+
+void Assembler::dmb(BarrierDomain domain, BarrierType type) {
+ Emit(DMB | ImmBarrierDomain(domain) | ImmBarrierType(type));
+}
+
+
+void Assembler::dsb(BarrierDomain domain, BarrierType type) {
+ Emit(DSB | ImmBarrierDomain(domain) | ImmBarrierType(type));
+}
+
+
+void Assembler::isb() {
+ Emit(ISB | ImmBarrierDomain(FullSystem) | ImmBarrierType(BarrierAll));
+}
+
+
+void Assembler::fmov(FPRegister fd, double imm) {
+ ASSERT(fd.Is64Bits());
+ ASSERT(IsImmFP64(imm));
+ Emit(FMOV_d_imm | Rd(fd) | ImmFP64(imm));
+}
+
+
+void Assembler::fmov(FPRegister fd, float imm) {
+ ASSERT(fd.Is32Bits());
+ ASSERT(IsImmFP32(imm));
+ Emit(FMOV_s_imm | Rd(fd) | ImmFP32(imm));
+}
+
+
+void Assembler::fmov(Register rd, FPRegister fn) {
+ ASSERT(rd.SizeInBits() == fn.SizeInBits());
+ FPIntegerConvertOp op = rd.Is32Bits() ? FMOV_ws : FMOV_xd;
+ Emit(op | Rd(rd) | Rn(fn));
+}
+
+
+void Assembler::fmov(FPRegister fd, Register rn) {
+ ASSERT(fd.SizeInBits() == rn.SizeInBits());
+ FPIntegerConvertOp op = fd.Is32Bits() ? FMOV_sw : FMOV_dx;
+ Emit(op | Rd(fd) | Rn(rn));
+}
+
+
+void Assembler::fmov(FPRegister fd, FPRegister fn) {
+ ASSERT(fd.SizeInBits() == fn.SizeInBits());
+ Emit(FPType(fd) | FMOV | Rd(fd) | Rn(fn));
+}
+
+
+void Assembler::fadd(const FPRegister& fd,
+ const FPRegister& fn,
+ const FPRegister& fm) {
+ FPDataProcessing2Source(fd, fn, fm, FADD);
+}
+
+
+void Assembler::fsub(const FPRegister& fd,
+ const FPRegister& fn,
+ const FPRegister& fm) {
+ FPDataProcessing2Source(fd, fn, fm, FSUB);
+}
+
+
+void Assembler::fmul(const FPRegister& fd,
+ const FPRegister& fn,
+ const FPRegister& fm) {
+ FPDataProcessing2Source(fd, fn, fm, FMUL);
+}
+
+
+void Assembler::fmadd(const FPRegister& fd,
+ const FPRegister& fn,
+ const FPRegister& fm,
+ const FPRegister& fa) {
+ FPDataProcessing3Source(fd, fn, fm, fa, fd.Is32Bits() ? FMADD_s : FMADD_d);
+}
+
+
+void Assembler::fmsub(const FPRegister& fd,
+ const FPRegister& fn,
+ const FPRegister& fm,
+ const FPRegister& fa) {
+ FPDataProcessing3Source(fd, fn, fm, fa, fd.Is32Bits() ? FMSUB_s : FMSUB_d);
+}
+
+
+void Assembler::fnmadd(const FPRegister& fd,
+ const FPRegister& fn,
+ const FPRegister& fm,
+ const FPRegister& fa) {
+ FPDataProcessing3Source(fd, fn, fm, fa, fd.Is32Bits() ? FNMADD_s : FNMADD_d);
+}
+
+
+void Assembler::fnmsub(const FPRegister& fd,
+ const FPRegister& fn,
+ const FPRegister& fm,
+ const FPRegister& fa) {
+ FPDataProcessing3Source(fd, fn, fm, fa, fd.Is32Bits() ? FNMSUB_s : FNMSUB_d);
+}
+
+
+void Assembler::fdiv(const FPRegister& fd,
+ const FPRegister& fn,
+ const FPRegister& fm) {
+ FPDataProcessing2Source(fd, fn, fm, FDIV);
+}
+
+
+void Assembler::fmax(const FPRegister& fd,
+ const FPRegister& fn,
+ const FPRegister& fm) {
+ FPDataProcessing2Source(fd, fn, fm, FMAX);
+}
+
+
+void Assembler::fmaxnm(const FPRegister& fd,
+ const FPRegister& fn,
+ const FPRegister& fm) {
+ FPDataProcessing2Source(fd, fn, fm, FMAXNM);
+}
+
+
+void Assembler::fmin(const FPRegister& fd,
+ const FPRegister& fn,
+ const FPRegister& fm) {
+ FPDataProcessing2Source(fd, fn, fm, FMIN);
+}
+
+
+void Assembler::fminnm(const FPRegister& fd,
+ const FPRegister& fn,
+ const FPRegister& fm) {
+ FPDataProcessing2Source(fd, fn, fm, FMINNM);
+}
+
+
+void Assembler::fabs(const FPRegister& fd,
+ const FPRegister& fn) {
+ ASSERT(fd.SizeInBits() == fn.SizeInBits());
+ FPDataProcessing1Source(fd, fn, FABS);
+}
+
+
+void Assembler::fneg(const FPRegister& fd,
+ const FPRegister& fn) {
+ ASSERT(fd.SizeInBits() == fn.SizeInBits());
+ FPDataProcessing1Source(fd, fn, FNEG);
+}
+
+
+void Assembler::fsqrt(const FPRegister& fd,
+ const FPRegister& fn) {
+ ASSERT(fd.SizeInBits() == fn.SizeInBits());
+ FPDataProcessing1Source(fd, fn, FSQRT);
+}
+
+
+void Assembler::frinta(const FPRegister& fd,
+ const FPRegister& fn) {
+ ASSERT(fd.SizeInBits() == fn.SizeInBits());
+ FPDataProcessing1Source(fd, fn, FRINTA);
+}
+
+
+void Assembler::frintn(const FPRegister& fd,
+ const FPRegister& fn) {
+ ASSERT(fd.SizeInBits() == fn.SizeInBits());
+ FPDataProcessing1Source(fd, fn, FRINTN);
+}
+
+
+void Assembler::frintz(const FPRegister& fd,
+ const FPRegister& fn) {
+ ASSERT(fd.SizeInBits() == fn.SizeInBits());
+ FPDataProcessing1Source(fd, fn, FRINTZ);
+}
+
+
+void Assembler::fcmp(const FPRegister& fn,
+ const FPRegister& fm) {
+ ASSERT(fn.SizeInBits() == fm.SizeInBits());
+ Emit(FPType(fn) | FCMP | Rm(fm) | Rn(fn));
+}
+
+
+void Assembler::fcmp(const FPRegister& fn,
+ double value) {
+ USE(value);
+ // Although the fcmp instruction can strictly only take an immediate value of
+ // +0.0, we don't need to check for -0.0 because the sign of 0.0 doesn't
+ // affect the result of the comparison.
+ ASSERT(value == 0.0);
+ Emit(FPType(fn) | FCMP_zero | Rn(fn));
+}
+
+
+void Assembler::fccmp(const FPRegister& fn,
+ const FPRegister& fm,
+ StatusFlags nzcv,
+ Condition cond) {
+ ASSERT(fn.SizeInBits() == fm.SizeInBits());
+ Emit(FPType(fn) | FCCMP | Rm(fm) | Cond(cond) | Rn(fn) | Nzcv(nzcv));
+}
+
+
+void Assembler::fcsel(const FPRegister& fd,
+ const FPRegister& fn,
+ const FPRegister& fm,
+ Condition cond) {
+ ASSERT(fd.SizeInBits() == fn.SizeInBits());
+ ASSERT(fd.SizeInBits() == fm.SizeInBits());
+ Emit(FPType(fd) | FCSEL | Rm(fm) | Cond(cond) | Rn(fn) | Rd(fd));
+}
+
+
+void Assembler::FPConvertToInt(const Register& rd,
+ const FPRegister& fn,
+ FPIntegerConvertOp op) {
+ Emit(SF(rd) | FPType(fn) | op | Rn(fn) | Rd(rd));
+}
+
+
+void Assembler::fcvt(const FPRegister& fd,
+ const FPRegister& fn) {
+ if (fd.Is64Bits()) {
+ // Convert float to double.
+ ASSERT(fn.Is32Bits());
+ FPDataProcessing1Source(fd, fn, FCVT_ds);
+ } else {
+ // Convert double to float.
+ ASSERT(fn.Is64Bits());
+ FPDataProcessing1Source(fd, fn, FCVT_sd);
+ }
+}
+
+
+void Assembler::fcvtau(const Register& rd, const FPRegister& fn) {
+ FPConvertToInt(rd, fn, FCVTAU);
+}
+
+
+void Assembler::fcvtas(const Register& rd, const FPRegister& fn) {
+ FPConvertToInt(rd, fn, FCVTAS);
+}
+
+
+void Assembler::fcvtmu(const Register& rd, const FPRegister& fn) {
+ FPConvertToInt(rd, fn, FCVTMU);
+}
+
+
+void Assembler::fcvtms(const Register& rd, const FPRegister& fn) {
+ FPConvertToInt(rd, fn, FCVTMS);
+}
+
+
+void Assembler::fcvtnu(const Register& rd, const FPRegister& fn) {
+ FPConvertToInt(rd, fn, FCVTNU);
+}
+
+
+void Assembler::fcvtns(const Register& rd, const FPRegister& fn) {
+ FPConvertToInt(rd, fn, FCVTNS);
+}
+
+
+void Assembler::fcvtzu(const Register& rd, const FPRegister& fn) {
+ FPConvertToInt(rd, fn, FCVTZU);
+}
+
+
+void Assembler::fcvtzs(const Register& rd, const FPRegister& fn) {
+ FPConvertToInt(rd, fn, FCVTZS);
+}
+
+
+void Assembler::scvtf(const FPRegister& fd,
+ const Register& rn,
+ unsigned fbits) {
+ if (fbits == 0) {
+ Emit(SF(rn) | FPType(fd) | SCVTF | Rn(rn) | Rd(fd));
+ } else {
+ Emit(SF(rn) | FPType(fd) | SCVTF_fixed | FPScale(64 - fbits) | Rn(rn) |
+ Rd(fd));
+ }
+}
+
+
+void Assembler::ucvtf(const FPRegister& fd,
+ const Register& rn,
+ unsigned fbits) {
+ if (fbits == 0) {
+ Emit(SF(rn) | FPType(fd) | UCVTF | Rn(rn) | Rd(fd));
+ } else {
+ Emit(SF(rn) | FPType(fd) | UCVTF_fixed | FPScale(64 - fbits) | Rn(rn) |
+ Rd(fd));
+ }
+}
+
+
+// Note:
+// Below, a difference in case for the same letter indicates a
+// negated bit.
+// If b is 1, then B is 0.
+Instr Assembler::ImmFP32(float imm) {
+ ASSERT(IsImmFP32(imm));
+ // bits: aBbb.bbbc.defg.h000.0000.0000.0000.0000
+ uint32_t bits = float_to_rawbits(imm);
+ // bit7: a000.0000
+ uint32_t bit7 = ((bits >> 31) & 0x1) << 7;
+ // bit6: 0b00.0000
+ uint32_t bit6 = ((bits >> 29) & 0x1) << 6;
+ // bit5_to_0: 00cd.efgh
+ uint32_t bit5_to_0 = (bits >> 19) & 0x3f;
+
+ return (bit7 | bit6 | bit5_to_0) << ImmFP_offset;
+}
+
+
+Instr Assembler::ImmFP64(double imm) {
+ ASSERT(IsImmFP64(imm));
+ // bits: aBbb.bbbb.bbcd.efgh.0000.0000.0000.0000
+ // 0000.0000.0000.0000.0000.0000.0000.0000
+ uint64_t bits = double_to_rawbits(imm);
+ // bit7: a000.0000
+ uint32_t bit7 = ((bits >> 63) & 0x1) << 7;
+ // bit6: 0b00.0000
+ uint32_t bit6 = ((bits >> 61) & 0x1) << 6;
+ // bit5_to_0: 00cd.efgh
+ uint32_t bit5_to_0 = (bits >> 48) & 0x3f;
+
+ return (bit7 | bit6 | bit5_to_0) << ImmFP_offset;
+}
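+// Worked examples (illustrative) for the encodings above:
+//  * ImmFP32(1.0f): raw bits 0x3f800000 give bit7 = 0, bit6 = 0x40 and
+//    bit5_to_0 = 0x30, so the 8-bit immediate field holds 0x70.
+//  * ImmFP64(-2.0): raw bits 0xc000000000000000 give bit7 = 0x80, bit6 = 0
+//    and bit5_to_0 = 0, so the 8-bit immediate field holds 0x80.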
+
+
+// Code generation helpers.
+void Assembler::MoveWide(const Register& rd,
+ uint64_t imm,
+ int shift,
+ MoveWideImmediateOp mov_op) {
+ if (shift >= 0) {
+ // Explicit shift specified.
+ ASSERT((shift == 0) || (shift == 16) || (shift == 32) || (shift == 48));
+ ASSERT(rd.Is64Bits() || (shift == 0) || (shift == 16));
+ shift /= 16;
+ } else {
+ // Calculate a new immediate and shift combination to encode the immediate
+ // argument.
+ shift = 0;
+ if ((imm & ~0xffffUL) == 0) {
+ // Nothing to do.
+ } else if ((imm & ~(0xffffUL << 16)) == 0) {
+ imm >>= 16;
+ shift = 1;
+ } else if ((imm & ~(0xffffUL << 32)) == 0) {
+ ASSERT(rd.Is64Bits());
+ imm >>= 32;
+ shift = 2;
+ } else if ((imm & ~(0xffffUL << 48)) == 0) {
+ ASSERT(rd.Is64Bits());
+ imm >>= 48;
+ shift = 3;
+ }
+ }
+
+ ASSERT(is_uint16(imm));
+
+ Emit(SF(rd) | MoveWideImmediateFixed | mov_op |
+ Rd(rd) | ImmMoveWide(imm) | ShiftMoveWide(shift));
+}
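+// For example (illustrative): a call with imm = 0x12340000 and a negative
+// shift takes the recomputation branch above and is encoded with imm = 0x1234
+// and shift = 1, i.e. a left shift by 16 bits.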
+
+
+void Assembler::AddSub(const Register& rd,
+ const Register& rn,
+ const Operand& operand,
+ FlagsUpdate S,
+ AddSubOp op) {
+ ASSERT(rd.SizeInBits() == rn.SizeInBits());
+ ASSERT(!operand.NeedsRelocation());
+ if (operand.IsImmediate()) {
+ int64_t immediate = operand.immediate();
+ ASSERT(IsImmAddSub(immediate));
+ Instr dest_reg = (S == SetFlags) ? Rd(rd) : RdSP(rd);
+ Emit(SF(rd) | AddSubImmediateFixed | op | Flags(S) |
+ ImmAddSub(immediate) | dest_reg | RnSP(rn));
+ } else if (operand.IsShiftedRegister()) {
+ ASSERT(operand.reg().SizeInBits() == rd.SizeInBits());
+ ASSERT(operand.shift() != ROR);
+
+ // For instructions of the form:
+ // add/sub wsp, <Wn>, <Wm> [, LSL #0-3 ]
+ // add/sub <Wd>, wsp, <Wm> [, LSL #0-3 ]
+ // add/sub wsp, wsp, <Wm> [, LSL #0-3 ]
+ // adds/subs <Wd>, wsp, <Wm> [, LSL #0-3 ]
+ // or their 64-bit register equivalents, convert the operand from shifted to
+ // extended register mode, and emit an add/sub extended instruction.
+ if (rn.IsSP() || rd.IsSP()) {
+ ASSERT(!(rd.IsSP() && (S == SetFlags)));
+ DataProcExtendedRegister(rd, rn, operand.ToExtendedRegister(), S,
+ AddSubExtendedFixed | op);
+ } else {
+ DataProcShiftedRegister(rd, rn, operand, S, AddSubShiftedFixed | op);
+ }
+ } else {
+ ASSERT(operand.IsExtendedRegister());
+ DataProcExtendedRegister(rd, rn, operand, S, AddSubExtendedFixed | op);
+ }
+}
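+// For example (illustrative): add(csp, x0, Operand(x1, LSL, 2)) involves the
+// stack pointer, so the shifted-register operand is converted with
+// ToExtendedRegister() and the instruction is emitted in extended-register
+// form, with LSL #2 expressed as the equivalent UXTX #2.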
+
+
+void Assembler::AddSubWithCarry(const Register& rd,
+ const Register& rn,
+ const Operand& operand,
+ FlagsUpdate S,
+ AddSubWithCarryOp op) {
+ ASSERT(rd.SizeInBits() == rn.SizeInBits());
+ ASSERT(rd.SizeInBits() == operand.reg().SizeInBits());
+ ASSERT(operand.IsShiftedRegister() && (operand.shift_amount() == 0));
+ ASSERT(!operand.NeedsRelocation());
+ Emit(SF(rd) | op | Flags(S) | Rm(operand.reg()) | Rn(rn) | Rd(rd));
+}
+
+
+void Assembler::hlt(int code) {
+ ASSERT(is_uint16(code));
+ Emit(HLT | ImmException(code));
+}
+
+
+void Assembler::brk(int code) {
+ ASSERT(is_uint16(code));
+ Emit(BRK | ImmException(code));
+}
+
+
+void Assembler::debug(const char* message, uint32_t code, Instr params) {
+#ifdef USE_SIMULATOR
+ // Don't generate simulator specific code if we are building a snapshot, which
+ // might be run on real hardware.
+ if (!Serializer::enabled()) {
+#ifdef DEBUG
+ Serializer::TooLateToEnableNow();
+#endif
+ // The arguments to the debug marker need to be contiguous in memory, so
+ // make sure we don't try to emit pools.
+ BlockPoolsScope scope(this);
+
+ Label start;
+ bind(&start);
+
+ // Refer to instructions-arm64.h for a description of the marker and its
+ // arguments.
+ hlt(kImmExceptionIsDebug);
+ ASSERT(SizeOfCodeGeneratedSince(&start) == kDebugCodeOffset);
+ dc32(code);
+ ASSERT(SizeOfCodeGeneratedSince(&start) == kDebugParamsOffset);
+ dc32(params);
+ ASSERT(SizeOfCodeGeneratedSince(&start) == kDebugMessageOffset);
+ EmitStringData(message);
+ hlt(kImmExceptionIsUnreachable);
+
+ return;
+ }
+ // Fall through if Serializer is enabled.
+#endif
+
+ if (params & BREAK) {
+ hlt(kImmExceptionIsDebug);
+ }
+}
+
+
+void Assembler::Logical(const Register& rd,
+ const Register& rn,
+ const Operand& operand,
+ LogicalOp op) {
+ ASSERT(rd.SizeInBits() == rn.SizeInBits());
+ ASSERT(!operand.NeedsRelocation());
+ if (operand.IsImmediate()) {
+ int64_t immediate = operand.immediate();
+ unsigned reg_size = rd.SizeInBits();
+
+ ASSERT(immediate != 0);
+ ASSERT(immediate != -1);
+ ASSERT(rd.Is64Bits() || is_uint32(immediate));
+
+ // If the operation is NOT, invert the operation and immediate.
+ if ((op & NOT) == NOT) {
+ op = static_cast<LogicalOp>(op & ~NOT);
+ immediate = rd.Is64Bits() ? ~immediate : (~immediate & kWRegMask);
+ }
+
+ unsigned n, imm_s, imm_r;
+ if (IsImmLogical(immediate, reg_size, &n, &imm_s, &imm_r)) {
+ // Immediate can be encoded in the instruction.
+ LogicalImmediate(rd, rn, n, imm_s, imm_r, op);
+ } else {
+ // This case is handled in the macro assembler.
+ UNREACHABLE();
+ }
+ } else {
+ ASSERT(operand.IsShiftedRegister());
+ ASSERT(operand.reg().SizeInBits() == rd.SizeInBits());
+ Instr dp_op = static_cast<Instr>(op | LogicalShiftedFixed);
+ DataProcShiftedRegister(rd, rn, operand, LeaveFlags, dp_op);
+ }
+}
+
+
+void Assembler::LogicalImmediate(const Register& rd,
+ const Register& rn,
+ unsigned n,
+ unsigned imm_s,
+ unsigned imm_r,
+ LogicalOp op) {
+ unsigned reg_size = rd.SizeInBits();
+ Instr dest_reg = (op == ANDS) ? Rd(rd) : RdSP(rd);
+ Emit(SF(rd) | LogicalImmediateFixed | op | BitN(n, reg_size) |
+ ImmSetBits(imm_s, reg_size) | ImmRotate(imm_r, reg_size) | dest_reg |
+ Rn(rn));
+}
+
+
+void Assembler::ConditionalCompare(const Register& rn,
+ const Operand& operand,
+ StatusFlags nzcv,
+ Condition cond,
+ ConditionalCompareOp op) {
+ Instr ccmpop;
+ ASSERT(!operand.NeedsRelocation());
+ if (operand.IsImmediate()) {
+ int64_t immediate = operand.immediate();
+ ASSERT(IsImmConditionalCompare(immediate));
+ ccmpop = ConditionalCompareImmediateFixed | op | ImmCondCmp(immediate);
+ } else {
+ ASSERT(operand.IsShiftedRegister() && (operand.shift_amount() == 0));
+ ccmpop = ConditionalCompareRegisterFixed | op | Rm(operand.reg());
+ }
+ Emit(SF(rn) | ccmpop | Cond(cond) | Rn(rn) | Nzcv(nzcv));
+}
+
+
+void Assembler::DataProcessing1Source(const Register& rd,
+ const Register& rn,
+ DataProcessing1SourceOp op) {
+ ASSERT(rd.SizeInBits() == rn.SizeInBits());
+ Emit(SF(rn) | op | Rn(rn) | Rd(rd));
+}
+
+
+void Assembler::FPDataProcessing1Source(const FPRegister& fd,
+ const FPRegister& fn,
+ FPDataProcessing1SourceOp op) {
+ Emit(FPType(fn) | op | Rn(fn) | Rd(fd));
+}
+
+
+void Assembler::FPDataProcessing2Source(const FPRegister& fd,
+ const FPRegister& fn,
+ const FPRegister& fm,
+ FPDataProcessing2SourceOp op) {
+ ASSERT(fd.SizeInBits() == fn.SizeInBits());
+ ASSERT(fd.SizeInBits() == fm.SizeInBits());
+ Emit(FPType(fd) | op | Rm(fm) | Rn(fn) | Rd(fd));
+}
+
+
+void Assembler::FPDataProcessing3Source(const FPRegister& fd,
+ const FPRegister& fn,
+ const FPRegister& fm,
+ const FPRegister& fa,
+ FPDataProcessing3SourceOp op) {
+ ASSERT(AreSameSizeAndType(fd, fn, fm, fa));
+ Emit(FPType(fd) | op | Rm(fm) | Rn(fn) | Rd(fd) | Ra(fa));
+}
+
+
+void Assembler::EmitShift(const Register& rd,
+ const Register& rn,
+ Shift shift,
+ unsigned shift_amount) {
+ switch (shift) {
+ case LSL:
+ lsl(rd, rn, shift_amount);
+ break;
+ case LSR:
+ lsr(rd, rn, shift_amount);
+ break;
+ case ASR:
+ asr(rd, rn, shift_amount);
+ break;
+ case ROR:
+ ror(rd, rn, shift_amount);
+ break;
+ default:
+ UNREACHABLE();
+ }
+}
+
+
+void Assembler::EmitExtendShift(const Register& rd,
+ const Register& rn,
+ Extend extend,
+ unsigned left_shift) {
+ ASSERT(rd.SizeInBits() >= rn.SizeInBits());
+ unsigned reg_size = rd.SizeInBits();
+ // Use the correct size of register.
+ Register rn_ = Register::Create(rn.code(), rd.SizeInBits());
+ // Bits extracted are high_bit:0.
+ unsigned high_bit = (8 << (extend & 0x3)) - 1;
+ // Number of bits left in the result that are not introduced by the shift.
+ unsigned non_shift_bits = (reg_size - left_shift) & (reg_size - 1);
+
+ if ((non_shift_bits > high_bit) || (non_shift_bits == 0)) {
+ switch (extend) {
+ case UXTB:
+ case UXTH:
+ case UXTW: ubfm(rd, rn_, non_shift_bits, high_bit); break;
+ case SXTB:
+ case SXTH:
+ case SXTW: sbfm(rd, rn_, non_shift_bits, high_bit); break;
+ case UXTX:
+ case SXTX: {
+ ASSERT(rn.SizeInBits() == kXRegSizeInBits);
+ // Nothing to extend. Just shift.
+ lsl(rd, rn_, left_shift);
+ break;
+ }
+ default: UNREACHABLE();
+ }
+ } else {
+ // No need to extend as the extended bits would be shifted away.
+ lsl(rd, rn_, left_shift);
+ }
+}
+
+
+void Assembler::DataProcShiftedRegister(const Register& rd,
+ const Register& rn,
+ const Operand& operand,
+ FlagsUpdate S,
+ Instr op) {
+ ASSERT(operand.IsShiftedRegister());
+ ASSERT(rn.Is64Bits() || (rn.Is32Bits() && is_uint5(operand.shift_amount())));
+ ASSERT(!operand.NeedsRelocation());
+ Emit(SF(rd) | op | Flags(S) |
+ ShiftDP(operand.shift()) | ImmDPShift(operand.shift_amount()) |
+ Rm(operand.reg()) | Rn(rn) | Rd(rd));
+}
+
+
+void Assembler::DataProcExtendedRegister(const Register& rd,
+ const Register& rn,
+ const Operand& operand,
+ FlagsUpdate S,
+ Instr op) {
+ ASSERT(!operand.NeedsRelocation());
+ Instr dest_reg = (S == SetFlags) ? Rd(rd) : RdSP(rd);
+ Emit(SF(rd) | op | Flags(S) | Rm(operand.reg()) |
+ ExtendMode(operand.extend()) | ImmExtendShift(operand.shift_amount()) |
+ dest_reg | RnSP(rn));
+}
+
+
+bool Assembler::IsImmAddSub(int64_t immediate) {
+ return is_uint12(immediate) ||
+ (is_uint12(immediate >> 12) && ((immediate & 0xfff) == 0));
+}
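+// For example (illustrative): 0xfff and 0xfff000 are encodable add/sub
+// immediates (an unsigned 12-bit value, optionally shifted left by 12 bits),
+// while 0xfff001 is not and must be handled by the macro assembler.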
+
+
+void Assembler::LoadStore(const CPURegister& rt,
+ const MemOperand& addr,
+ LoadStoreOp op) {
+ Instr memop = op | Rt(rt) | RnSP(addr.base());
+ ptrdiff_t offset = addr.offset();
+
+ if (addr.IsImmediateOffset()) {
+ LSDataSize size = CalcLSDataSize(op);
+ if (IsImmLSScaled(offset, size)) {
+ // Use the scaled addressing mode.
+ Emit(LoadStoreUnsignedOffsetFixed | memop |
+ ImmLSUnsigned(offset >> size));
+ } else if (IsImmLSUnscaled(offset)) {
+ // Use the unscaled addressing mode.
+ Emit(LoadStoreUnscaledOffsetFixed | memop | ImmLS(offset));
+ } else {
+ // This case is handled in the macro assembler.
+ UNREACHABLE();
+ }
+ } else if (addr.IsRegisterOffset()) {
+ Extend ext = addr.extend();
+ Shift shift = addr.shift();
+ unsigned shift_amount = addr.shift_amount();
+
+ // LSL is encoded in the option field as UXTX.
+ if (shift == LSL) {
+ ext = UXTX;
+ }
+
+ // Shifts are encoded in one bit, indicating a left shift by the memory
+ // access size.
+ ASSERT((shift_amount == 0) ||
+ (shift_amount == static_cast<unsigned>(CalcLSDataSize(op))));
+ Emit(LoadStoreRegisterOffsetFixed | memop | Rm(addr.regoffset()) |
+ ExtendMode(ext) | ImmShiftLS((shift_amount > 0) ? 1 : 0));
+ } else {
+ // Pre-index and post-index modes.
+ ASSERT(!rt.Is(addr.base()));
+ if (IsImmLSUnscaled(offset)) {
+ if (addr.IsPreIndex()) {
+ Emit(LoadStorePreIndexFixed | memop | ImmLS(offset));
+ } else {
+ ASSERT(addr.IsPostIndex());
+ Emit(LoadStorePostIndexFixed | memop | ImmLS(offset));
+ }
+ } else {
+ // This case is handled in the macro assembler.
+ UNREACHABLE();
+ }
+ }
+}
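+// For example (illustrative): ldr(x0, MemOperand(x1, 8)) uses the scaled
+// unsigned-offset form (8 is a multiple of the 8-byte access size), while
+// ldr(x0, MemOperand(x1, 9)) falls back to the unscaled-offset form, and
+// ldr(x0, MemOperand(x1, 8, PostIndex)) uses the post-index form.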
+
+
+bool Assembler::IsImmLSUnscaled(ptrdiff_t offset) {
+ return is_int9(offset);
+}
+
+
+bool Assembler::IsImmLSScaled(ptrdiff_t offset, LSDataSize size) {
+ bool offset_is_size_multiple = (((offset >> size) << size) == offset);
+ return offset_is_size_multiple && is_uint12(offset >> size);
+}
+
+
+void Assembler::LoadLiteral(const CPURegister& rt, int offset_from_pc) {
+ ASSERT((offset_from_pc & ((1 << kLiteralEntrySizeLog2) - 1)) == 0);
+ // The pattern 'ldr xzr, #offset' is used to indicate the beginning of a
+ // constant pool. It should not be emitted.
+ ASSERT(!rt.Is(xzr));
+ Emit(LDR_x_lit |
+ ImmLLiteral(offset_from_pc >> kLiteralEntrySizeLog2) |
+ Rt(rt));
+}
+
+
+void Assembler::LoadRelocatedValue(const CPURegister& rt,
+ const Operand& operand,
+ LoadLiteralOp op) {
+ int64_t imm = operand.immediate();
+ ASSERT(is_int32(imm) || is_uint32(imm) || (rt.Is64Bits()));
+ RecordRelocInfo(operand.rmode(), imm);
+ BlockConstPoolFor(1);
+ Emit(op | ImmLLiteral(0) | Rt(rt));
+}
+
+
+// Test if a given value can be encoded in the immediate field of a logical
+// instruction.
+// If it can be encoded, the function returns true, and values pointed to by n,
+// imm_s and imm_r are updated with immediates encoded in the format required
+// by the corresponding fields in the logical instruction.
+// If it cannot be encoded, the function returns false, and the values pointed
+// to by n, imm_s and imm_r are undefined.
+bool Assembler::IsImmLogical(uint64_t value,
+ unsigned width,
+ unsigned* n,
+ unsigned* imm_s,
+ unsigned* imm_r) {
+ ASSERT((n != NULL) && (imm_s != NULL) && (imm_r != NULL));
+ ASSERT((width == kWRegSizeInBits) || (width == kXRegSizeInBits));
+
+ // Logical immediates are encoded using parameters n, imm_s and imm_r using
+ // the following table:
+ //
+ // N imms immr size S R
+ // 1 ssssss rrrrrr 64 UInt(ssssss) UInt(rrrrrr)
+ // 0 0sssss xrrrrr 32 UInt(sssss) UInt(rrrrr)
+ // 0 10ssss xxrrrr 16 UInt(ssss) UInt(rrrr)
+ // 0 110sss xxxrrr 8 UInt(sss) UInt(rrr)
+ // 0 1110ss xxxxrr 4 UInt(ss) UInt(rr)
+ // 0 11110s xxxxxr 2 UInt(s) UInt(r)
+ // (s bits must not be all set)
+ //
+ // A pattern is constructed of size bits, where the least significant S+1
+ // bits are set. The pattern is rotated right by R, and repeated across a
+ // 32 or 64-bit value, depending on destination register width.
+ //
+ // To test if an arbitrary immediate can be encoded using this scheme, an
+ // iterative algorithm is used.
+ //
+ // TODO(mcapewel) This code does not consider using X/W register overlap to
+ // support 64-bit immediates where the top 32-bits are zero, and the bottom
+ // 32-bits are an encodable logical immediate.
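+ //
+ // For example (illustrative): 0x00ff00ff00ff00ff at width 64 is halved
+ // twice by step 5 below, down to a 16-bit pattern of eight contiguous set
+ // bits with no rotation, and is encoded as n = 0, imm_s = 0x27 (10'0111)
+ // and imm_r = 0.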
+
+ // 1. If the value has all set or all clear bits, it can't be encoded.
+ if ((value == 0) || (value == 0xffffffffffffffffUL) ||
+ ((width == kWRegSizeInBits) && (value == 0xffffffff))) {
+ return false;
+ }
+
+ unsigned lead_zero = CountLeadingZeros(value, width);
+ unsigned lead_one = CountLeadingZeros(~value, width);
+ unsigned trail_zero = CountTrailingZeros(value, width);
+ unsigned trail_one = CountTrailingZeros(~value, width);
+ unsigned set_bits = CountSetBits(value, width);
+
+ // The fixed bits in the immediate s field.
+ // If width == 64 (X reg), start at 0xFFFFFF80.
+ // If width == 32 (W reg), start at 0xFFFFFFC0, as the iteration for 64-bit
+ // widths won't be executed.
+ int imm_s_fixed = (width == kXRegSizeInBits) ? -128 : -64;
+ int imm_s_mask = 0x3F;
+
+ for (;;) {
+ // 2. If the value is two bits wide, it can be encoded.
+ if (width == 2) {
+ *n = 0;
+ *imm_s = 0x3C;
+ *imm_r = (value & 3) - 1;
+ return true;
+ }
+
+ *n = (width == 64) ? 1 : 0;
+ *imm_s = ((imm_s_fixed | (set_bits - 1)) & imm_s_mask);
+ if ((lead_zero + set_bits) == width) {
+ *imm_r = 0;
+ } else {
+ *imm_r = (lead_zero > 0) ? (width - trail_zero) : lead_one;
+ }
+
+ // 3. If the sum of leading zeros, trailing zeros and set bits is equal to
+ // the bit width of the value, it can be encoded.
+ if (lead_zero + trail_zero + set_bits == width) {
+ return true;
+ }
+
+ // 4. If the sum of leading ones, trailing ones and unset bits in the
+ // value is equal to the bit width of the value, it can be encoded.
+ if (lead_one + trail_one + (width - set_bits) == width) {
+ return true;
+ }
+
+ // 5. If the most-significant half of the bitwise value is equal to the
+ // least-significant half, return to step 2 using the least-significant
+ // half of the value.
+ uint64_t mask = (1UL << (width >> 1)) - 1;
+ if ((value & mask) == ((value >> (width >> 1)) & mask)) {
+ width >>= 1;
+ set_bits >>= 1;
+ imm_s_fixed >>= 1;
+ continue;
+ }
+
+ // 6. Otherwise, the value can't be encoded.
+ return false;
+ }
+}
+
+
+bool Assembler::IsImmConditionalCompare(int64_t immediate) {
+ return is_uint5(immediate);
+}
+
+
+bool Assembler::IsImmFP32(float imm) {
+ // Valid values will have the form:
+ // aBbb.bbbc.defg.h000.0000.0000.0000.0000
+ uint32_t bits = float_to_rawbits(imm);
+ // bits[19..0] are cleared.
+ if ((bits & 0x7ffff) != 0) {
+ return false;
+ }
+
+ // bits[29..25] are all set or all cleared.
+ uint32_t b_pattern = (bits >> 16) & 0x3e00;
+ if (b_pattern != 0 && b_pattern != 0x3e00) {
+ return false;
+ }
+
+ // bit[30] and bit[29] are opposite.
+ if (((bits ^ (bits << 1)) & 0x40000000) == 0) {
+ return false;
+ }
+
+ return true;
+}
+
+
+bool Assembler::IsImmFP64(double imm) {
+ // Valid values will have the form:
+ // aBbb.bbbb.bbcd.efgh.0000.0000.0000.0000
+ // 0000.0000.0000.0000.0000.0000.0000.0000
+ uint64_t bits = double_to_rawbits(imm);
+ // bits[47..0] are cleared.
+ if ((bits & 0xffffffffffffL) != 0) {
+ return false;
+ }
+
+ // bits[61..54] are all set or all cleared.
+ uint32_t b_pattern = (bits >> 48) & 0x3fc0;
+ if (b_pattern != 0 && b_pattern != 0x3fc0) {
+ return false;
+ }
+
+ // bit[62] and bit[61] are opposite.
+ if (((bits ^ (bits << 1)) & 0x4000000000000000L) == 0) {
+ return false;
+ }
+
+ return true;
+}
+
+
+void Assembler::GrowBuffer() {
+ if (!own_buffer_) FATAL("external code buffer is too small");
+
+ // Compute new buffer size.
+ CodeDesc desc; // the new buffer
+ if (buffer_size_ < 4 * KB) {
+ desc.buffer_size = 4 * KB;
+ } else if (buffer_size_ < 1 * MB) {
+ desc.buffer_size = 2 * buffer_size_;
+ } else {
+ desc.buffer_size = buffer_size_ + 1 * MB;
+ }
+ CHECK_GT(desc.buffer_size, 0); // No overflow.
+
+ byte* buffer = reinterpret_cast<byte*>(buffer_);
+
+ // Set up new buffer.
+ desc.buffer = NewArray<byte>(desc.buffer_size);
+
+ desc.instr_size = pc_offset();
+ desc.reloc_size = (buffer + buffer_size_) - reloc_info_writer.pos();
+
+ // Copy the data.
+ intptr_t pc_delta = desc.buffer - buffer;
+ intptr_t rc_delta = (desc.buffer + desc.buffer_size) -
+ (buffer + buffer_size_);
+ memmove(desc.buffer, buffer, desc.instr_size);
+ memmove(reloc_info_writer.pos() + rc_delta,
+ reloc_info_writer.pos(), desc.reloc_size);
+
+ // Switch buffers.
+ DeleteArray(buffer_);
+ buffer_ = desc.buffer;
+ buffer_size_ = desc.buffer_size;
+ pc_ = reinterpret_cast<byte*>(pc_) + pc_delta;
+ reloc_info_writer.Reposition(reloc_info_writer.pos() + rc_delta,
+ reloc_info_writer.last_pc() + pc_delta);
+
+ // None of our relocation types are pc-relative pointing outside the code
+ // buffer, nor pc-absolute pointing inside the code buffer, so there is no
+ // need to relocate any emitted relocation entries.
+
+ // Relocate pending relocation entries.
+ for (int i = 0; i < num_pending_reloc_info_; i++) {
+ RelocInfo& rinfo = pending_reloc_info_[i];
+ ASSERT(rinfo.rmode() != RelocInfo::COMMENT &&
+ rinfo.rmode() != RelocInfo::POSITION);
+ if (rinfo.rmode() != RelocInfo::JS_RETURN) {
+ rinfo.set_pc(rinfo.pc() + pc_delta);
+ }
+ }
+}
+
+
+void Assembler::RecordRelocInfo(RelocInfo::Mode rmode, intptr_t data) {
+ // We do not try to reuse pool constants.
+ RelocInfo rinfo(reinterpret_cast<byte*>(pc_), rmode, data, NULL);
+ if (((rmode >= RelocInfo::JS_RETURN) &&
+ (rmode <= RelocInfo::DEBUG_BREAK_SLOT)) ||
+ (rmode == RelocInfo::CONST_POOL) ||
+ (rmode == RelocInfo::VENEER_POOL)) {
+ // Adjust code for new modes.
+ ASSERT(RelocInfo::IsDebugBreakSlot(rmode)
+ || RelocInfo::IsJSReturn(rmode)
+ || RelocInfo::IsComment(rmode)
+ || RelocInfo::IsPosition(rmode)
+ || RelocInfo::IsConstPool(rmode)
+ || RelocInfo::IsVeneerPool(rmode));
+ // These modes do not need an entry in the constant pool.
+ } else {
+ ASSERT(num_pending_reloc_info_ < kMaxNumPendingRelocInfo);
+ if (num_pending_reloc_info_ == 0) {
+ first_const_pool_use_ = pc_offset();
+ }
+ pending_reloc_info_[num_pending_reloc_info_++] = rinfo;
+ // Make sure the constant pool is not emitted in place of the next
+ // instruction for which we just recorded relocation info.
+ BlockConstPoolFor(1);
+ }
+
+ if (!RelocInfo::IsNone(rmode)) {
+ // Don't record external references unless the heap will be serialized.
+ if (rmode == RelocInfo::EXTERNAL_REFERENCE) {
+#ifdef DEBUG
+ if (!Serializer::enabled()) {
+ Serializer::TooLateToEnableNow();
+ }
+#endif
+ if (!Serializer::enabled() && !emit_debug_code()) {
+ return;
+ }
+ }
+ ASSERT(buffer_space() >= kMaxRelocSize); // too late to grow buffer here
+ if (rmode == RelocInfo::CODE_TARGET_WITH_ID) {
+ RelocInfo reloc_info_with_ast_id(
+ reinterpret_cast<byte*>(pc_), rmode, RecordedAstId().ToInt(), NULL);
+ ClearRecordedAstId();
+ reloc_info_writer.Write(&reloc_info_with_ast_id);
+ } else {
+ reloc_info_writer.Write(&rinfo);
+ }
+ }
+}
+
+
+void Assembler::BlockConstPoolFor(int instructions) {
+ int pc_limit = pc_offset() + instructions * kInstructionSize;
+ if (no_const_pool_before_ < pc_limit) {
+ // If there are some pending entries, the constant pool cannot be blocked
+ // further than first_const_pool_use_ + kMaxDistToConstPool
+ ASSERT((num_pending_reloc_info_ == 0) ||
+ (pc_limit < (first_const_pool_use_ + kMaxDistToConstPool)));
+ no_const_pool_before_ = pc_limit;
+ }
+
+ if (next_constant_pool_check_ < no_const_pool_before_) {
+ next_constant_pool_check_ = no_const_pool_before_;
+ }
+}
+
+
+void Assembler::CheckConstPool(bool force_emit, bool require_jump) {
+ // Some short sequences of instructions mustn't be broken up by constant pool
+ // emission; such sequences are protected by calls to BlockConstPoolFor and
+ // BlockConstPoolScope.
+ if (is_const_pool_blocked()) {
+ // Something is wrong if emission is forced and blocked at the same time.
+ ASSERT(!force_emit);
+ return;
+ }
+
+ // There is nothing to do if there are no pending constant pool entries.
+ if (num_pending_reloc_info_ == 0) {
+ // Calculate the offset of the next check.
+ next_constant_pool_check_ = pc_offset() + kCheckConstPoolInterval;
+ return;
+ }
+
+ // We emit a constant pool when:
+ // * requested to do so by parameter force_emit (e.g. after each function).
+ // * the distance to the first instruction accessing the constant pool is
+ // kAvgDistToConstPool or more.
+ // * no jump is required and the distance to the first instruction accessing
+ // the constant pool is at least kMaxDistToConstPool / 2.
+ ASSERT(first_const_pool_use_ >= 0);
+ int dist = pc_offset() - first_const_pool_use_;
+ if (!force_emit && dist < kAvgDistToConstPool &&
+ (require_jump || (dist < (kMaxDistToConstPool / 2)))) {
+ return;
+ }
+
+ int jump_instr = require_jump ? kInstructionSize : 0;
+ int size_pool_marker = kInstructionSize;
+ int size_pool_guard = kInstructionSize;
+ int pool_size = jump_instr + size_pool_marker + size_pool_guard +
+ num_pending_reloc_info_ * kPointerSize;
+ int needed_space = pool_size + kGap;
+
+ // Emit veneers for branches that would go out of range during emission of the
+ // constant pool.
+ CheckVeneerPool(false, require_jump, kVeneerDistanceMargin + pool_size);
+
+ Label size_check;
+ bind(&size_check);
+
+ // Check that the code buffer is large enough before emitting the constant
+ // pool (this includes the jump over the pool, the constant pool marker, the
+ // constant pool guard, and the gap to the relocation information).
+ while (buffer_space() <= needed_space) {
+ GrowBuffer();
+ }
+
+ {
+ // Block recursive calls to CheckConstPool and protect from veneer pools.
+ BlockPoolsScope block_pools(this);
+ RecordComment("[ Constant Pool");
+ RecordConstPool(pool_size);
+
+ // Emit jump over constant pool if necessary.
+ Label after_pool;
+ if (require_jump) {
+ b(&after_pool);
+ }
+
+ // Emit a constant pool header. The header has two goals:
+ // 1) Encode the size of the constant pool, for use by the disassembler.
+ // 2) Terminate the program, to try to prevent execution from accidentally
+ // flowing into the constant pool.
+ // The header is therefore made of two arm64 instructions:
+ // ldr xzr, #<size of the constant pool in 32-bit words>
+ // blr xzr
+ // If executed the code will likely segfault and lr will point to the
+ // beginning of the constant pool.
+ // TODO(all): currently each relocated constant is 64 bits, consider adding
+ // support for 32-bit entries.
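+ //
+ // The emitted pool therefore looks roughly like this (illustrative):
+ //   b after_pool ; only if require_jump
+ //   ldr xzr, #<pool size in 32-bit words>
+ //   blr xzr
+ //   <one 64-bit constant per pending relocation entry>
+ // after_pool: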
+ ConstantPoolMarker(2 * num_pending_reloc_info_);
+ ConstantPoolGuard();
+
+ // Emit constant pool entries.
+ for (int i = 0; i < num_pending_reloc_info_; i++) {
+ RelocInfo& rinfo = pending_reloc_info_[i];
+ ASSERT(rinfo.rmode() != RelocInfo::COMMENT &&
+ rinfo.rmode() != RelocInfo::POSITION &&
+ rinfo.rmode() != RelocInfo::STATEMENT_POSITION &&
+ rinfo.rmode() != RelocInfo::CONST_POOL &&
+ rinfo.rmode() != RelocInfo::VENEER_POOL);
+
+ Instruction* instr = reinterpret_cast<Instruction*>(rinfo.pc());
+ // Instruction to patch must be 'ldr rd, [pc, #offset]' with offset == 0.
+ ASSERT(instr->IsLdrLiteral() &&
+ instr->ImmLLiteral() == 0);
+
+ instr->SetImmPCOffsetTarget(reinterpret_cast<Instruction*>(pc_));
+ dc64(rinfo.data());
+ }
+
+ num_pending_reloc_info_ = 0;
+ first_const_pool_use_ = -1;
+
+ RecordComment("]");
+
+ if (after_pool.is_linked()) {
+ bind(&after_pool);
+ }
+ }
+
+ // Since a constant pool was just emitted, move the check offset forward by
+ // the standard interval.
+ next_constant_pool_check_ = pc_offset() + kCheckConstPoolInterval;
+
+ ASSERT(SizeOfCodeGeneratedSince(&size_check) ==
+ static_cast<unsigned>(pool_size));
+}
+
+
+bool Assembler::ShouldEmitVeneer(int max_reachable_pc, int margin) {
+ // Account for the branch around the veneers and the guard.
+ int protection_offset = 2 * kInstructionSize;
+ return pc_offset() > max_reachable_pc - margin - protection_offset -
+ static_cast<int>(unresolved_branches_.size() * kMaxVeneerCodeSize);
+}
+
+
+void Assembler::RecordVeneerPool(int location_offset, int size) {
+#ifdef ENABLE_DEBUGGER_SUPPORT
+ RelocInfo rinfo(buffer_ + location_offset,
+ RelocInfo::VENEER_POOL, static_cast<intptr_t>(size),
+ NULL);
+ reloc_info_writer.Write(&rinfo);
+#endif
+}
+
+
+void Assembler::EmitVeneers(bool force_emit, bool need_protection, int margin) {
+ BlockPoolsScope scope(this);
+ RecordComment("[ Veneers");
+
+ // The exact size of the veneer pool must be recorded (see the comment at the
+ // declaration site of RecordConstPool()), but computing the number of
+ // veneers that will be generated is not obvious. So instead we remember the
+ // current position and will record the size after the pool has been
+ // generated.
+ Label size_check;
+ bind(&size_check);
+ int veneer_pool_relocinfo_loc = pc_offset();
+
+ Label end;
+ if (need_protection) {
+ b(&end);
+ }
+
+ EmitVeneersGuard();
+
+ Label veneer_size_check;
+
+ std::multimap<int, FarBranchInfo>::iterator it, it_to_delete;
+
+ it = unresolved_branches_.begin();
+ while (it != unresolved_branches_.end()) {
+ if (force_emit || ShouldEmitVeneer(it->first, margin)) {
+ Instruction* branch = InstructionAt(it->second.pc_offset_);
+ Label* label = it->second.label_;
+
+#ifdef DEBUG
+ bind(&veneer_size_check);
+#endif
+ // Patch the branch to point to the current position, and emit a branch
+ // to the label.
+ Instruction* veneer = reinterpret_cast<Instruction*>(pc_);
+ RemoveBranchFromLabelLinkChain(branch, label, veneer);
+ branch->SetImmPCOffsetTarget(veneer);
+ b(label);
+#ifdef DEBUG
+ ASSERT(SizeOfCodeGeneratedSince(&veneer_size_check) <=
+ static_cast<uint64_t>(kMaxVeneerCodeSize));
+ veneer_size_check.Unuse();
+#endif
+
+ it_to_delete = it++;
+ unresolved_branches_.erase(it_to_delete);
+ } else {
+ ++it;
+ }
+ }
+
+ // Record the veneer pool size.
+ int pool_size = SizeOfCodeGeneratedSince(&size_check);
+ RecordVeneerPool(veneer_pool_relocinfo_loc, pool_size);
+
+ if (unresolved_branches_.empty()) {
+ next_veneer_pool_check_ = kMaxInt;
+ } else {
+ next_veneer_pool_check_ =
+ unresolved_branches_first_limit() - kVeneerDistanceCheckMargin;
+ }
+
+ bind(&end);
+
+ RecordComment("]");
+}
+
+
+void Assembler::CheckVeneerPool(bool force_emit, bool require_jump,
+ int margin) {
+ // There is nothing to do if there are no pending veneer pool entries.
+ if (unresolved_branches_.empty()) {
+ ASSERT(next_veneer_pool_check_ == kMaxInt);
+ return;
+ }
+
+ ASSERT(pc_offset() < unresolved_branches_first_limit());
+
+ // Some short sequences of instructions mustn't be broken up by veneer pool
+ // emission; such sequences are protected by calls to BlockVeneerPoolFor and
+ // BlockVeneerPoolScope.
+ if (is_veneer_pool_blocked()) {
+ ASSERT(!force_emit);
+ return;
+ }
+
+ if (!require_jump) {
+ // Prefer emitting veneers protected by an existing instruction.
+ margin *= kVeneerNoProtectionFactor;
+ }
+ if (force_emit || ShouldEmitVeneers(margin)) {
+ EmitVeneers(force_emit, require_jump, margin);
+ } else {
+ next_veneer_pool_check_ =
+ unresolved_branches_first_limit() - kVeneerDistanceCheckMargin;
+ }
+}
+
+
+void Assembler::RecordComment(const char* msg) {
+ if (FLAG_code_comments) {
+ CheckBuffer();
+ RecordRelocInfo(RelocInfo::COMMENT, reinterpret_cast<intptr_t>(msg));
+ }
+}
+
+
+int Assembler::buffer_space() const {
+ return reloc_info_writer.pos() - reinterpret_cast<byte*>(pc_);
+}
+
+
+void Assembler::RecordJSReturn() {
+ positions_recorder()->WriteRecordedPositions();
+ CheckBuffer();
+ RecordRelocInfo(RelocInfo::JS_RETURN);
+}
+
+
+void Assembler::RecordDebugBreakSlot() {
+ positions_recorder()->WriteRecordedPositions();
+ CheckBuffer();
+ RecordRelocInfo(RelocInfo::DEBUG_BREAK_SLOT);
+}
+
+
+void Assembler::RecordConstPool(int size) {
+ // We only need this for debugger support, to correctly compute offsets in the
+ // code.
+#ifdef ENABLE_DEBUGGER_SUPPORT
+ RecordRelocInfo(RelocInfo::CONST_POOL, static_cast<intptr_t>(size));
+#endif
+}
+
+
+MaybeObject* Assembler::AllocateConstantPool(Heap* heap) {
+ // No out-of-line constant pool support.
+ UNREACHABLE();
+ return NULL;
+}
+
+
+void Assembler::PopulateConstantPool(ConstantPoolArray* constant_pool) {
+ // No out-of-line constant pool support.
+ UNREACHABLE();
+}
+
+
+} } // namespace v8::internal
+
+#endif // V8_TARGET_ARCH_ARM64
diff --git a/deps/v8/src/arm64/assembler-arm64.h b/deps/v8/src/arm64/assembler-arm64.h
new file mode 100644
index 0000000000..1aae2f291e
--- /dev/null
+++ b/deps/v8/src/arm64/assembler-arm64.h
@@ -0,0 +1,2233 @@
+// Copyright 2013 the V8 project authors. All rights reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following
+// disclaimer in the documentation and/or other materials provided
+// with the distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived
+// from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+#ifndef V8_ARM64_ASSEMBLER_ARM64_H_
+#define V8_ARM64_ASSEMBLER_ARM64_H_
+
+#include <list>
+#include <map>
+
+#include "globals.h"
+#include "utils.h"
+#include "assembler.h"
+#include "serialize.h"
+#include "arm64/instructions-arm64.h"
+#include "arm64/cpu-arm64.h"
+
+
+namespace v8 {
+namespace internal {
+
+
+// -----------------------------------------------------------------------------
+// Registers.
+#define REGISTER_CODE_LIST(R) \
+R(0) R(1) R(2) R(3) R(4) R(5) R(6) R(7) \
+R(8) R(9) R(10) R(11) R(12) R(13) R(14) R(15) \
+R(16) R(17) R(18) R(19) R(20) R(21) R(22) R(23) \
+R(24) R(25) R(26) R(27) R(28) R(29) R(30) R(31)
+
+
+static const int kRegListSizeInBits = sizeof(RegList) * kBitsPerByte;
+
+
+// Some CPURegister methods can return Register and FPRegister types, so we
+// need to declare them in advance.
+struct Register;
+struct FPRegister;
+
+
+struct CPURegister {
+ enum RegisterType {
+ // The kInvalid value is used to detect uninitialized static instances,
+ // which are always zero-initialized before any constructors are called.
+ kInvalid = 0,
+ kRegister,
+ kFPRegister,
+ kNoRegister
+ };
+
+ static CPURegister Create(unsigned code, unsigned size, RegisterType type) {
+ CPURegister r = {code, size, type};
+ return r;
+ }
+
+ unsigned code() const;
+ RegisterType type() const;
+ RegList Bit() const;
+ unsigned SizeInBits() const;
+ int SizeInBytes() const;
+ bool Is32Bits() const;
+ bool Is64Bits() const;
+ bool IsValid() const;
+ bool IsValidOrNone() const;
+ bool IsValidRegister() const;
+ bool IsValidFPRegister() const;
+ bool IsNone() const;
+ bool Is(const CPURegister& other) const;
+
+ bool IsZero() const;
+ bool IsSP() const;
+
+ bool IsRegister() const;
+ bool IsFPRegister() const;
+
+ Register X() const;
+ Register W() const;
+ FPRegister D() const;
+ FPRegister S() const;
+
+ bool IsSameSizeAndType(const CPURegister& other) const;
+
+ // V8 compatibility.
+ bool is(const CPURegister& other) const { return Is(other); }
+ bool is_valid() const { return IsValid(); }
+
+ unsigned reg_code;
+ unsigned reg_size;
+ RegisterType reg_type;
+};
+
+
+struct Register : public CPURegister {
+ static Register Create(unsigned code, unsigned size) {
+ return Register(CPURegister::Create(code, size, CPURegister::kRegister));
+ }
+
+ Register() {
+ reg_code = 0;
+ reg_size = 0;
+ reg_type = CPURegister::kNoRegister;
+ }
+
+ explicit Register(const CPURegister& r) {
+ reg_code = r.reg_code;
+ reg_size = r.reg_size;
+ reg_type = r.reg_type;
+ ASSERT(IsValidOrNone());
+ }
+
+ Register(const Register& r) { // NOLINT(runtime/explicit)
+ reg_code = r.reg_code;
+ reg_size = r.reg_size;
+ reg_type = r.reg_type;
+ ASSERT(IsValidOrNone());
+ }
+
+ bool IsValid() const {
+ ASSERT(IsRegister() || IsNone());
+ return IsValidRegister();
+ }
+
+ static Register XRegFromCode(unsigned code);
+ static Register WRegFromCode(unsigned code);
+
+ // Start of V8 compatibility section ---------------------
+ // These members are necessary for compilation.
+ // A few of them may be unused for now.
+
+ static const int kNumRegisters = kNumberOfRegisters;
+ static int NumRegisters() { return kNumRegisters; }
+
+ // We allow crankshaft to use the following registers:
+ // - x0 to x15
+ // - x18 to x24
+ // - x27 (also context)
+ //
+ // TODO(all): Register x25 is currently free and could be available for
+ // crankshaft, but we don't use it as we might use it as a per function
+ // literal pool pointer in the future.
+ //
+ // TODO(all): Consider storing cp in x25 to have only two ranges.
+ // We split the allocatable registers into three ranges called
+ // - "low range"
+ // - "high range"
+ // - "context"
+ static const unsigned kAllocatableLowRangeBegin = 0;
+ static const unsigned kAllocatableLowRangeEnd = 15;
+ static const unsigned kAllocatableHighRangeBegin = 18;
+ static const unsigned kAllocatableHighRangeEnd = 24;
+ static const unsigned kAllocatableContext = 27;
+
+ // Gap between low and high ranges.
+ static const int kAllocatableRangeGapSize =
+ (kAllocatableHighRangeBegin - kAllocatableLowRangeEnd) - 1;
+
+ static const int kMaxNumAllocatableRegisters =
+ (kAllocatableLowRangeEnd - kAllocatableLowRangeBegin + 1) +
+ (kAllocatableHighRangeEnd - kAllocatableHighRangeBegin + 1) + 1; // cp
+ static int NumAllocatableRegisters() { return kMaxNumAllocatableRegisters; }
+
+ // Return true if the register is one that crankshaft can allocate.
+ bool IsAllocatable() const {
+ return ((reg_code == kAllocatableContext) ||
+ (reg_code <= kAllocatableLowRangeEnd) ||
+ ((reg_code >= kAllocatableHighRangeBegin) &&
+ (reg_code <= kAllocatableHighRangeEnd)));
+ }
+
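+ // For example (illustrative): allocation indices 0-15 map to x0-x15, index
+ // 16 maps to x18 (skipping the gap between the low and high ranges), and
+ // the last index (23) maps to the context register x27.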
+ static Register FromAllocationIndex(unsigned index) {
+ ASSERT(index < static_cast<unsigned>(NumAllocatableRegisters()));
+ // cp is the last allocatable register.
+ if (index == (static_cast<unsigned>(NumAllocatableRegisters() - 1))) {
+ return from_code(kAllocatableContext);
+ }
+
+ // Handle low and high ranges.
+ return (index <= kAllocatableLowRangeEnd)
+ ? from_code(index)
+ : from_code(index + kAllocatableRangeGapSize);
+ }
+
+ static const char* AllocationIndexToString(int index) {
+ ASSERT((index >= 0) && (index < NumAllocatableRegisters()));
+ ASSERT((kAllocatableLowRangeBegin == 0) &&
+ (kAllocatableLowRangeEnd == 15) &&
+ (kAllocatableHighRangeBegin == 18) &&
+ (kAllocatableHighRangeEnd == 24) &&
+ (kAllocatableContext == 27));
+ const char* const names[] = {
+ "x0", "x1", "x2", "x3", "x4",
+ "x5", "x6", "x7", "x8", "x9",
+ "x10", "x11", "x12", "x13", "x14",
+ "x15", "x18", "x19", "x20", "x21",
+ "x22", "x23", "x24", "x27",
+ };
+ return names[index];
+ }
+
+ static int ToAllocationIndex(Register reg) {
+ ASSERT(reg.IsAllocatable());
+ unsigned code = reg.code();
+ if (code == kAllocatableContext) {
+ return NumAllocatableRegisters() - 1;
+ }
+
+ return (code <= kAllocatableLowRangeEnd)
+ ? code
+ : code - kAllocatableRangeGapSize;
+ }
+
+ static Register from_code(int code) {
+ // Always return an X register.
+ return Register::Create(code, kXRegSizeInBits);
+ }
+
+ // End of V8 compatibility section -----------------------
+};
+
+
+struct FPRegister : public CPURegister {
+ static FPRegister Create(unsigned code, unsigned size) {
+ return FPRegister(
+ CPURegister::Create(code, size, CPURegister::kFPRegister));
+ }
+
+ FPRegister() {
+ reg_code = 0;
+ reg_size = 0;
+ reg_type = CPURegister::kNoRegister;
+ }
+
+ explicit FPRegister(const CPURegister& r) {
+ reg_code = r.reg_code;
+ reg_size = r.reg_size;
+ reg_type = r.reg_type;
+ ASSERT(IsValidOrNone());
+ }
+
+ FPRegister(const FPRegister& r) { // NOLINT(runtime/explicit)
+ reg_code = r.reg_code;
+ reg_size = r.reg_size;
+ reg_type = r.reg_type;
+ ASSERT(IsValidOrNone());
+ }
+
+ bool IsValid() const {
+ ASSERT(IsFPRegister() || IsNone());
+ return IsValidFPRegister();
+ }
+
+ static FPRegister SRegFromCode(unsigned code);
+ static FPRegister DRegFromCode(unsigned code);
+
+ // Start of V8 compatibility section ---------------------
+ static const int kMaxNumRegisters = kNumberOfFPRegisters;
+
+ // Crankshaft can use all the FP registers except:
+ // - d15 which is used to keep the 0 double value
+ // - d30 which is used in crankshaft as a double scratch register
+ // - d31 which is used in the MacroAssembler as a double scratch register
+ static const unsigned kAllocatableLowRangeBegin = 0;
+ static const unsigned kAllocatableLowRangeEnd = 14;
+ static const unsigned kAllocatableHighRangeBegin = 16;
+ static const unsigned kAllocatableHighRangeEnd = 29;
+
+ static const RegList kAllocatableFPRegisters = 0x3fff7fff;
+
+ // Gap between low and high ranges.
+ static const int kAllocatableRangeGapSize =
+ (kAllocatableHighRangeBegin - kAllocatableLowRangeEnd) - 1;
+
+ static const int kMaxNumAllocatableRegisters =
+ (kAllocatableLowRangeEnd - kAllocatableLowRangeBegin + 1) +
+ (kAllocatableHighRangeEnd - kAllocatableHighRangeBegin + 1);
+ static int NumAllocatableRegisters() { return kMaxNumAllocatableRegisters; }
+
+ // Return true if the register is one that crankshaft can allocate.
+ bool IsAllocatable() const {
+ return (Bit() & kAllocatableFPRegisters) != 0;
+ }
+
+ static FPRegister FromAllocationIndex(unsigned int index) {
+ ASSERT(index < static_cast<unsigned>(NumAllocatableRegisters()));
+
+ return (index <= kAllocatableLowRangeEnd)
+ ? from_code(index)
+ : from_code(index + kAllocatableRangeGapSize);
+ }
+
+ static const char* AllocationIndexToString(int index) {
+ ASSERT((index >= 0) && (index < NumAllocatableRegisters()));
+ ASSERT((kAllocatableLowRangeBegin == 0) &&
+ (kAllocatableLowRangeEnd == 14) &&
+ (kAllocatableHighRangeBegin == 16) &&
+ (kAllocatableHighRangeEnd == 29));
+ const char* const names[] = {
+ "d0", "d1", "d2", "d3", "d4", "d5", "d6", "d7",
+ "d8", "d9", "d10", "d11", "d12", "d13", "d14",
+ "d16", "d17", "d18", "d19", "d20", "d21", "d22", "d23",
+ "d24", "d25", "d26", "d27", "d28", "d29"
+ };
+ return names[index];
+ }
+
+ static int ToAllocationIndex(FPRegister reg) {
+ ASSERT(reg.IsAllocatable());
+ unsigned code = reg.code();
+
+ return (code <= kAllocatableLowRangeEnd)
+ ? code
+ : code - kAllocatableRangeGapSize;
+ }
+
+ static FPRegister from_code(int code) {
+ // Always return a D register.
+ return FPRegister::Create(code, kDRegSizeInBits);
+ }
+ // End of V8 compatibility section -----------------------
+};
+
+
+STATIC_ASSERT(sizeof(CPURegister) == sizeof(Register));
+STATIC_ASSERT(sizeof(CPURegister) == sizeof(FPRegister));
+
+
+#if defined(ARM64_DEFINE_REG_STATICS)
+#define INITIALIZE_REGISTER(register_class, name, code, size, type) \
+ const CPURegister init_##register_class##_##name = {code, size, type}; \
+ const register_class& name = *reinterpret_cast<const register_class*>( \
+ &init_##register_class##_##name)
+#define ALIAS_REGISTER(register_class, alias, name) \
+ const register_class& alias = *reinterpret_cast<const register_class*>( \
+ &init_##register_class##_##name)
+#else
+#define INITIALIZE_REGISTER(register_class, name, code, size, type) \
+ extern const register_class& name
+#define ALIAS_REGISTER(register_class, alias, name) \
+ extern const register_class& alias
+#endif // defined(ARM64_DEFINE_REG_STATICS)
+
+// No*Reg is used to indicate an unused argument, or an error case. Note that
+// these all compare equal (using the Is() method). The Register and FPRegister
+// variants are provided for convenience.
+INITIALIZE_REGISTER(Register, NoReg, 0, 0, CPURegister::kNoRegister);
+INITIALIZE_REGISTER(FPRegister, NoFPReg, 0, 0, CPURegister::kNoRegister);
+INITIALIZE_REGISTER(CPURegister, NoCPUReg, 0, 0, CPURegister::kNoRegister);
+
+// v8 compatibility.
+INITIALIZE_REGISTER(Register, no_reg, 0, 0, CPURegister::kNoRegister);
+
+#define DEFINE_REGISTERS(N) \
+ INITIALIZE_REGISTER(Register, w##N, N, \
+ kWRegSizeInBits, CPURegister::kRegister); \
+ INITIALIZE_REGISTER(Register, x##N, N, \
+ kXRegSizeInBits, CPURegister::kRegister);
+REGISTER_CODE_LIST(DEFINE_REGISTERS)
+#undef DEFINE_REGISTERS
+
+INITIALIZE_REGISTER(Register, wcsp, kSPRegInternalCode, kWRegSizeInBits,
+ CPURegister::kRegister);
+INITIALIZE_REGISTER(Register, csp, kSPRegInternalCode, kXRegSizeInBits,
+ CPURegister::kRegister);
+
+#define DEFINE_FPREGISTERS(N) \
+ INITIALIZE_REGISTER(FPRegister, s##N, N, \
+ kSRegSizeInBits, CPURegister::kFPRegister); \
+ INITIALIZE_REGISTER(FPRegister, d##N, N, \
+ kDRegSizeInBits, CPURegister::kFPRegister);
+REGISTER_CODE_LIST(DEFINE_FPREGISTERS)
+#undef DEFINE_FPREGISTERS
+
+#undef INITIALIZE_REGISTER
+
+// Register aliases.
+ALIAS_REGISTER(Register, ip0, x16);
+ALIAS_REGISTER(Register, ip1, x17);
+ALIAS_REGISTER(Register, wip0, w16);
+ALIAS_REGISTER(Register, wip1, w17);
+// Root register.
+ALIAS_REGISTER(Register, root, x26);
+ALIAS_REGISTER(Register, rr, x26);
+// Context pointer register.
+ALIAS_REGISTER(Register, cp, x27);
+// We use a register as a JS stack pointer to overcome the restriction on the
+// architectural SP alignment.
+// We chose x28 because it is contiguous with the other special-purpose
+// registers.
+STATIC_ASSERT(kJSSPCode == 28);
+ALIAS_REGISTER(Register, jssp, x28);
+ALIAS_REGISTER(Register, wjssp, w28);
+ALIAS_REGISTER(Register, fp, x29);
+ALIAS_REGISTER(Register, lr, x30);
+ALIAS_REGISTER(Register, xzr, x31);
+ALIAS_REGISTER(Register, wzr, w31);
+
+// Keeps the 0 double value.
+ALIAS_REGISTER(FPRegister, fp_zero, d15);
+// Crankshaft double scratch register.
+ALIAS_REGISTER(FPRegister, crankshaft_fp_scratch, d30);
+// MacroAssembler double scratch register.
+ALIAS_REGISTER(FPRegister, fp_scratch, d31);
+
+#undef ALIAS_REGISTER
+
+
+Register GetAllocatableRegisterThatIsNotOneOf(Register reg1,
+ Register reg2 = NoReg,
+ Register reg3 = NoReg,
+ Register reg4 = NoReg);
+
+
+// AreAliased returns true if any of the named registers overlap. Arguments set
+// to NoReg are ignored. The system stack pointer may be specified.
+bool AreAliased(const CPURegister& reg1,
+ const CPURegister& reg2,
+ const CPURegister& reg3 = NoReg,
+ const CPURegister& reg4 = NoReg,
+ const CPURegister& reg5 = NoReg,
+ const CPURegister& reg6 = NoReg,
+ const CPURegister& reg7 = NoReg,
+ const CPURegister& reg8 = NoReg);
+
+// AreSameSizeAndType returns true if all of the specified registers have the
+// same size, and are of the same type. The system stack pointer may be
+// specified. Arguments set to NoReg are ignored, as are any subsequent
+// arguments. At least one argument (reg1) must be valid (not NoCPUReg).
+bool AreSameSizeAndType(const CPURegister& reg1,
+ const CPURegister& reg2,
+ const CPURegister& reg3 = NoCPUReg,
+ const CPURegister& reg4 = NoCPUReg,
+ const CPURegister& reg5 = NoCPUReg,
+ const CPURegister& reg6 = NoCPUReg,
+ const CPURegister& reg7 = NoCPUReg,
+ const CPURegister& reg8 = NoCPUReg);
+
+
+typedef FPRegister DoubleRegister;
+
+
+// -----------------------------------------------------------------------------
+// Lists of registers.
+class CPURegList {
+ public:
+ explicit CPURegList(CPURegister reg1,
+ CPURegister reg2 = NoCPUReg,
+ CPURegister reg3 = NoCPUReg,
+ CPURegister reg4 = NoCPUReg)
+ : list_(reg1.Bit() | reg2.Bit() | reg3.Bit() | reg4.Bit()),
+ size_(reg1.SizeInBits()), type_(reg1.type()) {
+ ASSERT(AreSameSizeAndType(reg1, reg2, reg3, reg4));
+ ASSERT(IsValid());
+ }
+
+ CPURegList(CPURegister::RegisterType type, unsigned size, RegList list)
+ : list_(list), size_(size), type_(type) {
+ ASSERT(IsValid());
+ }
+
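+ // For example (illustrative): CPURegList(CPURegister::kRegister,
+ // kXRegSizeInBits, 0, 3) describes the X registers x0 to x3, i.e.
+ // list_ == 0xf.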
+ CPURegList(CPURegister::RegisterType type, unsigned size,
+ unsigned first_reg, unsigned last_reg)
+ : size_(size), type_(type) {
+ ASSERT(((type == CPURegister::kRegister) &&
+ (last_reg < kNumberOfRegisters)) ||
+ ((type == CPURegister::kFPRegister) &&
+ (last_reg < kNumberOfFPRegisters)));
+ ASSERT(last_reg >= first_reg);
+ list_ = (1UL << (last_reg + 1)) - 1;
+ list_ &= ~((1UL << first_reg) - 1);
+ ASSERT(IsValid());
+ }
+
+ CPURegister::RegisterType type() const {
+ ASSERT(IsValid());
+ return type_;
+ }
+
+ RegList list() const {
+ ASSERT(IsValid());
+ return list_;
+ }
+
+ inline void set_list(RegList new_list) {
+ ASSERT(IsValid());
+ list_ = new_list;
+ }
+
+ // Combine another CPURegList into this one. Registers that already exist in
+ // this list are left unchanged. The type and size of the registers in the
+ // 'other' list must match those in this list.
+ void Combine(const CPURegList& other);
+
+ // Remove every register in the other CPURegList from this one. Registers that
+ // do not exist in this list are ignored. The type and size of the registers
+ // in the 'other' list must match those in this list.
+ void Remove(const CPURegList& other);
+
+ // Variants of Combine and Remove which take CPURegisters.
+ void Combine(const CPURegister& other);
+ void Remove(const CPURegister& other1,
+ const CPURegister& other2 = NoCPUReg,
+ const CPURegister& other3 = NoCPUReg,
+ const CPURegister& other4 = NoCPUReg);
+
+ // Variants of Combine and Remove which take a single register by its code;
+ // the type and size of the register are inferred from this list.
+ void Combine(int code);
+ void Remove(int code);
+
+ // Remove all callee-saved registers from the list. This can be useful when
+ // preparing registers for an AAPCS64 function call, for example.
+ void RemoveCalleeSaved();
+
+ CPURegister PopLowestIndex();
+ CPURegister PopHighestIndex();
+
+ // AAPCS64 callee-saved registers.
+ static CPURegList GetCalleeSaved(unsigned size = kXRegSizeInBits);
+ static CPURegList GetCalleeSavedFP(unsigned size = kDRegSizeInBits);
+
+ // AAPCS64 caller-saved registers. Note that this includes lr.
+ static CPURegList GetCallerSaved(unsigned size = kXRegSizeInBits);
+ static CPURegList GetCallerSavedFP(unsigned size = kDRegSizeInBits);
+
+ // Registers saved at safepoints.
+ static CPURegList GetSafepointSavedRegisters();
+
+ bool IsEmpty() const {
+ ASSERT(IsValid());
+ return list_ == 0;
+ }
+
+ bool IncludesAliasOf(const CPURegister& other1,
+ const CPURegister& other2 = NoCPUReg,
+ const CPURegister& other3 = NoCPUReg,
+ const CPURegister& other4 = NoCPUReg) const {
+ ASSERT(IsValid());
+ RegList list = 0;
+ if (!other1.IsNone() && (other1.type() == type_)) list |= other1.Bit();
+ if (!other2.IsNone() && (other2.type() == type_)) list |= other2.Bit();
+ if (!other3.IsNone() && (other3.type() == type_)) list |= other3.Bit();
+ if (!other4.IsNone() && (other4.type() == type_)) list |= other4.Bit();
+ return (list_ & list) != 0;
+ }
+
+ int Count() const {
+ ASSERT(IsValid());
+ return CountSetBits(list_, kRegListSizeInBits);
+ }
+
+ unsigned RegisterSizeInBits() const {
+ ASSERT(IsValid());
+ return size_;
+ }
+
+ unsigned RegisterSizeInBytes() const {
+ int size_in_bits = RegisterSizeInBits();
+ ASSERT((size_in_bits % kBitsPerByte) == 0);
+ return size_in_bits / kBitsPerByte;
+ }
+
+ private:
+ RegList list_;
+ unsigned size_;
+ CPURegister::RegisterType type_;
+
+ bool IsValid() const {
+ const RegList kValidRegisters = 0x8000000ffffffff;
+ const RegList kValidFPRegisters = 0x0000000ffffffff;
+ switch (type_) {
+ case CPURegister::kRegister:
+ return (list_ & kValidRegisters) == list_;
+ case CPURegister::kFPRegister:
+ return (list_ & kValidFPRegisters) == list_;
+ case CPURegister::kNoRegister:
+ return list_ == 0;
+ default:
+ UNREACHABLE();
+ return false;
+ }
+ }
+};
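+
+// Usage sketch (illustrative only, not part of the original header): a
+// CPURegList is typically obtained from one of the static getters above and
+// trimmed before iterating over it. Register names such as x19 are assumed
+// from the rest of the ARM64 port.
+//
+//   CPURegList saved = CPURegList::GetCalleeSaved();
+//   saved.Remove(x19);                    // Handled separately by the caller.
+//   while (!saved.IsEmpty()) {
+//     CPURegister reg = saved.PopLowestIndex();
+//     // ... save or restore 'reg' ...
+//   }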
+
+
+// AAPCS64 callee-saved registers.
+#define kCalleeSaved CPURegList::GetCalleeSaved()
+#define kCalleeSavedFP CPURegList::GetCalleeSavedFP()
+
+
+// AAPCS64 caller-saved registers. Note that this includes lr.
+#define kCallerSaved CPURegList::GetCallerSaved()
+#define kCallerSavedFP CPURegList::GetCallerSavedFP()
+
+
+// -----------------------------------------------------------------------------
+// Operands.
+const int kSmiShift = kSmiTagSize + kSmiShiftSize;
+const uint64_t kSmiShiftMask = (1UL << kSmiShift) - 1;
+
+// Represents an operand in a machine instruction.
+class Operand {
+  // TODO(all): If necessary, study in more detail which methods should be
+  // TODO(all): inlined.
+ public:
+ // rm, {<shift> {#<shift_amount>}}
+ // where <shift> is one of {LSL, LSR, ASR, ROR}.
+ // <shift_amount> is uint6_t.
+ // This is allowed to be an implicit constructor because Operand is
+ // a wrapper class that doesn't normally perform any type conversion.
+ inline Operand(Register reg,
+ Shift shift = LSL,
+ unsigned shift_amount = 0); // NOLINT(runtime/explicit)
+
+ // rm, <extend> {#<shift_amount>}
+ // where <extend> is one of {UXTB, UXTH, UXTW, UXTX, SXTB, SXTH, SXTW, SXTX}.
+ // <shift_amount> is uint2_t.
+ inline Operand(Register reg,
+ Extend extend,
+ unsigned shift_amount = 0);
+
+ template<typename T>
+ inline explicit Operand(Handle<T> handle);
+
+ // Implicit constructor for all int types, ExternalReference, and Smi.
+ template<typename T>
+ inline Operand(T t); // NOLINT(runtime/explicit)
+
+ // Implicit constructor for int types.
+ template<typename int_t>
+ inline Operand(int_t t, RelocInfo::Mode rmode);
+
+ inline bool IsImmediate() const;
+ inline bool IsShiftedRegister() const;
+ inline bool IsExtendedRegister() const;
+ inline bool IsZero() const;
+
+ // This returns an LSL shift (<= 4) operand as an equivalent extend operand,
+ // which helps in the encoding of instructions that use the stack pointer.
+ inline Operand ToExtendedRegister() const;
+
+ inline int64_t immediate() const;
+ inline Register reg() const;
+ inline Shift shift() const;
+ inline Extend extend() const;
+ inline unsigned shift_amount() const;
+
+ // Relocation information.
+ RelocInfo::Mode rmode() const { return rmode_; }
+ void set_rmode(RelocInfo::Mode rmode) { rmode_ = rmode; }
+ bool NeedsRelocation() const;
+
+ // Helpers
+ inline static Operand UntagSmi(Register smi);
+ inline static Operand UntagSmiAndScale(Register smi, int scale);
+
+ private:
+ void initialize_handle(Handle<Object> value);
+ int64_t immediate_;
+ Register reg_;
+ Shift shift_;
+ Extend extend_;
+ unsigned shift_amount_;
+ RelocInfo::Mode rmode_;
+};
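+
+// Usage sketch (illustrative only): an Operand can wrap an immediate, a
+// shifted register or an extended register. Register names and the Shift /
+// Extend enum values are assumed from the rest of the ARM64 port.
+//
+//   Operand imm(0x1234);                        // Immediate.
+//   Operand shifted(x1, LSL, 4);                // x1 << 4.
+//   Operand extended(w2, SXTW);                 // w2 sign-extended to 64 bits.
+//   Operand untagged = Operand::UntagSmi(x3);   // Untagged Smi value of x3.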
+
+
+// MemOperand represents a memory operand in a load or store instruction.
+class MemOperand {
+ public:
+ inline explicit MemOperand(Register base,
+ ptrdiff_t offset = 0,
+ AddrMode addrmode = Offset);
+ inline explicit MemOperand(Register base,
+ Register regoffset,
+ Shift shift = LSL,
+ unsigned shift_amount = 0);
+ inline explicit MemOperand(Register base,
+ Register regoffset,
+ Extend extend,
+ unsigned shift_amount = 0);
+ inline explicit MemOperand(Register base,
+ const Operand& offset,
+ AddrMode addrmode = Offset);
+
+ const Register& base() const { return base_; }
+ const Register& regoffset() const { return regoffset_; }
+ ptrdiff_t offset() const { return offset_; }
+ AddrMode addrmode() const { return addrmode_; }
+ Shift shift() const { return shift_; }
+ Extend extend() const { return extend_; }
+ unsigned shift_amount() const { return shift_amount_; }
+ inline bool IsImmediateOffset() const;
+ inline bool IsRegisterOffset() const;
+ inline bool IsPreIndex() const;
+ inline bool IsPostIndex() const;
+
+ // For offset modes, return the offset as an Operand. This helper cannot
+ // handle indexed modes.
+ inline Operand OffsetAsOperand() const;
+
+ private:
+ Register base_;
+ Register regoffset_;
+ ptrdiff_t offset_;
+ AddrMode addrmode_;
+ Shift shift_;
+ Extend extend_;
+ unsigned shift_amount_;
+};
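+
+// Usage sketch (illustrative only): the constructors above cover the AArch64
+// addressing modes. The AddrMode values (Offset, PreIndex, PostIndex) and the
+// register names are assumed from the rest of the ARM64 port.
+//
+//   MemOperand plain(x0, 8);                // [x0 + 8]
+//   MemOperand indexed(x0, x1, LSL, 3);     // [x0 + (x1 << 3)]
+//   MemOperand pre(x0, 16, PreIndex);       // [x0 + 16]!  (base updated first)
+//   MemOperand post(x0, 16, PostIndex);     // [x0], then x0 += 16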
+
+
+// -----------------------------------------------------------------------------
+// Assembler.
+
+class Assembler : public AssemblerBase {
+ public:
+ // Create an assembler. Instructions and relocation information are emitted
+ // into a buffer, with the instructions starting from the beginning and the
+ // relocation information starting from the end of the buffer. See CodeDesc
+ // for a detailed comment on the layout (globals.h).
+ //
+ // If the provided buffer is NULL, the assembler allocates and grows its own
+ // buffer, and buffer_size determines the initial buffer size. The buffer is
+ // owned by the assembler and deallocated upon destruction of the assembler.
+ //
+ // If the provided buffer is not NULL, the assembler uses the provided buffer
+ // for code generation and assumes its size to be buffer_size. If the buffer
+ // is too small, a fatal error occurs. No deallocation of the buffer is done
+ // upon destruction of the assembler.
+ Assembler(Isolate* arg_isolate, void* buffer, int buffer_size);
+
+ virtual ~Assembler();
+
+ virtual void AbortedCodeGeneration() {
+ num_pending_reloc_info_ = 0;
+ }
+
+ // System functions ---------------------------------------------------------
+ // Start generating code from the beginning of the buffer, discarding any code
+ // and data that has already been emitted into the buffer.
+ //
+ // In order to avoid any accidental transfer of state, Reset ASSERTs that the
+ // constant pool is not blocked.
+ void Reset();
+
+ // GetCode emits any pending (non-emitted) code and fills the descriptor
+ // desc. GetCode() is idempotent; it returns the same result if no other
+ // Assembler functions are invoked in between GetCode() calls.
+ //
+ // The descriptor (desc) can be NULL. In that case, the code is finalized as
+ // usual, but the descriptor is not populated.
+ void GetCode(CodeDesc* desc);
+
+ // Insert the smallest number of nop instructions
+ // possible to align the pc offset to a multiple
+ // of m. m must be a power of 2 (>= 4).
+ void Align(int m);
+
+ inline void Unreachable();
+
+ // Label --------------------------------------------------------------------
+ // Bind a label to the current pc. Note that labels can only be bound once,
+ // and if labels are linked to other instructions, they _must_ be bound
+ // before they go out of scope.
+ void bind(Label* label);
+
+
+ // RelocInfo and pools ------------------------------------------------------
+
+ // Record relocation information for current pc_.
+ void RecordRelocInfo(RelocInfo::Mode rmode, intptr_t data = 0);
+
+ // Return the address in the constant pool of the code target address used by
+ // the branch/call instruction at pc.
+ inline static Address target_pointer_address_at(Address pc);
+
+ // Read/Modify the code target address in the branch/call instruction at pc.
+ inline static Address target_address_at(Address pc,
+ ConstantPoolArray* constant_pool);
+ inline static void set_target_address_at(Address pc,
+ ConstantPoolArray* constant_pool,
+ Address target);
+ static inline Address target_address_at(Address pc, Code* code);
+ static inline void set_target_address_at(Address pc,
+ Code* code,
+ Address target);
+
+ // Return the code target address at a call site from the return address of
+ // that call in the instruction stream.
+ inline static Address target_address_from_return_address(Address pc);
+
+ // Given the address of the beginning of a call, return the address in the
+ // instruction stream that call will return from.
+ inline static Address return_address_from_call_start(Address pc);
+
+ // This sets the branch destination (which is in the constant pool on ARM).
+ // This is for calls and branches within generated code.
+ inline static void deserialization_set_special_target_at(
+ Address constant_pool_entry, Code* code, Address target);
+
+ // All addresses in the constant pool are the same size as pointers.
+ static const int kSpecialTargetSize = kPointerSize;
+
+ // The sizes of the call sequences emitted by MacroAssembler::Call.
+ // Wherever possible, use MacroAssembler::CallSize instead of these constants,
+ // as it will choose the correct value for a given relocation mode.
+ //
+ // Without relocation:
+ // movz temp, #(target & 0x000000000000ffff)
+ // movk temp, #(target & 0x00000000ffff0000)
+ // movk temp, #(target & 0x0000ffff00000000)
+ // blr temp
+ //
+ // With relocation:
+ // ldr temp, =target
+ // blr temp
+ static const int kCallSizeWithoutRelocation = 4 * kInstructionSize;
+ static const int kCallSizeWithRelocation = 2 * kInstructionSize;
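+
+  // For reference (assuming the usual 4-byte kInstructionSize on ARM64), these
+  // work out to 16 bytes without relocation and 8 bytes with relocation.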
+
+ // Size of the generated code in bytes
+ uint64_t SizeOfGeneratedCode() const {
+ ASSERT((pc_ >= buffer_) && (pc_ < (buffer_ + buffer_size_)));
+ return pc_ - buffer_;
+ }
+
+ // Return the code size generated from label to the current position.
+ uint64_t SizeOfCodeGeneratedSince(const Label* label) {
+ ASSERT(label->is_bound());
+ ASSERT(pc_offset() >= label->pos());
+ ASSERT(pc_offset() < buffer_size_);
+ return pc_offset() - label->pos();
+ }
+
+ // Check the size of the code generated since the given label. This function
+ // is used primarily to work around comparisons between signed and unsigned
+ // quantities, since V8 uses both.
+ // TODO(jbramley): Work out what sign to use for these things and if possible,
+ // change things to be consistent.
+ void AssertSizeOfCodeGeneratedSince(const Label* label, ptrdiff_t size) {
+ ASSERT(size >= 0);
+ ASSERT(static_cast<uint64_t>(size) == SizeOfCodeGeneratedSince(label));
+ }
+
+ // Return the number of instructions generated from label to the
+ // current position.
+ int InstructionsGeneratedSince(const Label* label) {
+ return SizeOfCodeGeneratedSince(label) / kInstructionSize;
+ }
+
+ // Number of instructions generated for the return sequence in
+ // FullCodeGenerator::EmitReturnSequence.
+ static const int kJSRetSequenceInstructions = 7;
+ // Distance between start of patched return sequence and the emitted address
+ // to jump to.
+ static const int kPatchReturnSequenceAddressOffset = 0;
+ static const int kPatchDebugBreakSlotAddressOffset = 0;
+
+ // Number of instructions necessary to be able to later patch it to a call.
+ // See Debug::GenerateSlot() and BreakLocationIterator::SetDebugBreakAtSlot().
+ static const int kDebugBreakSlotInstructions = 4;
+ static const int kDebugBreakSlotLength =
+ kDebugBreakSlotInstructions * kInstructionSize;
+
+ static const int kPatchDebugBreakSlotReturnOffset = 2 * kInstructionSize;
+
+  // Prevent constant pool emission until EndBlockConstPool is called.
+  // Calls to this function can be nested but must be followed by an equal
+  // number of calls to EndBlockConstPool.
+ void StartBlockConstPool();
+
+  // Resume constant pool emission. Needs to be called as many times as
+  // StartBlockConstPool to have an effect.
+ void EndBlockConstPool();
+
+ bool is_const_pool_blocked() const;
+ static bool IsConstantPoolAt(Instruction* instr);
+ static int ConstantPoolSizeAt(Instruction* instr);
+ // See Assembler::CheckConstPool for more info.
+ void ConstantPoolMarker(uint32_t size);
+ void EmitPoolGuard();
+ void ConstantPoolGuard();
+
+ // Prevent veneer pool emission until EndBlockVeneerPool is called.
+  // Calls to this function can be nested but must be followed by an equal
+  // number of calls to EndBlockVeneerPool.
+ void StartBlockVeneerPool();
+
+  // Resume veneer pool emission. Needs to be called as many times as
+  // StartBlockVeneerPool to have an effect.
+ void EndBlockVeneerPool();
+
+ bool is_veneer_pool_blocked() const {
+ return veneer_pool_blocked_nesting_ > 0;
+ }
+
+ // Block/resume emission of constant pools and veneer pools.
+ void StartBlockPools() {
+ StartBlockConstPool();
+ StartBlockVeneerPool();
+ }
+ void EndBlockPools() {
+ EndBlockConstPool();
+ EndBlockVeneerPool();
+ }
+
+ // Debugging ----------------------------------------------------------------
+ PositionsRecorder* positions_recorder() { return &positions_recorder_; }
+ void RecordComment(const char* msg);
+ int buffer_space() const;
+
+ // Mark address of the ExitJSFrame code.
+ void RecordJSReturn();
+
+ // Mark address of a debug break slot.
+ void RecordDebugBreakSlot();
+
+ // Record the emission of a constant pool.
+ //
+ // The emission of constant and veneer pools depends on the size of the code
+ // generated and the number of RelocInfo recorded.
+ // The Debug mechanism needs to map code offsets between two versions of a
+ // function, compiled with and without debugger support (see for example
+ // Debug::PrepareForBreakPoints()).
+ // Compiling functions with debugger support generates additional code
+ // (Debug::GenerateSlot()). This may affect the emission of the pools and
+ // cause the version of the code with debugger support to have pools generated
+ // in different places.
+  // Recording the position and size of emitted pools makes it possible to
+  // correctly compute the offset mappings between the different versions of a
+  // function in all situations.
+ //
+ // The parameter indicates the size of the pool (in bytes), including
+ // the marker and branch over the data.
+ void RecordConstPool(int size);
+
+
+ // Instruction set functions ------------------------------------------------
+
+ // Branch / Jump instructions.
+  // For branches, offsets are scaled, i.e. they are in instructions, not in
+  // bytes.
+ // Branch to register.
+ void br(const Register& xn);
+
+ // Branch-link to register.
+ void blr(const Register& xn);
+
+ // Branch to register with return hint.
+ void ret(const Register& xn = lr);
+
+ // Unconditional branch to label.
+ void b(Label* label);
+
+ // Conditional branch to label.
+ void b(Label* label, Condition cond);
+
+ // Unconditional branch to PC offset.
+ void b(int imm26);
+
+ // Conditional branch to PC offset.
+ void b(int imm19, Condition cond);
+
+ // Branch-link to label / pc offset.
+ void bl(Label* label);
+ void bl(int imm26);
+
+ // Compare and branch to label / pc offset if zero.
+ void cbz(const Register& rt, Label* label);
+ void cbz(const Register& rt, int imm19);
+
+ // Compare and branch to label / pc offset if not zero.
+ void cbnz(const Register& rt, Label* label);
+ void cbnz(const Register& rt, int imm19);
+
+ // Test bit and branch to label / pc offset if zero.
+ void tbz(const Register& rt, unsigned bit_pos, Label* label);
+ void tbz(const Register& rt, unsigned bit_pos, int imm14);
+
+ // Test bit and branch to label / pc offset if not zero.
+ void tbnz(const Register& rt, unsigned bit_pos, Label* label);
+ void tbnz(const Register& rt, unsigned bit_pos, int imm14);
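+
+  // Usage sketch (illustrative only, register names assumed): branches usually
+  // target a Label that is later bound with bind(), e.g.
+  //   Label done;
+  //   cbz(x0, &done);              // Skip the add if x0 is zero.
+  //   add(x1, x1, Operand(1));
+  //   bind(&done);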
+
+ // Address calculation instructions.
+ // Calculate a PC-relative address. Unlike for branches the offset in adr is
+ // unscaled (i.e. the result can be unaligned).
+ void adr(const Register& rd, Label* label);
+ void adr(const Register& rd, int imm21);
+
+ // Data Processing instructions.
+ // Add.
+ void add(const Register& rd,
+ const Register& rn,
+ const Operand& operand);
+
+ // Add and update status flags.
+ void adds(const Register& rd,
+ const Register& rn,
+ const Operand& operand);
+
+ // Compare negative.
+ void cmn(const Register& rn, const Operand& operand);
+
+ // Subtract.
+ void sub(const Register& rd,
+ const Register& rn,
+ const Operand& operand);
+
+ // Subtract and update status flags.
+ void subs(const Register& rd,
+ const Register& rn,
+ const Operand& operand);
+
+ // Compare.
+ void cmp(const Register& rn, const Operand& operand);
+
+ // Negate.
+ void neg(const Register& rd,
+ const Operand& operand);
+
+ // Negate and update status flags.
+ void negs(const Register& rd,
+ const Operand& operand);
+
+ // Add with carry bit.
+ void adc(const Register& rd,
+ const Register& rn,
+ const Operand& operand);
+
+ // Add with carry bit and update status flags.
+ void adcs(const Register& rd,
+ const Register& rn,
+ const Operand& operand);
+
+ // Subtract with carry bit.
+ void sbc(const Register& rd,
+ const Register& rn,
+ const Operand& operand);
+
+ // Subtract with carry bit and update status flags.
+ void sbcs(const Register& rd,
+ const Register& rn,
+ const Operand& operand);
+
+ // Negate with carry bit.
+ void ngc(const Register& rd,
+ const Operand& operand);
+
+ // Negate with carry bit and update status flags.
+ void ngcs(const Register& rd,
+ const Operand& operand);
+
+ // Logical instructions.
+ // Bitwise and (A & B).
+ void and_(const Register& rd,
+ const Register& rn,
+ const Operand& operand);
+
+ // Bitwise and (A & B) and update status flags.
+ void ands(const Register& rd,
+ const Register& rn,
+ const Operand& operand);
+
+ // Bit test, and set flags.
+ void tst(const Register& rn, const Operand& operand);
+
+ // Bit clear (A & ~B).
+ void bic(const Register& rd,
+ const Register& rn,
+ const Operand& operand);
+
+ // Bit clear (A & ~B) and update status flags.
+ void bics(const Register& rd,
+ const Register& rn,
+ const Operand& operand);
+
+ // Bitwise or (A | B).
+ void orr(const Register& rd, const Register& rn, const Operand& operand);
+
+  // Bitwise or-not (A | ~B).
+  void orn(const Register& rd, const Register& rn, const Operand& operand);
+
+ // Bitwise eor/xor (A ^ B).
+ void eor(const Register& rd, const Register& rn, const Operand& operand);
+
+  // Bitwise exclusive nor/xnor (A ^ ~B).
+ void eon(const Register& rd, const Register& rn, const Operand& operand);
+
+ // Logical shift left variable.
+ void lslv(const Register& rd, const Register& rn, const Register& rm);
+
+ // Logical shift right variable.
+ void lsrv(const Register& rd, const Register& rn, const Register& rm);
+
+ // Arithmetic shift right variable.
+ void asrv(const Register& rd, const Register& rn, const Register& rm);
+
+ // Rotate right variable.
+ void rorv(const Register& rd, const Register& rn, const Register& rm);
+
+ // Bitfield instructions.
+ // Bitfield move.
+ void bfm(const Register& rd,
+ const Register& rn,
+ unsigned immr,
+ unsigned imms);
+
+ // Signed bitfield move.
+ void sbfm(const Register& rd,
+ const Register& rn,
+ unsigned immr,
+ unsigned imms);
+
+ // Unsigned bitfield move.
+ void ubfm(const Register& rd,
+ const Register& rn,
+ unsigned immr,
+ unsigned imms);
+
+ // Bfm aliases.
+ // Bitfield insert.
+ void bfi(const Register& rd,
+ const Register& rn,
+ unsigned lsb,
+ unsigned width) {
+ ASSERT(width >= 1);
+ ASSERT(lsb + width <= rn.SizeInBits());
+ bfm(rd, rn, (rd.SizeInBits() - lsb) & (rd.SizeInBits() - 1), width - 1);
+ }
+
+ // Bitfield extract and insert low.
+ void bfxil(const Register& rd,
+ const Register& rn,
+ unsigned lsb,
+ unsigned width) {
+ ASSERT(width >= 1);
+ ASSERT(lsb + width <= rn.SizeInBits());
+ bfm(rd, rn, lsb, lsb + width - 1);
+ }
+
+ // Sbfm aliases.
+ // Arithmetic shift right.
+ void asr(const Register& rd, const Register& rn, unsigned shift) {
+ ASSERT(shift < rd.SizeInBits());
+ sbfm(rd, rn, shift, rd.SizeInBits() - 1);
+ }
+
+ // Signed bitfield insert in zero.
+ void sbfiz(const Register& rd,
+ const Register& rn,
+ unsigned lsb,
+ unsigned width) {
+ ASSERT(width >= 1);
+ ASSERT(lsb + width <= rn.SizeInBits());
+ sbfm(rd, rn, (rd.SizeInBits() - lsb) & (rd.SizeInBits() - 1), width - 1);
+ }
+
+ // Signed bitfield extract.
+ void sbfx(const Register& rd,
+ const Register& rn,
+ unsigned lsb,
+ unsigned width) {
+ ASSERT(width >= 1);
+ ASSERT(lsb + width <= rn.SizeInBits());
+ sbfm(rd, rn, lsb, lsb + width - 1);
+ }
+
+ // Signed extend byte.
+ void sxtb(const Register& rd, const Register& rn) {
+ sbfm(rd, rn, 0, 7);
+ }
+
+ // Signed extend halfword.
+ void sxth(const Register& rd, const Register& rn) {
+ sbfm(rd, rn, 0, 15);
+ }
+
+ // Signed extend word.
+ void sxtw(const Register& rd, const Register& rn) {
+ sbfm(rd, rn, 0, 31);
+ }
+
+ // Ubfm aliases.
+ // Logical shift left.
+ void lsl(const Register& rd, const Register& rn, unsigned shift) {
+ unsigned reg_size = rd.SizeInBits();
+ ASSERT(shift < reg_size);
+ ubfm(rd, rn, (reg_size - shift) % reg_size, reg_size - shift - 1);
+ }
+
+ // Logical shift right.
+ void lsr(const Register& rd, const Register& rn, unsigned shift) {
+ ASSERT(shift < rd.SizeInBits());
+ ubfm(rd, rn, shift, rd.SizeInBits() - 1);
+ }
+
+ // Unsigned bitfield insert in zero.
+ void ubfiz(const Register& rd,
+ const Register& rn,
+ unsigned lsb,
+ unsigned width) {
+ ASSERT(width >= 1);
+ ASSERT(lsb + width <= rn.SizeInBits());
+ ubfm(rd, rn, (rd.SizeInBits() - lsb) & (rd.SizeInBits() - 1), width - 1);
+ }
+
+ // Unsigned bitfield extract.
+ void ubfx(const Register& rd,
+ const Register& rn,
+ unsigned lsb,
+ unsigned width) {
+ ASSERT(width >= 1);
+ ASSERT(lsb + width <= rn.SizeInBits());
+ ubfm(rd, rn, lsb, lsb + width - 1);
+ }
+
+ // Unsigned extend byte.
+ void uxtb(const Register& rd, const Register& rn) {
+ ubfm(rd, rn, 0, 7);
+ }
+
+ // Unsigned extend halfword.
+ void uxth(const Register& rd, const Register& rn) {
+ ubfm(rd, rn, 0, 15);
+ }
+
+ // Unsigned extend word.
+ void uxtw(const Register& rd, const Register& rn) {
+ ubfm(rd, rn, 0, 31);
+ }
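+
+  // Usage sketch (illustrative only, register names assumed): the bfm/sbfm/ubfm
+  // aliases express common field operations, e.g.
+  //   ubfx(w0, w1, 8, 8);          // w0 = (w1 >> 8) & 0xff
+  //   bfi(w2, w0, 16, 8);          // Insert low 8 bits of w0 at bit 16 of w2.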
+
+ // Extract.
+ void extr(const Register& rd,
+ const Register& rn,
+ const Register& rm,
+ unsigned lsb);
+
+ // Conditional select: rd = cond ? rn : rm.
+ void csel(const Register& rd,
+ const Register& rn,
+ const Register& rm,
+ Condition cond);
+
+ // Conditional select increment: rd = cond ? rn : rm + 1.
+ void csinc(const Register& rd,
+ const Register& rn,
+ const Register& rm,
+ Condition cond);
+
+ // Conditional select inversion: rd = cond ? rn : ~rm.
+ void csinv(const Register& rd,
+ const Register& rn,
+ const Register& rm,
+ Condition cond);
+
+ // Conditional select negation: rd = cond ? rn : -rm.
+ void csneg(const Register& rd,
+ const Register& rn,
+ const Register& rm,
+ Condition cond);
+
+ // Conditional set: rd = cond ? 1 : 0.
+ void cset(const Register& rd, Condition cond);
+
+ // Conditional set minus: rd = cond ? -1 : 0.
+ void csetm(const Register& rd, Condition cond);
+
+ // Conditional increment: rd = cond ? rn + 1 : rn.
+ void cinc(const Register& rd, const Register& rn, Condition cond);
+
+ // Conditional invert: rd = cond ? ~rn : rn.
+ void cinv(const Register& rd, const Register& rn, Condition cond);
+
+ // Conditional negate: rd = cond ? -rn : rn.
+ void cneg(const Register& rd, const Register& rn, Condition cond);
+
+ // Extr aliases.
+ void ror(const Register& rd, const Register& rs, unsigned shift) {
+ extr(rd, rs, rs, shift);
+ }
+
+ // Conditional comparison.
+ // Conditional compare negative.
+ void ccmn(const Register& rn,
+ const Operand& operand,
+ StatusFlags nzcv,
+ Condition cond);
+
+ // Conditional compare.
+ void ccmp(const Register& rn,
+ const Operand& operand,
+ StatusFlags nzcv,
+ Condition cond);
+
+ // Multiplication.
+ // 32 x 32 -> 32-bit and 64 x 64 -> 64-bit multiply.
+ void mul(const Register& rd, const Register& rn, const Register& rm);
+
+ // 32 + 32 x 32 -> 32-bit and 64 + 64 x 64 -> 64-bit multiply accumulate.
+ void madd(const Register& rd,
+ const Register& rn,
+ const Register& rm,
+ const Register& ra);
+
+ // -(32 x 32) -> 32-bit and -(64 x 64) -> 64-bit multiply.
+ void mneg(const Register& rd, const Register& rn, const Register& rm);
+
+ // 32 - 32 x 32 -> 32-bit and 64 - 64 x 64 -> 64-bit multiply subtract.
+ void msub(const Register& rd,
+ const Register& rn,
+ const Register& rm,
+ const Register& ra);
+
+  // Signed 32 x 32 -> 64-bit multiply.
+ void smull(const Register& rd, const Register& rn, const Register& rm);
+
+ // Xd = bits<127:64> of Xn * Xm.
+ void smulh(const Register& rd, const Register& rn, const Register& rm);
+
+ // Signed 32 x 32 -> 64-bit multiply and accumulate.
+ void smaddl(const Register& rd,
+ const Register& rn,
+ const Register& rm,
+ const Register& ra);
+
+ // Unsigned 32 x 32 -> 64-bit multiply and accumulate.
+ void umaddl(const Register& rd,
+ const Register& rn,
+ const Register& rm,
+ const Register& ra);
+
+ // Signed 32 x 32 -> 64-bit multiply and subtract.
+ void smsubl(const Register& rd,
+ const Register& rn,
+ const Register& rm,
+ const Register& ra);
+
+ // Unsigned 32 x 32 -> 64-bit multiply and subtract.
+ void umsubl(const Register& rd,
+ const Register& rn,
+ const Register& rm,
+ const Register& ra);
+
+ // Signed integer divide.
+ void sdiv(const Register& rd, const Register& rn, const Register& rm);
+
+ // Unsigned integer divide.
+ void udiv(const Register& rd, const Register& rn, const Register& rm);
+
+ // Bit count, bit reverse and endian reverse.
+ void rbit(const Register& rd, const Register& rn);
+ void rev16(const Register& rd, const Register& rn);
+ void rev32(const Register& rd, const Register& rn);
+ void rev(const Register& rd, const Register& rn);
+ void clz(const Register& rd, const Register& rn);
+ void cls(const Register& rd, const Register& rn);
+
+ // Memory instructions.
+
+ // Load literal from pc + offset_from_pc.
+ void LoadLiteral(const CPURegister& rt, int offset_from_pc);
+
+ // Load integer or FP register.
+ void ldr(const CPURegister& rt, const MemOperand& src);
+
+ // Store integer or FP register.
+ void str(const CPURegister& rt, const MemOperand& dst);
+
+ // Load word with sign extension.
+ void ldrsw(const Register& rt, const MemOperand& src);
+
+ // Load byte.
+ void ldrb(const Register& rt, const MemOperand& src);
+
+ // Store byte.
+ void strb(const Register& rt, const MemOperand& dst);
+
+ // Load byte with sign extension.
+ void ldrsb(const Register& rt, const MemOperand& src);
+
+ // Load half-word.
+ void ldrh(const Register& rt, const MemOperand& src);
+
+ // Store half-word.
+ void strh(const Register& rt, const MemOperand& dst);
+
+ // Load half-word with sign extension.
+ void ldrsh(const Register& rt, const MemOperand& src);
+
+ // Load integer or FP register pair.
+ void ldp(const CPURegister& rt, const CPURegister& rt2,
+ const MemOperand& src);
+
+ // Store integer or FP register pair.
+ void stp(const CPURegister& rt, const CPURegister& rt2,
+ const MemOperand& dst);
+
+ // Load word pair with sign extension.
+ void ldpsw(const Register& rt, const Register& rt2, const MemOperand& src);
+
+ // Load integer or FP register pair, non-temporal.
+ void ldnp(const CPURegister& rt, const CPURegister& rt2,
+ const MemOperand& src);
+
+ // Store integer or FP register pair, non-temporal.
+ void stnp(const CPURegister& rt, const CPURegister& rt2,
+ const MemOperand& dst);
+
+ // Load literal to register.
+ void ldr(const Register& rt, uint64_t imm);
+
+ // Load literal to FP register.
+ void ldr(const FPRegister& ft, double imm);
+ void ldr(const FPRegister& ft, float imm);
+
+  // Move instructions. The default shift of -1 indicates that the move
+  // instruction will derive an appropriate 16-bit immediate and left shift
+  // whose combination is equal to the 64-bit immediate argument. If an
+  // explicit left shift is specified (0, 16, 32 or 48), the immediate must be
+  // a 16-bit value.
+  //
+  // For movk, an explicit shift can be used to indicate which half word should
+  // be overwritten, e.g. movk(x0, 0, 0) will overwrite the least-significant
+  // half word with zero, whereas movk(x0, 0, 48) will overwrite the
+  // most-significant.
+
+ // Move and keep.
+ void movk(const Register& rd, uint64_t imm, int shift = -1) {
+ MoveWide(rd, imm, shift, MOVK);
+ }
+
+ // Move with non-zero.
+ void movn(const Register& rd, uint64_t imm, int shift = -1) {
+ MoveWide(rd, imm, shift, MOVN);
+ }
+
+ // Move with zero.
+ void movz(const Register& rd, uint64_t imm, int shift = -1) {
+ MoveWide(rd, imm, shift, MOVZ);
+ }
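+
+  // Usage sketch (illustrative only, register name assumed): a 64-bit immediate
+  // can be built one half word at a time, e.g. to materialize
+  // 0x0000123400005678 in x0:
+  //   movz(x0, 0x5678);            // Shift derived as 0; rest cleared.
+  //   movk(x0, 0x1234, 32);        // Overwrite bits 32-47, keep the rest.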
+
+ // Misc instructions.
+ // Monitor debug-mode breakpoint.
+ void brk(int code);
+
+ // Halting debug-mode breakpoint.
+ void hlt(int code);
+
+ // Move register to register.
+ void mov(const Register& rd, const Register& rn);
+
+ // Move NOT(operand) to register.
+ void mvn(const Register& rd, const Operand& operand);
+
+ // System instructions.
+ // Move to register from system register.
+ void mrs(const Register& rt, SystemRegister sysreg);
+
+ // Move from register to system register.
+ void msr(SystemRegister sysreg, const Register& rt);
+
+ // System hint.
+ void hint(SystemHint code);
+
+  // Data memory barrier.
+  void dmb(BarrierDomain domain, BarrierType type);
+
+  // Data synchronization barrier.
+  void dsb(BarrierDomain domain, BarrierType type);
+
+  // Instruction synchronization barrier.
+  void isb();
+
+ // Alias for system instructions.
+ void nop() { hint(NOP); }
+
+ // Different nop operations are used by the code generator to detect certain
+ // states of the generated code.
+ enum NopMarkerTypes {
+ DEBUG_BREAK_NOP,
+ INTERRUPT_CODE_NOP,
+ FIRST_NOP_MARKER = DEBUG_BREAK_NOP,
+ LAST_NOP_MARKER = INTERRUPT_CODE_NOP
+ };
+
+ void nop(NopMarkerTypes n) {
+ ASSERT((FIRST_NOP_MARKER <= n) && (n <= LAST_NOP_MARKER));
+ mov(Register::XRegFromCode(n), Register::XRegFromCode(n));
+ }
+
+ // FP instructions.
+ // Move immediate to FP register.
+ void fmov(FPRegister fd, double imm);
+ void fmov(FPRegister fd, float imm);
+
+ // Move FP register to register.
+ void fmov(Register rd, FPRegister fn);
+
+ // Move register to FP register.
+ void fmov(FPRegister fd, Register rn);
+
+ // Move FP register to FP register.
+ void fmov(FPRegister fd, FPRegister fn);
+
+ // FP add.
+ void fadd(const FPRegister& fd, const FPRegister& fn, const FPRegister& fm);
+
+ // FP subtract.
+ void fsub(const FPRegister& fd, const FPRegister& fn, const FPRegister& fm);
+
+ // FP multiply.
+ void fmul(const FPRegister& fd, const FPRegister& fn, const FPRegister& fm);
+
+ // FP fused multiply and add.
+ void fmadd(const FPRegister& fd,
+ const FPRegister& fn,
+ const FPRegister& fm,
+ const FPRegister& fa);
+
+ // FP fused multiply and subtract.
+ void fmsub(const FPRegister& fd,
+ const FPRegister& fn,
+ const FPRegister& fm,
+ const FPRegister& fa);
+
+ // FP fused multiply, add and negate.
+ void fnmadd(const FPRegister& fd,
+ const FPRegister& fn,
+ const FPRegister& fm,
+ const FPRegister& fa);
+
+ // FP fused multiply, subtract and negate.
+ void fnmsub(const FPRegister& fd,
+ const FPRegister& fn,
+ const FPRegister& fm,
+ const FPRegister& fa);
+
+ // FP divide.
+ void fdiv(const FPRegister& fd, const FPRegister& fn, const FPRegister& fm);
+
+ // FP maximum.
+ void fmax(const FPRegister& fd, const FPRegister& fn, const FPRegister& fm);
+
+ // FP minimum.
+ void fmin(const FPRegister& fd, const FPRegister& fn, const FPRegister& fm);
+
+  // FP maximum number.
+  void fmaxnm(const FPRegister& fd, const FPRegister& fn, const FPRegister& fm);
+
+  // FP minimum number.
+  void fminnm(const FPRegister& fd, const FPRegister& fn, const FPRegister& fm);
+
+ // FP absolute.
+ void fabs(const FPRegister& fd, const FPRegister& fn);
+
+ // FP negate.
+ void fneg(const FPRegister& fd, const FPRegister& fn);
+
+ // FP square root.
+ void fsqrt(const FPRegister& fd, const FPRegister& fn);
+
+ // FP round to integer (nearest with ties to away).
+ void frinta(const FPRegister& fd, const FPRegister& fn);
+
+ // FP round to integer (nearest with ties to even).
+ void frintn(const FPRegister& fd, const FPRegister& fn);
+
+  // FP round to integer (towards zero).
+ void frintz(const FPRegister& fd, const FPRegister& fn);
+
+ // FP compare registers.
+ void fcmp(const FPRegister& fn, const FPRegister& fm);
+
+ // FP compare immediate.
+ void fcmp(const FPRegister& fn, double value);
+
+ // FP conditional compare.
+ void fccmp(const FPRegister& fn,
+ const FPRegister& fm,
+ StatusFlags nzcv,
+ Condition cond);
+
+ // FP conditional select.
+ void fcsel(const FPRegister& fd,
+ const FPRegister& fn,
+ const FPRegister& fm,
+ Condition cond);
+
+  // Common FP convert function.
+ void FPConvertToInt(const Register& rd,
+ const FPRegister& fn,
+ FPIntegerConvertOp op);
+
+ // FP convert between single and double precision.
+ void fcvt(const FPRegister& fd, const FPRegister& fn);
+
+ // Convert FP to unsigned integer (nearest with ties to away).
+ void fcvtau(const Register& rd, const FPRegister& fn);
+
+ // Convert FP to signed integer (nearest with ties to away).
+ void fcvtas(const Register& rd, const FPRegister& fn);
+
+ // Convert FP to unsigned integer (round towards -infinity).
+ void fcvtmu(const Register& rd, const FPRegister& fn);
+
+ // Convert FP to signed integer (round towards -infinity).
+ void fcvtms(const Register& rd, const FPRegister& fn);
+
+ // Convert FP to unsigned integer (nearest with ties to even).
+ void fcvtnu(const Register& rd, const FPRegister& fn);
+
+ // Convert FP to signed integer (nearest with ties to even).
+ void fcvtns(const Register& rd, const FPRegister& fn);
+
+ // Convert FP to unsigned integer (round towards zero).
+ void fcvtzu(const Register& rd, const FPRegister& fn);
+
+  // Convert FP to signed integer (round towards zero).
+ void fcvtzs(const Register& rd, const FPRegister& fn);
+
+ // Convert signed integer or fixed point to FP.
+ void scvtf(const FPRegister& fd, const Register& rn, unsigned fbits = 0);
+
+ // Convert unsigned integer or fixed point to FP.
+ void ucvtf(const FPRegister& fd, const Register& rn, unsigned fbits = 0);
+
+ // Instruction functions used only for test, debug, and patching.
+ // Emit raw instructions in the instruction stream.
+ void dci(Instr raw_inst) { Emit(raw_inst); }
+
+ // Emit 8 bits of data in the instruction stream.
+ void dc8(uint8_t data) { EmitData(&data, sizeof(data)); }
+
+ // Emit 32 bits of data in the instruction stream.
+ void dc32(uint32_t data) { EmitData(&data, sizeof(data)); }
+
+ // Emit 64 bits of data in the instruction stream.
+ void dc64(uint64_t data) { EmitData(&data, sizeof(data)); }
+
+ // Copy a string into the instruction stream, including the terminating NULL
+ // character. The instruction pointer (pc_) is then aligned correctly for
+ // subsequent instructions.
+ void EmitStringData(const char * string) {
+ size_t len = strlen(string) + 1;
+ ASSERT(RoundUp(len, kInstructionSize) <= static_cast<size_t>(kGap));
+ EmitData(string, len);
+ // Pad with NULL characters until pc_ is aligned.
+ const char pad[] = {'\0', '\0', '\0', '\0'};
+ STATIC_ASSERT(sizeof(pad) == kInstructionSize);
+ byte* next_pc = AlignUp(pc_, kInstructionSize);
+ EmitData(&pad, next_pc - pc_);
+ }
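+
+  // Usage sketch (illustrative only): the raw-data emitters are mostly used for
+  // tests and patching, e.g. embedding a marker string followed by a word of
+  // data directly in the instruction stream:
+  //   EmitStringData("marker");    // NULL-terminated, padded to alignment.
+  //   dc32(0xdeadbeef);            // 32 bits of raw data.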
+
+ // Pseudo-instructions ------------------------------------------------------
+
+ // Parameters are described in arm64/instructions-arm64.h.
+ void debug(const char* message, uint32_t code, Instr params = BREAK);
+
+ // Required by V8.
+ void dd(uint32_t data) { dc32(data); }
+ void db(uint8_t data) { dc8(data); }
+
+ // Code generation helpers --------------------------------------------------
+
+ unsigned num_pending_reloc_info() const { return num_pending_reloc_info_; }
+
+ Instruction* InstructionAt(int offset) const {
+ return reinterpret_cast<Instruction*>(buffer_ + offset);
+ }
+
+ // Register encoding.
+ static Instr Rd(CPURegister rd) {
+ ASSERT(rd.code() != kSPRegInternalCode);
+ return rd.code() << Rd_offset;
+ }
+
+ static Instr Rn(CPURegister rn) {
+ ASSERT(rn.code() != kSPRegInternalCode);
+ return rn.code() << Rn_offset;
+ }
+
+ static Instr Rm(CPURegister rm) {
+ ASSERT(rm.code() != kSPRegInternalCode);
+ return rm.code() << Rm_offset;
+ }
+
+ static Instr Ra(CPURegister ra) {
+ ASSERT(ra.code() != kSPRegInternalCode);
+ return ra.code() << Ra_offset;
+ }
+
+ static Instr Rt(CPURegister rt) {
+ ASSERT(rt.code() != kSPRegInternalCode);
+ return rt.code() << Rt_offset;
+ }
+
+ static Instr Rt2(CPURegister rt2) {
+ ASSERT(rt2.code() != kSPRegInternalCode);
+ return rt2.code() << Rt2_offset;
+ }
+
+ // These encoding functions allow the stack pointer to be encoded, and
+ // disallow the zero register.
+ static Instr RdSP(Register rd) {
+ ASSERT(!rd.IsZero());
+ return (rd.code() & kRegCodeMask) << Rd_offset;
+ }
+
+ static Instr RnSP(Register rn) {
+ ASSERT(!rn.IsZero());
+ return (rn.code() & kRegCodeMask) << Rn_offset;
+ }
+
+ // Flags encoding.
+ inline static Instr Flags(FlagsUpdate S);
+ inline static Instr Cond(Condition cond);
+
+ // PC-relative address encoding.
+ inline static Instr ImmPCRelAddress(int imm21);
+
+ // Branch encoding.
+ inline static Instr ImmUncondBranch(int imm26);
+ inline static Instr ImmCondBranch(int imm19);
+ inline static Instr ImmCmpBranch(int imm19);
+ inline static Instr ImmTestBranch(int imm14);
+ inline static Instr ImmTestBranchBit(unsigned bit_pos);
+
+ // Data Processing encoding.
+ inline static Instr SF(Register rd);
+ inline static Instr ImmAddSub(int64_t imm);
+ inline static Instr ImmS(unsigned imms, unsigned reg_size);
+ inline static Instr ImmR(unsigned immr, unsigned reg_size);
+ inline static Instr ImmSetBits(unsigned imms, unsigned reg_size);
+ inline static Instr ImmRotate(unsigned immr, unsigned reg_size);
+ inline static Instr ImmLLiteral(int imm19);
+ inline static Instr BitN(unsigned bitn, unsigned reg_size);
+ inline static Instr ShiftDP(Shift shift);
+ inline static Instr ImmDPShift(unsigned amount);
+ inline static Instr ExtendMode(Extend extend);
+ inline static Instr ImmExtendShift(unsigned left_shift);
+ inline static Instr ImmCondCmp(unsigned imm);
+ inline static Instr Nzcv(StatusFlags nzcv);
+
+ // MemOperand offset encoding.
+ inline static Instr ImmLSUnsigned(int imm12);
+ inline static Instr ImmLS(int imm9);
+ inline static Instr ImmLSPair(int imm7, LSDataSize size);
+ inline static Instr ImmShiftLS(unsigned shift_amount);
+ inline static Instr ImmException(int imm16);
+ inline static Instr ImmSystemRegister(int imm15);
+ inline static Instr ImmHint(int imm7);
+ inline static Instr ImmBarrierDomain(int imm2);
+ inline static Instr ImmBarrierType(int imm2);
+ inline static LSDataSize CalcLSDataSize(LoadStoreOp op);
+
+ // Move immediates encoding.
+ inline static Instr ImmMoveWide(uint64_t imm);
+ inline static Instr ShiftMoveWide(int64_t shift);
+
+ // FP Immediates.
+ static Instr ImmFP32(float imm);
+ static Instr ImmFP64(double imm);
+ inline static Instr FPScale(unsigned scale);
+
+ // FP register type.
+ inline static Instr FPType(FPRegister fd);
+
+ // Class for scoping postponing the constant pool generation.
+ class BlockConstPoolScope {
+ public:
+ explicit BlockConstPoolScope(Assembler* assem) : assem_(assem) {
+ assem_->StartBlockConstPool();
+ }
+ ~BlockConstPoolScope() {
+ assem_->EndBlockConstPool();
+ }
+
+ private:
+ Assembler* assem_;
+
+ DISALLOW_IMPLICIT_CONSTRUCTORS(BlockConstPoolScope);
+ };
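+
+  // Usage sketch (illustrative only, 'assm' is an assumed Assembler*): wrap any
+  // fixed-length sequence that a constant pool must not split:
+  //   {
+  //     Assembler::BlockConstPoolScope scope(assm);
+  //     // ... emit the sequence ...
+  //   }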
+
+  // Check if it is time to emit a constant pool.
+ void CheckConstPool(bool force_emit, bool require_jump);
+
+ // Allocate a constant pool of the correct size for the generated code.
+ MaybeObject* AllocateConstantPool(Heap* heap);
+
+ // Generate the constant pool for the generated code.
+ void PopulateConstantPool(ConstantPoolArray* constant_pool);
+
+ // Returns true if we should emit a veneer as soon as possible for a branch
+ // which can at most reach to specified pc.
+ bool ShouldEmitVeneer(int max_reachable_pc,
+ int margin = kVeneerDistanceMargin);
+ bool ShouldEmitVeneers(int margin = kVeneerDistanceMargin) {
+ return ShouldEmitVeneer(unresolved_branches_first_limit(), margin);
+ }
+
+ // The maximum code size generated for a veneer. Currently one branch
+ // instruction. This is for code size checking purposes, and can be extended
+ // in the future for example if we decide to add nops between the veneers.
+ static const int kMaxVeneerCodeSize = 1 * kInstructionSize;
+
+ void RecordVeneerPool(int location_offset, int size);
+ // Emits veneers for branches that are approaching their maximum range.
+ // If need_protection is true, the veneers are protected by a branch jumping
+ // over the code.
+ void EmitVeneers(bool force_emit, bool need_protection,
+ int margin = kVeneerDistanceMargin);
+ void EmitVeneersGuard() { EmitPoolGuard(); }
+ // Checks whether veneers need to be emitted at this point.
+ // If force_emit is set, a veneer is generated for *all* unresolved branches.
+ void CheckVeneerPool(bool force_emit, bool require_jump,
+ int margin = kVeneerDistanceMargin);
+
+
+ class BlockPoolsScope {
+ public:
+ explicit BlockPoolsScope(Assembler* assem) : assem_(assem) {
+ assem_->StartBlockPools();
+ }
+ ~BlockPoolsScope() {
+ assem_->EndBlockPools();
+ }
+
+ private:
+ Assembler* assem_;
+
+ DISALLOW_IMPLICIT_CONSTRUCTORS(BlockPoolsScope);
+ };
+
+ // Available for constrained code generation scopes. Prefer
+ // MacroAssembler::Mov() when possible.
+ inline void LoadRelocated(const CPURegister& rt, const Operand& operand);
+
+ protected:
+ inline const Register& AppropriateZeroRegFor(const CPURegister& reg) const;
+
+ void LoadStore(const CPURegister& rt,
+ const MemOperand& addr,
+ LoadStoreOp op);
+ static bool IsImmLSUnscaled(ptrdiff_t offset);
+ static bool IsImmLSScaled(ptrdiff_t offset, LSDataSize size);
+
+ void Logical(const Register& rd,
+ const Register& rn,
+ const Operand& operand,
+ LogicalOp op);
+ void LogicalImmediate(const Register& rd,
+ const Register& rn,
+ unsigned n,
+ unsigned imm_s,
+ unsigned imm_r,
+ LogicalOp op);
+ static bool IsImmLogical(uint64_t value,
+ unsigned width,
+ unsigned* n,
+ unsigned* imm_s,
+ unsigned* imm_r);
+
+ void ConditionalCompare(const Register& rn,
+ const Operand& operand,
+ StatusFlags nzcv,
+ Condition cond,
+ ConditionalCompareOp op);
+ static bool IsImmConditionalCompare(int64_t immediate);
+
+ void AddSubWithCarry(const Register& rd,
+ const Register& rn,
+ const Operand& operand,
+ FlagsUpdate S,
+ AddSubWithCarryOp op);
+
+ // Functions for emulating operands not directly supported by the instruction
+ // set.
+ void EmitShift(const Register& rd,
+ const Register& rn,
+ Shift shift,
+ unsigned amount);
+ void EmitExtendShift(const Register& rd,
+ const Register& rn,
+ Extend extend,
+ unsigned left_shift);
+
+ void AddSub(const Register& rd,
+ const Register& rn,
+ const Operand& operand,
+ FlagsUpdate S,
+ AddSubOp op);
+ static bool IsImmAddSub(int64_t immediate);
+
+ static bool IsImmFP32(float imm);
+ static bool IsImmFP64(double imm);
+
+ // Find an appropriate LoadStoreOp or LoadStorePairOp for the specified
+ // registers. Only simple loads are supported; sign- and zero-extension (such
+ // as in LDPSW_x or LDRB_w) are not supported.
+ static inline LoadStoreOp LoadOpFor(const CPURegister& rt);
+ static inline LoadStorePairOp LoadPairOpFor(const CPURegister& rt,
+ const CPURegister& rt2);
+ static inline LoadStoreOp StoreOpFor(const CPURegister& rt);
+ static inline LoadStorePairOp StorePairOpFor(const CPURegister& rt,
+ const CPURegister& rt2);
+ static inline LoadStorePairNonTemporalOp LoadPairNonTemporalOpFor(
+ const CPURegister& rt, const CPURegister& rt2);
+ static inline LoadStorePairNonTemporalOp StorePairNonTemporalOpFor(
+ const CPURegister& rt, const CPURegister& rt2);
+
+ // Remove the specified branch from the unbound label link chain.
+ // If available, a veneer for this label can be used for other branches in the
+ // chain if the link chain cannot be fixed up without this branch.
+ void RemoveBranchFromLabelLinkChain(Instruction* branch,
+ Label* label,
+ Instruction* label_veneer = NULL);
+
+ private:
+ // Instruction helpers.
+ void MoveWide(const Register& rd,
+ uint64_t imm,
+ int shift,
+ MoveWideImmediateOp mov_op);
+ void DataProcShiftedRegister(const Register& rd,
+ const Register& rn,
+ const Operand& operand,
+ FlagsUpdate S,
+ Instr op);
+ void DataProcExtendedRegister(const Register& rd,
+ const Register& rn,
+ const Operand& operand,
+ FlagsUpdate S,
+ Instr op);
+ void LoadStorePair(const CPURegister& rt,
+ const CPURegister& rt2,
+ const MemOperand& addr,
+ LoadStorePairOp op);
+ void LoadStorePairNonTemporal(const CPURegister& rt,
+ const CPURegister& rt2,
+ const MemOperand& addr,
+ LoadStorePairNonTemporalOp op);
+ // Register the relocation information for the operand and load its value
+ // into rt.
+ void LoadRelocatedValue(const CPURegister& rt,
+ const Operand& operand,
+ LoadLiteralOp op);
+ void ConditionalSelect(const Register& rd,
+ const Register& rn,
+ const Register& rm,
+ Condition cond,
+ ConditionalSelectOp op);
+ void DataProcessing1Source(const Register& rd,
+ const Register& rn,
+ DataProcessing1SourceOp op);
+ void DataProcessing3Source(const Register& rd,
+ const Register& rn,
+ const Register& rm,
+ const Register& ra,
+ DataProcessing3SourceOp op);
+ void FPDataProcessing1Source(const FPRegister& fd,
+ const FPRegister& fn,
+ FPDataProcessing1SourceOp op);
+ void FPDataProcessing2Source(const FPRegister& fd,
+ const FPRegister& fn,
+ const FPRegister& fm,
+ FPDataProcessing2SourceOp op);
+ void FPDataProcessing3Source(const FPRegister& fd,
+ const FPRegister& fn,
+ const FPRegister& fm,
+ const FPRegister& fa,
+ FPDataProcessing3SourceOp op);
+
+ // Label helpers.
+
+ // Return an offset for a label-referencing instruction, typically a branch.
+ int LinkAndGetByteOffsetTo(Label* label);
+
+ // This is the same as LinkAndGetByteOffsetTo, but return an offset
+ // suitable for fields that take instruction offsets.
+ inline int LinkAndGetInstructionOffsetTo(Label* label);
+
+ static const int kStartOfLabelLinkChain = 0;
+
+ // Verify that a label's link chain is intact.
+ void CheckLabelLinkChain(Label const * label);
+
+ void RecordLiteral(int64_t imm, unsigned size);
+
+ // Postpone the generation of the constant pool for the specified number of
+ // instructions.
+ void BlockConstPoolFor(int instructions);
+
+ // Emit the instruction at pc_.
+ void Emit(Instr instruction) {
+ STATIC_ASSERT(sizeof(*pc_) == 1);
+ STATIC_ASSERT(sizeof(instruction) == kInstructionSize);
+ ASSERT((pc_ + sizeof(instruction)) <= (buffer_ + buffer_size_));
+
+ memcpy(pc_, &instruction, sizeof(instruction));
+ pc_ += sizeof(instruction);
+ CheckBuffer();
+ }
+
+ // Emit data inline in the instruction stream.
+ void EmitData(void const * data, unsigned size) {
+ ASSERT(sizeof(*pc_) == 1);
+ ASSERT((pc_ + size) <= (buffer_ + buffer_size_));
+
+ // TODO(all): Somehow register we have some data here. Then we can
+ // disassemble it correctly.
+ memcpy(pc_, data, size);
+ pc_ += size;
+ CheckBuffer();
+ }
+
+ void GrowBuffer();
+ void CheckBuffer();
+
+ // Pc offset of the next constant pool check.
+ int next_constant_pool_check_;
+
+ // Constant pool generation
+ // Pools are emitted in the instruction stream, preferably after unconditional
+ // jumps or after returns from functions (in dead code locations).
+ // If a long code sequence does not contain unconditional jumps, it is
+ // necessary to emit the constant pool before the pool gets too far from the
+ // location it is accessed from. In this case, we emit a jump over the emitted
+ // constant pool.
+  // Constants in the pool may be addresses of functions that get relocated;
+  // if so, a relocation info entry is associated with the constant pool entry.
+
+ // Repeated checking whether the constant pool should be emitted is rather
+ // expensive. By default we only check again once a number of instructions
+ // has been generated. That also means that the sizing of the buffers is not
+ // an exact science, and that we rely on some slop to not overrun buffers.
+ static const int kCheckConstPoolIntervalInst = 128;
+ static const int kCheckConstPoolInterval =
+ kCheckConstPoolIntervalInst * kInstructionSize;
+
+ // Constants in pools are accessed via pc relative addressing, which can
+ // reach +/-4KB thereby defining a maximum distance between the instruction
+ // and the accessed constant.
+ static const int kMaxDistToConstPool = 4 * KB;
+ static const int kMaxNumPendingRelocInfo =
+ kMaxDistToConstPool / kInstructionSize;
+
+
+  // Average distance between a constant pool and the first instruction
+ // accessing the constant pool. Longer distance should result in less I-cache
+ // pollution.
+ // In practice the distance will be smaller since constant pool emission is
+ // forced after function return and sometimes after unconditional branches.
+ static const int kAvgDistToConstPool =
+ kMaxDistToConstPool - kCheckConstPoolInterval;
+
+ // Emission of the constant pool may be blocked in some code sequences.
+ int const_pool_blocked_nesting_; // Block emission if this is not zero.
+ int no_const_pool_before_; // Block emission before this pc offset.
+
+ // Keep track of the first instruction requiring a constant pool entry
+ // since the previous constant pool was emitted.
+ int first_const_pool_use_;
+
+ // Emission of the veneer pools may be blocked in some code sequences.
+ int veneer_pool_blocked_nesting_; // Block emission if this is not zero.
+
+ // Relocation info generation
+ // Each relocation is encoded as a variable size value
+ static const int kMaxRelocSize = RelocInfoWriter::kMaxSize;
+ RelocInfoWriter reloc_info_writer;
+
+ // Relocation info records are also used during code generation as temporary
+ // containers for constants and code target addresses until they are emitted
+ // to the constant pool. These pending relocation info records are temporarily
+ // stored in a separate buffer until a constant pool is emitted.
+ // If every instruction in a long sequence is accessing the pool, we need one
+ // pending relocation entry per instruction.
+
+ // the buffer of pending relocation info
+ RelocInfo pending_reloc_info_[kMaxNumPendingRelocInfo];
+ // number of pending reloc info entries in the buffer
+ int num_pending_reloc_info_;
+
+ // Relocation for a type-recording IC has the AST id added to it. This
+ // member variable is a way to pass the information from the call site to
+ // the relocation info.
+ TypeFeedbackId recorded_ast_id_;
+
+ inline TypeFeedbackId RecordedAstId();
+ inline void ClearRecordedAstId();
+
+ protected:
+ // Record the AST id of the CallIC being compiled, so that it can be placed
+ // in the relocation information.
+ void SetRecordedAstId(TypeFeedbackId ast_id) {
+ ASSERT(recorded_ast_id_.IsNone());
+ recorded_ast_id_ = ast_id;
+ }
+
+ // Code generation
+ // The relocation writer's position is at least kGap bytes below the end of
+ // the generated instructions. This is so that multi-instruction sequences do
+ // not have to check for overflow. The same is true for writes of large
+ // relocation info entries, and debug strings encoded in the instruction
+ // stream.
+ static const int kGap = 128;
+
+ public:
+ class FarBranchInfo {
+ public:
+ FarBranchInfo(int offset, Label* label)
+ : pc_offset_(offset), label_(label) {}
+ // Offset of the branch in the code generation buffer.
+ int pc_offset_;
+ // The label branched to.
+ Label* label_;
+ };
+
+ protected:
+ // Information about unresolved (forward) branches.
+ // The Assembler is only allowed to delete out-of-date information from here
+ // after a label is bound. The MacroAssembler uses this information to
+ // generate veneers.
+ //
+ // The second member gives information about the unresolved branch. The first
+ // member of the pair is the maximum offset that the branch can reach in the
+  // buffer. The map is sorted according to this reachable offset, making it
+  // easy to check when veneers need to be emitted.
+ // Note that the maximum reachable offset (first member of the pairs) should
+ // always be positive but has the same type as the return value for
+ // pc_offset() for convenience.
+ std::multimap<int, FarBranchInfo> unresolved_branches_;
+
+ // We generate a veneer for a branch if we reach within this distance of the
+ // limit of the range.
+ static const int kVeneerDistanceMargin = 1 * KB;
+  // The factor of 2 is a finger-in-the-air guess. With a default margin of
+  // 1KB, that leaves us an additional 256 instructions to avoid generating a
+  // protective branch.
+ static const int kVeneerNoProtectionFactor = 2;
+ static const int kVeneerDistanceCheckMargin =
+ kVeneerNoProtectionFactor * kVeneerDistanceMargin;
+ int unresolved_branches_first_limit() const {
+ ASSERT(!unresolved_branches_.empty());
+ return unresolved_branches_.begin()->first;
+ }
+ // This is similar to next_constant_pool_check_ and helps reduce the overhead
+ // of checking for veneer pools.
+ // It is maintained to the closest unresolved branch limit minus the maximum
+ // veneer margin (or kMaxInt if there are no unresolved branches).
+ int next_veneer_pool_check_;
+
+ private:
+ // If a veneer is emitted for a branch instruction, that instruction must be
+ // removed from the associated label's link chain so that the assembler does
+ // not later attempt (likely unsuccessfully) to patch it to branch directly to
+ // the label.
+ void DeleteUnresolvedBranchInfoForLabel(Label* label);
+
+ private:
+ PositionsRecorder positions_recorder_;
+ friend class PositionsRecorder;
+ friend class EnsureSpace;
+};
+
+class PatchingAssembler : public Assembler {
+ public:
+ // Create an Assembler with a buffer starting at 'start'.
+ // The buffer size is
+ // size of instructions to patch + kGap
+  // where kGap is the margin the Assembler keeps at the end of the buffer
+  // before it would try to grow it.
+ // If more or fewer instructions than expected are generated or if some
+ // relocation information takes space in the buffer, the PatchingAssembler
+ // will crash trying to grow the buffer.
+ PatchingAssembler(Instruction* start, unsigned count)
+ : Assembler(NULL,
+ reinterpret_cast<byte*>(start),
+ count * kInstructionSize + kGap) {
+ StartBlockPools();
+ }
+
+ PatchingAssembler(byte* start, unsigned count)
+ : Assembler(NULL, start, count * kInstructionSize + kGap) {
+ // Block constant pool emission.
+ StartBlockPools();
+ }
+
+ ~PatchingAssembler() {
+ // Const pool should still be blocked.
+ ASSERT(is_const_pool_blocked());
+ EndBlockPools();
+    // Verify we have generated the number of instructions we expected.
+ ASSERT((pc_offset() + kGap) == buffer_size_);
+ // Verify no relocation information has been emitted.
+ ASSERT(num_pending_reloc_info() == 0);
+ // Flush the Instruction cache.
+ size_t length = buffer_size_ - kGap;
+ CPU::FlushICache(buffer_, length);
+ }
+};
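+
+// Usage sketch (illustrative only): patch a single existing instruction in
+// place; 'target' is an assumed Instruction* pointing at the instruction to
+// overwrite. The destructor checks the instruction count and flushes the
+// instruction cache.
+//
+//   PatchingAssembler patcher(target, 1);
+//   patcher.brk(0);          // Replace the instruction with a breakpoint.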
+
+
+class EnsureSpace BASE_EMBEDDED {
+ public:
+ explicit EnsureSpace(Assembler* assembler) {
+ assembler->CheckBuffer();
+ }
+};
+
+} } // namespace v8::internal
+
+#endif // V8_ARM64_ASSEMBLER_ARM64_H_
diff --git a/deps/v8/src/arm64/builtins-arm64.cc b/deps/v8/src/arm64/builtins-arm64.cc
new file mode 100644
index 0000000000..01ac4cc5db
--- /dev/null
+++ b/deps/v8/src/arm64/builtins-arm64.cc
@@ -0,0 +1,1562 @@
+// Copyright 2013 the V8 project authors. All rights reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following
+// disclaimer in the documentation and/or other materials provided
+// with the distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived
+// from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+#include "v8.h"
+
+#if V8_TARGET_ARCH_ARM64
+
+#include "codegen.h"
+#include "debug.h"
+#include "deoptimizer.h"
+#include "full-codegen.h"
+#include "runtime.h"
+#include "stub-cache.h"
+
+namespace v8 {
+namespace internal {
+
+
+#define __ ACCESS_MASM(masm)
+
+
+// Load the built-in Array function from the current context.
+static void GenerateLoadArrayFunction(MacroAssembler* masm, Register result) {
+ // Load the native context.
+ __ Ldr(result, GlobalObjectMemOperand());
+ __ Ldr(result,
+ FieldMemOperand(result, GlobalObject::kNativeContextOffset));
+  // Load the Array function from the native context.
+ __ Ldr(result,
+ MemOperand(result,
+ Context::SlotOffset(Context::ARRAY_FUNCTION_INDEX)));
+}
+
+
+// Load the built-in InternalArray function from the current context.
+static void GenerateLoadInternalArrayFunction(MacroAssembler* masm,
+ Register result) {
+ // Load the native context.
+ __ Ldr(result, GlobalObjectMemOperand());
+ __ Ldr(result,
+ FieldMemOperand(result, GlobalObject::kNativeContextOffset));
+ // Load the InternalArray function from the native context.
+ __ Ldr(result, ContextMemOperand(result,
+ Context::INTERNAL_ARRAY_FUNCTION_INDEX));
+}
+
+
+void Builtins::Generate_Adaptor(MacroAssembler* masm,
+ CFunctionId id,
+ BuiltinExtraArguments extra_args) {
+ // ----------- S t a t e -------------
+ // -- x0 : number of arguments excluding receiver
+ // -- x1 : called function (only guaranteed when
+ // extra_args requires it)
+ // -- cp : context
+ // -- sp[0] : last argument
+ // -- ...
+  // -- sp[8 * (argc - 1)] : first argument (argc == x0)
+  // -- sp[8 * argc] : receiver
+ // -----------------------------------
+
+ // Insert extra arguments.
+ int num_extra_args = 0;
+ if (extra_args == NEEDS_CALLED_FUNCTION) {
+ num_extra_args = 1;
+ __ Push(x1);
+ } else {
+ ASSERT(extra_args == NO_EXTRA_ARGUMENTS);
+ }
+
+ // JumpToExternalReference expects x0 to contain the number of arguments
+ // including the receiver and the extra arguments.
+ __ Add(x0, x0, num_extra_args + 1);
+ __ JumpToExternalReference(ExternalReference(id, masm->isolate()));
+}
+
+
+void Builtins::Generate_InternalArrayCode(MacroAssembler* masm) {
+ // ----------- S t a t e -------------
+ // -- x0 : number of arguments
+ // -- lr : return address
+ // -- sp[...]: constructor arguments
+ // -----------------------------------
+ ASM_LOCATION("Builtins::Generate_InternalArrayCode");
+ Label generic_array_code;
+
+ // Get the InternalArray function.
+ GenerateLoadInternalArrayFunction(masm, x1);
+
+ if (FLAG_debug_code) {
+    // The initial map for the builtin InternalArray function should be a map.
+ __ Ldr(x10, FieldMemOperand(x1, JSFunction::kPrototypeOrInitialMapOffset));
+ __ Tst(x10, kSmiTagMask);
+ __ Assert(ne, kUnexpectedInitialMapForInternalArrayFunction);
+ __ CompareObjectType(x10, x11, x12, MAP_TYPE);
+ __ Assert(eq, kUnexpectedInitialMapForInternalArrayFunction);
+ }
+
+ // Run the native code for the InternalArray function called as a normal
+ // function.
+ InternalArrayConstructorStub stub(masm->isolate());
+ __ TailCallStub(&stub);
+}
+
+
+void Builtins::Generate_ArrayCode(MacroAssembler* masm) {
+ // ----------- S t a t e -------------
+ // -- x0 : number of arguments
+ // -- lr : return address
+ // -- sp[...]: constructor arguments
+ // -----------------------------------
+ ASM_LOCATION("Builtins::Generate_ArrayCode");
+ Label generic_array_code, one_or_more_arguments, two_or_more_arguments;
+
+ // Get the Array function.
+ GenerateLoadArrayFunction(masm, x1);
+
+ if (FLAG_debug_code) {
+    // The initial map for the builtin Array function should be a map.
+ __ Ldr(x10, FieldMemOperand(x1, JSFunction::kPrototypeOrInitialMapOffset));
+ __ Tst(x10, kSmiTagMask);
+ __ Assert(ne, kUnexpectedInitialMapForArrayFunction);
+ __ CompareObjectType(x10, x11, x12, MAP_TYPE);
+ __ Assert(eq, kUnexpectedInitialMapForArrayFunction);
+ }
+
+ // Run the native code for the Array function called as a normal function.
+ __ LoadRoot(x2, Heap::kUndefinedValueRootIndex);
+ ArrayConstructorStub stub(masm->isolate());
+ __ TailCallStub(&stub);
+}
+
+
+void Builtins::Generate_StringConstructCode(MacroAssembler* masm) {
+ // ----------- S t a t e -------------
+ // -- x0 : number of arguments
+ // -- x1 : constructor function
+ // -- lr : return address
+ // -- sp[(argc - n - 1) * 8] : arg[n] (zero based)
+ // -- sp[argc * 8] : receiver
+ // -----------------------------------
+ ASM_LOCATION("Builtins::Generate_StringConstructCode");
+ Counters* counters = masm->isolate()->counters();
+ __ IncrementCounter(counters->string_ctor_calls(), 1, x10, x11);
+
+ Register argc = x0;
+ Register function = x1;
+ if (FLAG_debug_code) {
+ __ LoadGlobalFunction(Context::STRING_FUNCTION_INDEX, x10);
+ __ Cmp(function, x10);
+ __ Assert(eq, kUnexpectedStringFunction);
+ }
+
+  // Load the first argument into x0 and get rid of the rest.
+ Label no_arguments;
+ __ Cbz(argc, &no_arguments);
+  // First arg = sp[(argc - 1) * 8].
+ __ Sub(argc, argc, 1);
+ __ Claim(argc, kXRegSize);
+  // jssp now points to args[0]; load and drop args[0] and the receiver.
+ Register arg = argc;
+ __ Ldr(arg, MemOperand(jssp, 2 * kPointerSize, PostIndex));
+ argc = NoReg;
+
+ Register argument = x2;
+ Label not_cached, argument_is_string;
+ __ LookupNumberStringCache(arg, // Input.
+ argument, // Result.
+ x10, // Scratch.
+ x11, // Scratch.
+ x12, // Scratch.
+ &not_cached);
+ __ IncrementCounter(counters->string_ctor_cached_number(), 1, x10, x11);
+ __ Bind(&argument_is_string);
+
+ // ----------- S t a t e -------------
+ // -- x2 : argument converted to string
+ // -- x1 : constructor function
+ // -- lr : return address
+ // -----------------------------------
+
+ Label gc_required;
+ Register new_obj = x0;
+ __ Allocate(JSValue::kSize, new_obj, x10, x11, &gc_required, TAG_OBJECT);
+
+ // Initialize the String object.
+ Register map = x3;
+ __ LoadGlobalFunctionInitialMap(function, map, x10);
+ if (FLAG_debug_code) {
+ __ Ldrb(x4, FieldMemOperand(map, Map::kInstanceSizeOffset));
+ __ Cmp(x4, JSValue::kSize >> kPointerSizeLog2);
+ __ Assert(eq, kUnexpectedStringWrapperInstanceSize);
+ __ Ldrb(x4, FieldMemOperand(map, Map::kUnusedPropertyFieldsOffset));
+ __ Cmp(x4, 0);
+ __ Assert(eq, kUnexpectedUnusedPropertiesOfStringWrapper);
+ }
+ __ Str(map, FieldMemOperand(new_obj, HeapObject::kMapOffset));
+
+ Register empty = x3;
+ __ LoadRoot(empty, Heap::kEmptyFixedArrayRootIndex);
+ __ Str(empty, FieldMemOperand(new_obj, JSObject::kPropertiesOffset));
+ __ Str(empty, FieldMemOperand(new_obj, JSObject::kElementsOffset));
+
+ __ Str(argument, FieldMemOperand(new_obj, JSValue::kValueOffset));
+
+ // Ensure the object is fully initialized.
+ STATIC_ASSERT(JSValue::kSize == (4 * kPointerSize));
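+  // The four pointer-sized fields are the map, the properties and elements
+  // pointers and the value slot, all of which have been written above, so no
+  // filler is needed.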
+
+ __ Ret();
+
+ // The argument was not found in the number to string cache. Check
+ // if it's a string already before calling the conversion builtin.
+ Label convert_argument;
+ __ Bind(&not_cached);
+ __ JumpIfSmi(arg, &convert_argument);
+
+ // Is it a String?
+ __ Ldr(x10, FieldMemOperand(x0, HeapObject::kMapOffset));
+ __ Ldrb(x11, FieldMemOperand(x10, Map::kInstanceTypeOffset));
+ __ Tbnz(x11, MaskToBit(kIsNotStringMask), &convert_argument);
+ __ Mov(argument, arg);
+ __ IncrementCounter(counters->string_ctor_string_value(), 1, x10, x11);
+ __ B(&argument_is_string);
+
+ // Invoke the conversion builtin and put the result into x2.
+ __ Bind(&convert_argument);
+ __ Push(function); // Preserve the function.
+ __ IncrementCounter(counters->string_ctor_conversions(), 1, x10, x11);
+ {
+ FrameScope scope(masm, StackFrame::INTERNAL);
+ __ Push(arg);
+ __ InvokeBuiltin(Builtins::TO_STRING, CALL_FUNCTION);
+ }
+ __ Pop(function);
+ __ Mov(argument, x0);
+ __ B(&argument_is_string);
+
+ // Load the empty string into x2, remove the receiver from the
+ // stack, and jump back to the case where the argument is a string.
+ __ Bind(&no_arguments);
+ __ LoadRoot(argument, Heap::kempty_stringRootIndex);
+ __ Drop(1);
+ __ B(&argument_is_string);
+
+ // At this point the argument is already a string. Call runtime to create a
+ // string wrapper.
+ __ Bind(&gc_required);
+ __ IncrementCounter(counters->string_ctor_gc_required(), 1, x10, x11);
+ {
+ FrameScope scope(masm, StackFrame::INTERNAL);
+ __ Push(argument);
+ __ CallRuntime(Runtime::kNewStringWrapper, 1);
+ }
+ __ Ret();
+}
+
+
+static void CallRuntimePassFunction(MacroAssembler* masm,
+ Runtime::FunctionId function_id) {
+ FrameScope scope(masm, StackFrame::INTERNAL);
+ // - Push a copy of the function onto the stack.
+ // - Push another copy as a parameter to the runtime call.
+ __ Push(x1, x1);
+
+ __ CallRuntime(function_id, 1);
+
+  // - Restore the function.
+ __ Pop(x1);
+}
+
+
+static void GenerateTailCallToSharedCode(MacroAssembler* masm) {
+ __ Ldr(x2, FieldMemOperand(x1, JSFunction::kSharedFunctionInfoOffset));
+ __ Ldr(x2, FieldMemOperand(x2, SharedFunctionInfo::kCodeOffset));
+ __ Add(x2, x2, Code::kHeaderSize - kHeapObjectTag);
+ __ Br(x2);
+}
+
+
+static void GenerateTailCallToReturnedCode(MacroAssembler* masm) {
+ __ Add(x0, x0, Code::kHeaderSize - kHeapObjectTag);
+ __ Br(x0);
+}
+
+
+void Builtins::Generate_InOptimizationQueue(MacroAssembler* masm) {
+ // Checking whether the queued function is ready for install is optional,
+ // since we come across interrupts and stack checks elsewhere. However, not
+ // checking may delay installing ready functions, and always checking would be
+  // quite expensive. A good compromise is to first check against the stack
+  // limit as a cue for an interrupt signal.
+ Label ok;
+ __ CompareRoot(masm->StackPointer(), Heap::kStackLimitRootIndex);
+ __ B(hs, &ok);
+
+ CallRuntimePassFunction(masm, Runtime::kHiddenTryInstallOptimizedCode);
+ GenerateTailCallToReturnedCode(masm);
+
+ __ Bind(&ok);
+ GenerateTailCallToSharedCode(masm);
+}
+
+
+static void Generate_JSConstructStubHelper(MacroAssembler* masm,
+ bool is_api_function,
+ bool count_constructions,
+ bool create_memento) {
+ // ----------- S t a t e -------------
+ // -- x0 : number of arguments
+ // -- x1 : constructor function
+ // -- x2 : allocation site or undefined
+ // -- lr : return address
+ // -- sp[...]: constructor arguments
+ // -----------------------------------
+
+ ASM_LOCATION("Builtins::Generate_JSConstructStubHelper");
+ // Should never count constructions for api objects.
+ ASSERT(!is_api_function || !count_constructions);
+ // Should never create mementos for api functions.
+ ASSERT(!is_api_function || !create_memento);
+ // Should never create mementos before slack tracking is finished.
+ ASSERT(!count_constructions || !create_memento);
+
+ Isolate* isolate = masm->isolate();
+
+ // Enter a construct frame.
+ {
+ FrameScope scope(masm, StackFrame::CONSTRUCT);
+
+ // Preserve the three incoming parameters on the stack.
+ if (create_memento) {
+ __ AssertUndefinedOrAllocationSite(x2, x10);
+ __ Push(x2);
+ }
+
+ Register argc = x0;
+ Register constructor = x1;
+ // x1: constructor function
+ __ SmiTag(argc);
+ __ Push(argc, constructor);
+ // sp[0] : Constructor function.
+ // sp[1]: number of arguments (smi-tagged)
+
+ // Try to allocate the object without transitioning into C code. If any of
+ // the preconditions is not met, the code bails out to the runtime call.
+ Label rt_call, allocated;
+ if (FLAG_inline_new) {
+ Label undo_allocation;
+#if ENABLE_DEBUGGER_SUPPORT
+ ExternalReference debug_step_in_fp =
+ ExternalReference::debug_step_in_fp_address(isolate);
+ __ Mov(x2, Operand(debug_step_in_fp));
+ __ Ldr(x2, MemOperand(x2));
+ __ Cbnz(x2, &rt_call);
+#endif
+ // Load the initial map and verify that it is in fact a map.
+ Register init_map = x2;
+ __ Ldr(init_map,
+ FieldMemOperand(constructor,
+ JSFunction::kPrototypeOrInitialMapOffset));
+ __ JumpIfSmi(init_map, &rt_call);
+ __ JumpIfNotObjectType(init_map, x10, x11, MAP_TYPE, &rt_call);
+
+      // Check that the constructor is not constructing a JSFunction (see
+      // comments in Runtime_NewObject in runtime.cc); in that case the initial
+      // map's instance type would be JS_FUNCTION_TYPE.
+ __ CompareInstanceType(init_map, x10, JS_FUNCTION_TYPE);
+ __ B(eq, &rt_call);
+
+ if (count_constructions) {
+ Label allocate;
+ // Decrease generous allocation count.
+ __ Ldr(x3, FieldMemOperand(constructor,
+ JSFunction::kSharedFunctionInfoOffset));
+ MemOperand constructor_count =
+ FieldMemOperand(x3, SharedFunctionInfo::kConstructionCountOffset);
+ __ Ldrb(x4, constructor_count);
+ __ Subs(x4, x4, 1);
+ __ Strb(x4, constructor_count);
+ __ B(ne, &allocate);
+
+ // Push the constructor and map to the stack, and the constructor again
+ // as argument to the runtime call.
+ __ Push(constructor, init_map, constructor);
+ // The call will replace the stub, so the countdown is only done once.
+ __ CallRuntime(Runtime::kHiddenFinalizeInstanceSize, 1);
+ __ Pop(init_map, constructor);
+ __ Bind(&allocate);
+ }
+
+ // Now allocate the JSObject on the heap.
+ Register obj_size = x3;
+ Register new_obj = x4;
+ __ Ldrb(obj_size, FieldMemOperand(init_map, Map::kInstanceSizeOffset));
+ if (create_memento) {
+ __ Add(x7, obj_size,
+ Operand(AllocationMemento::kSize / kPointerSize));
+ __ Allocate(x7, new_obj, x10, x11, &rt_call, SIZE_IN_WORDS);
+ } else {
+ __ Allocate(obj_size, new_obj, x10, x11, &rt_call, SIZE_IN_WORDS);
+ }
+
+ // Allocated the JSObject, now initialize the fields. Map is set to
+ // initial map and properties and elements are set to empty fixed array.
+ // NB. the object pointer is not tagged, so MemOperand is used.
+ Register empty = x5;
+ __ LoadRoot(empty, Heap::kEmptyFixedArrayRootIndex);
+ __ Str(init_map, MemOperand(new_obj, JSObject::kMapOffset));
+ STATIC_ASSERT(JSObject::kElementsOffset ==
+ (JSObject::kPropertiesOffset + kPointerSize));
+ __ Stp(empty, empty, MemOperand(new_obj, JSObject::kPropertiesOffset));
+
+ Register first_prop = x5;
+ __ Add(first_prop, new_obj, JSObject::kHeaderSize);
+
+ // Fill all of the in-object properties with the appropriate filler.
+ Register undef = x7;
+ __ LoadRoot(undef, Heap::kUndefinedValueRootIndex);
+
+ // Obtain number of pre-allocated property fields and in-object
+ // properties.
+ Register prealloc_fields = x10;
+ Register inobject_props = x11;
+ Register inst_sizes = x11;
+ __ Ldr(inst_sizes, FieldMemOperand(init_map, Map::kInstanceSizesOffset));
+ __ Ubfx(prealloc_fields, inst_sizes,
+ Map::kPreAllocatedPropertyFieldsByte * kBitsPerByte,
+ kBitsPerByte);
+ __ Ubfx(inobject_props, inst_sizes,
+ Map::kInObjectPropertiesByte * kBitsPerByte, kBitsPerByte);
+
+ // Calculate number of property fields in the object.
+ Register prop_fields = x6;
+ __ Sub(prop_fields, obj_size, JSObject::kHeaderSize / kPointerSize);
+
+ if (count_constructions) {
+ // Fill the pre-allocated fields with undef.
+ __ FillFields(first_prop, prealloc_fields, undef);
+
+        // Register first_non_prealloc holds the address of the first field
+        // after the pre-allocated fields.
+ Register first_non_prealloc = x12;
+ __ Add(first_non_prealloc, first_prop,
+ Operand(prealloc_fields, LSL, kPointerSizeLog2));
+
+ first_prop = NoReg;
+
+ if (FLAG_debug_code) {
+ Register obj_end = x5;
+ __ Add(obj_end, new_obj, Operand(obj_size, LSL, kPointerSizeLog2));
+ __ Cmp(first_non_prealloc, obj_end);
+ __ Assert(le, kUnexpectedNumberOfPreAllocatedPropertyFields);
+ }
+
+ // Fill the remaining fields with one pointer filler map.
+ Register one_pointer_filler = x5;
+ Register non_prealloc_fields = x6;
+ __ LoadRoot(one_pointer_filler, Heap::kOnePointerFillerMapRootIndex);
+ __ Sub(non_prealloc_fields, prop_fields, prealloc_fields);
+ __ FillFields(first_non_prealloc, non_prealloc_fields,
+ one_pointer_filler);
+ prop_fields = NoReg;
+ } else if (create_memento) {
+ // Fill the pre-allocated fields with undef.
+ __ FillFields(first_prop, prop_fields, undef);
+ __ Add(first_prop, new_obj, Operand(obj_size, LSL, kPointerSizeLog2));
+ __ LoadRoot(x14, Heap::kAllocationMementoMapRootIndex);
+ ASSERT_EQ(0 * kPointerSize, AllocationMemento::kMapOffset);
+ __ Str(x14, MemOperand(first_prop, kPointerSize, PostIndex));
+ // Load the AllocationSite
+ __ Peek(x14, 2 * kXRegSize);
+ ASSERT_EQ(1 * kPointerSize, AllocationMemento::kAllocationSiteOffset);
+ __ Str(x14, MemOperand(first_prop, kPointerSize, PostIndex));
+ first_prop = NoReg;
+ } else {
+ // Fill all of the property fields with undef.
+ __ FillFields(first_prop, prop_fields, undef);
+ first_prop = NoReg;
+ prop_fields = NoReg;
+ }
+
+ // Add the object tag to make the JSObject real, so that we can continue
+ // and jump into the continuation code at any time from now on. Any
+ // failures need to undo the allocation, so that the heap is in a
+ // consistent state and verifiable.
+ __ Add(new_obj, new_obj, kHeapObjectTag);
+
+ // Check if a non-empty properties array is needed. Continue with
+ // allocated object if not, or fall through to runtime call if it is.
+ Register element_count = x3;
+ __ Ldrb(element_count,
+ FieldMemOperand(init_map, Map::kUnusedPropertyFieldsOffset));
+      // The instance sizes field of the map contains both the number of
+      // pre-allocated property fields and the number of in-object properties.
+ __ Add(element_count, element_count, prealloc_fields);
+ __ Subs(element_count, element_count, inobject_props);
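+      // The result is the number of property slots that do not fit in the
+      // object itself and therefore need a separate properties backing store.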
+
+ // Done if no extra properties are to be allocated.
+ __ B(eq, &allocated);
+ __ Assert(pl, kPropertyAllocationCountFailed);
+
+ // Scale the number of elements by pointer size and add the header for
+ // FixedArrays to the start of the next object calculation from above.
+ Register new_array = x5;
+ Register array_size = x6;
+ __ Add(array_size, element_count, FixedArray::kHeaderSize / kPointerSize);
+ __ Allocate(array_size, new_array, x11, x12, &undo_allocation,
+ static_cast<AllocationFlags>(RESULT_CONTAINS_TOP |
+ SIZE_IN_WORDS));
+
+ Register array_map = x10;
+ __ LoadRoot(array_map, Heap::kFixedArrayMapRootIndex);
+ __ Str(array_map, MemOperand(new_array, FixedArray::kMapOffset));
+ __ SmiTag(x0, element_count);
+ __ Str(x0, MemOperand(new_array, FixedArray::kLengthOffset));
+
+ // Initialize the fields to undefined.
+ Register elements = x10;
+ __ Add(elements, new_array, FixedArray::kHeaderSize);
+ __ FillFields(elements, element_count, undef);
+
+ // Store the initialized FixedArray into the properties field of the
+ // JSObject.
+ __ Add(new_array, new_array, kHeapObjectTag);
+ __ Str(new_array, FieldMemOperand(new_obj, JSObject::kPropertiesOffset));
+
+ // Continue with JSObject being successfully allocated.
+ __ B(&allocated);
+
+ // Undo the setting of the new top so that the heap is verifiable. For
+ // example, the map's unused properties potentially do not match the
+      // allocated object's unused properties.
+ __ Bind(&undo_allocation);
+ __ UndoAllocationInNewSpace(new_obj, x14);
+ }
+
+ // Allocate the new receiver object using the runtime call.
+ __ Bind(&rt_call);
+ Label count_incremented;
+ if (create_memento) {
+ // Get the cell or allocation site.
+ __ Peek(x4, 2 * kXRegSize);
+ __ Push(x4);
+ __ Push(constructor); // Argument for Runtime_NewObject.
+ __ CallRuntime(Runtime::kHiddenNewObjectWithAllocationSite, 2);
+ __ Mov(x4, x0);
+      // If we ended up using the runtime, and we want a memento, then the
+      // runtime call made it for us, and we shouldn't increment the create
+      // count.
+ __ jmp(&count_incremented);
+ } else {
+ __ Push(constructor); // Argument for Runtime_NewObject.
+ __ CallRuntime(Runtime::kHiddenNewObject, 1);
+ __ Mov(x4, x0);
+ }
+
+ // Receiver for constructor call allocated.
+ // x4: JSObject
+ __ Bind(&allocated);
+
+ if (create_memento) {
+ __ Peek(x10, 2 * kXRegSize);
+ __ JumpIfRoot(x10, Heap::kUndefinedValueRootIndex, &count_incremented);
+      // x10 is an AllocationSite. We are creating a memento from it, so we
+ // need to increment the memento create count.
+ __ Ldr(x5, FieldMemOperand(x10,
+ AllocationSite::kPretenureCreateCountOffset));
+ __ Add(x5, x5, Operand(Smi::FromInt(1)));
+ __ Str(x5, FieldMemOperand(x10,
+ AllocationSite::kPretenureCreateCountOffset));
+ __ bind(&count_incremented);
+ }
+
+ __ Push(x4, x4);
+
+ // Reload the number of arguments from the stack.
+ // Set it up in x0 for the function call below.
+ // jssp[0]: receiver
+ // jssp[1]: receiver
+ // jssp[2]: constructor function
+ // jssp[3]: number of arguments (smi-tagged)
+ __ Peek(constructor, 2 * kXRegSize); // Load constructor.
+ __ Peek(argc, 3 * kXRegSize); // Load number of arguments.
+ __ SmiUntag(argc);
+
+ // Set up pointer to last argument.
+ __ Add(x2, fp, StandardFrameConstants::kCallerSPOffset);
+
+ // Copy arguments and receiver to the expression stack.
+    // Copy two values per iteration so that ldp/stp can be used.
+ // x0: number of arguments
+ // x1: constructor function
+ // x2: address of last argument (caller sp)
+ // jssp[0]: receiver
+ // jssp[1]: receiver
+ // jssp[2]: constructor function
+ // jssp[3]: number of arguments (smi-tagged)
+ // Compute the start address of the copy in x3.
+ __ Add(x3, x2, Operand(argc, LSL, kPointerSizeLog2));
+ Label loop, entry, done_copying_arguments;
+ __ B(&entry);
+ __ Bind(&loop);
+ __ Ldp(x10, x11, MemOperand(x3, -2 * kPointerSize, PreIndex));
+ __ Push(x11, x10);
+ __ Bind(&entry);
+ __ Cmp(x3, x2);
+ __ B(gt, &loop);
+ // Because we copied values 2 by 2 we may have copied one extra value.
+ // Drop it if that is the case.
+ __ B(eq, &done_copying_arguments);
+ __ Drop(1);
+ __ Bind(&done_copying_arguments);
+
+ // Call the function.
+ // x0: number of arguments
+ // x1: constructor function
+ if (is_api_function) {
+ __ Ldr(cp, FieldMemOperand(constructor, JSFunction::kContextOffset));
+ Handle<Code> code =
+ masm->isolate()->builtins()->HandleApiCallConstruct();
+ __ Call(code, RelocInfo::CODE_TARGET);
+ } else {
+ ParameterCount actual(argc);
+ __ InvokeFunction(constructor, actual, CALL_FUNCTION, NullCallWrapper());
+ }
+
+ // Store offset of return address for deoptimizer.
+ if (!is_api_function && !count_constructions) {
+ masm->isolate()->heap()->SetConstructStubDeoptPCOffset(masm->pc_offset());
+ }
+
+ // Restore the context from the frame.
+ // x0: result
+ // jssp[0]: receiver
+ // jssp[1]: constructor function
+ // jssp[2]: number of arguments (smi-tagged)
+ __ Ldr(cp, MemOperand(fp, StandardFrameConstants::kContextOffset));
+
+ // If the result is an object (in the ECMA sense), we should get rid
+ // of the receiver and use the result; see ECMA-262 section 13.2.2-7
+ // on page 74.
+ Label use_receiver, exit;
+
+ // If the result is a smi, it is *not* an object in the ECMA sense.
+ // x0: result
+ // jssp[0]: receiver (newly allocated object)
+ // jssp[1]: constructor function
+ // jssp[2]: number of arguments (smi-tagged)
+ __ JumpIfSmi(x0, &use_receiver);
+
+ // If the type of the result (stored in its map) is less than
+ // FIRST_SPEC_OBJECT_TYPE, it is not an object in the ECMA sense.
+ __ JumpIfObjectType(x0, x1, x3, FIRST_SPEC_OBJECT_TYPE, &exit, ge);
+
+ // Throw away the result of the constructor invocation and use the
+ // on-stack receiver as the result.
+ __ Bind(&use_receiver);
+ __ Peek(x0, 0);
+
+ // Remove the receiver from the stack, remove caller arguments, and
+ // return.
+ __ Bind(&exit);
+ // x0: result
+ // jssp[0]: receiver (newly allocated object)
+ // jssp[1]: constructor function
+ // jssp[2]: number of arguments (smi-tagged)
+ __ Peek(x1, 2 * kXRegSize);
+
+ // Leave construct frame.
+ }
+
+  __ DropBySMI(x1);  // Drop the caller arguments; x1 is the smi-tagged argc.
+  __ Drop(1);        // Drop the receiver.
+ __ IncrementCounter(isolate->counters()->constructed_objects(), 1, x1, x2);
+ __ Ret();
+}
+
+
+void Builtins::Generate_JSConstructStubCountdown(MacroAssembler* masm) {
+ Generate_JSConstructStubHelper(masm, false, true, false);
+}
+
+
+void Builtins::Generate_JSConstructStubGeneric(MacroAssembler* masm) {
+ Generate_JSConstructStubHelper(masm, false, false, FLAG_pretenuring_call_new);
+}
+
+
+void Builtins::Generate_JSConstructStubApi(MacroAssembler* masm) {
+ Generate_JSConstructStubHelper(masm, true, false, false);
+}
+
+
+// Input:
+// x0: code entry.
+// x1: function.
+// x2: receiver.
+// x3: argc.
+// x4: argv.
+// Output:
+// x0: result.
+static void Generate_JSEntryTrampolineHelper(MacroAssembler* masm,
+ bool is_construct) {
+ // Called from JSEntryStub::GenerateBody().
+ Register function = x1;
+ Register receiver = x2;
+ Register argc = x3;
+ Register argv = x4;
+
+ ProfileEntryHookStub::MaybeCallEntryHook(masm);
+
+ // Clear the context before we push it when entering the internal frame.
+ __ Mov(cp, 0);
+
+ {
+ // Enter an internal frame.
+ FrameScope scope(masm, StackFrame::INTERNAL);
+
+ // Set up the context from the function argument.
+ __ Ldr(cp, FieldMemOperand(function, JSFunction::kContextOffset));
+
+ __ InitializeRootRegister();
+
+ // Push the function and the receiver onto the stack.
+ __ Push(function, receiver);
+
+ // Copy arguments to the stack in a loop, in reverse order.
+ // x3: argc.
+ // x4: argv.
+ Label loop, entry;
+ // Compute the copy end address.
+ __ Add(x10, argv, Operand(argc, LSL, kPointerSizeLog2));
+
+ __ B(&entry);
+ __ Bind(&loop);
+ __ Ldr(x11, MemOperand(argv, kPointerSize, PostIndex));
+ __ Ldr(x12, MemOperand(x11)); // Dereference the handle.
+ __ Push(x12); // Push the argument.
+ __ Bind(&entry);
+ __ Cmp(x10, argv);
+ __ B(ne, &loop);
+
+ // Initialize all JavaScript callee-saved registers, since they will be seen
+ // by the garbage collector as part of handlers.
+ // The original values have been saved in JSEntryStub::GenerateBody().
+ __ LoadRoot(x19, Heap::kUndefinedValueRootIndex);
+ __ Mov(x20, x19);
+ __ Mov(x21, x19);
+ __ Mov(x22, x19);
+ __ Mov(x23, x19);
+ __ Mov(x24, x19);
+ __ Mov(x25, x19);
+ // Don't initialize the reserved registers.
+ // x26 : root register (root).
+ // x27 : context pointer (cp).
+ // x28 : JS stack pointer (jssp).
+ // x29 : frame pointer (fp).
+
+ __ Mov(x0, argc);
+ if (is_construct) {
+ // No type feedback cell is available.
+ __ LoadRoot(x2, Heap::kUndefinedValueRootIndex);
+
+ CallConstructStub stub(NO_CALL_FUNCTION_FLAGS);
+ __ CallStub(&stub);
+ } else {
+ ParameterCount actual(x0);
+ __ InvokeFunction(function, actual, CALL_FUNCTION, NullCallWrapper());
+ }
+ // Exit the JS internal frame and remove the parameters (except function),
+ // and return.
+ }
+
+ // Result is in x0. Return.
+ __ Ret();
+}
+
+
+void Builtins::Generate_JSEntryTrampoline(MacroAssembler* masm) {
+ Generate_JSEntryTrampolineHelper(masm, false);
+}
+
+
+void Builtins::Generate_JSConstructEntryTrampoline(MacroAssembler* masm) {
+ Generate_JSEntryTrampolineHelper(masm, true);
+}
+
+
+void Builtins::Generate_CompileUnoptimized(MacroAssembler* masm) {
+ CallRuntimePassFunction(masm, Runtime::kHiddenCompileUnoptimized);
+ GenerateTailCallToReturnedCode(masm);
+}
+
+
+static void CallCompileOptimized(MacroAssembler* masm, bool concurrent) {
+ FrameScope scope(masm, StackFrame::INTERNAL);
+ Register function = x1;
+
+ // Preserve function. At the same time, push arguments for
+ // kHiddenCompileOptimized.
+ __ LoadObject(x10, masm->isolate()->factory()->ToBoolean(concurrent));
+ __ Push(function, function, x10);
+
+ __ CallRuntime(Runtime::kHiddenCompileOptimized, 2);
+
+  // Restore the function.
+ __ Pop(function);
+}
+
+
+void Builtins::Generate_CompileOptimized(MacroAssembler* masm) {
+ CallCompileOptimized(masm, false);
+ GenerateTailCallToReturnedCode(masm);
+}
+
+
+void Builtins::Generate_CompileOptimizedConcurrent(MacroAssembler* masm) {
+ CallCompileOptimized(masm, true);
+ GenerateTailCallToReturnedCode(masm);
+}
+
+
+static void GenerateMakeCodeYoungAgainCommon(MacroAssembler* masm) {
+ // For now, we are relying on the fact that make_code_young doesn't do any
+ // garbage collection which allows us to save/restore the registers without
+ // worrying about which of them contain pointers. We also don't build an
+ // internal frame to make the code fast, since we shouldn't have to do stack
+ // crawls in MakeCodeYoung. This seems a bit fragile.
+
+ // The following caller-saved registers must be saved and restored when
+ // calling through to the runtime:
+ // x0 - The address from which to resume execution.
+ // x1 - isolate
+ // lr - The return address for the JSFunction itself. It has not yet been
+ // preserved on the stack because the frame setup code was replaced
+ // with a call to this stub, to handle code ageing.
+ {
+ FrameScope scope(masm, StackFrame::MANUAL);
+ __ Push(x0, x1, fp, lr);
+ __ Mov(x1, ExternalReference::isolate_address(masm->isolate()));
+ __ CallCFunction(
+ ExternalReference::get_make_code_young_function(masm->isolate()), 2);
+ __ Pop(lr, fp, x1, x0);
+ }
+
+ // The calling function has been made young again, so return to execute the
+ // real frame set-up code.
+ __ Br(x0);
+}
+
+#define DEFINE_CODE_AGE_BUILTIN_GENERATOR(C) \
+void Builtins::Generate_Make##C##CodeYoungAgainEvenMarking( \
+ MacroAssembler* masm) { \
+ GenerateMakeCodeYoungAgainCommon(masm); \
+} \
+void Builtins::Generate_Make##C##CodeYoungAgainOddMarking( \
+ MacroAssembler* masm) { \
+ GenerateMakeCodeYoungAgainCommon(masm); \
+}
+CODE_AGE_LIST(DEFINE_CODE_AGE_BUILTIN_GENERATOR)
+#undef DEFINE_CODE_AGE_BUILTIN_GENERATOR
+
+
+void Builtins::Generate_MarkCodeAsExecutedOnce(MacroAssembler* masm) {
+ // For now, as in GenerateMakeCodeYoungAgainCommon, we are relying on the fact
+ // that make_code_young doesn't do any garbage collection which allows us to
+ // save/restore the registers without worrying about which of them contain
+ // pointers.
+
+ // The following caller-saved registers must be saved and restored when
+ // calling through to the runtime:
+ // x0 - The address from which to resume execution.
+ // x1 - isolate
+ // lr - The return address for the JSFunction itself. It has not yet been
+ // preserved on the stack because the frame setup code was replaced
+ // with a call to this stub, to handle code ageing.
+ {
+ FrameScope scope(masm, StackFrame::MANUAL);
+ __ Push(x0, x1, fp, lr);
+ __ Mov(x1, ExternalReference::isolate_address(masm->isolate()));
+ __ CallCFunction(
+ ExternalReference::get_mark_code_as_executed_function(
+ masm->isolate()), 2);
+ __ Pop(lr, fp, x1, x0);
+
+ // Perform prologue operations usually performed by the young code stub.
+ __ EmitFrameSetupForCodeAgePatching(masm);
+ }
+
+ // Jump to point after the code-age stub.
+ __ Add(x0, x0, kCodeAgeSequenceSize);
+ __ Br(x0);
+}
+
+
+void Builtins::Generate_MarkCodeAsExecutedTwice(MacroAssembler* masm) {
+ GenerateMakeCodeYoungAgainCommon(masm);
+}
+
+
+static void Generate_NotifyStubFailureHelper(MacroAssembler* masm,
+ SaveFPRegsMode save_doubles) {
+ {
+ FrameScope scope(masm, StackFrame::INTERNAL);
+
+ // Preserve registers across notification, this is important for compiled
+ // stubs that tail call the runtime on deopts passing their parameters in
+ // registers.
+ // TODO(jbramley): Is it correct (and appropriate) to use safepoint
+ // registers here? According to the comment above, we should only need to
+ // preserve the registers with parameters.
+ __ PushXRegList(kSafepointSavedRegisters);
+ // Pass the function and deoptimization type to the runtime system.
+ __ CallRuntime(Runtime::kHiddenNotifyStubFailure, 0, save_doubles);
+ __ PopXRegList(kSafepointSavedRegisters);
+ }
+
+ // Ignore state (pushed by Deoptimizer::EntryGenerator::Generate).
+ __ Drop(1);
+
+ // Jump to the miss handler. Deoptimizer::EntryGenerator::Generate loads this
+ // into lr before it jumps here.
+ __ Br(lr);
+}
+
+
+void Builtins::Generate_NotifyStubFailure(MacroAssembler* masm) {
+ Generate_NotifyStubFailureHelper(masm, kDontSaveFPRegs);
+}
+
+
+void Builtins::Generate_NotifyStubFailureSaveDoubles(MacroAssembler* masm) {
+ Generate_NotifyStubFailureHelper(masm, kSaveFPRegs);
+}
+
+
+static void Generate_NotifyDeoptimizedHelper(MacroAssembler* masm,
+ Deoptimizer::BailoutType type) {
+ {
+ FrameScope scope(masm, StackFrame::INTERNAL);
+ // Pass the deoptimization type to the runtime system.
+ __ Mov(x0, Smi::FromInt(static_cast<int>(type)));
+ __ Push(x0);
+ __ CallRuntime(Runtime::kHiddenNotifyDeoptimized, 1);
+ }
+
+ // Get the full codegen state from the stack and untag it.
+ Register state = x6;
+ __ Peek(state, 0);
+ __ SmiUntag(state);
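+  // The state is either NO_REGISTERS, in which case only the state itself
+  // needs to be dropped, or TOS_REG, in which case the saved top-of-stack
+  // value must also be reloaded into x0 before returning.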
+
+ // Switch on the state.
+ Label with_tos_register, unknown_state;
+ __ CompareAndBranch(
+ state, FullCodeGenerator::NO_REGISTERS, ne, &with_tos_register);
+ __ Drop(1); // Remove state.
+ __ Ret();
+
+ __ Bind(&with_tos_register);
+ // Reload TOS register.
+ __ Peek(x0, kPointerSize);
+ __ CompareAndBranch(state, FullCodeGenerator::TOS_REG, ne, &unknown_state);
+ __ Drop(2); // Remove state and TOS.
+ __ Ret();
+
+ __ Bind(&unknown_state);
+ __ Abort(kInvalidFullCodegenState);
+}
+
+
+void Builtins::Generate_NotifyDeoptimized(MacroAssembler* masm) {
+ Generate_NotifyDeoptimizedHelper(masm, Deoptimizer::EAGER);
+}
+
+
+void Builtins::Generate_NotifyLazyDeoptimized(MacroAssembler* masm) {
+ Generate_NotifyDeoptimizedHelper(masm, Deoptimizer::LAZY);
+}
+
+
+void Builtins::Generate_NotifySoftDeoptimized(MacroAssembler* masm) {
+ Generate_NotifyDeoptimizedHelper(masm, Deoptimizer::SOFT);
+}
+
+
+void Builtins::Generate_OnStackReplacement(MacroAssembler* masm) {
+ // Lookup the function in the JavaScript frame.
+ __ Ldr(x0, MemOperand(fp, JavaScriptFrameConstants::kFunctionOffset));
+ {
+ FrameScope scope(masm, StackFrame::INTERNAL);
+ // Pass function as argument.
+ __ Push(x0);
+ __ CallRuntime(Runtime::kCompileForOnStackReplacement, 1);
+ }
+
+ // If the code object is null, just return to the unoptimized code.
+ Label skip;
+ __ CompareAndBranch(x0, Smi::FromInt(0), ne, &skip);
+ __ Ret();
+
+ __ Bind(&skip);
+
+ // Load deoptimization data from the code object.
+ // <deopt_data> = <code>[#deoptimization_data_offset]
+ __ Ldr(x1, MemOperand(x0, Code::kDeoptimizationDataOffset - kHeapObjectTag));
+
+ // Load the OSR entrypoint offset from the deoptimization data.
+ // <osr_offset> = <deopt_data>[#header_size + #osr_pc_offset]
+ __ Ldrsw(w1, UntagSmiFieldMemOperand(x1, FixedArray::OffsetOfElementAt(
+ DeoptimizationInputData::kOsrPcOffsetIndex)));
+
+ // Compute the target address = code_obj + header_size + osr_offset
+ // <entry_addr> = <code_obj> + #header_size + <osr_offset>
+ __ Add(x0, x0, x1);
+ __ Add(lr, x0, Code::kHeaderSize - kHeapObjectTag);
+
+ // And "return" to the OSR entry point of the function.
+ __ Ret();
+}
+
+
+void Builtins::Generate_OsrAfterStackCheck(MacroAssembler* masm) {
+  // We check the stack limit as an indicator that recompilation might be
+  // done.
+ Label ok;
+ __ CompareRoot(jssp, Heap::kStackLimitRootIndex);
+ __ B(hs, &ok);
+ {
+ FrameScope scope(masm, StackFrame::INTERNAL);
+ __ CallRuntime(Runtime::kHiddenStackGuard, 0);
+ }
+ __ Jump(masm->isolate()->builtins()->OnStackReplacement(),
+ RelocInfo::CODE_TARGET);
+
+ __ Bind(&ok);
+ __ Ret();
+}
+
+
+void Builtins::Generate_FunctionCall(MacroAssembler* masm) {
+ enum {
+ call_type_JS_func = 0,
+ call_type_func_proxy = 1,
+ call_type_non_func = 2
+ };
+ Register argc = x0;
+ Register function = x1;
+ Register call_type = x4;
+ Register scratch1 = x10;
+ Register scratch2 = x11;
+ Register receiver_type = x13;
+
+ ASM_LOCATION("Builtins::Generate_FunctionCall");
+ // 1. Make sure we have at least one argument.
+ { Label done;
+ __ Cbnz(argc, &done);
+ __ LoadRoot(scratch1, Heap::kUndefinedValueRootIndex);
+ __ Push(scratch1);
+ __ Mov(argc, 1);
+ __ Bind(&done);
+ }
+
+ // 2. Get the function to call (passed as receiver) from the stack, check
+ // if it is a function.
+ Label slow, non_function;
+ __ Peek(function, Operand(argc, LSL, kXRegSizeLog2));
+ __ JumpIfSmi(function, &non_function);
+ __ JumpIfNotObjectType(function, scratch1, receiver_type,
+ JS_FUNCTION_TYPE, &slow);
+
+ // 3a. Patch the first argument if necessary when calling a function.
+ Label shift_arguments;
+ __ Mov(call_type, static_cast<int>(call_type_JS_func));
+ { Label convert_to_object, use_global_receiver, patch_receiver;
+ // Change context eagerly in case we need the global receiver.
+ __ Ldr(cp, FieldMemOperand(function, JSFunction::kContextOffset));
+
+ // Do not transform the receiver for strict mode functions.
+    // Also do not transform the receiver for native functions (the compiler
+    // hints are loaded into scratch2 below).
+ __ Ldr(scratch1,
+ FieldMemOperand(function, JSFunction::kSharedFunctionInfoOffset));
+ __ Ldr(scratch2.W(),
+ FieldMemOperand(scratch1, SharedFunctionInfo::kCompilerHintsOffset));
+ __ TestAndBranchIfAnySet(
+ scratch2.W(),
+ (1 << SharedFunctionInfo::kStrictModeFunction) |
+ (1 << SharedFunctionInfo::kNative),
+ &shift_arguments);
+
+ // Compute the receiver in sloppy mode.
+ Register receiver = x2;
+ __ Sub(scratch1, argc, 1);
+ __ Peek(receiver, Operand(scratch1, LSL, kXRegSizeLog2));
+ __ JumpIfSmi(receiver, &convert_to_object);
+
+ __ JumpIfRoot(receiver, Heap::kUndefinedValueRootIndex,
+ &use_global_receiver);
+ __ JumpIfRoot(receiver, Heap::kNullValueRootIndex, &use_global_receiver);
+
+ STATIC_ASSERT(LAST_SPEC_OBJECT_TYPE == LAST_TYPE);
+ __ JumpIfObjectType(receiver, scratch1, scratch2,
+ FIRST_SPEC_OBJECT_TYPE, &shift_arguments, ge);
+
+ __ Bind(&convert_to_object);
+
+ {
+ // Enter an internal frame in order to preserve argument count.
+ FrameScope scope(masm, StackFrame::INTERNAL);
+ __ SmiTag(argc);
+
+ __ Push(argc, receiver);
+ __ InvokeBuiltin(Builtins::TO_OBJECT, CALL_FUNCTION);
+ __ Mov(receiver, x0);
+
+ __ Pop(argc);
+ __ SmiUntag(argc);
+
+ // Exit the internal frame.
+ }
+
+ // Restore the function and flag in the registers.
+ __ Peek(function, Operand(argc, LSL, kXRegSizeLog2));
+ __ Mov(call_type, static_cast<int>(call_type_JS_func));
+ __ B(&patch_receiver);
+
+ __ Bind(&use_global_receiver);
+ __ Ldr(receiver, GlobalObjectMemOperand());
+ __ Ldr(receiver,
+ FieldMemOperand(receiver, GlobalObject::kGlobalReceiverOffset));
+
+ __ Bind(&patch_receiver);
+ __ Sub(scratch1, argc, 1);
+ __ Poke(receiver, Operand(scratch1, LSL, kXRegSizeLog2));
+
+ __ B(&shift_arguments);
+ }
+
+ // 3b. Check for function proxy.
+ __ Bind(&slow);
+ __ Mov(call_type, static_cast<int>(call_type_func_proxy));
+ __ Cmp(receiver_type, JS_FUNCTION_PROXY_TYPE);
+ __ B(eq, &shift_arguments);
+ __ Bind(&non_function);
+ __ Mov(call_type, static_cast<int>(call_type_non_func));
+
+ // 3c. Patch the first argument when calling a non-function. The
+ // CALL_NON_FUNCTION builtin expects the non-function callee as
+ // receiver, so overwrite the first argument which will ultimately
+ // become the receiver.
+ // call type (0: JS function, 1: function proxy, 2: non-function)
+ __ Sub(scratch1, argc, 1);
+ __ Poke(function, Operand(scratch1, LSL, kXRegSizeLog2));
+
+ // 4. Shift arguments and return address one slot down on the stack
+ // (overwriting the original receiver). Adjust argument count to make
+ // the original first argument the new receiver.
+ // call type (0: JS function, 1: function proxy, 2: non-function)
+ __ Bind(&shift_arguments);
+ { Label loop;
+ // Calculate the copy start address (destination). Copy end address is jssp.
+ __ Add(scratch2, jssp, Operand(argc, LSL, kPointerSizeLog2));
+ __ Sub(scratch1, scratch2, kPointerSize);
+
+ __ Bind(&loop);
+ __ Ldr(x12, MemOperand(scratch1, -kPointerSize, PostIndex));
+ __ Str(x12, MemOperand(scratch2, -kPointerSize, PostIndex));
+ __ Cmp(scratch1, jssp);
+ __ B(ge, &loop);
+ // Adjust the actual number of arguments and remove the top element
+ // (which is a copy of the last argument).
+ __ Sub(argc, argc, 1);
+ __ Drop(1);
+ }
+
+ // 5a. Call non-function via tail call to CALL_NON_FUNCTION builtin,
+ // or a function proxy via CALL_FUNCTION_PROXY.
+ // call type (0: JS function, 1: function proxy, 2: non-function)
+ { Label js_function, non_proxy;
+ __ Cbz(call_type, &js_function);
+ // Expected number of arguments is 0 for CALL_NON_FUNCTION.
+ __ Mov(x2, 0);
+ __ Cmp(call_type, static_cast<int>(call_type_func_proxy));
+ __ B(ne, &non_proxy);
+
+ __ Push(function); // Re-add proxy object as additional argument.
+ __ Add(argc, argc, 1);
+ __ GetBuiltinFunction(function, Builtins::CALL_FUNCTION_PROXY);
+ __ Jump(masm->isolate()->builtins()->ArgumentsAdaptorTrampoline(),
+ RelocInfo::CODE_TARGET);
+
+ __ Bind(&non_proxy);
+ __ GetBuiltinFunction(function, Builtins::CALL_NON_FUNCTION);
+ __ Jump(masm->isolate()->builtins()->ArgumentsAdaptorTrampoline(),
+ RelocInfo::CODE_TARGET);
+ __ Bind(&js_function);
+ }
+
+ // 5b. Get the code to call from the function and check that the number of
+ // expected arguments matches what we're providing. If so, jump
+  // (tail-call) to the code in register x3 without checking arguments.
+ __ Ldr(x3, FieldMemOperand(function, JSFunction::kSharedFunctionInfoOffset));
+ __ Ldrsw(x2,
+ FieldMemOperand(x3,
+ SharedFunctionInfo::kFormalParameterCountOffset));
+ Label dont_adapt_args;
+ __ Cmp(x2, argc); // Check formal and actual parameter counts.
+ __ B(eq, &dont_adapt_args);
+ __ Jump(masm->isolate()->builtins()->ArgumentsAdaptorTrampoline(),
+ RelocInfo::CODE_TARGET);
+ __ Bind(&dont_adapt_args);
+
+ __ Ldr(x3, FieldMemOperand(function, JSFunction::kCodeEntryOffset));
+ ParameterCount expected(0);
+ __ InvokeCode(x3, expected, expected, JUMP_FUNCTION, NullCallWrapper());
+}
+
+
+void Builtins::Generate_FunctionApply(MacroAssembler* masm) {
+ ASM_LOCATION("Builtins::Generate_FunctionApply");
+ const int kIndexOffset =
+ StandardFrameConstants::kExpressionsOffset - (2 * kPointerSize);
+ const int kLimitOffset =
+ StandardFrameConstants::kExpressionsOffset - (1 * kPointerSize);
+ const int kArgsOffset = 2 * kPointerSize;
+ const int kReceiverOffset = 3 * kPointerSize;
+ const int kFunctionOffset = 4 * kPointerSize;
+
+ {
+ FrameScope frame_scope(masm, StackFrame::INTERNAL);
+
+ Register args = x12;
+ Register receiver = x14;
+ Register function = x15;
+
+ // Get the length of the arguments via a builtin call.
+ __ Ldr(function, MemOperand(fp, kFunctionOffset));
+ __ Ldr(args, MemOperand(fp, kArgsOffset));
+ __ Push(function, args);
+ __ InvokeBuiltin(Builtins::APPLY_PREPARE, CALL_FUNCTION);
+ Register argc = x0;
+
+ // Check the stack for overflow.
+ // We are not trying to catch interruptions (e.g. debug break and
+ // preemption) here, so the "real stack limit" is checked.
+ Label enough_stack_space;
+ __ LoadRoot(x10, Heap::kRealStackLimitRootIndex);
+ __ Ldr(function, MemOperand(fp, kFunctionOffset));
+ // Make x10 the space we have left. The stack might already be overflowed
+ // here which will cause x10 to become negative.
+ // TODO(jbramley): Check that the stack usage here is safe.
+ __ Sub(x10, jssp, x10);
+ // Check if the arguments will overflow the stack.
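+    // The argument count in argc is a smi, so shifting it right by
+    // (kSmiShift - kPointerSizeLog2) converts it directly into the number of
+    // bytes the arguments will occupy on the stack.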
+ __ Cmp(x10, Operand(argc, LSR, kSmiShift - kPointerSizeLog2));
+ __ B(gt, &enough_stack_space);
+ // There is not enough stack space, so use a builtin to throw an appropriate
+ // error.
+ __ Push(function, argc);
+ __ InvokeBuiltin(Builtins::APPLY_OVERFLOW, CALL_FUNCTION);
+ // We should never return from the APPLY_OVERFLOW builtin.
+ if (__ emit_debug_code()) {
+ __ Unreachable();
+ }
+
+ __ Bind(&enough_stack_space);
+ // Push current limit and index.
+ __ Mov(x1, 0); // Initial index.
+ __ Push(argc, x1);
+
+ Label push_receiver;
+ __ Ldr(receiver, MemOperand(fp, kReceiverOffset));
+
+ // Check that the function is a JS function. Otherwise it must be a proxy.
+ // When it is not the function proxy will be invoked later.
+ __ JumpIfNotObjectType(function, x10, x11, JS_FUNCTION_TYPE,
+ &push_receiver);
+
+ // Change context eagerly to get the right global object if necessary.
+ __ Ldr(cp, FieldMemOperand(function, JSFunction::kContextOffset));
+ // Load the shared function info.
+ __ Ldr(x2, FieldMemOperand(function,
+ JSFunction::kSharedFunctionInfoOffset));
+
+ // Compute and push the receiver.
+ // Do not transform the receiver for strict mode functions.
+ Label convert_receiver_to_object, use_global_receiver;
+ __ Ldr(w10, FieldMemOperand(x2, SharedFunctionInfo::kCompilerHintsOffset));
+ __ Tbnz(x10, SharedFunctionInfo::kStrictModeFunction, &push_receiver);
+ // Do not transform the receiver for native functions.
+ __ Tbnz(x10, SharedFunctionInfo::kNative, &push_receiver);
+
+ // Compute the receiver in sloppy mode.
+ __ JumpIfSmi(receiver, &convert_receiver_to_object);
+ __ JumpIfRoot(receiver, Heap::kNullValueRootIndex, &use_global_receiver);
+ __ JumpIfRoot(receiver, Heap::kUndefinedValueRootIndex,
+ &use_global_receiver);
+
+ // Check if the receiver is already a JavaScript object.
+ STATIC_ASSERT(LAST_SPEC_OBJECT_TYPE == LAST_TYPE);
+ __ JumpIfObjectType(receiver, x10, x11, FIRST_SPEC_OBJECT_TYPE,
+ &push_receiver, ge);
+
+ // Call a builtin to convert the receiver to a regular object.
+ __ Bind(&convert_receiver_to_object);
+ __ Push(receiver);
+ __ InvokeBuiltin(Builtins::TO_OBJECT, CALL_FUNCTION);
+ __ Mov(receiver, x0);
+ __ B(&push_receiver);
+
+ __ Bind(&use_global_receiver);
+ __ Ldr(x10, GlobalObjectMemOperand());
+ __ Ldr(receiver, FieldMemOperand(x10, GlobalObject::kGlobalReceiverOffset));
+
+ // Push the receiver
+ __ Bind(&push_receiver);
+ __ Push(receiver);
+
+ // Copy all arguments from the array to the stack.
+ Label entry, loop;
+ Register current = x0;
+ __ Ldr(current, MemOperand(fp, kIndexOffset));
+ __ B(&entry);
+
+ __ Bind(&loop);
+ // Load the current argument from the arguments array and push it.
+ // TODO(all): Couldn't we optimize this for JS arrays?
+
+ __ Ldr(x1, MemOperand(fp, kArgsOffset));
+ __ Push(x1, current);
+
+ // Call the runtime to access the property in the arguments array.
+ __ CallRuntime(Runtime::kGetProperty, 2);
+ __ Push(x0);
+
+    // Update the index stored in the frame for the next iteration.
+ __ Ldr(current, MemOperand(fp, kIndexOffset));
+ __ Add(current, current, Smi::FromInt(1));
+ __ Str(current, MemOperand(fp, kIndexOffset));
+
+ // Test if the copy loop has finished copying all the elements from the
+ // arguments object.
+ __ Bind(&entry);
+ __ Ldr(x1, MemOperand(fp, kLimitOffset));
+ __ Cmp(current, x1);
+ __ B(ne, &loop);
+
+ // At the end of the loop, the number of arguments is stored in 'current',
+ // represented as a smi.
+
+ function = x1; // From now on we want the function to be kept in x1;
+ __ Ldr(function, MemOperand(fp, kFunctionOffset));
+
+ // Call the function.
+ Label call_proxy;
+ ParameterCount actual(current);
+ __ SmiUntag(current);
+ __ JumpIfNotObjectType(function, x10, x11, JS_FUNCTION_TYPE, &call_proxy);
+ __ InvokeFunction(function, actual, CALL_FUNCTION, NullCallWrapper());
+ frame_scope.GenerateLeaveFrame();
+ __ Drop(3);
+ __ Ret();
+
+ // Call the function proxy.
+ __ Bind(&call_proxy);
+ // x0 : argc
+ // x1 : function
+ __ Push(function); // Add function proxy as last argument.
+ __ Add(x0, x0, 1);
+ __ Mov(x2, 0);
+ __ GetBuiltinFunction(x1, Builtins::CALL_FUNCTION_PROXY);
+ __ Call(masm->isolate()->builtins()->ArgumentsAdaptorTrampoline(),
+ RelocInfo::CODE_TARGET);
+ }
+ __ Drop(3);
+ __ Ret();
+}
+
+
+static void EnterArgumentsAdaptorFrame(MacroAssembler* masm) {
+ __ SmiTag(x10, x0);
+ __ Mov(x11, Smi::FromInt(StackFrame::ARGUMENTS_ADAPTOR));
+ __ Push(lr, fp);
+ __ Push(x11, x1, x10);
+ __ Add(fp, jssp,
+ StandardFrameConstants::kFixedFrameSizeFromFp + kPointerSize);
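+  // The adaptor frame set up here is, from jssp upwards:
+  //   jssp[0] : number of arguments (smi-tagged)
+  //   jssp[1] : function
+  //   jssp[2] : ARGUMENTS_ADAPTOR frame type marker
+  //   jssp[3] : caller's fp
+  //   jssp[4] : lr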
+}
+
+
+static void LeaveArgumentsAdaptorFrame(MacroAssembler* masm) {
+ // ----------- S t a t e -------------
+ // -- x0 : result being passed through
+ // -----------------------------------
+ // Get the number of arguments passed (as a smi), tear down the frame and
+ // then drop the parameters and the receiver.
+ __ Ldr(x10, MemOperand(fp, -(StandardFrameConstants::kFixedFrameSizeFromFp +
+ kPointerSize)));
+ __ Mov(jssp, fp);
+ __ Pop(fp, lr);
+ __ DropBySMI(x10, kXRegSize);
+ __ Drop(1);
+}
+
+
+void Builtins::Generate_ArgumentsAdaptorTrampoline(MacroAssembler* masm) {
+ ASM_LOCATION("Builtins::Generate_ArgumentsAdaptorTrampoline");
+ // ----------- S t a t e -------------
+ // -- x0 : actual number of arguments
+ // -- x1 : function (passed through to callee)
+ // -- x2 : expected number of arguments
+ // -----------------------------------
+
+ Register argc_actual = x0; // Excluding the receiver.
+ Register argc_expected = x2; // Excluding the receiver.
+ Register function = x1;
+ Register code_entry = x3;
+
+ Label invoke, dont_adapt_arguments;
+
+ Label enough, too_few;
+ __ Ldr(code_entry, FieldMemOperand(function, JSFunction::kCodeEntryOffset));
+ __ Cmp(argc_actual, argc_expected);
+ __ B(lt, &too_few);
+ __ Cmp(argc_expected, SharedFunctionInfo::kDontAdaptArgumentsSentinel);
+ __ B(eq, &dont_adapt_arguments);
+
+ { // Enough parameters: actual >= expected
+ EnterArgumentsAdaptorFrame(masm);
+
+ Register copy_start = x10;
+ Register copy_end = x11;
+ Register copy_to = x12;
+ Register scratch1 = x13, scratch2 = x14;
+
+ __ Lsl(argc_expected, argc_expected, kPointerSizeLog2);
+
+ // Adjust for fp, lr, and the receiver.
+ __ Add(copy_start, fp, 3 * kPointerSize);
+ __ Add(copy_start, copy_start, Operand(argc_actual, LSL, kPointerSizeLog2));
+ __ Sub(copy_end, copy_start, argc_expected);
+ __ Sub(copy_end, copy_end, kPointerSize);
+ __ Mov(copy_to, jssp);
+
+ // Claim space for the arguments, the receiver, and one extra slot.
+ // The extra slot ensures we do not write under jssp. It will be popped
+ // later.
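+    // For example, with two expected arguments, three values (the receiver
+    // plus two arguments) must be copied, but the 2-by-2 loop below writes
+    // four slots, so expected + 2 slots are claimed and the surplus slot is
+    // dropped afterwards.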
+ __ Add(scratch1, argc_expected, 2 * kPointerSize);
+ __ Claim(scratch1, 1);
+
+ // Copy the arguments (including the receiver) to the new stack frame.
+ Label copy_2_by_2;
+ __ Bind(&copy_2_by_2);
+ __ Ldp(scratch1, scratch2,
+ MemOperand(copy_start, - 2 * kPointerSize, PreIndex));
+ __ Stp(scratch1, scratch2,
+ MemOperand(copy_to, - 2 * kPointerSize, PreIndex));
+ __ Cmp(copy_start, copy_end);
+ __ B(hi, &copy_2_by_2);
+
+ // Correct the space allocated for the extra slot.
+ __ Drop(1);
+
+ __ B(&invoke);
+ }
+
+ { // Too few parameters: Actual < expected
+ __ Bind(&too_few);
+ EnterArgumentsAdaptorFrame(masm);
+
+ Register copy_from = x10;
+ Register copy_end = x11;
+ Register copy_to = x12;
+ Register scratch1 = x13, scratch2 = x14;
+
+ __ Lsl(argc_expected, argc_expected, kPointerSizeLog2);
+ __ Lsl(argc_actual, argc_actual, kPointerSizeLog2);
+
+ // Adjust for fp, lr, and the receiver.
+ __ Add(copy_from, fp, 3 * kPointerSize);
+ __ Add(copy_from, copy_from, argc_actual);
+ __ Mov(copy_to, jssp);
+ __ Sub(copy_end, copy_to, 1 * kPointerSize); // Adjust for the receiver.
+ __ Sub(copy_end, copy_end, argc_actual);
+
+ // Claim space for the arguments, the receiver, and one extra slot.
+ // The extra slot ensures we do not write under jssp. It will be popped
+ // later.
+ __ Add(scratch1, argc_expected, 2 * kPointerSize);
+ __ Claim(scratch1, 1);
+
+ // Copy the arguments (including the receiver) to the new stack frame.
+ Label copy_2_by_2;
+ __ Bind(&copy_2_by_2);
+ __ Ldp(scratch1, scratch2,
+ MemOperand(copy_from, - 2 * kPointerSize, PreIndex));
+ __ Stp(scratch1, scratch2,
+ MemOperand(copy_to, - 2 * kPointerSize, PreIndex));
+ __ Cmp(copy_to, copy_end);
+ __ B(hi, &copy_2_by_2);
+
+ __ Mov(copy_to, copy_end);
+
+ // Fill the remaining expected arguments with undefined.
+ __ LoadRoot(scratch1, Heap::kUndefinedValueRootIndex);
+ __ Add(copy_end, jssp, kPointerSize);
+
+ Label fill;
+ __ Bind(&fill);
+ __ Stp(scratch1, scratch1,
+ MemOperand(copy_to, - 2 * kPointerSize, PreIndex));
+ __ Cmp(copy_to, copy_end);
+ __ B(hi, &fill);
+
+ // Correct the space allocated for the extra slot.
+ __ Drop(1);
+ }
+
+ // Arguments have been adapted. Now call the entry point.
+ __ Bind(&invoke);
+ __ Call(code_entry);
+
+ // Store offset of return address for deoptimizer.
+ masm->isolate()->heap()->SetArgumentsAdaptorDeoptPCOffset(masm->pc_offset());
+
+ // Exit frame and return.
+ LeaveArgumentsAdaptorFrame(masm);
+ __ Ret();
+
+ // Call the entry point without adapting the arguments.
+ __ Bind(&dont_adapt_arguments);
+ __ Jump(code_entry);
+}
+
+
+#undef __
+
+} } // namespace v8::internal
+
+#endif  // V8_TARGET_ARCH_ARM64
diff --git a/deps/v8/src/arm64/code-stubs-arm64.cc b/deps/v8/src/arm64/code-stubs-arm64.cc
new file mode 100644
index 0000000000..b097fc52ed
--- /dev/null
+++ b/deps/v8/src/arm64/code-stubs-arm64.cc
@@ -0,0 +1,5743 @@
+// Copyright 2013 the V8 project authors. All rights reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following
+// disclaimer in the documentation and/or other materials provided
+// with the distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived
+// from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+#include "v8.h"
+
+#if V8_TARGET_ARCH_ARM64
+
+#include "bootstrapper.h"
+#include "code-stubs.h"
+#include "regexp-macro-assembler.h"
+#include "stub-cache.h"
+
+namespace v8 {
+namespace internal {
+
+
+void FastNewClosureStub::InitializeInterfaceDescriptor(
+ Isolate* isolate,
+ CodeStubInterfaceDescriptor* descriptor) {
+ // x2: function info
+ static Register registers[] = { x2 };
+ descriptor->register_param_count_ = sizeof(registers) / sizeof(registers[0]);
+ descriptor->register_params_ = registers;
+ descriptor->deoptimization_handler_ =
+ Runtime::FunctionForId(Runtime::kHiddenNewClosureFromStubFailure)->entry;
+}
+
+
+void FastNewContextStub::InitializeInterfaceDescriptor(
+ Isolate* isolate,
+ CodeStubInterfaceDescriptor* descriptor) {
+ // x1: function
+ static Register registers[] = { x1 };
+ descriptor->register_param_count_ = sizeof(registers) / sizeof(registers[0]);
+ descriptor->register_params_ = registers;
+ descriptor->deoptimization_handler_ = NULL;
+}
+
+
+void ToNumberStub::InitializeInterfaceDescriptor(
+ Isolate* isolate,
+ CodeStubInterfaceDescriptor* descriptor) {
+ // x0: value
+ static Register registers[] = { x0 };
+ descriptor->register_param_count_ = sizeof(registers) / sizeof(registers[0]);
+ descriptor->register_params_ = registers;
+ descriptor->deoptimization_handler_ = NULL;
+}
+
+
+void NumberToStringStub::InitializeInterfaceDescriptor(
+ Isolate* isolate,
+ CodeStubInterfaceDescriptor* descriptor) {
+ // x0: value
+ static Register registers[] = { x0 };
+ descriptor->register_param_count_ = sizeof(registers) / sizeof(registers[0]);
+ descriptor->register_params_ = registers;
+ descriptor->deoptimization_handler_ =
+ Runtime::FunctionForId(Runtime::kHiddenNumberToString)->entry;
+}
+
+
+void FastCloneShallowArrayStub::InitializeInterfaceDescriptor(
+ Isolate* isolate,
+ CodeStubInterfaceDescriptor* descriptor) {
+ // x3: array literals array
+ // x2: array literal index
+ // x1: constant elements
+ static Register registers[] = { x3, x2, x1 };
+ descriptor->register_param_count_ = sizeof(registers) / sizeof(registers[0]);
+ descriptor->register_params_ = registers;
+ descriptor->deoptimization_handler_ =
+ Runtime::FunctionForId(
+ Runtime::kHiddenCreateArrayLiteralStubBailout)->entry;
+}
+
+
+void FastCloneShallowObjectStub::InitializeInterfaceDescriptor(
+ Isolate* isolate,
+ CodeStubInterfaceDescriptor* descriptor) {
+ // x3: object literals array
+ // x2: object literal index
+ // x1: constant properties
+ // x0: object literal flags
+ static Register registers[] = { x3, x2, x1, x0 };
+ descriptor->register_param_count_ = sizeof(registers) / sizeof(registers[0]);
+ descriptor->register_params_ = registers;
+ descriptor->deoptimization_handler_ =
+ Runtime::FunctionForId(Runtime::kHiddenCreateObjectLiteral)->entry;
+}
+
+
+void CreateAllocationSiteStub::InitializeInterfaceDescriptor(
+ Isolate* isolate,
+ CodeStubInterfaceDescriptor* descriptor) {
+ // x2: feedback vector
+ // x3: call feedback slot
+ static Register registers[] = { x2, x3 };
+ descriptor->register_param_count_ = sizeof(registers) / sizeof(registers[0]);
+ descriptor->register_params_ = registers;
+ descriptor->deoptimization_handler_ = NULL;
+}
+
+
+void KeyedLoadFastElementStub::InitializeInterfaceDescriptor(
+ Isolate* isolate,
+ CodeStubInterfaceDescriptor* descriptor) {
+ // x1: receiver
+ // x0: key
+ static Register registers[] = { x1, x0 };
+ descriptor->register_param_count_ = sizeof(registers) / sizeof(registers[0]);
+ descriptor->register_params_ = registers;
+ descriptor->deoptimization_handler_ =
+ FUNCTION_ADDR(KeyedLoadIC_MissFromStubFailure);
+}
+
+
+void KeyedLoadDictionaryElementStub::InitializeInterfaceDescriptor(
+ Isolate* isolate,
+ CodeStubInterfaceDescriptor* descriptor) {
+ // x1: receiver
+ // x0: key
+ static Register registers[] = { x1, x0 };
+ descriptor->register_param_count_ = sizeof(registers) / sizeof(registers[0]);
+ descriptor->register_params_ = registers;
+ descriptor->deoptimization_handler_ =
+ FUNCTION_ADDR(KeyedLoadIC_MissFromStubFailure);
+}
+
+
+void RegExpConstructResultStub::InitializeInterfaceDescriptor(
+ Isolate* isolate,
+ CodeStubInterfaceDescriptor* descriptor) {
+ // x2: length
+ // x1: index (of last match)
+ // x0: string
+ static Register registers[] = { x2, x1, x0 };
+ descriptor->register_param_count_ = sizeof(registers) / sizeof(registers[0]);
+ descriptor->register_params_ = registers;
+ descriptor->deoptimization_handler_ =
+ Runtime::FunctionForId(Runtime::kHiddenRegExpConstructResult)->entry;
+}
+
+
+void LoadFieldStub::InitializeInterfaceDescriptor(
+ Isolate* isolate,
+ CodeStubInterfaceDescriptor* descriptor) {
+ // x0: receiver
+ static Register registers[] = { x0 };
+ descriptor->register_param_count_ = sizeof(registers) / sizeof(registers[0]);
+ descriptor->register_params_ = registers;
+ descriptor->deoptimization_handler_ = NULL;
+}
+
+
+void KeyedLoadFieldStub::InitializeInterfaceDescriptor(
+ Isolate* isolate,
+ CodeStubInterfaceDescriptor* descriptor) {
+ // x1: receiver
+ static Register registers[] = { x1 };
+ descriptor->register_param_count_ = sizeof(registers) / sizeof(registers[0]);
+ descriptor->register_params_ = registers;
+ descriptor->deoptimization_handler_ = NULL;
+}
+
+
+void StringLengthStub::InitializeInterfaceDescriptor(
+ Isolate* isolate,
+ CodeStubInterfaceDescriptor* descriptor) {
+ static Register registers[] = { x0, x2 };
+ descriptor->register_param_count_ = 2;
+ descriptor->register_params_ = registers;
+ descriptor->deoptimization_handler_ = NULL;
+}
+
+
+void KeyedStringLengthStub::InitializeInterfaceDescriptor(
+ Isolate* isolate,
+ CodeStubInterfaceDescriptor* descriptor) {
+ static Register registers[] = { x1, x0 };
+ descriptor->register_param_count_ = 2;
+ descriptor->register_params_ = registers;
+ descriptor->deoptimization_handler_ = NULL;
+}
+
+
+void KeyedStoreFastElementStub::InitializeInterfaceDescriptor(
+ Isolate* isolate,
+ CodeStubInterfaceDescriptor* descriptor) {
+ // x2: receiver
+ // x1: key
+ // x0: value
+ static Register registers[] = { x2, x1, x0 };
+ descriptor->register_param_count_ = sizeof(registers) / sizeof(registers[0]);
+ descriptor->register_params_ = registers;
+ descriptor->deoptimization_handler_ =
+ FUNCTION_ADDR(KeyedStoreIC_MissFromStubFailure);
+}
+
+
+void TransitionElementsKindStub::InitializeInterfaceDescriptor(
+ Isolate* isolate,
+ CodeStubInterfaceDescriptor* descriptor) {
+ // x0: value (js_array)
+ // x1: to_map
+ static Register registers[] = { x0, x1 };
+ descriptor->register_param_count_ = sizeof(registers) / sizeof(registers[0]);
+ descriptor->register_params_ = registers;
+ Address entry =
+ Runtime::FunctionForId(Runtime::kTransitionElementsKind)->entry;
+ descriptor->deoptimization_handler_ = FUNCTION_ADDR(entry);
+}
+
+
+void CompareNilICStub::InitializeInterfaceDescriptor(
+ Isolate* isolate,
+ CodeStubInterfaceDescriptor* descriptor) {
+ // x0: value to compare
+ static Register registers[] = { x0 };
+ descriptor->register_param_count_ = sizeof(registers) / sizeof(registers[0]);
+ descriptor->register_params_ = registers;
+ descriptor->deoptimization_handler_ =
+ FUNCTION_ADDR(CompareNilIC_Miss);
+ descriptor->SetMissHandler(
+ ExternalReference(IC_Utility(IC::kCompareNilIC_Miss), isolate));
+}
+
+
+static void InitializeArrayConstructorDescriptor(
+ Isolate* isolate,
+ CodeStubInterfaceDescriptor* descriptor,
+ int constant_stack_parameter_count) {
+ // x1: function
+ // x2: allocation site with elements kind
+ // x0: number of arguments to the constructor function
+ static Register registers_variable_args[] = { x1, x2, x0 };
+ static Register registers_no_args[] = { x1, x2 };
+
+ if (constant_stack_parameter_count == 0) {
+ descriptor->register_param_count_ =
+ sizeof(registers_no_args) / sizeof(registers_no_args[0]);
+ descriptor->register_params_ = registers_no_args;
+ } else {
+ // The stack parameter count needs to cover the constructor pointer and
+ // the single argument.
+ descriptor->handler_arguments_mode_ = PASS_ARGUMENTS;
+ descriptor->stack_parameter_count_ = x0;
+ descriptor->register_param_count_ =
+ sizeof(registers_variable_args) / sizeof(registers_variable_args[0]);
+ descriptor->register_params_ = registers_variable_args;
+ }
+
+ descriptor->hint_stack_parameter_count_ = constant_stack_parameter_count;
+ descriptor->function_mode_ = JS_FUNCTION_STUB_MODE;
+ descriptor->deoptimization_handler_ =
+ Runtime::FunctionForId(Runtime::kHiddenArrayConstructor)->entry;
+}
+
+
+void ArrayNoArgumentConstructorStub::InitializeInterfaceDescriptor(
+ Isolate* isolate,
+ CodeStubInterfaceDescriptor* descriptor) {
+ InitializeArrayConstructorDescriptor(isolate, descriptor, 0);
+}
+
+
+void ArraySingleArgumentConstructorStub::InitializeInterfaceDescriptor(
+ Isolate* isolate,
+ CodeStubInterfaceDescriptor* descriptor) {
+ InitializeArrayConstructorDescriptor(isolate, descriptor, 1);
+}
+
+
+void ArrayNArgumentsConstructorStub::InitializeInterfaceDescriptor(
+ Isolate* isolate,
+ CodeStubInterfaceDescriptor* descriptor) {
+ InitializeArrayConstructorDescriptor(isolate, descriptor, -1);
+}
+
+
+static void InitializeInternalArrayConstructorDescriptor(
+ Isolate* isolate,
+ CodeStubInterfaceDescriptor* descriptor,
+ int constant_stack_parameter_count) {
+ // x1: constructor function
+ // x0: number of arguments to the constructor function
+ static Register registers_variable_args[] = { x1, x0 };
+ static Register registers_no_args[] = { x1 };
+
+ if (constant_stack_parameter_count == 0) {
+ descriptor->register_param_count_ =
+ sizeof(registers_no_args) / sizeof(registers_no_args[0]);
+ descriptor->register_params_ = registers_no_args;
+ } else {
+ // The stack parameter count needs to cover the constructor pointer and
+ // the single argument.
+ descriptor->handler_arguments_mode_ = PASS_ARGUMENTS;
+ descriptor->stack_parameter_count_ = x0;
+ descriptor->register_param_count_ =
+ sizeof(registers_variable_args) / sizeof(registers_variable_args[0]);
+ descriptor->register_params_ = registers_variable_args;
+ }
+
+ descriptor->hint_stack_parameter_count_ = constant_stack_parameter_count;
+ descriptor->function_mode_ = JS_FUNCTION_STUB_MODE;
+ descriptor->deoptimization_handler_ =
+ Runtime::FunctionForId(Runtime::kHiddenInternalArrayConstructor)->entry;
+}
+
+
+void InternalArrayNoArgumentConstructorStub::InitializeInterfaceDescriptor(
+ Isolate* isolate,
+ CodeStubInterfaceDescriptor* descriptor) {
+ InitializeInternalArrayConstructorDescriptor(isolate, descriptor, 0);
+}
+
+
+void InternalArraySingleArgumentConstructorStub::InitializeInterfaceDescriptor(
+ Isolate* isolate,
+ CodeStubInterfaceDescriptor* descriptor) {
+ InitializeInternalArrayConstructorDescriptor(isolate, descriptor, 1);
+}
+
+
+void InternalArrayNArgumentsConstructorStub::InitializeInterfaceDescriptor(
+ Isolate* isolate,
+ CodeStubInterfaceDescriptor* descriptor) {
+ InitializeInternalArrayConstructorDescriptor(isolate, descriptor, -1);
+}
+
+
+void ToBooleanStub::InitializeInterfaceDescriptor(
+ Isolate* isolate,
+ CodeStubInterfaceDescriptor* descriptor) {
+ // x0: value
+ static Register registers[] = { x0 };
+ descriptor->register_param_count_ = sizeof(registers) / sizeof(registers[0]);
+ descriptor->register_params_ = registers;
+ descriptor->deoptimization_handler_ = FUNCTION_ADDR(ToBooleanIC_Miss);
+ descriptor->SetMissHandler(
+ ExternalReference(IC_Utility(IC::kToBooleanIC_Miss), isolate));
+}
+
+
+void StoreGlobalStub::InitializeInterfaceDescriptor(
+ Isolate* isolate,
+ CodeStubInterfaceDescriptor* descriptor) {
+ // x1: receiver
+ // x2: key (unused)
+ // x0: value
+ static Register registers[] = { x1, x2, x0 };
+ descriptor->register_param_count_ = sizeof(registers) / sizeof(registers[0]);
+ descriptor->register_params_ = registers;
+ descriptor->deoptimization_handler_ =
+ FUNCTION_ADDR(StoreIC_MissFromStubFailure);
+}
+
+
+void ElementsTransitionAndStoreStub::InitializeInterfaceDescriptor(
+ Isolate* isolate,
+ CodeStubInterfaceDescriptor* descriptor) {
+ // x0: value
+ // x3: target map
+ // x1: key
+ // x2: receiver
+ static Register registers[] = { x0, x3, x1, x2 };
+ descriptor->register_param_count_ = sizeof(registers) / sizeof(registers[0]);
+ descriptor->register_params_ = registers;
+ descriptor->deoptimization_handler_ =
+ FUNCTION_ADDR(ElementsTransitionAndStoreIC_Miss);
+}
+
+
+void BinaryOpICStub::InitializeInterfaceDescriptor(
+ Isolate* isolate,
+ CodeStubInterfaceDescriptor* descriptor) {
+ // x1: left operand
+ // x0: right operand
+ static Register registers[] = { x1, x0 };
+ descriptor->register_param_count_ = sizeof(registers) / sizeof(registers[0]);
+ descriptor->register_params_ = registers;
+ descriptor->deoptimization_handler_ = FUNCTION_ADDR(BinaryOpIC_Miss);
+ descriptor->SetMissHandler(
+ ExternalReference(IC_Utility(IC::kBinaryOpIC_Miss), isolate));
+}
+
+
+void BinaryOpWithAllocationSiteStub::InitializeInterfaceDescriptor(
+ Isolate* isolate,
+ CodeStubInterfaceDescriptor* descriptor) {
+ // x2: allocation site
+ // x1: left operand
+ // x0: right operand
+ static Register registers[] = { x2, x1, x0 };
+ descriptor->register_param_count_ = sizeof(registers) / sizeof(registers[0]);
+ descriptor->register_params_ = registers;
+ descriptor->deoptimization_handler_ =
+ FUNCTION_ADDR(BinaryOpIC_MissWithAllocationSite);
+}
+
+
+void StringAddStub::InitializeInterfaceDescriptor(
+ Isolate* isolate,
+ CodeStubInterfaceDescriptor* descriptor) {
+ // x1: left operand
+ // x0: right operand
+ static Register registers[] = { x1, x0 };
+ descriptor->register_param_count_ = sizeof(registers) / sizeof(registers[0]);
+ descriptor->register_params_ = registers;
+ descriptor->deoptimization_handler_ =
+ Runtime::FunctionForId(Runtime::kHiddenStringAdd)->entry;
+}
+
+
+void CallDescriptors::InitializeForIsolate(Isolate* isolate) {
+ static PlatformCallInterfaceDescriptor default_descriptor =
+ PlatformCallInterfaceDescriptor(CAN_INLINE_TARGET_ADDRESS);
+
+ static PlatformCallInterfaceDescriptor noInlineDescriptor =
+ PlatformCallInterfaceDescriptor(NEVER_INLINE_TARGET_ADDRESS);
+
+ {
+ CallInterfaceDescriptor* descriptor =
+ isolate->call_descriptor(Isolate::ArgumentAdaptorCall);
+ static Register registers[] = { x1, // JSFunction
+ cp, // context
+ x0, // actual number of arguments
+ x2, // expected number of arguments
+ };
+ static Representation representations[] = {
+ Representation::Tagged(), // JSFunction
+ Representation::Tagged(), // context
+ Representation::Integer32(), // actual number of arguments
+ Representation::Integer32(), // expected number of arguments
+ };
+ descriptor->register_param_count_ = 4;
+ descriptor->register_params_ = registers;
+ descriptor->param_representations_ = representations;
+ descriptor->platform_specific_descriptor_ = &default_descriptor;
+ }
+ {
+ CallInterfaceDescriptor* descriptor =
+ isolate->call_descriptor(Isolate::KeyedCall);
+ static Register registers[] = { cp, // context
+ x2, // key
+ };
+ static Representation representations[] = {
+ Representation::Tagged(), // context
+ Representation::Tagged(), // key
+ };
+ descriptor->register_param_count_ = 2;
+ descriptor->register_params_ = registers;
+ descriptor->param_representations_ = representations;
+ descriptor->platform_specific_descriptor_ = &noInlineDescriptor;
+ }
+ {
+ CallInterfaceDescriptor* descriptor =
+ isolate->call_descriptor(Isolate::NamedCall);
+ static Register registers[] = { cp, // context
+ x2, // name
+ };
+ static Representation representations[] = {
+ Representation::Tagged(), // context
+ Representation::Tagged(), // name
+ };
+ descriptor->register_param_count_ = 2;
+ descriptor->register_params_ = registers;
+ descriptor->param_representations_ = representations;
+ descriptor->platform_specific_descriptor_ = &noInlineDescriptor;
+ }
+ {
+ CallInterfaceDescriptor* descriptor =
+ isolate->call_descriptor(Isolate::CallHandler);
+ static Register registers[] = { cp, // context
+ x0, // receiver
+ };
+ static Representation representations[] = {
+ Representation::Tagged(), // context
+ Representation::Tagged(), // receiver
+ };
+ descriptor->register_param_count_ = 2;
+ descriptor->register_params_ = registers;
+ descriptor->param_representations_ = representations;
+ descriptor->platform_specific_descriptor_ = &default_descriptor;
+ }
+ {
+ CallInterfaceDescriptor* descriptor =
+ isolate->call_descriptor(Isolate::ApiFunctionCall);
+ static Register registers[] = { x0, // callee
+ x4, // call_data
+ x2, // holder
+ x1, // api_function_address
+ cp, // context
+ };
+ static Representation representations[] = {
+ Representation::Tagged(), // callee
+ Representation::Tagged(), // call_data
+ Representation::Tagged(), // holder
+ Representation::External(), // api_function_address
+ Representation::Tagged(), // context
+ };
+ descriptor->register_param_count_ = 5;
+ descriptor->register_params_ = registers;
+ descriptor->param_representations_ = representations;
+ descriptor->platform_specific_descriptor_ = &default_descriptor;
+ }
+}
+
+
+#define __ ACCESS_MASM(masm)
+
+
+void HydrogenCodeStub::GenerateLightweightMiss(MacroAssembler* masm) {
+ // Update the static counter each time a new code stub is generated.
+ Isolate* isolate = masm->isolate();
+ isolate->counters()->code_stubs()->Increment();
+
+ CodeStubInterfaceDescriptor* descriptor = GetInterfaceDescriptor(isolate);
+ int param_count = descriptor->register_param_count_;
+ {
+ // Call the runtime system in a fresh internal frame.
+ FrameScope scope(masm, StackFrame::INTERNAL);
+ ASSERT((descriptor->register_param_count_ == 0) ||
+ x0.Is(descriptor->register_params_[param_count - 1]));
+
+ // Push arguments
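+ // The parameters are queued up and pushed in one batch, which lets the
+ // macro assembler combine them into fewer push operations than pushing
+ // the registers one at a time.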
+ MacroAssembler::PushPopQueue queue(masm);
+ for (int i = 0; i < param_count; ++i) {
+ queue.Queue(descriptor->register_params_[i]);
+ }
+ queue.PushQueued();
+
+ ExternalReference miss = descriptor->miss_handler();
+ __ CallExternalReference(miss, descriptor->register_param_count_);
+ }
+
+ __ Ret();
+}
+
+
+void DoubleToIStub::Generate(MacroAssembler* masm) {
+ Label done;
+ Register input = source();
+ Register result = destination();
+ ASSERT(is_truncating());
+
+ ASSERT(result.Is64Bits());
+ ASSERT(jssp.Is(masm->StackPointer()));
+
+ int double_offset = offset();
+
+ DoubleRegister double_scratch = d0; // only used if !skip_fastpath()
+ Register scratch1 = GetAllocatableRegisterThatIsNotOneOf(input, result);
+ Register scratch2 =
+ GetAllocatableRegisterThatIsNotOneOf(input, result, scratch1);
+
+ __ Push(scratch1, scratch2);
+ // Account for saved regs if input is jssp.
+ if (input.is(jssp)) double_offset += 2 * kPointerSize;
+
+ if (!skip_fastpath()) {
+ __ Push(double_scratch);
+ if (input.is(jssp)) double_offset += 1 * kDoubleSize;
+ __ Ldr(double_scratch, MemOperand(input, double_offset));
+ // Try to convert with a FPU convert instruction. This handles all
+ // non-saturating cases.
+ __ TryConvertDoubleToInt64(result, double_scratch, &done);
+ __ Fmov(result, double_scratch);
+ } else {
+ __ Ldr(result, MemOperand(input, double_offset));
+ }
+
+ // If we reach here we need to manually convert the input to an int32.
+
+ // Extract the exponent.
+ Register exponent = scratch1;
+ __ Ubfx(exponent, result, HeapNumber::kMantissaBits,
+ HeapNumber::kExponentBits);
+
+ // If the exponent is >= 84 (kMantissaBits + 32), the result is always 0 since
+ // the mantissa gets shifted completely out of the int32_t result.
+ __ Cmp(exponent, HeapNumber::kExponentBias + HeapNumber::kMantissaBits + 32);
+ __ CzeroX(result, ge);
+ __ B(ge, &done);
+
+ // The Fcvtzs sequence handles all cases except where the conversion causes
+ // signed overflow in the int64_t target. Since we've already handled
+ // exponents >= 84, we can guarantee that 63 <= exponent < 84.
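+ //
+ // For illustration: for the input 2^63 + 3 * 2^11, the biased exponent
+ // field is 1023 + 63 and the stored mantissa is 3, so the shift computed
+ // below is 63 - 52 = 11; the implicit-one mantissa (2^52 + 3) shifted left
+ // by 11 gives 2^63 + 6144, whose low 32 bits (6144) equal the input modulo
+ // 2^32, which is what the truncating int32 conversion needs.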
+
+ if (masm->emit_debug_code()) {
+ __ Cmp(exponent, HeapNumber::kExponentBias + 63);
+ // Exponents less than this should have been handled by the Fcvt case.
+ __ Check(ge, kUnexpectedValue);
+ }
+
+ // Isolate the mantissa bits, and set the implicit '1'.
+ Register mantissa = scratch2;
+ __ Ubfx(mantissa, result, 0, HeapNumber::kMantissaBits);
+ __ Orr(mantissa, mantissa, 1UL << HeapNumber::kMantissaBits);
+
+ // Negate the mantissa if necessary.
+ __ Tst(result, kXSignMask);
+ __ Cneg(mantissa, mantissa, ne);
+
+ // Shift the mantissa bits in the correct place. We know that we have to shift
+ // it left here, because exponent >= 63 >= kMantissaBits.
+ __ Sub(exponent, exponent,
+ HeapNumber::kExponentBias + HeapNumber::kMantissaBits);
+ __ Lsl(result, mantissa, exponent);
+
+ __ Bind(&done);
+ if (!skip_fastpath()) {
+ __ Pop(double_scratch);
+ }
+ __ Pop(scratch2, scratch1);
+ __ Ret();
+}
+
+
+// See call site for description.
+static void EmitIdenticalObjectComparison(MacroAssembler* masm,
+ Register left,
+ Register right,
+ Register scratch,
+ FPRegister double_scratch,
+ Label* slow,
+ Condition cond) {
+ ASSERT(!AreAliased(left, right, scratch));
+ Label not_identical, return_equal, heap_number;
+ Register result = x0;
+
+ __ Cmp(right, left);
+ __ B(ne, &not_identical);
+
+ // Test for NaN. Sadly, we can't just compare to factory::nan_value(),
+ // so we do the second best thing - test it ourselves.
+ // The operands are identical, and the both-smi case has already been
+ // handled, so neither of them is a smi. If it's not a heap number, then
+ // return equal.
+ if ((cond == lt) || (cond == gt)) {
+ __ JumpIfObjectType(right, scratch, scratch, FIRST_SPEC_OBJECT_TYPE, slow,
+ ge);
+ } else {
+ Register right_type = scratch;
+ __ JumpIfObjectType(right, right_type, right_type, HEAP_NUMBER_TYPE,
+ &heap_number);
+ // Comparing JS objects with <=, >= is complicated.
+ if (cond != eq) {
+ __ Cmp(right_type, FIRST_SPEC_OBJECT_TYPE);
+ __ B(ge, slow);
+ // Normally here we fall through to return_equal, but undefined is
+ // special: (undefined == undefined) == true, but
+ // (undefined <= undefined) == false! See ECMAScript 11.8.5.
+ if ((cond == le) || (cond == ge)) {
+ __ Cmp(right_type, ODDBALL_TYPE);
+ __ B(ne, &return_equal);
+ __ JumpIfNotRoot(right, Heap::kUndefinedValueRootIndex, &return_equal);
+ if (cond == le) {
+ // undefined <= undefined should fail.
+ __ Mov(result, GREATER);
+ } else {
+ // undefined >= undefined should fail.
+ __ Mov(result, LESS);
+ }
+ __ Ret();
+ }
+ }
+ }
+
+ __ Bind(&return_equal);
+ if (cond == lt) {
+ __ Mov(result, GREATER); // Things aren't less than themselves.
+ } else if (cond == gt) {
+ __ Mov(result, LESS); // Things aren't greater than themselves.
+ } else {
+ __ Mov(result, EQUAL); // Things are <=, >=, ==, === themselves.
+ }
+ __ Ret();
+
+ // Cases lt and gt have been handled earlier, and case ne is never seen, as
+ // it is handled in the parser (see Parser::ParseBinaryExpression). We are
+ // only concerned with cases ge, le and eq here.
+ if ((cond != lt) && (cond != gt)) {
+ ASSERT((cond == ge) || (cond == le) || (cond == eq));
+ __ Bind(&heap_number);
+ // Left and right are identical pointers to a heap number object. Return
+ // non-equal if the heap number is a NaN, and equal otherwise. Comparing
+ // the number to itself will set the overflow flag iff the number is NaN.
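+ // (On ARM64, Fcmp with a NaN operand produces the "unordered" flag setting
+ // nzCV, so the V flag is set exactly when the value is NaN and the vc
+ // branch below takes the not-NaN path.)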
+ __ Ldr(double_scratch, FieldMemOperand(right, HeapNumber::kValueOffset));
+ __ Fcmp(double_scratch, double_scratch);
+ __ B(vc, &return_equal); // Not NaN, so treat as normal heap number.
+
+ if (cond == le) {
+ __ Mov(result, GREATER);
+ } else {
+ __ Mov(result, LESS);
+ }
+ __ Ret();
+ }
+
+ // No fall through here.
+ if (FLAG_debug_code) {
+ __ Unreachable();
+ }
+
+ __ Bind(&not_identical);
+}
+
+
+// See call site for description.
+static void EmitStrictTwoHeapObjectCompare(MacroAssembler* masm,
+ Register left,
+ Register right,
+ Register left_type,
+ Register right_type,
+ Register scratch) {
+ ASSERT(!AreAliased(left, right, left_type, right_type, scratch));
+
+ if (masm->emit_debug_code()) {
+ // We assume that the arguments are not identical.
+ __ Cmp(left, right);
+ __ Assert(ne, kExpectedNonIdenticalObjects);
+ }
+
+ // If either operand is a JS object or an oddball value, then they are not
+ // equal since their pointers are different.
+ // There is no test for undetectability in strict equality.
+ STATIC_ASSERT(LAST_TYPE == LAST_SPEC_OBJECT_TYPE);
+ Label right_non_object;
+
+ __ Cmp(right_type, FIRST_SPEC_OBJECT_TYPE);
+ __ B(lt, &right_non_object);
+
+ // Return non-zero - x0 already contains a non-zero pointer.
+ ASSERT(left.is(x0) || right.is(x0));
+ Label return_not_equal;
+ __ Bind(&return_not_equal);
+ __ Ret();
+
+ __ Bind(&right_non_object);
+
+ // Check for oddballs: true, false, null, undefined.
+ __ Cmp(right_type, ODDBALL_TYPE);
+
+ // If right is not ODDBALL, test left. Otherwise, set eq condition.
+ __ Ccmp(left_type, ODDBALL_TYPE, ZFlag, ne);
+
+ // If right or left is not ODDBALL, test left >= FIRST_SPEC_OBJECT_TYPE.
+ // Otherwise, right or left is ODDBALL, so set a ge condition.
+ __ Ccmp(left_type, FIRST_SPEC_OBJECT_TYPE, NVFlag, ne);
+
+ __ B(ge, &return_not_equal);
+
+ // Internalized strings are unique, so they can only be equal if they are the
+ // same object. We have already tested that case, so if left and right are
+ // both internalized strings, they cannot be equal.
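+ // Because kInternalizedTag and kStringTag are both zero (checked below), an
+ // instance type is an internalized string exactly when its kIsNotStringMask
+ // and kIsNotInternalizedMask bits are both clear; ORing the two types
+ // therefore leaves those bits clear only if this holds for left and right
+ // at the same time.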
+ STATIC_ASSERT((kInternalizedTag == 0) && (kStringTag == 0));
+ __ Orr(scratch, left_type, right_type);
+ __ TestAndBranchIfAllClear(
+ scratch, kIsNotStringMask | kIsNotInternalizedMask, &return_not_equal);
+}
+
+
+// See call site for description.
+static void EmitSmiNonsmiComparison(MacroAssembler* masm,
+ Register left,
+ Register right,
+ FPRegister left_d,
+ FPRegister right_d,
+ Register scratch,
+ Label* slow,
+ bool strict) {
+ ASSERT(!AreAliased(left, right, scratch));
+ ASSERT(!AreAliased(left_d, right_d));
+ ASSERT((left.is(x0) && right.is(x1)) ||
+ (right.is(x0) && left.is(x1)));
+ Register result = x0;
+
+ Label right_is_smi, done;
+ __ JumpIfSmi(right, &right_is_smi);
+
+ // Left is the smi. Check whether right is a heap number.
+ if (strict) {
+ // If right is not a number and left is a smi, then strict equality cannot
+ // succeed. Return non-equal.
+ Label is_heap_number;
+ __ JumpIfObjectType(right, scratch, scratch, HEAP_NUMBER_TYPE,
+ &is_heap_number);
+ // Register right is a non-zero pointer, which is a valid NOT_EQUAL result.
+ if (!right.is(result)) {
+ __ Mov(result, NOT_EQUAL);
+ }
+ __ Ret();
+ __ Bind(&is_heap_number);
+ } else {
+ // Smi compared non-strictly with a non-smi, non-heap-number. Call the
+ // runtime.
+ __ JumpIfNotObjectType(right, scratch, scratch, HEAP_NUMBER_TYPE, slow);
+ }
+
+ // Left is the smi. Right is a heap number. Load right value into right_d, and
+ // convert left smi into double in left_d.
+ __ Ldr(right_d, FieldMemOperand(right, HeapNumber::kValueOffset));
+ __ SmiUntagToDouble(left_d, left);
+ __ B(&done);
+
+ __ Bind(&right_is_smi);
+ // Right is a smi. Check whether the non-smi left is a heap number.
+ if (strict) {
+ // If left is not a number and right is a smi then strict equality cannot
+ // succeed. Return non-equal.
+ Label is_heap_number;
+ __ JumpIfObjectType(left, scratch, scratch, HEAP_NUMBER_TYPE,
+ &is_heap_number);
+ // Register left is a non-zero pointer, which is a valid NOT_EQUAL result.
+ if (!left.is(result)) {
+ __ Mov(result, NOT_EQUAL);
+ }
+ __ Ret();
+ __ Bind(&is_heap_number);
+ } else {
+ // Smi compared non-strictly with a non-smi, non-heap-number. Call the
+ // runtime.
+ __ JumpIfNotObjectType(left, scratch, scratch, HEAP_NUMBER_TYPE, slow);
+ }
+
+ // Right is the smi. Left is a heap number. Load left value into left_d, and
+ // convert right smi into double in right_d.
+ __ Ldr(left_d, FieldMemOperand(left, HeapNumber::kValueOffset));
+ __ SmiUntagToDouble(right_d, right);
+
+ // Fall through to both_loaded_as_doubles.
+ __ Bind(&done);
+}
+
+
+// Fast negative check for internalized-to-internalized equality.
+// See call site for description.
+static void EmitCheckForInternalizedStringsOrObjects(MacroAssembler* masm,
+ Register left,
+ Register right,
+ Register left_map,
+ Register right_map,
+ Register left_type,
+ Register right_type,
+ Label* possible_strings,
+ Label* not_both_strings) {
+ ASSERT(!AreAliased(left, right, left_map, right_map, left_type, right_type));
+ Register result = x0;
+
+ Label object_test;
+ STATIC_ASSERT((kInternalizedTag == 0) && (kStringTag == 0));
+ // TODO(all): reexamine this branch sequence for optimisation wrt branch
+ // prediction.
+ __ Tbnz(right_type, MaskToBit(kIsNotStringMask), &object_test);
+ __ Tbnz(right_type, MaskToBit(kIsNotInternalizedMask), possible_strings);
+ __ Tbnz(left_type, MaskToBit(kIsNotStringMask), not_both_strings);
+ __ Tbnz(left_type, MaskToBit(kIsNotInternalizedMask), possible_strings);
+
+ // Both are internalized. We already checked that they weren't the same
+ // pointer, so they are not equal.
+ __ Mov(result, NOT_EQUAL);
+ __ Ret();
+
+ __ Bind(&object_test);
+
+ __ Cmp(right_type, FIRST_SPEC_OBJECT_TYPE);
+
+ // If right >= FIRST_SPEC_OBJECT_TYPE, test left.
+ // Otherwise, right < FIRST_SPEC_OBJECT_TYPE, so set lt condition.
+ __ Ccmp(left_type, FIRST_SPEC_OBJECT_TYPE, NFlag, ge);
+
+ __ B(lt, not_both_strings);
+
+ // If both objects are undetectable, they are equal. Otherwise, they are not
+ // equal, since they are different objects and an object is not equal to
+ // undefined.
+
+ // Returning here, so we can corrupt right_type and left_type.
+ Register right_bitfield = right_type;
+ Register left_bitfield = left_type;
+ __ Ldrb(right_bitfield, FieldMemOperand(right_map, Map::kBitFieldOffset));
+ __ Ldrb(left_bitfield, FieldMemOperand(left_map, Map::kBitFieldOffset));
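+ // The And/Eor sequence below effectively computes
+ //   result = (right_bitfield & left_bitfield & (1 << Map::kIsUndetectable))
+ //            ^ (1 << Map::kIsUndetectable)
+ // so result is zero (EQUAL) only when both maps have the undetectable bit
+ // set, and a non-zero value (meaning not equal) otherwise.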
+ __ And(result, right_bitfield, left_bitfield);
+ __ And(result, result, 1 << Map::kIsUndetectable);
+ __ Eor(result, result, 1 << Map::kIsUndetectable);
+ __ Ret();
+}
+
+
+static void ICCompareStub_CheckInputType(MacroAssembler* masm,
+ Register input,
+ Register scratch,
+ CompareIC::State expected,
+ Label* fail) {
+ Label ok;
+ if (expected == CompareIC::SMI) {
+ __ JumpIfNotSmi(input, fail);
+ } else if (expected == CompareIC::NUMBER) {
+ __ JumpIfSmi(input, &ok);
+ __ CheckMap(input, scratch, Heap::kHeapNumberMapRootIndex, fail,
+ DONT_DO_SMI_CHECK);
+ }
+ // We could be strict about internalized/non-internalized here, but as long as
+ // hydrogen doesn't care, the stub doesn't have to care either.
+ __ Bind(&ok);
+}
+
+
+void ICCompareStub::GenerateGeneric(MacroAssembler* masm) {
+ Register lhs = x1;
+ Register rhs = x0;
+ Register result = x0;
+ Condition cond = GetCondition();
+
+ Label miss;
+ ICCompareStub_CheckInputType(masm, lhs, x2, left_, &miss);
+ ICCompareStub_CheckInputType(masm, rhs, x3, right_, &miss);
+
+ Label slow; // Call builtin.
+ Label not_smis, both_loaded_as_doubles;
+ Label not_two_smis, smi_done;
+ __ JumpIfEitherNotSmi(lhs, rhs, &not_two_smis);
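+ // Both operands are smis. The untagged difference lhs - rhs computed below
+ // is negative, zero or positive exactly when lhs is less than, equal to or
+ // greater than rhs, which matches the sign-based result convention of this
+ // stub (negative for less, zero for equal, positive for greater).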
+ __ SmiUntag(lhs);
+ __ Sub(result, lhs, Operand::UntagSmi(rhs));
+ __ Ret();
+
+ __ Bind(&not_two_smis);
+
+ // NOTICE! This code is only reached after a smi-fast-case check, so it is
+ // certain that at least one operand isn't a smi.
+
+ // Handle the case where the objects are identical. Either returns the answer
+ // or goes to slow. Only falls through if the objects were not identical.
+ EmitIdenticalObjectComparison(masm, lhs, rhs, x10, d0, &slow, cond);
+
+ // If either is a smi (we know that at least one is not a smi), then they can
+ // only be strictly equal if the other is a HeapNumber.
+ __ JumpIfBothNotSmi(lhs, rhs, &not_smis);
+
+ // Exactly one operand is a smi. EmitSmiNonsmiComparison generates code that
+ // can:
+ // 1) Return the answer.
+ // 2) Branch to the slow case.
+ // 3) Fall through to both_loaded_as_doubles.
+ // In case 3, we have found out that we were dealing with a number-number
+ // comparison. The double values of the numbers have been loaded: the right
+ // operand into rhs_d and the left operand into lhs_d.
+ FPRegister rhs_d = d0;
+ FPRegister lhs_d = d1;
+ EmitSmiNonsmiComparison(masm, lhs, rhs, lhs_d, rhs_d, x10, &slow, strict());
+
+ __ Bind(&both_loaded_as_doubles);
+ // The arguments have been converted to doubles and stored in rhs_d and
+ // lhs_d.
+ Label nan;
+ __ Fcmp(lhs_d, rhs_d);
+ __ B(vs, &nan); // Overflow flag set if either is NaN.
+ STATIC_ASSERT((LESS == -1) && (EQUAL == 0) && (GREATER == 1));
+ __ Cset(result, gt); // gt => 1, otherwise (lt, eq) => 0 (EQUAL).
+ __ Csinv(result, result, xzr, ge); // lt => -1, gt => 1, eq => 0.
+ __ Ret();
+
+ __ Bind(&nan);
+ // Left and/or right is a NaN. Load the result register with whatever makes
+ // the comparison fail, since comparisons with NaN always fail (except ne,
+ // which is filtered out at a higher level.)
+ ASSERT(cond != ne);
+ if ((cond == lt) || (cond == le)) {
+ __ Mov(result, GREATER);
+ } else {
+ __ Mov(result, LESS);
+ }
+ __ Ret();
+
+ __ Bind(&not_smis);
+ // At this point we know we are dealing with two different objects, and
+ // neither of them is a smi. The objects are in rhs_ and lhs_.
+
+ // Load the maps and types of the objects.
+ Register rhs_map = x10;
+ Register rhs_type = x11;
+ Register lhs_map = x12;
+ Register lhs_type = x13;
+ __ Ldr(rhs_map, FieldMemOperand(rhs, HeapObject::kMapOffset));
+ __ Ldr(lhs_map, FieldMemOperand(lhs, HeapObject::kMapOffset));
+ __ Ldrb(rhs_type, FieldMemOperand(rhs_map, Map::kInstanceTypeOffset));
+ __ Ldrb(lhs_type, FieldMemOperand(lhs_map, Map::kInstanceTypeOffset));
+
+ if (strict()) {
+ // This emits a non-equal return sequence for some object types, or falls
+ // through if it was not lucky.
+ EmitStrictTwoHeapObjectCompare(masm, lhs, rhs, lhs_type, rhs_type, x14);
+ }
+
+ Label check_for_internalized_strings;
+ Label flat_string_check;
+ // Check for heap number comparison. Branch to earlier double comparison code
+ // if they are heap numbers, otherwise, branch to internalized string check.
+ __ Cmp(rhs_type, HEAP_NUMBER_TYPE);
+ __ B(ne, &check_for_internalized_strings);
+ __ Cmp(lhs_map, rhs_map);
+
+ // If maps aren't equal, lhs_ and rhs_ are not heap numbers. Branch to flat
+ // string check.
+ __ B(ne, &flat_string_check);
+
+ // Both lhs_ and rhs_ are heap numbers. Load them and branch to the double
+ // comparison code.
+ __ Ldr(lhs_d, FieldMemOperand(lhs, HeapNumber::kValueOffset));
+ __ Ldr(rhs_d, FieldMemOperand(rhs, HeapNumber::kValueOffset));
+ __ B(&both_loaded_as_doubles);
+
+ __ Bind(&check_for_internalized_strings);
+ // In the strict case, the EmitStrictTwoHeapObjectCompare already took care
+ // of internalized strings.
+ if ((cond == eq) && !strict()) {
+ // Returns an answer for two internalized strings or two detectable objects.
+ // Otherwise branches to the string case or not both strings case.
+ EmitCheckForInternalizedStringsOrObjects(masm, lhs, rhs, lhs_map, rhs_map,
+ lhs_type, rhs_type,
+ &flat_string_check, &slow);
+ }
+
+ // Check for both being sequential ASCII strings, and inline if that is the
+ // case.
+ __ Bind(&flat_string_check);
+ __ JumpIfBothInstanceTypesAreNotSequentialAscii(lhs_type, rhs_type, x14,
+ x15, &slow);
+
+ Isolate* isolate = masm->isolate();
+ __ IncrementCounter(isolate->counters()->string_compare_native(), 1, x10,
+ x11);
+ if (cond == eq) {
+ StringCompareStub::GenerateFlatAsciiStringEquals(masm, lhs, rhs,
+ x10, x11, x12);
+ } else {
+ StringCompareStub::GenerateCompareFlatAsciiStrings(masm, lhs, rhs,
+ x10, x11, x12, x13);
+ }
+
+ // Never fall through to here.
+ if (FLAG_debug_code) {
+ __ Unreachable();
+ }
+
+ __ Bind(&slow);
+
+ __ Push(lhs, rhs);
+ // Figure out which native to call and setup the arguments.
+ Builtins::JavaScript native;
+ if (cond == eq) {
+ native = strict() ? Builtins::STRICT_EQUALS : Builtins::EQUALS;
+ } else {
+ native = Builtins::COMPARE;
+ int ncr; // NaN compare result
+ if ((cond == lt) || (cond == le)) {
+ ncr = GREATER;
+ } else {
+ ASSERT((cond == gt) || (cond == ge)); // remaining cases
+ ncr = LESS;
+ }
+ __ Mov(x10, Smi::FromInt(ncr));
+ __ Push(x10);
+ }
+
+ // Call the native; it returns -1 (less), 0 (equal), or 1 (greater)
+ // tagged as a small integer.
+ __ InvokeBuiltin(native, JUMP_FUNCTION);
+
+ __ Bind(&miss);
+ GenerateMiss(masm);
+}
+
+
+void StoreBufferOverflowStub::Generate(MacroAssembler* masm) {
+ // Preserve caller-saved registers x0-x7 and x10-x15. We don't care if x8, x9,
+ // ip0 and ip1 are corrupted by the call into C.
+ CPURegList saved_regs = kCallerSaved;
+ saved_regs.Remove(ip0);
+ saved_regs.Remove(ip1);
+ saved_regs.Remove(x8);
+ saved_regs.Remove(x9);
+
+ // We don't allow a GC during a store buffer overflow so there is no need to
+ // store the registers in any particular way, but we do have to store and
+ // restore them.
+ __ PushCPURegList(saved_regs);
+ if (save_doubles_ == kSaveFPRegs) {
+ __ PushCPURegList(kCallerSavedFP);
+ }
+
+ AllowExternalCallThatCantCauseGC scope(masm);
+ __ Mov(x0, ExternalReference::isolate_address(masm->isolate()));
+ __ CallCFunction(
+ ExternalReference::store_buffer_overflow_function(masm->isolate()),
+ 1, 0);
+
+ if (save_doubles_ == kSaveFPRegs) {
+ __ PopCPURegList(kCallerSavedFP);
+ }
+ __ PopCPURegList(saved_regs);
+ __ Ret();
+}
+
+
+void StoreBufferOverflowStub::GenerateFixedRegStubsAheadOfTime(
+ Isolate* isolate) {
+ StoreBufferOverflowStub stub1(kDontSaveFPRegs);
+ stub1.GetCode(isolate);
+ StoreBufferOverflowStub stub2(kSaveFPRegs);
+ stub2.GetCode(isolate);
+}
+
+
+void StoreRegistersStateStub::Generate(MacroAssembler* masm) {
+ MacroAssembler::NoUseRealAbortsScope no_use_real_aborts(masm);
+ UseScratchRegisterScope temps(masm);
+ Register saved_lr = temps.UnsafeAcquire(to_be_pushed_lr());
+ Register return_address = temps.AcquireX();
+ __ Mov(return_address, lr);
+ // Restore lr with the value it had before the call to this stub (the value
+ // which must be pushed).
+ __ Mov(lr, saved_lr);
+ if (save_doubles_ == kSaveFPRegs) {
+ __ PushSafepointRegistersAndDoubles();
+ } else {
+ __ PushSafepointRegisters();
+ }
+ __ Ret(return_address);
+}
+
+
+void RestoreRegistersStateStub::Generate(MacroAssembler* masm) {
+ MacroAssembler::NoUseRealAbortsScope no_use_real_aborts(masm);
+ UseScratchRegisterScope temps(masm);
+ Register return_address = temps.AcquireX();
+ // Preserve the return address (lr will be clobbered by the pop).
+ __ Mov(return_address, lr);
+ if (save_doubles_ == kSaveFPRegs) {
+ __ PopSafepointRegistersAndDoubles();
+ } else {
+ __ PopSafepointRegisters();
+ }
+ __ Ret(return_address);
+}
+
+
+void MathPowStub::Generate(MacroAssembler* masm) {
+ // Stack on entry:
+ // jssp[0]: Exponent (as a tagged value).
+ // jssp[1]: Base (as a tagged value).
+ //
+ // The (tagged) result will be returned in x0, as a heap number.
+
+ Register result_tagged = x0;
+ Register base_tagged = x10;
+ Register exponent_tagged = x11;
+ Register exponent_integer = x12;
+ Register scratch1 = x14;
+ Register scratch0 = x15;
+ Register saved_lr = x19;
+ FPRegister result_double = d0;
+ FPRegister base_double = d0;
+ FPRegister exponent_double = d1;
+ FPRegister base_double_copy = d2;
+ FPRegister scratch1_double = d6;
+ FPRegister scratch0_double = d7;
+
+ // A fast-path for integer exponents.
+ Label exponent_is_smi, exponent_is_integer;
+ // Bail out to runtime.
+ Label call_runtime;
+ // Allocate a heap number for the result, and return it.
+ Label done;
+
+ // Unpack the inputs.
+ if (exponent_type_ == ON_STACK) {
+ Label base_is_smi;
+ Label unpack_exponent;
+
+ __ Pop(exponent_tagged, base_tagged);
+
+ __ JumpIfSmi(base_tagged, &base_is_smi);
+ __ JumpIfNotHeapNumber(base_tagged, &call_runtime);
+ // base_tagged is a heap number, so load its double value.
+ __ Ldr(base_double, FieldMemOperand(base_tagged, HeapNumber::kValueOffset));
+ __ B(&unpack_exponent);
+ __ Bind(&base_is_smi);
+ // base_tagged is a SMI, so untag it and convert it to a double.
+ __ SmiUntagToDouble(base_double, base_tagged);
+
+ __ Bind(&unpack_exponent);
+ // x10 base_tagged The tagged base (input).
+ // x11 exponent_tagged The tagged exponent (input).
+ // d1 base_double The base as a double.
+ __ JumpIfSmi(exponent_tagged, &exponent_is_smi);
+ __ JumpIfNotHeapNumber(exponent_tagged, &call_runtime);
+ // exponent_tagged is a heap number, so load its double value.
+ __ Ldr(exponent_double,
+ FieldMemOperand(exponent_tagged, HeapNumber::kValueOffset));
+ } else if (exponent_type_ == TAGGED) {
+ __ JumpIfSmi(exponent_tagged, &exponent_is_smi);
+ __ Ldr(exponent_double,
+ FieldMemOperand(exponent_tagged, HeapNumber::kValueOffset));
+ }
+
+ // Handle double (heap number) exponents.
+ if (exponent_type_ != INTEGER) {
+ // Detect integer exponents stored as doubles and handle those in the
+ // integer fast-path.
+ __ TryConvertDoubleToInt64(exponent_integer, exponent_double,
+ scratch0_double, &exponent_is_integer);
+
+ if (exponent_type_ == ON_STACK) {
+ FPRegister half_double = d3;
+ FPRegister minus_half_double = d4;
+ // Detect square root case. Crankshaft detects constant +/-0.5 at compile
+ // time and uses DoMathPowHalf instead. We then skip this check for
+ // non-constant cases of +/-0.5 as these hardly occur.
+
+ __ Fmov(minus_half_double, -0.5);
+ __ Fmov(half_double, 0.5);
+ __ Fcmp(minus_half_double, exponent_double);
+ __ Fccmp(half_double, exponent_double, NZFlag, ne);
+ // Condition flags at this point:
+ // 0.5: nZCv // Identified by eq && pl
+ // -0.5: NZcv // Identified by eq && mi
+ // other: ?z?? // Identified by ne
+ __ B(ne, &call_runtime);
+
+ // The exponent is 0.5 or -0.5.
+
+ // Given that exponent is known to be either 0.5 or -0.5, the following
+ // special cases could apply (according to ECMA-262 15.8.2.13):
+ //
+ // base.isNaN(): The result is NaN.
+ // (base == +INFINITY) || (base == -INFINITY)
+ // exponent == 0.5: The result is +INFINITY.
+ // exponent == -0.5: The result is +0.
+ // (base == +0) || (base == -0)
+ // exponent == 0.5: The result is +0.
+ // exponent == -0.5: The result is +INFINITY.
+ // (base < 0) && base.isFinite(): The result is NaN.
+ //
+ // Fsqrt (and Fdiv for the -0.5 case) can handle all of those except
+ // where base is -INFINITY or -0.
+
+ // Add +0 to base. This has no effect other than turning -0 into +0.
+ __ Fadd(base_double, base_double, fp_zero);
+ // The operation -0+0 results in +0 in all cases except where the
+ // FPCR rounding mode is 'round towards minus infinity' (RM). The
+ // ARM64 simulator does not currently simulate FPCR (where the rounding
+ // mode is set), so test the operation with some debug code.
+ if (masm->emit_debug_code()) {
+ UseScratchRegisterScope temps(masm);
+ Register temp = temps.AcquireX();
+ __ Fneg(scratch0_double, fp_zero);
+ // Verify that we correctly generated +0.0 and -0.0.
+ // bits(+0.0) = 0x0000000000000000
+ // bits(-0.0) = 0x8000000000000000
+ __ Fmov(temp, fp_zero);
+ __ CheckRegisterIsClear(temp, kCouldNotGenerateZero);
+ __ Fmov(temp, scratch0_double);
+ __ Eor(temp, temp, kDSignMask);
+ __ CheckRegisterIsClear(temp, kCouldNotGenerateNegativeZero);
+ // Check that -0.0 + 0.0 == +0.0.
+ __ Fadd(scratch0_double, scratch0_double, fp_zero);
+ __ Fmov(temp, scratch0_double);
+ __ CheckRegisterIsClear(temp, kExpectedPositiveZero);
+ }
+
+ // If base is -INFINITY, make it +INFINITY.
+ // * Calculate base - base: All infinities will become NaNs since both
+ // -INFINITY+INFINITY and +INFINITY-INFINITY are NaN in ARM64.
+ // * If the result is NaN, calculate abs(base).
+ __ Fsub(scratch0_double, base_double, base_double);
+ __ Fcmp(scratch0_double, 0.0);
+ __ Fabs(scratch1_double, base_double);
+ __ Fcsel(base_double, scratch1_double, base_double, vs);
+
+ // Calculate the square root of base.
+ __ Fsqrt(result_double, base_double);
+ __ Fcmp(exponent_double, 0.0);
+ __ B(ge, &done); // Finish now for exponents of 0.5.
+ // Find the inverse for exponents of -0.5.
+ __ Fmov(scratch0_double, 1.0);
+ __ Fdiv(result_double, scratch0_double, result_double);
+ __ B(&done);
+ }
+
+ {
+ AllowExternalCallThatCantCauseGC scope(masm);
+ __ Mov(saved_lr, lr);
+ __ CallCFunction(
+ ExternalReference::power_double_double_function(masm->isolate()),
+ 0, 2);
+ __ Mov(lr, saved_lr);
+ __ B(&done);
+ }
+
+ // Handle SMI exponents.
+ __ Bind(&exponent_is_smi);
+ // x10 base_tagged The tagged base (input).
+ // x11 exponent_tagged The tagged exponent (input).
+ // d1 base_double The base as a double.
+ __ SmiUntag(exponent_integer, exponent_tagged);
+ }
+
+ __ Bind(&exponent_is_integer);
+ // x10 base_tagged The tagged base (input).
+ // x11 exponent_tagged The tagged exponent (input).
+ // x12 exponent_integer The exponent as an integer.
+ // d1 base_double The base as a double.
+
+ // Find abs(exponent). For negative exponents, we can find the inverse later.
+ Register exponent_abs = x13;
+ __ Cmp(exponent_integer, 0);
+ __ Cneg(exponent_abs, exponent_integer, mi);
+ // x13 exponent_abs The value of abs(exponent_integer).
+
+ // Repeatedly multiply to calculate the power.
+ // result = 1.0;
+ // For each bit n (exponent_integer{n}) {
+ // if (exponent_integer{n}) {
+ // result *= base;
+ // }
+ // base *= base;
+ // if (remaining bits in exponent_integer are all zero) {
+ // break;
+ // }
+ // }
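+ //
+ // This is the usual square-and-multiply (binary exponentiation) scheme:
+ // each iteration squares the running base and multiplies it into the result
+ // when the corresponding exponent bit is set, so only about log2(exponent)
+ // floating-point multiplications are needed. For example, base^13
+ // (13 = 0b1101) is computed as base * base^4 * base^8.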
+ Label power_loop, power_loop_entry, power_loop_exit;
+ __ Fmov(scratch1_double, base_double);
+ __ Fmov(base_double_copy, base_double);
+ __ Fmov(result_double, 1.0);
+ __ B(&power_loop_entry);
+
+ __ Bind(&power_loop);
+ __ Fmul(scratch1_double, scratch1_double, scratch1_double);
+ __ Lsr(exponent_abs, exponent_abs, 1);
+ __ Cbz(exponent_abs, &power_loop_exit);
+
+ __ Bind(&power_loop_entry);
+ __ Tbz(exponent_abs, 0, &power_loop);
+ __ Fmul(result_double, result_double, scratch1_double);
+ __ B(&power_loop);
+
+ __ Bind(&power_loop_exit);
+
+ // If the exponent was positive, result_double holds the result.
+ __ Tbz(exponent_integer, kXSignBit, &done);
+
+ // The exponent was negative, so find the inverse.
+ __ Fmov(scratch0_double, 1.0);
+ __ Fdiv(result_double, scratch0_double, result_double);
+ // ECMA-262 only requires Math.pow to return an 'implementation-dependent
+ // approximation' of base^exponent. However, mjsunit/math-pow uses Math.pow
+ // to calculate the subnormal value 2^-1074. This method of calculating
+ // negative powers doesn't work because 2^1074 overflows to infinity. To
+ // catch this corner-case, we bail out if the result was 0. (This can only
+ // occur if the divisor is infinity or the base is zero.)
+ __ Fcmp(result_double, 0.0);
+ __ B(&done, ne);
+
+ if (exponent_type_ == ON_STACK) {
+ // Bail out to runtime code.
+ __ Bind(&call_runtime);
+ // Put the arguments back on the stack.
+ __ Push(base_tagged, exponent_tagged);
+ __ TailCallRuntime(Runtime::kMath_pow_cfunction, 2, 1);
+
+ // Return.
+ __ Bind(&done);
+ __ AllocateHeapNumber(result_tagged, &call_runtime, scratch0, scratch1);
+ __ Str(result_double,
+ FieldMemOperand(result_tagged, HeapNumber::kValueOffset));
+ ASSERT(result_tagged.is(x0));
+ __ IncrementCounter(
+ masm->isolate()->counters()->math_pow(), 1, scratch0, scratch1);
+ __ Ret();
+ } else {
+ AllowExternalCallThatCantCauseGC scope(masm);
+ __ Mov(saved_lr, lr);
+ __ Fmov(base_double, base_double_copy);
+ __ Scvtf(exponent_double, exponent_integer);
+ __ CallCFunction(
+ ExternalReference::power_double_double_function(masm->isolate()),
+ 0, 2);
+ __ Mov(lr, saved_lr);
+ __ Bind(&done);
+ __ IncrementCounter(
+ masm->isolate()->counters()->math_pow(), 1, scratch0, scratch1);
+ __ Ret();
+ }
+}
+
+
+void CodeStub::GenerateStubsAheadOfTime(Isolate* isolate) {
+ // It is important that the following stubs are generated in this order
+ // because pregenerated stubs can only call other pregenerated stubs.
+ // RecordWriteStub uses StoreBufferOverflowStub, which in turn uses
+ // CEntryStub.
+ CEntryStub::GenerateAheadOfTime(isolate);
+ StoreBufferOverflowStub::GenerateFixedRegStubsAheadOfTime(isolate);
+ StubFailureTrampolineStub::GenerateAheadOfTime(isolate);
+ ArrayConstructorStubBase::GenerateStubsAheadOfTime(isolate);
+ CreateAllocationSiteStub::GenerateAheadOfTime(isolate);
+ BinaryOpICStub::GenerateAheadOfTime(isolate);
+ StoreRegistersStateStub::GenerateAheadOfTime(isolate);
+ RestoreRegistersStateStub::GenerateAheadOfTime(isolate);
+ BinaryOpICWithAllocationSiteStub::GenerateAheadOfTime(isolate);
+}
+
+
+void StoreRegistersStateStub::GenerateAheadOfTime(Isolate* isolate) {
+ StoreRegistersStateStub stub1(kDontSaveFPRegs);
+ stub1.GetCode(isolate);
+ StoreRegistersStateStub stub2(kSaveFPRegs);
+ stub2.GetCode(isolate);
+}
+
+
+void RestoreRegistersStateStub::GenerateAheadOfTime(Isolate* isolate) {
+ RestoreRegistersStateStub stub1(kDontSaveFPRegs);
+ stub1.GetCode(isolate);
+ RestoreRegistersStateStub stub2(kSaveFPRegs);
+ stub2.GetCode(isolate);
+}
+
+
+void CodeStub::GenerateFPStubs(Isolate* isolate) {
+ // Floating-point code doesn't get special handling in ARM64, so there's
+ // nothing to do here.
+ USE(isolate);
+}
+
+
+bool CEntryStub::NeedsImmovableCode() {
+ // CEntryStub stores the return address on the stack before calling into
+ // C++ code. In some cases, the VM accesses this address, but it is not used
+ // when the C++ code returns to the stub because LR holds the return address
+ // in AAPCS64. If the stub is moved (perhaps during a GC), we could end up
+ // returning to dead code.
+ // TODO(jbramley): Whilst this is the only analysis that makes sense, I can't
+ // find any comment to confirm this, and I don't hit any crashes regardless
+ // of what this function returns. The analysis should be properly confirmed.
+ return true;
+}
+
+
+void CEntryStub::GenerateAheadOfTime(Isolate* isolate) {
+ CEntryStub stub(1, kDontSaveFPRegs);
+ stub.GetCode(isolate);
+ CEntryStub stub_fp(1, kSaveFPRegs);
+ stub_fp.GetCode(isolate);
+}
+
+
+void CEntryStub::GenerateCore(MacroAssembler* masm,
+ Label* throw_normal,
+ Label* throw_termination,
+ bool do_gc,
+ bool always_allocate) {
+ // x0 : Result parameter for PerformGC, if do_gc is true.
+ // x21 : argv
+ // x22 : argc
+ // x23 : target
+ //
+ // The stack (on entry) holds the arguments and the receiver, with the
+ // receiver at the highest address:
+ //
+ // argv[8]: receiver
+ // argv -> argv[0]: arg[argc-2]
+ // ... ...
+ // argv[...]: arg[1]
+ // argv[...]: arg[0]
+ //
+ // Immediately below (after) this is the exit frame, as constructed by
+ // EnterExitFrame:
+ // fp[8]: CallerPC (lr)
+ // fp -> fp[0]: CallerFP (old fp)
+ // fp[-8]: Space reserved for SPOffset.
+ // fp[-16]: CodeObject()
+ // csp[...]: Saved doubles, if saved_doubles is true.
+ // csp[32]: Alignment padding, if necessary.
+ // csp[24]: Preserved x23 (used for target).
+ // csp[16]: Preserved x22 (used for argc).
+ // csp[8]: Preserved x21 (used for argv).
+ // csp -> csp[0]: Space reserved for the return address.
+ //
+ // After a successful call, the exit frame, preserved registers (x21-x23) and
+ // the arguments (including the receiver) are dropped or popped as
+ // appropriate. The stub then returns.
+ //
+ // After an unsuccessful call, the exit frame and suchlike are left
+ // untouched, and the stub either throws an exception by jumping to one of
+ // the provided throw_ labels, or it falls through. The failure details are
+ // passed through in x0.
+ ASSERT(csp.Is(__ StackPointer()));
+
+ Isolate* isolate = masm->isolate();
+
+ const Register& argv = x21;
+ const Register& argc = x22;
+ const Register& target = x23;
+
+ if (do_gc) {
+ // Call Runtime::PerformGC, passing x0 (the result parameter for
+ // PerformGC) and x1 (the isolate).
+ __ Mov(x1, ExternalReference::isolate_address(masm->isolate()));
+ __ CallCFunction(
+ ExternalReference::perform_gc_function(isolate), 2, 0);
+ }
+
+ ExternalReference scope_depth =
+ ExternalReference::heap_always_allocate_scope_depth(isolate);
+ if (always_allocate) {
+ __ Mov(x10, Operand(scope_depth));
+ __ Ldr(x11, MemOperand(x10));
+ __ Add(x11, x11, 1);
+ __ Str(x11, MemOperand(x10));
+ }
+
+ // Prepare AAPCS64 arguments to pass to the builtin.
+ __ Mov(x0, argc);
+ __ Mov(x1, argv);
+ __ Mov(x2, ExternalReference::isolate_address(isolate));
+
+ // Store the return address on the stack, in the space previously allocated
+ // by EnterExitFrame. The return address is queried by
+ // ExitFrame::GetStateForFramePointer.
+ Label return_location;
+ __ Adr(x12, &return_location);
+ __ Poke(x12, 0);
+ if (__ emit_debug_code()) {
+ // Verify that the slot one word below the address stored at fp[kSPOffset]
+ // holds the return location (currently in x12).
+ UseScratchRegisterScope temps(masm);
+ Register temp = temps.AcquireX();
+ __ Ldr(temp, MemOperand(fp, ExitFrameConstants::kSPOffset));
+ __ Ldr(temp, MemOperand(temp, -static_cast<int64_t>(kXRegSize)));
+ __ Cmp(temp, x12);
+ __ Check(eq, kReturnAddressNotFoundInFrame);
+ }
+
+ // Call the builtin.
+ __ Blr(target);
+ __ Bind(&return_location);
+ const Register& result = x0;
+
+ if (always_allocate) {
+ __ Mov(x10, Operand(scope_depth));
+ __ Ldr(x11, MemOperand(x10));
+ __ Sub(x11, x11, 1);
+ __ Str(x11, MemOperand(x10));
+ }
+
+ // x0 result The return code from the call.
+ // x21 argv
+ // x22 argc
+ // x23 target
+ //
+ // If all of the result bits matching kFailureTagMask are '1', the result is
+ // a failure. Otherwise, it's an ordinary tagged object and the call was a
+ // success.
+ Label failure;
+ __ And(x10, result, kFailureTagMask);
+ __ Cmp(x10, kFailureTagMask);
+ __ B(&failure, eq);
+
+ // The call succeeded, so unwind the stack and return.
+
+ // Restore callee-saved registers x21-x23.
+ __ Mov(x11, argc);
+
+ __ Peek(argv, 1 * kPointerSize);
+ __ Peek(argc, 2 * kPointerSize);
+ __ Peek(target, 3 * kPointerSize);
+
+ __ LeaveExitFrame(save_doubles_, x10, true);
+ ASSERT(jssp.Is(__ StackPointer()));
+ // Pop or drop the remaining stack slots and return from the stub.
+ // jssp[24]: Arguments array (of size argc), including receiver.
+ // jssp[16]: Preserved x23 (used for target).
+ // jssp[8]: Preserved x22 (used for argc).
+ // jssp[0]: Preserved x21 (used for argv).
+ __ Drop(x11);
+ __ Ret();
+
+ // The stack pointer is still csp if we aren't returning, and the frame
+ // hasn't changed (except for the return address).
+ __ SetStackPointer(csp);
+
+ __ Bind(&failure);
+ // The call failed, so check if we need to throw an exception, and fall
+ // through (to retry) otherwise.
+
+ Label retry;
+ // x0 result The return code from the call, including the failure
+ // code and details.
+ // x21 argv
+ // x22 argc
+ // x23 target
+ // Refer to the Failure class for details of the bit layout.
+ STATIC_ASSERT(Failure::RETRY_AFTER_GC == 0);
+ __ Tst(result, kFailureTypeTagMask << kFailureTagSize);
+ __ B(eq, &retry); // RETRY_AFTER_GC
+
+ // Retrieve the pending exception.
+ const Register& exception = result;
+ const Register& exception_address = x11;
+ __ Mov(exception_address,
+ Operand(ExternalReference(Isolate::kPendingExceptionAddress,
+ isolate)));
+ __ Ldr(exception, MemOperand(exception_address));
+
+ // Clear the pending exception.
+ __ Mov(x10, Operand(isolate->factory()->the_hole_value()));
+ __ Str(x10, MemOperand(exception_address));
+
+ // x0 exception The exception descriptor.
+ // x21 argv
+ // x22 argc
+ // x23 target
+
+ // Special handling of termination exceptions, which are uncatchable by
+ // JavaScript code.
+ __ Cmp(exception, Operand(isolate->factory()->termination_exception()));
+ __ B(eq, throw_termination);
+
+ // Handle normal exception.
+ __ B(throw_normal);
+
+ __ Bind(&retry);
+ // The result (x0) is passed through as the next PerformGC parameter.
+}
+
+
+void CEntryStub::Generate(MacroAssembler* masm) {
+ // The Abort mechanism relies on CallRuntime, which in turn relies on
+ // CEntryStub, so until this stub has been generated, we have to use a
+ // fall-back Abort mechanism.
+ //
+ // Note that this stub must be generated before any use of Abort.
+ MacroAssembler::NoUseRealAbortsScope no_use_real_aborts(masm);
+
+ ASM_LOCATION("CEntryStub::Generate entry");
+ ProfileEntryHookStub::MaybeCallEntryHook(masm);
+
+ // Register parameters:
+ // x0: argc (including receiver, untagged)
+ // x1: target
+ //
+ // The stack on entry holds the arguments and the receiver, with the receiver
+ // at the highest address:
+ //
+ // jssp[argc-1]: receiver
+ // jssp[argc-2]: arg[argc-2]
+ // ... ...
+ // jssp[1]: arg[1]
+ // jssp[0]: arg[0]
+ //
+ // The arguments are in reverse order, so that arg[argc-2] is actually the
+ // first argument to the target function and arg[0] is the last.
+ ASSERT(jssp.Is(__ StackPointer()));
+ const Register& argc_input = x0;
+ const Register& target_input = x1;
+
+ // Calculate argv, argc and the target address, and store them in
+ // callee-saved registers so we can retry the call without having to reload
+ // these arguments.
+ // TODO(jbramley): If the first call attempt succeeds in the common case (as
+ // it should), then we might be better off putting these parameters directly
+ // into their argument registers, rather than using callee-saved registers and
+ // preserving them on the stack.
+ const Register& argv = x21;
+ const Register& argc = x22;
+ const Register& target = x23;
+
+ // Derive argv from the stack pointer so that it points to the first argument
+ // (arg[argc-2]), or just below the receiver in case there are no arguments.
+ // - Adjust for the arg[] array.
+ Register temp_argv = x11;
+ __ Add(temp_argv, jssp, Operand(x0, LSL, kPointerSizeLog2));
+ // - Adjust for the receiver.
+ __ Sub(temp_argv, temp_argv, 1 * kPointerSize);
+
+ // Enter the exit frame. Reserve three slots to preserve x21-x23 callee-saved
+ // registers.
+ FrameScope scope(masm, StackFrame::MANUAL);
+ __ EnterExitFrame(save_doubles_, x10, 3);
+ ASSERT(csp.Is(__ StackPointer()));
+
+ // Poke callee-saved registers into reserved space.
+ __ Poke(argv, 1 * kPointerSize);
+ __ Poke(argc, 2 * kPointerSize);
+ __ Poke(target, 3 * kPointerSize);
+
+ // We normally only keep tagged values in callee-saved registers, as they
+ // could be pushed onto the stack by called stubs and functions, and on the
+ // stack they can confuse the GC. However, we're only calling C functions
+ // which can push arbitrary data onto the stack anyway, and so the GC won't
+ // examine that part of the stack.
+ __ Mov(argc, argc_input);
+ __ Mov(target, target_input);
+ __ Mov(argv, temp_argv);
+
+ Label throw_normal;
+ Label throw_termination;
+
+ // Call the runtime function.
+ GenerateCore(masm,
+ &throw_normal,
+ &throw_termination,
+ false,
+ false);
+
+ // If successful, the previous GenerateCore will have returned to the
+ // calling code. Otherwise, we fall through into the following.
+
+ // Do space-specific GC and retry runtime call.
+ GenerateCore(masm,
+ &throw_normal,
+ &throw_termination,
+ true,
+ false);
+
+ // Do full GC and retry runtime call one final time.
+ __ Mov(x0, reinterpret_cast<uint64_t>(Failure::InternalError()));
+ GenerateCore(masm,
+ &throw_normal,
+ &throw_termination,
+ true,
+ true);
+
+ { FrameScope scope(masm, StackFrame::MANUAL);
+ __ CallCFunction(
+ ExternalReference::out_of_memory_function(masm->isolate()), 0);
+ }
+
+ // We didn't execute a return case, so the stack frame hasn't been updated
+ // (except for the return address slot). However, we don't need to initialize
+ // jssp because the throw method will immediately overwrite it when it
+ // unwinds the stack.
+ __ SetStackPointer(jssp);
+
+ // Throw exceptions.
+ // If we throw an exception, we can end up re-entering CEntryStub before we
+ // pop the exit frame, so need to ensure that x21-x23 contain GC-safe values
+ // here.
+
+ __ Bind(&throw_termination);
+ ASM_LOCATION("Throw termination");
+ __ Mov(argv, 0);
+ __ Mov(argc, 0);
+ __ Mov(target, 0);
+ __ ThrowUncatchable(x0, x10, x11, x12, x13);
+
+ __ Bind(&throw_normal);
+ ASM_LOCATION("Throw normal");
+ __ Mov(argv, 0);
+ __ Mov(argc, 0);
+ __ Mov(target, 0);
+ __ Throw(x0, x10, x11, x12, x13);
+}
+
+
+// This is the entry point from C++. 5 arguments are provided in x0-x4.
+// See use of the CALL_GENERATED_CODE macro for example in src/execution.cc.
+// Input:
+// x0: code entry.
+// x1: function.
+// x2: receiver.
+// x3: argc.
+// x4: argv.
+// Output:
+// x0: result.
+void JSEntryStub::GenerateBody(MacroAssembler* masm, bool is_construct) {
+ ASSERT(jssp.Is(__ StackPointer()));
+ Register code_entry = x0;
+
+ // Enable instruction instrumentation. This only works on the simulator, and
+ // will have no effect on the model or real hardware.
+ __ EnableInstrumentation();
+
+ Label invoke, handler_entry, exit;
+
+ // Push callee-saved registers and synchronize the system stack pointer (csp)
+ // and the JavaScript stack pointer (jssp).
+ //
+ // We must not write to jssp until after the PushCalleeSavedRegisters()
+ // call, since jssp is itself a callee-saved register.
+ __ SetStackPointer(csp);
+ __ PushCalleeSavedRegisters();
+ __ Mov(jssp, csp);
+ __ SetStackPointer(jssp);
+
+ ProfileEntryHookStub::MaybeCallEntryHook(masm);
+
+ // Set up the reserved register for 0.0.
+ __ Fmov(fp_zero, 0.0);
+
+ // Build an entry frame (see layout below).
+ Isolate* isolate = masm->isolate();
+
+ // Build an entry frame.
+ int marker = is_construct ? StackFrame::ENTRY_CONSTRUCT : StackFrame::ENTRY;
+ int64_t bad_frame_pointer = -1L; // Bad frame pointer to fail if it is used.
+ __ Mov(x13, bad_frame_pointer);
+ __ Mov(x12, Smi::FromInt(marker));
+ __ Mov(x11, ExternalReference(Isolate::kCEntryFPAddress, isolate));
+ __ Ldr(x10, MemOperand(x11));
+
+ __ Push(x13, xzr, x12, x10);
+ // Set up fp.
+ __ Sub(fp, jssp, EntryFrameConstants::kCallerFPOffset);
+
+ // Push the JS entry frame marker. Also set js_entry_sp if this is the
+ // outermost JS call.
+ Label non_outermost_js, done;
+ ExternalReference js_entry_sp(Isolate::kJSEntrySPAddress, isolate);
+ __ Mov(x10, ExternalReference(js_entry_sp));
+ __ Ldr(x11, MemOperand(x10));
+ __ Cbnz(x11, &non_outermost_js);
+ __ Str(fp, MemOperand(x10));
+ __ Mov(x12, Smi::FromInt(StackFrame::OUTERMOST_JSENTRY_FRAME));
+ __ Push(x12);
+ __ B(&done);
+ __ Bind(&non_outermost_js);
+ // We spare one instruction by pushing xzr since the marker is 0.
+ ASSERT(Smi::FromInt(StackFrame::INNER_JSENTRY_FRAME) == NULL);
+ __ Push(xzr);
+ __ Bind(&done);
+
+ // The frame set up looks like this:
+ // jssp[0] : JS entry frame marker.
+ // jssp[1] : C entry FP.
+ // jssp[2] : stack frame marker.
+ // jssp[3] : stack frame marker.
+ // jssp[4] : bad frame pointer 0xfff...ff <- fp points here.
+
+
+ // Jump to a faked try block that does the invoke, with a faked catch
+ // block that sets the pending exception.
+ __ B(&invoke);
+
+ // Prevent the constant pool from being emitted between the record of the
+ // handler_entry position and the first instruction of the sequence here.
+ // There is no risk because Assembler::Emit() emits the instruction before
+ // checking for constant pool emission, but we do not want to depend on
+ // that.
+ {
+ Assembler::BlockPoolsScope block_pools(masm);
+ __ bind(&handler_entry);
+ handler_offset_ = handler_entry.pos();
+ // Caught exception: Store result (exception) in the pending exception
+ // field in the JSEnv and return a failure sentinel. Coming in here the
+ // fp will be invalid because the PushTryHandler below sets it to 0 to
+ // signal the existence of the JSEntry frame.
+ __ Mov(x10, Operand(ExternalReference(Isolate::kPendingExceptionAddress,
+ isolate)));
+ }
+ __ Str(code_entry, MemOperand(x10));
+ __ Mov(x0, Operand(reinterpret_cast<int64_t>(Failure::Exception())));
+ __ B(&exit);
+
+ // Invoke: Link this frame into the handler chain. There's only one
+ // handler block in this code object, so its index is 0.
+ __ Bind(&invoke);
+ __ PushTryHandler(StackHandler::JS_ENTRY, 0);
+ // If an exception not caught by another handler occurs, this handler
+ // returns control to the code after the B(&invoke) above, which
+ // restores all callee-saved registers (including cp and fp) to their
+ // saved values before returning a failure to C.
+
+ // Clear any pending exceptions.
+ __ Mov(x10, Operand(isolate->factory()->the_hole_value()));
+ __ Mov(x11, Operand(ExternalReference(Isolate::kPendingExceptionAddress,
+ isolate)));
+ __ Str(x10, MemOperand(x11));
+
+ // Invoke the function by calling through the JS entry trampoline builtin.
+ // Notice that we cannot store a reference to the trampoline code directly in
+ // this stub, because runtime stubs are not traversed when doing GC.
+
+ // Expected registers by Builtins::JSEntryTrampoline
+ // x0: code entry.
+ // x1: function.
+ // x2: receiver.
+ // x3: argc.
+ // x4: argv.
+ ExternalReference entry(is_construct ? Builtins::kJSConstructEntryTrampoline
+ : Builtins::kJSEntryTrampoline,
+ isolate);
+ __ Mov(x10, entry);
+
+ // Call the JSEntryTrampoline.
+ __ Ldr(x11, MemOperand(x10)); // Dereference the address.
+ __ Add(x12, x11, Code::kHeaderSize - kHeapObjectTag);
+ __ Blr(x12);
+
+ // Unlink this frame from the handler chain.
+ __ PopTryHandler();
+
+
+ __ Bind(&exit);
+ // x0 holds the result.
+ // The stack pointer points to the top of the entry frame pushed on entry from
+ // C++ (at the beginning of this stub):
+ // jssp[0] : JS entry frame marker.
+ // jssp[1] : C entry FP.
+ // jssp[2] : stack frame marker.
+ // jssp[3] : stack frame marker.
+ // jssp[4] : bad frame pointer 0xfff...ff <- fp points here.
+
+ // Check if the current stack frame is marked as the outermost JS frame.
+ Label non_outermost_js_2;
+ __ Pop(x10);
+ __ Cmp(x10, Smi::FromInt(StackFrame::OUTERMOST_JSENTRY_FRAME));
+ __ B(ne, &non_outermost_js_2);
+ __ Mov(x11, ExternalReference(js_entry_sp));
+ __ Str(xzr, MemOperand(x11));
+ __ Bind(&non_outermost_js_2);
+
+ // Restore the top frame descriptors from the stack.
+ __ Pop(x10);
+ __ Mov(x11, ExternalReference(Isolate::kCEntryFPAddress, isolate));
+ __ Str(x10, MemOperand(x11));
+
+ // Reset the stack to the callee saved registers.
+ __ Drop(-EntryFrameConstants::kCallerFPOffset, kByteSizeInBytes);
+ // Restore the callee-saved registers and return.
+ ASSERT(jssp.Is(__ StackPointer()));
+ __ Mov(csp, jssp);
+ __ SetStackPointer(csp);
+ __ PopCalleeSavedRegisters();
+ // After this point, we must not modify jssp because it is a callee-saved
+ // register which we have just restored.
+ __ Ret();
+}
+
+
+void FunctionPrototypeStub::Generate(MacroAssembler* masm) {
+ Label miss;
+ Register receiver;
+ if (kind() == Code::KEYED_LOAD_IC) {
+ // ----------- S t a t e -------------
+ // -- lr : return address
+ // -- x1 : receiver
+ // -- x0 : key
+ // -----------------------------------
+ Register key = x0;
+ receiver = x1;
+ __ Cmp(key, Operand(masm->isolate()->factory()->prototype_string()));
+ __ B(ne, &miss);
+ } else {
+ ASSERT(kind() == Code::LOAD_IC);
+ // ----------- S t a t e -------------
+ // -- lr : return address
+ // -- x2 : name
+ // -- x0 : receiver
+ // -- sp[0] : receiver
+ // -----------------------------------
+ receiver = x0;
+ }
+
+ StubCompiler::GenerateLoadFunctionPrototype(masm, receiver, x10, x11, &miss);
+
+ __ Bind(&miss);
+ StubCompiler::TailCallBuiltin(masm,
+ BaseLoadStoreStubCompiler::MissBuiltin(kind()));
+}
+
+
+void InstanceofStub::Generate(MacroAssembler* masm) {
+ // Stack on entry:
+ // jssp[0]: function.
+ // jssp[8]: object.
+ //
+ // Returns result in x0. Zero indicates instanceof, smi 1 indicates not
+ // instanceof.
+
+ Register result = x0;
+ Register function = right();
+ Register object = left();
+ Register scratch1 = x6;
+ Register scratch2 = x7;
+ Register res_true = x8;
+ Register res_false = x9;
+ // Only used if there was an inline map check site. (See
+ // LCodeGen::DoInstanceOfKnownGlobal().)
+ Register map_check_site = x4;
+ // Delta for the instructions generated between the inline map check and the
+ // instruction setting the result.
+ const int32_t kDeltaToLoadBoolResult = 4 * kInstructionSize;
+
+ Label not_js_object, slow;
+
+ if (!HasArgsInRegisters()) {
+ __ Pop(function, object);
+ }
+
+ if (ReturnTrueFalseObject()) {
+ __ LoadTrueFalseRoots(res_true, res_false);
+ } else {
+ // This is counter-intuitive, but correct.
+ __ Mov(res_true, Smi::FromInt(0));
+ __ Mov(res_false, Smi::FromInt(1));
+ }
+
+ // Check that the left hand side is a JS object and load its map as a side
+ // effect.
+ Register map = x12;
+ __ JumpIfSmi(object, &not_js_object);
+ __ IsObjectJSObjectType(object, map, scratch2, &not_js_object);
+
+ // If there is a call site cache, don't look in the global cache, but do the
+ // real lookup and update the call site cache.
+ if (!HasCallSiteInlineCheck()) {
+ Label miss;
+ __ JumpIfNotRoot(function, Heap::kInstanceofCacheFunctionRootIndex, &miss);
+ __ JumpIfNotRoot(map, Heap::kInstanceofCacheMapRootIndex, &miss);
+ __ LoadRoot(result, Heap::kInstanceofCacheAnswerRootIndex);
+ __ Ret();
+ __ Bind(&miss);
+ }
+
+ // Get the prototype of the function.
+ Register prototype = x13;
+ __ TryGetFunctionPrototype(function, prototype, scratch2, &slow,
+ MacroAssembler::kMissOnBoundFunction);
+
+ // Check that the function prototype is a JS object.
+ __ JumpIfSmi(prototype, &slow);
+ __ IsObjectJSObjectType(prototype, scratch1, scratch2, &slow);
+
+ // Update the global instanceof or call site inlined cache with the current
+ // map and function. The cached answer will be set when it is known below.
+ if (HasCallSiteInlineCheck()) {
+ // Patch the (relocated) inlined map check.
+ __ GetRelocatedValueLocation(map_check_site, scratch1);
+ // We have a cell, so we need another level of dereferencing.
+ __ Ldr(scratch1, MemOperand(scratch1));
+ __ Str(map, FieldMemOperand(scratch1, Cell::kValueOffset));
+ } else {
+ __ StoreRoot(function, Heap::kInstanceofCacheFunctionRootIndex);
+ __ StoreRoot(map, Heap::kInstanceofCacheMapRootIndex);
+ }
+
+ Label return_true, return_result;
+ {
+ // Loop through the prototype chain looking for the function prototype.
+ Register chain_map = x1;
+ Register chain_prototype = x14;
+ Register null_value = x15;
+ Label loop;
+ __ Ldr(chain_prototype, FieldMemOperand(map, Map::kPrototypeOffset));
+ __ LoadRoot(null_value, Heap::kNullValueRootIndex);
+ // Speculatively set a result.
+ __ Mov(result, res_false);
+
+ __ Bind(&loop);
+
+ // If the chain prototype is the object prototype, return true.
+ __ Cmp(chain_prototype, prototype);
+ __ B(eq, &return_true);
+
+ // If the chain prototype is null, we've reached the end of the chain, so
+ // return false.
+ __ Cmp(chain_prototype, null_value);
+ __ B(eq, &return_result);
+
+ // Otherwise, load the next prototype in the chain, and loop.
+ __ Ldr(chain_map, FieldMemOperand(chain_prototype, HeapObject::kMapOffset));
+ __ Ldr(chain_prototype, FieldMemOperand(chain_map, Map::kPrototypeOffset));
+ __ B(&loop);
+ }
+
+ // Return sequence when no arguments are on the stack.
+ // We cannot fall through to here.
+ __ Bind(&return_true);
+ __ Mov(result, res_true);
+ __ Bind(&return_result);
+ if (HasCallSiteInlineCheck()) {
+ ASSERT(ReturnTrueFalseObject());
+ __ Add(map_check_site, map_check_site, kDeltaToLoadBoolResult);
+ __ GetRelocatedValueLocation(map_check_site, scratch2);
+ __ Str(result, MemOperand(scratch2));
+ } else {
+ __ StoreRoot(result, Heap::kInstanceofCacheAnswerRootIndex);
+ }
+ __ Ret();
+
+ Label object_not_null, object_not_null_or_smi;
+
+ __ Bind(&not_js_object);
+ Register object_type = x14;
+ // x0 result result return register (uninit)
+ // x10 function pointer to function
+ // x11 object pointer to object
+ // x14 object_type type of object (uninit)
+
+ // Before null, smi and string checks, check that the rhs is a function.
+ // For a non-function rhs, an exception must be thrown.
+ __ JumpIfSmi(function, &slow);
+ __ JumpIfNotObjectType(
+ function, scratch1, object_type, JS_FUNCTION_TYPE, &slow);
+
+ __ Mov(result, res_false);
+
+ // Null is not an instance of anything.
+ __ Cmp(object_type, Operand(masm->isolate()->factory()->null_value()));
+ __ B(ne, &object_not_null);
+ __ Ret();
+
+ __ Bind(&object_not_null);
+ // Smi values are not instances of anything.
+ __ JumpIfNotSmi(object, &object_not_null_or_smi);
+ __ Ret();
+
+ __ Bind(&object_not_null_or_smi);
+ // String values are not instances of anything.
+ __ IsObjectJSStringType(object, scratch2, &slow);
+ __ Ret();
+
+ // Slow-case. Tail call builtin.
+ __ Bind(&slow);
+ {
+ FrameScope scope(masm, StackFrame::INTERNAL);
+ // Arguments have either been passed into registers or have been previously
+ // popped. We need to push them before calling builtin.
+ __ Push(object, function);
+ __ InvokeBuiltin(Builtins::INSTANCE_OF, CALL_FUNCTION);
+ }
+ if (ReturnTrueFalseObject()) {
+ // Reload true/false because they were clobbered in the builtin call.
+ __ LoadTrueFalseRoots(res_true, res_false);
+ __ Cmp(result, 0);
+ __ Csel(result, res_true, res_false, eq);
+ }
+ __ Ret();
+}
+
+
+Register InstanceofStub::left() {
+ // Object to check (instanceof lhs).
+ return x11;
+}
+
+
+Register InstanceofStub::right() {
+ // Constructor function (instanceof rhs).
+ return x10;
+}
+
+
+void ArgumentsAccessStub::GenerateReadElement(MacroAssembler* masm) {
+ Register arg_count = x0;
+ Register key = x1;
+
+ // The displacement is the offset of the last parameter (if any) relative
+ // to the frame pointer.
+ static const int kDisplacement =
+ StandardFrameConstants::kCallerSPOffset - kPointerSize;
+
+ // Check that the key is a smi.
+ Label slow;
+ __ JumpIfNotSmi(key, &slow);
+
+ // Check if the calling frame is an arguments adaptor frame.
+ Register local_fp = x11;
+ Register caller_fp = x11;
+ Register caller_ctx = x12;
+ Label skip_adaptor;
+ __ Ldr(caller_fp, MemOperand(fp, StandardFrameConstants::kCallerFPOffset));
+ __ Ldr(caller_ctx, MemOperand(caller_fp,
+ StandardFrameConstants::kContextOffset));
+ __ Cmp(caller_ctx, Smi::FromInt(StackFrame::ARGUMENTS_ADAPTOR));
+ __ Csel(local_fp, fp, caller_fp, ne);
+ __ B(ne, &skip_adaptor);
+
+ // Load the actual arguments limit found in the arguments adaptor frame.
+ __ Ldr(arg_count, MemOperand(caller_fp,
+ ArgumentsAdaptorFrameConstants::kLengthOffset));
+ __ Bind(&skip_adaptor);
+
+ // Check index against formal parameters count limit. Use unsigned comparison
+ // to get negative check for free: branch if key < 0 or key >= arg_count.
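+ // A negative (smi) key has its sign bit set, so as an unsigned value it is
+ // larger than any valid arg_count and is caught by the same 'hs' branch.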
+ __ Cmp(key, arg_count);
+ __ B(hs, &slow);
+
+ // Read the argument from the stack and return it.
+ __ Sub(x10, arg_count, key);
+ __ Add(x10, local_fp, Operand::UntagSmiAndScale(x10, kPointerSizeLog2));
+ __ Ldr(x0, MemOperand(x10, kDisplacement));
+ __ Ret();
+
+ // Slow case: handle non-smi or out-of-bounds access to arguments by calling
+ // the runtime system.
+ __ Bind(&slow);
+ __ Push(key);
+ __ TailCallRuntime(Runtime::kGetArgumentsProperty, 1, 1);
+}
+
+
+void ArgumentsAccessStub::GenerateNewSloppySlow(MacroAssembler* masm) {
+ // Stack layout on entry.
+ // jssp[0]: number of parameters (tagged)
+ // jssp[8]: address of receiver argument
+ // jssp[16]: function
+
+ // Check if the calling frame is an arguments adaptor frame.
+ Label runtime;
+ Register caller_fp = x10;
+ __ Ldr(caller_fp, MemOperand(fp, StandardFrameConstants::kCallerFPOffset));
+ // Load and untag the context.
+ STATIC_ASSERT((kSmiShift / kBitsPerByte) == 4);
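+ // Smis store their payload in the upper 32 bits, so loading only the high
+ // word of the tagged slot (at offset +4) gives the untagged value directly.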
+ __ Ldr(w11, MemOperand(caller_fp, StandardFrameConstants::kContextOffset +
+ (kSmiShift / kBitsPerByte)));
+ __ Cmp(w11, StackFrame::ARGUMENTS_ADAPTOR);
+ __ B(ne, &runtime);
+
+ // Patch the arguments.length and parameters pointer in the current frame.
+ __ Ldr(x11, MemOperand(caller_fp,
+ ArgumentsAdaptorFrameConstants::kLengthOffset));
+ __ Poke(x11, 0 * kXRegSize);
+ __ Add(x10, caller_fp, Operand::UntagSmiAndScale(x11, kPointerSizeLog2));
+ __ Add(x10, x10, StandardFrameConstants::kCallerSPOffset);
+ __ Poke(x10, 1 * kXRegSize);
+
+ __ Bind(&runtime);
+ __ TailCallRuntime(Runtime::kHiddenNewArgumentsFast, 3, 1);
+}
+
+
+void ArgumentsAccessStub::GenerateNewSloppyFast(MacroAssembler* masm) {
+ // Stack layout on entry.
+ // jssp[0]: number of parameters (tagged)
+ // jssp[8]: address of receiver argument
+ // jssp[16]: function
+ //
+ // Returns pointer to result object in x0.
+
+ // Note: arg_count_smi is an alias of param_count_smi.
+ Register arg_count_smi = x3;
+ Register param_count_smi = x3;
+ Register param_count = x7;
+ Register recv_arg = x14;
+ Register function = x4;
+ __ Pop(param_count_smi, recv_arg, function);
+ __ SmiUntag(param_count, param_count_smi);
+
+ // Check if the calling frame is an arguments adaptor frame.
+ Register caller_fp = x11;
+ Register caller_ctx = x12;
+ Label runtime;
+ Label adaptor_frame, try_allocate;
+ __ Ldr(caller_fp, MemOperand(fp, StandardFrameConstants::kCallerFPOffset));
+ __ Ldr(caller_ctx, MemOperand(caller_fp,
+ StandardFrameConstants::kContextOffset));
+ __ Cmp(caller_ctx, Smi::FromInt(StackFrame::ARGUMENTS_ADAPTOR));
+ __ B(eq, &adaptor_frame);
+
+ // No adaptor, parameter count = argument count.
+
+ // x1 mapped_params number of mapped params, min(params, args) (uninit)
+ // x2 arg_count number of function arguments (uninit)
+ // x3 arg_count_smi number of function arguments (smi)
+ // x4 function function pointer
+ // x7 param_count number of function parameters
+ // x11 caller_fp caller's frame pointer
+ // x14 recv_arg pointer to receiver arguments
+
+ Register arg_count = x2;
+ __ Mov(arg_count, param_count);
+ __ B(&try_allocate);
+
+ // We have an adaptor frame. Patch the parameters pointer.
+ __ Bind(&adaptor_frame);
+ __ Ldr(arg_count_smi,
+ MemOperand(caller_fp,
+ ArgumentsAdaptorFrameConstants::kLengthOffset));
+ __ SmiUntag(arg_count, arg_count_smi);
+ __ Add(x10, caller_fp, Operand(arg_count, LSL, kPointerSizeLog2));
+ __ Add(recv_arg, x10, StandardFrameConstants::kCallerSPOffset);
+
+ // Compute the mapped parameter count = min(param_count, arg_count)
+ Register mapped_params = x1;
+ __ Cmp(param_count, arg_count);
+ __ Csel(mapped_params, param_count, arg_count, lt);
+
+ __ Bind(&try_allocate);
+
+ // x0 alloc_obj pointer to allocated objects: param map, backing
+ // store, arguments (uninit)
+ // x1 mapped_params number of mapped parameters, min(params, args)
+ // x2 arg_count number of function arguments
+ // x3 arg_count_smi number of function arguments (smi)
+ // x4 function function pointer
+ // x7 param_count number of function parameters
+ // x10 size size of objects to allocate (uninit)
+ // x14 recv_arg pointer to receiver arguments
+
+ // Compute the size of backing store, parameter map, and arguments object.
+ // 1. The parameter map has two extra words containing the context and the
+ // backing store.
+ const int kParameterMapHeaderSize =
+ FixedArray::kHeaderSize + 2 * kPointerSize;
+
+ // Calculate the parameter map size, assuming it exists.
+ Register size = x10;
+ __ Mov(size, Operand(mapped_params, LSL, kPointerSizeLog2));
+ __ Add(size, size, kParameterMapHeaderSize);
+
+ // If there are no mapped parameters, set the running size total to zero.
+ // Otherwise, use the parameter map size calculated earlier.
+ __ Cmp(mapped_params, 0);
+ __ CzeroX(size, eq);
+
+ // 2. Add the size of the backing store and arguments object.
+ __ Add(size, size, Operand(arg_count, LSL, kPointerSizeLog2));
+ __ Add(size, size,
+ FixedArray::kHeaderSize + Heap::kSloppyArgumentsObjectSize);
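+ // For example, with arg_count == 3 and mapped_params == 2, the allocation
+ // covers a two-entry parameter map plus its two extra words, a three-entry
+ // backing store with its FixedArray header, and the arguments object itself.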
+
+ // Do the allocation of all three objects in one go. Assign this to x0, as it
+ // will be returned to the caller.
+ Register alloc_obj = x0;
+ __ Allocate(size, alloc_obj, x11, x12, &runtime, TAG_OBJECT);
+
+ // Get the arguments boilerplate from the current (native) context.
+
+ // x0 alloc_obj pointer to allocated objects (param map, backing
+ // store, arguments)
+ // x1 mapped_params number of mapped parameters, min(params, args)
+ // x2 arg_count number of function arguments
+ // x3 arg_count_smi number of function arguments (smi)
+ // x4 function function pointer
+ // x7 param_count number of function parameters
+ // x11 args_offset offset to args (or aliased args) boilerplate (uninit)
+ // x14 recv_arg pointer to receiver arguments
+
+ Register global_object = x10;
+ Register global_ctx = x10;
+ Register args_offset = x11;
+ Register aliased_args_offset = x10;
+ __ Ldr(global_object, GlobalObjectMemOperand());
+ __ Ldr(global_ctx, FieldMemOperand(global_object,
+ GlobalObject::kNativeContextOffset));
+
+ __ Ldr(args_offset,
+ ContextMemOperand(global_ctx,
+ Context::SLOPPY_ARGUMENTS_BOILERPLATE_INDEX));
+ __ Ldr(aliased_args_offset,
+ ContextMemOperand(global_ctx,
+ Context::ALIASED_ARGUMENTS_BOILERPLATE_INDEX));
+ __ Cmp(mapped_params, 0);
+ __ CmovX(args_offset, aliased_args_offset, ne);
+
+ // Copy the JS object part.
+ __ CopyFields(alloc_obj, args_offset, CPURegList(x10, x12, x13),
+ JSObject::kHeaderSize / kPointerSize);
+
+ // Set up the callee in-object property.
+ STATIC_ASSERT(Heap::kArgumentsCalleeIndex == 1);
+ const int kCalleeOffset = JSObject::kHeaderSize +
+ Heap::kArgumentsCalleeIndex * kPointerSize;
+ __ Str(function, FieldMemOperand(alloc_obj, kCalleeOffset));
+
+ // Use the length and set that as an in-object property.
+ STATIC_ASSERT(Heap::kArgumentsLengthIndex == 0);
+ const int kLengthOffset = JSObject::kHeaderSize +
+ Heap::kArgumentsLengthIndex * kPointerSize;
+ __ Str(arg_count_smi, FieldMemOperand(alloc_obj, kLengthOffset));
+
+ // Set up the elements pointer in the allocated arguments object.
+ // If we allocated a parameter map, "elements" will point there, otherwise
+ // it will point to the backing store.
+
+ // x0 alloc_obj pointer to allocated objects (param map, backing
+ // store, arguments)
+ // x1 mapped_params number of mapped parameters, min(params, args)
+ // x2 arg_count number of function arguments
+ // x3 arg_count_smi number of function arguments (smi)
+ // x4 function function pointer
+ // x5 elements pointer to parameter map or backing store (uninit)
+ // x6 backing_store pointer to backing store (uninit)
+ // x7 param_count number of function parameters
+ // x14 recv_arg pointer to receiver arguments
+
+ Register elements = x5;
+ __ Add(elements, alloc_obj, Heap::kSloppyArgumentsObjectSize);
+ __ Str(elements, FieldMemOperand(alloc_obj, JSObject::kElementsOffset));
+
+ // Initialize parameter map. If there are no mapped arguments, we're done.
+ Label skip_parameter_map;
+ __ Cmp(mapped_params, 0);
+ // Set up backing store address, because it is needed later for filling in
+ // the unmapped arguments.
+ Register backing_store = x6;
+ __ CmovX(backing_store, elements, eq);
+ __ B(eq, &skip_parameter_map);
+
+ __ LoadRoot(x10, Heap::kSloppyArgumentsElementsMapRootIndex);
+ __ Str(x10, FieldMemOperand(elements, FixedArray::kMapOffset));
+ __ Add(x10, mapped_params, 2);
+ __ SmiTag(x10);
+ __ Str(x10, FieldMemOperand(elements, FixedArray::kLengthOffset));
+ __ Str(cp, FieldMemOperand(elements,
+ FixedArray::kHeaderSize + 0 * kPointerSize));
+ __ Add(x10, elements, Operand(mapped_params, LSL, kPointerSizeLog2));
+ __ Add(x10, x10, kParameterMapHeaderSize);
+ __ Str(x10, FieldMemOperand(elements,
+ FixedArray::kHeaderSize + 1 * kPointerSize));
+
+ // Copy the parameter slots and the holes in the arguments.
+ // We need to fill in mapped_parameter_count slots. They index the context,
+ // where parameters are stored in reverse order, at:
+ //
+ // MIN_CONTEXT_SLOTS .. MIN_CONTEXT_SLOTS + parameter_count - 1
+ //
+ // The mapped parameter thus needs to get indices:
+ //
+ // MIN_CONTEXT_SLOTS + parameter_count - 1 ..
+ // MIN_CONTEXT_SLOTS + parameter_count - mapped_parameter_count
+ //
+ // We loop from right to left.
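+ // For example, with parameter_count == 4 and mapped_parameter_count == 2,
+ // the two mapped slots get context indices MIN_CONTEXT_SLOTS + 3 and
+ // MIN_CONTEXT_SLOTS + 2.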
+
+ // x0 alloc_obj pointer to allocated objects (param map, backing
+ // store, arguments)
+ // x1 mapped_params number of mapped parameters, min(params, args)
+ // x2 arg_count number of function arguments
+ // x3 arg_count_smi number of function arguments (smi)
+ // x4 function function pointer
+ // x5 elements pointer to parameter map or backing store (uninit)
+ // x6 backing_store pointer to backing store (uninit)
+ // x7 param_count number of function parameters
+ // x11 loop_count parameter loop counter (uninit)
+ // x12 index parameter index (smi, uninit)
+ // x13 the_hole hole value (uninit)
+ // x14 recv_arg pointer to receiver arguments
+
+ Register loop_count = x11;
+ Register index = x12;
+ Register the_hole = x13;
+ Label parameters_loop, parameters_test;
+ __ Mov(loop_count, mapped_params);
+ __ Add(index, param_count, static_cast<int>(Context::MIN_CONTEXT_SLOTS));
+ __ Sub(index, index, mapped_params);
+ __ SmiTag(index);
+ __ LoadRoot(the_hole, Heap::kTheHoleValueRootIndex);
+ __ Add(backing_store, elements, Operand(loop_count, LSL, kPointerSizeLog2));
+ __ Add(backing_store, backing_store, kParameterMapHeaderSize);
+
+ __ B(&parameters_test);
+
+ __ Bind(&parameters_loop);
+ __ Sub(loop_count, loop_count, 1);
+ __ Mov(x10, Operand(loop_count, LSL, kPointerSizeLog2));
+ __ Add(x10, x10, kParameterMapHeaderSize - kHeapObjectTag);
+ __ Str(index, MemOperand(elements, x10));
+ __ Sub(x10, x10, kParameterMapHeaderSize - FixedArray::kHeaderSize);
+ __ Str(the_hole, MemOperand(backing_store, x10));
+ __ Add(index, index, Smi::FromInt(1));
+ __ Bind(&parameters_test);
+ __ Cbnz(loop_count, &parameters_loop);
+
+ __ Bind(&skip_parameter_map);
+ // Copy the arguments header and remaining slots (if there are any).
+ __ LoadRoot(x10, Heap::kFixedArrayMapRootIndex);
+ __ Str(x10, FieldMemOperand(backing_store, FixedArray::kMapOffset));
+ __ Str(arg_count_smi, FieldMemOperand(backing_store,
+ FixedArray::kLengthOffset));
+
+ // x0 alloc_obj pointer to allocated objects (param map, backing
+ // store, arguments)
+ // x1 mapped_params number of mapped parameters, min(params, args)
+ // x2 arg_count number of function arguments
+ // x4 function function pointer
+ // x3 arg_count_smi number of function arguments (smi)
+ // x6 backing_store pointer to backing store (uninit)
+ // x14 recv_arg pointer to receiver arguments
+
+ Label arguments_loop, arguments_test;
+ __ Mov(x10, mapped_params);
+ __ Sub(recv_arg, recv_arg, Operand(x10, LSL, kPointerSizeLog2));
+ __ B(&arguments_test);
+
+ __ Bind(&arguments_loop);
+ __ Sub(recv_arg, recv_arg, kPointerSize);
+ __ Ldr(x11, MemOperand(recv_arg));
+ __ Add(x12, backing_store, Operand(x10, LSL, kPointerSizeLog2));
+ __ Str(x11, FieldMemOperand(x12, FixedArray::kHeaderSize));
+ __ Add(x10, x10, 1);
+
+ __ Bind(&arguments_test);
+ __ Cmp(x10, arg_count);
+ __ B(lt, &arguments_loop);
+
+ __ Ret();
+
+ // Do the runtime call to allocate the arguments object.
+ __ Bind(&runtime);
+ __ Push(function, recv_arg, arg_count_smi);
+ __ TailCallRuntime(Runtime::kHiddenNewArgumentsFast, 3, 1);
+}
+
+
+void ArgumentsAccessStub::GenerateNewStrict(MacroAssembler* masm) {
+ // Stack layout on entry.
+ // jssp[0]: number of parameters (tagged)
+ // jssp[8]: address of receiver argument
+ // jssp[16]: function
+ //
+ // Returns pointer to result object in x0.
+
+ // Get the stub arguments from the frame, and make an untagged copy of the
+ // parameter count.
+ Register param_count_smi = x1;
+ Register params = x2;
+ Register function = x3;
+ Register param_count = x13;
+ __ Pop(param_count_smi, params, function);
+ __ SmiUntag(param_count, param_count_smi);
+
+ // Test if arguments adaptor needed.
+ Register caller_fp = x11;
+ Register caller_ctx = x12;
+ Label try_allocate, runtime;
+ __ Ldr(caller_fp, MemOperand(fp, StandardFrameConstants::kCallerFPOffset));
+ __ Ldr(caller_ctx, MemOperand(caller_fp,
+ StandardFrameConstants::kContextOffset));
+ __ Cmp(caller_ctx, Smi::FromInt(StackFrame::ARGUMENTS_ADAPTOR));
+ __ B(ne, &try_allocate);
+
+ // x1 param_count_smi number of parameters passed to function (smi)
+ // x2 params pointer to parameters
+ // x3 function function pointer
+ // x11 caller_fp caller's frame pointer
+ // x13 param_count number of parameters passed to function
+
+ // Patch the argument length and parameters pointer.
+ __ Ldr(param_count_smi,
+ MemOperand(caller_fp,
+ ArgumentsAdaptorFrameConstants::kLengthOffset));
+ __ SmiUntag(param_count, param_count_smi);
+ __ Add(x10, caller_fp, Operand(param_count, LSL, kPointerSizeLog2));
+ __ Add(params, x10, StandardFrameConstants::kCallerSPOffset);
+
+ // Try the new space allocation. Start out with computing the size of the
+ // arguments object and the elements array in words.
+ Register size = x10;
+ __ Bind(&try_allocate);
+ __ Add(size, param_count, FixedArray::kHeaderSize / kPointerSize);
+ __ Cmp(param_count, 0);
+ __ CzeroX(size, eq);
+ __ Add(size, size, Heap::kStrictArgumentsObjectSize / kPointerSize);
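+ // For example, with two parameters the total is the strict arguments object
+ // plus a FixedArray header and two element slots (all counted in words);
+ // with no parameters the FixedArray is omitted entirely.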
+
+ // Do the allocation of both objects in one go. Assign this to x0, as it will
+ // be returned to the caller.
+ Register alloc_obj = x0;
+ __ Allocate(size, alloc_obj, x11, x12, &runtime,
+ static_cast<AllocationFlags>(TAG_OBJECT | SIZE_IN_WORDS));
+
+ // Get the arguments boilerplate from the current (native) context.
+ Register global_object = x10;
+ Register global_ctx = x10;
+ Register args_offset = x4;
+ __ Ldr(global_object, GlobalObjectMemOperand());
+ __ Ldr(global_ctx, FieldMemOperand(global_object,
+ GlobalObject::kNativeContextOffset));
+ __ Ldr(args_offset,
+ ContextMemOperand(global_ctx,
+ Context::STRICT_ARGUMENTS_BOILERPLATE_INDEX));
+
+ // x0 alloc_obj pointer to allocated objects: parameter array and
+ // arguments object
+ // x1 param_count_smi number of parameters passed to function (smi)
+ // x2 params pointer to parameters
+ // x3 function function pointer
+ // x4 args_offset offset to arguments boilerplate
+ // x13 param_count number of parameters passed to function
+
+ // Copy the JS object part.
+ __ CopyFields(alloc_obj, args_offset, CPURegList(x5, x6, x7),
+ JSObject::kHeaderSize / kPointerSize);
+
+ // Set the smi-tagged length as an in-object property.
+ STATIC_ASSERT(Heap::kArgumentsLengthIndex == 0);
+ const int kLengthOffset = JSObject::kHeaderSize +
+ Heap::kArgumentsLengthIndex * kPointerSize;
+ __ Str(param_count_smi, FieldMemOperand(alloc_obj, kLengthOffset));
+
+ // If there are no actual arguments, we're done.
+ Label done;
+ __ Cbz(param_count, &done);
+
+ // Set up the elements pointer in the allocated arguments object and
+ // initialize the header in the elements fixed array.
+ Register elements = x5;
+ __ Add(elements, alloc_obj, Heap::kStrictArgumentsObjectSize);
+ __ Str(elements, FieldMemOperand(alloc_obj, JSObject::kElementsOffset));
+ __ LoadRoot(x10, Heap::kFixedArrayMapRootIndex);
+ __ Str(x10, FieldMemOperand(elements, FixedArray::kMapOffset));
+ __ Str(param_count_smi, FieldMemOperand(elements, FixedArray::kLengthOffset));
+
+ // x0 alloc_obj pointer to allocated objects: parameter array and
+ // arguments object
+ // x1 param_count_smi number of parameters passed to function (smi)
+ // x2 params pointer to parameters
+ // x3 function function pointer
+ // x4 array pointer to array slot (uninit)
+ // x5 elements pointer to elements array of alloc_obj
+ // x13 param_count number of parameters passed to function
+
+ // Copy the fixed array slots.
+ Label loop;
+ Register array = x4;
+ // Set up pointer to first array slot.
+ __ Add(array, elements, FixedArray::kHeaderSize - kHeapObjectTag);
+
+ __ Bind(&loop);
+ // Pre-decrement the parameters pointer by kPointerSize on each iteration.
+ // Pre-decrement in order to skip receiver.
+ __ Ldr(x10, MemOperand(params, -kPointerSize, PreIndex));
+ // Post-increment elements by kPointerSize on each iteration.
+ __ Str(x10, MemOperand(array, kPointerSize, PostIndex));
+ __ Sub(param_count, param_count, 1);
+ __ Cbnz(param_count, &loop);
+
+ // Return from stub.
+ __ Bind(&done);
+ __ Ret();
+
+ // Do the runtime call to allocate the arguments object.
+ __ Bind(&runtime);
+ __ Push(function, params, param_count_smi);
+ __ TailCallRuntime(Runtime::kHiddenNewStrictArgumentsFast, 3, 1);
+}
+
+
+void RegExpExecStub::Generate(MacroAssembler* masm) {
+#ifdef V8_INTERPRETED_REGEXP
+ __ TailCallRuntime(Runtime::kHiddenRegExpExec, 4, 1);
+#else // V8_INTERPRETED_REGEXP
+
+ // Stack frame on entry.
+ // jssp[0]: last_match_info (expected JSArray)
+ // jssp[8]: previous index
+ // jssp[16]: subject string
+ // jssp[24]: JSRegExp object
+ Label runtime;
+
+ // Use of registers for this function.
+
+ // Variable registers:
+ // x10-x13 used as scratch registers
+ // w0 string_type type of subject string
+ // x2 jsstring_length subject string length
+ // x3 jsregexp_object JSRegExp object
+ // w4 string_encoding ASCII or UC16
+ // w5 sliced_string_offset if the string is a SlicedString
+ // offset to the underlying string
+ // w6 string_representation groups attributes of the string:
+ // - is a string
+ // - type of the string
+ // - is a short external string
+ Register string_type = w0;
+ Register jsstring_length = x2;
+ Register jsregexp_object = x3;
+ Register string_encoding = w4;
+ Register sliced_string_offset = w5;
+ Register string_representation = w6;
+
+ // These are in callee-saved registers and will be preserved by the call
+ // to the native RegExp code, as this code is called using the normal
+ // C calling convention. When calling directly from generated code the
+ // native RegExp code will not do a GC and therefore the contents of
+ // these registers are safe to use after the call.
+
+ // x19 subject subject string
+ // x20 regexp_data RegExp data (FixedArray)
+ // x21 last_match_info_elements info relative to the last match
+ // (FixedArray)
+ // x22 code_object generated regexp code
+ Register subject = x19;
+ Register regexp_data = x20;
+ Register last_match_info_elements = x21;
+ Register code_object = x22;
+
+ // TODO(jbramley): Is it necessary to preserve these? I don't think ARM does.
+ CPURegList used_callee_saved_registers(subject,
+ regexp_data,
+ last_match_info_elements,
+ code_object);
+ __ PushCPURegList(used_callee_saved_registers);
+
+ // Stack frame.
+ // jssp[0] : x19
+ // jssp[8] : x20
+ // jssp[16]: x21
+ // jssp[24]: x22
+ // jssp[32]: last_match_info (JSArray)
+ // jssp[40]: previous index
+ // jssp[48]: subject string
+ // jssp[56]: JSRegExp object
+
+ const int kLastMatchInfoOffset = 4 * kPointerSize;
+ const int kPreviousIndexOffset = 5 * kPointerSize;
+ const int kSubjectOffset = 6 * kPointerSize;
+ const int kJSRegExpOffset = 7 * kPointerSize;
+
+ // Ensure that a RegExp stack is allocated.
+ Isolate* isolate = masm->isolate();
+ ExternalReference address_of_regexp_stack_memory_address =
+ ExternalReference::address_of_regexp_stack_memory_address(isolate);
+ ExternalReference address_of_regexp_stack_memory_size =
+ ExternalReference::address_of_regexp_stack_memory_size(isolate);
+ __ Mov(x10, address_of_regexp_stack_memory_size);
+ __ Ldr(x10, MemOperand(x10));
+ __ Cbz(x10, &runtime);
+
+ // Check that the first argument is a JSRegExp object.
+ ASSERT(jssp.Is(__ StackPointer()));
+ __ Peek(jsregexp_object, kJSRegExpOffset);
+ __ JumpIfSmi(jsregexp_object, &runtime);
+ __ JumpIfNotObjectType(jsregexp_object, x10, x10, JS_REGEXP_TYPE, &runtime);
+
+ // Check that the RegExp has been compiled (data contains a fixed array).
+ __ Ldr(regexp_data, FieldMemOperand(jsregexp_object, JSRegExp::kDataOffset));
+ if (FLAG_debug_code) {
+ STATIC_ASSERT(kSmiTag == 0);
+ __ Tst(regexp_data, kSmiTagMask);
+ __ Check(ne, kUnexpectedTypeForRegExpDataFixedArrayExpected);
+ __ CompareObjectType(regexp_data, x10, x10, FIXED_ARRAY_TYPE);
+ __ Check(eq, kUnexpectedTypeForRegExpDataFixedArrayExpected);
+ }
+
+ // Check the type of the RegExp. Only continue if type is JSRegExp::IRREGEXP.
+ __ Ldr(x10, FieldMemOperand(regexp_data, JSRegExp::kDataTagOffset));
+ __ Cmp(x10, Smi::FromInt(JSRegExp::IRREGEXP));
+ __ B(ne, &runtime);
+
+ // Check that the number of captures fits in the static offsets vector
+ // buffer. We always have at least one capture for the whole match, plus
+ // additional ones due to capturing parentheses. A capture takes 2 registers.
+ // The number of capture registers then is (number_of_captures + 1) * 2.
+ __ Ldrsw(x10,
+ UntagSmiFieldMemOperand(regexp_data,
+ JSRegExp::kIrregexpCaptureCountOffset));
+ // Check (number_of_captures + 1) * 2 <= offsets vector size
+ // number_of_captures * 2 <= offsets vector size - 2
+ STATIC_ASSERT(Isolate::kJSRegexpStaticOffsetsVectorSize >= 2);
+ __ Add(x10, x10, x10);
+ __ Cmp(x10, Isolate::kJSRegexpStaticOffsetsVectorSize - 2);
+ __ B(hi, &runtime);
+
+ // Initialize offset for possibly sliced string.
+ __ Mov(sliced_string_offset, 0);
+
+ ASSERT(jssp.Is(__ StackPointer()));
+ __ Peek(subject, kSubjectOffset);
+ __ JumpIfSmi(subject, &runtime);
+
+ __ Ldr(x10, FieldMemOperand(subject, HeapObject::kMapOffset));
+ __ Ldrb(string_type, FieldMemOperand(x10, Map::kInstanceTypeOffset));
+
+ __ Ldr(jsstring_length, FieldMemOperand(subject, String::kLengthOffset));
+
+ // Handle subject string according to its encoding and representation:
+ // (1) Sequential string? If yes, go to (5).
+ // (2) Anything but sequential or cons? If yes, go to (6).
+ // (3) Cons string. If the string is flat, replace subject with first string.
+ // Otherwise bailout.
+ // (4) Is subject external? If yes, go to (7).
+ // (5) Sequential string. Load regexp code according to encoding.
+ // (E) Carry on.
+ /// [...]
+
+ // Deferred code at the end of the stub:
+ // (6) Not a long external string? If yes, go to (8).
+ // (7) External string. Make it, offset-wise, look like a sequential string.
+ // Go to (5).
+ // (8) Short external string or not a string? If yes, bail out to runtime.
+ // (9) Sliced string. Replace subject with parent. Go to (4).
+
+ Label check_underlying; // (4)
+ Label seq_string; // (5)
+ Label not_seq_nor_cons; // (6)
+ Label external_string; // (7)
+ Label not_long_external; // (8)
+
+ // (1) Sequential string? If yes, go to (5).
+ __ And(string_representation,
+ string_type,
+ kIsNotStringMask |
+ kStringRepresentationMask |
+ kShortExternalStringMask);
+ // We depend on the fact that Strings of type
+ // SeqString and not ShortExternalString are defined
+ // by the following pattern:
+ // string_type: 0XX0 XX00
+ // ^ ^ ^^
+ // | | ||
+ // | | is a SeqString
+ // | is not a short external String
+ // is a String
+ STATIC_ASSERT((kStringTag | kSeqStringTag) == 0);
+ STATIC_ASSERT(kShortExternalStringTag != 0);
+ __ Cbz(string_representation, &seq_string); // Go to (5).
+
+ // (2) Anything but sequential or cons? If yes, go to (6).
+ STATIC_ASSERT(kConsStringTag < kExternalStringTag);
+ STATIC_ASSERT(kSlicedStringTag > kExternalStringTag);
+ STATIC_ASSERT(kIsNotStringMask > kExternalStringTag);
+ STATIC_ASSERT(kShortExternalStringTag > kExternalStringTag);
+ __ Cmp(string_representation, kExternalStringTag);
+ __ B(ge, &not_seq_nor_cons); // Go to (6).
+
+ // (3) Cons string. Check that it's flat.
+ __ Ldr(x10, FieldMemOperand(subject, ConsString::kSecondOffset));
+ __ JumpIfNotRoot(x10, Heap::kempty_stringRootIndex, &runtime);
+ // Replace subject with first string.
+ __ Ldr(subject, FieldMemOperand(subject, ConsString::kFirstOffset));
+
+ // (4) Is subject external? If yes, go to (7).
+ __ Bind(&check_underlying);
+ // Reload the string type.
+ __ Ldr(x10, FieldMemOperand(subject, HeapObject::kMapOffset));
+ __ Ldrb(string_type, FieldMemOperand(x10, Map::kInstanceTypeOffset));
+ STATIC_ASSERT(kSeqStringTag == 0);
+ // The underlying external string is never a short external string.
+ STATIC_CHECK(ExternalString::kMaxShortLength < ConsString::kMinLength);
+ STATIC_CHECK(ExternalString::kMaxShortLength < SlicedString::kMinLength);
+ __ TestAndBranchIfAnySet(string_type.X(),
+ kStringRepresentationMask,
+ &external_string); // Go to (7).
+
+ // (5) Sequential string. Load regexp code according to encoding.
+ __ Bind(&seq_string);
+
+ // Check that the third argument is a positive smi less than the subject
+ // string length. A negative value will be greater (unsigned comparison).
+ ASSERT(jssp.Is(__ StackPointer()));
+ __ Peek(x10, kPreviousIndexOffset);
+ __ JumpIfNotSmi(x10, &runtime);
+ __ Cmp(jsstring_length, x10);
+ __ B(ls, &runtime);
+
+ // Argument 2 (x1): We need to load argument 2 (the previous index) into x1
+ // before entering the exit frame.
+ __ SmiUntag(x1, x10);
+
+ // The third bit determines the string encoding in string_type.
+ STATIC_ASSERT(kOneByteStringTag == 0x04);
+ STATIC_ASSERT(kTwoByteStringTag == 0x00);
+ STATIC_ASSERT(kStringEncodingMask == 0x04);
+
+ // Find the code object based on the assumptions above.
+ // kDataAsciiCodeOffset and kDataUC16CodeOffset are adjacent; add an offset
+ // of kPointerSize to reach the latter.
+ ASSERT_EQ(JSRegExp::kDataAsciiCodeOffset + kPointerSize,
+ JSRegExp::kDataUC16CodeOffset);
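+ // For an ASCII subject the masked encoding bits are non-zero ('ne'), so the
+ // kPointerSize offset in x10 is cleared and the load below picks the ASCII
+ // code; for UC16 the offset is kept, moving the load to the adjacent
+ // kDataUC16CodeOffset.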
+ __ Mov(x10, kPointerSize);
+ // We will need the encoding later: ASCII = 0x04
+ // UC16 = 0x00
+ __ Ands(string_encoding, string_type, kStringEncodingMask);
+ __ CzeroX(x10, ne);
+ __ Add(x10, regexp_data, x10);
+ __ Ldr(code_object, FieldMemOperand(x10, JSRegExp::kDataAsciiCodeOffset));
+
+ // (E) Carry on. String handling is done.
+
+ // Check that the irregexp code has been generated for the actual string
+ // encoding. If it has, the field contains a code object; otherwise it
+ // contains a smi (code flushing support).
+ __ JumpIfSmi(code_object, &runtime);
+
+ // All checks done. Now push arguments for native regexp code.
+ __ IncrementCounter(isolate->counters()->regexp_entry_native(), 1,
+ x10,
+ x11);
+
+ // Isolates: note we add an additional parameter here (isolate pointer).
+ __ EnterExitFrame(false, x10, 1);
+ ASSERT(csp.Is(__ StackPointer()));
+
+ // We have 9 arguments to pass to the regexp code, therefore we have to pass
+ // one on the stack and the rest as registers.
+
+ // Note that the placement of the argument on the stack isn't standard
+ // AAPCS64:
+ // csp[0]: Space for the return address placed by DirectCEntryStub.
+ // csp[8]: Argument 9, the current isolate address.
+
+ __ Mov(x10, ExternalReference::isolate_address(isolate));
+ __ Poke(x10, kPointerSize);
+
+ Register length = w11;
+ Register previous_index_in_bytes = w12;
+ Register start = x13;
+
+ // Load start of the subject string.
+ __ Add(start, subject, SeqString::kHeaderSize - kHeapObjectTag);
+ // Load the length of the original subject string from the previous stack
+ // frame. Therefore we have to use fp, which points exactly two pointer
+ // sizes below the previous sp. (Because creating a new stack frame pushes
+ // the previous fp onto the stack and decrements sp by 2 * kPointerSize.)
+ __ Ldr(subject, MemOperand(fp, kSubjectOffset + 2 * kPointerSize));
+ __ Ldr(length, UntagSmiFieldMemOperand(subject, String::kLengthOffset));
+
+ // Handle UC16 encoding, two bytes make one character.
+ // string_encoding: if ASCII: 0x04
+ // if UC16: 0x00
+ STATIC_ASSERT(kStringEncodingMask == 0x04);
+ __ Ubfx(string_encoding, string_encoding, 2, 1);
+ __ Eor(string_encoding, string_encoding, 1);
+ // string_encoding: if ASCII: 0
+ // if UC16: 1
+
+ // Convert string positions from characters to bytes.
+ // Previous index is in x1.
+ __ Lsl(previous_index_in_bytes, w1, string_encoding);
+ __ Lsl(length, length, string_encoding);
+ __ Lsl(sliced_string_offset, sliced_string_offset, string_encoding);
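+ // For example, a previous index of 3 characters stays 3 bytes for an ASCII
+ // string (shift by 0) but becomes 6 bytes for a UC16 string (shift by 1).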
+
+ // Argument 1 (x0): Subject string.
+ __ Mov(x0, subject);
+
+ // Argument 2 (x1): Previous index, already there.
+
+ // Argument 3 (x2): Get the start of input.
+ // Start of input = start of string + previous index + substring offset
+ // (0 if the string is not sliced).
+ __ Add(w10, previous_index_in_bytes, sliced_string_offset);
+ __ Add(x2, start, Operand(w10, UXTW));
+
+ // Argument 4 (x3):
+ // End of input = start of input + (length of input - previous index)
+ __ Sub(w10, length, previous_index_in_bytes);
+ __ Add(x3, x2, Operand(w10, UXTW));
+
+ // Argument 5 (x4): static offsets vector buffer.
+ __ Mov(x4, ExternalReference::address_of_static_offsets_vector(isolate));
+
+ // Argument 6 (x5): Set the number of capture registers to zero to force
+ // global regexps to behave as non-global. This stub is not used for global
+ // regexps.
+ __ Mov(x5, 0);
+
+ // Argument 7 (x6): Start (high end) of backtracking stack memory area.
+ __ Mov(x10, address_of_regexp_stack_memory_address);
+ __ Ldr(x10, MemOperand(x10));
+ __ Mov(x11, address_of_regexp_stack_memory_size);
+ __ Ldr(x11, MemOperand(x11));
+ __ Add(x6, x10, x11);
+
+ // Argument 8 (x7): Indicate that this is a direct call from JavaScript.
+ __ Mov(x7, 1);
+
+ // Locate the code entry and call it.
+ __ Add(code_object, code_object, Code::kHeaderSize - kHeapObjectTag);
+ DirectCEntryStub stub;
+ stub.GenerateCall(masm, code_object);
+
+ __ LeaveExitFrame(false, x10, true);
+
+ // The generated regexp code returns an int32 in w0.
+ Label failure, exception;
+ __ CompareAndBranch(w0, NativeRegExpMacroAssembler::FAILURE, eq, &failure);
+ __ CompareAndBranch(w0,
+ NativeRegExpMacroAssembler::EXCEPTION,
+ eq,
+ &exception);
+ __ CompareAndBranch(w0, NativeRegExpMacroAssembler::RETRY, eq, &runtime);
+
+ // Success: process the result from the native regexp code.
+ Register number_of_capture_registers = x12;
+
+ // Calculate number of capture registers (number_of_captures + 1) * 2
+ // and store it in the last match info.
+ __ Ldrsw(x10,
+ UntagSmiFieldMemOperand(regexp_data,
+ JSRegExp::kIrregexpCaptureCountOffset));
+ __ Add(x10, x10, x10);
+ __ Add(number_of_capture_registers, x10, 2);
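+ // For example, a RegExp with two capturing parentheses has three captures
+ // (including the whole match) and therefore (2 + 1) * 2 == 6 capture
+ // registers, one start/end pair per capture.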
+
+ // Check that the fourth argument is a JSArray object.
+ ASSERT(jssp.Is(__ StackPointer()));
+ __ Peek(x10, kLastMatchInfoOffset);
+ __ JumpIfSmi(x10, &runtime);
+ __ JumpIfNotObjectType(x10, x11, x11, JS_ARRAY_TYPE, &runtime);
+
+ // Check that the JSArray is the fast case.
+ __ Ldr(last_match_info_elements,
+ FieldMemOperand(x10, JSArray::kElementsOffset));
+ __ Ldr(x10,
+ FieldMemOperand(last_match_info_elements, HeapObject::kMapOffset));
+ __ JumpIfNotRoot(x10, Heap::kFixedArrayMapRootIndex, &runtime);
+
+ // Check that the last match info has space for the capture registers and the
+ // additional information (overhead).
+ // (number_of_captures + 1) * 2 + overhead <= last match info size
+ // (number_of_captures * 2) + 2 + overhead <= last match info size
+ // number_of_capture_registers + overhead <= last match info size
+ __ Ldrsw(x10,
+ UntagSmiFieldMemOperand(last_match_info_elements,
+ FixedArray::kLengthOffset));
+ __ Add(x11, number_of_capture_registers, RegExpImpl::kLastMatchOverhead);
+ __ Cmp(x11, x10);
+ __ B(gt, &runtime);
+
+ // Store the capture count.
+ __ SmiTag(x10, number_of_capture_registers);
+ __ Str(x10,
+ FieldMemOperand(last_match_info_elements,
+ RegExpImpl::kLastCaptureCountOffset));
+ // Store last subject and last input.
+ __ Str(subject,
+ FieldMemOperand(last_match_info_elements,
+ RegExpImpl::kLastSubjectOffset));
+ // Use x10 as the subject string in order to only need
+ // one RecordWriteStub.
+ __ Mov(x10, subject);
+ __ RecordWriteField(last_match_info_elements,
+ RegExpImpl::kLastSubjectOffset,
+ x10,
+ x11,
+ kLRHasNotBeenSaved,
+ kDontSaveFPRegs);
+ __ Str(subject,
+ FieldMemOperand(last_match_info_elements,
+ RegExpImpl::kLastInputOffset));
+ __ Mov(x10, subject);
+ __ RecordWriteField(last_match_info_elements,
+ RegExpImpl::kLastInputOffset,
+ x10,
+ x11,
+ kLRHasNotBeenSaved,
+ kDontSaveFPRegs);
+
+ Register last_match_offsets = x13;
+ Register offsets_vector_index = x14;
+ Register current_offset = x15;
+
+ // Get the static offsets vector filled by the native regexp code
+ // and fill the last match info.
+ ExternalReference address_of_static_offsets_vector =
+ ExternalReference::address_of_static_offsets_vector(isolate);
+ __ Mov(offsets_vector_index, address_of_static_offsets_vector);
+
+ Label next_capture, done;
+ // Capture register counter starts from number of capture registers and
+ // iterates down to zero (inclusive).
+ __ Add(last_match_offsets,
+ last_match_info_elements,
+ RegExpImpl::kFirstCaptureOffset - kHeapObjectTag);
+ __ Bind(&next_capture);
+ __ Subs(number_of_capture_registers, number_of_capture_registers, 2);
+ __ B(mi, &done);
+ // Read two 32-bit values from the static offsets vector buffer into
+ // an X register.
+ __ Ldr(current_offset,
+ MemOperand(offsets_vector_index, kWRegSize * 2, PostIndex));
+ // Store the smi values in the last match info.
+ __ SmiTag(x10, current_offset);
+ // Clearing the bottom 32 bits gives us a Smi.
+ STATIC_ASSERT(kSmiShift == 32);
+ __ And(x11, current_offset, ~kWRegMask);
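+ // current_offset held two packed 32-bit offsets: SmiTag moved the low word
+ // into Smi position (x10), and masking the low 32 bits left the high word
+ // already Smi-tagged (x11).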
+ __ Stp(x10,
+ x11,
+ MemOperand(last_match_offsets, kXRegSize * 2, PostIndex));
+ __ B(&next_capture);
+ __ Bind(&done);
+
+ // Return last match info.
+ __ Peek(x0, kLastMatchInfoOffset);
+ __ PopCPURegList(used_callee_saved_registers);
+ // Drop the 4 arguments of the stub from the stack.
+ __ Drop(4);
+ __ Ret();
+
+ __ Bind(&exception);
+ Register exception_value = x0;
+ // A stack overflow (on the backtrack stack) may have occurred
+ // in the RegExp code, but no exception has been created yet.
+ // If there is no pending exception, handle that in the runtime system.
+ __ Mov(x10, Operand(isolate->factory()->the_hole_value()));
+ __ Mov(x11,
+ Operand(ExternalReference(Isolate::kPendingExceptionAddress,
+ isolate)));
+ __ Ldr(exception_value, MemOperand(x11));
+ __ Cmp(x10, exception_value);
+ __ B(eq, &runtime);
+
+ __ Str(x10, MemOperand(x11)); // Clear pending exception.
+
+ // Check if the exception is a termination. If so, throw as uncatchable.
+ Label termination_exception;
+ __ JumpIfRoot(exception_value,
+ Heap::kTerminationExceptionRootIndex,
+ &termination_exception);
+
+ __ Throw(exception_value, x10, x11, x12, x13);
+
+ __ Bind(&termination_exception);
+ __ ThrowUncatchable(exception_value, x10, x11, x12, x13);
+
+ __ Bind(&failure);
+ __ Mov(x0, Operand(masm->isolate()->factory()->null_value()));
+ __ PopCPURegList(used_callee_saved_registers);
+ // Drop the 4 arguments of the stub from the stack.
+ __ Drop(4);
+ __ Ret();
+
+ __ Bind(&runtime);
+ __ PopCPURegList(used_callee_saved_registers);
+ __ TailCallRuntime(Runtime::kHiddenRegExpExec, 4, 1);
+
+ // Deferred code for string handling.
+ // (6) Not a long external string? If yes, go to (8).
+ __ Bind(&not_seq_nor_cons);
+ // Compare flags are still set.
+ __ B(ne, &not_long_external); // Go to (8).
+
+ // (7) External string. Make it, offset-wise, look like a sequential string.
+ __ Bind(&external_string);
+ if (masm->emit_debug_code()) {
+ // Assert that we do not have a cons or slice (indirect strings) here.
+ // Sequential strings have already been ruled out.
+ __ Ldr(x10, FieldMemOperand(subject, HeapObject::kMapOffset));
+ __ Ldrb(x10, FieldMemOperand(x10, Map::kInstanceTypeOffset));
+ __ Tst(x10, kIsIndirectStringMask);
+ __ Check(eq, kExternalStringExpectedButNotFound);
+ __ And(x10, x10, kStringRepresentationMask);
+ __ Cmp(x10, 0);
+ __ Check(ne, kExternalStringExpectedButNotFound);
+ }
+ __ Ldr(subject,
+ FieldMemOperand(subject, ExternalString::kResourceDataOffset));
+ // Move the pointer so that offset-wise, it looks like a sequential string.
+ STATIC_ASSERT(SeqTwoByteString::kHeaderSize == SeqOneByteString::kHeaderSize);
+ __ Sub(subject, subject, SeqTwoByteString::kHeaderSize - kHeapObjectTag);
+ __ B(&seq_string); // Go to (5).
+
+ // (8) If this is a short external string or not a string, bail out to
+ // runtime.
+ __ Bind(&not_long_external);
+ STATIC_ASSERT(kShortExternalStringTag != 0);
+ __ TestAndBranchIfAnySet(string_representation,
+ kShortExternalStringMask | kIsNotStringMask,
+ &runtime);
+
+ // (9) Sliced string. Replace subject with parent.
+ __ Ldr(sliced_string_offset,
+ UntagSmiFieldMemOperand(subject, SlicedString::kOffsetOffset));
+ __ Ldr(subject, FieldMemOperand(subject, SlicedString::kParentOffset));
+ __ B(&check_underlying); // Go to (4).
+#endif
+}
+
+
+static void GenerateRecordCallTarget(MacroAssembler* masm,
+ Register argc,
+ Register function,
+ Register feedback_vector,
+ Register index,
+ Register scratch1,
+ Register scratch2) {
+ ASM_LOCATION("GenerateRecordCallTarget");
+ ASSERT(!AreAliased(scratch1, scratch2,
+ argc, function, feedback_vector, index));
+ // Cache the called function in a feedback vector slot. Cache states are
+ // uninitialized, monomorphic (indicated by a JSFunction), and megamorphic.
+ // argc : number of arguments to the construct function
+ // function : the function to call
+ // feedback_vector : the feedback vector
+ // index : slot in feedback vector (smi)
+ Label initialize, done, miss, megamorphic, not_array_function;
+
+ ASSERT_EQ(*TypeFeedbackInfo::MegamorphicSentinel(masm->isolate()),
+ masm->isolate()->heap()->megamorphic_symbol());
+ ASSERT_EQ(*TypeFeedbackInfo::UninitializedSentinel(masm->isolate()),
+ masm->isolate()->heap()->uninitialized_symbol());
+
+ // Load the cache state.
+ __ Add(scratch1, feedback_vector,
+ Operand::UntagSmiAndScale(index, kPointerSizeLog2));
+ __ Ldr(scratch1, FieldMemOperand(scratch1, FixedArray::kHeaderSize));
+
+ // A monomorphic cache hit or an already megamorphic state: invoke the
+ // function without changing the state.
+ __ Cmp(scratch1, function);
+ __ B(eq, &done);
+
+ if (!FLAG_pretenuring_call_new) {
+ // If we came here, we need to see if we are the array function.
+ // If we didn't have a matching function, and we didn't find the megamorphic
+ // sentinel, then the slot contains either some other function or an
+ // AllocationSite. Do a map check on the object in scratch1 register.
+ __ Ldr(scratch2, FieldMemOperand(scratch1, AllocationSite::kMapOffset));
+ __ JumpIfNotRoot(scratch2, Heap::kAllocationSiteMapRootIndex, &miss);
+
+ // Make sure the function is the Array() function
+ __ LoadGlobalFunction(Context::ARRAY_FUNCTION_INDEX, scratch1);
+ __ Cmp(function, scratch1);
+ __ B(ne, &megamorphic);
+ __ B(&done);
+ }
+
+ __ Bind(&miss);
+
+ // A monomorphic miss (i.e., here the cache is not uninitialized) goes
+ // megamorphic.
+ __ JumpIfRoot(scratch1, Heap::kUninitializedSymbolRootIndex, &initialize);
+ // MegamorphicSentinel is an immortal immovable object (the megamorphic
+ // symbol) so no write barrier is needed.
+ __ Bind(&megamorphic);
+ __ Add(scratch1, feedback_vector,
+ Operand::UntagSmiAndScale(index, kPointerSizeLog2));
+ __ LoadRoot(scratch2, Heap::kMegamorphicSymbolRootIndex);
+ __ Str(scratch2, FieldMemOperand(scratch1, FixedArray::kHeaderSize));
+ __ B(&done);
+
+ // An uninitialized cache is patched with the function or sentinel to
+ // indicate the ElementsKind if function is the Array constructor.
+ __ Bind(&initialize);
+
+ if (!FLAG_pretenuring_call_new) {
+ // Make sure the function is the Array() function
+ __ LoadGlobalFunction(Context::ARRAY_FUNCTION_INDEX, scratch1);
+ __ Cmp(function, scratch1);
+ __ B(ne, &not_array_function);
+
+ // The target function is the Array constructor. Create an AllocationSite
+ // if we don't already have it, and store it in the slot.
+ {
+ FrameScope scope(masm, StackFrame::INTERNAL);
+ CreateAllocationSiteStub create_stub;
+
+ // Arguments register must be smi-tagged to call out.
+ __ SmiTag(argc);
+ __ Push(argc, function, feedback_vector, index);
+
+ // CreateAllocationSiteStub expects the feedback vector in x2 and the slot
+ // index in x3.
+ ASSERT(feedback_vector.Is(x2) && index.Is(x3));
+ __ CallStub(&create_stub);
+
+ __ Pop(index, feedback_vector, function, argc);
+ __ SmiUntag(argc);
+ }
+ __ B(&done);
+
+ __ Bind(&not_array_function);
+ }
+
+ // An uninitialized cache is patched with the function.
+
+ __ Add(scratch1, feedback_vector,
+ Operand::UntagSmiAndScale(index, kPointerSizeLog2));
+ __ Add(scratch1, scratch1, FixedArray::kHeaderSize - kHeapObjectTag);
+ __ Str(function, MemOperand(scratch1, 0));
+
+ __ Push(function);
+ __ RecordWrite(feedback_vector, scratch1, function, kLRHasNotBeenSaved,
+ kDontSaveFPRegs, EMIT_REMEMBERED_SET, OMIT_SMI_CHECK);
+ __ Pop(function);
+
+ __ Bind(&done);
+}
+
+
+void CallFunctionStub::Generate(MacroAssembler* masm) {
+ ASM_LOCATION("CallFunctionStub::Generate");
+ // x1 function the function to call
+ // x2 : feedback vector
+ // x3 : slot in feedback vector (smi) (if x2 is not the megamorphic symbol)
+ Register function = x1;
+ Register cache_cell = x2;
+ Register slot = x3;
+ Register type = x4;
+ Label slow, non_function, wrap, cont;
+
+ // TODO(jbramley): This function has a lot of unnamed registers. Name them,
+ // and tidy things up a bit.
+
+ if (NeedsChecks()) {
+ // Check that the function is really a JavaScript function.
+ __ JumpIfSmi(function, &non_function);
+
+ // Goto slow case if we do not have a function.
+ __ JumpIfNotObjectType(function, x10, type, JS_FUNCTION_TYPE, &slow);
+
+ if (RecordCallTarget()) {
+ GenerateRecordCallTarget(masm, x0, function, cache_cell, slot, x4, x5);
+ // Type information was updated. Because we may call Array, which
+ // expects either undefined or an AllocationSite in x2, we need
+ // to set x2 to undefined.
+ __ LoadRoot(cache_cell, Heap::kUndefinedValueRootIndex);
+ }
+ }
+
+ // Fast-case: Invoke the function now.
+ // x1 function pushed function
+ ParameterCount actual(argc_);
+
+ if (CallAsMethod()) {
+ if (NeedsChecks()) {
+ // Do not transform the receiver for strict mode functions.
+ __ Ldr(x3, FieldMemOperand(x1, JSFunction::kSharedFunctionInfoOffset));
+ __ Ldr(w4, FieldMemOperand(x3, SharedFunctionInfo::kCompilerHintsOffset));
+ __ Tbnz(w4, SharedFunctionInfo::kStrictModeFunction, &cont);
+
+ // Do not transform the receiver for natives (compiler hints already in w4).
+ __ Tbnz(w4, SharedFunctionInfo::kNative, &cont);
+ }
+
+ // Compute the receiver in sloppy mode.
+ __ Peek(x3, argc_ * kPointerSize);
+
+ if (NeedsChecks()) {
+ __ JumpIfSmi(x3, &wrap);
+ __ JumpIfObjectType(x3, x10, type, FIRST_SPEC_OBJECT_TYPE, &wrap, lt);
+ } else {
+ __ B(&wrap);
+ }
+
+ __ Bind(&cont);
+ }
+ __ InvokeFunction(function,
+ actual,
+ JUMP_FUNCTION,
+ NullCallWrapper());
+
+ if (NeedsChecks()) {
+ // Slow-case: Non-function called.
+ __ Bind(&slow);
+ if (RecordCallTarget()) {
+ // If there is a call target cache, mark it megamorphic in the
+ // non-function case. MegamorphicSentinel is an immortal immovable object
+ // (megamorphic symbol) so no write barrier is needed.
+ ASSERT_EQ(*TypeFeedbackInfo::MegamorphicSentinel(masm->isolate()),
+ masm->isolate()->heap()->megamorphic_symbol());
+ __ Add(x12, cache_cell, Operand::UntagSmiAndScale(slot,
+ kPointerSizeLog2));
+ __ LoadRoot(x11, Heap::kMegamorphicSymbolRootIndex);
+ __ Str(x11, FieldMemOperand(x12, FixedArray::kHeaderSize));
+ }
+ // Check for function proxy.
+ // x4 : function type.
+ __ CompareAndBranch(type, JS_FUNCTION_PROXY_TYPE, ne, &non_function);
+ __ Push(function); // put proxy as additional argument
+ __ Mov(x0, argc_ + 1);
+ __ Mov(x2, 0);
+ __ GetBuiltinFunction(x1, Builtins::CALL_FUNCTION_PROXY);
+ {
+ Handle<Code> adaptor =
+ masm->isolate()->builtins()->ArgumentsAdaptorTrampoline();
+ __ Jump(adaptor, RelocInfo::CODE_TARGET);
+ }
+
+ // CALL_NON_FUNCTION expects the non-function callee as receiver (instead
+ // of the original receiver from the call site).
+ __ Bind(&non_function);
+ __ Poke(function, argc_ * kXRegSize);
+ __ Mov(x0, argc_); // Set up the number of arguments.
+ __ Mov(x2, 0);
+ __ GetBuiltinFunction(function, Builtins::CALL_NON_FUNCTION);
+ __ Jump(masm->isolate()->builtins()->ArgumentsAdaptorTrampoline(),
+ RelocInfo::CODE_TARGET);
+ }
+
+ if (CallAsMethod()) {
+ __ Bind(&wrap);
+ // Wrap the receiver and patch it back onto the stack.
+ { FrameScope frame_scope(masm, StackFrame::INTERNAL);
+ __ Push(x1, x3);
+ __ InvokeBuiltin(Builtins::TO_OBJECT, CALL_FUNCTION);
+ __ Pop(x1);
+ }
+ __ Poke(x0, argc_ * kPointerSize);
+ __ B(&cont);
+ }
+}
+
+
+void CallConstructStub::Generate(MacroAssembler* masm) {
+ ASM_LOCATION("CallConstructStub::Generate");
+ // x0 : number of arguments
+ // x1 : the function to call
+ // x2 : feedback vector
+ // x3 : slot in feedback vector (smi) (if x2 is not the megamorphic symbol)
+ Register function = x1;
+ Label slow, non_function_call;
+
+ // Check that the function is not a smi.
+ __ JumpIfSmi(function, &non_function_call);
+ // Check that the function is a JSFunction.
+ Register object_type = x10;
+ __ JumpIfNotObjectType(function, object_type, object_type, JS_FUNCTION_TYPE,
+ &slow);
+
+ if (RecordCallTarget()) {
+ GenerateRecordCallTarget(masm, x0, function, x2, x3, x4, x5);
+
+ __ Add(x5, x2, Operand::UntagSmiAndScale(x3, kPointerSizeLog2));
+ if (FLAG_pretenuring_call_new) {
+ // Put the AllocationSite from the feedback vector into x2.
+ // By adding kPointerSize we encode that we know the AllocationSite
+ // entry is at the feedback vector slot given by x3 + 1.
+ __ Ldr(x2, FieldMemOperand(x5, FixedArray::kHeaderSize + kPointerSize));
+ } else {
+ Label feedback_register_initialized;
+ // Put the AllocationSite from the feedback vector into x2, or undefined.
+ __ Ldr(x2, FieldMemOperand(x5, FixedArray::kHeaderSize));
+ __ Ldr(x5, FieldMemOperand(x2, AllocationSite::kMapOffset));
+ __ JumpIfRoot(x5, Heap::kAllocationSiteMapRootIndex,
+ &feedback_register_initialized);
+ __ LoadRoot(x2, Heap::kUndefinedValueRootIndex);
+ __ bind(&feedback_register_initialized);
+ }
+
+ __ AssertUndefinedOrAllocationSite(x2, x5);
+ }
+
+ // Jump to the function-specific construct stub.
+ Register jump_reg = x4;
+ Register shared_func_info = jump_reg;
+ Register cons_stub = jump_reg;
+ Register cons_stub_code = jump_reg;
+ __ Ldr(shared_func_info,
+ FieldMemOperand(function, JSFunction::kSharedFunctionInfoOffset));
+ __ Ldr(cons_stub,
+ FieldMemOperand(shared_func_info,
+ SharedFunctionInfo::kConstructStubOffset));
+ __ Add(cons_stub_code, cons_stub, Code::kHeaderSize - kHeapObjectTag);
+ __ Br(cons_stub_code);
+
+ Label do_call;
+ __ Bind(&slow);
+ __ Cmp(object_type, JS_FUNCTION_PROXY_TYPE);
+ __ B(ne, &non_function_call);
+ __ GetBuiltinFunction(x1, Builtins::CALL_FUNCTION_PROXY_AS_CONSTRUCTOR);
+ __ B(&do_call);
+
+ __ Bind(&non_function_call);
+ __ GetBuiltinFunction(x1, Builtins::CALL_NON_FUNCTION_AS_CONSTRUCTOR);
+
+ __ Bind(&do_call);
+ // Set expected number of arguments to zero (not changing x0).
+ __ Mov(x2, 0);
+ __ Jump(masm->isolate()->builtins()->ArgumentsAdaptorTrampoline(),
+ RelocInfo::CODE_TARGET);
+}
+
+
+void StringCharCodeAtGenerator::GenerateFast(MacroAssembler* masm) {
+ // If the receiver is a smi trigger the non-string case.
+ __ JumpIfSmi(object_, receiver_not_string_);
+
+ // Fetch the instance type of the receiver into result register.
+ __ Ldr(result_, FieldMemOperand(object_, HeapObject::kMapOffset));
+ __ Ldrb(result_, FieldMemOperand(result_, Map::kInstanceTypeOffset));
+
+ // If the receiver is not a string trigger the non-string case.
+ __ TestAndBranchIfAnySet(result_, kIsNotStringMask, receiver_not_string_);
+
+ // If the index is non-smi trigger the non-smi case.
+ __ JumpIfNotSmi(index_, &index_not_smi_);
+
+ __ Bind(&got_smi_index_);
+ // Check for index out of range.
+ __ Ldrsw(result_, UntagSmiFieldMemOperand(object_, String::kLengthOffset));
+ __ Cmp(result_, Operand::UntagSmi(index_));
+ __ B(ls, index_out_of_range_);
+
+ __ SmiUntag(index_);
+
+ StringCharLoadGenerator::Generate(masm,
+ object_,
+ index_.W(),
+ result_,
+ &call_runtime_);
+ __ SmiTag(result_);
+ __ Bind(&exit_);
+}
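+
+
+// A compact restatement of the fast-path guards above, using plain integers
+// instead of tagged values (the helper and its parameters are illustrative
+// assumptions; it only mirrors the control flow of GenerateFast):
+static bool CharCodeAtFastPathOkSketch(bool receiver_is_smi,
+ bool receiver_is_string, bool index_is_smi, long index, long length) {
+ if (receiver_is_smi || !receiver_is_string) return false; // non-string case
+ if (!index_is_smi) return false; // index_not_smi_
+ // Cmp(length, index) + B(ls, ...): the unsigned compare also rejects
+ // negative indices.
+ if (static_cast<unsigned long>(index) >= static_cast<unsigned long>(length)) {
+ return false; // index_out_of_range_
+ }
+ return true; // StringCharLoadGenerator can load the character
+}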
+
+
+void StringCharCodeAtGenerator::GenerateSlow(
+ MacroAssembler* masm,
+ const RuntimeCallHelper& call_helper) {
+ __ Abort(kUnexpectedFallthroughToCharCodeAtSlowCase);
+
+ __ Bind(&index_not_smi_);
+ // If index is a heap number, try converting it to an integer.
+ __ CheckMap(index_,
+ result_,
+ Heap::kHeapNumberMapRootIndex,
+ index_not_number_,
+ DONT_DO_SMI_CHECK);
+ call_helper.BeforeCall(masm);
+ // Save object_ on the stack and pass index_ as argument for runtime call.
+ __ Push(object_, index_);
+ if (index_flags_ == STRING_INDEX_IS_NUMBER) {
+ __ CallRuntime(Runtime::kNumberToIntegerMapMinusZero, 1);
+ } else {
+ ASSERT(index_flags_ == STRING_INDEX_IS_ARRAY_INDEX);
+ // NumberToSmi discards numbers that are not exact integers.
+ __ CallRuntime(Runtime::kHiddenNumberToSmi, 1);
+ }
+ // Save the conversion result before the pop instructions below
+ // have a chance to overwrite it.
+ __ Mov(index_, x0);
+ __ Pop(object_);
+ // Reload the instance type.
+ __ Ldr(result_, FieldMemOperand(object_, HeapObject::kMapOffset));
+ __ Ldrb(result_, FieldMemOperand(result_, Map::kInstanceTypeOffset));
+ call_helper.AfterCall(masm);
+
+ // If index is still not a smi, it must be out of range.
+ __ JumpIfNotSmi(index_, index_out_of_range_);
+ // Otherwise, return to the fast path.
+ __ B(&got_smi_index_);
+
+ // Call runtime. We get here when the receiver is a string and the
+ // index is a number, but the code for getting the actual character
+ // is too complex (e.g., when the string needs to be flattened).
+ __ Bind(&call_runtime_);
+ call_helper.BeforeCall(masm);
+ __ SmiTag(index_);
+ __ Push(object_, index_);
+ __ CallRuntime(Runtime::kHiddenStringCharCodeAt, 2);
+ __ Mov(result_, x0);
+ call_helper.AfterCall(masm);
+ __ B(&exit_);
+
+ __ Abort(kUnexpectedFallthroughFromCharCodeAtSlowCase);
+}
+
+
+void StringCharFromCodeGenerator::GenerateFast(MacroAssembler* masm) {
+ __ JumpIfNotSmi(code_, &slow_case_);
+ __ Cmp(code_, Smi::FromInt(String::kMaxOneByteCharCode));
+ __ B(hi, &slow_case_);
+
+ __ LoadRoot(result_, Heap::kSingleCharacterStringCacheRootIndex);
+ // At this point the code register contains the smi-tagged ASCII char code.
+ STATIC_ASSERT(kSmiShift > kPointerSizeLog2);
+ __ Add(result_, result_, Operand(code_, LSR, kSmiShift - kPointerSizeLog2));
+ __ Ldr(result_, FieldMemOperand(result_, FixedArray::kHeaderSize));
+ __ JumpIfRoot(result_, Heap::kUndefinedValueRootIndex, &slow_case_);
+ __ Bind(&exit_);
+}
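+
+
+// The fast path above is a lookup in the single-character string cache:
+// char codes up to String::kMaxOneByteCharCode index a fixed array in which
+// an undefined entry means "not cached, take the slow case". A hedged C++
+// sketch (the helper, its parameters and the 0xff limit are assumptions made
+// for illustration):
+static void* CharFromCodeFastSketch(void** single_char_cache, void* undefined,
+ bool code_is_smi, int code) {
+ if (!code_is_smi || code > 0xff) return 0; // slow_case_
+ void* result = single_char_cache[code];
+ if (result == undefined) return 0; // slow_case_: not cached yet
+ return result; // cached one-character string
+}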
+
+
+void StringCharFromCodeGenerator::GenerateSlow(
+ MacroAssembler* masm,
+ const RuntimeCallHelper& call_helper) {
+ __ Abort(kUnexpectedFallthroughToCharFromCodeSlowCase);
+
+ __ Bind(&slow_case_);
+ call_helper.BeforeCall(masm);
+ __ Push(code_);
+ __ CallRuntime(Runtime::kCharFromCode, 1);
+ __ Mov(result_, x0);
+ call_helper.AfterCall(masm);
+ __ B(&exit_);
+
+ __ Abort(kUnexpectedFallthroughFromCharFromCodeSlowCase);
+}
+
+
+void ICCompareStub::GenerateSmis(MacroAssembler* masm) {
+ // Inputs are in x0 (lhs) and x1 (rhs).
+ ASSERT(state_ == CompareIC::SMI);
+ ASM_LOCATION("ICCompareStub[Smis]");
+ Label miss;
+ // Bail out (to 'miss') unless both x0 and x1 are smis.
+ __ JumpIfEitherNotSmi(x0, x1, &miss);
+
+ if (GetCondition() == eq) {
+ // For equality we do not care about the sign of the result.
+ __ Sub(x0, x0, x1);
+ } else {
+ // Untag before subtracting to avoid handling overflow.
+ __ SmiUntag(x1);
+ __ Sub(x0, x1, Operand::UntagSmi(x0));
+ }
+ __ Ret();
+
+ __ Bind(&miss);
+ GenerateMiss(masm);
+}
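+
+
+// A sketch of the smi-compare trick above (illustrative only; operand order
+// and tag width follow the stub's register convention and are not restated
+// here): for equality only zero-ness of the difference matters, so tagged
+// operands can be subtracted directly; for ordered compares both operands
+// are untagged first so the subtraction cannot overflow, and the caller
+// interprets the sign of the result.
+static long SmiCompareSketch(long lhs, long rhs, bool equality_only) {
+ if (equality_only) {
+ return lhs - rhs; // zero iff equal; operands may stay tagged
+ }
+ // '>> 1' stands in for smi untagging; the real shift depends on the build.
+ return (lhs >> 1) - (rhs >> 1); // sign encodes the ordering
+}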
+
+
+void ICCompareStub::GenerateNumbers(MacroAssembler* masm) {
+ ASSERT(state_ == CompareIC::NUMBER);
+ ASM_LOCATION("ICCompareStub[HeapNumbers]");
+
+ Label unordered, maybe_undefined1, maybe_undefined2;
+ Label miss, handle_lhs, values_in_d_regs;
+ Label untag_rhs, untag_lhs;
+
+ Register result = x0;
+ Register rhs = x0;
+ Register lhs = x1;
+ FPRegister rhs_d = d0;
+ FPRegister lhs_d = d1;
+
+ if (left_ == CompareIC::SMI) {
+ __ JumpIfNotSmi(lhs, &miss);
+ }
+ if (right_ == CompareIC::SMI) {
+ __ JumpIfNotSmi(rhs, &miss);
+ }
+
+ __ SmiUntagToDouble(rhs_d, rhs, kSpeculativeUntag);
+ __ SmiUntagToDouble(lhs_d, lhs, kSpeculativeUntag);
+
+ // Load rhs if it's a heap number.
+ __ JumpIfSmi(rhs, &handle_lhs);
+ __ CheckMap(rhs, x10, Heap::kHeapNumberMapRootIndex, &maybe_undefined1,
+ DONT_DO_SMI_CHECK);
+ __ Ldr(rhs_d, FieldMemOperand(rhs, HeapNumber::kValueOffset));
+
+ // Load lhs if it's a heap number.
+ __ Bind(&handle_lhs);
+ __ JumpIfSmi(lhs, &values_in_d_regs);
+ __ CheckMap(lhs, x10, Heap::kHeapNumberMapRootIndex, &maybe_undefined2,
+ DONT_DO_SMI_CHECK);
+ __ Ldr(lhs_d, FieldMemOperand(lhs, HeapNumber::kValueOffset));
+
+ __ Bind(&values_in_d_regs);
+ __ Fcmp(lhs_d, rhs_d);
+ __ B(vs, &unordered); // Overflow flag set if either is NaN.
+ STATIC_ASSERT((LESS == -1) && (EQUAL == 0) && (GREATER == 1));
+ __ Cset(result, gt); // gt => 1, otherwise (lt, eq) => 0 (EQUAL).
+ __ Csinv(result, result, xzr, ge); // lt => -1, gt => 1, eq => 0.
+ __ Ret();
+
+ __ Bind(&unordered);
+ ICCompareStub stub(op_, CompareIC::GENERIC, CompareIC::GENERIC,
+ CompareIC::GENERIC);
+ __ Jump(stub.GetCode(masm->isolate()), RelocInfo::CODE_TARGET);
+
+ __ Bind(&maybe_undefined1);
+ if (Token::IsOrderedRelationalCompareOp(op_)) {
+ __ JumpIfNotRoot(rhs, Heap::kUndefinedValueRootIndex, &miss);
+ __ JumpIfSmi(lhs, &unordered);
+ __ JumpIfNotObjectType(lhs, x10, x10, HEAP_NUMBER_TYPE, &maybe_undefined2);
+ __ B(&unordered);
+ }
+
+ __ Bind(&maybe_undefined2);
+ if (Token::IsOrderedRelationalCompareOp(op_)) {
+ __ JumpIfRoot(lhs, Heap::kUndefinedValueRootIndex, &unordered);
+ }
+
+ __ Bind(&miss);
+ GenerateMiss(masm);
+}
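+
+
+// The Fcmp/Cset/Csinv sequence above materialises the three-way result
+// without branches; NaN operands never reach it because the 'vs' branch
+// diverts them to the generic stub. A C++ sketch of the mapping (the helper
+// is illustrative, not part of the stub):
+static int DoubleCompareResultSketch(double lhs, double rhs) {
+ int result = (lhs > rhs) ? 1 : 0; // Cset(result, gt)
+ if (!(lhs >= rhs)) result = -1; // Csinv(result, result, xzr, ge)
+ return result; // -1 => LESS, 0 => EQUAL, 1 => GREATER
+}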
+
+
+void ICCompareStub::GenerateInternalizedStrings(MacroAssembler* masm) {
+ ASSERT(state_ == CompareIC::INTERNALIZED_STRING);
+ ASM_LOCATION("ICCompareStub[InternalizedStrings]");
+ Label miss;
+
+ Register result = x0;
+ Register rhs = x0;
+ Register lhs = x1;
+
+ // Check that both operands are heap objects.
+ __ JumpIfEitherSmi(lhs, rhs, &miss);
+
+ // Check that both operands are internalized strings.
+ Register rhs_map = x10;
+ Register lhs_map = x11;
+ Register rhs_type = x10;
+ Register lhs_type = x11;
+ __ Ldr(lhs_map, FieldMemOperand(lhs, HeapObject::kMapOffset));
+ __ Ldr(rhs_map, FieldMemOperand(rhs, HeapObject::kMapOffset));
+ __ Ldrb(lhs_type, FieldMemOperand(lhs_map, Map::kInstanceTypeOffset));
+ __ Ldrb(rhs_type, FieldMemOperand(rhs_map, Map::kInstanceTypeOffset));
+
+ STATIC_ASSERT((kInternalizedTag == 0) && (kStringTag == 0));
+ __ Orr(x12, lhs_type, rhs_type);
+ __ TestAndBranchIfAnySet(
+ x12, kIsNotStringMask | kIsNotInternalizedMask, &miss);
+
+ // Internalized strings are compared by identity.
+ STATIC_ASSERT(EQUAL == 0);
+ __ Cmp(lhs, rhs);
+ __ Cset(result, ne);
+ __ Ret();
+
+ __ Bind(&miss);
+ GenerateMiss(masm);
+}
+
+
+void ICCompareStub::GenerateUniqueNames(MacroAssembler* masm) {
+ ASSERT(state_ == CompareIC::UNIQUE_NAME);
+ ASM_LOCATION("ICCompareStub[UniqueNames]");
+ ASSERT(GetCondition() == eq);
+ Label miss;
+
+ Register result = x0;
+ Register rhs = x0;
+ Register lhs = x1;
+
+ Register lhs_instance_type = w2;
+ Register rhs_instance_type = w3;
+
+ // Check that both operands are heap objects.
+ __ JumpIfEitherSmi(lhs, rhs, &miss);
+
+ // Check that both operands are unique names. This leaves the instance
+ // types loaded in lhs_instance_type and rhs_instance_type.
+ __ Ldr(x10, FieldMemOperand(lhs, HeapObject::kMapOffset));
+ __ Ldr(x11, FieldMemOperand(rhs, HeapObject::kMapOffset));
+ __ Ldrb(lhs_instance_type, FieldMemOperand(x10, Map::kInstanceTypeOffset));
+ __ Ldrb(rhs_instance_type, FieldMemOperand(x11, Map::kInstanceTypeOffset));
+
+ // To avoid a miss, each instance type should be either SYMBOL_TYPE or it
+ // should have kInternalizedTag set.
+ __ JumpIfNotUniqueName(lhs_instance_type, &miss);
+ __ JumpIfNotUniqueName(rhs_instance_type, &miss);
+
+ // Unique names are compared by identity.
+ STATIC_ASSERT(EQUAL == 0);
+ __ Cmp(lhs, rhs);
+ __ Cset(result, ne);
+ __ Ret();
+
+ __ Bind(&miss);
+ GenerateMiss(masm);
+}
+
+
+void ICCompareStub::GenerateStrings(MacroAssembler* masm) {
+ ASSERT(state_ == CompareIC::STRING);
+ ASM_LOCATION("ICCompareStub[Strings]");
+
+ Label miss;
+
+ bool equality = Token::IsEqualityOp(op_);
+
+ Register result = x0;
+ Register rhs = x0;
+ Register lhs = x1;
+
+ // Check that both operands are heap objects.
+ __ JumpIfEitherSmi(rhs, lhs, &miss);
+
+ // Check that both operands are strings.
+ Register rhs_map = x10;
+ Register lhs_map = x11;
+ Register rhs_type = x10;
+ Register lhs_type = x11;
+ __ Ldr(lhs_map, FieldMemOperand(lhs, HeapObject::kMapOffset));
+ __ Ldr(rhs_map, FieldMemOperand(rhs, HeapObject::kMapOffset));
+ __ Ldrb(lhs_type, FieldMemOperand(lhs_map, Map::kInstanceTypeOffset));
+ __ Ldrb(rhs_type, FieldMemOperand(rhs_map, Map::kInstanceTypeOffset));
+ STATIC_ASSERT(kNotStringTag != 0);
+ __ Orr(x12, lhs_type, rhs_type);
+ __ Tbnz(x12, MaskToBit(kIsNotStringMask), &miss);
+
+ // Fast check for identical strings.
+ Label not_equal;
+ __ Cmp(lhs, rhs);
+ __ B(ne, &not_equal);
+ __ Mov(result, EQUAL);
+ __ Ret();
+
+ __ Bind(&not_equal);
+ // Handle non-identical strings.
+
+ // Check that both strings are internalized strings. If they are, we're done
+ // because we already know they are not identical. We know they are both
+ // strings.
+ if (equality) {
+ ASSERT(GetCondition() == eq);
+ STATIC_ASSERT(kInternalizedTag == 0);
+ Label not_internalized_strings;
+ __ Orr(x12, lhs_type, rhs_type);
+ __ TestAndBranchIfAnySet(
+ x12, kIsNotInternalizedMask, &not_internalized_strings);
+ // Result is in rhs (x0), and not EQUAL, as rhs is not a smi.
+ __ Ret();
+ __ Bind(&not_internalized_strings);
+ }
+
+ // Check that both strings are sequential ASCII.
+ Label runtime;
+ __ JumpIfBothInstanceTypesAreNotSequentialAscii(
+ lhs_type, rhs_type, x12, x13, &runtime);
+
+ // Compare flat ASCII strings. Returns when done.
+ if (equality) {
+ StringCompareStub::GenerateFlatAsciiStringEquals(
+ masm, lhs, rhs, x10, x11, x12);
+ } else {
+ StringCompareStub::GenerateCompareFlatAsciiStrings(
+ masm, lhs, rhs, x10, x11, x12, x13);
+ }
+
+ // Handle more complex cases in runtime.
+ __ Bind(&runtime);
+ __ Push(lhs, rhs);
+ if (equality) {
+ __ TailCallRuntime(Runtime::kStringEquals, 2, 1);
+ } else {
+ __ TailCallRuntime(Runtime::kHiddenStringCompare, 2, 1);
+ }
+
+ __ Bind(&miss);
+ GenerateMiss(masm);
+}
+
+
+void ICCompareStub::GenerateObjects(MacroAssembler* masm) {
+ ASSERT(state_ == CompareIC::OBJECT);
+ ASM_LOCATION("ICCompareStub[Objects]");
+
+ Label miss;
+
+ Register result = x0;
+ Register rhs = x0;
+ Register lhs = x1;
+
+ __ JumpIfEitherSmi(rhs, lhs, &miss);
+
+ __ JumpIfNotObjectType(rhs, x10, x10, JS_OBJECT_TYPE, &miss);
+ __ JumpIfNotObjectType(lhs, x10, x10, JS_OBJECT_TYPE, &miss);
+
+ ASSERT(GetCondition() == eq);
+ __ Sub(result, rhs, lhs);
+ __ Ret();
+
+ __ Bind(&miss);
+ GenerateMiss(masm);
+}
+
+
+void ICCompareStub::GenerateKnownObjects(MacroAssembler* masm) {
+ ASM_LOCATION("ICCompareStub[KnownObjects]");
+
+ Label miss;
+
+ Register result = x0;
+ Register rhs = x0;
+ Register lhs = x1;
+
+ __ JumpIfEitherSmi(rhs, lhs, &miss);
+
+ Register rhs_map = x10;
+ Register lhs_map = x11;
+ __ Ldr(rhs_map, FieldMemOperand(rhs, HeapObject::kMapOffset));
+ __ Ldr(lhs_map, FieldMemOperand(lhs, HeapObject::kMapOffset));
+ __ Cmp(rhs_map, Operand(known_map_));
+ __ B(ne, &miss);
+ __ Cmp(lhs_map, Operand(known_map_));
+ __ B(ne, &miss);
+
+ __ Sub(result, rhs, lhs);
+ __ Ret();
+
+ __ Bind(&miss);
+ GenerateMiss(masm);
+}
+
+
+// This method handles the case where a compare stub had the wrong
+// implementation. It calls a miss handler, which rewrites the stub. All other
+// ICCompareStub::Generate* methods should fall back into this one if their
+// operands were not the expected types.
+void ICCompareStub::GenerateMiss(MacroAssembler* masm) {
+ ASM_LOCATION("ICCompareStub[Miss]");
+
+ Register stub_entry = x11;
+ {
+ ExternalReference miss =
+ ExternalReference(IC_Utility(IC::kCompareIC_Miss), masm->isolate());
+
+ FrameScope scope(masm, StackFrame::INTERNAL);
+ Register op = x10;
+ Register left = x1;
+ Register right = x0;
+ // Preserve some caller-saved registers.
+ __ Push(x1, x0, lr);
+ // Push the arguments.
+ __ Mov(op, Smi::FromInt(op_));
+ __ Push(left, right, op);
+
+ // Call the miss handler. This also pops the arguments.
+ __ CallExternalReference(miss, 3);
+
+ // Compute the entry point of the rewritten stub.
+ __ Add(stub_entry, x0, Code::kHeaderSize - kHeapObjectTag);
+ // Restore caller-saved registers.
+ __ Pop(lr, x0, x1);
+ }
+
+ // Tail-call to the new stub.
+ __ Jump(stub_entry);
+}
+
+
+void StringHelper::GenerateHashInit(MacroAssembler* masm,
+ Register hash,
+ Register character) {
+ ASSERT(!AreAliased(hash, character));
+
+ // hash = character + (character << 10);
+ __ LoadRoot(hash, Heap::kHashSeedRootIndex);
+ // Untag smi seed and add the character.
+ __ Add(hash, character, Operand(hash, LSR, kSmiShift));
+
+ // Compute hashes modulo 2^32 using a 32-bit W register.
+ Register hash_w = hash.W();
+
+ // hash += hash << 10;
+ __ Add(hash_w, hash_w, Operand(hash_w, LSL, 10));
+ // hash ^= hash >> 6;
+ __ Eor(hash_w, hash_w, Operand(hash_w, LSR, 6));
+}
+
+
+void StringHelper::GenerateHashAddCharacter(MacroAssembler* masm,
+ Register hash,
+ Register character) {
+ ASSERT(!AreAliased(hash, character));
+
+ // hash += character;
+ __ Add(hash, hash, character);
+
+ // Compute hashes modulo 2^32 using a 32-bit W register.
+ Register hash_w = hash.W();
+
+ // hash += hash << 10;
+ __ Add(hash_w, hash_w, Operand(hash_w, LSL, 10));
+ // hash ^= hash >> 6;
+ __ Eor(hash_w, hash_w, Operand(hash_w, LSR, 6));
+}
+
+
+void StringHelper::GenerateHashGetHash(MacroAssembler* masm,
+ Register hash,
+ Register scratch) {
+ // Compute hashes modulo 2^32 using a 32-bit W register.
+ Register hash_w = hash.W();
+ Register scratch_w = scratch.W();
+ ASSERT(!AreAliased(hash_w, scratch_w));
+
+ // hash += hash << 3;
+ __ Add(hash_w, hash_w, Operand(hash_w, LSL, 3));
+ // hash ^= hash >> 11;
+ __ Eor(hash_w, hash_w, Operand(hash_w, LSR, 11));
+ // hash += hash << 15;
+ __ Add(hash_w, hash_w, Operand(hash_w, LSL, 15));
+
+ __ Ands(hash_w, hash_w, String::kHashBitMask);
+
+ // if (hash == 0) hash = 27;
+ __ Mov(scratch_w, StringHasher::kZeroHash);
+ __ Csel(hash_w, scratch_w, hash_w, eq);
+}
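+
+
+// Taken together, the three hash helpers above compute the runtime's string
+// hash: seed plus per-character mixing, then a finalisation step, all modulo
+// 2^32. A plain C++ sketch (the standalone helper, its signature and the
+// hash_bit_mask parameter are illustrative assumptions; the '27' fallback
+// matches the comment above):
+static unsigned StringHashSketch(unsigned seed, const unsigned char* chars,
+ int length, unsigned hash_bit_mask) {
+ // Assumes length >= 1, as the generated code does.
+ unsigned hash = seed + chars[0]; // GenerateHashInit
+ hash += hash << 10;
+ hash ^= hash >> 6;
+ for (int i = 1; i < length; i++) { // GenerateHashAddCharacter
+ hash += chars[i];
+ hash += hash << 10;
+ hash ^= hash >> 6;
+ }
+ hash += hash << 3; // GenerateHashGetHash
+ hash ^= hash >> 11;
+ hash += hash << 15;
+ hash &= hash_bit_mask; // String::kHashBitMask
+ return (hash == 0) ? 27 : hash; // StringHasher::kZeroHash
+}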
+
+
+void SubStringStub::Generate(MacroAssembler* masm) {
+ ASM_LOCATION("SubStringStub::Generate");
+ Label runtime;
+
+ // Stack frame on entry.
+ // lr: return address
+ // jssp[0]: substring "to" offset
+ // jssp[8]: substring "from" offset
+ // jssp[16]: pointer to string object
+
+ // This stub is called from the native-call %_SubString(...), so
+ // nothing can be assumed about the arguments. It is tested that:
+ // "string" is a sequential string,
+ // both "from" and "to" are smis, and
+ // 0 <= from <= to <= string.length (in debug mode.)
+ // If any of these assumptions fail, we call the runtime system.
+
+ static const int kToOffset = 0 * kPointerSize;
+ static const int kFromOffset = 1 * kPointerSize;
+ static const int kStringOffset = 2 * kPointerSize;
+
+ Register to = x0;
+ Register from = x15;
+ Register input_string = x10;
+ Register input_length = x11;
+ Register input_type = x12;
+ Register result_string = x0;
+ Register result_length = x1;
+ Register temp = x3;
+
+ __ Peek(to, kToOffset);
+ __ Peek(from, kFromOffset);
+
+ // Check that both from and to are smis. If not, jump to runtime.
+ __ JumpIfEitherNotSmi(from, to, &runtime);
+ __ SmiUntag(from);
+ __ SmiUntag(to);
+
+ // Calculate difference between from and to. If to < from, branch to runtime.
+ __ Subs(result_length, to, from);
+ __ B(mi, &runtime);
+
+ // Check from is positive.
+ __ Tbnz(from, kWSignBit, &runtime);
+
+ // Make sure first argument is a string.
+ __ Peek(input_string, kStringOffset);
+ __ JumpIfSmi(input_string, &runtime);
+ __ IsObjectJSStringType(input_string, input_type, &runtime);
+
+ Label single_char;
+ __ Cmp(result_length, 1);
+ __ B(eq, &single_char);
+
+ // Short-cut for the case of trivial substring.
+ Label return_x0;
+ __ Ldrsw(input_length,
+ UntagSmiFieldMemOperand(input_string, String::kLengthOffset));
+
+ __ Cmp(result_length, input_length);
+ __ CmovX(x0, input_string, eq);
+ // Return original string.
+ __ B(eq, &return_x0);
+
+ // Longer than original string's length or negative: unsafe arguments.
+ __ B(hi, &runtime);
+
+ // Shorter than original string's length: an actual substring.
+
+ // x0 (to) : substring end character offset
+ // x1 (result_length) : length of substring result
+ // x10 (input_string) : pointer to input string object
+ // x10 (unpacked_string) : pointer to unpacked string object
+ // x11 (input_length) : length of input string
+ // x12 (input_type) : instance type of input string
+ // x15 (from) : substring start character offset
+
+ // Deal with different string types: update the index if necessary and put
+ // the underlying string into register unpacked_string.
+ Label underlying_unpacked, sliced_string, seq_or_external_string;
+ Label update_instance_type;
+ // If the string is not indirect, it can only be sequential or external.
+ STATIC_ASSERT(kIsIndirectStringMask == (kSlicedStringTag & kConsStringTag));
+ STATIC_ASSERT(kIsIndirectStringMask != 0);
+
+ // Test for string types, and branch/fall through to appropriate unpacking
+ // code.
+ __ Tst(input_type, kIsIndirectStringMask);
+ __ B(eq, &seq_or_external_string);
+ __ Tst(input_type, kSlicedNotConsMask);
+ __ B(ne, &sliced_string);
+
+ Register unpacked_string = input_string;
+
+ // Cons string. Check whether it is flat, then fetch first part.
+ __ Ldr(temp, FieldMemOperand(input_string, ConsString::kSecondOffset));
+ __ JumpIfNotRoot(temp, Heap::kempty_stringRootIndex, &runtime);
+ __ Ldr(unpacked_string,
+ FieldMemOperand(input_string, ConsString::kFirstOffset));
+ __ B(&update_instance_type);
+
+ __ Bind(&sliced_string);
+ // Sliced string. Fetch parent and correct start index by offset.
+ __ Ldrsw(temp,
+ UntagSmiFieldMemOperand(input_string, SlicedString::kOffsetOffset));
+ __ Add(from, from, temp);
+ __ Ldr(unpacked_string,
+ FieldMemOperand(input_string, SlicedString::kParentOffset));
+
+ __ Bind(&update_instance_type);
+ __ Ldr(temp, FieldMemOperand(unpacked_string, HeapObject::kMapOffset));
+ __ Ldrb(input_type, FieldMemOperand(temp, Map::kInstanceTypeOffset));
+ // Now control must go to &underlying_unpacked. Since no code is generated
+ // before it, we fall through instead of generating a useless branch.
+
+ __ Bind(&seq_or_external_string);
+ // Sequential or external string. Registers unpacked_string and input_string
+ // alias, so there's nothing to do here.
+ // Note that if code is added here, the above code must be updated.
+
+ // x0 (result_string) : pointer to result string object (uninit)
+ // x1 (result_length) : length of substring result
+ // x10 (unpacked_string) : pointer to unpacked string object
+ // x11 (input_length) : length of input string
+ // x12 (input_type) : instance type of input string
+ // x15 (from) : substring start character offset
+ __ Bind(&underlying_unpacked);
+
+ if (FLAG_string_slices) {
+ Label copy_routine;
+ __ Cmp(result_length, SlicedString::kMinLength);
+ // Short slice. Copy instead of slicing.
+ __ B(lt, &copy_routine);
+ // Allocate new sliced string. At this point we do not reload the instance
+ // type including the string encoding because we simply rely on the info
+ // provided by the original string. It does not matter if the original
+ // string's encoding is wrong because we always have to recheck encoding of
+ // the newly created string's parent anyway due to externalized strings.
+ Label two_byte_slice, set_slice_header;
+ STATIC_ASSERT((kStringEncodingMask & kOneByteStringTag) != 0);
+ STATIC_ASSERT((kStringEncodingMask & kTwoByteStringTag) == 0);
+ __ Tbz(input_type, MaskToBit(kStringEncodingMask), &two_byte_slice);
+ __ AllocateAsciiSlicedString(result_string, result_length, x3, x4,
+ &runtime);
+ __ B(&set_slice_header);
+
+ __ Bind(&two_byte_slice);
+ __ AllocateTwoByteSlicedString(result_string, result_length, x3, x4,
+ &runtime);
+
+ __ Bind(&set_slice_header);
+ __ SmiTag(from);
+ __ Str(from, FieldMemOperand(result_string, SlicedString::kOffsetOffset));
+ __ Str(unpacked_string,
+ FieldMemOperand(result_string, SlicedString::kParentOffset));
+ __ B(&return_x0);
+
+ __ Bind(&copy_routine);
+ }
+
+ // x0 (result_string) : pointer to result string object (uninit)
+ // x1 (result_length) : length of substring result
+ // x10 (unpacked_string) : pointer to unpacked string object
+ // x11 (input_length) : length of input string
+ // x12 (input_type) : instance type of input string
+ // x13 (unpacked_char0) : pointer to first char of unpacked string (uninit)
+ // x13 (substring_char0) : pointer to first char of substring (uninit)
+ // x14 (result_char0) : pointer to first char of result (uninit)
+ // x15 (from) : substring start character offset
+ Register unpacked_char0 = x13;
+ Register substring_char0 = x13;
+ Register result_char0 = x14;
+ Label two_byte_sequential, sequential_string, allocate_result;
+ STATIC_ASSERT(kExternalStringTag != 0);
+ STATIC_ASSERT(kSeqStringTag == 0);
+
+ __ Tst(input_type, kExternalStringTag);
+ __ B(eq, &sequential_string);
+
+ __ Tst(input_type, kShortExternalStringTag);
+ __ B(ne, &runtime);
+ __ Ldr(unpacked_char0,
+ FieldMemOperand(unpacked_string, ExternalString::kResourceDataOffset));
+ // unpacked_char0 points to the first character of the underlying string.
+ __ B(&allocate_result);
+
+ __ Bind(&sequential_string);
+ // Locate first character of underlying subject string.
+ STATIC_ASSERT(SeqTwoByteString::kHeaderSize == SeqOneByteString::kHeaderSize);
+ __ Add(unpacked_char0, unpacked_string,
+ SeqOneByteString::kHeaderSize - kHeapObjectTag);
+
+ __ Bind(&allocate_result);
+ // Sequential ASCII string. Allocate the result.
+ STATIC_ASSERT((kOneByteStringTag & kStringEncodingMask) != 0);
+ __ Tbz(input_type, MaskToBit(kStringEncodingMask), &two_byte_sequential);
+
+ // Allocate and copy the resulting ASCII string.
+ __ AllocateAsciiString(result_string, result_length, x3, x4, x5, &runtime);
+
+ // Locate first character of substring to copy.
+ __ Add(substring_char0, unpacked_char0, from);
+
+ // Locate first character of result.
+ __ Add(result_char0, result_string,
+ SeqOneByteString::kHeaderSize - kHeapObjectTag);
+
+ STATIC_ASSERT((SeqOneByteString::kHeaderSize & kObjectAlignmentMask) == 0);
+ __ CopyBytes(result_char0, substring_char0, result_length, x3, kCopyLong);
+ __ B(&return_x0);
+
+ // Allocate and copy the resulting two-byte string.
+ __ Bind(&two_byte_sequential);
+ __ AllocateTwoByteString(result_string, result_length, x3, x4, x5, &runtime);
+
+ // Locate first character of substring to copy.
+ __ Add(substring_char0, unpacked_char0, Operand(from, LSL, 1));
+
+ // Locate first character of result.
+ __ Add(result_char0, result_string,
+ SeqTwoByteString::kHeaderSize - kHeapObjectTag);
+
+ STATIC_ASSERT((SeqTwoByteString::kHeaderSize & kObjectAlignmentMask) == 0);
+ __ Add(result_length, result_length, result_length);
+ __ CopyBytes(result_char0, substring_char0, result_length, x3, kCopyLong);
+
+ __ Bind(&return_x0);
+ Counters* counters = masm->isolate()->counters();
+ __ IncrementCounter(counters->sub_string_native(), 1, x3, x4);
+ __ Drop(3);
+ __ Ret();
+
+ __ Bind(&runtime);
+ __ TailCallRuntime(Runtime::kHiddenSubString, 3, 1);
+
+ __ bind(&single_char);
+ // x1: result_length
+ // x10: input_string
+ // x12: input_type
+ // x15: from (untagged)
+ __ SmiTag(from);
+ StringCharAtGenerator generator(
+ input_string, from, result_length, x0,
+ &runtime, &runtime, &runtime, STRING_INDEX_IS_NUMBER);
+ generator.GenerateFast(masm);
+ __ Drop(3);
+ __ Ret();
+ generator.SkipSlow(masm, &runtime);
+}
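+
+
+// A decision sketch for the substring strategy implemented above (the enum,
+// the helper and the kSlicedMinLength parameter, which stands in for
+// SlicedString::kMinLength, are illustrative assumptions):
+enum SubStringRouteSketch {
+ kSubStringRuntime, kSubStringReturnOriginal, kSubStringSingleChar,
+ kSubStringMakeSlice, kSubStringCopyChars
+};
+static SubStringRouteSketch ClassifySubStringSketch(
+ bool args_are_smis, bool receiver_is_string, int from, int to, int length,
+ bool string_slices_enabled, int kSlicedMinLength) {
+ if (!args_are_smis || !receiver_is_string) return kSubStringRuntime;
+ int result_length = to - from;
+ if (result_length < 0 || from < 0 || result_length > length) {
+ return kSubStringRuntime; // unsafe arguments
+ }
+ if (result_length == 1) return kSubStringSingleChar; // char-at cache
+ if (result_length == length) return kSubStringReturnOriginal;
+ if (string_slices_enabled && result_length >= kSlicedMinLength) {
+ return kSubStringMakeSlice; // share the unpacked parent, record 'from'
+ }
+ return kSubStringCopyChars; // allocate a sequential string and copy
+}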
+
+
+void StringCompareStub::GenerateFlatAsciiStringEquals(MacroAssembler* masm,
+ Register left,
+ Register right,
+ Register scratch1,
+ Register scratch2,
+ Register scratch3) {
+ ASSERT(!AreAliased(left, right, scratch1, scratch2, scratch3));
+ Register result = x0;
+ Register left_length = scratch1;
+ Register right_length = scratch2;
+
+ // Compare lengths. If lengths differ, strings can't be equal. Lengths are
+ // smis, and don't need to be untagged.
+ Label strings_not_equal, check_zero_length;
+ __ Ldr(left_length, FieldMemOperand(left, String::kLengthOffset));
+ __ Ldr(right_length, FieldMemOperand(right, String::kLengthOffset));
+ __ Cmp(left_length, right_length);
+ __ B(eq, &check_zero_length);
+
+ __ Bind(&strings_not_equal);
+ __ Mov(result, Smi::FromInt(NOT_EQUAL));
+ __ Ret();
+
+ // Check if the length is zero. If so, the strings must be equal (and empty.)
+ Label compare_chars;
+ __ Bind(&check_zero_length);
+ STATIC_ASSERT(kSmiTag == 0);
+ __ Cbnz(left_length, &compare_chars);
+ __ Mov(result, Smi::FromInt(EQUAL));
+ __ Ret();
+
+ // Compare characters. Falls through if all characters are equal.
+ __ Bind(&compare_chars);
+ GenerateAsciiCharsCompareLoop(masm, left, right, left_length, scratch2,
+ scratch3, &strings_not_equal);
+
+ // Characters in strings are equal.
+ __ Mov(result, Smi::FromInt(EQUAL));
+ __ Ret();
+}
+
+
+void StringCompareStub::GenerateCompareFlatAsciiStrings(MacroAssembler* masm,
+ Register left,
+ Register right,
+ Register scratch1,
+ Register scratch2,
+ Register scratch3,
+ Register scratch4) {
+ ASSERT(!AreAliased(left, right, scratch1, scratch2, scratch3, scratch4));
+ Label result_not_equal, compare_lengths;
+
+ // Find minimum length and length difference.
+ Register length_delta = scratch3;
+ __ Ldr(scratch1, FieldMemOperand(left, String::kLengthOffset));
+ __ Ldr(scratch2, FieldMemOperand(right, String::kLengthOffset));
+ __ Subs(length_delta, scratch1, scratch2);
+
+ Register min_length = scratch1;
+ __ Csel(min_length, scratch2, scratch1, gt);
+ __ Cbz(min_length, &compare_lengths);
+
+ // Compare loop.
+ GenerateAsciiCharsCompareLoop(masm,
+ left, right, min_length, scratch2, scratch4,
+ &result_not_equal);
+
+ // Compare lengths - strings up to min-length are equal.
+ __ Bind(&compare_lengths);
+
+ ASSERT(Smi::FromInt(EQUAL) == static_cast<Smi*>(0));
+
+ // Use length_delta as result if it's zero.
+ Register result = x0;
+ __ Subs(result, length_delta, 0);
+
+ __ Bind(&result_not_equal);
+ Register greater = x10;
+ Register less = x11;
+ __ Mov(greater, Smi::FromInt(GREATER));
+ __ Mov(less, Smi::FromInt(LESS));
+ __ CmovX(result, greater, gt);
+ __ CmovX(result, less, lt);
+ __ Ret();
+}
+
+
+void StringCompareStub::GenerateAsciiCharsCompareLoop(
+ MacroAssembler* masm,
+ Register left,
+ Register right,
+ Register length,
+ Register scratch1,
+ Register scratch2,
+ Label* chars_not_equal) {
+ ASSERT(!AreAliased(left, right, length, scratch1, scratch2));
+
+ // Change the index to run from -length to -1 by adding length to the
+ // string start. This means the loop ends when the index reaches zero,
+ // which doesn't need an additional compare.
+ __ SmiUntag(length);
+ __ Add(scratch1, length, SeqOneByteString::kHeaderSize - kHeapObjectTag);
+ __ Add(left, left, scratch1);
+ __ Add(right, right, scratch1);
+
+ Register index = length;
+ __ Neg(index, length); // index = -length;
+
+ // Compare loop
+ Label loop;
+ __ Bind(&loop);
+ __ Ldrb(scratch1, MemOperand(left, index));
+ __ Ldrb(scratch2, MemOperand(right, index));
+ __ Cmp(scratch1, scratch2);
+ __ B(ne, chars_not_equal);
+ __ Add(index, index, 1);
+ __ Cbnz(index, &loop);
+}
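+
+
+// The loop above uses the negative-index idiom: both character cursors are
+// advanced just past the compared range and the index runs from -length up
+// to zero, so no separate bounds compare is needed. A standalone C++ sketch
+// (names are ours, not taken from this file):
+static bool AsciiCharsEqualSketch(const unsigned char* left,
+ const unsigned char* right, int length) {
+ left += length; // point just past the compared range
+ right += length;
+ for (int index = -length; index != 0; index++) {
+ if (left[index] != right[index]) return false; // chars_not_equal
+ }
+ return true; // fall through: all characters in the range are equal
+}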
+
+
+void StringCompareStub::Generate(MacroAssembler* masm) {
+ Label runtime;
+
+ Counters* counters = masm->isolate()->counters();
+
+ // Stack frame on entry.
+ // sp[0]: right string
+ // sp[8]: left string
+ Register right = x10;
+ Register left = x11;
+ Register result = x0;
+ __ Pop(right, left);
+
+ Label not_same;
+ __ Subs(result, right, left);
+ __ B(ne, &not_same);
+ STATIC_ASSERT(EQUAL == 0);
+ __ IncrementCounter(counters->string_compare_native(), 1, x3, x4);
+ __ Ret();
+
+ __ Bind(&not_same);
+
+ // Check that both objects are sequential ASCII strings.
+ __ JumpIfEitherIsNotSequentialAsciiStrings(left, right, x12, x13, &runtime);
+
+ // Compare flat ASCII strings natively. Remove arguments from stack first,
+ // as this function will generate a return.
+ __ IncrementCounter(counters->string_compare_native(), 1, x3, x4);
+ GenerateCompareFlatAsciiStrings(masm, left, right, x12, x13, x14, x15);
+
+ __ Bind(&runtime);
+
+ // Push arguments back on to the stack.
+ // sp[0] = right string
+ // sp[8] = left string.
+ __ Push(left, right);
+
+ // Call the runtime.
+ // Returns -1 (less), 0 (equal), or 1 (greater) tagged as a small integer.
+ __ TailCallRuntime(Runtime::kHiddenStringCompare, 2, 1);
+}
+
+
+void ArrayPushStub::Generate(MacroAssembler* masm) {
+ Register receiver = x0;
+
+ int argc = arguments_count();
+
+ if (argc == 0) {
+ // Nothing to do, just return the length.
+ __ Ldr(x0, FieldMemOperand(receiver, JSArray::kLengthOffset));
+ __ Drop(argc + 1);
+ __ Ret();
+ return;
+ }
+
+ Isolate* isolate = masm->isolate();
+
+ if (argc != 1) {
+ __ TailCallExternalReference(
+ ExternalReference(Builtins::c_ArrayPush, isolate), argc + 1, 1);
+ return;
+ }
+
+ Label call_builtin, attempt_to_grow_elements, with_write_barrier;
+
+ Register elements_length = x8;
+ Register length = x7;
+ Register elements = x6;
+ Register end_elements = x5;
+ Register value = x4;
+ // Get the elements array of the object.
+ __ Ldr(elements, FieldMemOperand(receiver, JSArray::kElementsOffset));
+
+ if (IsFastSmiOrObjectElementsKind(elements_kind())) {
+ // Check that the elements are in fast mode and writable.
+ __ CheckMap(elements,
+ x10,
+ Heap::kFixedArrayMapRootIndex,
+ &call_builtin,
+ DONT_DO_SMI_CHECK);
+ }
+
+ // Get the array's length and calculate new length.
+ __ Ldr(length, FieldMemOperand(receiver, JSArray::kLengthOffset));
+ STATIC_ASSERT(kSmiTag == 0);
+ __ Add(length, length, Smi::FromInt(argc));
+
+ // Check if we could survive without allocation.
+ __ Ldr(elements_length,
+ FieldMemOperand(elements, FixedArray::kLengthOffset));
+ __ Cmp(length, elements_length);
+
+ const int kEndElementsOffset =
+ FixedArray::kHeaderSize - kHeapObjectTag - argc * kPointerSize;
+
+ if (IsFastSmiOrObjectElementsKind(elements_kind())) {
+ __ B(gt, &attempt_to_grow_elements);
+
+ // Check if value is a smi.
+ __ Peek(value, (argc - 1) * kPointerSize);
+ __ JumpIfNotSmi(value, &with_write_barrier);
+
+ // Store the value.
+ // We may need the address of the stored slot (end_elements) below, so use
+ // a pre-indexed store that leaves that address in end_elements.
+ __ Add(end_elements, elements,
+ Operand::UntagSmiAndScale(length, kPointerSizeLog2));
+ __ Str(value, MemOperand(end_elements, kEndElementsOffset, PreIndex));
+ } else {
+ __ B(gt, &call_builtin);
+
+ __ Peek(value, (argc - 1) * kPointerSize);
+ __ StoreNumberToDoubleElements(value, length, elements, x10, d0, d1,
+ &call_builtin, argc * kDoubleSize);
+ }
+
+ // Save new length.
+ __ Str(length, FieldMemOperand(receiver, JSArray::kLengthOffset));
+
+ // Return length.
+ __ Drop(argc + 1);
+ __ Mov(x0, length);
+ __ Ret();
+
+ if (IsFastDoubleElementsKind(elements_kind())) {
+ __ Bind(&call_builtin);
+ __ TailCallExternalReference(
+ ExternalReference(Builtins::c_ArrayPush, isolate), argc + 1, 1);
+ return;
+ }
+
+ __ Bind(&with_write_barrier);
+
+ if (IsFastSmiElementsKind(elements_kind())) {
+ if (FLAG_trace_elements_transitions) {
+ __ B(&call_builtin);
+ }
+
+ __ Ldr(x10, FieldMemOperand(value, HeapObject::kMapOffset));
+ __ JumpIfHeapNumber(x10, &call_builtin);
+
+ ElementsKind target_kind = IsHoleyElementsKind(elements_kind())
+ ? FAST_HOLEY_ELEMENTS : FAST_ELEMENTS;
+ __ Ldr(x10, GlobalObjectMemOperand());
+ __ Ldr(x10, FieldMemOperand(x10, GlobalObject::kNativeContextOffset));
+ __ Ldr(x10, ContextMemOperand(x10, Context::JS_ARRAY_MAPS_INDEX));
+ const int header_size = FixedArrayBase::kHeaderSize;
+ // Verify that the object can be transitioned in place.
+ const int origin_offset = header_size + elements_kind() * kPointerSize;
+ __ ldr(x11, FieldMemOperand(receiver, origin_offset));
+ __ ldr(x12, FieldMemOperand(x10, HeapObject::kMapOffset));
+ __ cmp(x11, x12);
+ __ B(ne, &call_builtin);
+
+ const int target_offset = header_size + target_kind * kPointerSize;
+ __ Ldr(x10, FieldMemOperand(x10, target_offset));
+ __ Mov(x11, receiver);
+ ElementsTransitionGenerator::GenerateMapChangeElementsTransition(
+ masm, DONT_TRACK_ALLOCATION_SITE, NULL);
+ }
+
+ // Save new length.
+ __ Str(length, FieldMemOperand(receiver, JSArray::kLengthOffset));
+
+ // Store the value.
+ // We may need the address of the stored slot (end_elements) below, so use
+ // a pre-indexed store that leaves that address in end_elements.
+ __ Add(end_elements, elements,
+ Operand::UntagSmiAndScale(length, kPointerSizeLog2));
+ __ Str(value, MemOperand(end_elements, kEndElementsOffset, PreIndex));
+
+ __ RecordWrite(elements,
+ end_elements,
+ value,
+ kLRHasNotBeenSaved,
+ kDontSaveFPRegs,
+ EMIT_REMEMBERED_SET,
+ OMIT_SMI_CHECK);
+ __ Drop(argc + 1);
+ __ Mov(x0, length);
+ __ Ret();
+
+ __ Bind(&attempt_to_grow_elements);
+
+ if (!FLAG_inline_new) {
+ __ B(&call_builtin);
+ }
+
+ Register argument = x2;
+ __ Peek(argument, (argc - 1) * kPointerSize);
+ // Growing elements that are SMI-only requires special handling in case
+ // the new element is non-Smi. For now, delegate to the builtin.
+ if (IsFastSmiElementsKind(elements_kind())) {
+ __ JumpIfNotSmi(argument, &call_builtin);
+ }
+
+ // We could be lucky and the elements array could be at the top of new-space.
+ // In this case we can just grow it in place by moving the allocation pointer
+ // up.
+ ExternalReference new_space_allocation_top =
+ ExternalReference::new_space_allocation_top_address(isolate);
+ ExternalReference new_space_allocation_limit =
+ ExternalReference::new_space_allocation_limit_address(isolate);
+
+ const int kAllocationDelta = 4;
+ ASSERT(kAllocationDelta >= argc);
+ Register allocation_top_addr = x5;
+ Register allocation_top = x9;
+ // Load top and check if it is the end of elements.
+ __ Add(end_elements, elements,
+ Operand::UntagSmiAndScale(length, kPointerSizeLog2));
+ __ Add(end_elements, end_elements, kEndElementsOffset);
+ __ Mov(allocation_top_addr, new_space_allocation_top);
+ __ Ldr(allocation_top, MemOperand(allocation_top_addr));
+ __ Cmp(end_elements, allocation_top);
+ __ B(ne, &call_builtin);
+
+ __ Mov(x10, new_space_allocation_limit);
+ __ Ldr(x10, MemOperand(x10));
+ __ Add(allocation_top, allocation_top, kAllocationDelta * kPointerSize);
+ __ Cmp(allocation_top, x10);
+ __ B(hi, &call_builtin);
+
+ // We fit and could grow elements.
+ // Update new_space_allocation_top.
+ __ Str(allocation_top, MemOperand(allocation_top_addr));
+ // Push the argument.
+ __ Str(argument, MemOperand(end_elements));
+ // Fill the rest with holes.
+ __ LoadRoot(x10, Heap::kTheHoleValueRootIndex);
+ ASSERT(kAllocationDelta == 4);
+ __ Stp(x10, x10, MemOperand(end_elements, 1 * kPointerSize));
+ __ Stp(x10, x10, MemOperand(end_elements, 3 * kPointerSize));
+
+ // Update elements' and array's sizes.
+ __ Str(length, FieldMemOperand(receiver, JSArray::kLengthOffset));
+ __ Add(elements_length, elements_length, Smi::FromInt(kAllocationDelta));
+ __ Str(elements_length,
+ FieldMemOperand(elements, FixedArray::kLengthOffset));
+
+ // Elements are in new space, so write barrier is not required.
+ __ Drop(argc + 1);
+ __ Mov(x0, length);
+ __ Ret();
+
+ __ Bind(&call_builtin);
+ __ TailCallExternalReference(
+ ExternalReference(Builtins::c_ArrayPush, isolate), argc + 1, 1);
+}
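+
+
+// The grow-in-place attempt above can only succeed when the elements backing
+// store is the most recent new-space allocation, so bumping the allocation
+// top extends it directly. A hedged address-arithmetic sketch (plain
+// unsigned long stand-ins; kAllocationDelta == 4 matches the assert above):
+static bool CanGrowElementsInPlaceSketch(unsigned long elements_end,
+ unsigned long new_space_top, unsigned long new_space_limit,
+ int pointer_size) {
+ const int kAllocationDelta = 4; // extra slots, later filled with the hole
+ if (elements_end != new_space_top) {
+ return false; // something else was allocated after the elements array
+ }
+ unsigned long new_top = new_space_top + kAllocationDelta * pointer_size;
+ if (new_top > new_space_limit) return false; // would overflow new space
+ return true; // caller bumps the top, stores the value and fills holes
+}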
+
+
+void BinaryOpICWithAllocationSiteStub::Generate(MacroAssembler* masm) {
+ // ----------- S t a t e -------------
+ // -- x1 : left
+ // -- x0 : right
+ // -- lr : return address
+ // -----------------------------------
+ Isolate* isolate = masm->isolate();
+
+ // Load x2 with the allocation site. We stick an undefined dummy value here
+ // and replace it with the real allocation site later when we instantiate this
+ // stub in BinaryOpICWithAllocationSiteStub::GetCodeCopyFromTemplate().
+ __ LoadObject(x2, handle(isolate->heap()->undefined_value()));
+
+ // Make sure that we actually patched the allocation site.
+ if (FLAG_debug_code) {
+ __ AssertNotSmi(x2, kExpectedAllocationSite);
+ __ Ldr(x10, FieldMemOperand(x2, HeapObject::kMapOffset));
+ __ AssertRegisterIsRoot(x10, Heap::kAllocationSiteMapRootIndex,
+ kExpectedAllocationSite);
+ }
+
+ // Tail call into the stub that handles binary operations with allocation
+ // sites.
+ BinaryOpWithAllocationSiteStub stub(state_);
+ __ TailCallStub(&stub);
+}
+
+
+bool CodeStub::CanUseFPRegisters() {
+ // FP registers always available on ARM64.
+ return true;
+}
+
+
+void RecordWriteStub::GenerateIncremental(MacroAssembler* masm, Mode mode) {
+ // We need some extra registers for this stub. They have been allocated,
+ // but we need to save them before using them.
+ regs_.Save(masm);
+
+ if (remembered_set_action_ == EMIT_REMEMBERED_SET) {
+ Label dont_need_remembered_set;
+
+ Register value = regs_.scratch0();
+ __ Ldr(value, MemOperand(regs_.address()));
+ __ JumpIfNotInNewSpace(value, &dont_need_remembered_set);
+
+ __ CheckPageFlagSet(regs_.object(),
+ value,
+ 1 << MemoryChunk::SCAN_ON_SCAVENGE,
+ &dont_need_remembered_set);
+
+ // First notify the incremental marker if necessary, then update the
+ // remembered set.
+ CheckNeedsToInformIncrementalMarker(
+ masm, kUpdateRememberedSetOnNoNeedToInformIncrementalMarker, mode);
+ InformIncrementalMarker(masm);
+ regs_.Restore(masm); // Restore the extra scratch registers we used.
+
+ __ RememberedSetHelper(object_,
+ address_,
+ value_, // scratch1
+ save_fp_regs_mode_,
+ MacroAssembler::kReturnAtEnd);
+
+ __ Bind(&dont_need_remembered_set);
+ }
+
+ CheckNeedsToInformIncrementalMarker(
+ masm, kReturnOnNoNeedToInformIncrementalMarker, mode);
+ InformIncrementalMarker(masm);
+ regs_.Restore(masm); // Restore the extra scratch registers we used.
+ __ Ret();
+}
+
+
+void RecordWriteStub::InformIncrementalMarker(MacroAssembler* masm) {
+ regs_.SaveCallerSaveRegisters(masm, save_fp_regs_mode_);
+ Register address =
+ x0.Is(regs_.address()) ? regs_.scratch0() : regs_.address();
+ ASSERT(!address.Is(regs_.object()));
+ ASSERT(!address.Is(x0));
+ __ Mov(address, regs_.address());
+ __ Mov(x0, regs_.object());
+ __ Mov(x1, address);
+ __ Mov(x2, ExternalReference::isolate_address(masm->isolate()));
+
+ AllowExternalCallThatCantCauseGC scope(masm);
+ ExternalReference function =
+ ExternalReference::incremental_marking_record_write_function(
+ masm->isolate());
+ __ CallCFunction(function, 3, 0);
+
+ regs_.RestoreCallerSaveRegisters(masm, save_fp_regs_mode_);
+}
+
+
+void RecordWriteStub::CheckNeedsToInformIncrementalMarker(
+ MacroAssembler* masm,
+ OnNoNeedToInformIncrementalMarker on_no_need,
+ Mode mode) {
+ Label on_black;
+ Label need_incremental;
+ Label need_incremental_pop_scratch;
+
+ Register mem_chunk = regs_.scratch0();
+ Register counter = regs_.scratch1();
+ __ Bic(mem_chunk, regs_.object(), Page::kPageAlignmentMask);
+ __ Ldr(counter,
+ MemOperand(mem_chunk, MemoryChunk::kWriteBarrierCounterOffset));
+ __ Subs(counter, counter, 1);
+ __ Str(counter,
+ MemOperand(mem_chunk, MemoryChunk::kWriteBarrierCounterOffset));
+ __ B(mi, &need_incremental);
+
+ // If the object is not black we don't have to inform the incremental marker.
+ __ JumpIfBlack(regs_.object(), regs_.scratch0(), regs_.scratch1(), &on_black);
+
+ regs_.Restore(masm); // Restore the extra scratch registers we used.
+ if (on_no_need == kUpdateRememberedSetOnNoNeedToInformIncrementalMarker) {
+ __ RememberedSetHelper(object_,
+ address_,
+ value_, // scratch1
+ save_fp_regs_mode_,
+ MacroAssembler::kReturnAtEnd);
+ } else {
+ __ Ret();
+ }
+
+ __ Bind(&on_black);
+ // Get the value from the slot.
+ Register value = regs_.scratch0();
+ __ Ldr(value, MemOperand(regs_.address()));
+
+ if (mode == INCREMENTAL_COMPACTION) {
+ Label ensure_not_white;
+
+ __ CheckPageFlagClear(value,
+ regs_.scratch1(),
+ MemoryChunk::kEvacuationCandidateMask,
+ &ensure_not_white);
+
+ __ CheckPageFlagClear(regs_.object(),
+ regs_.scratch1(),
+ MemoryChunk::kSkipEvacuationSlotsRecordingMask,
+ &need_incremental);
+
+ __ Bind(&ensure_not_white);
+ }
+
+ // We need extra registers for this, so we push the object and the address
+ // register temporarily.
+ __ Push(regs_.address(), regs_.object());
+ __ EnsureNotWhite(value,
+ regs_.scratch1(), // Scratch.
+ regs_.object(), // Scratch.
+ regs_.address(), // Scratch.
+ regs_.scratch2(), // Scratch.
+ &need_incremental_pop_scratch);
+ __ Pop(regs_.object(), regs_.address());
+
+ regs_.Restore(masm); // Restore the extra scratch registers we used.
+ if (on_no_need == kUpdateRememberedSetOnNoNeedToInformIncrementalMarker) {
+ __ RememberedSetHelper(object_,
+ address_,
+ value_, // scratch1
+ save_fp_regs_mode_,
+ MacroAssembler::kReturnAtEnd);
+ } else {
+ __ Ret();
+ }
+
+ __ Bind(&need_incremental_pop_scratch);
+ __ Pop(regs_.object(), regs_.address());
+
+ __ Bind(&need_incremental);
+ // Fall through when we need to inform the incremental marker.
+}
+
+
+void RecordWriteStub::Generate(MacroAssembler* masm) {
+ Label skip_to_incremental_noncompacting;
+ Label skip_to_incremental_compacting;
+
+ // We patch these first two instructions back and forth between a nop and a
+ // real branch when we start and stop incremental heap marking.
+ // Initially the stub is expected to be in STORE_BUFFER_ONLY mode, so two
+ // nops are generated.
+ // See RecordWriteStub::Patch for details.
+ {
+ InstructionAccurateScope scope(masm, 2);
+ __ adr(xzr, &skip_to_incremental_noncompacting);
+ __ adr(xzr, &skip_to_incremental_compacting);
+ }
+
+ if (remembered_set_action_ == EMIT_REMEMBERED_SET) {
+ __ RememberedSetHelper(object_,
+ address_,
+ value_, // scratch1
+ save_fp_regs_mode_,
+ MacroAssembler::kReturnAtEnd);
+ }
+ __ Ret();
+
+ __ Bind(&skip_to_incremental_noncompacting);
+ GenerateIncremental(masm, INCREMENTAL);
+
+ __ Bind(&skip_to_incremental_compacting);
+ GenerateIncremental(masm, INCREMENTAL_COMPACTION);
+}
+
+
+void StoreArrayLiteralElementStub::Generate(MacroAssembler* masm) {
+ // x0 (value) : element value to store
+ // x3 (index_smi) : element index as smi
+ // sp[0] (array_index_smi) : array literal index in function as smi
+ // sp[1] (array) : array literal
+
+ Register value = x0;
+ Register index_smi = x3;
+
+ Register array = x1;
+ Register array_map = x2;
+ Register array_index_smi = x4;
+ __ PeekPair(array_index_smi, array, 0);
+ __ Ldr(array_map, FieldMemOperand(array, JSObject::kMapOffset));
+
+ Label double_elements, smi_element, fast_elements, slow_elements;
+ Register bitfield2 = x10;
+ __ Ldrb(bitfield2, FieldMemOperand(array_map, Map::kBitField2Offset));
+
+ // Jump if array's ElementsKind is not FAST*_SMI_ELEMENTS, FAST_ELEMENTS or
+ // FAST_HOLEY_ELEMENTS.
+ STATIC_ASSERT(FAST_SMI_ELEMENTS == 0);
+ STATIC_ASSERT(FAST_HOLEY_SMI_ELEMENTS == 1);
+ STATIC_ASSERT(FAST_ELEMENTS == 2);
+ STATIC_ASSERT(FAST_HOLEY_ELEMENTS == 3);
+ __ Cmp(bitfield2, Map::kMaximumBitField2FastHoleyElementValue);
+ __ B(hi, &double_elements);
+
+ __ JumpIfSmi(value, &smi_element);
+
+ // Jump if array's ElementsKind is not FAST_ELEMENTS or FAST_HOLEY_ELEMENTS.
+ __ Tbnz(bitfield2, MaskToBit(FAST_ELEMENTS << Map::kElementsKindShift),
+ &fast_elements);
+
+ // A store into the array literal requires an elements transition. Call
+ // into the runtime.
+ __ Bind(&slow_elements);
+ __ Push(array, index_smi, value);
+ __ Ldr(x10, MemOperand(fp, JavaScriptFrameConstants::kFunctionOffset));
+ __ Ldr(x11, FieldMemOperand(x10, JSFunction::kLiteralsOffset));
+ __ Push(x11, array_index_smi);
+ __ TailCallRuntime(Runtime::kStoreArrayLiteralElement, 5, 1);
+
+ // Array literal has ElementsKind of FAST_*_ELEMENTS and value is an object.
+ __ Bind(&fast_elements);
+ __ Ldr(x10, FieldMemOperand(array, JSObject::kElementsOffset));
+ __ Add(x11, x10, Operand::UntagSmiAndScale(index_smi, kPointerSizeLog2));
+ __ Add(x11, x11, FixedArray::kHeaderSize - kHeapObjectTag);
+ __ Str(value, MemOperand(x11));
+ // Update the write barrier for the array store.
+ __ RecordWrite(x10, x11, value, kLRHasNotBeenSaved, kDontSaveFPRegs,
+ EMIT_REMEMBERED_SET, OMIT_SMI_CHECK);
+ __ Ret();
+
+ // Array literal has ElementsKind of FAST_*_SMI_ELEMENTS or FAST_*_ELEMENTS,
+ // and value is Smi.
+ __ Bind(&smi_element);
+ __ Ldr(x10, FieldMemOperand(array, JSObject::kElementsOffset));
+ __ Add(x11, x10, Operand::UntagSmiAndScale(index_smi, kPointerSizeLog2));
+ __ Str(value, FieldMemOperand(x11, FixedArray::kHeaderSize));
+ __ Ret();
+
+ __ Bind(&double_elements);
+ __ Ldr(x10, FieldMemOperand(array, JSObject::kElementsOffset));
+ __ StoreNumberToDoubleElements(value, index_smi, x10, x11, d0, d1,
+ &slow_elements);
+ __ Ret();
+}
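+
+
+// A dispatch sketch of the elements-kind handling above (the numeric kind
+// values mirror the STATIC_ASSERTs; treating every kind past
+// FAST_HOLEY_ELEMENTS as a double kind is an illustrative simplification):
+enum StoreRouteSketch {
+ kStoreSmiNoBarrier, kStoreWithBarrier, kStoreDouble, kTransitionViaRuntime
+};
+static StoreRouteSketch ClassifyLiteralStoreSketch(int elements_kind,
+ bool value_is_smi) {
+ if (elements_kind > 3) return kStoreDouble; // beyond FAST_HOLEY_ELEMENTS
+ if (value_is_smi) return kStoreSmiNoBarrier; // smis never need a barrier
+ if (elements_kind == 2 || elements_kind == 3) { // FAST(_HOLEY)_ELEMENTS
+ return kStoreWithBarrier;
+ }
+ return kTransitionViaRuntime; // object into a smi-only backing store
+}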
+
+
+void StubFailureTrampolineStub::Generate(MacroAssembler* masm) {
+ CEntryStub ces(1, fp_registers_ ? kSaveFPRegs : kDontSaveFPRegs);
+ __ Call(ces.GetCode(masm->isolate()), RelocInfo::CODE_TARGET);
+ int parameter_count_offset =
+ StubFailureTrampolineFrame::kCallerStackParameterCountFrameOffset;
+ __ Ldr(x1, MemOperand(fp, parameter_count_offset));
+ if (function_mode_ == JS_FUNCTION_STUB_MODE) {
+ __ Add(x1, x1, 1);
+ }
+ masm->LeaveFrame(StackFrame::STUB_FAILURE_TRAMPOLINE);
+ __ Drop(x1);
+ // Return to IC Miss stub, continuation still on stack.
+ __ Ret();
+}
+
+
+// The entry hook is a "BumpSystemStackPointer" instruction (sub), followed by
+// a "Push lr" instruction, followed by a call.
+static const unsigned int kProfileEntryHookCallSize =
+ Assembler::kCallSizeWithRelocation + (2 * kInstructionSize);
+
+
+void ProfileEntryHookStub::MaybeCallEntryHook(MacroAssembler* masm) {
+ if (masm->isolate()->function_entry_hook() != NULL) {
+ ProfileEntryHookStub stub;
+ Assembler::BlockConstPoolScope no_const_pools(masm);
+ Label entry_hook_call_start;
+ __ Bind(&entry_hook_call_start);
+ __ Push(lr);
+ __ CallStub(&stub);
+ ASSERT(masm->SizeOfCodeGeneratedSince(&entry_hook_call_start) ==
+ kProfileEntryHookCallSize);
+
+ __ Pop(lr);
+ }
+}
+
+
+void ProfileEntryHookStub::Generate(MacroAssembler* masm) {
+ MacroAssembler::NoUseRealAbortsScope no_use_real_aborts(masm);
+
+ // Save all kCallerSaved registers (including lr), since this can be called
+ // from anywhere.
+ // TODO(jbramley): What about FP registers?
+ __ PushCPURegList(kCallerSaved);
+ ASSERT(kCallerSaved.IncludesAliasOf(lr));
+ const int kNumSavedRegs = kCallerSaved.Count();
+
+ // Compute the function's address as the first argument.
+ __ Sub(x0, lr, kProfileEntryHookCallSize);
+
+#if V8_HOST_ARCH_ARM64
+ uintptr_t entry_hook =
+ reinterpret_cast<uintptr_t>(masm->isolate()->function_entry_hook());
+ __ Mov(x10, entry_hook);
+#else
+ // Under the simulator we need to indirect the entry hook through a trampoline
+ // function at a known address.
+ ApiFunction dispatcher(FUNCTION_ADDR(EntryHookTrampoline));
+ __ Mov(x10, Operand(ExternalReference(&dispatcher,
+ ExternalReference::BUILTIN_CALL,
+ masm->isolate())));
+ // It additionally takes an isolate as a third parameter.
+ __ Mov(x2, ExternalReference::isolate_address(masm->isolate()));
+#endif
+
+ // The caller's return address is above the saved temporaries.
+ // Grab its location for the second argument to the hook.
+ __ Add(x1, __ StackPointer(), kNumSavedRegs * kPointerSize);
+
+ {
+ // Create a dummy frame, as CallCFunction requires this.
+ FrameScope frame(masm, StackFrame::MANUAL);
+ __ CallCFunction(x10, 2, 0);
+ }
+
+ __ PopCPURegList(kCallerSaved);
+ __ Ret();
+}
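+
+
+// The stub above reconstructs the instrumented function's address from lr
+// and passes the stack slot holding the caller's return address as the
+// second argument. A sketch of the call it ends up making (the typedef
+// mirrors the shape of V8's FunctionEntryHook callback; treat the names and
+// parameters here as assumptions):
+typedef void (*EntryHookSketch)(unsigned long function_address,
+ unsigned long return_addr_location);
+static void CallEntryHookSketch(EntryHookSketch hook, unsigned long lr,
+ unsigned long saved_regs_base, int num_saved_regs, int pointer_size,
+ unsigned long call_site_size) {
+ // Sub(x0, lr, kProfileEntryHookCallSize): back up over the call sequence.
+ unsigned long function_address = lr - call_site_size;
+ // Add(x1, StackPointer(), kNumSavedRegs * kPointerSize): the caller's
+ // return address sits just above the saved temporaries.
+ unsigned long return_addr_location =
+ saved_regs_base + num_saved_regs * pointer_size;
+ hook(function_address, return_addr_location);
+}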
+
+
+void DirectCEntryStub::Generate(MacroAssembler* masm) {
+ // When calling into C++ code the stack pointer must be csp, so this code
+ // must use csp for its peek/poke operations when the stub is generated.
+ // When the stub is called (via DirectCEntryStub::GenerateCall), the caller
+ // must set up an ExitFrame and configure the stack pointer *before* doing
+ // the call.
+ const Register old_stack_pointer = __ StackPointer();
+ __ SetStackPointer(csp);
+
+ // Put return address on the stack (accessible to GC through exit frame pc).
+ __ Poke(lr, 0);
+ // Call the C++ function.
+ __ Blr(x10);
+ // Return to calling code.
+ __ Peek(lr, 0);
+ __ Ret();
+
+ __ SetStackPointer(old_stack_pointer);
+}
+
+void DirectCEntryStub::GenerateCall(MacroAssembler* masm,
+ Register target) {
+ // Make sure the caller configured the stack pointer (see comment in
+ // DirectCEntryStub::Generate).
+ ASSERT(csp.Is(__ StackPointer()));
+
+ intptr_t code =
+ reinterpret_cast<intptr_t>(GetCode(masm->isolate()).location());
+ __ Mov(lr, Operand(code, RelocInfo::CODE_TARGET));
+ __ Mov(x10, target);
+ // Branch to the stub.
+ __ Blr(lr);
+}
+
+
+// Probe the name dictionary in the 'elements' register.
+// Jump to the 'done' label if a property with the given name is found.
+// Jump to the 'miss' label otherwise.
+//
+// If the lookup was successful, 'scratch2' will be equal to
+// elements + kPointerSize * index.
+// 'elements' and 'name' registers are preserved on miss.
+void NameDictionaryLookupStub::GeneratePositiveLookup(
+ MacroAssembler* masm,
+ Label* miss,
+ Label* done,
+ Register elements,
+ Register name,
+ Register scratch1,
+ Register scratch2) {
+ ASSERT(!AreAliased(elements, name, scratch1, scratch2));
+
+ // Assert that name contains a string.
+ __ AssertName(name);
+
+ // Compute the capacity mask.
+ __ Ldrsw(scratch1, UntagSmiFieldMemOperand(elements, kCapacityOffset));
+ __ Sub(scratch1, scratch1, 1);
+
+ // Generate an unrolled loop that performs a few probes before giving up.
+ for (int i = 0; i < kInlinedProbes; i++) {
+ // Compute the masked index: (hash + i + i * i) & mask.
+ __ Ldr(scratch2, FieldMemOperand(name, Name::kHashFieldOffset));
+ if (i > 0) {
+ // Add the probe offset (i + i * i), left-shifted to avoid right-shifting
+ // the hash in a separate instruction. The value hash + i + i * i is
+ // right-shifted in the following And instruction.
+ ASSERT(NameDictionary::GetProbeOffset(i) <
+ 1 << (32 - Name::kHashFieldOffset));
+ __ Add(scratch2, scratch2, Operand(
+ NameDictionary::GetProbeOffset(i) << Name::kHashShift));
+ }
+ __ And(scratch2, scratch1, Operand(scratch2, LSR, Name::kHashShift));
+
+ // Scale the index by multiplying by the element size.
+ ASSERT(NameDictionary::kEntrySize == 3);
+ __ Add(scratch2, scratch2, Operand(scratch2, LSL, 1));
+
+ // Check if the key is identical to the name.
+ UseScratchRegisterScope temps(masm);
+ Register scratch3 = temps.AcquireX();
+ __ Add(scratch2, elements, Operand(scratch2, LSL, kPointerSizeLog2));
+ __ Ldr(scratch3, FieldMemOperand(scratch2, kElementsStartOffset));
+ __ Cmp(name, scratch3);
+ __ B(eq, done);
+ }
+
+ // The inlined probes didn't find the entry.
+ // Call the complete stub to scan the whole dictionary.
+
+ CPURegList spill_list(CPURegister::kRegister, kXRegSizeInBits, 0, 6);
+ spill_list.Combine(lr);
+ spill_list.Remove(scratch1);
+ spill_list.Remove(scratch2);
+
+ __ PushCPURegList(spill_list);
+
+ if (name.is(x0)) {
+ ASSERT(!elements.is(x1));
+ __ Mov(x1, name);
+ __ Mov(x0, elements);
+ } else {
+ __ Mov(x0, elements);
+ __ Mov(x1, name);
+ }
+
+ Label not_found;
+ NameDictionaryLookupStub stub(POSITIVE_LOOKUP);
+ __ CallStub(&stub);
+ __ Cbz(x0, &not_found);
+ __ Mov(scratch2, x2); // Move entry index into scratch2.
+ __ PopCPURegList(spill_list);
+ __ B(done);
+
+ __ Bind(&not_found);
+ __ PopCPURegList(spill_list);
+ __ B(miss);
+}
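+
+
+// The unrolled probes above walk the dictionary with the probe sequence the
+// comments describe: index_i = (hash + i + i*i) & mask, with three
+// pointer-sized words per entry (kEntrySize == 3) and the key in the first
+// word. A C++ sketch over a raw payload array (illustrative only; a real
+// lookup also has to handle undefined and deleted entries):
+static int DictionaryProbeSketch(void* const* elements, int capacity,
+ unsigned hash, void* name, int max_probes) {
+ int mask = capacity - 1; // capacity is a power of two
+ for (int i = 0; i < max_probes; i++) {
+ int index = static_cast<int>((hash + i + i * i) & mask);
+ if (elements[index * 3] == name) return index; // key matches: found
+ }
+ return -1; // not found within max_probes; fall back to the full stub
+}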
+
+
+void NameDictionaryLookupStub::GenerateNegativeLookup(MacroAssembler* masm,
+ Label* miss,
+ Label* done,
+ Register receiver,
+ Register properties,
+ Handle<Name> name,
+ Register scratch0) {
+ ASSERT(!AreAliased(receiver, properties, scratch0));
+ ASSERT(name->IsUniqueName());
+ // If the names of the slots in the range from 1 to kProbes - 1 for the
+ // hash value are not equal to the name, and the kProbes-th slot is not
+ // used (its name is the undefined value), then the hash table cannot
+ // contain the property. This holds even if some slots represent deleted
+ // properties (their names are the hole value).
+ for (int i = 0; i < kInlinedProbes; i++) {
+ // scratch0 points to properties hash.
+ // Compute the masked index: (hash + i + i * i) & mask.
+ Register index = scratch0;
+ // Capacity is smi 2^n.
+ __ Ldrsw(index, UntagSmiFieldMemOperand(properties, kCapacityOffset));
+ __ Sub(index, index, 1);
+ __ And(index, index, name->Hash() + NameDictionary::GetProbeOffset(i));
+
+ // Scale the index by multiplying by the entry size.
+ ASSERT(NameDictionary::kEntrySize == 3);
+ __ Add(index, index, Operand(index, LSL, 1)); // index *= 3.
+
+ Register entity_name = scratch0;
+ // Having undefined at this place means the name is not contained.
+ Register tmp = index;
+ __ Add(tmp, properties, Operand(index, LSL, kPointerSizeLog2));
+ __ Ldr(entity_name, FieldMemOperand(tmp, kElementsStartOffset));
+
+ __ JumpIfRoot(entity_name, Heap::kUndefinedValueRootIndex, done);
+
+ // Stop if we found the property.
+ __ Cmp(entity_name, Operand(name));
+ __ B(eq, miss);
+
+ Label good;
+ __ JumpIfRoot(entity_name, Heap::kTheHoleValueRootIndex, &good);
+
+ // Check if the entry name is not a unique name.
+ __ Ldr(entity_name, FieldMemOperand(entity_name, HeapObject::kMapOffset));
+ __ Ldrb(entity_name,
+ FieldMemOperand(entity_name, Map::kInstanceTypeOffset));
+ __ JumpIfNotUniqueName(entity_name, miss);
+ __ Bind(&good);
+ }
+
+ CPURegList spill_list(CPURegister::kRegister, kXRegSizeInBits, 0, 6);
+ spill_list.Combine(lr);
+ spill_list.Remove(scratch0); // Scratch registers don't need to be preserved.
+
+ __ PushCPURegList(spill_list);
+
+ __ Ldr(x0, FieldMemOperand(receiver, JSObject::kPropertiesOffset));
+ __ Mov(x1, Operand(name));
+ NameDictionaryLookupStub stub(NEGATIVE_LOOKUP);
+ __ CallStub(&stub);
+ // Move stub return value to scratch0. Note that scratch0 is not included in
+ // spill_list and won't be clobbered by PopCPURegList.
+ __ Mov(scratch0, x0);
+ __ PopCPURegList(spill_list);
+
+ __ Cbz(scratch0, done);
+ __ B(miss);
+}
+
+
+void NameDictionaryLookupStub::Generate(MacroAssembler* masm) {
+ // This stub overrides SometimesSetsUpAFrame() to return false. That means
+ // we cannot call anything that could cause a GC from this stub.
+ //
+ // Arguments are in x0 and x1:
+ // x0: property dictionary.
+ // x1: the name of the property we are looking for.
+ //
+ // The return value is in x0: zero if the lookup failed, non-zero otherwise.
+ // If the lookup is successful, x2 contains the index of the entry.
+
+ Register result = x0;
+ Register dictionary = x0;
+ Register key = x1;
+ Register index = x2;
+ Register mask = x3;
+ Register hash = x4;
+ Register undefined = x5;
+ Register entry_key = x6;
+
+ Label in_dictionary, maybe_in_dictionary, not_in_dictionary;
+
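+ // Compute the capacity mask (the capacity is a power-of-two smi).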
+ __ Ldrsw(mask, UntagSmiFieldMemOperand(dictionary, kCapacityOffset));
+ __ Sub(mask, mask, 1);
+
+ __ Ldr(hash, FieldMemOperand(key, Name::kHashFieldOffset));
+ __ LoadRoot(undefined, Heap::kUndefinedValueRootIndex);
+
+ for (int i = kInlinedProbes; i < kTotalProbes; i++) {
+ // Compute the masked index: (hash + i + i * i) & mask.
+ // The capacity is a smi and a power of two (2^n).
+ if (i > 0) {
+ // Add the probe offset (i + i * i) left shifted to avoid right shifting
+ // the hash in a separate instruction. The value hash + i + i * i is
+ // right shifted by the And instruction below.
+ ASSERT(NameDictionary::GetProbeOffset(i) <
+ 1 << (32 - Name::kHashFieldOffset));
+ __ Add(index, hash,
+ NameDictionary::GetProbeOffset(i) << Name::kHashShift);
+ } else {
+ __ Mov(index, hash);
+ }
+ __ And(index, mask, Operand(index, LSR, Name::kHashShift));
+
+ // Scale the index by multiplying by the entry size.
+ ASSERT(NameDictionary::kEntrySize == 3);
+ __ Add(index, index, Operand(index, LSL, 1)); // index *= 3.
+
+ __ Add(index, dictionary, Operand(index, LSL, kPointerSizeLog2));
+ __ Ldr(entry_key, FieldMemOperand(index, kElementsStartOffset));
+
+ // Having undefined at this place means the name is not contained.
+ __ Cmp(entry_key, undefined);
+ __ B(eq, &not_in_dictionary);
+
+ // Stop if we found the property.
+ __ Cmp(entry_key, key);
+ __ B(eq, &in_dictionary);
+
+ if (i != kTotalProbes - 1 && mode_ == NEGATIVE_LOOKUP) {
+ // Check if the entry name is not a unique name.
+ __ Ldr(entry_key, FieldMemOperand(entry_key, HeapObject::kMapOffset));
+ __ Ldrb(entry_key, FieldMemOperand(entry_key, Map::kInstanceTypeOffset));
+ __ JumpIfNotUniqueName(entry_key, &maybe_in_dictionary);
+ }
+ }
+
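+ // Falling out of the probe loop without a conclusive answer is handled the
+ // same way as hitting a non-unique name: control reaches maybe_in_dictionary.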
+ __ Bind(&maybe_in_dictionary);
+ // If we are doing negative lookup then probing failure should be
+ // treated as a lookup success. For positive lookup, probing failure
+ // should be treated as lookup failure.
+ if (mode_ == POSITIVE_LOOKUP) {
+ __ Mov(result, 0);
+ __ Ret();
+ }
+
+ __ Bind(&in_dictionary);
+ __ Mov(result, 1);
+ __ Ret();
+
+ __ Bind(&not_in_dictionary);
+ __ Mov(result, 0);
+ __ Ret();
+}
+
+
+template<class T>
+static void CreateArrayDispatch(MacroAssembler* masm,
+ AllocationSiteOverrideMode mode) {
+ ASM_LOCATION("CreateArrayDispatch");
+ if (mode == DISABLE_ALLOCATION_SITES) {
+ T stub(GetInitialFastElementsKind(), mode);
+ __ TailCallStub(&stub);
+
+ } else if (mode == DONT_OVERRIDE) {
+ Register kind = x3;
+ int last_index =
+ GetSequenceIndexFromFastElementsKind(TERMINAL_FAST_ELEMENTS_KIND);
+ for (int i = 0; i <= last_index; ++i) {
+ Label next;
+ ElementsKind candidate_kind = GetFastElementsKindFromSequenceIndex(i);
+ // TODO(jbramley): Is this the best way to handle this? Can we make the
+ // tail calls conditional, rather than hopping over each one?
+ __ CompareAndBranch(kind, candidate_kind, ne, &next);
+ T stub(candidate_kind);
+ __ TailCallStub(&stub);
+ __ Bind(&next);
+ }
+
+ // If we reached this point there is a problem.
+ __ Abort(kUnexpectedElementsKindInArrayConstructor);
+
+ } else {
+ UNREACHABLE();
+ }
+}
+
+
+// TODO(jbramley): If this needs to be a special case, make it a proper template
+// specialization, and not a separate function.
+static void CreateArrayDispatchOneArgument(MacroAssembler* masm,
+ AllocationSiteOverrideMode mode) {
+ ASM_LOCATION("CreateArrayDispatchOneArgument");
+ // x0 - argc
+ // x1 - constructor?
+ // x2 - allocation site (if mode != DISABLE_ALLOCATION_SITES)
+ // x3 - kind (if mode != DISABLE_ALLOCATION_SITES)
+ // sp[0] - last argument
+
+ Register allocation_site = x2;
+ Register kind = x3;
+
+ Label normal_sequence;
+ if (mode == DONT_OVERRIDE) {
+ STATIC_ASSERT(FAST_SMI_ELEMENTS == 0);
+ STATIC_ASSERT(FAST_HOLEY_SMI_ELEMENTS == 1);
+ STATIC_ASSERT(FAST_ELEMENTS == 2);
+ STATIC_ASSERT(FAST_HOLEY_ELEMENTS == 3);
+ STATIC_ASSERT(FAST_DOUBLE_ELEMENTS == 4);
+ STATIC_ASSERT(FAST_HOLEY_DOUBLE_ELEMENTS == 5);
+
+ // Is the low bit set? If so, the array is holey.
+ __ Tbnz(kind, 0, &normal_sequence);
+ }
+
+ // Look at the last argument.
+ // TODO(jbramley): What does a 0 argument represent?
+ __ Peek(x10, 0);
+ __ Cbz(x10, &normal_sequence);
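+ // A zero length argument cannot introduce holes, so the packed ("normal")
+ // kind can be kept.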
+
+ if (mode == DISABLE_ALLOCATION_SITES) {
+ ElementsKind initial = GetInitialFastElementsKind();
+ ElementsKind holey_initial = GetHoleyElementsKind(initial);
+
+ ArraySingleArgumentConstructorStub stub_holey(holey_initial,
+ DISABLE_ALLOCATION_SITES);
+ __ TailCallStub(&stub_holey);
+
+ __ Bind(&normal_sequence);
+ ArraySingleArgumentConstructorStub stub(initial,
+ DISABLE_ALLOCATION_SITES);
+ __ TailCallStub(&stub);
+ } else if (mode == DONT_OVERRIDE) {
+ // We are going to create a holey array, but our kind is non-holey.
+ // Fix kind and retry (only if we have an allocation site in the slot).
+ __ Orr(kind, kind, 1);
+
+ if (FLAG_debug_code) {
+ __ Ldr(x10, FieldMemOperand(allocation_site, 0));
+ __ JumpIfNotRoot(x10, Heap::kAllocationSiteMapRootIndex,
+ &normal_sequence);
+ __ Assert(eq, kExpectedAllocationSite);
+ }
+
+ // Save the resulting elements kind in type info. We can't just store 'kind'
+ // in the AllocationSite::transition_info field because elements kind is
+ // restricted to a portion of the field; upper bits need to be left alone.
+ STATIC_ASSERT(AllocationSite::ElementsKindBits::kShift == 0);
+ __ Ldr(x11, FieldMemOperand(allocation_site,
+ AllocationSite::kTransitionInfoOffset));
+ __ Add(x11, x11, Smi::FromInt(kFastElementsKindPackedToHoley));
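+ // Adding the packed-to-holey delta only updates the low elements-kind bits,
+ // leaving the rest of the transition info intact.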
+ __ Str(x11, FieldMemOperand(allocation_site,
+ AllocationSite::kTransitionInfoOffset));
+
+ __ Bind(&normal_sequence);
+ int last_index =
+ GetSequenceIndexFromFastElementsKind(TERMINAL_FAST_ELEMENTS_KIND);
+ for (int i = 0; i <= last_index; ++i) {
+ Label next;
+ ElementsKind candidate_kind = GetFastElementsKindFromSequenceIndex(i);
+ __ CompareAndBranch(kind, candidate_kind, ne, &next);
+ ArraySingleArgumentConstructorStub stub(candidate_kind);
+ __ TailCallStub(&stub);
+ __ Bind(&next);
+ }
+
+ // If we reached this point there is a problem.
+ __ Abort(kUnexpectedElementsKindInArrayConstructor);
+ } else {
+ UNREACHABLE();
+ }
+}
+
+
+template<class T>
+static void ArrayConstructorStubAheadOfTimeHelper(Isolate* isolate) {
+ int to_index = GetSequenceIndexFromFastElementsKind(
+ TERMINAL_FAST_ELEMENTS_KIND);
+ for (int i = 0; i <= to_index; ++i) {
+ ElementsKind kind = GetFastElementsKindFromSequenceIndex(i);
+ T stub(kind);
+ stub.GetCode(isolate);
+ if (AllocationSite::GetMode(kind) != DONT_TRACK_ALLOCATION_SITE) {
+ T stub1(kind, DISABLE_ALLOCATION_SITES);
+ stub1.GetCode(isolate);
+ }
+ }
+}
+
+
+void ArrayConstructorStubBase::GenerateStubsAheadOfTime(Isolate* isolate) {
+ ArrayConstructorStubAheadOfTimeHelper<ArrayNoArgumentConstructorStub>(
+ isolate);
+ ArrayConstructorStubAheadOfTimeHelper<ArraySingleArgumentConstructorStub>(
+ isolate);
+ ArrayConstructorStubAheadOfTimeHelper<ArrayNArgumentsConstructorStub>(
+ isolate);
+}
+
+
+void InternalArrayConstructorStubBase::GenerateStubsAheadOfTime(
+ Isolate* isolate) {
+ ElementsKind kinds[2] = { FAST_ELEMENTS, FAST_HOLEY_ELEMENTS };
+ for (int i = 0; i < 2; i++) {
+ // For internal arrays we only need a few things.
+ InternalArrayNoArgumentConstructorStub stubh1(kinds[i]);
+ stubh1.GetCode(isolate);
+ InternalArraySingleArgumentConstructorStub stubh2(kinds[i]);
+ stubh2.GetCode(isolate);
+ InternalArrayNArgumentsConstructorStub stubh3(kinds[i]);
+ stubh3.GetCode(isolate);
+ }
+}
+
+
+void ArrayConstructorStub::GenerateDispatchToArrayStub(
+ MacroAssembler* masm,
+ AllocationSiteOverrideMode mode) {
+ Register argc = x0;
+ if (argument_count_ == ANY) {
+ Label zero_case, n_case;
+ __ Cbz(argc, &zero_case);
+ __ Cmp(argc, 1);
+ __ B(ne, &n_case);
+
+ // One argument.
+ CreateArrayDispatchOneArgument(masm, mode);
+
+ __ Bind(&zero_case);
+ // No arguments.
+ CreateArrayDispatch<ArrayNoArgumentConstructorStub>(masm, mode);
+
+ __ Bind(&n_case);
+ // N arguments.
+ CreateArrayDispatch<ArrayNArgumentsConstructorStub>(masm, mode);
+
+ } else if (argument_count_ == NONE) {
+ CreateArrayDispatch<ArrayNoArgumentConstructorStub>(masm, mode);
+ } else if (argument_count_ == ONE) {
+ CreateArrayDispatchOneArgument(masm, mode);
+ } else if (argument_count_ == MORE_THAN_ONE) {
+ CreateArrayDispatch<ArrayNArgumentsConstructorStub>(masm, mode);
+ } else {
+ UNREACHABLE();
+ }
+}
+
+
+void ArrayConstructorStub::Generate(MacroAssembler* masm) {
+ ASM_LOCATION("ArrayConstructorStub::Generate");
+ // ----------- S t a t e -------------
+ // -- x0 : argc (only if argument_count_ == ANY)
+ // -- x1 : constructor
+ // -- x2 : AllocationSite or undefined
+ // -- sp[0] : return address
+ // -- sp[4] : last argument
+ // -----------------------------------
+ Register constructor = x1;
+ Register allocation_site = x2;
+
+ if (FLAG_debug_code) {
+ // The array construct code is only set for the global and natives
+ // builtin Array functions, which always have maps.
+
+ Label unexpected_map, map_ok;
+ // Initial map for the builtin Array function should be a map.
+ __ Ldr(x10, FieldMemOperand(constructor,
+ JSFunction::kPrototypeOrInitialMapOffset));
+ // The Smi check below catches both a NULL value and a Smi.
+ __ JumpIfSmi(x10, &unexpected_map);
+ __ JumpIfObjectType(x10, x10, x11, MAP_TYPE, &map_ok);
+ __ Bind(&unexpected_map);
+ __ Abort(kUnexpectedInitialMapForArrayFunction);
+ __ Bind(&map_ok);
+
+ // We should either have undefined in the allocation_site register or a
+ // valid AllocationSite.
+ __ AssertUndefinedOrAllocationSite(allocation_site, x10);
+ }
+
+ Register kind = x3;
+ Label no_info;
+ // Get the elements kind and case on that.
+ __ JumpIfRoot(allocation_site, Heap::kUndefinedValueRootIndex, &no_info);
+
+ __ Ldrsw(kind,
+ UntagSmiFieldMemOperand(allocation_site,
+ AllocationSite::kTransitionInfoOffset));
+ __ And(kind, kind, AllocationSite::ElementsKindBits::kMask);
+ GenerateDispatchToArrayStub(masm, DONT_OVERRIDE);
+
+ __ Bind(&no_info);
+ GenerateDispatchToArrayStub(masm, DISABLE_ALLOCATION_SITES);
+}
+
+
+void InternalArrayConstructorStub::GenerateCase(
+ MacroAssembler* masm, ElementsKind kind) {
+ Label zero_case, n_case;
+ Register argc = x0;
+
+ __ Cbz(argc, &zero_case);
+ __ CompareAndBranch(argc, 1, ne, &n_case);
+
+ // One argument.
+ if (IsFastPackedElementsKind(kind)) {
+ Label packed_case;
+
+ // We might need to create a holey array; look at the first argument.
+ __ Peek(x10, 0);
+ __ Cbz(x10, &packed_case);
+
+ InternalArraySingleArgumentConstructorStub
+ stub1_holey(GetHoleyElementsKind(kind));
+ __ TailCallStub(&stub1_holey);
+
+ __ Bind(&packed_case);
+ }
+ InternalArraySingleArgumentConstructorStub stub1(kind);
+ __ TailCallStub(&stub1);
+
+ __ Bind(&zero_case);
+ // No arguments.
+ InternalArrayNoArgumentConstructorStub stub0(kind);
+ __ TailCallStub(&stub0);
+
+ __ Bind(&n_case);
+ // N arguments.
+ InternalArrayNArgumentsConstructorStub stubN(kind);
+ __ TailCallStub(&stubN);
+}
+
+
+void InternalArrayConstructorStub::Generate(MacroAssembler* masm) {
+ // ----------- S t a t e -------------
+ // -- x0 : argc
+ // -- x1 : constructor
+ // -- sp[0] : return address
+ // -- sp[4] : last argument
+ // -----------------------------------
+ Handle<Object> undefined_sentinel(
+ masm->isolate()->heap()->undefined_value(), masm->isolate());
+
+ Register constructor = x1;
+
+ if (FLAG_debug_code) {
+ // The array construct code is only set for the global and natives
+ // builtin Array functions, which always have maps.
+
+ Label unexpected_map, map_ok;
+ // Initial map for the builtin Array function should be a map.
+ __ Ldr(x10, FieldMemOperand(constructor,
+ JSFunction::kPrototypeOrInitialMapOffset));
+ // The Smi check below catches both a NULL value and a Smi.
+ __ JumpIfSmi(x10, &unexpected_map);
+ __ JumpIfObjectType(x10, x10, x11, MAP_TYPE, &map_ok);
+ __ Bind(&unexpected_map);
+ __ Abort(kUnexpectedInitialMapForArrayFunction);
+ __ Bind(&map_ok);
+ }
+
+ Register kind = w3;
+ // Figure out the right elements kind.
+ __ Ldr(x10, FieldMemOperand(constructor,
+ JSFunction::kPrototypeOrInitialMapOffset));
+
+ // Retrieve elements_kind from map.
+ __ LoadElementsKindFromMap(kind, x10);
+
+ if (FLAG_debug_code) {
+ Label done;
+ __ Cmp(x3, FAST_ELEMENTS);
+ __ Ccmp(x3, FAST_HOLEY_ELEMENTS, ZFlag, ne);
+ __ Assert(eq, kInvalidElementsKindForInternalArrayOrInternalPackedArray);
+ }
+
+ Label fast_elements_case;
+ __ CompareAndBranch(kind, FAST_ELEMENTS, eq, &fast_elements_case);
+ GenerateCase(masm, FAST_HOLEY_ELEMENTS);
+
+ __ Bind(&fast_elements_case);
+ GenerateCase(masm, FAST_ELEMENTS);
+}
+
+
+void CallApiFunctionStub::Generate(MacroAssembler* masm) {
+ // ----------- S t a t e -------------
+ // -- x0 : callee
+ // -- x4 : call_data
+ // -- x2 : holder
+ // -- x1 : api_function_address
+ // -- cp : context
+ // --
+ // -- sp[0] : last argument
+ // -- ...
+ // -- sp[(argc - 1) * 8] : first argument
+ // -- sp[argc * 8] : receiver
+ // -----------------------------------
+
+ Register callee = x0;
+ Register call_data = x4;
+ Register holder = x2;
+ Register api_function_address = x1;
+ Register context = cp;
+
+ int argc = ArgumentBits::decode(bit_field_);
+ bool is_store = IsStoreBits::decode(bit_field_);
+ bool call_data_undefined = CallDataUndefinedBits::decode(bit_field_);
+
+ typedef FunctionCallbackArguments FCA;
+
+ STATIC_ASSERT(FCA::kContextSaveIndex == 6);
+ STATIC_ASSERT(FCA::kCalleeIndex == 5);
+ STATIC_ASSERT(FCA::kDataIndex == 4);
+ STATIC_ASSERT(FCA::kReturnValueOffset == 3);
+ STATIC_ASSERT(FCA::kReturnValueDefaultValueIndex == 2);
+ STATIC_ASSERT(FCA::kIsolateIndex == 1);
+ STATIC_ASSERT(FCA::kHolderIndex == 0);
+ STATIC_ASSERT(FCA::kArgsLength == 7);
+
+ Isolate* isolate = masm->isolate();
+
+ // FunctionCallbackArguments: context, callee and call data.
+ __ Push(context, callee, call_data);
+
+ // Load the context from the callee.
+ __ Ldr(context, FieldMemOperand(callee, JSFunction::kContextOffset));
+
+ if (!call_data_undefined) {
+ __ LoadRoot(call_data, Heap::kUndefinedValueRootIndex);
+ }
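+ // At this point the call_data register holds the undefined value (either the
+ // call data itself was undefined, or it was overwritten above); it is reused
+ // for the two return value slots pushed below.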
+ Register isolate_reg = x5;
+ __ Mov(isolate_reg, ExternalReference::isolate_address(isolate));
+
+ // FunctionCallbackArguments:
+ // return value, return value default, isolate, holder.
+ __ Push(call_data, call_data, isolate_reg, holder);
+
+ // Prepare arguments.
+ Register args = x6;
+ __ Mov(args, masm->StackPointer());
+
+ // Allocate the FunctionCallbackInfo structure in the arguments' space, since
+ // it's not controlled by GC.
+ const int kApiStackSpace = 4;
+
+ // Allocate space so that CallApiFunctionAndReturn can store some scratch
+ // registers on the stack.
+ const int kCallApiFunctionSpillSpace = 4;
+
+ FrameScope frame_scope(masm, StackFrame::MANUAL);
+ __ EnterExitFrame(false, x10, kApiStackSpace + kCallApiFunctionSpillSpace);
+
+ ASSERT(!AreAliased(x0, api_function_address));
+ // x0 = FunctionCallbackInfo&
+ // The arguments are after the return address.
+ __ Add(x0, masm->StackPointer(), 1 * kPointerSize);
+ // FunctionCallbackInfo::implicit_args_ and FunctionCallbackInfo::values_
+ __ Add(x10, args, Operand((FCA::kArgsLength - 1 + argc) * kPointerSize));
+ __ Stp(args, x10, MemOperand(x0, 0 * kPointerSize));
+ // FunctionCallbackInfo::length_ = argc and
+ // FunctionCallbackInfo::is_construct_call = 0
+ __ Mov(x10, argc);
+ __ Stp(x10, xzr, MemOperand(x0, 2 * kPointerSize));
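+ // xzr supplies the zero for is_construct_call, so no extra register is
+ // needed.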
+
+ const int kStackUnwindSpace = argc + FCA::kArgsLength + 1;
+ Address thunk_address = FUNCTION_ADDR(&InvokeFunctionCallback);
+ ExternalReference::Type thunk_type = ExternalReference::PROFILING_API_CALL;
+ ApiFunction thunk_fun(thunk_address);
+ ExternalReference thunk_ref = ExternalReference(&thunk_fun, thunk_type,
+ masm->isolate());
+
+ AllowExternalCallThatCantCauseGC scope(masm);
+ MemOperand context_restore_operand(
+ fp, (2 + FCA::kContextSaveIndex) * kPointerSize);
+ // Stores return the first JS argument.
+ int return_value_offset = 0;
+ if (is_store) {
+ return_value_offset = 2 + FCA::kArgsLength;
+ } else {
+ return_value_offset = 2 + FCA::kReturnValueOffset;
+ }
+ MemOperand return_value_operand(fp, return_value_offset * kPointerSize);
+
+ const int spill_offset = 1 + kApiStackSpace;
+ __ CallApiFunctionAndReturn(api_function_address,
+ thunk_ref,
+ kStackUnwindSpace,
+ spill_offset,
+ return_value_operand,
+ &context_restore_operand);
+}
+
+
+void CallApiGetterStub::Generate(MacroAssembler* masm) {
+ // ----------- S t a t e -------------
+ // -- sp[0] : name
+ // -- sp[8 - kArgsLength*8] : PropertyCallbackArguments object
+ // -- ...
+ // -- x2 : api_function_address
+ // -----------------------------------
+
+ Register api_function_address = x2;
+
+ __ Mov(x0, masm->StackPointer()); // x0 = Handle<Name>
+ __ Add(x1, x0, 1 * kPointerSize); // x1 = PCA
+
+ const int kApiStackSpace = 1;
+
+ // Allocate space so that CallApiFunctionAndReturn can store some scratch
+ // registers on the stack.
+ const int kCallApiFunctionSpillSpace = 4;
+
+ FrameScope frame_scope(masm, StackFrame::MANUAL);
+ __ EnterExitFrame(false, x10, kApiStackSpace + kCallApiFunctionSpillSpace);
+
+ // Create PropertyAccessorInfo instance on the stack above the exit frame with
+ // x1 (internal::Object** args_) as the data.
+ __ Poke(x1, 1 * kPointerSize);
+ __ Add(x1, masm->StackPointer(), 1 * kPointerSize); // x1 = AccessorInfo&
+
+ const int kStackUnwindSpace = PropertyCallbackArguments::kArgsLength + 1;
+
+ Address thunk_address = FUNCTION_ADDR(&InvokeAccessorGetterCallback);
+ ExternalReference::Type thunk_type =
+ ExternalReference::PROFILING_GETTER_CALL;
+ ApiFunction thunk_fun(thunk_address);
+ ExternalReference thunk_ref = ExternalReference(&thunk_fun, thunk_type,
+ masm->isolate());
+
+ const int spill_offset = 1 + kApiStackSpace;
+ __ CallApiFunctionAndReturn(api_function_address,
+ thunk_ref,
+ kStackUnwindSpace,
+ spill_offset,
+ MemOperand(fp, 6 * kPointerSize),
+ NULL);
+}
+
+
+#undef __
+
+} } // namespace v8::internal
+
+#endif // V8_TARGET_ARCH_ARM64
diff --git a/deps/v8/src/arm64/code-stubs-arm64.h b/deps/v8/src/arm64/code-stubs-arm64.h
new file mode 100644
index 0000000000..7e09ffa57c
--- /dev/null
+++ b/deps/v8/src/arm64/code-stubs-arm64.h
@@ -0,0 +1,500 @@
+// Copyright 2013 the V8 project authors. All rights reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following
+// disclaimer in the documentation and/or other materials provided
+// with the distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived
+// from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+#ifndef V8_ARM64_CODE_STUBS_ARM64_H_
+#define V8_ARM64_CODE_STUBS_ARM64_H_
+
+#include "ic-inl.h"
+
+namespace v8 {
+namespace internal {
+
+
+void ArrayNativeCode(MacroAssembler* masm, Label* call_generic_code);
+
+
+class StoreBufferOverflowStub: public PlatformCodeStub {
+ public:
+ explicit StoreBufferOverflowStub(SaveFPRegsMode save_fp)
+ : save_doubles_(save_fp) { }
+
+ void Generate(MacroAssembler* masm);
+
+ static void GenerateFixedRegStubsAheadOfTime(Isolate* isolate);
+ virtual bool SometimesSetsUpAFrame() { return false; }
+
+ private:
+ SaveFPRegsMode save_doubles_;
+
+ Major MajorKey() { return StoreBufferOverflow; }
+ int MinorKey() { return (save_doubles_ == kSaveFPRegs) ? 1 : 0; }
+};
+
+
+class StringHelper : public AllStatic {
+ public:
+ // TODO(all): These don't seem to be used any more. Delete them.
+
+ // Generate string hash.
+ static void GenerateHashInit(MacroAssembler* masm,
+ Register hash,
+ Register character);
+
+ static void GenerateHashAddCharacter(MacroAssembler* masm,
+ Register hash,
+ Register character);
+
+ static void GenerateHashGetHash(MacroAssembler* masm,
+ Register hash,
+ Register scratch);
+
+ private:
+ DISALLOW_IMPLICIT_CONSTRUCTORS(StringHelper);
+};
+
+
+class StoreRegistersStateStub: public PlatformCodeStub {
+ public:
+ explicit StoreRegistersStateStub(SaveFPRegsMode with_fp)
+ : save_doubles_(with_fp) {}
+
+ static Register to_be_pushed_lr() { return ip0; }
+ static void GenerateAheadOfTime(Isolate* isolate);
+ private:
+ Major MajorKey() { return StoreRegistersState; }
+ int MinorKey() { return (save_doubles_ == kSaveFPRegs) ? 1 : 0; }
+ SaveFPRegsMode save_doubles_;
+
+ void Generate(MacroAssembler* masm);
+};
+
+
+class RestoreRegistersStateStub: public PlatformCodeStub {
+ public:
+ explicit RestoreRegistersStateStub(SaveFPRegsMode with_fp)
+ : save_doubles_(with_fp) {}
+
+ static void GenerateAheadOfTime(Isolate* isolate);
+ private:
+ Major MajorKey() { return RestoreRegistersState; }
+ int MinorKey() { return (save_doubles_ == kSaveFPRegs) ? 1 : 0; }
+ SaveFPRegsMode save_doubles_;
+
+ void Generate(MacroAssembler* masm);
+};
+
+
+class RecordWriteStub: public PlatformCodeStub {
+ public:
+ // Stub to record the write of 'value' at 'address' in 'object'.
+ // Typically 'address' = 'object' + <some offset>.
+ // See MacroAssembler::RecordWriteField() for example.
+ RecordWriteStub(Register object,
+ Register value,
+ Register address,
+ RememberedSetAction remembered_set_action,
+ SaveFPRegsMode fp_mode)
+ : object_(object),
+ value_(value),
+ address_(address),
+ remembered_set_action_(remembered_set_action),
+ save_fp_regs_mode_(fp_mode),
+ regs_(object, // An input reg.
+ address, // An input reg.
+ value) { // One scratch reg.
+ }
+
+ enum Mode {
+ STORE_BUFFER_ONLY,
+ INCREMENTAL,
+ INCREMENTAL_COMPACTION
+ };
+
+ virtual bool SometimesSetsUpAFrame() { return false; }
+
+ static Mode GetMode(Code* stub) {
+ // Find the mode depending on the first two instructions.
+ Instruction* instr1 =
+ reinterpret_cast<Instruction*>(stub->instruction_start());
+ Instruction* instr2 = instr1->following();
+
+ if (instr1->IsUncondBranchImm()) {
+ ASSERT(instr2->IsPCRelAddressing() && (instr2->Rd() == xzr.code()));
+ return INCREMENTAL;
+ }
+
+ ASSERT(instr1->IsPCRelAddressing() && (instr1->Rd() == xzr.code()));
+
+ if (instr2->IsUncondBranchImm()) {
+ return INCREMENTAL_COMPACTION;
+ }
+
+ ASSERT(instr2->IsPCRelAddressing());
+
+ return STORE_BUFFER_ONLY;
+ }
+
+ // We patch the first two instructions of the stub back and forth between an
+ // adr and a branch when we start and stop incremental heap marking.
+ // The branch is
+ // b label
+ // The adr is
+ // adr xzr label
+ // so effectively a nop.
+ static void Patch(Code* stub, Mode mode) {
+ // We are going to patch the first two instructions of the stub.
+ PatchingAssembler patcher(
+ reinterpret_cast<Instruction*>(stub->instruction_start()), 2);
+ Instruction* instr1 = patcher.InstructionAt(0);
+ Instruction* instr2 = patcher.InstructionAt(kInstructionSize);
+ // Instructions must be either 'adr' or 'b'.
+ ASSERT(instr1->IsPCRelAddressing() || instr1->IsUncondBranchImm());
+ ASSERT(instr2->IsPCRelAddressing() || instr2->IsUncondBranchImm());
+ // Retrieve the offsets to the labels.
+ int32_t offset_to_incremental_noncompacting = instr1->ImmPCOffset();
+ int32_t offset_to_incremental_compacting = instr2->ImmPCOffset();
+
+ switch (mode) {
+ case STORE_BUFFER_ONLY:
+ ASSERT(GetMode(stub) == INCREMENTAL ||
+ GetMode(stub) == INCREMENTAL_COMPACTION);
+ patcher.adr(xzr, offset_to_incremental_noncompacting);
+ patcher.adr(xzr, offset_to_incremental_compacting);
+ break;
+ case INCREMENTAL:
+ ASSERT(GetMode(stub) == STORE_BUFFER_ONLY);
+ patcher.b(offset_to_incremental_noncompacting >> kInstructionSizeLog2);
+ patcher.adr(xzr, offset_to_incremental_compacting);
+ break;
+ case INCREMENTAL_COMPACTION:
+ ASSERT(GetMode(stub) == STORE_BUFFER_ONLY);
+ patcher.adr(xzr, offset_to_incremental_noncompacting);
+ patcher.b(offset_to_incremental_compacting >> kInstructionSizeLog2);
+ break;
+ }
+ ASSERT(GetMode(stub) == mode);
+ }
+
+ private:
+ // This is a helper class to manage the registers associated with the stub.
+ // The 'object' and 'address' registers must be preserved.
+ class RegisterAllocation {
+ public:
+ RegisterAllocation(Register object,
+ Register address,
+ Register scratch)
+ : object_(object),
+ address_(address),
+ scratch0_(scratch),
+ saved_regs_(kCallerSaved) {
+ ASSERT(!AreAliased(scratch, object, address));
+
+ // We would like to require more scratch registers for this stub,
+ // but the number of registers comes down to the ones used in
+ // FullCodeGen::SetVar(), which is architecture independent.
+ // We allocate 2 extra scratch registers that we'll save on the stack.
+ CPURegList pool_available = GetValidRegistersForAllocation();
+ CPURegList used_regs(object, address, scratch);
+ pool_available.Remove(used_regs);
+ scratch1_ = Register(pool_available.PopLowestIndex());
+ scratch2_ = Register(pool_available.PopLowestIndex());
+
+ // SaveCallerSaveRegisters() needs to save the caller-saved registers.
+ // However, we don't bother saving ip0 and ip1 because they are used as
+ // scratch registers by the MacroAssembler.
+ saved_regs_.Remove(ip0);
+ saved_regs_.Remove(ip1);
+
+ // The scratch registers will be restored by other means so we don't need
+ // to save them with the other caller saved registers.
+ saved_regs_.Remove(scratch0_);
+ saved_regs_.Remove(scratch1_);
+ saved_regs_.Remove(scratch2_);
+ }
+
+ void Save(MacroAssembler* masm) {
+ // We don't have to save scratch0_ because it was given to us as
+ // a scratch register.
+ masm->Push(scratch1_, scratch2_);
+ }
+
+ void Restore(MacroAssembler* masm) {
+ masm->Pop(scratch2_, scratch1_);
+ }
+
+ // If we have to call into C then we need to save and restore all caller-
+ // saved registers that were not already preserved.
+ void SaveCallerSaveRegisters(MacroAssembler* masm, SaveFPRegsMode mode) {
+ // TODO(all): This can be very expensive, and it is likely that not every
+ // register will need to be preserved. Can we improve this?
+ masm->PushCPURegList(saved_regs_);
+ if (mode == kSaveFPRegs) {
+ masm->PushCPURegList(kCallerSavedFP);
+ }
+ }
+
+ void RestoreCallerSaveRegisters(MacroAssembler*masm, SaveFPRegsMode mode) {
+ // TODO(all): This can be very expensive, and it is likely that not every
+ // register will need to be preserved. Can we improve this?
+ if (mode == kSaveFPRegs) {
+ masm->PopCPURegList(kCallerSavedFP);
+ }
+ masm->PopCPURegList(saved_regs_);
+ }
+
+ Register object() { return object_; }
+ Register address() { return address_; }
+ Register scratch0() { return scratch0_; }
+ Register scratch1() { return scratch1_; }
+ Register scratch2() { return scratch2_; }
+
+ private:
+ Register object_;
+ Register address_;
+ Register scratch0_;
+ Register scratch1_;
+ Register scratch2_;
+ CPURegList saved_regs_;
+
+ // TODO(all): We should consider moving this somewhere else.
+ static CPURegList GetValidRegistersForAllocation() {
+ // The list of valid registers for allocation is defined as all the
+ // registers except those with a special meaning.
+ //
+ // The default list excludes registers x26 to x31 because they are
+ // reserved for the following purpose:
+ // - x26 root register
+ // - x27 context pointer register
+ // - x28 jssp
+ // - x29 frame pointer
+ // - x30 link register (lr)
+ // - x31 xzr/stack pointer
+ CPURegList list(CPURegister::kRegister, kXRegSizeInBits, 0, 25);
+
+ // We also remove MacroAssembler's scratch registers.
+ list.Remove(ip0);
+ list.Remove(ip1);
+ list.Remove(x8);
+ list.Remove(x9);
+
+ return list;
+ }
+
+ friend class RecordWriteStub;
+ };
+
+ // A list of stub variants which are pregenerated.
+ // The variants are stored in the same format as the minor key, so
+ // MinorKeyFor() can be used to populate and check this list.
+ static const int kAheadOfTime[];
+
+ void Generate(MacroAssembler* masm);
+ void GenerateIncremental(MacroAssembler* masm, Mode mode);
+
+ enum OnNoNeedToInformIncrementalMarker {
+ kReturnOnNoNeedToInformIncrementalMarker,
+ kUpdateRememberedSetOnNoNeedToInformIncrementalMarker
+ };
+
+ void CheckNeedsToInformIncrementalMarker(
+ MacroAssembler* masm,
+ OnNoNeedToInformIncrementalMarker on_no_need,
+ Mode mode);
+ void InformIncrementalMarker(MacroAssembler* masm);
+
+ Major MajorKey() { return RecordWrite; }
+
+ int MinorKey() {
+ return MinorKeyFor(object_, value_, address_, remembered_set_action_,
+ save_fp_regs_mode_);
+ }
+
+ static int MinorKeyFor(Register object,
+ Register value,
+ Register address,
+ RememberedSetAction action,
+ SaveFPRegsMode fp_mode) {
+ ASSERT(object.Is64Bits());
+ ASSERT(value.Is64Bits());
+ ASSERT(address.Is64Bits());
+ return ObjectBits::encode(object.code()) |
+ ValueBits::encode(value.code()) |
+ AddressBits::encode(address.code()) |
+ RememberedSetActionBits::encode(action) |
+ SaveFPRegsModeBits::encode(fp_mode);
+ }
+
+ void Activate(Code* code) {
+ code->GetHeap()->incremental_marking()->ActivateGeneratedStub(code);
+ }
+
+ class ObjectBits: public BitField<int, 0, 5> {};
+ class ValueBits: public BitField<int, 5, 5> {};
+ class AddressBits: public BitField<int, 10, 5> {};
+ class RememberedSetActionBits: public BitField<RememberedSetAction, 15, 1> {};
+ class SaveFPRegsModeBits: public BitField<SaveFPRegsMode, 16, 1> {};
+
+ Register object_;
+ Register value_;
+ Register address_;
+ RememberedSetAction remembered_set_action_;
+ SaveFPRegsMode save_fp_regs_mode_;
+ Label slow_;
+ RegisterAllocation regs_;
+};
+
+
+// Helper to call C++ functions from generated code. The caller must prepare
+// the exit frame before doing the call with GenerateCall.
+class DirectCEntryStub: public PlatformCodeStub {
+ public:
+ DirectCEntryStub() {}
+ void Generate(MacroAssembler* masm);
+ void GenerateCall(MacroAssembler* masm, Register target);
+
+ private:
+ Major MajorKey() { return DirectCEntry; }
+ int MinorKey() { return 0; }
+
+ bool NeedsImmovableCode() { return true; }
+};
+
+
+class NameDictionaryLookupStub: public PlatformCodeStub {
+ public:
+ enum LookupMode { POSITIVE_LOOKUP, NEGATIVE_LOOKUP };
+
+ explicit NameDictionaryLookupStub(LookupMode mode) : mode_(mode) { }
+
+ void Generate(MacroAssembler* masm);
+
+ static void GenerateNegativeLookup(MacroAssembler* masm,
+ Label* miss,
+ Label* done,
+ Register receiver,
+ Register properties,
+ Handle<Name> name,
+ Register scratch0);
+
+ static void GeneratePositiveLookup(MacroAssembler* masm,
+ Label* miss,
+ Label* done,
+ Register elements,
+ Register name,
+ Register scratch1,
+ Register scratch2);
+
+ virtual bool SometimesSetsUpAFrame() { return false; }
+
+ private:
+ static const int kInlinedProbes = 4;
+ static const int kTotalProbes = 20;
+
+ static const int kCapacityOffset =
+ NameDictionary::kHeaderSize +
+ NameDictionary::kCapacityIndex * kPointerSize;
+
+ static const int kElementsStartOffset =
+ NameDictionary::kHeaderSize +
+ NameDictionary::kElementsStartIndex * kPointerSize;
+
+ Major MajorKey() { return NameDictionaryLookup; }
+
+ int MinorKey() {
+ return LookupModeBits::encode(mode_);
+ }
+
+ class LookupModeBits: public BitField<LookupMode, 0, 1> {};
+
+ LookupMode mode_;
+};
+
+
+class SubStringStub: public PlatformCodeStub {
+ public:
+ SubStringStub() {}
+
+ private:
+ Major MajorKey() { return SubString; }
+ int MinorKey() { return 0; }
+
+ void Generate(MacroAssembler* masm);
+};
+
+
+class StringCompareStub: public PlatformCodeStub {
+ public:
+ StringCompareStub() { }
+
+ // Compares two flat ASCII strings and returns the result in x0.
+ static void GenerateCompareFlatAsciiStrings(MacroAssembler* masm,
+ Register left,
+ Register right,
+ Register scratch1,
+ Register scratch2,
+ Register scratch3,
+ Register scratch4);
+
+ // Compares two flat ASCII strings for equality and returns the result
+ // in x0.
+ static void GenerateFlatAsciiStringEquals(MacroAssembler* masm,
+ Register left,
+ Register right,
+ Register scratch1,
+ Register scratch2,
+ Register scratch3);
+
+ private:
+ virtual Major MajorKey() { return StringCompare; }
+ virtual int MinorKey() { return 0; }
+ virtual void Generate(MacroAssembler* masm);
+
+ static void GenerateAsciiCharsCompareLoop(MacroAssembler* masm,
+ Register left,
+ Register right,
+ Register length,
+ Register scratch1,
+ Register scratch2,
+ Label* chars_not_equal);
+};
+
+
+struct PlatformCallInterfaceDescriptor {
+ explicit PlatformCallInterfaceDescriptor(
+ TargetAddressStorageMode storage_mode)
+ : storage_mode_(storage_mode) { }
+
+ TargetAddressStorageMode storage_mode() { return storage_mode_; }
+
+ private:
+ TargetAddressStorageMode storage_mode_;
+};
+
+
+} } // namespace v8::internal
+
+#endif // V8_ARM64_CODE_STUBS_ARM64_H_
diff --git a/deps/v8/src/arm64/codegen-arm64.cc b/deps/v8/src/arm64/codegen-arm64.cc
new file mode 100644
index 0000000000..831d449862
--- /dev/null
+++ b/deps/v8/src/arm64/codegen-arm64.cc
@@ -0,0 +1,615 @@
+// Copyright 2013 the V8 project authors. All rights reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following
+// disclaimer in the documentation and/or other materials provided
+// with the distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived
+// from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+#include "v8.h"
+
+#if V8_TARGET_ARCH_ARM64
+
+#include "codegen.h"
+#include "macro-assembler.h"
+#include "simulator-arm64.h"
+
+namespace v8 {
+namespace internal {
+
+#define __ ACCESS_MASM(masm)
+
+#if defined(USE_SIMULATOR)
+byte* fast_exp_arm64_machine_code = NULL;
+double fast_exp_simulator(double x) {
+ Simulator * simulator = Simulator::current(Isolate::Current());
+ Simulator::CallArgument args[] = {
+ Simulator::CallArgument(x),
+ Simulator::CallArgument::End()
+ };
+ return simulator->CallDouble(fast_exp_arm64_machine_code, args);
+}
+#endif
+
+
+UnaryMathFunction CreateExpFunction() {
+ if (!FLAG_fast_math) return &std::exp;
+
+ // Use the Math.exp implementation in MathExpGenerator::EmitMathExp() to
+ // create an AAPCS64-compliant exp() function. This will be faster than the
+ // C library's exp() function, but probably less accurate.
+ size_t actual_size;
+ byte* buffer = static_cast<byte*>(OS::Allocate(1 * KB, &actual_size, true));
+ if (buffer == NULL) return &std::exp;
+
+ ExternalReference::InitializeMathExpData();
+ MacroAssembler masm(NULL, buffer, static_cast<int>(actual_size));
+ masm.SetStackPointer(csp);
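+ // The generated function is called like a C function, so use the C stack
+ // pointer (csp) rather than jssp.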
+
+ // The argument will be in d0 on entry.
+ DoubleRegister input = d0;
+ // Use other caller-saved registers for all other values.
+ DoubleRegister result = d1;
+ DoubleRegister double_temp1 = d2;
+ DoubleRegister double_temp2 = d3;
+ Register temp1 = x10;
+ Register temp2 = x11;
+ Register temp3 = x12;
+
+ MathExpGenerator::EmitMathExp(&masm, input, result,
+ double_temp1, double_temp2,
+ temp1, temp2, temp3);
+ // Move the result to the return register.
+ masm.Fmov(d0, result);
+ masm.Ret();
+
+ CodeDesc desc;
+ masm.GetCode(&desc);
+ ASSERT(!RelocInfo::RequiresRelocation(desc));
+
+ CPU::FlushICache(buffer, actual_size);
+ OS::ProtectCode(buffer, actual_size);
+
+#if !defined(USE_SIMULATOR)
+ return FUNCTION_CAST<UnaryMathFunction>(buffer);
+#else
+ fast_exp_arm64_machine_code = buffer;
+ return &fast_exp_simulator;
+#endif
+}
+
+
+UnaryMathFunction CreateSqrtFunction() {
+ return &std::sqrt;
+}
+
+
+// -------------------------------------------------------------------------
+// Platform-specific RuntimeCallHelper functions.
+
+void StubRuntimeCallHelper::BeforeCall(MacroAssembler* masm) const {
+ masm->EnterFrame(StackFrame::INTERNAL);
+ ASSERT(!masm->has_frame());
+ masm->set_has_frame(true);
+}
+
+
+void StubRuntimeCallHelper::AfterCall(MacroAssembler* masm) const {
+ masm->LeaveFrame(StackFrame::INTERNAL);
+ ASSERT(masm->has_frame());
+ masm->set_has_frame(false);
+}
+
+
+// -------------------------------------------------------------------------
+// Code generators
+
+void ElementsTransitionGenerator::GenerateMapChangeElementsTransition(
+ MacroAssembler* masm, AllocationSiteMode mode,
+ Label* allocation_memento_found) {
+ // ----------- S t a t e -------------
+ // -- x2 : receiver
+ // -- x3 : target map
+ // -----------------------------------
+ Register receiver = x2;
+ Register map = x3;
+
+ if (mode == TRACK_ALLOCATION_SITE) {
+ ASSERT(allocation_memento_found != NULL);
+ __ JumpIfJSArrayHasAllocationMemento(receiver, x10, x11,
+ allocation_memento_found);
+ }
+
+ // Set transitioned map.
+ __ Str(map, FieldMemOperand(receiver, HeapObject::kMapOffset));
+ __ RecordWriteField(receiver,
+ HeapObject::kMapOffset,
+ map,
+ x10,
+ kLRHasNotBeenSaved,
+ kDontSaveFPRegs,
+ EMIT_REMEMBERED_SET,
+ OMIT_SMI_CHECK);
+}
+
+
+void ElementsTransitionGenerator::GenerateSmiToDouble(
+ MacroAssembler* masm, AllocationSiteMode mode, Label* fail) {
+ ASM_LOCATION("ElementsTransitionGenerator::GenerateSmiToDouble");
+ // ----------- S t a t e -------------
+ // -- lr : return address
+ // -- x0 : value
+ // -- x1 : key
+ // -- x2 : receiver
+ // -- x3 : target map, scratch for subsequent call
+ // -----------------------------------
+ Register receiver = x2;
+ Register target_map = x3;
+
+ Label gc_required, only_change_map;
+
+ if (mode == TRACK_ALLOCATION_SITE) {
+ __ JumpIfJSArrayHasAllocationMemento(receiver, x10, x11, fail);
+ }
+
+ // Check for empty arrays, which only require a map transition and no changes
+ // to the backing store.
+ Register elements = x4;
+ __ Ldr(elements, FieldMemOperand(receiver, JSObject::kElementsOffset));
+ __ JumpIfRoot(elements, Heap::kEmptyFixedArrayRootIndex, &only_change_map);
+
+ __ Push(lr);
+ Register length = x5;
+ __ Ldrsw(length, UntagSmiFieldMemOperand(elements,
+ FixedArray::kLengthOffset));
+
+ // Allocate new FixedDoubleArray.
+ Register array_size = x6;
+ Register array = x7;
+ __ Lsl(array_size, length, kDoubleSizeLog2);
+ __ Add(array_size, array_size, FixedDoubleArray::kHeaderSize);
+ __ Allocate(array_size, array, x10, x11, &gc_required, DOUBLE_ALIGNMENT);
+ // Register array now holds a non-tagged (raw) heap object pointer.
+
+ // Set the destination FixedDoubleArray's length and map.
+ Register map_root = x6;
+ __ LoadRoot(map_root, Heap::kFixedDoubleArrayMapRootIndex);
+ __ SmiTag(x11, length);
+ __ Str(x11, MemOperand(array, FixedDoubleArray::kLengthOffset));
+ __ Str(map_root, MemOperand(array, HeapObject::kMapOffset));
+
+ __ Str(target_map, FieldMemOperand(receiver, HeapObject::kMapOffset));
+ __ RecordWriteField(receiver, HeapObject::kMapOffset, target_map, x6,
+ kLRHasBeenSaved, kDontSaveFPRegs, OMIT_REMEMBERED_SET,
+ OMIT_SMI_CHECK);
+
+ // Replace receiver's backing store with newly created FixedDoubleArray.
+ __ Add(x10, array, kHeapObjectTag);
+ __ Str(x10, FieldMemOperand(receiver, JSObject::kElementsOffset));
+ __ RecordWriteField(receiver, JSObject::kElementsOffset, x10,
+ x6, kLRHasBeenSaved, kDontSaveFPRegs,
+ EMIT_REMEMBERED_SET, OMIT_SMI_CHECK);
+
+ // Prepare for conversion loop.
+ Register src_elements = x10;
+ Register dst_elements = x11;
+ Register dst_end = x12;
+ __ Add(src_elements, elements, FixedArray::kHeaderSize - kHeapObjectTag);
+ __ Add(dst_elements, array, FixedDoubleArray::kHeaderSize);
+ __ Add(dst_end, dst_elements, Operand(length, LSL, kDoubleSizeLog2));
+
+ FPRegister nan_d = d1;
+ __ Fmov(nan_d, rawbits_to_double(kHoleNanInt64));
+
+ Label entry, done;
+ __ B(&entry);
+
+ __ Bind(&only_change_map);
+ __ Str(target_map, FieldMemOperand(receiver, HeapObject::kMapOffset));
+ __ RecordWriteField(receiver, HeapObject::kMapOffset, target_map, x6,
+ kLRHasNotBeenSaved, kDontSaveFPRegs, OMIT_REMEMBERED_SET,
+ OMIT_SMI_CHECK);
+ __ B(&done);
+
+ // Call into runtime if GC is required.
+ __ Bind(&gc_required);
+ __ Pop(lr);
+ __ B(fail);
+
+ // Iterate over the array, copying and converting smis to doubles. If an
+ // element is not a smi, write a hole to the destination.
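+ // Each element is speculatively untagged as a smi; if it turns out not to be
+ // a smi, the Fcsel below stores the hole NaN instead.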
+ {
+ Label loop;
+ __ Bind(&loop);
+ __ Ldr(x13, MemOperand(src_elements, kPointerSize, PostIndex));
+ __ SmiUntagToDouble(d0, x13, kSpeculativeUntag);
+ __ Tst(x13, kSmiTagMask);
+ __ Fcsel(d0, d0, nan_d, eq);
+ __ Str(d0, MemOperand(dst_elements, kDoubleSize, PostIndex));
+
+ __ Bind(&entry);
+ __ Cmp(dst_elements, dst_end);
+ __ B(lt, &loop);
+ }
+
+ __ Pop(lr);
+ __ Bind(&done);
+}
+
+
+void ElementsTransitionGenerator::GenerateDoubleToObject(
+ MacroAssembler* masm, AllocationSiteMode mode, Label* fail) {
+ ASM_LOCATION("ElementsTransitionGenerator::GenerateDoubleToObject");
+ // ----------- S t a t e -------------
+ // -- x0 : value
+ // -- x1 : key
+ // -- x2 : receiver
+ // -- lr : return address
+ // -- x3 : target map, scratch for subsequent call
+ // -- x4 : scratch (elements)
+ // -----------------------------------
+ Register value = x0;
+ Register key = x1;
+ Register receiver = x2;
+ Register target_map = x3;
+
+ if (mode == TRACK_ALLOCATION_SITE) {
+ __ JumpIfJSArrayHasAllocationMemento(receiver, x10, x11, fail);
+ }
+
+ // Check for empty arrays, which only require a map transition and no changes
+ // to the backing store.
+ Label only_change_map;
+ Register elements = x4;
+ __ Ldr(elements, FieldMemOperand(receiver, JSObject::kElementsOffset));
+ __ JumpIfRoot(elements, Heap::kEmptyFixedArrayRootIndex, &only_change_map);
+
+ __ Push(lr);
+ // TODO(all): These registers may not need to be pushed. Examine
+ // RecordWriteStub and check whether it's needed.
+ __ Push(target_map, receiver, key, value);
+ Register length = x5;
+ __ Ldrsw(length, UntagSmiFieldMemOperand(elements,
+ FixedArray::kLengthOffset));
+
+ // Allocate new FixedArray.
+ Register array_size = x6;
+ Register array = x7;
+ Label gc_required;
+ __ Mov(array_size, FixedDoubleArray::kHeaderSize);
+ __ Add(array_size, array_size, Operand(length, LSL, kPointerSizeLog2));
+ __ Allocate(array_size, array, x10, x11, &gc_required, NO_ALLOCATION_FLAGS);
+
+ // Set the destination FixedArray's length and map.
+ Register map_root = x6;
+ __ LoadRoot(map_root, Heap::kFixedArrayMapRootIndex);
+ __ SmiTag(x11, length);
+ __ Str(x11, MemOperand(array, FixedDoubleArray::kLengthOffset));
+ __ Str(map_root, MemOperand(array, HeapObject::kMapOffset));
+
+ // Prepare for conversion loop.
+ Register src_elements = x10;
+ Register dst_elements = x11;
+ Register dst_end = x12;
+ __ Add(src_elements, elements,
+ FixedDoubleArray::kHeaderSize - kHeapObjectTag);
+ __ Add(dst_elements, array, FixedArray::kHeaderSize);
+ __ Add(array, array, kHeapObjectTag);
+ __ Add(dst_end, dst_elements, Operand(length, LSL, kPointerSizeLog2));
+
+ Register the_hole = x14;
+ Register heap_num_map = x15;
+ __ LoadRoot(the_hole, Heap::kTheHoleValueRootIndex);
+ __ LoadRoot(heap_num_map, Heap::kHeapNumberMapRootIndex);
+
+ Label entry;
+ __ B(&entry);
+
+ // Call into runtime if GC is required.
+ __ Bind(&gc_required);
+ __ Pop(value, key, receiver, target_map);
+ __ Pop(lr);
+ __ B(fail);
+
+ {
+ Label loop, convert_hole;
+ __ Bind(&loop);
+ __ Ldr(x13, MemOperand(src_elements, kPointerSize, PostIndex));
+ __ Cmp(x13, kHoleNanInt64);
+ __ B(eq, &convert_hole);
+
+ // Non-hole double, copy value into a heap number.
+ Register heap_num = x5;
+ __ AllocateHeapNumber(heap_num, &gc_required, x6, x4, heap_num_map);
+ __ Str(x13, FieldMemOperand(heap_num, HeapNumber::kValueOffset));
+ __ Mov(x13, dst_elements);
+ __ Str(heap_num, MemOperand(dst_elements, kPointerSize, PostIndex));
+ __ RecordWrite(array, x13, heap_num, kLRHasBeenSaved, kDontSaveFPRegs,
+ EMIT_REMEMBERED_SET, OMIT_SMI_CHECK);
+
+ __ B(&entry);
+
+ // Replace the-hole NaN with the-hole pointer.
+ __ Bind(&convert_hole);
+ __ Str(the_hole, MemOperand(dst_elements, kPointerSize, PostIndex));
+
+ __ Bind(&entry);
+ __ Cmp(dst_elements, dst_end);
+ __ B(lt, &loop);
+ }
+
+ __ Pop(value, key, receiver, target_map);
+ // Replace receiver's backing store with newly created and filled FixedArray.
+ __ Str(array, FieldMemOperand(receiver, JSObject::kElementsOffset));
+ __ RecordWriteField(receiver, JSObject::kElementsOffset, array, x13,
+ kLRHasBeenSaved, kDontSaveFPRegs, EMIT_REMEMBERED_SET,
+ OMIT_SMI_CHECK);
+ __ Pop(lr);
+
+ __ Bind(&only_change_map);
+ __ Str(target_map, FieldMemOperand(receiver, HeapObject::kMapOffset));
+ __ RecordWriteField(receiver, HeapObject::kMapOffset, target_map, x13,
+ kLRHasNotBeenSaved, kDontSaveFPRegs, OMIT_REMEMBERED_SET,
+ OMIT_SMI_CHECK);
+}
+
+
+bool Code::IsYoungSequence(byte* sequence) {
+ return MacroAssembler::IsYoungSequence(sequence);
+}
+
+
+void Code::GetCodeAgeAndParity(byte* sequence, Age* age,
+ MarkingParity* parity) {
+ if (IsYoungSequence(sequence)) {
+ *age = kNoAgeCodeAge;
+ *parity = NO_MARKING_PARITY;
+ } else {
+ byte* target = sequence + kCodeAgeStubEntryOffset;
+ Code* stub = GetCodeFromTargetAddress(Memory::Address_at(target));
+ GetCodeAgeAndParity(stub, age, parity);
+ }
+}
+
+
+void Code::PatchPlatformCodeAge(Isolate* isolate,
+ byte* sequence,
+ Code::Age age,
+ MarkingParity parity) {
+ PatchingAssembler patcher(sequence, kCodeAgeSequenceSize / kInstructionSize);
+ if (age == kNoAgeCodeAge) {
+ MacroAssembler::EmitFrameSetupForCodeAgePatching(&patcher);
+ } else {
+ Code * stub = GetCodeAgeStub(isolate, age, parity);
+ MacroAssembler::EmitCodeAgeSequence(&patcher, stub);
+ }
+}
+
+
+void StringCharLoadGenerator::Generate(MacroAssembler* masm,
+ Register string,
+ Register index,
+ Register result,
+ Label* call_runtime) {
+ ASSERT(string.Is64Bits() && index.Is32Bits() && result.Is64Bits());
+ // Fetch the instance type of the receiver into result register.
+ __ Ldr(result, FieldMemOperand(string, HeapObject::kMapOffset));
+ __ Ldrb(result, FieldMemOperand(result, Map::kInstanceTypeOffset));
+
+ // We need special handling for indirect strings.
+ Label check_sequential;
+ __ TestAndBranchIfAllClear(result, kIsIndirectStringMask, &check_sequential);
+
+ // Dispatch on the indirect string shape: slice or cons.
+ Label cons_string;
+ __ TestAndBranchIfAllClear(result, kSlicedNotConsMask, &cons_string);
+
+ // Handle slices.
+ Label indirect_string_loaded;
+ __ Ldr(result.W(),
+ UntagSmiFieldMemOperand(string, SlicedString::kOffsetOffset));
+ __ Ldr(string, FieldMemOperand(string, SlicedString::kParentOffset));
+ __ Add(index, index, result.W());
+ __ B(&indirect_string_loaded);
+
+ // Handle cons strings.
+ // Check whether the right hand side is the empty string (i.e. if
+ // this is really a flat string in a cons string). If that is not
+ // the case we would rather go to the runtime system now to flatten
+ // the string.
+ __ Bind(&cons_string);
+ __ Ldr(result, FieldMemOperand(string, ConsString::kSecondOffset));
+ __ JumpIfNotRoot(result, Heap::kempty_stringRootIndex, call_runtime);
+ // Get the first of the two strings and load its instance type.
+ __ Ldr(string, FieldMemOperand(string, ConsString::kFirstOffset));
+
+ __ Bind(&indirect_string_loaded);
+ __ Ldr(result, FieldMemOperand(string, HeapObject::kMapOffset));
+ __ Ldrb(result, FieldMemOperand(result, Map::kInstanceTypeOffset));
+
+ // Distinguish sequential and external strings. Only these two string
+ // representations can reach here (slices and flat cons strings have been
+ // reduced to the underlying sequential or external string).
+ Label external_string, check_encoding;
+ __ Bind(&check_sequential);
+ STATIC_ASSERT(kSeqStringTag == 0);
+ __ TestAndBranchIfAnySet(result, kStringRepresentationMask, &external_string);
+
+ // Prepare sequential strings.
+ STATIC_ASSERT(SeqTwoByteString::kHeaderSize == SeqOneByteString::kHeaderSize);
+ __ Add(string, string, SeqTwoByteString::kHeaderSize - kHeapObjectTag);
+ __ B(&check_encoding);
+
+ // Handle external strings.
+ __ Bind(&external_string);
+ if (FLAG_debug_code) {
+ // Assert that we do not have a cons or slice (indirect strings) here.
+ // Sequential strings have already been ruled out.
+ __ Tst(result, kIsIndirectStringMask);
+ __ Assert(eq, kExternalStringExpectedButNotFound);
+ }
+ // Rule out short external strings.
+ STATIC_CHECK(kShortExternalStringTag != 0);
+ // TestAndBranchIfAnySet can emit Tbnz. Do not use it because call_runtime
+ // can be bound far away in deferred code.
+ __ Tst(result, kShortExternalStringMask);
+ __ B(ne, call_runtime);
+ __ Ldr(string, FieldMemOperand(string, ExternalString::kResourceDataOffset));
+
+ Label ascii, done;
+ __ Bind(&check_encoding);
+ STATIC_ASSERT(kTwoByteStringTag == 0);
+ __ TestAndBranchIfAnySet(result, kStringEncodingMask, &ascii);
+ // Two-byte string.
+ __ Ldrh(result, MemOperand(string, index, SXTW, 1));
+ __ B(&done);
+ __ Bind(&ascii);
+ // Ascii string.
+ __ Ldrb(result, MemOperand(string, index, SXTW));
+ __ Bind(&done);
+}
+
+
+static MemOperand ExpConstant(Register base, int index) {
+ return MemOperand(base, index * kDoubleSize);
+}
+
+
+void MathExpGenerator::EmitMathExp(MacroAssembler* masm,
+ DoubleRegister input,
+ DoubleRegister result,
+ DoubleRegister double_temp1,
+ DoubleRegister double_temp2,
+ Register temp1,
+ Register temp2,
+ Register temp3) {
+ // TODO(jbramley): There are several instances where fnmsub could be used
+ // instead of fmul and fsub. Doing this changes the result, but since this is
+ // an estimation anyway, does it matter?
+
+ ASSERT(!AreAliased(input, result,
+ double_temp1, double_temp2,
+ temp1, temp2, temp3));
+ ASSERT(ExternalReference::math_exp_constants(0).address() != NULL);
+
+ Label done;
+ DoubleRegister double_temp3 = result;
+ Register constants = temp3;
+
+ // The algorithm used relies on some magic constants which are initialized in
+ // ExternalReference::InitializeMathExpData().
+
+ // Load the address of the start of the array.
+ __ Mov(constants, ExternalReference::math_exp_constants(0));
+
+ // We have to do a four-way split here:
+ // - If input <= about -708.4, the output always rounds to zero.
+ // - If input >= about 709.8, the output always rounds to +infinity.
+ // - If the input is NaN, the output is NaN.
+ // - Otherwise, the result needs to be calculated.
+ Label result_is_finite_non_zero;
+ // Assert that we can load offset 0 (the small input threshold) and offset 1
+ // (the large input threshold) with a single ldp.
+ ASSERT(kDRegSize == (ExpConstant(constants, 1).offset() -
+ ExpConstant(constants, 0).offset()));
+ __ Ldp(double_temp1, double_temp2, ExpConstant(constants, 0));
+
+ __ Fcmp(input, double_temp1);
+ __ Fccmp(input, double_temp2, NoFlag, hi);
+ // At this point, the condition flags can be in one of five states:
+ // NZCV
+ // 1000 -708.4 < input < 709.8 result = exp(input)
+ // 0110 input == 709.8 result = +infinity
+ // 0010 input > 709.8 result = +infinity
+ // 0011 input is NaN result = input
+ // 0000 input <= -708.4 result = +0.0
+
+ // Continue the common case first. 'mi' tests N == 1.
+ __ B(&result_is_finite_non_zero, mi);
+
+ // TODO(jbramley): Consider adding a +infinity register for ARM64.
+ __ Ldr(double_temp2, ExpConstant(constants, 2)); // Synthesize +infinity.
+
+ // Select between +0.0 and +infinity. 'lo' tests C == 0.
+ __ Fcsel(result, fp_zero, double_temp2, lo);
+ // Select between {+0.0 or +infinity} and input. 'vc' tests V == 0.
+ __ Fcsel(result, result, input, vc);
+ __ B(&done);
+
+ // The rest is magic, as described in InitializeMathExpData().
+ __ Bind(&result_is_finite_non_zero);
+
+ // Assert that we can load offset 3 and offset 4 with a single ldp.
+ ASSERT(kDRegSize == (ExpConstant(constants, 4).offset() -
+ ExpConstant(constants, 3).offset()));
+ __ Ldp(double_temp1, double_temp3, ExpConstant(constants, 3));
+ __ Fmadd(double_temp1, double_temp1, input, double_temp3);
+ __ Fmov(temp2.W(), double_temp1.S());
+ __ Fsub(double_temp1, double_temp1, double_temp3);
+
+ // Assert that we can load offset 5 and offset 6 with a single ldp.
+ ASSERT(kDRegSize == (ExpConstant(constants, 6).offset() -
+ ExpConstant(constants, 5).offset()));
+ __ Ldp(double_temp2, double_temp3, ExpConstant(constants, 5));
+ // TODO(jbramley): Consider using Fnmsub here.
+ __ Fmul(double_temp1, double_temp1, double_temp2);
+ __ Fsub(double_temp1, double_temp1, input);
+
+ __ Fmul(double_temp2, double_temp1, double_temp1);
+ __ Fsub(double_temp3, double_temp3, double_temp1);
+ __ Fmul(double_temp3, double_temp3, double_temp2);
+
+ __ Mov(temp1.W(), Operand(temp2.W(), LSR, 11));
+
+ __ Ldr(double_temp2, ExpConstant(constants, 7));
+ // TODO(jbramley): Consider using Fnmsub here.
+ __ Fmul(double_temp3, double_temp3, double_temp2);
+ __ Fsub(double_temp3, double_temp3, double_temp1);
+
+ // The 8th constant is 1.0, so use an immediate move rather than a load.
+ // We can't generate a runtime assertion here as we would need to call Abort
+ // in the runtime and we don't have an Isolate when we generate this code.
+ __ Fmov(double_temp2, 1.0);
+ __ Fadd(double_temp3, double_temp3, double_temp2);
+
+ __ And(temp2, temp2, 0x7ff);
+ __ Add(temp1, temp1, 0x3ff);
+
+ // Do the final table lookup.
+ __ Mov(temp3, ExternalReference::math_exp_log_table());
+
+ __ Add(temp3, temp3, Operand(temp2, LSL, kDRegSizeLog2));
+ __ Ldp(temp2.W(), temp3.W(), MemOperand(temp3));
+ __ Orr(temp1.W(), temp3.W(), Operand(temp1.W(), LSL, 20));
+ __ Bfi(temp2, temp1, 32, 32);
+ __ Fmov(double_temp1, temp2);
+
+ __ Fmul(result, double_temp3, double_temp1);
+
+ __ Bind(&done);
+}
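+
+// For reference, the four-way split handled above corresponds to the
+// following C++ sketch, assuming the approximate thresholds set up by
+// ExternalReference::InitializeMathExpData() and a hypothetical
+// exp_via_table() helper for the finite, non-zero path:
+//
+//   double MathExpSketch(double input) {
+//     if (std::isnan(input)) return input;      // NaN propagates unchanged.
+//     if (input <= -708.4) return +0.0;         // Rounds to +0.0.
+//     if (input >= 709.8) return V8_INFINITY;   // Rounds to +infinity.
+//     return exp_via_table(input);              // Common, finite case.
+//   }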
+
+#undef __
+
+} } // namespace v8::internal
+
+#endif // V8_TARGET_ARCH_ARM64
diff --git a/deps/v8/src/arm64/codegen-arm64.h b/deps/v8/src/arm64/codegen-arm64.h
new file mode 100644
index 0000000000..4d8a9a85a7
--- /dev/null
+++ b/deps/v8/src/arm64/codegen-arm64.h
@@ -0,0 +1,71 @@
+// Copyright 2013 the V8 project authors. All rights reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following
+// disclaimer in the documentation and/or other materials provided
+// with the distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived
+// from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+#ifndef V8_ARM64_CODEGEN_ARM64_H_
+#define V8_ARM64_CODEGEN_ARM64_H_
+
+#include "ast.h"
+#include "ic-inl.h"
+
+namespace v8 {
+namespace internal {
+
+class StringCharLoadGenerator : public AllStatic {
+ public:
+ // Generates the code for handling different string types and loading the
+ // indexed character into |result|. We expect |index| as untagged input and
+ // |result| as untagged output. Register index is asserted to be a 32-bit W
+ // register.
+ static void Generate(MacroAssembler* masm,
+ Register string,
+ Register index,
+ Register result,
+ Label* call_runtime);
+
+ private:
+ DISALLOW_COPY_AND_ASSIGN(StringCharLoadGenerator);
+};
+
+
+class MathExpGenerator : public AllStatic {
+ public:
+ static void EmitMathExp(MacroAssembler* masm,
+ DoubleRegister input,
+ DoubleRegister result,
+ DoubleRegister double_scratch1,
+ DoubleRegister double_scratch2,
+ Register temp1,
+ Register temp2,
+ Register temp3);
+
+ private:
+ DISALLOW_COPY_AND_ASSIGN(MathExpGenerator);
+};
+
+} } // namespace v8::internal
+
+#endif // V8_ARM64_CODEGEN_ARM64_H_
diff --git a/deps/v8/src/arm64/constants-arm64.h b/deps/v8/src/arm64/constants-arm64.h
new file mode 100644
index 0000000000..8866e23cf1
--- /dev/null
+++ b/deps/v8/src/arm64/constants-arm64.h
@@ -0,0 +1,1271 @@
+// Copyright 2013 the V8 project authors. All rights reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following
+// disclaimer in the documentation and/or other materials provided
+// with the distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived
+// from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+#ifndef V8_ARM64_CONSTANTS_ARM64_H_
+#define V8_ARM64_CONSTANTS_ARM64_H_
+
+
+// Assert that this is an LP64 system.
+STATIC_ASSERT(sizeof(int) == sizeof(int32_t)); // NOLINT(runtime/sizeof)
+STATIC_ASSERT(sizeof(long) == sizeof(int64_t)); // NOLINT(runtime/int)
+STATIC_ASSERT(sizeof(void *) == sizeof(int64_t)); // NOLINT(runtime/sizeof)
+STATIC_ASSERT(sizeof(1) == sizeof(int32_t)); // NOLINT(runtime/sizeof)
+STATIC_ASSERT(sizeof(1L) == sizeof(int64_t)); // NOLINT(runtime/sizeof)
+
+
+// Get the standard printf format macros for C99 stdint types.
+#define __STDC_FORMAT_MACROS
+#include <inttypes.h>
+
+
+namespace v8 {
+namespace internal {
+
+
+const unsigned kInstructionSize = 4;
+const unsigned kInstructionSizeLog2 = 2;
+const unsigned kLiteralEntrySize = 4;
+const unsigned kLiteralEntrySizeLog2 = 2;
+const unsigned kMaxLoadLiteralRange = 1 * MB;
+
+const unsigned kNumberOfRegisters = 32;
+const unsigned kNumberOfFPRegisters = 32;
+// Callee saved registers are x19-x30(lr).
+const int kNumberOfCalleeSavedRegisters = 11;
+const int kFirstCalleeSavedRegisterIndex = 19;
+// Callee saved FP registers are d8-d15.
+const int kNumberOfCalleeSavedFPRegisters = 8;
+const int kFirstCalleeSavedFPRegisterIndex = 8;
+// Callee saved registers with no specific purpose in JS are x19-x25.
+const unsigned kJSCalleeSavedRegList = 0x03f80000;
+// TODO(all): k<Y>RegSize should probably be k<Y>RegSizeInBits.
+const unsigned kWRegSizeInBits = 32;
+const unsigned kWRegSizeInBitsLog2 = 5;
+const unsigned kWRegSize = kWRegSizeInBits >> 3;
+const unsigned kWRegSizeLog2 = kWRegSizeInBitsLog2 - 3;
+const unsigned kXRegSizeInBits = 64;
+const unsigned kXRegSizeInBitsLog2 = 6;
+const unsigned kXRegSize = kXRegSizeInBits >> 3;
+const unsigned kXRegSizeLog2 = kXRegSizeInBitsLog2 - 3;
+const unsigned kSRegSizeInBits = 32;
+const unsigned kSRegSizeInBitsLog2 = 5;
+const unsigned kSRegSize = kSRegSizeInBits >> 3;
+const unsigned kSRegSizeLog2 = kSRegSizeInBitsLog2 - 3;
+const unsigned kDRegSizeInBits = 64;
+const unsigned kDRegSizeInBitsLog2 = 6;
+const unsigned kDRegSize = kDRegSizeInBits >> 3;
+const unsigned kDRegSizeLog2 = kDRegSizeInBitsLog2 - 3;
+const int64_t kWRegMask = 0x00000000ffffffffL;
+const int64_t kXRegMask = 0xffffffffffffffffL;
+const int64_t kSRegMask = 0x00000000ffffffffL;
+const int64_t kDRegMask = 0xffffffffffffffffL;
+// TODO(all): check if the expression below works on all compilers or if it
+// triggers an overflow error.
+const int64_t kDSignBit = 63;
+const int64_t kDSignMask = 0x1L << kDSignBit;
+const int64_t kSSignBit = 31;
+const int64_t kSSignMask = 0x1L << kSSignBit;
+const int64_t kXSignBit = 63;
+const int64_t kXSignMask = 0x1L << kXSignBit;
+const int64_t kWSignBit = 31;
+const int64_t kWSignMask = 0x1L << kWSignBit;
+const int64_t kDQuietNanBit = 51;
+const int64_t kDQuietNanMask = 0x1L << kDQuietNanBit;
+const int64_t kSQuietNanBit = 22;
+const int64_t kSQuietNanMask = 0x1L << kSQuietNanBit;
+const int64_t kByteMask = 0xffL;
+const int64_t kHalfWordMask = 0xffffL;
+const int64_t kWordMask = 0xffffffffL;
+const uint64_t kXMaxUInt = 0xffffffffffffffffUL;
+const uint64_t kWMaxUInt = 0xffffffffUL;
+const int64_t kXMaxInt = 0x7fffffffffffffffL;
+const int64_t kXMinInt = 0x8000000000000000L;
+const int32_t kWMaxInt = 0x7fffffff;
+const int32_t kWMinInt = 0x80000000;
+const unsigned kFramePointerRegCode = 29;
+const unsigned kLinkRegCode = 30;
+const unsigned kZeroRegCode = 31;
+const unsigned kJSSPCode = 28;
+const unsigned kSPRegInternalCode = 63;
+const unsigned kRegCodeMask = 0x1f;
+// Standard machine types defined by AAPCS64.
+const unsigned kByteSize = 8;
+const unsigned kByteSizeInBytes = kByteSize >> 3;
+const unsigned kHalfWordSize = 16;
+const unsigned kHalfWordSizeLog2 = 4;
+const unsigned kHalfWordSizeInBytes = kHalfWordSize >> 3;
+const unsigned kHalfWordSizeInBytesLog2 = kHalfWordSizeLog2 - 3;
+const unsigned kWordSize = 32;
+const unsigned kWordSizeLog2 = 5;
+const unsigned kWordSizeInBytes = kWordSize >> 3;
+const unsigned kWordSizeInBytesLog2 = kWordSizeLog2 - 3;
+const unsigned kDoubleWordSize = 64;
+const unsigned kDoubleWordSizeInBytes = kDoubleWordSize >> 3;
+const unsigned kQuadWordSize = 128;
+const unsigned kQuadWordSizeInBytes = kQuadWordSize >> 3;
+// AArch64 floating-point specifics. These match IEEE-754.
+const unsigned kDoubleMantissaBits = 52;
+const unsigned kDoubleExponentBits = 11;
+const unsigned kFloatMantissaBits = 23;
+const unsigned kFloatExponentBits = 8;
+
+#define REGISTER_CODE_LIST(R) \
+R(0) R(1) R(2) R(3) R(4) R(5) R(6) R(7) \
+R(8) R(9) R(10) R(11) R(12) R(13) R(14) R(15) \
+R(16) R(17) R(18) R(19) R(20) R(21) R(22) R(23) \
+R(24) R(25) R(26) R(27) R(28) R(29) R(30) R(31)
+
+#define INSTRUCTION_FIELDS_LIST(V_) \
+/* Register fields */ \
+V_(Rd, 4, 0, Bits) /* Destination register. */ \
+V_(Rn, 9, 5, Bits) /* First source register. */ \
+V_(Rm, 20, 16, Bits) /* Second source register. */ \
+V_(Ra, 14, 10, Bits) /* Third source register. */ \
+V_(Rt, 4, 0, Bits) /* Load dest / store source. */ \
+V_(Rt2, 14, 10, Bits) /* Load second dest / */ \
+ /* store second source. */ \
+V_(PrefetchMode, 4, 0, Bits) \
+ \
+/* Common bits */ \
+V_(SixtyFourBits, 31, 31, Bits) \
+V_(FlagsUpdate, 29, 29, Bits) \
+ \
+/* PC relative addressing */ \
+V_(ImmPCRelHi, 23, 5, SignedBits) \
+V_(ImmPCRelLo, 30, 29, Bits) \
+ \
+/* Add/subtract/logical shift register */ \
+V_(ShiftDP, 23, 22, Bits) \
+V_(ImmDPShift, 15, 10, Bits) \
+ \
+/* Add/subtract immediate */ \
+V_(ImmAddSub, 21, 10, Bits) \
+V_(ShiftAddSub, 23, 22, Bits) \
+ \
+/* Add/subtract extend */                                                   \
+V_(ImmExtendShift, 12, 10, Bits) \
+V_(ExtendMode, 15, 13, Bits) \
+ \
+/* Move wide */ \
+V_(ImmMoveWide, 20, 5, Bits) \
+V_(ShiftMoveWide, 22, 21, Bits) \
+ \
+/* Logical immediate, bitfield and extract */ \
+V_(BitN, 22, 22, Bits) \
+V_(ImmRotate, 21, 16, Bits) \
+V_(ImmSetBits, 15, 10, Bits) \
+V_(ImmR, 21, 16, Bits) \
+V_(ImmS, 15, 10, Bits) \
+ \
+/* Test and branch immediate */ \
+V_(ImmTestBranch, 18, 5, SignedBits) \
+V_(ImmTestBranchBit40, 23, 19, Bits) \
+V_(ImmTestBranchBit5, 31, 31, Bits) \
+ \
+/* Conditionals */ \
+V_(Condition, 15, 12, Bits) \
+V_(ConditionBranch, 3, 0, Bits) \
+V_(Nzcv, 3, 0, Bits) \
+V_(ImmCondCmp, 20, 16, Bits) \
+V_(ImmCondBranch, 23, 5, SignedBits) \
+ \
+/* Floating point */ \
+V_(FPType, 23, 22, Bits) \
+V_(ImmFP, 20, 13, Bits) \
+V_(FPScale, 15, 10, Bits) \
+ \
+/* Load Store */ \
+V_(ImmLS, 20, 12, SignedBits) \
+V_(ImmLSUnsigned, 21, 10, Bits) \
+V_(ImmLSPair, 21, 15, SignedBits) \
+V_(SizeLS, 31, 30, Bits) \
+V_(ImmShiftLS, 12, 12, Bits) \
+ \
+/* Other immediates */ \
+V_(ImmUncondBranch, 25, 0, SignedBits) \
+V_(ImmCmpBranch, 23, 5, SignedBits) \
+V_(ImmLLiteral, 23, 5, SignedBits) \
+V_(ImmException, 20, 5, Bits) \
+V_(ImmHint, 11, 5, Bits) \
+V_(ImmBarrierDomain, 11, 10, Bits) \
+V_(ImmBarrierType, 9, 8, Bits) \
+ \
+/* System (MRS, MSR) */ \
+V_(ImmSystemRegister, 19, 5, Bits) \
+V_(SysO0, 19, 19, Bits) \
+V_(SysOp1, 18, 16, Bits) \
+V_(SysOp2, 7, 5, Bits) \
+V_(CRn, 15, 12, Bits) \
+V_(CRm, 11, 8, Bits) \
+
+
+#define SYSTEM_REGISTER_FIELDS_LIST(V_, M_) \
+/* NZCV */ \
+V_(Flags, 31, 28, Bits, uint32_t) \
+V_(N, 31, 31, Bits, bool) \
+V_(Z, 30, 30, Bits, bool) \
+V_(C, 29, 29, Bits, bool) \
+V_(V, 28, 28, Bits, uint32_t) \
+M_(NZCV, Flags_mask) \
+ \
+/* FPCR */ \
+V_(AHP, 26, 26, Bits, bool) \
+V_(DN, 25, 25, Bits, bool) \
+V_(FZ, 24, 24, Bits, bool) \
+V_(RMode, 23, 22, Bits, FPRounding) \
+M_(FPCR, AHP_mask | DN_mask | FZ_mask | RMode_mask)
+
+
+// Field offsets.
+#define DECLARE_FIELDS_OFFSETS(Name, HighBit, LowBit, unused_1, unused_2) \
+ const int Name##_offset = LowBit; \
+ const int Name##_width = HighBit - LowBit + 1; \
+ const uint32_t Name##_mask = ((1 << Name##_width) - 1) << LowBit;
+#define DECLARE_INSTRUCTION_FIELDS_OFFSETS(Name, HighBit, LowBit, unused_1) \
+ DECLARE_FIELDS_OFFSETS(Name, HighBit, LowBit, unused_1, unused_2)
+#define NOTHING(A, B)
+INSTRUCTION_FIELDS_LIST(DECLARE_INSTRUCTION_FIELDS_OFFSETS)
+SYSTEM_REGISTER_FIELDS_LIST(DECLARE_FIELDS_OFFSETS, NOTHING)
+#undef NOTHING
+#undef DECLARE_FIELDS_OFFSETS
+#undef DECLARE_INSTRUCTION_FIELDS_OFFSETS
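+
+// For example, the Rd field (bits 4:0) expands to:
+//   const int Rd_offset = 0;
+//   const int Rd_width = 5;
+//   const uint32_t Rd_mask = 0x0000001F;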
+
+// ImmPCRel is a compound field (not present in INSTRUCTION_FIELDS_LIST), formed
+// from ImmPCRelLo and ImmPCRelHi.
+const int ImmPCRel_mask = ImmPCRelLo_mask | ImmPCRelHi_mask;
+
+// Condition codes.
+enum Condition {
+ eq = 0,
+ ne = 1,
+ hs = 2,
+ lo = 3,
+ mi = 4,
+ pl = 5,
+ vs = 6,
+ vc = 7,
+ hi = 8,
+ ls = 9,
+ ge = 10,
+ lt = 11,
+ gt = 12,
+ le = 13,
+ al = 14,
+ nv = 15 // Behaves as always/al.
+};
+
+inline Condition InvertCondition(Condition cond) {
+ // Conditions al and nv behave identically, as "always true". They can't be
+ // inverted, because there is no never condition.
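+  // Each condition is encoded adjacent to its inverse, so flipping bit 0
+  // inverts it; for example, eq (0) ^ 1 == ne (1) and ge (10) ^ 1 == lt (11).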
+ ASSERT((cond != al) && (cond != nv));
+ return static_cast<Condition>(cond ^ 1);
+}
+
+// Corresponds to transposing the operands of a comparison.
+inline Condition ReverseConditionForCmp(Condition cond) {
+ switch (cond) {
+ case lo:
+ return hi;
+ case hi:
+ return lo;
+ case hs:
+ return ls;
+ case ls:
+ return hs;
+ case lt:
+ return gt;
+ case gt:
+ return lt;
+ case ge:
+ return le;
+ case le:
+ return ge;
+ case eq:
+ return eq;
+ default:
+ // In practice this function is only used with a condition coming from
+ // TokenToCondition in lithium-codegen-arm64.cc. Any other condition is
+      // invalid as it doesn't necessarily make sense to reverse it (consider
+ // 'mi' for instance).
+ UNREACHABLE();
+ return nv;
+ };
+}
+
+enum FlagsUpdate {
+ SetFlags = 1,
+ LeaveFlags = 0
+};
+
+enum StatusFlags {
+ NoFlag = 0,
+
+ // Derive the flag combinations from the system register bit descriptions.
+ NFlag = N_mask,
+ ZFlag = Z_mask,
+ CFlag = C_mask,
+ VFlag = V_mask,
+ NZFlag = NFlag | ZFlag,
+ NCFlag = NFlag | CFlag,
+ NVFlag = NFlag | VFlag,
+ ZCFlag = ZFlag | CFlag,
+ ZVFlag = ZFlag | VFlag,
+ CVFlag = CFlag | VFlag,
+ NZCFlag = NFlag | ZFlag | CFlag,
+ NZVFlag = NFlag | ZFlag | VFlag,
+ NCVFlag = NFlag | CFlag | VFlag,
+ ZCVFlag = ZFlag | CFlag | VFlag,
+ NZCVFlag = NFlag | ZFlag | CFlag | VFlag,
+
+ // Floating-point comparison results.
+ FPEqualFlag = ZCFlag,
+ FPLessThanFlag = NFlag,
+ FPGreaterThanFlag = CFlag,
+ FPUnorderedFlag = CVFlag
+};
+
+enum Shift {
+ NO_SHIFT = -1,
+ LSL = 0x0,
+ LSR = 0x1,
+ ASR = 0x2,
+ ROR = 0x3
+};
+
+enum Extend {
+ NO_EXTEND = -1,
+ UXTB = 0,
+ UXTH = 1,
+ UXTW = 2,
+ UXTX = 3,
+ SXTB = 4,
+ SXTH = 5,
+ SXTW = 6,
+ SXTX = 7
+};
+
+enum SystemHint {
+ NOP = 0,
+ YIELD = 1,
+ WFE = 2,
+ WFI = 3,
+ SEV = 4,
+ SEVL = 5
+};
+
+enum BarrierDomain {
+ OuterShareable = 0,
+ NonShareable = 1,
+ InnerShareable = 2,
+ FullSystem = 3
+};
+
+enum BarrierType {
+ BarrierOther = 0,
+ BarrierReads = 1,
+ BarrierWrites = 2,
+ BarrierAll = 3
+};
+
+// System/special register names.
+// This information is not encoded as one field but as the concatenation of
+// multiple fields (Op0<0>, Op1, Crn, Crm, Op2).
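+// With the field offsets defined above, the encodings below evaluate to
+// NZCV == 0x5A10 and FPCR == 0x5A20.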
+enum SystemRegister {
+ NZCV = ((0x1 << SysO0_offset) |
+ (0x3 << SysOp1_offset) |
+ (0x4 << CRn_offset) |
+ (0x2 << CRm_offset) |
+ (0x0 << SysOp2_offset)) >> ImmSystemRegister_offset,
+ FPCR = ((0x1 << SysO0_offset) |
+ (0x3 << SysOp1_offset) |
+ (0x4 << CRn_offset) |
+ (0x4 << CRm_offset) |
+ (0x0 << SysOp2_offset)) >> ImmSystemRegister_offset
+};
+
+// Instruction enumerations.
+//
+// These are the masks that define a class of instructions, and the list of
+// instructions within each class. Each enumeration has a Fixed, FMask and
+// Mask value.
+//
+// Fixed: The fixed bits in this instruction class.
+// FMask: The mask used to extract the fixed bits in the class.
+// Mask: The mask used to identify the instructions within a class.
+//
+// The enumerations can be used like this:
+//
+// ASSERT(instr->Mask(PCRelAddressingFMask) == PCRelAddressingFixed);
+// switch(instr->Mask(PCRelAddressingMask)) {
+// case ADR: Format("adr 'Xd, 'AddrPCRelByte"); break;
+// case ADRP: Format("adrp 'Xd, 'AddrPCRelPage"); break;
+// default: printf("Unknown instruction\n");
+// }
+
+
+// Generic fields.
+enum GenericInstrField {
+ SixtyFourBits = 0x80000000,
+ ThirtyTwoBits = 0x00000000,
+ FP32 = 0x00000000,
+ FP64 = 0x00400000
+};
+
+// PC relative addressing.
+enum PCRelAddressingOp {
+ PCRelAddressingFixed = 0x10000000,
+ PCRelAddressingFMask = 0x1F000000,
+ PCRelAddressingMask = 0x9F000000,
+ ADR = PCRelAddressingFixed | 0x00000000,
+ ADRP = PCRelAddressingFixed | 0x80000000
+};
+
+// Add/sub (immediate, shifted and extended.)
+const int kSFOffset = 31;
+enum AddSubOp {
+ AddSubOpMask = 0x60000000,
+ AddSubSetFlagsBit = 0x20000000,
+ ADD = 0x00000000,
+ ADDS = ADD | AddSubSetFlagsBit,
+ SUB = 0x40000000,
+ SUBS = SUB | AddSubSetFlagsBit
+};
+
+#define ADD_SUB_OP_LIST(V) \
+ V(ADD), \
+ V(ADDS), \
+ V(SUB), \
+ V(SUBS)
+
+enum AddSubImmediateOp {
+ AddSubImmediateFixed = 0x11000000,
+ AddSubImmediateFMask = 0x1F000000,
+ AddSubImmediateMask = 0xFF000000,
+ #define ADD_SUB_IMMEDIATE(A) \
+ A##_w_imm = AddSubImmediateFixed | A, \
+ A##_x_imm = AddSubImmediateFixed | A | SixtyFourBits
+ ADD_SUB_OP_LIST(ADD_SUB_IMMEDIATE)
+ #undef ADD_SUB_IMMEDIATE
+};
+
+enum AddSubShiftedOp {
+ AddSubShiftedFixed = 0x0B000000,
+ AddSubShiftedFMask = 0x1F200000,
+ AddSubShiftedMask = 0xFF200000,
+ #define ADD_SUB_SHIFTED(A) \
+ A##_w_shift = AddSubShiftedFixed | A, \
+ A##_x_shift = AddSubShiftedFixed | A | SixtyFourBits
+ ADD_SUB_OP_LIST(ADD_SUB_SHIFTED)
+ #undef ADD_SUB_SHIFTED
+};
+
+enum AddSubExtendedOp {
+ AddSubExtendedFixed = 0x0B200000,
+ AddSubExtendedFMask = 0x1F200000,
+ AddSubExtendedMask = 0xFFE00000,
+ #define ADD_SUB_EXTENDED(A) \
+ A##_w_ext = AddSubExtendedFixed | A, \
+ A##_x_ext = AddSubExtendedFixed | A | SixtyFourBits
+ ADD_SUB_OP_LIST(ADD_SUB_EXTENDED)
+ #undef ADD_SUB_EXTENDED
+};
+
+// Add/sub with carry.
+enum AddSubWithCarryOp {
+ AddSubWithCarryFixed = 0x1A000000,
+ AddSubWithCarryFMask = 0x1FE00000,
+ AddSubWithCarryMask = 0xFFE0FC00,
+ ADC_w = AddSubWithCarryFixed | ADD,
+ ADC_x = AddSubWithCarryFixed | ADD | SixtyFourBits,
+ ADC = ADC_w,
+ ADCS_w = AddSubWithCarryFixed | ADDS,
+ ADCS_x = AddSubWithCarryFixed | ADDS | SixtyFourBits,
+ SBC_w = AddSubWithCarryFixed | SUB,
+ SBC_x = AddSubWithCarryFixed | SUB | SixtyFourBits,
+ SBC = SBC_w,
+ SBCS_w = AddSubWithCarryFixed | SUBS,
+ SBCS_x = AddSubWithCarryFixed | SUBS | SixtyFourBits
+};
+
+
+// Logical (immediate and shifted register).
+enum LogicalOp {
+ LogicalOpMask = 0x60200000,
+ NOT = 0x00200000,
+ AND = 0x00000000,
+ BIC = AND | NOT,
+ ORR = 0x20000000,
+ ORN = ORR | NOT,
+ EOR = 0x40000000,
+ EON = EOR | NOT,
+ ANDS = 0x60000000,
+ BICS = ANDS | NOT
+};
+
+// Logical immediate.
+enum LogicalImmediateOp {
+ LogicalImmediateFixed = 0x12000000,
+ LogicalImmediateFMask = 0x1F800000,
+ LogicalImmediateMask = 0xFF800000,
+ AND_w_imm = LogicalImmediateFixed | AND,
+ AND_x_imm = LogicalImmediateFixed | AND | SixtyFourBits,
+ ORR_w_imm = LogicalImmediateFixed | ORR,
+ ORR_x_imm = LogicalImmediateFixed | ORR | SixtyFourBits,
+ EOR_w_imm = LogicalImmediateFixed | EOR,
+ EOR_x_imm = LogicalImmediateFixed | EOR | SixtyFourBits,
+ ANDS_w_imm = LogicalImmediateFixed | ANDS,
+ ANDS_x_imm = LogicalImmediateFixed | ANDS | SixtyFourBits
+};
+
+// Logical shifted register.
+enum LogicalShiftedOp {
+ LogicalShiftedFixed = 0x0A000000,
+ LogicalShiftedFMask = 0x1F000000,
+ LogicalShiftedMask = 0xFF200000,
+ AND_w = LogicalShiftedFixed | AND,
+ AND_x = LogicalShiftedFixed | AND | SixtyFourBits,
+ AND_shift = AND_w,
+ BIC_w = LogicalShiftedFixed | BIC,
+ BIC_x = LogicalShiftedFixed | BIC | SixtyFourBits,
+ BIC_shift = BIC_w,
+ ORR_w = LogicalShiftedFixed | ORR,
+ ORR_x = LogicalShiftedFixed | ORR | SixtyFourBits,
+ ORR_shift = ORR_w,
+ ORN_w = LogicalShiftedFixed | ORN,
+ ORN_x = LogicalShiftedFixed | ORN | SixtyFourBits,
+ ORN_shift = ORN_w,
+ EOR_w = LogicalShiftedFixed | EOR,
+ EOR_x = LogicalShiftedFixed | EOR | SixtyFourBits,
+ EOR_shift = EOR_w,
+ EON_w = LogicalShiftedFixed | EON,
+ EON_x = LogicalShiftedFixed | EON | SixtyFourBits,
+ EON_shift = EON_w,
+ ANDS_w = LogicalShiftedFixed | ANDS,
+ ANDS_x = LogicalShiftedFixed | ANDS | SixtyFourBits,
+ ANDS_shift = ANDS_w,
+ BICS_w = LogicalShiftedFixed | BICS,
+ BICS_x = LogicalShiftedFixed | BICS | SixtyFourBits,
+ BICS_shift = BICS_w
+};
+
+// Move wide immediate.
+enum MoveWideImmediateOp {
+ MoveWideImmediateFixed = 0x12800000,
+ MoveWideImmediateFMask = 0x1F800000,
+ MoveWideImmediateMask = 0xFF800000,
+ MOVN = 0x00000000,
+ MOVZ = 0x40000000,
+ MOVK = 0x60000000,
+ MOVN_w = MoveWideImmediateFixed | MOVN,
+ MOVN_x = MoveWideImmediateFixed | MOVN | SixtyFourBits,
+ MOVZ_w = MoveWideImmediateFixed | MOVZ,
+ MOVZ_x = MoveWideImmediateFixed | MOVZ | SixtyFourBits,
+ MOVK_w = MoveWideImmediateFixed | MOVK,
+ MOVK_x = MoveWideImmediateFixed | MOVK | SixtyFourBits
+};
+
+// Bitfield.
+const int kBitfieldNOffset = 22;
+enum BitfieldOp {
+ BitfieldFixed = 0x13000000,
+ BitfieldFMask = 0x1F800000,
+ BitfieldMask = 0xFF800000,
+ SBFM_w = BitfieldFixed | 0x00000000,
+ SBFM_x = BitfieldFixed | 0x80000000,
+ SBFM = SBFM_w,
+ BFM_w = BitfieldFixed | 0x20000000,
+ BFM_x = BitfieldFixed | 0xA0000000,
+ BFM = BFM_w,
+ UBFM_w = BitfieldFixed | 0x40000000,
+ UBFM_x = BitfieldFixed | 0xC0000000,
+ UBFM = UBFM_w
+ // Bitfield N field.
+};
+
+// Extract.
+enum ExtractOp {
+ ExtractFixed = 0x13800000,
+ ExtractFMask = 0x1F800000,
+ ExtractMask = 0xFFA00000,
+ EXTR_w = ExtractFixed | 0x00000000,
+ EXTR_x = ExtractFixed | 0x80000000,
+ EXTR = EXTR_w
+};
+
+// Unconditional branch.
+enum UnconditionalBranchOp {
+ UnconditionalBranchFixed = 0x14000000,
+ UnconditionalBranchFMask = 0x7C000000,
+ UnconditionalBranchMask = 0xFC000000,
+ B = UnconditionalBranchFixed | 0x00000000,
+ BL = UnconditionalBranchFixed | 0x80000000
+};
+
+// Unconditional branch to register.
+enum UnconditionalBranchToRegisterOp {
+ UnconditionalBranchToRegisterFixed = 0xD6000000,
+ UnconditionalBranchToRegisterFMask = 0xFE000000,
+ UnconditionalBranchToRegisterMask = 0xFFFFFC1F,
+ BR = UnconditionalBranchToRegisterFixed | 0x001F0000,
+ BLR = UnconditionalBranchToRegisterFixed | 0x003F0000,
+ RET = UnconditionalBranchToRegisterFixed | 0x005F0000
+};
+
+// Compare and branch.
+enum CompareBranchOp {
+ CompareBranchFixed = 0x34000000,
+ CompareBranchFMask = 0x7E000000,
+ CompareBranchMask = 0xFF000000,
+ CBZ_w = CompareBranchFixed | 0x00000000,
+ CBZ_x = CompareBranchFixed | 0x80000000,
+ CBZ = CBZ_w,
+ CBNZ_w = CompareBranchFixed | 0x01000000,
+ CBNZ_x = CompareBranchFixed | 0x81000000,
+ CBNZ = CBNZ_w
+};
+
+// Test and branch.
+enum TestBranchOp {
+ TestBranchFixed = 0x36000000,
+ TestBranchFMask = 0x7E000000,
+ TestBranchMask = 0x7F000000,
+ TBZ = TestBranchFixed | 0x00000000,
+ TBNZ = TestBranchFixed | 0x01000000
+};
+
+// Conditional branch.
+enum ConditionalBranchOp {
+ ConditionalBranchFixed = 0x54000000,
+ ConditionalBranchFMask = 0xFE000000,
+ ConditionalBranchMask = 0xFF000010,
+ B_cond = ConditionalBranchFixed | 0x00000000
+};
+
+// System.
+// System instruction encoding is complicated because some instructions use op
+// and CR fields to encode parameters. To handle this cleanly, the system
+// instructions are split into more than one enum.
+
+enum SystemOp {
+ SystemFixed = 0xD5000000,
+ SystemFMask = 0xFFC00000
+};
+
+enum SystemSysRegOp {
+ SystemSysRegFixed = 0xD5100000,
+ SystemSysRegFMask = 0xFFD00000,
+ SystemSysRegMask = 0xFFF00000,
+ MRS = SystemSysRegFixed | 0x00200000,
+ MSR = SystemSysRegFixed | 0x00000000
+};
+
+enum SystemHintOp {
+ SystemHintFixed = 0xD503201F,
+ SystemHintFMask = 0xFFFFF01F,
+ SystemHintMask = 0xFFFFF01F,
+ HINT = SystemHintFixed | 0x00000000
+};
+
+// Exception.
+enum ExceptionOp {
+ ExceptionFixed = 0xD4000000,
+ ExceptionFMask = 0xFF000000,
+ ExceptionMask = 0xFFE0001F,
+ HLT = ExceptionFixed | 0x00400000,
+ BRK = ExceptionFixed | 0x00200000,
+ SVC = ExceptionFixed | 0x00000001,
+ HVC = ExceptionFixed | 0x00000002,
+ SMC = ExceptionFixed | 0x00000003,
+ DCPS1 = ExceptionFixed | 0x00A00001,
+ DCPS2 = ExceptionFixed | 0x00A00002,
+ DCPS3 = ExceptionFixed | 0x00A00003
+};
+// Code used to spot hlt instructions that should not be hit.
+const int kHltBadCode = 0xbad;
+
+enum MemBarrierOp {
+ MemBarrierFixed = 0xD503309F,
+ MemBarrierFMask = 0xFFFFF09F,
+ MemBarrierMask = 0xFFFFF0FF,
+ DSB = MemBarrierFixed | 0x00000000,
+ DMB = MemBarrierFixed | 0x00000020,
+ ISB = MemBarrierFixed | 0x00000040
+};
+
+// Any load or store (including pair).
+enum LoadStoreAnyOp {
+ LoadStoreAnyFMask = 0x0a000000,
+ LoadStoreAnyFixed = 0x08000000
+};
+
+// Any load pair or store pair.
+enum LoadStorePairAnyOp {
+ LoadStorePairAnyFMask = 0x3a000000,
+ LoadStorePairAnyFixed = 0x28000000
+};
+
+#define LOAD_STORE_PAIR_OP_LIST(V) \
+ V(STP, w, 0x00000000), \
+ V(LDP, w, 0x00400000), \
+ V(LDPSW, x, 0x40400000), \
+ V(STP, x, 0x80000000), \
+ V(LDP, x, 0x80400000), \
+ V(STP, s, 0x04000000), \
+ V(LDP, s, 0x04400000), \
+ V(STP, d, 0x44000000), \
+ V(LDP, d, 0x44400000)
+
+// Load/store pair (post, pre and offset.)
+enum LoadStorePairOp {
+ LoadStorePairMask = 0xC4400000,
+ LoadStorePairLBit = 1 << 22,
+ #define LOAD_STORE_PAIR(A, B, C) \
+ A##_##B = C
+ LOAD_STORE_PAIR_OP_LIST(LOAD_STORE_PAIR)
+ #undef LOAD_STORE_PAIR
+};
+
+enum LoadStorePairPostIndexOp {
+ LoadStorePairPostIndexFixed = 0x28800000,
+ LoadStorePairPostIndexFMask = 0x3B800000,
+ LoadStorePairPostIndexMask = 0xFFC00000,
+ #define LOAD_STORE_PAIR_POST_INDEX(A, B, C) \
+ A##_##B##_post = LoadStorePairPostIndexFixed | A##_##B
+ LOAD_STORE_PAIR_OP_LIST(LOAD_STORE_PAIR_POST_INDEX)
+ #undef LOAD_STORE_PAIR_POST_INDEX
+};
+
+enum LoadStorePairPreIndexOp {
+ LoadStorePairPreIndexFixed = 0x29800000,
+ LoadStorePairPreIndexFMask = 0x3B800000,
+ LoadStorePairPreIndexMask = 0xFFC00000,
+ #define LOAD_STORE_PAIR_PRE_INDEX(A, B, C) \
+ A##_##B##_pre = LoadStorePairPreIndexFixed | A##_##B
+ LOAD_STORE_PAIR_OP_LIST(LOAD_STORE_PAIR_PRE_INDEX)
+ #undef LOAD_STORE_PAIR_PRE_INDEX
+};
+
+enum LoadStorePairOffsetOp {
+ LoadStorePairOffsetFixed = 0x29000000,
+ LoadStorePairOffsetFMask = 0x3B800000,
+ LoadStorePairOffsetMask = 0xFFC00000,
+ #define LOAD_STORE_PAIR_OFFSET(A, B, C) \
+ A##_##B##_off = LoadStorePairOffsetFixed | A##_##B
+ LOAD_STORE_PAIR_OP_LIST(LOAD_STORE_PAIR_OFFSET)
+ #undef LOAD_STORE_PAIR_OFFSET
+};
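+// As an example of the expansion pattern above, LDP_x_off evaluates to
+// LoadStorePairOffsetFixed | LDP_x == 0x29000000 | 0x80400000 == 0xA9400000,
+// and STP_x_off to 0xA9000000.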
+
+enum LoadStorePairNonTemporalOp {
+ LoadStorePairNonTemporalFixed = 0x28000000,
+ LoadStorePairNonTemporalFMask = 0x3B800000,
+ LoadStorePairNonTemporalMask = 0xFFC00000,
+ STNP_w = LoadStorePairNonTemporalFixed | STP_w,
+ LDNP_w = LoadStorePairNonTemporalFixed | LDP_w,
+ STNP_x = LoadStorePairNonTemporalFixed | STP_x,
+ LDNP_x = LoadStorePairNonTemporalFixed | LDP_x,
+ STNP_s = LoadStorePairNonTemporalFixed | STP_s,
+ LDNP_s = LoadStorePairNonTemporalFixed | LDP_s,
+ STNP_d = LoadStorePairNonTemporalFixed | STP_d,
+ LDNP_d = LoadStorePairNonTemporalFixed | LDP_d
+};
+
+// Load literal.
+enum LoadLiteralOp {
+ LoadLiteralFixed = 0x18000000,
+ LoadLiteralFMask = 0x3B000000,
+ LoadLiteralMask = 0xFF000000,
+ LDR_w_lit = LoadLiteralFixed | 0x00000000,
+ LDR_x_lit = LoadLiteralFixed | 0x40000000,
+ LDRSW_x_lit = LoadLiteralFixed | 0x80000000,
+ PRFM_lit = LoadLiteralFixed | 0xC0000000,
+ LDR_s_lit = LoadLiteralFixed | 0x04000000,
+ LDR_d_lit = LoadLiteralFixed | 0x44000000
+};
+
+#define LOAD_STORE_OP_LIST(V) \
+ V(ST, RB, w, 0x00000000), \
+ V(ST, RH, w, 0x40000000), \
+ V(ST, R, w, 0x80000000), \
+ V(ST, R, x, 0xC0000000), \
+ V(LD, RB, w, 0x00400000), \
+ V(LD, RH, w, 0x40400000), \
+ V(LD, R, w, 0x80400000), \
+ V(LD, R, x, 0xC0400000), \
+ V(LD, RSB, x, 0x00800000), \
+ V(LD, RSH, x, 0x40800000), \
+ V(LD, RSW, x, 0x80800000), \
+ V(LD, RSB, w, 0x00C00000), \
+ V(LD, RSH, w, 0x40C00000), \
+ V(ST, R, s, 0x84000000), \
+ V(ST, R, d, 0xC4000000), \
+ V(LD, R, s, 0x84400000), \
+ V(LD, R, d, 0xC4400000)
+
+
+// Load/store unscaled offset.
+enum LoadStoreUnscaledOffsetOp {
+ LoadStoreUnscaledOffsetFixed = 0x38000000,
+ LoadStoreUnscaledOffsetFMask = 0x3B200C00,
+ LoadStoreUnscaledOffsetMask = 0xFFE00C00,
+ #define LOAD_STORE_UNSCALED(A, B, C, D) \
+ A##U##B##_##C = LoadStoreUnscaledOffsetFixed | D
+ LOAD_STORE_OP_LIST(LOAD_STORE_UNSCALED)
+ #undef LOAD_STORE_UNSCALED
+};
+
+// Load/store (post, pre, offset and unsigned.)
+enum LoadStoreOp {
+ LoadStoreOpMask = 0xC4C00000,
+ #define LOAD_STORE(A, B, C, D) \
+ A##B##_##C = D
+ LOAD_STORE_OP_LIST(LOAD_STORE),
+ #undef LOAD_STORE
+ PRFM = 0xC0800000
+};
+
+// Load/store post index.
+enum LoadStorePostIndex {
+ LoadStorePostIndexFixed = 0x38000400,
+ LoadStorePostIndexFMask = 0x3B200C00,
+ LoadStorePostIndexMask = 0xFFE00C00,
+ #define LOAD_STORE_POST_INDEX(A, B, C, D) \
+ A##B##_##C##_post = LoadStorePostIndexFixed | D
+ LOAD_STORE_OP_LIST(LOAD_STORE_POST_INDEX)
+ #undef LOAD_STORE_POST_INDEX
+};
+
+// Load/store pre index.
+enum LoadStorePreIndex {
+ LoadStorePreIndexFixed = 0x38000C00,
+ LoadStorePreIndexFMask = 0x3B200C00,
+ LoadStorePreIndexMask = 0xFFE00C00,
+ #define LOAD_STORE_PRE_INDEX(A, B, C, D) \
+ A##B##_##C##_pre = LoadStorePreIndexFixed | D
+ LOAD_STORE_OP_LIST(LOAD_STORE_PRE_INDEX)
+ #undef LOAD_STORE_PRE_INDEX
+};
+
+// Load/store unsigned offset.
+enum LoadStoreUnsignedOffset {
+ LoadStoreUnsignedOffsetFixed = 0x39000000,
+ LoadStoreUnsignedOffsetFMask = 0x3B000000,
+ LoadStoreUnsignedOffsetMask = 0xFFC00000,
+ PRFM_unsigned = LoadStoreUnsignedOffsetFixed | PRFM,
+ #define LOAD_STORE_UNSIGNED_OFFSET(A, B, C, D) \
+ A##B##_##C##_unsigned = LoadStoreUnsignedOffsetFixed | D
+ LOAD_STORE_OP_LIST(LOAD_STORE_UNSIGNED_OFFSET)
+ #undef LOAD_STORE_UNSIGNED_OFFSET
+};
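+// For example, LDR_x_unsigned evaluates to
+// LoadStoreUnsignedOffsetFixed | 0xC0400000 == 0xF9400000, and STR_x_unsigned
+// to 0xF9000000.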
+
+// Load/store register offset.
+enum LoadStoreRegisterOffset {
+ LoadStoreRegisterOffsetFixed = 0x38200800,
+ LoadStoreRegisterOffsetFMask = 0x3B200C00,
+ LoadStoreRegisterOffsetMask = 0xFFE00C00,
+ PRFM_reg = LoadStoreRegisterOffsetFixed | PRFM,
+ #define LOAD_STORE_REGISTER_OFFSET(A, B, C, D) \
+ A##B##_##C##_reg = LoadStoreRegisterOffsetFixed | D
+ LOAD_STORE_OP_LIST(LOAD_STORE_REGISTER_OFFSET)
+ #undef LOAD_STORE_REGISTER_OFFSET
+};
+
+// Conditional compare.
+enum ConditionalCompareOp {
+ ConditionalCompareMask = 0x60000000,
+ CCMN = 0x20000000,
+ CCMP = 0x60000000
+};
+
+// Conditional compare register.
+enum ConditionalCompareRegisterOp {
+ ConditionalCompareRegisterFixed = 0x1A400000,
+ ConditionalCompareRegisterFMask = 0x1FE00800,
+ ConditionalCompareRegisterMask = 0xFFE00C10,
+ CCMN_w = ConditionalCompareRegisterFixed | CCMN,
+ CCMN_x = ConditionalCompareRegisterFixed | SixtyFourBits | CCMN,
+ CCMP_w = ConditionalCompareRegisterFixed | CCMP,
+ CCMP_x = ConditionalCompareRegisterFixed | SixtyFourBits | CCMP
+};
+
+// Conditional compare immediate.
+enum ConditionalCompareImmediateOp {
+ ConditionalCompareImmediateFixed = 0x1A400800,
+ ConditionalCompareImmediateFMask = 0x1FE00800,
+ ConditionalCompareImmediateMask = 0xFFE00C10,
+ CCMN_w_imm = ConditionalCompareImmediateFixed | CCMN,
+ CCMN_x_imm = ConditionalCompareImmediateFixed | SixtyFourBits | CCMN,
+ CCMP_w_imm = ConditionalCompareImmediateFixed | CCMP,
+ CCMP_x_imm = ConditionalCompareImmediateFixed | SixtyFourBits | CCMP
+};
+
+// Conditional select.
+enum ConditionalSelectOp {
+ ConditionalSelectFixed = 0x1A800000,
+ ConditionalSelectFMask = 0x1FE00000,
+ ConditionalSelectMask = 0xFFE00C00,
+ CSEL_w = ConditionalSelectFixed | 0x00000000,
+ CSEL_x = ConditionalSelectFixed | 0x80000000,
+ CSEL = CSEL_w,
+ CSINC_w = ConditionalSelectFixed | 0x00000400,
+ CSINC_x = ConditionalSelectFixed | 0x80000400,
+ CSINC = CSINC_w,
+ CSINV_w = ConditionalSelectFixed | 0x40000000,
+ CSINV_x = ConditionalSelectFixed | 0xC0000000,
+ CSINV = CSINV_w,
+ CSNEG_w = ConditionalSelectFixed | 0x40000400,
+ CSNEG_x = ConditionalSelectFixed | 0xC0000400,
+ CSNEG = CSNEG_w
+};
+
+// Data processing 1 source.
+enum DataProcessing1SourceOp {
+ DataProcessing1SourceFixed = 0x5AC00000,
+ DataProcessing1SourceFMask = 0x5FE00000,
+ DataProcessing1SourceMask = 0xFFFFFC00,
+ RBIT = DataProcessing1SourceFixed | 0x00000000,
+ RBIT_w = RBIT,
+ RBIT_x = RBIT | SixtyFourBits,
+ REV16 = DataProcessing1SourceFixed | 0x00000400,
+ REV16_w = REV16,
+ REV16_x = REV16 | SixtyFourBits,
+ REV = DataProcessing1SourceFixed | 0x00000800,
+ REV_w = REV,
+ REV32_x = REV | SixtyFourBits,
+ REV_x = DataProcessing1SourceFixed | SixtyFourBits | 0x00000C00,
+ CLZ = DataProcessing1SourceFixed | 0x00001000,
+ CLZ_w = CLZ,
+ CLZ_x = CLZ | SixtyFourBits,
+ CLS = DataProcessing1SourceFixed | 0x00001400,
+ CLS_w = CLS,
+ CLS_x = CLS | SixtyFourBits
+};
+
+// Data processing 2 source.
+enum DataProcessing2SourceOp {
+ DataProcessing2SourceFixed = 0x1AC00000,
+ DataProcessing2SourceFMask = 0x5FE00000,
+ DataProcessing2SourceMask = 0xFFE0FC00,
+ UDIV_w = DataProcessing2SourceFixed | 0x00000800,
+ UDIV_x = DataProcessing2SourceFixed | 0x80000800,
+ UDIV = UDIV_w,
+ SDIV_w = DataProcessing2SourceFixed | 0x00000C00,
+ SDIV_x = DataProcessing2SourceFixed | 0x80000C00,
+ SDIV = SDIV_w,
+ LSLV_w = DataProcessing2SourceFixed | 0x00002000,
+ LSLV_x = DataProcessing2SourceFixed | 0x80002000,
+ LSLV = LSLV_w,
+ LSRV_w = DataProcessing2SourceFixed | 0x00002400,
+ LSRV_x = DataProcessing2SourceFixed | 0x80002400,
+ LSRV = LSRV_w,
+ ASRV_w = DataProcessing2SourceFixed | 0x00002800,
+ ASRV_x = DataProcessing2SourceFixed | 0x80002800,
+ ASRV = ASRV_w,
+ RORV_w = DataProcessing2SourceFixed | 0x00002C00,
+ RORV_x = DataProcessing2SourceFixed | 0x80002C00,
+ RORV = RORV_w,
+ CRC32B = DataProcessing2SourceFixed | 0x00004000,
+ CRC32H = DataProcessing2SourceFixed | 0x00004400,
+ CRC32W = DataProcessing2SourceFixed | 0x00004800,
+ CRC32X = DataProcessing2SourceFixed | SixtyFourBits | 0x00004C00,
+ CRC32CB = DataProcessing2SourceFixed | 0x00005000,
+ CRC32CH = DataProcessing2SourceFixed | 0x00005400,
+ CRC32CW = DataProcessing2SourceFixed | 0x00005800,
+ CRC32CX = DataProcessing2SourceFixed | SixtyFourBits | 0x00005C00
+};
+
+// Data processing 3 source.
+enum DataProcessing3SourceOp {
+ DataProcessing3SourceFixed = 0x1B000000,
+ DataProcessing3SourceFMask = 0x1F000000,
+ DataProcessing3SourceMask = 0xFFE08000,
+ MADD_w = DataProcessing3SourceFixed | 0x00000000,
+ MADD_x = DataProcessing3SourceFixed | 0x80000000,
+ MADD = MADD_w,
+ MSUB_w = DataProcessing3SourceFixed | 0x00008000,
+ MSUB_x = DataProcessing3SourceFixed | 0x80008000,
+ MSUB = MSUB_w,
+ SMADDL_x = DataProcessing3SourceFixed | 0x80200000,
+ SMSUBL_x = DataProcessing3SourceFixed | 0x80208000,
+ SMULH_x = DataProcessing3SourceFixed | 0x80400000,
+ UMADDL_x = DataProcessing3SourceFixed | 0x80A00000,
+ UMSUBL_x = DataProcessing3SourceFixed | 0x80A08000,
+ UMULH_x = DataProcessing3SourceFixed | 0x80C00000
+};
+
+// Floating point compare.
+enum FPCompareOp {
+ FPCompareFixed = 0x1E202000,
+ FPCompareFMask = 0x5F203C00,
+ FPCompareMask = 0xFFE0FC1F,
+ FCMP_s = FPCompareFixed | 0x00000000,
+ FCMP_d = FPCompareFixed | FP64 | 0x00000000,
+ FCMP = FCMP_s,
+ FCMP_s_zero = FPCompareFixed | 0x00000008,
+ FCMP_d_zero = FPCompareFixed | FP64 | 0x00000008,
+ FCMP_zero = FCMP_s_zero,
+ FCMPE_s = FPCompareFixed | 0x00000010,
+ FCMPE_d = FPCompareFixed | FP64 | 0x00000010,
+ FCMPE_s_zero = FPCompareFixed | 0x00000018,
+ FCMPE_d_zero = FPCompareFixed | FP64 | 0x00000018
+};
+
+// Floating point conditional compare.
+enum FPConditionalCompareOp {
+ FPConditionalCompareFixed = 0x1E200400,
+ FPConditionalCompareFMask = 0x5F200C00,
+ FPConditionalCompareMask = 0xFFE00C10,
+ FCCMP_s = FPConditionalCompareFixed | 0x00000000,
+ FCCMP_d = FPConditionalCompareFixed | FP64 | 0x00000000,
+ FCCMP = FCCMP_s,
+ FCCMPE_s = FPConditionalCompareFixed | 0x00000010,
+ FCCMPE_d = FPConditionalCompareFixed | FP64 | 0x00000010,
+ FCCMPE = FCCMPE_s
+};
+
+// Floating point conditional select.
+enum FPConditionalSelectOp {
+ FPConditionalSelectFixed = 0x1E200C00,
+ FPConditionalSelectFMask = 0x5F200C00,
+ FPConditionalSelectMask = 0xFFE00C00,
+ FCSEL_s = FPConditionalSelectFixed | 0x00000000,
+ FCSEL_d = FPConditionalSelectFixed | FP64 | 0x00000000,
+ FCSEL = FCSEL_s
+};
+
+// Floating point immediate.
+enum FPImmediateOp {
+ FPImmediateFixed = 0x1E201000,
+ FPImmediateFMask = 0x5F201C00,
+ FPImmediateMask = 0xFFE01C00,
+ FMOV_s_imm = FPImmediateFixed | 0x00000000,
+ FMOV_d_imm = FPImmediateFixed | FP64 | 0x00000000
+};
+
+// Floating point data processing 1 source.
+enum FPDataProcessing1SourceOp {
+ FPDataProcessing1SourceFixed = 0x1E204000,
+ FPDataProcessing1SourceFMask = 0x5F207C00,
+ FPDataProcessing1SourceMask = 0xFFFFFC00,
+ FMOV_s = FPDataProcessing1SourceFixed | 0x00000000,
+ FMOV_d = FPDataProcessing1SourceFixed | FP64 | 0x00000000,
+ FMOV = FMOV_s,
+ FABS_s = FPDataProcessing1SourceFixed | 0x00008000,
+ FABS_d = FPDataProcessing1SourceFixed | FP64 | 0x00008000,
+ FABS = FABS_s,
+ FNEG_s = FPDataProcessing1SourceFixed | 0x00010000,
+ FNEG_d = FPDataProcessing1SourceFixed | FP64 | 0x00010000,
+ FNEG = FNEG_s,
+ FSQRT_s = FPDataProcessing1SourceFixed | 0x00018000,
+ FSQRT_d = FPDataProcessing1SourceFixed | FP64 | 0x00018000,
+ FSQRT = FSQRT_s,
+ FCVT_ds = FPDataProcessing1SourceFixed | 0x00028000,
+ FCVT_sd = FPDataProcessing1SourceFixed | FP64 | 0x00020000,
+ FRINTN_s = FPDataProcessing1SourceFixed | 0x00040000,
+ FRINTN_d = FPDataProcessing1SourceFixed | FP64 | 0x00040000,
+ FRINTN = FRINTN_s,
+ FRINTP_s = FPDataProcessing1SourceFixed | 0x00048000,
+ FRINTP_d = FPDataProcessing1SourceFixed | FP64 | 0x00048000,
+ FRINTP = FRINTP_s,
+ FRINTM_s = FPDataProcessing1SourceFixed | 0x00050000,
+ FRINTM_d = FPDataProcessing1SourceFixed | FP64 | 0x00050000,
+ FRINTM = FRINTM_s,
+ FRINTZ_s = FPDataProcessing1SourceFixed | 0x00058000,
+ FRINTZ_d = FPDataProcessing1SourceFixed | FP64 | 0x00058000,
+ FRINTZ = FRINTZ_s,
+ FRINTA_s = FPDataProcessing1SourceFixed | 0x00060000,
+ FRINTA_d = FPDataProcessing1SourceFixed | FP64 | 0x00060000,
+ FRINTA = FRINTA_s,
+ FRINTX_s = FPDataProcessing1SourceFixed | 0x00070000,
+ FRINTX_d = FPDataProcessing1SourceFixed | FP64 | 0x00070000,
+ FRINTX = FRINTX_s,
+ FRINTI_s = FPDataProcessing1SourceFixed | 0x00078000,
+ FRINTI_d = FPDataProcessing1SourceFixed | FP64 | 0x00078000,
+ FRINTI = FRINTI_s
+};
+
+// Floating point data processing 2 source.
+enum FPDataProcessing2SourceOp {
+ FPDataProcessing2SourceFixed = 0x1E200800,
+ FPDataProcessing2SourceFMask = 0x5F200C00,
+ FPDataProcessing2SourceMask = 0xFFE0FC00,
+ FMUL = FPDataProcessing2SourceFixed | 0x00000000,
+ FMUL_s = FMUL,
+ FMUL_d = FMUL | FP64,
+ FDIV = FPDataProcessing2SourceFixed | 0x00001000,
+ FDIV_s = FDIV,
+ FDIV_d = FDIV | FP64,
+ FADD = FPDataProcessing2SourceFixed | 0x00002000,
+ FADD_s = FADD,
+ FADD_d = FADD | FP64,
+ FSUB = FPDataProcessing2SourceFixed | 0x00003000,
+ FSUB_s = FSUB,
+ FSUB_d = FSUB | FP64,
+ FMAX = FPDataProcessing2SourceFixed | 0x00004000,
+ FMAX_s = FMAX,
+ FMAX_d = FMAX | FP64,
+ FMIN = FPDataProcessing2SourceFixed | 0x00005000,
+ FMIN_s = FMIN,
+ FMIN_d = FMIN | FP64,
+ FMAXNM = FPDataProcessing2SourceFixed | 0x00006000,
+ FMAXNM_s = FMAXNM,
+ FMAXNM_d = FMAXNM | FP64,
+ FMINNM = FPDataProcessing2SourceFixed | 0x00007000,
+ FMINNM_s = FMINNM,
+ FMINNM_d = FMINNM | FP64,
+ FNMUL = FPDataProcessing2SourceFixed | 0x00008000,
+ FNMUL_s = FNMUL,
+ FNMUL_d = FNMUL | FP64
+};
+
+// Floating point data processing 3 source.
+enum FPDataProcessing3SourceOp {
+ FPDataProcessing3SourceFixed = 0x1F000000,
+ FPDataProcessing3SourceFMask = 0x5F000000,
+ FPDataProcessing3SourceMask = 0xFFE08000,
+ FMADD_s = FPDataProcessing3SourceFixed | 0x00000000,
+ FMSUB_s = FPDataProcessing3SourceFixed | 0x00008000,
+ FNMADD_s = FPDataProcessing3SourceFixed | 0x00200000,
+ FNMSUB_s = FPDataProcessing3SourceFixed | 0x00208000,
+ FMADD_d = FPDataProcessing3SourceFixed | 0x00400000,
+ FMSUB_d = FPDataProcessing3SourceFixed | 0x00408000,
+ FNMADD_d = FPDataProcessing3SourceFixed | 0x00600000,
+ FNMSUB_d = FPDataProcessing3SourceFixed | 0x00608000
+};
+
+// Conversion between floating point and integer.
+enum FPIntegerConvertOp {
+ FPIntegerConvertFixed = 0x1E200000,
+ FPIntegerConvertFMask = 0x5F20FC00,
+ FPIntegerConvertMask = 0xFFFFFC00,
+ FCVTNS = FPIntegerConvertFixed | 0x00000000,
+ FCVTNS_ws = FCVTNS,
+ FCVTNS_xs = FCVTNS | SixtyFourBits,
+ FCVTNS_wd = FCVTNS | FP64,
+ FCVTNS_xd = FCVTNS | SixtyFourBits | FP64,
+ FCVTNU = FPIntegerConvertFixed | 0x00010000,
+ FCVTNU_ws = FCVTNU,
+ FCVTNU_xs = FCVTNU | SixtyFourBits,
+ FCVTNU_wd = FCVTNU | FP64,
+ FCVTNU_xd = FCVTNU | SixtyFourBits | FP64,
+ FCVTPS = FPIntegerConvertFixed | 0x00080000,
+ FCVTPS_ws = FCVTPS,
+ FCVTPS_xs = FCVTPS | SixtyFourBits,
+ FCVTPS_wd = FCVTPS | FP64,
+ FCVTPS_xd = FCVTPS | SixtyFourBits | FP64,
+ FCVTPU = FPIntegerConvertFixed | 0x00090000,
+ FCVTPU_ws = FCVTPU,
+ FCVTPU_xs = FCVTPU | SixtyFourBits,
+ FCVTPU_wd = FCVTPU | FP64,
+ FCVTPU_xd = FCVTPU | SixtyFourBits | FP64,
+ FCVTMS = FPIntegerConvertFixed | 0x00100000,
+ FCVTMS_ws = FCVTMS,
+ FCVTMS_xs = FCVTMS | SixtyFourBits,
+ FCVTMS_wd = FCVTMS | FP64,
+ FCVTMS_xd = FCVTMS | SixtyFourBits | FP64,
+ FCVTMU = FPIntegerConvertFixed | 0x00110000,
+ FCVTMU_ws = FCVTMU,
+ FCVTMU_xs = FCVTMU | SixtyFourBits,
+ FCVTMU_wd = FCVTMU | FP64,
+ FCVTMU_xd = FCVTMU | SixtyFourBits | FP64,
+ FCVTZS = FPIntegerConvertFixed | 0x00180000,
+ FCVTZS_ws = FCVTZS,
+ FCVTZS_xs = FCVTZS | SixtyFourBits,
+ FCVTZS_wd = FCVTZS | FP64,
+ FCVTZS_xd = FCVTZS | SixtyFourBits | FP64,
+ FCVTZU = FPIntegerConvertFixed | 0x00190000,
+ FCVTZU_ws = FCVTZU,
+ FCVTZU_xs = FCVTZU | SixtyFourBits,
+ FCVTZU_wd = FCVTZU | FP64,
+ FCVTZU_xd = FCVTZU | SixtyFourBits | FP64,
+ SCVTF = FPIntegerConvertFixed | 0x00020000,
+ SCVTF_sw = SCVTF,
+ SCVTF_sx = SCVTF | SixtyFourBits,
+ SCVTF_dw = SCVTF | FP64,
+ SCVTF_dx = SCVTF | SixtyFourBits | FP64,
+ UCVTF = FPIntegerConvertFixed | 0x00030000,
+ UCVTF_sw = UCVTF,
+ UCVTF_sx = UCVTF | SixtyFourBits,
+ UCVTF_dw = UCVTF | FP64,
+ UCVTF_dx = UCVTF | SixtyFourBits | FP64,
+ FCVTAS = FPIntegerConvertFixed | 0x00040000,
+ FCVTAS_ws = FCVTAS,
+ FCVTAS_xs = FCVTAS | SixtyFourBits,
+ FCVTAS_wd = FCVTAS | FP64,
+ FCVTAS_xd = FCVTAS | SixtyFourBits | FP64,
+ FCVTAU = FPIntegerConvertFixed | 0x00050000,
+ FCVTAU_ws = FCVTAU,
+ FCVTAU_xs = FCVTAU | SixtyFourBits,
+ FCVTAU_wd = FCVTAU | FP64,
+ FCVTAU_xd = FCVTAU | SixtyFourBits | FP64,
+ FMOV_ws = FPIntegerConvertFixed | 0x00060000,
+ FMOV_sw = FPIntegerConvertFixed | 0x00070000,
+ FMOV_xd = FMOV_ws | SixtyFourBits | FP64,
+ FMOV_dx = FMOV_sw | SixtyFourBits | FP64
+};
+
+// Conversion between fixed point and floating point.
+enum FPFixedPointConvertOp {
+ FPFixedPointConvertFixed = 0x1E000000,
+ FPFixedPointConvertFMask = 0x5F200000,
+ FPFixedPointConvertMask = 0xFFFF0000,
+ FCVTZS_fixed = FPFixedPointConvertFixed | 0x00180000,
+ FCVTZS_ws_fixed = FCVTZS_fixed,
+ FCVTZS_xs_fixed = FCVTZS_fixed | SixtyFourBits,
+ FCVTZS_wd_fixed = FCVTZS_fixed | FP64,
+ FCVTZS_xd_fixed = FCVTZS_fixed | SixtyFourBits | FP64,
+ FCVTZU_fixed = FPFixedPointConvertFixed | 0x00190000,
+ FCVTZU_ws_fixed = FCVTZU_fixed,
+ FCVTZU_xs_fixed = FCVTZU_fixed | SixtyFourBits,
+ FCVTZU_wd_fixed = FCVTZU_fixed | FP64,
+ FCVTZU_xd_fixed = FCVTZU_fixed | SixtyFourBits | FP64,
+ SCVTF_fixed = FPFixedPointConvertFixed | 0x00020000,
+ SCVTF_sw_fixed = SCVTF_fixed,
+ SCVTF_sx_fixed = SCVTF_fixed | SixtyFourBits,
+ SCVTF_dw_fixed = SCVTF_fixed | FP64,
+ SCVTF_dx_fixed = SCVTF_fixed | SixtyFourBits | FP64,
+ UCVTF_fixed = FPFixedPointConvertFixed | 0x00030000,
+ UCVTF_sw_fixed = UCVTF_fixed,
+ UCVTF_sx_fixed = UCVTF_fixed | SixtyFourBits,
+ UCVTF_dw_fixed = UCVTF_fixed | FP64,
+ UCVTF_dx_fixed = UCVTF_fixed | SixtyFourBits | FP64
+};
+
+// Unimplemented and unallocated instructions. These are defined to make fixed
+// bit assertion easier.
+enum UnimplementedOp {
+ UnimplementedFixed = 0x00000000,
+ UnimplementedFMask = 0x00000000
+};
+
+enum UnallocatedOp {
+ UnallocatedFixed = 0x00000000,
+ UnallocatedFMask = 0x00000000
+};
+
+} } // namespace v8::internal
+
+#endif // V8_ARM64_CONSTANTS_ARM64_H_
diff --git a/deps/v8/src/arm64/cpu-arm64.cc b/deps/v8/src/arm64/cpu-arm64.cc
new file mode 100644
index 0000000000..b8899adb37
--- /dev/null
+++ b/deps/v8/src/arm64/cpu-arm64.cc
@@ -0,0 +1,199 @@
+// Copyright 2013 the V8 project authors. All rights reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following
+// disclaimer in the documentation and/or other materials provided
+// with the distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived
+// from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+// CPU specific code for arm64 independent of OS goes here.
+
+#include "v8.h"
+
+#if V8_TARGET_ARCH_ARM64
+
+#include "arm64/cpu-arm64.h"
+#include "arm64/utils-arm64.h"
+
+namespace v8 {
+namespace internal {
+
+#ifdef DEBUG
+bool CpuFeatures::initialized_ = false;
+#endif
+unsigned CpuFeatures::supported_ = 0;
+unsigned CpuFeatures::found_by_runtime_probing_only_ = 0;
+unsigned CpuFeatures::cross_compile_ = 0;
+
+// Initialise to smallest possible cache size.
+unsigned CpuFeatures::dcache_line_size_ = 1;
+unsigned CpuFeatures::icache_line_size_ = 1;
+
+
+void CPU::SetUp() {
+ CpuFeatures::Probe();
+}
+
+
+bool CPU::SupportsCrankshaft() {
+ return true;
+}
+
+
+void CPU::FlushICache(void* address, size_t length) {
+ if (length == 0) {
+ return;
+ }
+
+#ifdef USE_SIMULATOR
+  // TODO(all): consider doing some cache simulation to ensure that every
+  // executed address has been synced.
+ USE(address);
+ USE(length);
+#else
+ // The code below assumes user space cache operations are allowed. The goal
+ // of this routine is to make sure the code generated is visible to the I
+ // side of the CPU.
+
+ uintptr_t start = reinterpret_cast<uintptr_t>(address);
+ // Sizes will be used to generate a mask big enough to cover a pointer.
+ uintptr_t dsize = static_cast<uintptr_t>(CpuFeatures::dcache_line_size());
+ uintptr_t isize = static_cast<uintptr_t>(CpuFeatures::icache_line_size());
+ // Cache line sizes are always a power of 2.
+ ASSERT(CountSetBits(dsize, 64) == 1);
+ ASSERT(CountSetBits(isize, 64) == 1);
+ uintptr_t dstart = start & ~(dsize - 1);
+ uintptr_t istart = start & ~(isize - 1);
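+  // For example, with 64-byte cache lines, a start address of 0x1234 would be
+  // rounded down to the line-aligned address 0x1200.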
+ uintptr_t end = start + length;
+
+ __asm__ __volatile__ ( // NOLINT
+ // Clean every line of the D cache containing the target data.
+ "0: \n\t"
+ // dc : Data Cache maintenance
+ // c : Clean
+ // va : by (Virtual) Address
+ // u : to the point of Unification
+ // The point of unification for a processor is the point by which the
+ // instruction and data caches are guaranteed to see the same copy of a
+ // memory location. See ARM DDI 0406B page B2-12 for more information.
+ "dc cvau, %[dline] \n\t"
+ "add %[dline], %[dline], %[dsize] \n\t"
+ "cmp %[dline], %[end] \n\t"
+ "b.lt 0b \n\t"
+ // Barrier to make sure the effect of the code above is visible to the rest
+ // of the world.
+ // dsb : Data Synchronisation Barrier
+ // ish : Inner SHareable domain
+ // The point of unification for an Inner Shareable shareability domain is
+ // the point by which the instruction and data caches of all the processors
+ // in that Inner Shareable shareability domain are guaranteed to see the
+ // same copy of a memory location. See ARM DDI 0406B page B2-12 for more
+ // information.
+ "dsb ish \n\t"
+ // Invalidate every line of the I cache containing the target data.
+ "1: \n\t"
+ // ic : instruction cache maintenance
+ // i : invalidate
+ // va : by address
+ // u : to the point of unification
+ "ic ivau, %[iline] \n\t"
+ "add %[iline], %[iline], %[isize] \n\t"
+ "cmp %[iline], %[end] \n\t"
+ "b.lt 1b \n\t"
+ // Barrier to make sure the effect of the code above is visible to the rest
+ // of the world.
+ "dsb ish \n\t"
+ // Barrier to ensure any prefetching which happened before this code is
+ // discarded.
+ // isb : Instruction Synchronisation Barrier
+ "isb \n\t"
+ : [dline] "+r" (dstart),
+ [iline] "+r" (istart)
+ : [dsize] "r" (dsize),
+ [isize] "r" (isize),
+ [end] "r" (end)
+ // This code does not write to memory but without the dependency gcc might
+ // move this code before the code is generated.
+ : "cc", "memory"
+ ); // NOLINT
+#endif
+}
+
+
+void CpuFeatures::Probe() {
+ // Compute I and D cache line size. The cache type register holds
+ // information about the caches.
+ uint32_t cache_type_register = GetCacheType();
+
+ static const int kDCacheLineSizeShift = 16;
+ static const int kICacheLineSizeShift = 0;
+ static const uint32_t kDCacheLineSizeMask = 0xf << kDCacheLineSizeShift;
+ static const uint32_t kICacheLineSizeMask = 0xf << kICacheLineSizeShift;
+
+  // The cache type register holds the line sizes of the I and D caches as
+  // powers of two.
+ uint32_t dcache_line_size_power_of_two =
+ (cache_type_register & kDCacheLineSizeMask) >> kDCacheLineSizeShift;
+ uint32_t icache_line_size_power_of_two =
+ (cache_type_register & kICacheLineSizeMask) >> kICacheLineSizeShift;
+
+ dcache_line_size_ = 1 << dcache_line_size_power_of_two;
+ icache_line_size_ = 1 << icache_line_size_power_of_two;
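+  // For example, a line size field of 4 in the cache type register is
+  // recorded here as a line size of 1 << 4 == 16.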
+
+  // AArch64 has no configuration options, so no further probing is required.
+ supported_ = 0;
+
+#ifdef DEBUG
+ initialized_ = true;
+#endif
+}
+
+
+unsigned CpuFeatures::dcache_line_size() {
+ ASSERT(initialized_);
+ return dcache_line_size_;
+}
+
+
+unsigned CpuFeatures::icache_line_size() {
+ ASSERT(initialized_);
+ return icache_line_size_;
+}
+
+
+uint32_t CpuFeatures::GetCacheType() {
+#ifdef USE_SIMULATOR
+ // This will lead to a cache with 1 byte long lines, which is fine since the
+ // simulator will not need this information.
+ return 0;
+#else
+ uint32_t cache_type_register;
+ // Copy the content of the cache type register to a core register.
+ __asm__ __volatile__ ("mrs %[ctr], ctr_el0" // NOLINT
+ : [ctr] "=r" (cache_type_register));
+ return cache_type_register;
+#endif
+}
+
+} } // namespace v8::internal
+
+#endif // V8_TARGET_ARCH_ARM64
diff --git a/deps/v8/src/arm64/cpu-arm64.h b/deps/v8/src/arm64/cpu-arm64.h
new file mode 100644
index 0000000000..ddec72d8f6
--- /dev/null
+++ b/deps/v8/src/arm64/cpu-arm64.h
@@ -0,0 +1,107 @@
+// Copyright 2013 the V8 project authors. All rights reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following
+// disclaimer in the documentation and/or other materials provided
+// with the distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived
+// from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+#ifndef V8_ARM64_CPU_ARM64_H_
+#define V8_ARM64_CPU_ARM64_H_
+
+#include <stdio.h>
+#include "serialize.h"
+#include "cpu.h"
+
+namespace v8 {
+namespace internal {
+
+
+// CpuFeatures keeps track of which features are supported by the target CPU.
+// Supported features must be enabled by a CpuFeatureScope before use.
+class CpuFeatures : public AllStatic {
+ public:
+ // Detect features of the target CPU. Set safe defaults if the serializer
+ // is enabled (snapshots must be portable).
+ static void Probe();
+
+ // Check whether a feature is supported by the target CPU.
+ static bool IsSupported(CpuFeature f) {
+ ASSERT(initialized_);
+ // There are no optional features for ARM64.
+ return false;
+  }
+
+ static bool IsFoundByRuntimeProbingOnly(CpuFeature f) {
+ ASSERT(initialized_);
+ // There are no optional features for ARM64.
+ return false;
+ }
+
+ static bool IsSafeForSnapshot(CpuFeature f) {
+ return (IsSupported(f) &&
+ (!Serializer::enabled() || !IsFoundByRuntimeProbingOnly(f)));
+ }
+
+ // I and D cache line size in bytes.
+ static unsigned dcache_line_size();
+ static unsigned icache_line_size();
+
+ static unsigned supported_;
+
+ static bool VerifyCrossCompiling() {
+ // There are no optional features for ARM64.
+ ASSERT(cross_compile_ == 0);
+ return true;
+ }
+
+ static bool VerifyCrossCompiling(CpuFeature f) {
+ // There are no optional features for ARM64.
+ USE(f);
+ ASSERT(cross_compile_ == 0);
+ return true;
+ }
+
+ private:
+ // Return the content of the cache type register.
+ static uint32_t GetCacheType();
+
+ // I and D cache line size in bytes.
+ static unsigned icache_line_size_;
+ static unsigned dcache_line_size_;
+
+#ifdef DEBUG
+ static bool initialized_;
+#endif
+
+ // This isn't used (and is always 0), but it is required by V8.
+ static unsigned found_by_runtime_probing_only_;
+
+ static unsigned cross_compile_;
+
+ friend class PlatformFeatureScope;
+ DISALLOW_COPY_AND_ASSIGN(CpuFeatures);
+};
+
+} } // namespace v8::internal
+
+#endif // V8_ARM64_CPU_ARM64_H_
diff --git a/deps/v8/src/arm64/debug-arm64.cc b/deps/v8/src/arm64/debug-arm64.cc
new file mode 100644
index 0000000000..716337f051
--- /dev/null
+++ b/deps/v8/src/arm64/debug-arm64.cc
@@ -0,0 +1,393 @@
+// Copyright 2013 the V8 project authors. All rights reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following
+// disclaimer in the documentation and/or other materials provided
+// with the distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived
+// from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+#include "v8.h"
+
+#if V8_TARGET_ARCH_ARM64
+
+#include "codegen.h"
+#include "debug.h"
+
+namespace v8 {
+namespace internal {
+
+
+#define __ ACCESS_MASM(masm)
+
+
+#ifdef ENABLE_DEBUGGER_SUPPORT
+bool BreakLocationIterator::IsDebugBreakAtReturn() {
+ return Debug::IsDebugBreakAtReturn(rinfo());
+}
+
+
+void BreakLocationIterator::SetDebugBreakAtReturn() {
+ // Patch the code emitted by FullCodeGenerator::EmitReturnSequence, changing
+ // the return from JS function sequence from
+ // mov sp, fp
+  //   ldp fp, lr, [sp], #16
+  //   ldr ip0, [pc, #(3 * kInstructionSize)]
+ // add sp, sp, ip0
+ // ret
+  //   <number of parameters ...
+ // ... plus one (64 bits)>
+ // to a call to the debug break return code.
+ // ldr ip0, [pc, #(3 * kInstructionSize)]
+ // blr ip0
+ // hlt kHltBadCode @ code should not return, catch if it does.
+ // <debug break return code ...
+ // ... entry point address (64 bits)>
+
+ // The patching code must not overflow the space occupied by the return
+ // sequence.
+ STATIC_ASSERT(Assembler::kJSRetSequenceInstructions >= 5);
+ PatchingAssembler patcher(reinterpret_cast<Instruction*>(rinfo()->pc()), 5);
+ byte* entry =
+ debug_info_->GetIsolate()->debug()->debug_break_return()->entry();
+
+ // The first instruction of a patched return sequence must be a load literal
+ // loading the address of the debug break return code.
+ patcher.LoadLiteral(ip0, 3 * kInstructionSize);
+ // TODO(all): check the following is correct.
+ // The debug break return code will push a frame and call statically compiled
+ // code. By using blr, even though control will not return after the branch,
+ // this call site will be registered in the frame (lr being saved as the pc
+ // of the next instruction to execute for this frame). The debugger can now
+  // iterate over the frames to find the call to the debug break return code.
+ patcher.blr(ip0);
+ patcher.hlt(kHltBadCode);
+ patcher.dc64(reinterpret_cast<int64_t>(entry));
+}
+
+
+void BreakLocationIterator::ClearDebugBreakAtReturn() {
+ // Reset the code emitted by EmitReturnSequence to its original state.
+ rinfo()->PatchCode(original_rinfo()->pc(),
+ Assembler::kJSRetSequenceInstructions);
+}
+
+
+bool Debug::IsDebugBreakAtReturn(RelocInfo* rinfo) {
+ ASSERT(RelocInfo::IsJSReturn(rinfo->rmode()));
+ return rinfo->IsPatchedReturnSequence();
+}
+
+
+bool BreakLocationIterator::IsDebugBreakAtSlot() {
+ ASSERT(IsDebugBreakSlot());
+ // Check whether the debug break slot instructions have been patched.
+ return rinfo()->IsPatchedDebugBreakSlotSequence();
+}
+
+
+void BreakLocationIterator::SetDebugBreakAtSlot() {
+ // Patch the code emitted by Debug::GenerateSlots, changing the debug break
+ // slot code from
+ // mov x0, x0 @ nop DEBUG_BREAK_NOP
+ // mov x0, x0 @ nop DEBUG_BREAK_NOP
+ // mov x0, x0 @ nop DEBUG_BREAK_NOP
+ // mov x0, x0 @ nop DEBUG_BREAK_NOP
+ // to a call to the debug slot code.
+ // ldr ip0, [pc, #(2 * kInstructionSize)]
+ // blr ip0
+ // <debug break slot code ...
+ // ... entry point address (64 bits)>
+
+ // TODO(all): consider adding a hlt instruction after the blr as we don't
+ // expect control to return here. This implies increasing
+ // kDebugBreakSlotInstructions to 5 instructions.
+
+  // The patching code must not overflow the space occupied by the debug break
+  // slot sequence.
+ STATIC_ASSERT(Assembler::kDebugBreakSlotInstructions >= 4);
+ PatchingAssembler patcher(reinterpret_cast<Instruction*>(rinfo()->pc()), 4);
+ byte* entry =
+ debug_info_->GetIsolate()->debug()->debug_break_slot()->entry();
+
+ // The first instruction of a patched debug break slot must be a load literal
+ // loading the address of the debug break slot code.
+ patcher.LoadLiteral(ip0, 2 * kInstructionSize);
+ // TODO(all): check the following is correct.
+ // The debug break slot code will push a frame and call statically compiled
+  // code. By using blr, even though control will not return after the branch,
+  // this call site will be registered in the frame (lr being saved as the pc
+  // of the next instruction to execute for this frame). The debugger can now
+  // iterate over the frames to find the call to the debug break slot code.
+ patcher.blr(ip0);
+ patcher.dc64(reinterpret_cast<int64_t>(entry));
+}
+
+
+void BreakLocationIterator::ClearDebugBreakAtSlot() {
+ ASSERT(IsDebugBreakSlot());
+ rinfo()->PatchCode(original_rinfo()->pc(),
+ Assembler::kDebugBreakSlotInstructions);
+}
+
+const bool Debug::FramePaddingLayout::kIsSupported = false;
+
+static void Generate_DebugBreakCallHelper(MacroAssembler* masm,
+ RegList object_regs,
+ RegList non_object_regs,
+ Register scratch) {
+ {
+ FrameScope scope(masm, StackFrame::INTERNAL);
+
+ // Any live values (object_regs and non_object_regs) in caller-saved
+ // registers (or lr) need to be stored on the stack so that their values are
+ // safely preserved for a call into C code.
+ //
+ // Also:
+ // * object_regs may be modified during the C code by the garbage
+ // collector. Every object register must be a valid tagged pointer or
+ // SMI.
+ //
+ // * non_object_regs will be converted to SMIs so that the garbage
+ // collector doesn't try to interpret them as pointers.
+ //
+ // TODO(jbramley): Why can't this handle callee-saved registers?
+ ASSERT((~kCallerSaved.list() & object_regs) == 0);
+ ASSERT((~kCallerSaved.list() & non_object_regs) == 0);
+ ASSERT((object_regs & non_object_regs) == 0);
+ ASSERT((scratch.Bit() & object_regs) == 0);
+ ASSERT((scratch.Bit() & non_object_regs) == 0);
+ ASSERT((masm->TmpList()->list() & (object_regs | non_object_regs)) == 0);
+ STATIC_ASSERT(kSmiValueSize == 32);
+
+ CPURegList non_object_list =
+ CPURegList(CPURegister::kRegister, kXRegSizeInBits, non_object_regs);
+ while (!non_object_list.IsEmpty()) {
+ // Store each non-object register as two SMIs.
+ Register reg = Register(non_object_list.PopLowestIndex());
+ __ Push(reg);
+ __ Poke(wzr, 0);
+ __ Push(reg.W(), wzr);
+ // Stack:
+ // jssp[12]: reg[63:32]
+ // jssp[8]: 0x00000000 (SMI tag & padding)
+ // jssp[4]: reg[31:0]
+ // jssp[0]: 0x00000000 (SMI tag & padding)
+ STATIC_ASSERT((kSmiTag == 0) && (kSmiShift == 32));
+ }
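+    // Illustrative example: a register holding 0x0123456789ABCDEF is stored
+    // as 0x0123456700000000 (high half, deeper on the stack) and
+    // 0x89ABCDEF00000000 (low half, on top); with kSmiShift == 32 both words
+    // are valid smis as far as the garbage collector is concerned.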
+
+ if (object_regs != 0) {
+ __ PushXRegList(object_regs);
+ }
+
+#ifdef DEBUG
+ __ RecordComment("// Calling from debug break to runtime - come in - over");
+#endif
+ __ Mov(x0, 0); // No arguments.
+ __ Mov(x1, ExternalReference::debug_break(masm->isolate()));
+
+ CEntryStub stub(1);
+ __ CallStub(&stub);
+
+ // Restore the register values from the expression stack.
+ if (object_regs != 0) {
+ __ PopXRegList(object_regs);
+ }
+
+ non_object_list =
+ CPURegList(CPURegister::kRegister, kXRegSizeInBits, non_object_regs);
+ while (!non_object_list.IsEmpty()) {
+ // Load each non-object register from two SMIs.
+ // Stack:
+ // jssp[12]: reg[63:32]
+ // jssp[8]: 0x00000000 (SMI tag & padding)
+ // jssp[4]: reg[31:0]
+ // jssp[0]: 0x00000000 (SMI tag & padding)
+ Register reg = Register(non_object_list.PopHighestIndex());
+ __ Pop(scratch, reg);
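+      // scratch now holds the low-half smi (reg[31:0] in its upper 32 bits)
+      // and reg holds the high-half smi; the Bfxil below copies bits [63:32]
+      // of scratch into bits [31:0] of reg, reassembling the original value.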
+ __ Bfxil(reg, scratch, 32, 32);
+ }
+
+ // Leave the internal frame.
+ }
+
+ // Now that the break point has been handled, resume normal execution by
+  // jumping to the target address intended by the caller, which was
+  // overwritten by the address of DebugBreakXXX.
+ ExternalReference after_break_target(Debug_Address::AfterBreakTarget(),
+ masm->isolate());
+ __ Mov(scratch, after_break_target);
+ __ Ldr(scratch, MemOperand(scratch));
+ __ Br(scratch);
+}
+
+
+void Debug::GenerateLoadICDebugBreak(MacroAssembler* masm) {
+  // Calling convention for IC load (from ic-arm64.cc).
+ // ----------- S t a t e -------------
+ // -- x2 : name
+ // -- lr : return address
+ // -- x0 : receiver
+ // -- [sp] : receiver
+ // -----------------------------------
+ // Registers x0 and x2 contain objects that need to be pushed on the
+ // expression stack of the fake JS frame.
+ Generate_DebugBreakCallHelper(masm, x0.Bit() | x2.Bit(), 0, x10);
+}
+
+
+void Debug::GenerateStoreICDebugBreak(MacroAssembler* masm) {
+  // Calling convention for IC store (from ic-arm64.cc).
+ // ----------- S t a t e -------------
+ // -- x0 : value
+ // -- x1 : receiver
+ // -- x2 : name
+ // -- lr : return address
+ // -----------------------------------
+ // Registers x0, x1, and x2 contain objects that need to be pushed on the
+ // expression stack of the fake JS frame.
+ Generate_DebugBreakCallHelper(masm, x0.Bit() | x1.Bit() | x2.Bit(), 0, x10);
+}
+
+
+void Debug::GenerateKeyedLoadICDebugBreak(MacroAssembler* masm) {
+ // ---------- S t a t e --------------
+ // -- lr : return address
+ // -- x0 : key
+ // -- x1 : receiver
+ Generate_DebugBreakCallHelper(masm, x0.Bit() | x1.Bit(), 0, x10);
+}
+
+
+void Debug::GenerateKeyedStoreICDebugBreak(MacroAssembler* masm) {
+ // ---------- S t a t e --------------
+ // -- x0 : value
+ // -- x1 : key
+ // -- x2 : receiver
+ // -- lr : return address
+ Generate_DebugBreakCallHelper(masm, x0.Bit() | x1.Bit() | x2.Bit(), 0, x10);
+}
+
+
+void Debug::GenerateCompareNilICDebugBreak(MacroAssembler* masm) {
+ // Register state for CompareNil IC
+ // ----------- S t a t e -------------
+  //  -- x0    : value
+ // -----------------------------------
+ Generate_DebugBreakCallHelper(masm, x0.Bit(), 0, x10);
+}
+
+
+void Debug::GenerateCallICDebugBreak(MacroAssembler* masm) {
+  // Calling convention for IC call (from ic-arm64.cc).
+ // ----------- S t a t e -------------
+ // -- x2 : name
+ // -----------------------------------
+ Generate_DebugBreakCallHelper(masm, x2.Bit(), 0, x10);
+}
+
+
+void Debug::GenerateReturnDebugBreak(MacroAssembler* masm) {
+  // In places other than IC call sites it is expected that x0 is TOS, which
+  // is an object. This is not generally the case, so this should be used with
+  // care.
+ Generate_DebugBreakCallHelper(masm, x0.Bit(), 0, x10);
+}
+
+
+void Debug::GenerateCallFunctionStubDebugBreak(MacroAssembler* masm) {
+ // Register state for CallFunctionStub (from code-stubs-arm64.cc).
+ // ----------- S t a t e -------------
+ // -- x1 : function
+ // -----------------------------------
+ Generate_DebugBreakCallHelper(masm, x1.Bit(), 0, x10);
+}
+
+
+void Debug::GenerateCallFunctionStubRecordDebugBreak(MacroAssembler* masm) {
+ // Register state for CallFunctionStub (from code-stubs-arm64.cc).
+ // ----------- S t a t e -------------
+ // -- x1 : function
+ // -- x2 : feedback array
+ // -- x3 : slot in feedback array
+ // -----------------------------------
+ Generate_DebugBreakCallHelper(masm, x1.Bit() | x2.Bit() | x3.Bit(), 0, x10);
+}
+
+
+void Debug::GenerateCallConstructStubDebugBreak(MacroAssembler* masm) {
+ // Calling convention for CallConstructStub (from code-stubs-arm64.cc).
+ // ----------- S t a t e -------------
+ // -- x0 : number of arguments (not smi)
+ // -- x1 : constructor function
+ // -----------------------------------
+ Generate_DebugBreakCallHelper(masm, x1.Bit(), x0.Bit(), x10);
+}
+
+
+void Debug::GenerateCallConstructStubRecordDebugBreak(MacroAssembler* masm) {
+ // Calling convention for CallConstructStub (from code-stubs-arm64.cc).
+ // ----------- S t a t e -------------
+ // -- x0 : number of arguments (not smi)
+ // -- x1 : constructor function
+ // -- x2 : feedback array
+ // -- x3 : feedback slot (smi)
+ // -----------------------------------
+ Generate_DebugBreakCallHelper(
+ masm, x1.Bit() | x2.Bit() | x3.Bit(), x0.Bit(), x10);
+}
+
+
+void Debug::GenerateSlot(MacroAssembler* masm) {
+  // Generate enough nops to make space for a call instruction. Avoid emitting
+ // the constant pool in the debug break slot code.
+ InstructionAccurateScope scope(masm, Assembler::kDebugBreakSlotInstructions);
+
+ __ RecordDebugBreakSlot();
+ for (int i = 0; i < Assembler::kDebugBreakSlotInstructions; i++) {
+ __ nop(Assembler::DEBUG_BREAK_NOP);
+ }
+}
+
+
+void Debug::GenerateSlotDebugBreak(MacroAssembler* masm) {
+ // In the places where a debug break slot is inserted no registers can contain
+ // object pointers.
+ Generate_DebugBreakCallHelper(masm, 0, 0, x10);
+}
+
+
+void Debug::GeneratePlainReturnLiveEdit(MacroAssembler* masm) {
+ masm->Abort(kLiveEditFrameDroppingIsNotSupportedOnARM64);
+}
+
+
+void Debug::GenerateFrameDropperLiveEdit(MacroAssembler* masm) {
+ masm->Abort(kLiveEditFrameDroppingIsNotSupportedOnARM64);
+}
+
+const bool Debug::kFrameDropperSupported = false;
+
+#endif // ENABLE_DEBUGGER_SUPPORT
+
+} } // namespace v8::internal
+
+#endif // V8_TARGET_ARCH_ARM64
diff --git a/deps/v8/src/arm64/decoder-arm64-inl.h b/deps/v8/src/arm64/decoder-arm64-inl.h
new file mode 100644
index 0000000000..94009c704d
--- /dev/null
+++ b/deps/v8/src/arm64/decoder-arm64-inl.h
@@ -0,0 +1,671 @@
+// Copyright 2014 the V8 project authors. All rights reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following
+// disclaimer in the documentation and/or other materials provided
+// with the distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived
+// from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+#ifndef V8_ARM64_DECODER_ARM64_INL_H_
+#define V8_ARM64_DECODER_ARM64_INL_H_
+
+#include "arm64/decoder-arm64.h"
+#include "globals.h"
+#include "utils.h"
+
+
+namespace v8 {
+namespace internal {
+
+
+// Top-level instruction decode function.
+template<typename V>
+void Decoder<V>::Decode(Instruction *instr) {
+ if (instr->Bits(28, 27) == 0) {
+ V::VisitUnallocated(instr);
+ } else {
+ switch (instr->Bits(27, 24)) {
+ // 0: PC relative addressing.
+ case 0x0: DecodePCRelAddressing(instr); break;
+
+ // 1: Add/sub immediate.
+ case 0x1: DecodeAddSubImmediate(instr); break;
+
+ // A: Logical shifted register.
+ // Add/sub with carry.
+ // Conditional compare register.
+ // Conditional compare immediate.
+ // Conditional select.
+ // Data processing 1 source.
+ // Data processing 2 source.
+ // B: Add/sub shifted register.
+ // Add/sub extended register.
+ // Data processing 3 source.
+ case 0xA:
+ case 0xB: DecodeDataProcessing(instr); break;
+
+ // 2: Logical immediate.
+ // Move wide immediate.
+ case 0x2: DecodeLogical(instr); break;
+
+ // 3: Bitfield.
+ // Extract.
+ case 0x3: DecodeBitfieldExtract(instr); break;
+
+ // 4: Unconditional branch immediate.
+ // Exception generation.
+ // Compare and branch immediate.
+ // 5: Compare and branch immediate.
+ // Conditional branch.
+ // System.
+ // 6,7: Unconditional branch.
+ // Test and branch immediate.
+ case 0x4:
+ case 0x5:
+ case 0x6:
+ case 0x7: DecodeBranchSystemException(instr); break;
+
+ // 8,9: Load/store register pair post-index.
+ // Load register literal.
+ // Load/store register unscaled immediate.
+ // Load/store register immediate post-index.
+ // Load/store register immediate pre-index.
+ // Load/store register offset.
+ // C,D: Load/store register pair offset.
+ // Load/store register pair pre-index.
+ // Load/store register unsigned immediate.
+ // Advanced SIMD.
+ case 0x8:
+ case 0x9:
+ case 0xC:
+ case 0xD: DecodeLoadStore(instr); break;
+
+ // E: FP fixed point conversion.
+ // FP integer conversion.
+ // FP data processing 1 source.
+ // FP compare.
+ // FP immediate.
+ // FP data processing 2 source.
+ // FP conditional compare.
+ // FP conditional select.
+ // Advanced SIMD.
+ // F: FP data processing 3 source.
+ // Advanced SIMD.
+ case 0xE:
+ case 0xF: DecodeFP(instr); break;
+ }
+ }
+}
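+
+// Illustrative example: the instruction 0x91000421 (add x1, x1, #1) has
+// bits 27:24 == 0x1, so Decode() routes it to DecodeAddSubImmediate(), which
+// in turn calls V::VisitAddSubImmediate().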
+
+
+template<typename V>
+void Decoder<V>::DecodePCRelAddressing(Instruction* instr) {
+ ASSERT(instr->Bits(27, 24) == 0x0);
+ // We know bit 28 is set, as <b28:b27> = 0 is filtered out at the top level
+ // decode.
+ ASSERT(instr->Bit(28) == 0x1);
+ V::VisitPCRelAddressing(instr);
+}
+
+
+template<typename V>
+void Decoder<V>::DecodeBranchSystemException(Instruction* instr) {
+ ASSERT((instr->Bits(27, 24) == 0x4) ||
+ (instr->Bits(27, 24) == 0x5) ||
+ (instr->Bits(27, 24) == 0x6) ||
+ (instr->Bits(27, 24) == 0x7) );
+
+ switch (instr->Bits(31, 29)) {
+ case 0:
+ case 4: {
+ V::VisitUnconditionalBranch(instr);
+ break;
+ }
+ case 1:
+ case 5: {
+ if (instr->Bit(25) == 0) {
+ V::VisitCompareBranch(instr);
+ } else {
+ V::VisitTestBranch(instr);
+ }
+ break;
+ }
+ case 2: {
+ if (instr->Bit(25) == 0) {
+ if ((instr->Bit(24) == 0x1) ||
+ (instr->Mask(0x01000010) == 0x00000010)) {
+ V::VisitUnallocated(instr);
+ } else {
+ V::VisitConditionalBranch(instr);
+ }
+ } else {
+ V::VisitUnallocated(instr);
+ }
+ break;
+ }
+ case 6: {
+ if (instr->Bit(25) == 0) {
+ if (instr->Bit(24) == 0) {
+ if ((instr->Bits(4, 2) != 0) ||
+ (instr->Mask(0x00E0001D) == 0x00200001) ||
+ (instr->Mask(0x00E0001D) == 0x00400001) ||
+ (instr->Mask(0x00E0001E) == 0x00200002) ||
+ (instr->Mask(0x00E0001E) == 0x00400002) ||
+ (instr->Mask(0x00E0001C) == 0x00600000) ||
+ (instr->Mask(0x00E0001C) == 0x00800000) ||
+ (instr->Mask(0x00E0001F) == 0x00A00000) ||
+ (instr->Mask(0x00C0001C) == 0x00C00000)) {
+ V::VisitUnallocated(instr);
+ } else {
+ V::VisitException(instr);
+ }
+ } else {
+ if (instr->Bits(23, 22) == 0) {
+ const Instr masked_003FF0E0 = instr->Mask(0x003FF0E0);
+ if ((instr->Bits(21, 19) == 0x4) ||
+ (masked_003FF0E0 == 0x00033000) ||
+ (masked_003FF0E0 == 0x003FF020) ||
+ (masked_003FF0E0 == 0x003FF060) ||
+ (masked_003FF0E0 == 0x003FF0E0) ||
+ (instr->Mask(0x00388000) == 0x00008000) ||
+ (instr->Mask(0x0038E000) == 0x00000000) ||
+ (instr->Mask(0x0039E000) == 0x00002000) ||
+ (instr->Mask(0x003AE000) == 0x00002000) ||
+ (instr->Mask(0x003CE000) == 0x00042000) ||
+ (instr->Mask(0x003FFFC0) == 0x000320C0) ||
+ (instr->Mask(0x003FF100) == 0x00032100) ||
+ (instr->Mask(0x003FF200) == 0x00032200) ||
+ (instr->Mask(0x003FF400) == 0x00032400) ||
+ (instr->Mask(0x003FF800) == 0x00032800) ||
+ (instr->Mask(0x0038F000) == 0x00005000) ||
+ (instr->Mask(0x0038E000) == 0x00006000)) {
+ V::VisitUnallocated(instr);
+ } else {
+ V::VisitSystem(instr);
+ }
+ } else {
+ V::VisitUnallocated(instr);
+ }
+ }
+ } else {
+ if ((instr->Bit(24) == 0x1) ||
+ (instr->Bits(20, 16) != 0x1F) ||
+ (instr->Bits(15, 10) != 0) ||
+ (instr->Bits(4, 0) != 0) ||
+ (instr->Bits(24, 21) == 0x3) ||
+ (instr->Bits(24, 22) == 0x3)) {
+ V::VisitUnallocated(instr);
+ } else {
+ V::VisitUnconditionalBranchToRegister(instr);
+ }
+ }
+ break;
+ }
+ case 3:
+ case 7: {
+ V::VisitUnallocated(instr);
+ break;
+ }
+ }
+}
+
+
+template<typename V>
+void Decoder<V>::DecodeLoadStore(Instruction* instr) {
+ ASSERT((instr->Bits(27, 24) == 0x8) ||
+ (instr->Bits(27, 24) == 0x9) ||
+ (instr->Bits(27, 24) == 0xC) ||
+ (instr->Bits(27, 24) == 0xD) );
+
+ if (instr->Bit(24) == 0) {
+ if (instr->Bit(28) == 0) {
+ if (instr->Bit(29) == 0) {
+ if (instr->Bit(26) == 0) {
+ // TODO(all): VisitLoadStoreExclusive.
+ V::VisitUnimplemented(instr);
+ } else {
+ DecodeAdvSIMDLoadStore(instr);
+ }
+ } else {
+ if ((instr->Bits(31, 30) == 0x3) ||
+ (instr->Mask(0xC4400000) == 0x40000000)) {
+ V::VisitUnallocated(instr);
+ } else {
+ if (instr->Bit(23) == 0) {
+ if (instr->Mask(0xC4400000) == 0xC0400000) {
+ V::VisitUnallocated(instr);
+ } else {
+ V::VisitLoadStorePairNonTemporal(instr);
+ }
+ } else {
+ V::VisitLoadStorePairPostIndex(instr);
+ }
+ }
+ }
+ } else {
+ if (instr->Bit(29) == 0) {
+ if (instr->Mask(0xC4000000) == 0xC4000000) {
+ V::VisitUnallocated(instr);
+ } else {
+ V::VisitLoadLiteral(instr);
+ }
+ } else {
+ if ((instr->Mask(0x84C00000) == 0x80C00000) ||
+ (instr->Mask(0x44800000) == 0x44800000) ||
+ (instr->Mask(0x84800000) == 0x84800000)) {
+ V::VisitUnallocated(instr);
+ } else {
+ if (instr->Bit(21) == 0) {
+ switch (instr->Bits(11, 10)) {
+ case 0: {
+ V::VisitLoadStoreUnscaledOffset(instr);
+ break;
+ }
+ case 1: {
+ if (instr->Mask(0xC4C00000) == 0xC0800000) {
+ V::VisitUnallocated(instr);
+ } else {
+ V::VisitLoadStorePostIndex(instr);
+ }
+ break;
+ }
+ case 2: {
+ // TODO(all): VisitLoadStoreRegisterOffsetUnpriv.
+ V::VisitUnimplemented(instr);
+ break;
+ }
+ case 3: {
+ if (instr->Mask(0xC4C00000) == 0xC0800000) {
+ V::VisitUnallocated(instr);
+ } else {
+ V::VisitLoadStorePreIndex(instr);
+ }
+ break;
+ }
+ }
+ } else {
+ if (instr->Bits(11, 10) == 0x2) {
+ if (instr->Bit(14) == 0) {
+ V::VisitUnallocated(instr);
+ } else {
+ V::VisitLoadStoreRegisterOffset(instr);
+ }
+ } else {
+ V::VisitUnallocated(instr);
+ }
+ }
+ }
+ }
+ }
+ } else {
+ if (instr->Bit(28) == 0) {
+ if (instr->Bit(29) == 0) {
+ V::VisitUnallocated(instr);
+ } else {
+ if ((instr->Bits(31, 30) == 0x3) ||
+ (instr->Mask(0xC4400000) == 0x40000000)) {
+ V::VisitUnallocated(instr);
+ } else {
+ if (instr->Bit(23) == 0) {
+ V::VisitLoadStorePairOffset(instr);
+ } else {
+ V::VisitLoadStorePairPreIndex(instr);
+ }
+ }
+ }
+ } else {
+ if (instr->Bit(29) == 0) {
+ V::VisitUnallocated(instr);
+ } else {
+ if ((instr->Mask(0x84C00000) == 0x80C00000) ||
+ (instr->Mask(0x44800000) == 0x44800000) ||
+ (instr->Mask(0x84800000) == 0x84800000)) {
+ V::VisitUnallocated(instr);
+ } else {
+ V::VisitLoadStoreUnsignedOffset(instr);
+ }
+ }
+ }
+ }
+}
+
+
+template<typename V>
+void Decoder<V>::DecodeLogical(Instruction* instr) {
+ ASSERT(instr->Bits(27, 24) == 0x2);
+
+ if (instr->Mask(0x80400000) == 0x00400000) {
+ V::VisitUnallocated(instr);
+ } else {
+ if (instr->Bit(23) == 0) {
+ V::VisitLogicalImmediate(instr);
+ } else {
+ if (instr->Bits(30, 29) == 0x1) {
+ V::VisitUnallocated(instr);
+ } else {
+ V::VisitMoveWideImmediate(instr);
+ }
+ }
+ }
+}
+
+
+template<typename V>
+void Decoder<V>::DecodeBitfieldExtract(Instruction* instr) {
+ ASSERT(instr->Bits(27, 24) == 0x3);
+
+ if ((instr->Mask(0x80400000) == 0x80000000) ||
+ (instr->Mask(0x80400000) == 0x00400000) ||
+ (instr->Mask(0x80008000) == 0x00008000)) {
+ V::VisitUnallocated(instr);
+ } else if (instr->Bit(23) == 0) {
+ if ((instr->Mask(0x80200000) == 0x00200000) ||
+ (instr->Mask(0x60000000) == 0x60000000)) {
+ V::VisitUnallocated(instr);
+ } else {
+ V::VisitBitfield(instr);
+ }
+ } else {
+ if ((instr->Mask(0x60200000) == 0x00200000) ||
+ (instr->Mask(0x60000000) != 0x00000000)) {
+ V::VisitUnallocated(instr);
+ } else {
+ V::VisitExtract(instr);
+ }
+ }
+}
+
+
+template<typename V>
+void Decoder<V>::DecodeAddSubImmediate(Instruction* instr) {
+ ASSERT(instr->Bits(27, 24) == 0x1);
+ if (instr->Bit(23) == 1) {
+ V::VisitUnallocated(instr);
+ } else {
+ V::VisitAddSubImmediate(instr);
+ }
+}
+
+
+template<typename V>
+void Decoder<V>::DecodeDataProcessing(Instruction* instr) {
+ ASSERT((instr->Bits(27, 24) == 0xA) ||
+ (instr->Bits(27, 24) == 0xB) );
+
+ if (instr->Bit(24) == 0) {
+ if (instr->Bit(28) == 0) {
+ if (instr->Mask(0x80008000) == 0x00008000) {
+ V::VisitUnallocated(instr);
+ } else {
+ V::VisitLogicalShifted(instr);
+ }
+ } else {
+ switch (instr->Bits(23, 21)) {
+ case 0: {
+ if (instr->Mask(0x0000FC00) != 0) {
+ V::VisitUnallocated(instr);
+ } else {
+ V::VisitAddSubWithCarry(instr);
+ }
+ break;
+ }
+ case 2: {
+ if ((instr->Bit(29) == 0) ||
+ (instr->Mask(0x00000410) != 0)) {
+ V::VisitUnallocated(instr);
+ } else {
+ if (instr->Bit(11) == 0) {
+ V::VisitConditionalCompareRegister(instr);
+ } else {
+ V::VisitConditionalCompareImmediate(instr);
+ }
+ }
+ break;
+ }
+ case 4: {
+ if (instr->Mask(0x20000800) != 0x00000000) {
+ V::VisitUnallocated(instr);
+ } else {
+ V::VisitConditionalSelect(instr);
+ }
+ break;
+ }
+ case 6: {
+ if (instr->Bit(29) == 0x1) {
+ V::VisitUnallocated(instr);
+ } else {
+ if (instr->Bit(30) == 0) {
+ if ((instr->Bit(15) == 0x1) ||
+ (instr->Bits(15, 11) == 0) ||
+ (instr->Bits(15, 12) == 0x1) ||
+ (instr->Bits(15, 12) == 0x3) ||
+ (instr->Bits(15, 13) == 0x3) ||
+ (instr->Mask(0x8000EC00) == 0x00004C00) ||
+ (instr->Mask(0x8000E800) == 0x80004000) ||
+ (instr->Mask(0x8000E400) == 0x80004000)) {
+ V::VisitUnallocated(instr);
+ } else {
+ V::VisitDataProcessing2Source(instr);
+ }
+ } else {
+ if ((instr->Bit(13) == 1) ||
+ (instr->Bits(20, 16) != 0) ||
+ (instr->Bits(15, 14) != 0) ||
+ (instr->Mask(0xA01FFC00) == 0x00000C00) ||
+ (instr->Mask(0x201FF800) == 0x00001800)) {
+ V::VisitUnallocated(instr);
+ } else {
+ V::VisitDataProcessing1Source(instr);
+ }
+ }
+ break;
+ }
+ }
+ case 1:
+ case 3:
+ case 5:
+ case 7: V::VisitUnallocated(instr); break;
+ }
+ }
+ } else {
+ if (instr->Bit(28) == 0) {
+ if (instr->Bit(21) == 0) {
+ if ((instr->Bits(23, 22) == 0x3) ||
+ (instr->Mask(0x80008000) == 0x00008000)) {
+ V::VisitUnallocated(instr);
+ } else {
+ V::VisitAddSubShifted(instr);
+ }
+ } else {
+ if ((instr->Mask(0x00C00000) != 0x00000000) ||
+ (instr->Mask(0x00001400) == 0x00001400) ||
+ (instr->Mask(0x00001800) == 0x00001800)) {
+ V::VisitUnallocated(instr);
+ } else {
+ V::VisitAddSubExtended(instr);
+ }
+ }
+ } else {
+ if ((instr->Bit(30) == 0x1) ||
+ (instr->Bits(30, 29) == 0x1) ||
+ (instr->Mask(0xE0600000) == 0x00200000) ||
+ (instr->Mask(0xE0608000) == 0x00400000) ||
+ (instr->Mask(0x60608000) == 0x00408000) ||
+ (instr->Mask(0x60E00000) == 0x00E00000) ||
+ (instr->Mask(0x60E00000) == 0x00800000) ||
+ (instr->Mask(0x60E00000) == 0x00600000)) {
+ V::VisitUnallocated(instr);
+ } else {
+ V::VisitDataProcessing3Source(instr);
+ }
+ }
+ }
+}
+
+
+template<typename V>
+void Decoder<V>::DecodeFP(Instruction* instr) {
+ ASSERT((instr->Bits(27, 24) == 0xE) ||
+ (instr->Bits(27, 24) == 0xF) );
+
+ if (instr->Bit(28) == 0) {
+ DecodeAdvSIMDDataProcessing(instr);
+ } else {
+ if (instr->Bit(29) == 1) {
+ V::VisitUnallocated(instr);
+ } else {
+ if (instr->Bits(31, 30) == 0x3) {
+ V::VisitUnallocated(instr);
+ } else if (instr->Bits(31, 30) == 0x1) {
+ DecodeAdvSIMDDataProcessing(instr);
+ } else {
+ if (instr->Bit(24) == 0) {
+ if (instr->Bit(21) == 0) {
+ if ((instr->Bit(23) == 1) ||
+ (instr->Bit(18) == 1) ||
+ (instr->Mask(0x80008000) == 0x00000000) ||
+ (instr->Mask(0x000E0000) == 0x00000000) ||
+ (instr->Mask(0x000E0000) == 0x000A0000) ||
+ (instr->Mask(0x00160000) == 0x00000000) ||
+ (instr->Mask(0x00160000) == 0x00120000)) {
+ V::VisitUnallocated(instr);
+ } else {
+ V::VisitFPFixedPointConvert(instr);
+ }
+ } else {
+ if (instr->Bits(15, 10) == 32) {
+ V::VisitUnallocated(instr);
+ } else if (instr->Bits(15, 10) == 0) {
+ if ((instr->Bits(23, 22) == 0x3) ||
+ (instr->Mask(0x000E0000) == 0x000A0000) ||
+ (instr->Mask(0x000E0000) == 0x000C0000) ||
+ (instr->Mask(0x00160000) == 0x00120000) ||
+ (instr->Mask(0x00160000) == 0x00140000) ||
+ (instr->Mask(0x20C40000) == 0x00800000) ||
+ (instr->Mask(0x20C60000) == 0x00840000) ||
+ (instr->Mask(0xA0C60000) == 0x80060000) ||
+ (instr->Mask(0xA0C60000) == 0x00860000) ||
+ (instr->Mask(0xA0C60000) == 0x00460000) ||
+ (instr->Mask(0xA0CE0000) == 0x80860000) ||
+ (instr->Mask(0xA0CE0000) == 0x804E0000) ||
+ (instr->Mask(0xA0CE0000) == 0x000E0000) ||
+ (instr->Mask(0xA0D60000) == 0x00160000) ||
+ (instr->Mask(0xA0D60000) == 0x80560000) ||
+ (instr->Mask(0xA0D60000) == 0x80960000)) {
+ V::VisitUnallocated(instr);
+ } else {
+ V::VisitFPIntegerConvert(instr);
+ }
+ } else if (instr->Bits(14, 10) == 16) {
+ const Instr masked_A0DF8000 = instr->Mask(0xA0DF8000);
+ if ((instr->Mask(0x80180000) != 0) ||
+ (masked_A0DF8000 == 0x00020000) ||
+ (masked_A0DF8000 == 0x00030000) ||
+ (masked_A0DF8000 == 0x00068000) ||
+ (masked_A0DF8000 == 0x00428000) ||
+ (masked_A0DF8000 == 0x00430000) ||
+ (masked_A0DF8000 == 0x00468000) ||
+ (instr->Mask(0xA0D80000) == 0x00800000) ||
+ (instr->Mask(0xA0DE0000) == 0x00C00000) ||
+ (instr->Mask(0xA0DF0000) == 0x00C30000) ||
+ (instr->Mask(0xA0DC0000) == 0x00C40000)) {
+ V::VisitUnallocated(instr);
+ } else {
+ V::VisitFPDataProcessing1Source(instr);
+ }
+ } else if (instr->Bits(13, 10) == 8) {
+ if ((instr->Bits(15, 14) != 0) ||
+ (instr->Bits(2, 0) != 0) ||
+ (instr->Mask(0x80800000) != 0x00000000)) {
+ V::VisitUnallocated(instr);
+ } else {
+ V::VisitFPCompare(instr);
+ }
+ } else if (instr->Bits(12, 10) == 4) {
+ if ((instr->Bits(9, 5) != 0) ||
+ (instr->Mask(0x80800000) != 0x00000000)) {
+ V::VisitUnallocated(instr);
+ } else {
+ V::VisitFPImmediate(instr);
+ }
+ } else {
+ if (instr->Mask(0x80800000) != 0x00000000) {
+ V::VisitUnallocated(instr);
+ } else {
+ switch (instr->Bits(11, 10)) {
+ case 1: {
+ V::VisitFPConditionalCompare(instr);
+ break;
+ }
+ case 2: {
+ if ((instr->Bits(15, 14) == 0x3) ||
+ (instr->Mask(0x00009000) == 0x00009000) ||
+ (instr->Mask(0x0000A000) == 0x0000A000)) {
+ V::VisitUnallocated(instr);
+ } else {
+ V::VisitFPDataProcessing2Source(instr);
+ }
+ break;
+ }
+ case 3: {
+ V::VisitFPConditionalSelect(instr);
+ break;
+ }
+ default: UNREACHABLE();
+ }
+ }
+ }
+ }
+ } else {
+ // Bit 30 == 1 has been handled earlier.
+ ASSERT(instr->Bit(30) == 0);
+ if (instr->Mask(0xA0800000) != 0) {
+ V::VisitUnallocated(instr);
+ } else {
+ V::VisitFPDataProcessing3Source(instr);
+ }
+ }
+ }
+ }
+ }
+}
+
+
+template<typename V>
+void Decoder<V>::DecodeAdvSIMDLoadStore(Instruction* instr) {
+ // TODO(all): Implement Advanced SIMD load/store instruction decode.
+ ASSERT(instr->Bits(29, 25) == 0x6);
+ V::VisitUnimplemented(instr);
+}
+
+
+template<typename V>
+void Decoder<V>::DecodeAdvSIMDDataProcessing(Instruction* instr) {
+ // TODO(all): Implement Advanced SIMD data processing instruction decode.
+ ASSERT(instr->Bits(27, 25) == 0x7);
+ V::VisitUnimplemented(instr);
+}
+
+
+} } // namespace v8::internal
+
+#endif // V8_ARM64_DECODER_ARM64_INL_H_
diff --git a/deps/v8/src/arm64/decoder-arm64.cc b/deps/v8/src/arm64/decoder-arm64.cc
new file mode 100644
index 0000000000..a9829f0abc
--- /dev/null
+++ b/deps/v8/src/arm64/decoder-arm64.cc
@@ -0,0 +1,109 @@
+// Copyright 2013 the V8 project authors. All rights reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following
+// disclaimer in the documentation and/or other materials provided
+// with the distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived
+// from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+#include "v8.h"
+
+#if V8_TARGET_ARCH_ARM64
+
+#include "globals.h"
+#include "utils.h"
+#include "arm64/decoder-arm64.h"
+
+
+namespace v8 {
+namespace internal {
+
+
+void DispatchingDecoderVisitor::AppendVisitor(DecoderVisitor* new_visitor) {
+  visitors_.remove(new_visitor);
+  visitors_.push_back(new_visitor);
+}
+
+
+void DispatchingDecoderVisitor::PrependVisitor(DecoderVisitor* new_visitor) {
+  visitors_.remove(new_visitor);
+  visitors_.push_front(new_visitor);
+}
+
+
+void DispatchingDecoderVisitor::InsertVisitorBefore(
+ DecoderVisitor* new_visitor, DecoderVisitor* registered_visitor) {
+ visitors_.remove(new_visitor);
+ std::list<DecoderVisitor*>::iterator it;
+ for (it = visitors_.begin(); it != visitors_.end(); it++) {
+ if (*it == registered_visitor) {
+ visitors_.insert(it, new_visitor);
+ return;
+ }
+ }
+ // We reached the end of the list. The last element must be
+ // registered_visitor.
+ ASSERT(*it == registered_visitor);
+ visitors_.insert(it, new_visitor);
+}
+
+
+void DispatchingDecoderVisitor::InsertVisitorAfter(
+ DecoderVisitor* new_visitor, DecoderVisitor* registered_visitor) {
+ visitors_.remove(new_visitor);
+ std::list<DecoderVisitor*>::iterator it;
+ for (it = visitors_.begin(); it != visitors_.end(); it++) {
+ if (*it == registered_visitor) {
+ it++;
+ visitors_.insert(it, new_visitor);
+ return;
+ }
+ }
+ // We reached the end of the list. The last element must be
+ // registered_visitor.
+ ASSERT(*it == registered_visitor);
+ visitors_.push_back(new_visitor);
+}
+
+
+void DispatchingDecoderVisitor::RemoveVisitor(DecoderVisitor* visitor) {
+ visitors_.remove(visitor);
+}
+
+
+#define DEFINE_VISITOR_CALLERS(A) \
+ void DispatchingDecoderVisitor::Visit##A(Instruction* instr) { \
+ if (!(instr->Mask(A##FMask) == A##Fixed)) { \
+ ASSERT(instr->Mask(A##FMask) == A##Fixed); \
+ } \
+ std::list<DecoderVisitor*>::iterator it; \
+ for (it = visitors_.begin(); it != visitors_.end(); it++) { \
+ (*it)->Visit##A(instr); \
+ } \
+ }
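+// Expanding this macro for every entry in VISITOR_LIST gives the dispatching
+// visitor one Visit function per instruction class, each of which forwards to
+// the registered visitors in list order.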
+VISITOR_LIST(DEFINE_VISITOR_CALLERS)
+#undef DEFINE_VISITOR_CALLERS
+
+
+} } // namespace v8::internal
+
+#endif // V8_TARGET_ARCH_ARM64
diff --git a/deps/v8/src/arm64/decoder-arm64.h b/deps/v8/src/arm64/decoder-arm64.h
new file mode 100644
index 0000000000..e48f741bf5
--- /dev/null
+++ b/deps/v8/src/arm64/decoder-arm64.h
@@ -0,0 +1,210 @@
+// Copyright 2013 the V8 project authors. All rights reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following
+// disclaimer in the documentation and/or other materials provided
+// with the distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived
+// from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+#ifndef V8_ARM64_DECODER_ARM64_H_
+#define V8_ARM64_DECODER_ARM64_H_
+
+#include <list>
+
+#include "globals.h"
+#include "arm64/instructions-arm64.h"
+
+namespace v8 {
+namespace internal {
+
+
+// List macro containing all visitors needed by the decoder class.
+
+#define VISITOR_LIST(V) \
+ V(PCRelAddressing) \
+ V(AddSubImmediate) \
+ V(LogicalImmediate) \
+ V(MoveWideImmediate) \
+ V(Bitfield) \
+ V(Extract) \
+ V(UnconditionalBranch) \
+ V(UnconditionalBranchToRegister) \
+ V(CompareBranch) \
+ V(TestBranch) \
+ V(ConditionalBranch) \
+ V(System) \
+ V(Exception) \
+ V(LoadStorePairPostIndex) \
+ V(LoadStorePairOffset) \
+ V(LoadStorePairPreIndex) \
+ V(LoadStorePairNonTemporal) \
+ V(LoadLiteral) \
+ V(LoadStoreUnscaledOffset) \
+ V(LoadStorePostIndex) \
+ V(LoadStorePreIndex) \
+ V(LoadStoreRegisterOffset) \
+ V(LoadStoreUnsignedOffset) \
+ V(LogicalShifted) \
+ V(AddSubShifted) \
+ V(AddSubExtended) \
+ V(AddSubWithCarry) \
+ V(ConditionalCompareRegister) \
+ V(ConditionalCompareImmediate) \
+ V(ConditionalSelect) \
+ V(DataProcessing1Source) \
+ V(DataProcessing2Source) \
+ V(DataProcessing3Source) \
+ V(FPCompare) \
+ V(FPConditionalCompare) \
+ V(FPConditionalSelect) \
+ V(FPImmediate) \
+ V(FPDataProcessing1Source) \
+ V(FPDataProcessing2Source) \
+ V(FPDataProcessing3Source) \
+ V(FPIntegerConvert) \
+ V(FPFixedPointConvert) \
+ V(Unallocated) \
+ V(Unimplemented)
+
+// The Visitor interface. Disassembler and simulator (and other tools)
+// must provide implementations for all of these functions.
+class DecoderVisitor {
+ public:
+ virtual ~DecoderVisitor() {}
+
+ #define DECLARE(A) virtual void Visit##A(Instruction* instr) = 0;
+ VISITOR_LIST(DECLARE)
+ #undef DECLARE
+};
+
+
+// A visitor that dispatches to a list of visitors.
+class DispatchingDecoderVisitor : public DecoderVisitor {
+ public:
+ DispatchingDecoderVisitor() {}
+ virtual ~DispatchingDecoderVisitor() {}
+
+ // Register a new visitor class with the decoder.
+ // Decode() will call the corresponding visitor method from all registered
+ // visitor classes when decoding reaches the leaf node of the instruction
+ // decode tree.
+  // Visitors are called in the order in which they appear in the list.
+  // A visitor can only be registered once; registering an already registered
+  // visitor updates its position in the list.
+ //
+ // d.AppendVisitor(V1);
+ // d.AppendVisitor(V2);
+  //   d.PrependVisitor(V2);        // Move V2 to the start of the list.
+ // d.InsertVisitorBefore(V3, V2);
+ // d.AppendVisitor(V4);
+ // d.AppendVisitor(V4); // No effect.
+ //
+ // d.Decode(i);
+ //
+  // will call the visitor methods of V3, V2, V1 and V4, in that order.
+ void AppendVisitor(DecoderVisitor* visitor);
+ void PrependVisitor(DecoderVisitor* visitor);
+ void InsertVisitorBefore(DecoderVisitor* new_visitor,
+ DecoderVisitor* registered_visitor);
+ void InsertVisitorAfter(DecoderVisitor* new_visitor,
+ DecoderVisitor* registered_visitor);
+
+ // Remove a previously registered visitor class from the list of visitors
+ // stored by the decoder.
+ void RemoveVisitor(DecoderVisitor* visitor);
+
+ #define DECLARE(A) void Visit##A(Instruction* instr);
+ VISITOR_LIST(DECLARE)
+ #undef DECLARE
+
+ private:
+ // Visitors are registered in a list.
+ std::list<DecoderVisitor*> visitors_;
+};
+
+
+template<typename V>
+class Decoder : public V {
+ public:
+ Decoder() {}
+ virtual ~Decoder() {}
+
+ // Top-level instruction decoder function. Decodes an instruction and calls
+ // the visitor functions registered with the Decoder class.
+ virtual void Decode(Instruction *instr);
+
+ private:
+ // Decode the PC relative addressing instruction, and call the corresponding
+ // visitors.
+ // On entry, instruction bits 27:24 = 0x0.
+ void DecodePCRelAddressing(Instruction* instr);
+
+ // Decode the add/subtract immediate instruction, and call the corresponding
+ // visitors.
+ // On entry, instruction bits 27:24 = 0x1.
+ void DecodeAddSubImmediate(Instruction* instr);
+
+ // Decode the branch, system command, and exception generation parts of
+ // the instruction tree, and call the corresponding visitors.
+ // On entry, instruction bits 27:24 = {0x4, 0x5, 0x6, 0x7}.
+ void DecodeBranchSystemException(Instruction* instr);
+
+ // Decode the load and store parts of the instruction tree, and call
+ // the corresponding visitors.
+ // On entry, instruction bits 27:24 = {0x8, 0x9, 0xC, 0xD}.
+ void DecodeLoadStore(Instruction* instr);
+
+ // Decode the logical immediate and move wide immediate parts of the
+ // instruction tree, and call the corresponding visitors.
+ // On entry, instruction bits 27:24 = 0x2.
+ void DecodeLogical(Instruction* instr);
+
+ // Decode the bitfield and extraction parts of the instruction tree,
+ // and call the corresponding visitors.
+ // On entry, instruction bits 27:24 = 0x3.
+ void DecodeBitfieldExtract(Instruction* instr);
+
+ // Decode the data processing parts of the instruction tree, and call the
+ // corresponding visitors.
+ // On entry, instruction bits 27:24 = {0x1, 0xA, 0xB}.
+ void DecodeDataProcessing(Instruction* instr);
+
+ // Decode the floating point parts of the instruction tree, and call the
+ // corresponding visitors.
+ // On entry, instruction bits 27:24 = {0xE, 0xF}.
+ void DecodeFP(Instruction* instr);
+
+ // Decode the Advanced SIMD (NEON) load/store part of the instruction tree,
+ // and call the corresponding visitors.
+ // On entry, instruction bits 29:25 = 0x6.
+ void DecodeAdvSIMDLoadStore(Instruction* instr);
+
+ // Decode the Advanced SIMD (NEON) data processing part of the instruction
+ // tree, and call the corresponding visitors.
+ // On entry, instruction bits 27:25 = 0x7.
+ void DecodeAdvSIMDDataProcessing(Instruction* instr);
+};
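+
+// Usage sketch (illustrative only; PrintDisassembler is one DecoderVisitor
+// implementation, declared in arm64/disasm-arm64.h):
+//
+//   Decoder<DispatchingDecoderVisitor> decoder;
+//   PrintDisassembler disasm(stdout);
+//   decoder.AppendVisitor(&disasm);
+//   decoder.Decode(reinterpret_cast<Instruction*>(pc));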
+
+
+} } // namespace v8::internal
+
+#endif // V8_ARM64_DECODER_ARM64_H_
diff --git a/deps/v8/src/arm64/deoptimizer-arm64.cc b/deps/v8/src/arm64/deoptimizer-arm64.cc
new file mode 100644
index 0000000000..93cb5176d2
--- /dev/null
+++ b/deps/v8/src/arm64/deoptimizer-arm64.cc
@@ -0,0 +1,388 @@
+// Copyright 2013 the V8 project authors. All rights reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following
+// disclaimer in the documentation and/or other materials provided
+// with the distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived
+// from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+#include "v8.h"
+
+#include "codegen.h"
+#include "deoptimizer.h"
+#include "full-codegen.h"
+#include "safepoint-table.h"
+
+
+namespace v8 {
+namespace internal {
+
+
+int Deoptimizer::patch_size() {
+ // Size of the code used to patch lazy bailout points.
+ // Patching is done by Deoptimizer::DeoptimizeFunction.
+ return 4 * kInstructionSize;
+}
+
+
+
+void Deoptimizer::PatchCodeForDeoptimization(Isolate* isolate, Code* code) {
+  // Invalidate the relocation information, as it will become stale after the
+  // code patching below and is not needed any more.
+ code->InvalidateRelocation();
+
+ // TODO(jkummerow): if (FLAG_zap_code_space), make the code object's
+ // entry sequence unusable (see other architectures).
+
+ DeoptimizationInputData* deopt_data =
+ DeoptimizationInputData::cast(code->deoptimization_data());
+ SharedFunctionInfo* shared =
+ SharedFunctionInfo::cast(deopt_data->SharedFunctionInfo());
+ shared->EvictFromOptimizedCodeMap(code, "deoptimized code");
+ Address code_start_address = code->instruction_start();
+#ifdef DEBUG
+ Address prev_call_address = NULL;
+#endif
+ // For each LLazyBailout instruction insert a call to the corresponding
+ // deoptimization entry.
+ for (int i = 0; i < deopt_data->DeoptCount(); i++) {
+ if (deopt_data->Pc(i)->value() == -1) continue;
+
+ Address call_address = code_start_address + deopt_data->Pc(i)->value();
+ Address deopt_entry = GetDeoptimizationEntry(isolate, i, LAZY);
+
+ PatchingAssembler patcher(call_address, patch_size() / kInstructionSize);
+ patcher.LoadLiteral(ip0, 2 * kInstructionSize);
+ patcher.blr(ip0);
+ patcher.dc64(reinterpret_cast<intptr_t>(deopt_entry));
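+    // The patched sequence is therefore:
+    //   ldr  ip0, [pc, #(2 * kInstructionSize)]
+    //   blr  ip0
+    //   <deopt entry address (64 bits)>
+    // and occupies exactly patch_size() bytes.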
+
+ ASSERT((prev_call_address == NULL) ||
+ (call_address >= prev_call_address + patch_size()));
+ ASSERT(call_address + patch_size() <= code->instruction_end());
+#ifdef DEBUG
+ prev_call_address = call_address;
+#endif
+ }
+}
+
+
+void Deoptimizer::FillInputFrame(Address tos, JavaScriptFrame* frame) {
+ // Set the register values. The values are not important as there are no
+ // callee saved registers in JavaScript frames, so all registers are
+ // spilled. Registers fp and sp are set to the correct values though.
+ for (int i = 0; i < Register::NumRegisters(); i++) {
+ input_->SetRegister(i, 0);
+ }
+
+ // TODO(all): Do we also need to set a value to csp?
+ input_->SetRegister(jssp.code(), reinterpret_cast<intptr_t>(frame->sp()));
+ input_->SetRegister(fp.code(), reinterpret_cast<intptr_t>(frame->fp()));
+
+ for (int i = 0; i < DoubleRegister::NumAllocatableRegisters(); i++) {
+ input_->SetDoubleRegister(i, 0.0);
+ }
+
+ // Fill the frame content from the actual data on the frame.
+ for (unsigned i = 0; i < input_->GetFrameSize(); i += kPointerSize) {
+ input_->SetFrameSlot(i, Memory::uint64_at(tos + i));
+ }
+}
+
+
+bool Deoptimizer::HasAlignmentPadding(JSFunction* function) {
+ // There is no dynamic alignment padding on ARM64 in the input frame.
+ return false;
+}
+
+
+void Deoptimizer::SetPlatformCompiledStubRegisters(
+ FrameDescription* output_frame, CodeStubInterfaceDescriptor* descriptor) {
+ ApiFunction function(descriptor->deoptimization_handler_);
+ ExternalReference xref(&function, ExternalReference::BUILTIN_CALL, isolate_);
+ intptr_t handler = reinterpret_cast<intptr_t>(xref.address());
+ int params = descriptor->GetHandlerParameterCount();
+ output_frame->SetRegister(x0.code(), params);
+ output_frame->SetRegister(x1.code(), handler);
+}
+
+
+void Deoptimizer::CopyDoubleRegisters(FrameDescription* output_frame) {
+ for (int i = 0; i < DoubleRegister::kMaxNumRegisters; ++i) {
+ double double_value = input_->GetDoubleRegister(i);
+ output_frame->SetDoubleRegister(i, double_value);
+ }
+}
+
+
+Code* Deoptimizer::NotifyStubFailureBuiltin() {
+ return isolate_->builtins()->builtin(Builtins::kNotifyStubFailureSaveDoubles);
+}
+
+
+#define __ masm()->
+
+void Deoptimizer::EntryGenerator::Generate() {
+ GeneratePrologue();
+
+ // TODO(all): This code needs to be revisited. We probably only need to save
+ // caller-saved registers here. Callee-saved registers can be stored directly
+ // in the input frame.
+
+ // Save all allocatable floating point registers.
+ CPURegList saved_fp_registers(CPURegister::kFPRegister, kDRegSizeInBits,
+ FPRegister::kAllocatableFPRegisters);
+ __ PushCPURegList(saved_fp_registers);
+
+  // We save all the registers except jssp, sp and lr.
+ CPURegList saved_registers(CPURegister::kRegister, kXRegSizeInBits, 0, 27);
+ saved_registers.Combine(fp);
+ __ PushCPURegList(saved_registers);
+
+ const int kSavedRegistersAreaSize =
+ (saved_registers.Count() * kXRegSize) +
+ (saved_fp_registers.Count() * kDRegSize);
+
+ // Floating point registers are saved on the stack above core registers.
+ const int kFPRegistersOffset = saved_registers.Count() * kXRegSize;
+
+ // Get the bailout id from the stack.
+ Register bailout_id = x2;
+ __ Peek(bailout_id, kSavedRegistersAreaSize);
+
+ Register code_object = x3;
+ Register fp_to_sp = x4;
+ // Get the address of the location in the code object. This is the return
+ // address for lazy deoptimization.
+ __ Mov(code_object, lr);
+ // Compute the fp-to-sp delta, and correct one word for bailout id.
+ __ Add(fp_to_sp, masm()->StackPointer(),
+ kSavedRegistersAreaSize + (1 * kPointerSize));
+ __ Sub(fp_to_sp, fp, fp_to_sp);
+
+ // Allocate a new deoptimizer object.
+ __ Ldr(x0, MemOperand(fp, JavaScriptFrameConstants::kFunctionOffset));
+ __ Mov(x1, type());
+ // Following arguments are already loaded:
+ // - x2: bailout id
+ // - x3: code object address
+ // - x4: fp-to-sp delta
+ __ Mov(x5, ExternalReference::isolate_address(isolate()));
+
+ {
+ // Call Deoptimizer::New().
+ AllowExternalCallThatCantCauseGC scope(masm());
+ __ CallCFunction(ExternalReference::new_deoptimizer_function(isolate()), 6);
+ }
+
+ // Preserve "deoptimizer" object in register x0.
+ Register deoptimizer = x0;
+
+ // Get the input frame descriptor pointer.
+ __ Ldr(x1, MemOperand(deoptimizer, Deoptimizer::input_offset()));
+
+ // Copy core registers into the input frame.
+ CPURegList copy_to_input = saved_registers;
+ for (int i = 0; i < saved_registers.Count(); i++) {
+ // TODO(all): Look for opportunities to optimize this by using ldp/stp.
+ __ Peek(x2, i * kPointerSize);
+ CPURegister current_reg = copy_to_input.PopLowestIndex();
+ int offset = (current_reg.code() * kPointerSize) +
+ FrameDescription::registers_offset();
+ __ Str(x2, MemOperand(x1, offset));
+ }
+
+ // Copy FP registers to the input frame.
+ for (int i = 0; i < saved_fp_registers.Count(); i++) {
+ // TODO(all): Look for opportunities to optimize this by using ldp/stp.
+ int dst_offset = FrameDescription::double_registers_offset() +
+ (i * kDoubleSize);
+ int src_offset = kFPRegistersOffset + (i * kDoubleSize);
+ __ Peek(x2, src_offset);
+ __ Str(x2, MemOperand(x1, dst_offset));
+ }
+
+ // Remove the bailout id and the saved registers from the stack.
+ __ Drop(1 + (kSavedRegistersAreaSize / kXRegSize));
+
+ // Compute a pointer to the unwinding limit in register x2; that is
+ // the first stack slot not part of the input frame.
+ Register unwind_limit = x2;
+ __ Ldr(unwind_limit, MemOperand(x1, FrameDescription::frame_size_offset()));
+ __ Add(unwind_limit, unwind_limit, __ StackPointer());
+
+ // Unwind the stack down to - but not including - the unwinding
+ // limit and copy the contents of the activation frame to the input
+ // frame description.
+ __ Add(x3, x1, FrameDescription::frame_content_offset());
+ Label pop_loop;
+ Label pop_loop_header;
+ __ B(&pop_loop_header);
+ __ Bind(&pop_loop);
+ __ Pop(x4);
+ __ Str(x4, MemOperand(x3, kPointerSize, PostIndex));
+ __ Bind(&pop_loop_header);
+ __ Cmp(unwind_limit, __ StackPointer());
+ __ B(ne, &pop_loop);
+
+ // Compute the output frame in the deoptimizer.
+ __ Push(x0); // Preserve deoptimizer object across call.
+
+ {
+ // Call Deoptimizer::ComputeOutputFrames().
+ AllowExternalCallThatCantCauseGC scope(masm());
+ __ CallCFunction(
+ ExternalReference::compute_output_frames_function(isolate()), 1);
+ }
+ __ Pop(x4); // Restore deoptimizer object (class Deoptimizer).
+
+ // Replace the current (input) frame with the output frames.
+ Label outer_push_loop, inner_push_loop,
+ outer_loop_header, inner_loop_header;
+ __ Ldrsw(x1, MemOperand(x4, Deoptimizer::output_count_offset()));
+ __ Ldr(x0, MemOperand(x4, Deoptimizer::output_offset()));
+ __ Add(x1, x0, Operand(x1, LSL, kPointerSizeLog2));
+ __ B(&outer_loop_header);
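+
+  // The outer loop walks the array of output FrameDescription pointers (x0 up
+  // to x1); the inner loop pushes each frame's contents onto the stack, from
+  // the highest offset down to offset zero.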
+
+ __ Bind(&outer_push_loop);
+ Register current_frame = x2;
+ __ Ldr(current_frame, MemOperand(x0, 0));
+ __ Ldr(x3, MemOperand(current_frame, FrameDescription::frame_size_offset()));
+ __ B(&inner_loop_header);
+
+ __ Bind(&inner_push_loop);
+ __ Sub(x3, x3, kPointerSize);
+ __ Add(x6, current_frame, x3);
+ __ Ldr(x7, MemOperand(x6, FrameDescription::frame_content_offset()));
+ __ Push(x7);
+ __ Bind(&inner_loop_header);
+ __ Cbnz(x3, &inner_push_loop);
+
+ __ Add(x0, x0, kPointerSize);
+ __ Bind(&outer_loop_header);
+ __ Cmp(x0, x1);
+ __ B(lt, &outer_push_loop);
+
+ __ Ldr(x1, MemOperand(x4, Deoptimizer::input_offset()));
+ ASSERT(!saved_fp_registers.IncludesAliasOf(crankshaft_fp_scratch) &&
+ !saved_fp_registers.IncludesAliasOf(fp_zero) &&
+ !saved_fp_registers.IncludesAliasOf(fp_scratch));
+ int src_offset = FrameDescription::double_registers_offset();
+ while (!saved_fp_registers.IsEmpty()) {
+ const CPURegister reg = saved_fp_registers.PopLowestIndex();
+ __ Ldr(reg, MemOperand(x1, src_offset));
+ src_offset += kDoubleSize;
+ }
+
+ // Push state from the last output frame.
+ __ Ldr(x6, MemOperand(current_frame, FrameDescription::state_offset()));
+ __ Push(x6);
+
+ // TODO(all): ARM copies a lot (if not all) of the last output frame onto the
+ // stack, then pops it all into registers. Here, we try to load it directly
+ // into the relevant registers. Is this correct? If so, we should improve the
+ // ARM code.
+
+  // TODO(all): This code needs to be revisited. We probably don't need to
+ // restore all the registers as fullcodegen does not keep live values in
+ // registers (note that at least fp must be restored though).
+
+ // Restore registers from the last output frame.
+ // Note that lr is not in the list of saved_registers and will be restored
+  // later. We can use it to hold the address of the last output frame while
+ // reloading the other registers.
+ ASSERT(!saved_registers.IncludesAliasOf(lr));
+ Register last_output_frame = lr;
+ __ Mov(last_output_frame, current_frame);
+
+ // We don't need to restore x7 as it will be clobbered later to hold the
+ // continuation address.
+ Register continuation = x7;
+ saved_registers.Remove(continuation);
+
+ while (!saved_registers.IsEmpty()) {
+ // TODO(all): Look for opportunities to optimize this by using ldp.
+ CPURegister current_reg = saved_registers.PopLowestIndex();
+ int offset = (current_reg.code() * kPointerSize) +
+ FrameDescription::registers_offset();
+ __ Ldr(current_reg, MemOperand(last_output_frame, offset));
+ }
+
+ __ Ldr(continuation, MemOperand(last_output_frame,
+ FrameDescription::continuation_offset()));
+ __ Ldr(lr, MemOperand(last_output_frame, FrameDescription::pc_offset()));
+ __ InitializeRootRegister();
+ __ Br(continuation);
+}
+
+
+// Size of an entry of the second level deopt table.
+// This is the code size generated by GeneratePrologue for one entry.
+const int Deoptimizer::table_entry_size_ = 2 * kInstructionSize;
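+// Each entry is a movz loading the entry id followed by a branch to the common
+// tail, i.e. exactly two instructions (see GeneratePrologue below).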
+
+
+void Deoptimizer::TableEntryGenerator::GeneratePrologue() {
+ UseScratchRegisterScope temps(masm());
+ Register entry_id = temps.AcquireX();
+
+ // Create a sequence of deoptimization entries.
+ // Note that registers are still live when jumping to an entry.
+ Label done;
+ {
+ InstructionAccurateScope scope(masm());
+
+    // The number of entries will never exceed kMaxNumberOfEntries.
+    // As long as kMaxNumberOfEntries is a valid 16-bit immediate, a movz
+    // instruction can be used to load the entry id.
+ ASSERT(is_uint16(Deoptimizer::kMaxNumberOfEntries));
+
+ for (int i = 0; i < count(); i++) {
+ int start = masm()->pc_offset();
+ USE(start);
+ __ movz(entry_id, i);
+ __ b(&done);
+ ASSERT(masm()->pc_offset() - start == table_entry_size_);
+ }
+ }
+ __ Bind(&done);
+ __ Push(entry_id);
+}
+
+
+void FrameDescription::SetCallerPc(unsigned offset, intptr_t value) {
+ SetFrameSlot(offset, value);
+}
+
+
+void FrameDescription::SetCallerFp(unsigned offset, intptr_t value) {
+ SetFrameSlot(offset, value);
+}
+
+
+void FrameDescription::SetCallerConstantPool(unsigned offset, intptr_t value) {
+ // No out-of-line constant pool support.
+ UNREACHABLE();
+}
+
+
+#undef __
+
+} } // namespace v8::internal
diff --git a/deps/v8/src/arm64/disasm-arm64.cc b/deps/v8/src/arm64/disasm-arm64.cc
new file mode 100644
index 0000000000..ed3e928796
--- /dev/null
+++ b/deps/v8/src/arm64/disasm-arm64.cc
@@ -0,0 +1,1856 @@
+// Copyright 2013 the V8 project authors. All rights reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following
+// disclaimer in the documentation and/or other materials provided
+// with the distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived
+// from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+#include <assert.h>
+#include <stdio.h>
+#include <stdarg.h>
+#include <string.h>
+
+#include "v8.h"
+
+#if V8_TARGET_ARCH_ARM64
+
+#include "disasm.h"
+#include "arm64/decoder-arm64-inl.h"
+#include "arm64/disasm-arm64.h"
+#include "macro-assembler.h"
+#include "platform.h"
+
+namespace v8 {
+namespace internal {
+
+
+Disassembler::Disassembler() {
+ buffer_size_ = 256;
+ buffer_ = reinterpret_cast<char*>(malloc(buffer_size_));
+ buffer_pos_ = 0;
+ own_buffer_ = true;
+}
+
+
+Disassembler::Disassembler(char* text_buffer, int buffer_size) {
+ buffer_size_ = buffer_size;
+ buffer_ = text_buffer;
+ buffer_pos_ = 0;
+ own_buffer_ = false;
+}
+
+
+Disassembler::~Disassembler() {
+ if (own_buffer_) {
+ free(buffer_);
+ }
+}
+
+
+char* Disassembler::GetOutput() {
+ return buffer_;
+}
+
+
+void Disassembler::VisitAddSubImmediate(Instruction* instr) {
+ bool rd_is_zr = RdIsZROrSP(instr);
+ bool stack_op = (rd_is_zr || RnIsZROrSP(instr)) &&
+ (instr->ImmAddSub() == 0);
+ const char *mnemonic = "";
+ const char *form = "'Rds, 'Rns, 'IAddSub";
+ const char *form_cmp = "'Rns, 'IAddSub";
+ const char *form_mov = "'Rds, 'Rns";
+
+ switch (instr->Mask(AddSubImmediateMask)) {
+ case ADD_w_imm:
+ case ADD_x_imm: {
+ mnemonic = "add";
+ if (stack_op) {
+ mnemonic = "mov";
+ form = form_mov;
+ }
+ break;
+ }
+ case ADDS_w_imm:
+ case ADDS_x_imm: {
+ mnemonic = "adds";
+ if (rd_is_zr) {
+ mnemonic = "cmn";
+ form = form_cmp;
+ }
+ break;
+ }
+ case SUB_w_imm:
+ case SUB_x_imm: mnemonic = "sub"; break;
+ case SUBS_w_imm:
+ case SUBS_x_imm: {
+ mnemonic = "subs";
+ if (rd_is_zr) {
+ mnemonic = "cmp";
+ form = form_cmp;
+ }
+ break;
+ }
+ default: UNREACHABLE();
+ }
+ Format(instr, mnemonic, form);
+}
+
+
+void Disassembler::VisitAddSubShifted(Instruction* instr) {
+ bool rd_is_zr = RdIsZROrSP(instr);
+ bool rn_is_zr = RnIsZROrSP(instr);
+ const char *mnemonic = "";
+ const char *form = "'Rd, 'Rn, 'Rm'HDP";
+ const char *form_cmp = "'Rn, 'Rm'HDP";
+ const char *form_neg = "'Rd, 'Rm'HDP";
+
+ switch (instr->Mask(AddSubShiftedMask)) {
+ case ADD_w_shift:
+ case ADD_x_shift: mnemonic = "add"; break;
+ case ADDS_w_shift:
+ case ADDS_x_shift: {
+ mnemonic = "adds";
+ if (rd_is_zr) {
+ mnemonic = "cmn";
+ form = form_cmp;
+ }
+ break;
+ }
+ case SUB_w_shift:
+ case SUB_x_shift: {
+ mnemonic = "sub";
+ if (rn_is_zr) {
+ mnemonic = "neg";
+ form = form_neg;
+ }
+ break;
+ }
+ case SUBS_w_shift:
+ case SUBS_x_shift: {
+ mnemonic = "subs";
+ if (rd_is_zr) {
+ mnemonic = "cmp";
+ form = form_cmp;
+ } else if (rn_is_zr) {
+ mnemonic = "negs";
+ form = form_neg;
+ }
+ break;
+ }
+ default: UNREACHABLE();
+ }
+ Format(instr, mnemonic, form);
+}
+
+
+void Disassembler::VisitAddSubExtended(Instruction* instr) {
+ bool rd_is_zr = RdIsZROrSP(instr);
+ const char *mnemonic = "";
+ Extend mode = static_cast<Extend>(instr->ExtendMode());
+ const char *form = ((mode == UXTX) || (mode == SXTX)) ?
+ "'Rds, 'Rns, 'Xm'Ext" : "'Rds, 'Rns, 'Wm'Ext";
+ const char *form_cmp = ((mode == UXTX) || (mode == SXTX)) ?
+ "'Rns, 'Xm'Ext" : "'Rns, 'Wm'Ext";
+
+ switch (instr->Mask(AddSubExtendedMask)) {
+ case ADD_w_ext:
+ case ADD_x_ext: mnemonic = "add"; break;
+ case ADDS_w_ext:
+ case ADDS_x_ext: {
+ mnemonic = "adds";
+ if (rd_is_zr) {
+ mnemonic = "cmn";
+ form = form_cmp;
+ }
+ break;
+ }
+ case SUB_w_ext:
+ case SUB_x_ext: mnemonic = "sub"; break;
+ case SUBS_w_ext:
+ case SUBS_x_ext: {
+ mnemonic = "subs";
+ if (rd_is_zr) {
+ mnemonic = "cmp";
+ form = form_cmp;
+ }
+ break;
+ }
+ default: UNREACHABLE();
+ }
+ Format(instr, mnemonic, form);
+}
+
+
+void Disassembler::VisitAddSubWithCarry(Instruction* instr) {
+ bool rn_is_zr = RnIsZROrSP(instr);
+ const char *mnemonic = "";
+ const char *form = "'Rd, 'Rn, 'Rm";
+ const char *form_neg = "'Rd, 'Rm";
+
+ switch (instr->Mask(AddSubWithCarryMask)) {
+ case ADC_w:
+ case ADC_x: mnemonic = "adc"; break;
+ case ADCS_w:
+ case ADCS_x: mnemonic = "adcs"; break;
+ case SBC_w:
+ case SBC_x: {
+ mnemonic = "sbc";
+ if (rn_is_zr) {
+ mnemonic = "ngc";
+ form = form_neg;
+ }
+ break;
+ }
+ case SBCS_w:
+ case SBCS_x: {
+ mnemonic = "sbcs";
+ if (rn_is_zr) {
+ mnemonic = "ngcs";
+ form = form_neg;
+ }
+ break;
+ }
+ default: UNREACHABLE();
+ }
+ Format(instr, mnemonic, form);
+}
+
+
+void Disassembler::VisitLogicalImmediate(Instruction* instr) {
+ bool rd_is_zr = RdIsZROrSP(instr);
+ bool rn_is_zr = RnIsZROrSP(instr);
+ const char *mnemonic = "";
+ const char *form = "'Rds, 'Rn, 'ITri";
+
+ if (instr->ImmLogical() == 0) {
+ // The immediate encoded in the instruction is not in the expected format.
+ Format(instr, "unallocated", "(LogicalImmediate)");
+ return;
+ }
+
+ switch (instr->Mask(LogicalImmediateMask)) {
+ case AND_w_imm:
+ case AND_x_imm: mnemonic = "and"; break;
+ case ORR_w_imm:
+ case ORR_x_imm: {
+ mnemonic = "orr";
+ unsigned reg_size = (instr->SixtyFourBits() == 1) ? kXRegSizeInBits
+ : kWRegSizeInBits;
+ if (rn_is_zr && !IsMovzMovnImm(reg_size, instr->ImmLogical())) {
+ mnemonic = "mov";
+ form = "'Rds, 'ITri";
+ }
+ break;
+ }
+ case EOR_w_imm:
+ case EOR_x_imm: mnemonic = "eor"; break;
+ case ANDS_w_imm:
+ case ANDS_x_imm: {
+ mnemonic = "ands";
+ if (rd_is_zr) {
+ mnemonic = "tst";
+ form = "'Rn, 'ITri";
+ }
+ break;
+ }
+ default: UNREACHABLE();
+ }
+ Format(instr, mnemonic, form);
+}
+
+
+bool Disassembler::IsMovzMovnImm(unsigned reg_size, uint64_t value) {
+ ASSERT((reg_size == kXRegSizeInBits) ||
+ ((reg_size == kWRegSizeInBits) && (value <= 0xffffffff)));
+
+ // Test for movz: 16-bits set at positions 0, 16, 32 or 48.
+ if (((value & 0xffffffffffff0000UL) == 0UL) ||
+ ((value & 0xffffffff0000ffffUL) == 0UL) ||
+ ((value & 0xffff0000ffffffffUL) == 0UL) ||
+ ((value & 0x0000ffffffffffffUL) == 0UL)) {
+ return true;
+ }
+
+ // Test for movn: NOT(16-bits set at positions 0, 16, 32 or 48).
+ if ((reg_size == kXRegSizeInBits) &&
+ (((value & 0xffffffffffff0000UL) == 0xffffffffffff0000UL) ||
+ ((value & 0xffffffff0000ffffUL) == 0xffffffff0000ffffUL) ||
+ ((value & 0xffff0000ffffffffUL) == 0xffff0000ffffffffUL) ||
+ ((value & 0x0000ffffffffffffUL) == 0x0000ffffffffffffUL))) {
+ return true;
+ }
+ if ((reg_size == kWRegSizeInBits) &&
+ (((value & 0xffff0000) == 0xffff0000) ||
+ ((value & 0x0000ffff) == 0x0000ffff))) {
+ return true;
+ }
+ return false;
+}
+
+
+void Disassembler::VisitLogicalShifted(Instruction* instr) {
+ bool rd_is_zr = RdIsZROrSP(instr);
+ bool rn_is_zr = RnIsZROrSP(instr);
+ const char *mnemonic = "";
+ const char *form = "'Rd, 'Rn, 'Rm'HLo";
+
+ switch (instr->Mask(LogicalShiftedMask)) {
+ case AND_w:
+ case AND_x: mnemonic = "and"; break;
+ case BIC_w:
+ case BIC_x: mnemonic = "bic"; break;
+ case EOR_w:
+ case EOR_x: mnemonic = "eor"; break;
+ case EON_w:
+ case EON_x: mnemonic = "eon"; break;
+ case BICS_w:
+ case BICS_x: mnemonic = "bics"; break;
+ case ANDS_w:
+ case ANDS_x: {
+ mnemonic = "ands";
+ if (rd_is_zr) {
+ mnemonic = "tst";
+ form = "'Rn, 'Rm'HLo";
+ }
+ break;
+ }
+ case ORR_w:
+ case ORR_x: {
+ mnemonic = "orr";
+ if (rn_is_zr && (instr->ImmDPShift() == 0) && (instr->ShiftDP() == LSL)) {
+ mnemonic = "mov";
+ form = "'Rd, 'Rm";
+ }
+ break;
+ }
+ case ORN_w:
+ case ORN_x: {
+ mnemonic = "orn";
+ if (rn_is_zr) {
+ mnemonic = "mvn";
+ form = "'Rd, 'Rm'HLo";
+ }
+ break;
+ }
+ default: UNREACHABLE();
+ }
+
+ Format(instr, mnemonic, form);
+}
+
+
+void Disassembler::VisitConditionalCompareRegister(Instruction* instr) {
+ const char *mnemonic = "";
+ const char *form = "'Rn, 'Rm, 'INzcv, 'Cond";
+
+ switch (instr->Mask(ConditionalCompareRegisterMask)) {
+ case CCMN_w:
+ case CCMN_x: mnemonic = "ccmn"; break;
+ case CCMP_w:
+ case CCMP_x: mnemonic = "ccmp"; break;
+ default: UNREACHABLE();
+ }
+ Format(instr, mnemonic, form);
+}
+
+
+void Disassembler::VisitConditionalCompareImmediate(Instruction* instr) {
+ const char *mnemonic = "";
+ const char *form = "'Rn, 'IP, 'INzcv, 'Cond";
+
+ switch (instr->Mask(ConditionalCompareImmediateMask)) {
+ case CCMN_w_imm:
+ case CCMN_x_imm: mnemonic = "ccmn"; break;
+ case CCMP_w_imm:
+ case CCMP_x_imm: mnemonic = "ccmp"; break;
+ default: UNREACHABLE();
+ }
+ Format(instr, mnemonic, form);
+}
+
+
+void Disassembler::VisitConditionalSelect(Instruction* instr) {
+ bool rnm_is_zr = (RnIsZROrSP(instr) && RmIsZROrSP(instr));
+ bool rn_is_rm = (instr->Rn() == instr->Rm());
+ const char *mnemonic = "";
+ const char *form = "'Rd, 'Rn, 'Rm, 'Cond";
+ const char *form_test = "'Rd, 'CInv";
+ const char *form_update = "'Rd, 'Rn, 'CInv";
+
+ Condition cond = static_cast<Condition>(instr->Condition());
+ bool invertible_cond = (cond != al) && (cond != nv);
+
+ switch (instr->Mask(ConditionalSelectMask)) {
+ case CSEL_w:
+ case CSEL_x: mnemonic = "csel"; break;
+ case CSINC_w:
+ case CSINC_x: {
+ mnemonic = "csinc";
+ if (rnm_is_zr && invertible_cond) {
+ mnemonic = "cset";
+ form = form_test;
+ } else if (rn_is_rm && invertible_cond) {
+ mnemonic = "cinc";
+ form = form_update;
+ }
+ break;
+ }
+ case CSINV_w:
+ case CSINV_x: {
+ mnemonic = "csinv";
+ if (rnm_is_zr && invertible_cond) {
+ mnemonic = "csetm";
+ form = form_test;
+ } else if (rn_is_rm && invertible_cond) {
+ mnemonic = "cinv";
+ form = form_update;
+ }
+ break;
+ }
+ case CSNEG_w:
+ case CSNEG_x: {
+ mnemonic = "csneg";
+ if (rn_is_rm && invertible_cond) {
+ mnemonic = "cneg";
+ form = form_update;
+ }
+ break;
+ }
+ default: UNREACHABLE();
+ }
+ Format(instr, mnemonic, form);
+}
+
+
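+// SBFM, UBFM and BFM are printed via their aliases where one applies: for
+// example, an SBFM with r == 0 and s == 7 is shown as "sxtb", and a UBFM
+// whose s field equals the register width minus one is shown as "lsr".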
+void Disassembler::VisitBitfield(Instruction* instr) {
+ unsigned s = instr->ImmS();
+ unsigned r = instr->ImmR();
+ unsigned rd_size_minus_1 =
+ ((instr->SixtyFourBits() == 1) ? kXRegSizeInBits : kWRegSizeInBits) - 1;
+ const char *mnemonic = "";
+ const char *form = "";
+ const char *form_shift_right = "'Rd, 'Rn, 'IBr";
+ const char *form_extend = "'Rd, 'Wn";
+ const char *form_bfiz = "'Rd, 'Rn, 'IBZ-r, 'IBs+1";
+ const char *form_bfx = "'Rd, 'Rn, 'IBr, 'IBs-r+1";
+ const char *form_lsl = "'Rd, 'Rn, 'IBZ-r";
+
+ switch (instr->Mask(BitfieldMask)) {
+ case SBFM_w:
+ case SBFM_x: {
+ mnemonic = "sbfx";
+ form = form_bfx;
+ if (r == 0) {
+ form = form_extend;
+ if (s == 7) {
+ mnemonic = "sxtb";
+ } else if (s == 15) {
+ mnemonic = "sxth";
+ } else if ((s == 31) && (instr->SixtyFourBits() == 1)) {
+ mnemonic = "sxtw";
+ } else {
+ form = form_bfx;
+ }
+ } else if (s == rd_size_minus_1) {
+ mnemonic = "asr";
+ form = form_shift_right;
+ } else if (s < r) {
+ mnemonic = "sbfiz";
+ form = form_bfiz;
+ }
+ break;
+ }
+ case UBFM_w:
+ case UBFM_x: {
+ mnemonic = "ubfx";
+ form = form_bfx;
+ if (r == 0) {
+ form = form_extend;
+ if (s == 7) {
+ mnemonic = "uxtb";
+ } else if (s == 15) {
+ mnemonic = "uxth";
+ } else {
+ form = form_bfx;
+ }
+ }
+ if (s == rd_size_minus_1) {
+ mnemonic = "lsr";
+ form = form_shift_right;
+ } else if (r == s + 1) {
+ mnemonic = "lsl";
+ form = form_lsl;
+ } else if (s < r) {
+ mnemonic = "ubfiz";
+ form = form_bfiz;
+ }
+ break;
+ }
+ case BFM_w:
+ case BFM_x: {
+ mnemonic = "bfxil";
+ form = form_bfx;
+ if (s < r) {
+ mnemonic = "bfi";
+ form = form_bfiz;
+ }
+ }
+ }
+ Format(instr, mnemonic, form);
+}
+
+
+void Disassembler::VisitExtract(Instruction* instr) {
+ const char *mnemonic = "";
+ const char *form = "'Rd, 'Rn, 'Rm, 'IExtract";
+
+ switch (instr->Mask(ExtractMask)) {
+ case EXTR_w:
+ case EXTR_x: {
+ if (instr->Rn() == instr->Rm()) {
+ mnemonic = "ror";
+ form = "'Rd, 'Rn, 'IExtract";
+ } else {
+ mnemonic = "extr";
+ }
+ break;
+ }
+ default: UNREACHABLE();
+ }
+ Format(instr, mnemonic, form);
+}
+
+
+void Disassembler::VisitPCRelAddressing(Instruction* instr) {
+ switch (instr->Mask(PCRelAddressingMask)) {
+ case ADR: Format(instr, "adr", "'Xd, 'AddrPCRelByte"); break;
+ // ADRP is not implemented.
+ default: Format(instr, "unimplemented", "(PCRelAddressing)");
+ }
+}
+
+
+void Disassembler::VisitConditionalBranch(Instruction* instr) {
+ switch (instr->Mask(ConditionalBranchMask)) {
+ case B_cond: Format(instr, "b.'CBrn", "'BImmCond"); break;
+ default: UNREACHABLE();
+ }
+}
+
+
+void Disassembler::VisitUnconditionalBranchToRegister(Instruction* instr) {
+ const char *mnemonic = "unimplemented";
+ const char *form = "'Xn";
+
+ switch (instr->Mask(UnconditionalBranchToRegisterMask)) {
+ case BR: mnemonic = "br"; break;
+ case BLR: mnemonic = "blr"; break;
+ case RET: {
+ mnemonic = "ret";
+ if (instr->Rn() == kLinkRegCode) {
+ form = NULL;
+ }
+ break;
+ }
+ default: form = "(UnconditionalBranchToRegister)";
+ }
+ Format(instr, mnemonic, form);
+}
+
+
+void Disassembler::VisitUnconditionalBranch(Instruction* instr) {
+ const char *mnemonic = "";
+ const char *form = "'BImmUncn";
+
+ switch (instr->Mask(UnconditionalBranchMask)) {
+ case B: mnemonic = "b"; break;
+ case BL: mnemonic = "bl"; break;
+ default: UNREACHABLE();
+ }
+ Format(instr, mnemonic, form);
+}
+
+
+void Disassembler::VisitDataProcessing1Source(Instruction* instr) {
+ const char *mnemonic = "";
+ const char *form = "'Rd, 'Rn";
+
+ switch (instr->Mask(DataProcessing1SourceMask)) {
+ #define FORMAT(A, B) \
+ case A##_w: \
+ case A##_x: mnemonic = B; break;
+ FORMAT(RBIT, "rbit");
+ FORMAT(REV16, "rev16");
+ FORMAT(REV, "rev");
+ FORMAT(CLZ, "clz");
+ FORMAT(CLS, "cls");
+ #undef FORMAT
+ case REV32_x: mnemonic = "rev32"; break;
+ default: UNREACHABLE();
+ }
+ Format(instr, mnemonic, form);
+}
+
+
+void Disassembler::VisitDataProcessing2Source(Instruction* instr) {
+ const char *mnemonic = "unimplemented";
+ const char *form = "'Rd, 'Rn, 'Rm";
+
+ switch (instr->Mask(DataProcessing2SourceMask)) {
+ #define FORMAT(A, B) \
+ case A##_w: \
+ case A##_x: mnemonic = B; break;
+ FORMAT(UDIV, "udiv");
+ FORMAT(SDIV, "sdiv");
+ FORMAT(LSLV, "lsl");
+ FORMAT(LSRV, "lsr");
+ FORMAT(ASRV, "asr");
+ FORMAT(RORV, "ror");
+ #undef FORMAT
+ default: form = "(DataProcessing2Source)";
+ }
+ Format(instr, mnemonic, form);
+}
+
+
+void Disassembler::VisitDataProcessing3Source(Instruction* instr) {
+ bool ra_is_zr = RaIsZROrSP(instr);
+ const char *mnemonic = "";
+ const char *form = "'Xd, 'Wn, 'Wm, 'Xa";
+ const char *form_rrr = "'Rd, 'Rn, 'Rm";
+ const char *form_rrrr = "'Rd, 'Rn, 'Rm, 'Ra";
+ const char *form_xww = "'Xd, 'Wn, 'Wm";
+ const char *form_xxx = "'Xd, 'Xn, 'Xm";
+
+ switch (instr->Mask(DataProcessing3SourceMask)) {
+ case MADD_w:
+ case MADD_x: {
+ mnemonic = "madd";
+ form = form_rrrr;
+ if (ra_is_zr) {
+ mnemonic = "mul";
+ form = form_rrr;
+ }
+ break;
+ }
+ case MSUB_w:
+ case MSUB_x: {
+ mnemonic = "msub";
+ form = form_rrrr;
+ if (ra_is_zr) {
+ mnemonic = "mneg";
+ form = form_rrr;
+ }
+ break;
+ }
+ case SMADDL_x: {
+ mnemonic = "smaddl";
+ if (ra_is_zr) {
+ mnemonic = "smull";
+ form = form_xww;
+ }
+ break;
+ }
+ case SMSUBL_x: {
+ mnemonic = "smsubl";
+ if (ra_is_zr) {
+ mnemonic = "smnegl";
+ form = form_xww;
+ }
+ break;
+ }
+ case UMADDL_x: {
+ mnemonic = "umaddl";
+ if (ra_is_zr) {
+ mnemonic = "umull";
+ form = form_xww;
+ }
+ break;
+ }
+ case UMSUBL_x: {
+ mnemonic = "umsubl";
+ if (ra_is_zr) {
+ mnemonic = "umnegl";
+ form = form_xww;
+ }
+ break;
+ }
+ case SMULH_x: {
+ mnemonic = "smulh";
+ form = form_xxx;
+ break;
+ }
+ case UMULH_x: {
+ mnemonic = "umulh";
+ form = form_xxx;
+ break;
+ }
+ default: UNREACHABLE();
+ }
+ Format(instr, mnemonic, form);
+}
+
+
+void Disassembler::VisitCompareBranch(Instruction* instr) {
+ const char *mnemonic = "";
+ const char *form = "'Rt, 'BImmCmpa";
+
+ switch (instr->Mask(CompareBranchMask)) {
+ case CBZ_w:
+ case CBZ_x: mnemonic = "cbz"; break;
+ case CBNZ_w:
+ case CBNZ_x: mnemonic = "cbnz"; break;
+ default: UNREACHABLE();
+ }
+ Format(instr, mnemonic, form);
+}
+
+
+void Disassembler::VisitTestBranch(Instruction* instr) {
+ const char *mnemonic = "";
+ // If the top bit of the immediate is clear, the tested register is
+ // disassembled as Wt, otherwise Xt. As the top bit of the immediate is
+ // encoded in bit 31 of the instruction, we can reuse the Rt form, which
+ // uses bit 31 (normally "sf") to choose the register size.
+ const char *form = "'Rt, 'IS, 'BImmTest";
+
+ switch (instr->Mask(TestBranchMask)) {
+ case TBZ: mnemonic = "tbz"; break;
+ case TBNZ: mnemonic = "tbnz"; break;
+ default: UNREACHABLE();
+ }
+ Format(instr, mnemonic, form);
+}
+
+
+void Disassembler::VisitMoveWideImmediate(Instruction* instr) {
+ const char *mnemonic = "";
+ const char *form = "'Rd, 'IMoveImm";
+
+ // Print the shift separately for movk, to make it clear which half word will
+ // be overwritten. Movn and movz print the computed immediate, which includes
+ // shift calculation.
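+ // For example, "movk x0, #0x1234, lsl #16" keeps the shift visible, whereas
+ // the equivalent immediate in a movz is printed as "movz x0, #0x12340000".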
+ switch (instr->Mask(MoveWideImmediateMask)) {
+ case MOVN_w:
+ case MOVN_x: mnemonic = "movn"; break;
+ case MOVZ_w:
+ case MOVZ_x: mnemonic = "movz"; break;
+ case MOVK_w:
+ case MOVK_x: mnemonic = "movk"; form = "'Rd, 'IMoveLSL"; break;
+ default: UNREACHABLE();
+ }
+ Format(instr, mnemonic, form);
+}
+
+
+#define LOAD_STORE_LIST(V) \
+ V(STRB_w, "strb", "'Wt") \
+ V(STRH_w, "strh", "'Wt") \
+ V(STR_w, "str", "'Wt") \
+ V(STR_x, "str", "'Xt") \
+ V(LDRB_w, "ldrb", "'Wt") \
+ V(LDRH_w, "ldrh", "'Wt") \
+ V(LDR_w, "ldr", "'Wt") \
+ V(LDR_x, "ldr", "'Xt") \
+ V(LDRSB_x, "ldrsb", "'Xt") \
+ V(LDRSH_x, "ldrsh", "'Xt") \
+ V(LDRSW_x, "ldrsw", "'Xt") \
+ V(LDRSB_w, "ldrsb", "'Wt") \
+ V(LDRSH_w, "ldrsh", "'Wt") \
+ V(STR_s, "str", "'St") \
+ V(STR_d, "str", "'Dt") \
+ V(LDR_s, "ldr", "'St") \
+ V(LDR_d, "ldr", "'Dt")
+
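+// Each LOAD_STORE_LIST entry expands to one case per addressing mode; for
+// instance, LS_PREINDEX below turns V(STRB_w, "strb", "'Wt") into:
+//   case STRB_w_pre: mnemonic = "strb"; form = "'Wt, ['Xns'ILS]!"; break;
+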
+void Disassembler::VisitLoadStorePreIndex(Instruction* instr) {
+ const char *mnemonic = "unimplemented";
+ const char *form = "(LoadStorePreIndex)";
+
+ switch (instr->Mask(LoadStorePreIndexMask)) {
+ #define LS_PREINDEX(A, B, C) \
+ case A##_pre: mnemonic = B; form = C ", ['Xns'ILS]!"; break;
+ LOAD_STORE_LIST(LS_PREINDEX)
+ #undef LS_PREINDEX
+ }
+ Format(instr, mnemonic, form);
+}
+
+
+void Disassembler::VisitLoadStorePostIndex(Instruction* instr) {
+ const char *mnemonic = "unimplemented";
+ const char *form = "(LoadStorePostIndex)";
+
+ switch (instr->Mask(LoadStorePostIndexMask)) {
+ #define LS_POSTINDEX(A, B, C) \
+ case A##_post: mnemonic = B; form = C ", ['Xns]'ILS"; break;
+ LOAD_STORE_LIST(LS_POSTINDEX)
+ #undef LS_POSTINDEX
+ }
+ Format(instr, mnemonic, form);
+}
+
+
+void Disassembler::VisitLoadStoreUnsignedOffset(Instruction* instr) {
+ const char *mnemonic = "unimplemented";
+ const char *form = "(LoadStoreUnsignedOffset)";
+
+ switch (instr->Mask(LoadStoreUnsignedOffsetMask)) {
+ #define LS_UNSIGNEDOFFSET(A, B, C) \
+ case A##_unsigned: mnemonic = B; form = C ", ['Xns'ILU]"; break;
+ LOAD_STORE_LIST(LS_UNSIGNEDOFFSET)
+ #undef LS_UNSIGNEDOFFSET
+ case PRFM_unsigned: mnemonic = "prfm"; form = "'PrefOp, ['Xn'ILU]";
+ }
+ Format(instr, mnemonic, form);
+}
+
+
+void Disassembler::VisitLoadStoreRegisterOffset(Instruction* instr) {
+ const char *mnemonic = "unimplemented";
+ const char *form = "(LoadStoreRegisterOffset)";
+
+ switch (instr->Mask(LoadStoreRegisterOffsetMask)) {
+ #define LS_REGISTEROFFSET(A, B, C) \
+ case A##_reg: mnemonic = B; form = C ", ['Xns, 'Offsetreg]"; break;
+ LOAD_STORE_LIST(LS_REGISTEROFFSET)
+ #undef LS_REGISTEROFFSET
+ case PRFM_reg: mnemonic = "prfm"; form = "'PrefOp, ['Xns, 'Offsetreg]";
+ }
+ Format(instr, mnemonic, form);
+}
+
+
+void Disassembler::VisitLoadStoreUnscaledOffset(Instruction* instr) {
+ const char *mnemonic = "unimplemented";
+ const char *form = "'Wt, ['Xns'ILS]";
+ const char *form_x = "'Xt, ['Xns'ILS]";
+ const char *form_s = "'St, ['Xns'ILS]";
+ const char *form_d = "'Dt, ['Xns'ILS]";
+
+ switch (instr->Mask(LoadStoreUnscaledOffsetMask)) {
+ case STURB_w: mnemonic = "sturb"; break;
+ case STURH_w: mnemonic = "sturh"; break;
+ case STUR_w: mnemonic = "stur"; break;
+ case STUR_x: mnemonic = "stur"; form = form_x; break;
+ case STUR_s: mnemonic = "stur"; form = form_s; break;
+ case STUR_d: mnemonic = "stur"; form = form_d; break;
+ case LDURB_w: mnemonic = "ldurb"; break;
+ case LDURH_w: mnemonic = "ldurh"; break;
+ case LDUR_w: mnemonic = "ldur"; break;
+ case LDUR_x: mnemonic = "ldur"; form = form_x; break;
+ case LDUR_s: mnemonic = "ldur"; form = form_s; break;
+ case LDUR_d: mnemonic = "ldur"; form = form_d; break;
+ case LDURSB_x: form = form_x; // Fall through.
+ case LDURSB_w: mnemonic = "ldursb"; break;
+ case LDURSH_x: form = form_x; // Fall through.
+ case LDURSH_w: mnemonic = "ldursh"; break;
+ case LDURSW_x: mnemonic = "ldursw"; form = form_x; break;
+ default: form = "(LoadStoreUnscaledOffset)";
+ }
+ Format(instr, mnemonic, form);
+}
+
+
+void Disassembler::VisitLoadLiteral(Instruction* instr) {
+ const char *mnemonic = "ldr";
+ const char *form = "(LoadLiteral)";
+
+ switch (instr->Mask(LoadLiteralMask)) {
+ case LDR_w_lit: form = "'Wt, 'ILLiteral 'LValue"; break;
+ case LDR_x_lit: form = "'Xt, 'ILLiteral 'LValue"; break;
+ case LDR_s_lit: form = "'St, 'ILLiteral 'LValue"; break;
+ case LDR_d_lit: form = "'Dt, 'ILLiteral 'LValue"; break;
+ default: mnemonic = "unimplemented";
+ }
+ Format(instr, mnemonic, form);
+}
+
+
+#define LOAD_STORE_PAIR_LIST(V) \
+ V(STP_w, "stp", "'Wt, 'Wt2", "4") \
+ V(LDP_w, "ldp", "'Wt, 'Wt2", "4") \
+ V(LDPSW_x, "ldpsw", "'Xt, 'Xt2", "4") \
+ V(STP_x, "stp", "'Xt, 'Xt2", "8") \
+ V(LDP_x, "ldp", "'Xt, 'Xt2", "8") \
+ V(STP_s, "stp", "'St, 'St2", "4") \
+ V(LDP_s, "ldp", "'St, 'St2", "4") \
+ V(STP_d, "stp", "'Dt, 'Dt2", "8") \
+ V(LDP_d, "ldp", "'Dt, 'Dt2", "8")
+
+void Disassembler::VisitLoadStorePairPostIndex(Instruction* instr) {
+ const char *mnemonic = "unimplemented";
+ const char *form = "(LoadStorePairPostIndex)";
+
+ switch (instr->Mask(LoadStorePairPostIndexMask)) {
+ #define LSP_POSTINDEX(A, B, C, D) \
+ case A##_post: mnemonic = B; form = C ", ['Xns]'ILP" D; break;
+ LOAD_STORE_PAIR_LIST(LSP_POSTINDEX)
+ #undef LSP_POSTINDEX
+ }
+ Format(instr, mnemonic, form);
+}
+
+
+void Disassembler::VisitLoadStorePairPreIndex(Instruction* instr) {
+ const char *mnemonic = "unimplemented";
+ const char *form = "(LoadStorePairPreIndex)";
+
+ switch (instr->Mask(LoadStorePairPreIndexMask)) {
+ #define LSP_PREINDEX(A, B, C, D) \
+ case A##_pre: mnemonic = B; form = C ", ['Xns'ILP" D "]!"; break;
+ LOAD_STORE_PAIR_LIST(LSP_PREINDEX)
+ #undef LSP_PREINDEX
+ }
+ Format(instr, mnemonic, form);
+}
+
+
+void Disassembler::VisitLoadStorePairOffset(Instruction* instr) {
+ const char *mnemonic = "unimplemented";
+ const char *form = "(LoadStorePairOffset)";
+
+ switch (instr->Mask(LoadStorePairOffsetMask)) {
+ #define LSP_OFFSET(A, B, C, D) \
+ case A##_off: mnemonic = B; form = C ", ['Xns'ILP" D "]"; break;
+ LOAD_STORE_PAIR_LIST(LSP_OFFSET)
+ #undef LSP_OFFSET
+ }
+ Format(instr, mnemonic, form);
+}
+
+
+void Disassembler::VisitLoadStorePairNonTemporal(Instruction* instr) {
+ const char *mnemonic = "unimplemented";
+ const char *form;
+
+ switch (instr->Mask(LoadStorePairNonTemporalMask)) {
+ case STNP_w: mnemonic = "stnp"; form = "'Wt, 'Wt2, ['Xns'ILP4]"; break;
+ case LDNP_w: mnemonic = "ldnp"; form = "'Wt, 'Wt2, ['Xns'ILP4]"; break;
+ case STNP_x: mnemonic = "stnp"; form = "'Xt, 'Xt2, ['Xns'ILP8]"; break;
+ case LDNP_x: mnemonic = "ldnp"; form = "'Xt, 'Xt2, ['Xns'ILP8]"; break;
+ case STNP_s: mnemonic = "stnp"; form = "'St, 'St2, ['Xns'ILP4]"; break;
+ case LDNP_s: mnemonic = "ldnp"; form = "'St, 'St2, ['Xns'ILP4]"; break;
+ case STNP_d: mnemonic = "stnp"; form = "'Dt, 'Dt2, ['Xns'ILP8]"; break;
+ case LDNP_d: mnemonic = "ldnp"; form = "'Dt, 'Dt2, ['Xns'ILP8]"; break;
+ default: form = "(LoadStorePairNonTemporal)";
+ }
+ Format(instr, mnemonic, form);
+}
+
+
+void Disassembler::VisitFPCompare(Instruction* instr) {
+ const char *mnemonic = "unimplemented";
+ const char *form = "'Fn, 'Fm";
+ const char *form_zero = "'Fn, #0.0";
+
+ switch (instr->Mask(FPCompareMask)) {
+ case FCMP_s_zero:
+ case FCMP_d_zero: form = form_zero; // Fall through.
+ case FCMP_s:
+ case FCMP_d: mnemonic = "fcmp"; break;
+ default: form = "(FPCompare)";
+ }
+ Format(instr, mnemonic, form);
+}
+
+
+void Disassembler::VisitFPConditionalCompare(Instruction* instr) {
+ const char *mnemonic = "unimplemented";
+ const char *form = "'Fn, 'Fm, 'INzcv, 'Cond";
+
+ switch (instr->Mask(FPConditionalCompareMask)) {
+ case FCCMP_s:
+ case FCCMP_d: mnemonic = "fccmp"; break;
+ case FCCMPE_s:
+ case FCCMPE_d: mnemonic = "fccmpe"; break;
+ default: form = "(FPConditionalCompare)";
+ }
+ Format(instr, mnemonic, form);
+}
+
+
+void Disassembler::VisitFPConditionalSelect(Instruction* instr) {
+ const char *mnemonic = "";
+ const char *form = "'Fd, 'Fn, 'Fm, 'Cond";
+
+ switch (instr->Mask(FPConditionalSelectMask)) {
+ case FCSEL_s:
+ case FCSEL_d: mnemonic = "fcsel"; break;
+ default: UNREACHABLE();
+ }
+ Format(instr, mnemonic, form);
+}
+
+
+void Disassembler::VisitFPDataProcessing1Source(Instruction* instr) {
+ const char *mnemonic = "unimplemented";
+ const char *form = "'Fd, 'Fn";
+
+ switch (instr->Mask(FPDataProcessing1SourceMask)) {
+ #define FORMAT(A, B) \
+ case A##_s: \
+ case A##_d: mnemonic = B; break;
+ FORMAT(FMOV, "fmov");
+ FORMAT(FABS, "fabs");
+ FORMAT(FNEG, "fneg");
+ FORMAT(FSQRT, "fsqrt");
+ FORMAT(FRINTN, "frintn");
+ FORMAT(FRINTP, "frintp");
+ FORMAT(FRINTM, "frintm");
+ FORMAT(FRINTZ, "frintz");
+ FORMAT(FRINTA, "frinta");
+ FORMAT(FRINTX, "frintx");
+ FORMAT(FRINTI, "frinti");
+ #undef FORMAT
+ case FCVT_ds: mnemonic = "fcvt"; form = "'Dd, 'Sn"; break;
+ case FCVT_sd: mnemonic = "fcvt"; form = "'Sd, 'Dn"; break;
+ default: form = "(FPDataProcessing1Source)";
+ }
+ Format(instr, mnemonic, form);
+}
+
+
+void Disassembler::VisitFPDataProcessing2Source(Instruction* instr) {
+ const char *mnemonic = "";
+ const char *form = "'Fd, 'Fn, 'Fm";
+
+ switch (instr->Mask(FPDataProcessing2SourceMask)) {
+ #define FORMAT(A, B) \
+ case A##_s: \
+ case A##_d: mnemonic = B; break;
+ FORMAT(FMUL, "fmul");
+ FORMAT(FDIV, "fdiv");
+ FORMAT(FADD, "fadd");
+ FORMAT(FSUB, "fsub");
+ FORMAT(FMAX, "fmax");
+ FORMAT(FMIN, "fmin");
+ FORMAT(FMAXNM, "fmaxnm");
+ FORMAT(FMINNM, "fminnm");
+ FORMAT(FNMUL, "fnmul");
+ #undef FORMAT
+ default: UNREACHABLE();
+ }
+ Format(instr, mnemonic, form);
+}
+
+
+void Disassembler::VisitFPDataProcessing3Source(Instruction* instr) {
+ const char *mnemonic = "";
+ const char *form = "'Fd, 'Fn, 'Fm, 'Fa";
+
+ switch (instr->Mask(FPDataProcessing3SourceMask)) {
+ #define FORMAT(A, B) \
+ case A##_s: \
+ case A##_d: mnemonic = B; break;
+ FORMAT(FMADD, "fmadd");
+ FORMAT(FMSUB, "fmsub");
+ FORMAT(FNMADD, "fnmadd");
+ FORMAT(FNMSUB, "fnmsub");
+ #undef FORMAT
+ default: UNREACHABLE();
+ }
+ Format(instr, mnemonic, form);
+}
+
+
+void Disassembler::VisitFPImmediate(Instruction* instr) {
+ const char *mnemonic = "";
+ const char *form = "(FPImmediate)";
+
+ switch (instr->Mask(FPImmediateMask)) {
+ case FMOV_s_imm: mnemonic = "fmov"; form = "'Sd, 'IFPSingle"; break;
+ case FMOV_d_imm: mnemonic = "fmov"; form = "'Dd, 'IFPDouble"; break;
+ default: UNREACHABLE();
+ }
+ Format(instr, mnemonic, form);
+}
+
+
+void Disassembler::VisitFPIntegerConvert(Instruction* instr) {
+ const char *mnemonic = "unimplemented";
+ const char *form = "(FPIntegerConvert)";
+ const char *form_rf = "'Rd, 'Fn";
+ const char *form_fr = "'Fd, 'Rn";
+
+ switch (instr->Mask(FPIntegerConvertMask)) {
+ case FMOV_ws:
+ case FMOV_xd: mnemonic = "fmov"; form = form_rf; break;
+ case FMOV_sw:
+ case FMOV_dx: mnemonic = "fmov"; form = form_fr; break;
+ case FCVTAS_ws:
+ case FCVTAS_xs:
+ case FCVTAS_wd:
+ case FCVTAS_xd: mnemonic = "fcvtas"; form = form_rf; break;
+ case FCVTAU_ws:
+ case FCVTAU_xs:
+ case FCVTAU_wd:
+ case FCVTAU_xd: mnemonic = "fcvtau"; form = form_rf; break;
+ case FCVTMS_ws:
+ case FCVTMS_xs:
+ case FCVTMS_wd:
+ case FCVTMS_xd: mnemonic = "fcvtms"; form = form_rf; break;
+ case FCVTMU_ws:
+ case FCVTMU_xs:
+ case FCVTMU_wd:
+ case FCVTMU_xd: mnemonic = "fcvtmu"; form = form_rf; break;
+ case FCVTNS_ws:
+ case FCVTNS_xs:
+ case FCVTNS_wd:
+ case FCVTNS_xd: mnemonic = "fcvtns"; form = form_rf; break;
+ case FCVTNU_ws:
+ case FCVTNU_xs:
+ case FCVTNU_wd:
+ case FCVTNU_xd: mnemonic = "fcvtnu"; form = form_rf; break;
+ case FCVTZU_xd:
+ case FCVTZU_ws:
+ case FCVTZU_wd:
+ case FCVTZU_xs: mnemonic = "fcvtzu"; form = form_rf; break;
+ case FCVTZS_xd:
+ case FCVTZS_wd:
+ case FCVTZS_xs:
+ case FCVTZS_ws: mnemonic = "fcvtzs"; form = form_rf; break;
+ case SCVTF_sw:
+ case SCVTF_sx:
+ case SCVTF_dw:
+ case SCVTF_dx: mnemonic = "scvtf"; form = form_fr; break;
+ case UCVTF_sw:
+ case UCVTF_sx:
+ case UCVTF_dw:
+ case UCVTF_dx: mnemonic = "ucvtf"; form = form_fr; break;
+ }
+ Format(instr, mnemonic, form);
+}
+
+
+void Disassembler::VisitFPFixedPointConvert(Instruction* instr) {
+ const char *mnemonic = "";
+ const char *form = "'Rd, 'Fn, 'IFPFBits";
+ const char *form_fr = "'Fd, 'Rn, 'IFPFBits";
+
+ switch (instr->Mask(FPFixedPointConvertMask)) {
+ case FCVTZS_ws_fixed:
+ case FCVTZS_xs_fixed:
+ case FCVTZS_wd_fixed:
+ case FCVTZS_xd_fixed: mnemonic = "fcvtzs"; break;
+ case FCVTZU_ws_fixed:
+ case FCVTZU_xs_fixed:
+ case FCVTZU_wd_fixed:
+ case FCVTZU_xd_fixed: mnemonic = "fcvtzu"; break;
+ case SCVTF_sw_fixed:
+ case SCVTF_sx_fixed:
+ case SCVTF_dw_fixed:
+ case SCVTF_dx_fixed: mnemonic = "scvtf"; form = form_fr; break;
+ case UCVTF_sw_fixed:
+ case UCVTF_sx_fixed:
+ case UCVTF_dw_fixed:
+ case UCVTF_dx_fixed: mnemonic = "ucvtf"; form = form_fr; break;
+ }
+ Format(instr, mnemonic, form);
+}
+
+
+void Disassembler::VisitSystem(Instruction* instr) {
+ // Some system instructions hijack their Op and Cp fields to represent a
+ // range of immediates instead of indicating a different instruction. This
+ // makes the decoding tricky.
+ const char *mnemonic = "unimplemented";
+ const char *form = "(System)";
+
+ if (instr->Mask(SystemSysRegFMask) == SystemSysRegFixed) {
+ switch (instr->Mask(SystemSysRegMask)) {
+ case MRS: {
+ mnemonic = "mrs";
+ switch (instr->ImmSystemRegister()) {
+ case NZCV: form = "'Xt, nzcv"; break;
+ case FPCR: form = "'Xt, fpcr"; break;
+ default: form = "'Xt, (unknown)"; break;
+ }
+ break;
+ }
+ case MSR: {
+ mnemonic = "msr";
+ switch (instr->ImmSystemRegister()) {
+ case NZCV: form = "nzcv, 'Xt"; break;
+ case FPCR: form = "fpcr, 'Xt"; break;
+ default: form = "(unknown), 'Xt"; break;
+ }
+ break;
+ }
+ }
+ } else if (instr->Mask(SystemHintFMask) == SystemHintFixed) {
+ ASSERT(instr->Mask(SystemHintMask) == HINT);
+ switch (instr->ImmHint()) {
+ case NOP: {
+ mnemonic = "nop";
+ form = NULL;
+ break;
+ }
+ }
+ } else if (instr->Mask(MemBarrierFMask) == MemBarrierFixed) {
+ switch (instr->Mask(MemBarrierMask)) {
+ case DMB: {
+ mnemonic = "dmb";
+ form = "'M";
+ break;
+ }
+ case DSB: {
+ mnemonic = "dsb";
+ form = "'M";
+ break;
+ }
+ case ISB: {
+ mnemonic = "isb";
+ form = NULL;
+ break;
+ }
+ }
+ }
+
+ Format(instr, mnemonic, form);
+}
+
+
+void Disassembler::VisitException(Instruction* instr) {
+ const char *mnemonic = "unimplemented";
+ const char *form = "'IDebug";
+
+ switch (instr->Mask(ExceptionMask)) {
+ case HLT: mnemonic = "hlt"; break;
+ case BRK: mnemonic = "brk"; break;
+ case SVC: mnemonic = "svc"; break;
+ case HVC: mnemonic = "hvc"; break;
+ case SMC: mnemonic = "smc"; break;
+ case DCPS1: mnemonic = "dcps1"; form = "{'IDebug}"; break;
+ case DCPS2: mnemonic = "dcps2"; form = "{'IDebug}"; break;
+ case DCPS3: mnemonic = "dcps3"; form = "{'IDebug}"; break;
+ default: form = "(Exception)";
+ }
+ Format(instr, mnemonic, form);
+}
+
+
+void Disassembler::VisitUnimplemented(Instruction* instr) {
+ Format(instr, "unimplemented", "(Unimplemented)");
+}
+
+
+void Disassembler::VisitUnallocated(Instruction* instr) {
+ Format(instr, "unallocated", "(Unallocated)");
+}
+
+
+void Disassembler::ProcessOutput(Instruction* /*instr*/) {
+ // The base disasm does nothing more than disassembling into a buffer.
+}
+
+
+void Disassembler::Format(Instruction* instr, const char* mnemonic,
+ const char* format) {
+ // TODO(mcapewel): don't think I can use the instr address here - there
+ // needs to be a base address too.
+ ASSERT(mnemonic != NULL);
+ ResetOutput();
+ Substitute(instr, mnemonic);
+ if (format != NULL) {
+ buffer_[buffer_pos_++] = ' ';
+ Substitute(instr, format);
+ }
+ buffer_[buffer_pos_] = 0;
+ ProcessOutput(instr);
+}
+
+
+void Disassembler::Substitute(Instruction* instr, const char* string) {
+ char chr = *string++;
+ while (chr != '\0') {
+ if (chr == '\'') {
+ string += SubstituteField(instr, string);
+ } else {
+ buffer_[buffer_pos_++] = chr;
+ }
+ chr = *string++;
+ }
+}
+
+
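+// Dispatches on the first character after a ' in a format string. As an
+// illustration, the form "'Rd, 'Rn, 'IAddSub" for "add x1, x2, #0x3f" would
+// be expanded to something like "x1, x2, #0x3f (63)".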
+int Disassembler::SubstituteField(Instruction* instr, const char* format) {
+ switch (format[0]) {
+ case 'R': // Register. X or W, selected by sf bit.
+ case 'F': // FP Register. S or D, selected by type field.
+ case 'W':
+ case 'X':
+ case 'S':
+ case 'D': return SubstituteRegisterField(instr, format);
+ case 'I': return SubstituteImmediateField(instr, format);
+ case 'L': return SubstituteLiteralField(instr, format);
+ case 'H': return SubstituteShiftField(instr, format);
+ case 'P': return SubstitutePrefetchField(instr, format);
+ case 'C': return SubstituteConditionField(instr, format);
+ case 'E': return SubstituteExtendField(instr, format);
+ case 'A': return SubstitutePCRelAddressField(instr, format);
+ case 'B': return SubstituteBranchTargetField(instr, format);
+ case 'O': return SubstituteLSRegOffsetField(instr, format);
+ case 'M': return SubstituteBarrierField(instr, format);
+ default: {
+ UNREACHABLE();
+ return 1;
+ }
+ }
+}
+
+
+int Disassembler::SubstituteRegisterField(Instruction* instr,
+ const char* format) {
+ unsigned reg_num = 0;
+ unsigned field_len = 2;
+ switch (format[1]) {
+ case 'd': reg_num = instr->Rd(); break;
+ case 'n': reg_num = instr->Rn(); break;
+ case 'm': reg_num = instr->Rm(); break;
+ case 'a': reg_num = instr->Ra(); break;
+ case 't': {
+ if (format[2] == '2') {
+ reg_num = instr->Rt2();
+ field_len = 3;
+ } else {
+ reg_num = instr->Rt();
+ }
+ break;
+ }
+ default: UNREACHABLE();
+ }
+
+ // Increase field length for registers tagged as stack.
+ if (format[2] == 's') {
+ field_len = 3;
+ }
+
+ char reg_type;
+ if (format[0] == 'R') {
+ // Register type is R: use sf bit to choose X and W.
+ reg_type = instr->SixtyFourBits() ? 'x' : 'w';
+ } else if (format[0] == 'F') {
+ // Floating-point register: use type field to choose S or D.
+ reg_type = ((instr->FPType() & 1) == 0) ? 's' : 'd';
+ } else {
+ // Register type is specified. Make it lower case.
+ reg_type = format[0] + 0x20;
+ }
+
+ if ((reg_num != kZeroRegCode) || (reg_type == 's') || (reg_type == 'd')) {
+ // A normal register: w0 - w30, x0 - x30, s0 - s31, d0 - d31.
+
+ // Filter special registers
+ if ((reg_type == 'x') && (reg_num == 27)) {
+ AppendToOutput("cp");
+ } else if ((reg_type == 'x') && (reg_num == 28)) {
+ AppendToOutput("jssp");
+ } else if ((reg_type == 'x') && (reg_num == 29)) {
+ AppendToOutput("fp");
+ } else if ((reg_type == 'x') && (reg_num == 30)) {
+ AppendToOutput("lr");
+ } else {
+ AppendToOutput("%c%d", reg_type, reg_num);
+ }
+ } else if (format[2] == 's') {
+ // Disassemble w31/x31 as stack pointer wcsp/csp.
+ AppendToOutput("%s", (reg_type == 'w') ? "wcsp" : "csp");
+ } else {
+ // Disassemble w31/x31 as zero register wzr/xzr.
+ AppendToOutput("%czr", reg_type);
+ }
+
+ return field_len;
+}
+
+
+int Disassembler::SubstituteImmediateField(Instruction* instr,
+ const char* format) {
+ ASSERT(format[0] == 'I');
+
+ switch (format[1]) {
+ case 'M': { // IMoveImm or IMoveLSL.
+ if (format[5] == 'I') {
+ uint64_t imm = instr->ImmMoveWide() << (16 * instr->ShiftMoveWide());
+ AppendToOutput("#0x%" PRIx64, imm);
+ } else {
+ ASSERT(format[5] == 'L');
+ AppendToOutput("#0x%" PRIx64, instr->ImmMoveWide());
+ if (instr->ShiftMoveWide() > 0) {
+ AppendToOutput(", lsl #%d", 16 * instr->ShiftMoveWide());
+ }
+ }
+ return 8;
+ }
+ case 'L': {
+ switch (format[2]) {
+ case 'L': { // ILLiteral - Immediate Load Literal.
+ AppendToOutput("pc%+" PRId64,
+ instr->ImmLLiteral() << kLiteralEntrySizeLog2);
+ return 9;
+ }
+ case 'S': { // ILS - Immediate Load/Store.
+ if (instr->ImmLS() != 0) {
+ AppendToOutput(", #%" PRId64, instr->ImmLS());
+ }
+ return 3;
+ }
+ case 'P': { // ILPx - Immediate Load/Store Pair, x = access size.
+ if (instr->ImmLSPair() != 0) {
+ // format[3] is the scale value. Convert to a number.
+ int scale = format[3] - 0x30;
+ AppendToOutput(", #%" PRId64, instr->ImmLSPair() * scale);
+ }
+ return 4;
+ }
+ case 'U': { // ILU - Immediate Load/Store Unsigned.
+ if (instr->ImmLSUnsigned() != 0) {
+ AppendToOutput(", #%" PRIu64,
+ instr->ImmLSUnsigned() << instr->SizeLS());
+ }
+ return 3;
+ }
+ }
+ }
+ case 'C': { // ICondB - Immediate Conditional Branch.
+ int64_t offset = instr->ImmCondBranch() << 2;
+ char sign = (offset >= 0) ? '+' : '-';
+ AppendToOutput("#%c0x%" PRIx64, sign, offset);
+ return 6;
+ }
+ case 'A': { // IAddSub.
+ ASSERT(instr->ShiftAddSub() <= 1);
+ int64_t imm = instr->ImmAddSub() << (12 * instr->ShiftAddSub());
+ AppendToOutput("#0x%" PRIx64 " (%" PRId64 ")", imm, imm);
+ return 7;
+ }
+ case 'F': { // IFPSingle, IFPDouble or IFPFBits.
+ if (format[3] == 'F') { // IFPFBits.
+ AppendToOutput("#%d", 64 - instr->FPScale());
+ return 8;
+ } else {
+ AppendToOutput("#0x%" PRIx64 " (%.4f)", instr->ImmFP(),
+ format[3] == 'S' ? instr->ImmFP32() : instr->ImmFP64());
+ return 9;
+ }
+ }
+ case 'T': { // ITri - Immediate Triangular Encoded.
+ AppendToOutput("#0x%" PRIx64, instr->ImmLogical());
+ return 4;
+ }
+ case 'N': { // INzcv.
+ int nzcv = (instr->Nzcv() << Flags_offset);
+ AppendToOutput("#%c%c%c%c", ((nzcv & NFlag) == 0) ? 'n' : 'N',
+ ((nzcv & ZFlag) == 0) ? 'z' : 'Z',
+ ((nzcv & CFlag) == 0) ? 'c' : 'C',
+ ((nzcv & VFlag) == 0) ? 'v' : 'V');
+ return 5;
+ }
+ case 'P': { // IP - Conditional compare.
+ AppendToOutput("#%d", instr->ImmCondCmp());
+ return 2;
+ }
+ case 'B': { // Bitfields.
+ return SubstituteBitfieldImmediateField(instr, format);
+ }
+ case 'E': { // IExtract.
+ AppendToOutput("#%d", instr->ImmS());
+ return 8;
+ }
+ case 'S': { // IS - Test and branch bit.
+ AppendToOutput("#%d", (instr->ImmTestBranchBit5() << 5) |
+ instr->ImmTestBranchBit40());
+ return 2;
+ }
+ case 'D': { // IDebug - HLT and BRK instructions.
+ AppendToOutput("#0x%x", instr->ImmException());
+ return 6;
+ }
+ default: {
+ UNREACHABLE();
+ return 0;
+ }
+ }
+}
+
+
+int Disassembler::SubstituteBitfieldImmediateField(Instruction* instr,
+ const char* format) {
+ ASSERT((format[0] == 'I') && (format[1] == 'B'));
+ unsigned r = instr->ImmR();
+ unsigned s = instr->ImmS();
+
+ switch (format[2]) {
+ case 'r': { // IBr.
+ AppendToOutput("#%d", r);
+ return 3;
+ }
+ case 's': { // IBs+1 or IBs-r+1.
+ if (format[3] == '+') {
+ AppendToOutput("#%d", s + 1);
+ return 5;
+ } else {
+ ASSERT(format[3] == '-');
+ AppendToOutput("#%d", s - r + 1);
+ return 7;
+ }
+ }
+ case 'Z': { // IBZ-r.
+ ASSERT((format[3] == '-') && (format[4] == 'r'));
+ unsigned reg_size = (instr->SixtyFourBits() == 1) ? kXRegSizeInBits
+ : kWRegSizeInBits;
+ AppendToOutput("#%d", reg_size - r);
+ return 5;
+ }
+ default: {
+ UNREACHABLE();
+ return 0;
+ }
+ }
+}
+
+
+int Disassembler::SubstituteLiteralField(Instruction* instr,
+ const char* format) {
+ ASSERT(strncmp(format, "LValue", 6) == 0);
+ USE(format);
+
+ switch (instr->Mask(LoadLiteralMask)) {
+ case LDR_w_lit:
+ case LDR_x_lit:
+ case LDR_s_lit:
+ case LDR_d_lit: AppendToOutput("(addr %p)", instr->LiteralAddress()); break;
+ default: UNREACHABLE();
+ }
+
+ return 6;
+}
+
+
+int Disassembler::SubstituteShiftField(Instruction* instr, const char* format) {
+ ASSERT(format[0] == 'H');
+ ASSERT(instr->ShiftDP() <= 0x3);
+
+ switch (format[1]) {
+ case 'D': { // HDP.
+ ASSERT(instr->ShiftDP() != ROR);
+ } // Fall through.
+ case 'L': { // HLo.
+ if (instr->ImmDPShift() != 0) {
+ const char* shift_type[] = {"lsl", "lsr", "asr", "ror"};
+ AppendToOutput(", %s #%" PRId64, shift_type[instr->ShiftDP()],
+ instr->ImmDPShift());
+ }
+ return 3;
+ }
+ default:
+ UNREACHABLE();
+ return 0;
+ }
+}
+
+
+int Disassembler::SubstituteConditionField(Instruction* instr,
+ const char* format) {
+ ASSERT(format[0] == 'C');
+ const char* condition_code[] = { "eq", "ne", "hs", "lo",
+ "mi", "pl", "vs", "vc",
+ "hi", "ls", "ge", "lt",
+ "gt", "le", "al", "nv" };
+ int cond;
+ switch (format[1]) {
+ case 'B': cond = instr->ConditionBranch(); break;
+ case 'I': {
+ cond = InvertCondition(static_cast<Condition>(instr->Condition()));
+ break;
+ }
+ default: cond = instr->Condition();
+ }
+ AppendToOutput("%s", condition_code[cond]);
+ return 4;
+}
+
+
+int Disassembler::SubstitutePCRelAddressField(Instruction* instr,
+ const char* format) {
+ USE(format);
+ ASSERT(strncmp(format, "AddrPCRel", 9) == 0);
+
+ int offset = instr->ImmPCRel();
+
+ // Only ADR (AddrPCRelByte) is supported.
+ ASSERT(strcmp(format, "AddrPCRelByte") == 0);
+
+ char sign = '+';
+ if (offset < 0) {
+ offset = -offset;
+ sign = '-';
+ }
+ AppendToOutput("#%c0x%x (addr %p)", sign, offset,
+ instr->InstructionAtOffset(offset, Instruction::NO_CHECK));
+ return 13;
+}
+
+
+int Disassembler::SubstituteBranchTargetField(Instruction* instr,
+ const char* format) {
+ ASSERT(strncmp(format, "BImm", 4) == 0);
+
+ int64_t offset = 0;
+ switch (format[5]) {
+ // BImmUncn - unconditional branch immediate.
+ case 'n': offset = instr->ImmUncondBranch(); break;
+ // BImmCond - conditional branch immediate.
+ case 'o': offset = instr->ImmCondBranch(); break;
+ // BImmCmpa - compare and branch immediate.
+ case 'm': offset = instr->ImmCmpBranch(); break;
+ // BImmTest - test and branch immediate.
+ case 'e': offset = instr->ImmTestBranch(); break;
+ default: UNREACHABLE();
+ }
+ offset <<= kInstructionSizeLog2;
+ char sign = '+';
+ if (offset < 0) {
+ offset = -offset;
+ sign = '-';
+ }
+ AppendToOutput("#%c0x%" PRIx64 " (addr %p)", sign, offset,
+ instr->InstructionAtOffset(offset), Instruction::NO_CHECK);
+ return 8;
+}
+
+
+int Disassembler::SubstituteExtendField(Instruction* instr,
+ const char* format) {
+ ASSERT(strncmp(format, "Ext", 3) == 0);
+ ASSERT(instr->ExtendMode() <= 7);
+ USE(format);
+
+ const char* extend_mode[] = { "uxtb", "uxth", "uxtw", "uxtx",
+ "sxtb", "sxth", "sxtw", "sxtx" };
+
+ // If rd or rn is SP, the extend (uxtw on 32-bit registers, uxtx on 64-bit
+ // registers) is printed as lsl.
+ if (((instr->Rd() == kZeroRegCode) || (instr->Rn() == kZeroRegCode)) &&
+ (((instr->ExtendMode() == UXTW) && (instr->SixtyFourBits() == 0)) ||
+ (instr->ExtendMode() == UXTX))) {
+ if (instr->ImmExtendShift() > 0) {
+ AppendToOutput(", lsl #%d", instr->ImmExtendShift());
+ }
+ } else {
+ AppendToOutput(", %s", extend_mode[instr->ExtendMode()]);
+ if (instr->ImmExtendShift() > 0) {
+ AppendToOutput(" #%d", instr->ImmExtendShift());
+ }
+ }
+ return 3;
+}
+
+
+int Disassembler::SubstituteLSRegOffsetField(Instruction* instr,
+ const char* format) {
+ ASSERT(strncmp(format, "Offsetreg", 9) == 0);
+ const char* extend_mode[] = { "undefined", "undefined", "uxtw", "lsl",
+ "undefined", "undefined", "sxtw", "sxtx" };
+ USE(format);
+
+ unsigned shift = instr->ImmShiftLS();
+ Extend ext = static_cast<Extend>(instr->ExtendMode());
+ char reg_type = ((ext == UXTW) || (ext == SXTW)) ? 'w' : 'x';
+
+ unsigned rm = instr->Rm();
+ if (rm == kZeroRegCode) {
+ AppendToOutput("%czr", reg_type);
+ } else {
+ AppendToOutput("%c%d", reg_type, rm);
+ }
+
+ // Extend mode UXTX is an alias for shift mode LSL here.
+ if (!((ext == UXTX) && (shift == 0))) {
+ AppendToOutput(", %s", extend_mode[ext]);
+ if (shift != 0) {
+ AppendToOutput(" #%d", instr->SizeLS());
+ }
+ }
+ return 9;
+}
+
+
+int Disassembler::SubstitutePrefetchField(Instruction* instr,
+ const char* format) {
+ ASSERT(format[0] == 'P');
+ USE(format);
+
+ int prefetch_mode = instr->PrefetchMode();
+
+ const char* ls = (prefetch_mode & 0x10) ? "st" : "ld";
+ int level = (prefetch_mode >> 1) + 1;
+ const char* ks = (prefetch_mode & 1) ? "strm" : "keep";
+
+ AppendToOutput("p%sl%d%s", ls, level, ks);
+ return 6;
+}
+
+int Disassembler::SubstituteBarrierField(Instruction* instr,
+ const char* format) {
+ ASSERT(format[0] == 'M');
+ USE(format);
+
+ static const char* options[4][4] = {
+ { "sy (0b0000)", "oshld", "oshst", "osh" },
+ { "sy (0b0100)", "nshld", "nshst", "nsh" },
+ { "sy (0b1000)", "ishld", "ishst", "ish" },
+ { "sy (0b1100)", "ld", "st", "sy" }
+ };
+ int domain = instr->ImmBarrierDomain();
+ int type = instr->ImmBarrierType();
+
+ AppendToOutput("%s", options[domain][type]);
+ return 1;
+}
+
+
+void Disassembler::ResetOutput() {
+ buffer_pos_ = 0;
+ buffer_[buffer_pos_] = 0;
+}
+
+
+void Disassembler::AppendToOutput(const char* format, ...) {
+ va_list args;
+ va_start(args, format);
+ buffer_pos_ += vsnprintf(&buffer_[buffer_pos_], buffer_size_ - buffer_pos_,
+ format, args);
+ va_end(args);
+}
+
+
+void PrintDisassembler::ProcessOutput(Instruction* instr) {
+ fprintf(stream_, "0x%016" PRIx64 " %08" PRIx32 "\t\t%s\n",
+ reinterpret_cast<uint64_t>(instr), instr->InstructionBits(),
+ GetOutput());
+}
+
+} } // namespace v8::internal
+
+
+namespace disasm {
+
+
+const char* NameConverter::NameOfAddress(byte* addr) const {
+ v8::internal::OS::SNPrintF(tmp_buffer_, "%p", addr);
+ return tmp_buffer_.start();
+}
+
+
+const char* NameConverter::NameOfConstant(byte* addr) const {
+ return NameOfAddress(addr);
+}
+
+
+const char* NameConverter::NameOfCPURegister(int reg) const {
+ unsigned ureg = reg; // Avoid warnings about signed/unsigned comparisons.
+ if (ureg >= v8::internal::kNumberOfRegisters) {
+ return "noreg";
+ }
+ if (ureg == v8::internal::kZeroRegCode) {
+ return "xzr";
+ }
+ v8::internal::OS::SNPrintF(tmp_buffer_, "x%u", ureg);
+ return tmp_buffer_.start();
+}
+
+
+const char* NameConverter::NameOfByteCPURegister(int reg) const {
+ UNREACHABLE(); // ARM64 does not have the concept of a byte register
+ return "nobytereg";
+}
+
+
+const char* NameConverter::NameOfXMMRegister(int reg) const {
+ UNREACHABLE(); // ARM64 does not have any XMM registers
+ return "noxmmreg";
+}
+
+
+const char* NameConverter::NameInCode(byte* addr) const {
+ // The default name converter is called for unknown code, so we will not try
+ // to access any memory.
+ return "";
+}
+
+
+//------------------------------------------------------------------------------
+
+class BufferDisassembler : public v8::internal::Disassembler {
+ public:
+ explicit BufferDisassembler(v8::internal::Vector<char> out_buffer)
+ : out_buffer_(out_buffer) { }
+
+ ~BufferDisassembler() { }
+
+ virtual void ProcessOutput(v8::internal::Instruction* instr) {
+ v8::internal::OS::SNPrintF(out_buffer_, "%s", GetOutput());
+ }
+
+ private:
+ v8::internal::Vector<char> out_buffer_;
+};
+
+Disassembler::Disassembler(const NameConverter& converter)
+ : converter_(converter) {}
+
+
+Disassembler::~Disassembler() {}
+
+
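+// Decodes the single instruction at |instr| into |buffer| and returns its
+// size in bytes. A minimal usage sketch (the converter and buffer shown here
+// are illustrative):
+//   disasm::NameConverter converter;
+//   disasm::Disassembler d(converter);
+//   v8::internal::EmbeddedVector<char, 128> buf;
+//   pc += d.InstructionDecode(buf, pc);  // pc is a byte* into the code.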
+int Disassembler::InstructionDecode(v8::internal::Vector<char> buffer,
+ byte* instr) {
+ v8::internal::Decoder<v8::internal::DispatchingDecoderVisitor> decoder;
+ BufferDisassembler disasm(buffer);
+ decoder.AppendVisitor(&disasm);
+
+ decoder.Decode(reinterpret_cast<v8::internal::Instruction*>(instr));
+ return v8::internal::kInstructionSize;
+}
+
+
+int Disassembler::ConstantPoolSizeAt(byte* instr) {
+ return v8::internal::Assembler::ConstantPoolSizeAt(
+ reinterpret_cast<v8::internal::Instruction*>(instr));
+}
+
+
+void Disassembler::Disassemble(FILE* file, byte* start, byte* end) {
+ v8::internal::Decoder<v8::internal::DispatchingDecoderVisitor> decoder;
+ v8::internal::PrintDisassembler disasm(file);
+ decoder.AppendVisitor(&disasm);
+
+ for (byte* pc = start; pc < end; pc += v8::internal::kInstructionSize) {
+ decoder.Decode(reinterpret_cast<v8::internal::Instruction*>(pc));
+ }
+}
+
+} // namespace disasm
+
+#endif // V8_TARGET_ARCH_ARM64
diff --git a/deps/v8/src/arm64/disasm-arm64.h b/deps/v8/src/arm64/disasm-arm64.h
new file mode 100644
index 0000000000..8c964a8905
--- /dev/null
+++ b/deps/v8/src/arm64/disasm-arm64.h
@@ -0,0 +1,115 @@
+// Copyright 2013 the V8 project authors. All rights reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following
+// disclaimer in the documentation and/or other materials provided
+// with the distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived
+// from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+#ifndef V8_ARM64_DISASM_ARM64_H
+#define V8_ARM64_DISASM_ARM64_H
+
+#include "v8.h"
+
+#include "globals.h"
+#include "utils.h"
+#include "instructions-arm64.h"
+#include "decoder-arm64.h"
+
+namespace v8 {
+namespace internal {
+
+
+class Disassembler: public DecoderVisitor {
+ public:
+ Disassembler();
+ Disassembler(char* text_buffer, int buffer_size);
+ virtual ~Disassembler();
+ char* GetOutput();
+
+ // Declare all Visitor functions.
+ #define DECLARE(A) void Visit##A(Instruction* instr);
+ VISITOR_LIST(DECLARE)
+ #undef DECLARE
+
+ protected:
+ virtual void ProcessOutput(Instruction* instr);
+
+ void Format(Instruction* instr, const char* mnemonic, const char* format);
+ void Substitute(Instruction* instr, const char* string);
+ int SubstituteField(Instruction* instr, const char* format);
+ int SubstituteRegisterField(Instruction* instr, const char* format);
+ int SubstituteImmediateField(Instruction* instr, const char* format);
+ int SubstituteLiteralField(Instruction* instr, const char* format);
+ int SubstituteBitfieldImmediateField(Instruction* instr, const char* format);
+ int SubstituteShiftField(Instruction* instr, const char* format);
+ int SubstituteExtendField(Instruction* instr, const char* format);
+ int SubstituteConditionField(Instruction* instr, const char* format);
+ int SubstitutePCRelAddressField(Instruction* instr, const char* format);
+ int SubstituteBranchTargetField(Instruction* instr, const char* format);
+ int SubstituteLSRegOffsetField(Instruction* instr, const char* format);
+ int SubstitutePrefetchField(Instruction* instr, const char* format);
+ int SubstituteBarrierField(Instruction* instr, const char* format);
+
+ bool RdIsZROrSP(Instruction* instr) const {
+ return (instr->Rd() == kZeroRegCode);
+ }
+
+ bool RnIsZROrSP(Instruction* instr) const {
+ return (instr->Rn() == kZeroRegCode);
+ }
+
+ bool RmIsZROrSP(Instruction* instr) const {
+ return (instr->Rm() == kZeroRegCode);
+ }
+
+ bool RaIsZROrSP(Instruction* instr) const {
+ return (instr->Ra() == kZeroRegCode);
+ }
+
+ bool IsMovzMovnImm(unsigned reg_size, uint64_t value);
+
+ void ResetOutput();
+ void AppendToOutput(const char* string, ...);
+
+ char* buffer_;
+ uint32_t buffer_pos_;
+ uint32_t buffer_size_;
+ bool own_buffer_;
+};
+
+
+class PrintDisassembler: public Disassembler {
+ public:
+ explicit PrintDisassembler(FILE* stream) : stream_(stream) { }
+ ~PrintDisassembler() { }
+
+ virtual void ProcessOutput(Instruction* instr);
+
+ private:
+ FILE *stream_;
+};
+
+
+} } // namespace v8::internal
+
+#endif // V8_ARM64_DISASM_ARM64_H
diff --git a/deps/v8/src/arm64/frames-arm64.cc b/deps/v8/src/arm64/frames-arm64.cc
new file mode 100644
index 0000000000..8c1bc20ac1
--- /dev/null
+++ b/deps/v8/src/arm64/frames-arm64.cc
@@ -0,0 +1,65 @@
+// Copyright 2013 the V8 project authors. All rights reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following
+// disclaimer in the documentation and/or other materials provided
+// with the distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived
+// from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+#include "v8.h"
+
+#if V8_TARGET_ARCH_ARM64
+
+#include "assembler.h"
+#include "assembler-arm64.h"
+#include "assembler-arm64-inl.h"
+#include "frames.h"
+
+namespace v8 {
+namespace internal {
+
+
+Register JavaScriptFrame::fp_register() { return v8::internal::fp; }
+Register JavaScriptFrame::context_register() { return cp; }
+Register JavaScriptFrame::constant_pool_pointer_register() {
+ UNREACHABLE();
+ return no_reg;
+}
+
+
+Register StubFailureTrampolineFrame::fp_register() { return v8::internal::fp; }
+Register StubFailureTrampolineFrame::context_register() { return cp; }
+Register StubFailureTrampolineFrame::constant_pool_pointer_register() {
+ UNREACHABLE();
+ return no_reg;
+}
+
+
+Object*& ExitFrame::constant_pool_slot() const {
+ UNREACHABLE();
+ return Memory::Object_at(NULL);
+}
+
+
+} } // namespace v8::internal
+
+#endif // V8_TARGET_ARCH_ARM64
diff --git a/deps/v8/src/arm64/frames-arm64.h b/deps/v8/src/arm64/frames-arm64.h
new file mode 100644
index 0000000000..8b56410584
--- /dev/null
+++ b/deps/v8/src/arm64/frames-arm64.h
@@ -0,0 +1,133 @@
+// Copyright 2013 the V8 project authors. All rights reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following
+// disclaimer in the documentation and/or other materials provided
+// with the distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived
+// from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+#include "arm64/constants-arm64.h"
+#include "arm64/assembler-arm64.h"
+
+#ifndef V8_ARM64_FRAMES_ARM64_H_
+#define V8_ARM64_FRAMES_ARM64_H_
+
+namespace v8 {
+namespace internal {
+
+const int kNumRegs = kNumberOfRegisters;
+// Registers x0-x17 are caller-saved.
+const int kNumJSCallerSaved = 18;
+const RegList kJSCallerSaved = 0x3ffff;
+typedef Object* JSCallerSavedBuffer[kNumJSCallerSaved];
+
+// Number of registers for which space is reserved in safepoints. Must be a
+// multiple of eight.
+// TODO(all): Refine this number.
+const int kNumSafepointRegisters = 32;
+
+// Define the list of registers actually saved at safepoints.
+// Note that the number of saved registers may be smaller than the reserved
+// space, i.e. kNumSafepointSavedRegisters <= kNumSafepointRegisters.
+#define kSafepointSavedRegisters CPURegList::GetSafepointSavedRegisters().list()
+#define kNumSafepointSavedRegisters \
+ CPURegList::GetSafepointSavedRegisters().Count();
+
+class EntryFrameConstants : public AllStatic {
+ public:
+ static const int kCallerFPOffset =
+ -(StandardFrameConstants::kFixedFrameSizeFromFp + kPointerSize);
+};
+
+
+class ExitFrameConstants : public AllStatic {
+ public:
+ static const int kFrameSize = 2 * kPointerSize;
+
+ static const int kCallerSPDisplacement = 2 * kPointerSize;
+ static const int kCallerPCOffset = 1 * kPointerSize;
+ static const int kCallerFPOffset = 0 * kPointerSize; // <- fp
+ static const int kSPOffset = -1 * kPointerSize;
+ static const int kCodeOffset = -2 * kPointerSize;
+ static const int kLastExitFrameField = kCodeOffset;
+
+ static const int kConstantPoolOffset = 0; // Not used
+};
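+
+// A rough sketch of the fp-relative slots described by the constants above
+// (not authoritative; higher addresses first):
+//
+//   fp + 2 * kPointerSize : caller's stack pointer points here
+//                           (kCallerSPDisplacement)
+//   fp + 1 * kPointerSize : return address      (kCallerPCOffset)
+//   fp + 0 * kPointerSize : saved caller fp     (kCallerFPOffset)  <- fp
+//   fp - 1 * kPointerSize : saved stack pointer (kSPOffset)
+//   fp - 2 * kPointerSize : code object         (kCodeOffset, also
+//                           kLastExitFrameField)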
+
+
+class JavaScriptFrameConstants : public AllStatic {
+ public:
+ // FP-relative.
+ static const int kLocal0Offset = StandardFrameConstants::kExpressionsOffset;
+
+ // There are two words on the stack (saved fp and saved lr) between fp and
+ // the arguments.
+ static const int kLastParameterOffset = 2 * kPointerSize;
+
+ static const int kFunctionOffset = StandardFrameConstants::kMarkerOffset;
+};
+
+
+class ArgumentsAdaptorFrameConstants : public AllStatic {
+ public:
+ // FP-relative.
+ static const int kLengthOffset = StandardFrameConstants::kExpressionsOffset;
+
+ static const int kFrameSize =
+ StandardFrameConstants::kFixedFrameSize + kPointerSize;
+};
+
+
+class ConstructFrameConstants : public AllStatic {
+ public:
+ // FP-relative.
+ static const int kCodeOffset = StandardFrameConstants::kExpressionsOffset;
+ static const int kLengthOffset = -4 * kPointerSize;
+ static const int kConstructorOffset = -5 * kPointerSize;
+ static const int kImplicitReceiverOffset = -6 * kPointerSize;
+
+ static const int kFrameSize =
+ StandardFrameConstants::kFixedFrameSize + 4 * kPointerSize;
+};
+
+
+class InternalFrameConstants : public AllStatic {
+ public:
+ // FP-relative.
+ static const int kCodeOffset = StandardFrameConstants::kExpressionsOffset;
+};
+
+
+inline Object* JavaScriptFrame::function_slot_object() const {
+ const int offset = JavaScriptFrameConstants::kFunctionOffset;
+ return Memory::Object_at(fp() + offset);
+}
+
+
+inline void StackHandler::SetFp(Address slot, Address fp) {
+ Memory::Address_at(slot) = fp;
+}
+
+
+} } // namespace v8::internal
+
+#endif // V8_ARM64_FRAMES_ARM64_H_
diff --git a/deps/v8/src/arm64/full-codegen-arm64.cc b/deps/v8/src/arm64/full-codegen-arm64.cc
new file mode 100644
index 0000000000..d40e74aa27
--- /dev/null
+++ b/deps/v8/src/arm64/full-codegen-arm64.cc
@@ -0,0 +1,5015 @@
+// Copyright 2013 the V8 project authors. All rights reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following
+// disclaimer in the documentation and/or other materials provided
+// with the distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived
+// from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+#include "v8.h"
+
+#if V8_TARGET_ARCH_ARM64
+
+#include "code-stubs.h"
+#include "codegen.h"
+#include "compiler.h"
+#include "debug.h"
+#include "full-codegen.h"
+#include "isolate-inl.h"
+#include "parser.h"
+#include "scopes.h"
+#include "stub-cache.h"
+
+#include "arm64/code-stubs-arm64.h"
+#include "arm64/macro-assembler-arm64.h"
+
+namespace v8 {
+namespace internal {
+
+#define __ ACCESS_MASM(masm_)
+
+class JumpPatchSite BASE_EMBEDDED {
+ public:
+ explicit JumpPatchSite(MacroAssembler* masm) : masm_(masm), reg_(NoReg) {
+#ifdef DEBUG
+ info_emitted_ = false;
+#endif
+ }
+
+ ~JumpPatchSite() {
+ if (patch_site_.is_bound()) {
+ ASSERT(info_emitted_);
+ } else {
+ ASSERT(reg_.IsNone());
+ }
+ }
+
+ void EmitJumpIfNotSmi(Register reg, Label* target) {
+ // This code will be patched by PatchInlinedSmiCode, in ic-arm64.cc.
+ InstructionAccurateScope scope(masm_, 1);
+ ASSERT(!info_emitted_);
+ ASSERT(reg.Is64Bits());
+ ASSERT(!reg.Is(csp));
+ reg_ = reg;
+ __ bind(&patch_site_);
+ __ tbz(xzr, 0, target); // Always taken before patched.
+ }
+
+ void EmitJumpIfSmi(Register reg, Label* target) {
+ // This code will be patched by PatchInlinedSmiCode, in ic-arm64.cc.
+ InstructionAccurateScope scope(masm_, 1);
+ ASSERT(!info_emitted_);
+ ASSERT(reg.Is64Bits());
+ ASSERT(!reg.Is(csp));
+ reg_ = reg;
+ __ bind(&patch_site_);
+ __ tbnz(xzr, 0, target); // Never taken before patched.
+ }
+
+ void EmitJumpIfEitherNotSmi(Register reg1, Register reg2, Label* target) {
+ UseScratchRegisterScope temps(masm_);
+ Register temp = temps.AcquireX();
+ __ Orr(temp, reg1, reg2);
+ EmitJumpIfNotSmi(temp, target);
+ }
+
+ void EmitPatchInfo() {
+ Assembler::BlockPoolsScope scope(masm_);
+ InlineSmiCheckInfo::Emit(masm_, reg_, &patch_site_);
+#ifdef DEBUG
+ info_emitted_ = true;
+#endif
+ }
+
+ private:
+ MacroAssembler* masm_;
+ Label patch_site_;
+ Register reg_;
+#ifdef DEBUG
+ bool info_emitted_;
+#endif
+};
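+
+// How the patch site works (informal summary): bit 0 of xzr is always clear,
+// so the initial "tbz xzr, #0" in EmitJumpIfNotSmi is unconditionally taken
+// and the initial "tbnz xzr, #0" in EmitJumpIfSmi is never taken.
+// PatchInlinedSmiCode (ic-arm64.cc) is expected to rewrite the recorded
+// instruction so that it tests the smi tag bit of reg_ instead, turning the
+// placeholder branch into a real smi check.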
+
+
+static void EmitStackCheck(MacroAssembler* masm_,
+ int pointers = 0,
+ Register scratch = jssp) {
+ Isolate* isolate = masm_->isolate();
+ Label ok;
+ ASSERT(jssp.Is(__ StackPointer()));
+ ASSERT(scratch.Is(jssp) == (pointers == 0));
+ if (pointers != 0) {
+ __ Sub(scratch, jssp, pointers * kPointerSize);
+ }
+ __ CompareRoot(scratch, Heap::kStackLimitRootIndex);
+ __ B(hs, &ok);
+ PredictableCodeSizeScope predictable(masm_,
+ Assembler::kCallSizeWithRelocation);
+ __ Call(isolate->builtins()->StackCheck(), RelocInfo::CODE_TARGET);
+ __ Bind(&ok);
+}
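+
+// EmitStackCheck is used in two ways below: with the default arguments it
+// compares jssp directly against the stack limit, and with an explicit
+// (pointers, scratch) pair it checks whether that many more pointer-sized
+// slots can be reserved without crossing the limit (scratch receives
+// jssp - pointers * kPointerSize for the comparison).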
+
+
+// Generate code for a JS function. On entry to the function the receiver
+// and arguments have been pushed on the stack left to right. The actual
+// argument count matches the formal parameter count expected by the
+// function.
+//
+// The live registers are:
+// - x1: the JS function object being called (i.e. ourselves).
+// - cp: our context.
+// - fp: our caller's frame pointer.
+// - jssp: stack pointer.
+// - lr: return address.
+//
+// The function builds a JS frame. See JavaScriptFrameConstants in
+// frames-arm64.h for its layout.
+void FullCodeGenerator::Generate() {
+ CompilationInfo* info = info_;
+ handler_table_ =
+ isolate()->factory()->NewFixedArray(function()->handler_count(), TENURED);
+
+ InitializeFeedbackVector();
+
+ profiling_counter_ = isolate()->factory()->NewCell(
+ Handle<Smi>(Smi::FromInt(FLAG_interrupt_budget), isolate()));
+ SetFunctionPosition(function());
+ Comment cmnt(masm_, "[ Function compiled by full code generator");
+
+ ProfileEntryHookStub::MaybeCallEntryHook(masm_);
+
+#ifdef DEBUG
+ if (strlen(FLAG_stop_at) > 0 &&
+ info->function()->name()->IsUtf8EqualTo(CStrVector(FLAG_stop_at))) {
+ __ Debug("stop-at", __LINE__, BREAK);
+ }
+#endif
+
+ // Sloppy mode functions and builtins need to replace the receiver with the
+ // global proxy when called as functions (without an explicit receiver
+ // object).
+ if (info->strict_mode() == SLOPPY && !info->is_native()) {
+ Label ok;
+ int receiver_offset = info->scope()->num_parameters() * kXRegSize;
+ __ Peek(x10, receiver_offset);
+ __ JumpIfNotRoot(x10, Heap::kUndefinedValueRootIndex, &ok);
+
+ __ Ldr(x10, GlobalObjectMemOperand());
+ __ Ldr(x10, FieldMemOperand(x10, GlobalObject::kGlobalReceiverOffset));
+ __ Poke(x10, receiver_offset);
+
+ __ Bind(&ok);
+ }
+
+
+ // Open a frame scope to indicate that there is a frame on the stack.
+ // The MANUAL indicates that the scope shouldn't actually generate code
+ // to set up the frame because we do it manually below.
+ FrameScope frame_scope(masm_, StackFrame::MANUAL);
+
+ // This call emits the following sequence in a way that can be patched for
+ // code ageing support:
+ // Push(lr, fp, cp, x1);
+ // Add(fp, jssp, 2 * kPointerSize);
+ info->set_prologue_offset(masm_->pc_offset());
+ __ Prologue(BUILD_FUNCTION_FRAME);
+ info->AddNoFrameRange(0, masm_->pc_offset());
+
+ // Reserve space on the stack for locals.
+ { Comment cmnt(masm_, "[ Allocate locals");
+ int locals_count = info->scope()->num_stack_slots();
+ // Generators allocate locals, if any, in context slots.
+ ASSERT(!info->function()->is_generator() || locals_count == 0);
+
+ if (locals_count > 0) {
+ if (locals_count >= 128) {
+ EmitStackCheck(masm_, locals_count, x10);
+ }
+ __ LoadRoot(x10, Heap::kUndefinedValueRootIndex);
+ if (FLAG_optimize_for_size) {
+      __ PushMultipleTimes(x10, locals_count);
+ } else {
+ const int kMaxPushes = 32;
+ if (locals_count >= kMaxPushes) {
+ int loop_iterations = locals_count / kMaxPushes;
+ __ Mov(x3, loop_iterations);
+ Label loop_header;
+ __ Bind(&loop_header);
+ // Do pushes.
+        __ PushMultipleTimes(x10, kMaxPushes);
+ __ Subs(x3, x3, 1);
+ __ B(ne, &loop_header);
+ }
+ int remaining = locals_count % kMaxPushes;
+ // Emit the remaining pushes.
+      __ PushMultipleTimes(x10, remaining);
+ }
+ }
+ }
+
+ bool function_in_register_x1 = true;
+
+ int heap_slots = info->scope()->num_heap_slots() - Context::MIN_CONTEXT_SLOTS;
+ if (heap_slots > 0) {
+ // Argument to NewContext is the function, which is still in x1.
+ Comment cmnt(masm_, "[ Allocate context");
+ if (FLAG_harmony_scoping && info->scope()->is_global_scope()) {
+ __ Mov(x10, Operand(info->scope()->GetScopeInfo()));
+ __ Push(x1, x10);
+ __ CallRuntime(Runtime::kHiddenNewGlobalContext, 2);
+ } else if (heap_slots <= FastNewContextStub::kMaximumSlots) {
+ FastNewContextStub stub(heap_slots);
+ __ CallStub(&stub);
+ } else {
+ __ Push(x1);
+ __ CallRuntime(Runtime::kHiddenNewFunctionContext, 1);
+ }
+ function_in_register_x1 = false;
+ // Context is returned in x0. It replaces the context passed to us.
+    // It's saved on the stack and kept live in cp.
+ __ Mov(cp, x0);
+ __ Str(x0, MemOperand(fp, StandardFrameConstants::kContextOffset));
+ // Copy any necessary parameters into the context.
+ int num_parameters = info->scope()->num_parameters();
+ for (int i = 0; i < num_parameters; i++) {
+ Variable* var = scope()->parameter(i);
+ if (var->IsContextSlot()) {
+ int parameter_offset = StandardFrameConstants::kCallerSPOffset +
+ (num_parameters - 1 - i) * kPointerSize;
+ // Load parameter from stack.
+ __ Ldr(x10, MemOperand(fp, parameter_offset));
+ // Store it in the context.
+ MemOperand target = ContextMemOperand(cp, var->index());
+ __ Str(x10, target);
+
+ // Update the write barrier.
+ __ RecordWriteContextSlot(
+ cp, target.offset(), x10, x11, kLRHasBeenSaved, kDontSaveFPRegs);
+ }
+ }
+ }
+
+ Variable* arguments = scope()->arguments();
+ if (arguments != NULL) {
+ // Function uses arguments object.
+ Comment cmnt(masm_, "[ Allocate arguments object");
+ if (!function_in_register_x1) {
+ // Load this again, if it's used by the local context below.
+ __ Ldr(x3, MemOperand(fp, JavaScriptFrameConstants::kFunctionOffset));
+ } else {
+ __ Mov(x3, x1);
+ }
+ // Receiver is just before the parameters on the caller's stack.
+ int num_parameters = info->scope()->num_parameters();
+ int offset = num_parameters * kPointerSize;
+ __ Add(x2, fp, StandardFrameConstants::kCallerSPOffset + offset);
+ __ Mov(x1, Smi::FromInt(num_parameters));
+ __ Push(x3, x2, x1);
+
+ // Arguments to ArgumentsAccessStub:
+ // function, receiver address, parameter count.
+ // The stub will rewrite receiver and parameter count if the previous
+ // stack frame was an arguments adapter frame.
+ ArgumentsAccessStub::Type type;
+ if (strict_mode() == STRICT) {
+ type = ArgumentsAccessStub::NEW_STRICT;
+ } else if (function()->has_duplicate_parameters()) {
+ type = ArgumentsAccessStub::NEW_SLOPPY_SLOW;
+ } else {
+ type = ArgumentsAccessStub::NEW_SLOPPY_FAST;
+ }
+ ArgumentsAccessStub stub(type);
+ __ CallStub(&stub);
+
+ SetVar(arguments, x0, x1, x2);
+ }
+
+ if (FLAG_trace) {
+ __ CallRuntime(Runtime::kTraceEnter, 0);
+ }
+
+
+ // Visit the declarations and body unless there is an illegal
+ // redeclaration.
+ if (scope()->HasIllegalRedeclaration()) {
+ Comment cmnt(masm_, "[ Declarations");
+ scope()->VisitIllegalRedeclaration(this);
+
+ } else {
+ PrepareForBailoutForId(BailoutId::FunctionEntry(), NO_REGISTERS);
+ { Comment cmnt(masm_, "[ Declarations");
+ if (scope()->is_function_scope() && scope()->function() != NULL) {
+ VariableDeclaration* function = scope()->function();
+ ASSERT(function->proxy()->var()->mode() == CONST ||
+ function->proxy()->var()->mode() == CONST_LEGACY);
+ ASSERT(function->proxy()->var()->location() != Variable::UNALLOCATED);
+ VisitVariableDeclaration(function);
+ }
+ VisitDeclarations(scope()->declarations());
+ }
+ }
+
+ { Comment cmnt(masm_, "[ Stack check");
+ PrepareForBailoutForId(BailoutId::Declarations(), NO_REGISTERS);
+ EmitStackCheck(masm_);
+ }
+
+ { Comment cmnt(masm_, "[ Body");
+ ASSERT(loop_depth() == 0);
+ VisitStatements(function()->body());
+ ASSERT(loop_depth() == 0);
+ }
+
+ // Always emit a 'return undefined' in case control fell off the end of
+ // the body.
+ { Comment cmnt(masm_, "[ return <undefined>;");
+ __ LoadRoot(x0, Heap::kUndefinedValueRootIndex);
+ }
+ EmitReturnSequence();
+
+ // Force emission of the pools, so they don't get emitted in the middle
+ // of the back edge table.
+ masm()->CheckVeneerPool(true, false);
+ masm()->CheckConstPool(true, false);
+}
+
+
+void FullCodeGenerator::ClearAccumulator() {
+ __ Mov(x0, Smi::FromInt(0));
+}
+
+
+void FullCodeGenerator::EmitProfilingCounterDecrement(int delta) {
+ __ Mov(x2, Operand(profiling_counter_));
+ __ Ldr(x3, FieldMemOperand(x2, Cell::kValueOffset));
+ __ Subs(x3, x3, Smi::FromInt(delta));
+ __ Str(x3, FieldMemOperand(x2, Cell::kValueOffset));
+}
+
+
+void FullCodeGenerator::EmitProfilingCounterReset() {
+ int reset_value = FLAG_interrupt_budget;
+ if (isolate()->IsDebuggerActive()) {
+ // Detect debug break requests as soon as possible.
+ reset_value = FLAG_interrupt_budget >> 4;
+ }
+ __ Mov(x2, Operand(profiling_counter_));
+ __ Mov(x3, Smi::FromInt(reset_value));
+ __ Str(x3, FieldMemOperand(x2, Cell::kValueOffset));
+}
+
+
+void FullCodeGenerator::EmitBackEdgeBookkeeping(IterationStatement* stmt,
+ Label* back_edge_target) {
+ ASSERT(jssp.Is(__ StackPointer()));
+ Comment cmnt(masm_, "[ Back edge bookkeeping");
+ // Block literal pools whilst emitting back edge code.
+ Assembler::BlockPoolsScope block_const_pool(masm_);
+ Label ok;
+
+ ASSERT(back_edge_target->is_bound());
+ int distance = masm_->SizeOfCodeGeneratedSince(back_edge_target);
+ int weight = Min(kMaxBackEdgeWeight,
+ Max(1, distance / kCodeSizeMultiplier));
+ EmitProfilingCounterDecrement(weight);
+ __ B(pl, &ok);
+ __ Call(isolate()->builtins()->InterruptCheck(), RelocInfo::CODE_TARGET);
+
+ // Record a mapping of this PC offset to the OSR id. This is used to find
+ // the AST id from the unoptimized code in order to use it as a key into
+ // the deoptimization input data found in the optimized code.
+ RecordBackEdge(stmt->OsrEntryId());
+
+ EmitProfilingCounterReset();
+
+ __ Bind(&ok);
+ PrepareForBailoutForId(stmt->EntryId(), NO_REGISTERS);
+ // Record a mapping of the OSR id to this PC. This is used if the OSR
+ // entry becomes the target of a bailout. We don't expect it to be, but
+ // we want it to work if it is.
+ PrepareForBailoutForId(stmt->OsrEntryId(), NO_REGISTERS);
+}
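+
+// On each back edge the profiling counter is decremented by a weight that is
+// roughly proportional to the size of the loop body (distance divided by
+// kCodeSizeMultiplier, clamped to kMaxBackEdgeWeight). When the counter goes
+// negative the InterruptCheck builtin is called, giving the runtime a chance
+// to handle interrupts and, via the recorded OSR id, to consider on-stack
+// replacement of this loop.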
+
+
+void FullCodeGenerator::EmitReturnSequence() {
+ Comment cmnt(masm_, "[ Return sequence");
+
+ if (return_label_.is_bound()) {
+ __ B(&return_label_);
+
+ } else {
+ __ Bind(&return_label_);
+ if (FLAG_trace) {
+ // Push the return value on the stack as the parameter.
+ // Runtime::TraceExit returns its parameter in x0.
+ __ Push(result_register());
+ __ CallRuntime(Runtime::kTraceExit, 1);
+ ASSERT(x0.Is(result_register()));
+ }
+ // Pretend that the exit is a backwards jump to the entry.
+ int weight = 1;
+ if (info_->ShouldSelfOptimize()) {
+ weight = FLAG_interrupt_budget / FLAG_self_opt_count;
+ } else {
+ int distance = masm_->pc_offset();
+ weight = Min(kMaxBackEdgeWeight,
+ Max(1, distance / kCodeSizeMultiplier));
+ }
+ EmitProfilingCounterDecrement(weight);
+ Label ok;
+ __ B(pl, &ok);
+ __ Push(x0);
+ __ Call(isolate()->builtins()->InterruptCheck(),
+ RelocInfo::CODE_TARGET);
+ __ Pop(x0);
+ EmitProfilingCounterReset();
+ __ Bind(&ok);
+
+ // Make sure that the constant pool is not emitted inside of the return
+ // sequence. This sequence can get patched when the debugger is used. See
+ // debug-arm64.cc:BreakLocationIterator::SetDebugBreakAtReturn().
+ {
+ InstructionAccurateScope scope(masm_,
+ Assembler::kJSRetSequenceInstructions);
+ CodeGenerator::RecordPositions(masm_, function()->end_position() - 1);
+ __ RecordJSReturn();
+ // This code is generated using Assembler methods rather than Macro
+ // Assembler methods because it will be patched later on, and so the size
+ // of the generated code must be consistent.
+ const Register& current_sp = __ StackPointer();
+      // Nothing ensures 16-byte alignment here.
+ ASSERT(!current_sp.Is(csp));
+ __ mov(current_sp, fp);
+ int no_frame_start = masm_->pc_offset();
+ __ ldp(fp, lr, MemOperand(current_sp, 2 * kXRegSize, PostIndex));
+ // Drop the arguments and receiver and return.
+ // TODO(all): This implementation is overkill as it supports 2**31+1
+ // arguments, consider how to improve it without creating a security
+ // hole.
+ __ LoadLiteral(ip0, 3 * kInstructionSize);
+ __ add(current_sp, current_sp, ip0);
+ __ ret();
+ __ dc64(kXRegSize * (info_->scope()->num_parameters() + 1));
+ info_->AddNoFrameRange(no_frame_start, masm_->pc_offset());
+ }
+ }
+}
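+
+// Note on the return sequence above: it is emitted inside an
+// InstructionAccurateScope of Assembler::kJSRetSequenceInstructions so that
+// its size is fixed and the debugger can patch it. The number of bytes to
+// drop (arguments plus receiver) is stored as a 64-bit literal right after
+// the ret instruction (the dc64) and loaded into ip0 with LoadLiteral, which
+// is why it can handle argument counts too large for an immediate operand.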
+
+
+void FullCodeGenerator::EffectContext::Plug(Variable* var) const {
+ ASSERT(var->IsStackAllocated() || var->IsContextSlot());
+}
+
+
+void FullCodeGenerator::AccumulatorValueContext::Plug(Variable* var) const {
+ ASSERT(var->IsStackAllocated() || var->IsContextSlot());
+ codegen()->GetVar(result_register(), var);
+}
+
+
+void FullCodeGenerator::StackValueContext::Plug(Variable* var) const {
+ ASSERT(var->IsStackAllocated() || var->IsContextSlot());
+ codegen()->GetVar(result_register(), var);
+ __ Push(result_register());
+}
+
+
+void FullCodeGenerator::TestContext::Plug(Variable* var) const {
+ ASSERT(var->IsStackAllocated() || var->IsContextSlot());
+ // For simplicity we always test the accumulator register.
+ codegen()->GetVar(result_register(), var);
+ codegen()->PrepareForBailoutBeforeSplit(condition(), false, NULL, NULL);
+ codegen()->DoTest(this);
+}
+
+
+void FullCodeGenerator::EffectContext::Plug(Heap::RootListIndex index) const {
+ // Root values have no side effects.
+}
+
+
+void FullCodeGenerator::AccumulatorValueContext::Plug(
+ Heap::RootListIndex index) const {
+ __ LoadRoot(result_register(), index);
+}
+
+
+void FullCodeGenerator::StackValueContext::Plug(
+ Heap::RootListIndex index) const {
+ __ LoadRoot(result_register(), index);
+ __ Push(result_register());
+}
+
+
+void FullCodeGenerator::TestContext::Plug(Heap::RootListIndex index) const {
+ codegen()->PrepareForBailoutBeforeSplit(condition(), true, true_label_,
+ false_label_);
+ if (index == Heap::kUndefinedValueRootIndex ||
+ index == Heap::kNullValueRootIndex ||
+ index == Heap::kFalseValueRootIndex) {
+ if (false_label_ != fall_through_) __ B(false_label_);
+ } else if (index == Heap::kTrueValueRootIndex) {
+ if (true_label_ != fall_through_) __ B(true_label_);
+ } else {
+ __ LoadRoot(result_register(), index);
+ codegen()->DoTest(this);
+ }
+}
+
+
+void FullCodeGenerator::EffectContext::Plug(Handle<Object> lit) const {
+}
+
+
+void FullCodeGenerator::AccumulatorValueContext::Plug(
+ Handle<Object> lit) const {
+ __ Mov(result_register(), Operand(lit));
+}
+
+
+void FullCodeGenerator::StackValueContext::Plug(Handle<Object> lit) const {
+ // Immediates cannot be pushed directly.
+ __ Mov(result_register(), Operand(lit));
+ __ Push(result_register());
+}
+
+
+void FullCodeGenerator::TestContext::Plug(Handle<Object> lit) const {
+ codegen()->PrepareForBailoutBeforeSplit(condition(),
+ true,
+ true_label_,
+ false_label_);
+ ASSERT(!lit->IsUndetectableObject()); // There are no undetectable literals.
+ if (lit->IsUndefined() || lit->IsNull() || lit->IsFalse()) {
+ if (false_label_ != fall_through_) __ B(false_label_);
+ } else if (lit->IsTrue() || lit->IsJSObject()) {
+ if (true_label_ != fall_through_) __ B(true_label_);
+ } else if (lit->IsString()) {
+ if (String::cast(*lit)->length() == 0) {
+ if (false_label_ != fall_through_) __ B(false_label_);
+ } else {
+ if (true_label_ != fall_through_) __ B(true_label_);
+ }
+ } else if (lit->IsSmi()) {
+ if (Smi::cast(*lit)->value() == 0) {
+ if (false_label_ != fall_through_) __ B(false_label_);
+ } else {
+ if (true_label_ != fall_through_) __ B(true_label_);
+ }
+ } else {
+ // For simplicity we always test the accumulator register.
+ __ Mov(result_register(), Operand(lit));
+ codegen()->DoTest(this);
+ }
+}
+
+
+void FullCodeGenerator::EffectContext::DropAndPlug(int count,
+ Register reg) const {
+ ASSERT(count > 0);
+ __ Drop(count);
+}
+
+
+void FullCodeGenerator::AccumulatorValueContext::DropAndPlug(
+ int count,
+ Register reg) const {
+ ASSERT(count > 0);
+ __ Drop(count);
+ __ Move(result_register(), reg);
+}
+
+
+void FullCodeGenerator::StackValueContext::DropAndPlug(int count,
+ Register reg) const {
+ ASSERT(count > 0);
+ if (count > 1) __ Drop(count - 1);
+ __ Poke(reg, 0);
+}
+
+
+void FullCodeGenerator::TestContext::DropAndPlug(int count,
+ Register reg) const {
+ ASSERT(count > 0);
+ // For simplicity we always test the accumulator register.
+ __ Drop(count);
+ __ Mov(result_register(), reg);
+ codegen()->PrepareForBailoutBeforeSplit(condition(), false, NULL, NULL);
+ codegen()->DoTest(this);
+}
+
+
+void FullCodeGenerator::EffectContext::Plug(Label* materialize_true,
+ Label* materialize_false) const {
+ ASSERT(materialize_true == materialize_false);
+ __ Bind(materialize_true);
+}
+
+
+void FullCodeGenerator::AccumulatorValueContext::Plug(
+ Label* materialize_true,
+ Label* materialize_false) const {
+ Label done;
+ __ Bind(materialize_true);
+ __ LoadRoot(result_register(), Heap::kTrueValueRootIndex);
+ __ B(&done);
+ __ Bind(materialize_false);
+ __ LoadRoot(result_register(), Heap::kFalseValueRootIndex);
+ __ Bind(&done);
+}
+
+
+void FullCodeGenerator::StackValueContext::Plug(
+ Label* materialize_true,
+ Label* materialize_false) const {
+ Label done;
+ __ Bind(materialize_true);
+ __ LoadRoot(x10, Heap::kTrueValueRootIndex);
+ __ B(&done);
+ __ Bind(materialize_false);
+ __ LoadRoot(x10, Heap::kFalseValueRootIndex);
+ __ Bind(&done);
+ __ Push(x10);
+}
+
+
+void FullCodeGenerator::TestContext::Plug(Label* materialize_true,
+ Label* materialize_false) const {
+ ASSERT(materialize_true == true_label_);
+ ASSERT(materialize_false == false_label_);
+}
+
+
+void FullCodeGenerator::EffectContext::Plug(bool flag) const {
+}
+
+
+void FullCodeGenerator::AccumulatorValueContext::Plug(bool flag) const {
+ Heap::RootListIndex value_root_index =
+ flag ? Heap::kTrueValueRootIndex : Heap::kFalseValueRootIndex;
+ __ LoadRoot(result_register(), value_root_index);
+}
+
+
+void FullCodeGenerator::StackValueContext::Plug(bool flag) const {
+ Heap::RootListIndex value_root_index =
+ flag ? Heap::kTrueValueRootIndex : Heap::kFalseValueRootIndex;
+ __ LoadRoot(x10, value_root_index);
+ __ Push(x10);
+}
+
+
+void FullCodeGenerator::TestContext::Plug(bool flag) const {
+ codegen()->PrepareForBailoutBeforeSplit(condition(),
+ true,
+ true_label_,
+ false_label_);
+ if (flag) {
+ if (true_label_ != fall_through_) {
+ __ B(true_label_);
+ }
+ } else {
+ if (false_label_ != fall_through_) {
+ __ B(false_label_);
+ }
+ }
+}
+
+
+void FullCodeGenerator::DoTest(Expression* condition,
+ Label* if_true,
+ Label* if_false,
+ Label* fall_through) {
+ Handle<Code> ic = ToBooleanStub::GetUninitialized(isolate());
+ CallIC(ic, condition->test_id());
+ __ CompareAndSplit(result_register(), 0, ne, if_true, if_false, fall_through);
+}
+
+
+// If (cond), branch to if_true.
+// If (!cond), branch to if_false.
+// fall_through is used as an optimization in cases where only one branch
+// instruction is necessary.
+void FullCodeGenerator::Split(Condition cond,
+ Label* if_true,
+ Label* if_false,
+ Label* fall_through) {
+ if (if_false == fall_through) {
+ __ B(cond, if_true);
+ } else if (if_true == fall_through) {
+ ASSERT(if_false != fall_through);
+ __ B(InvertCondition(cond), if_false);
+ } else {
+ __ B(cond, if_true);
+ __ B(if_false);
+ }
+}
+
+
+MemOperand FullCodeGenerator::StackOperand(Variable* var) {
+ // Offset is negative because higher indexes are at lower addresses.
+ int offset = -var->index() * kXRegSize;
+ // Adjust by a (parameter or local) base offset.
+ if (var->IsParameter()) {
+ offset += (info_->scope()->num_parameters() + 1) * kPointerSize;
+ } else {
+ offset += JavaScriptFrameConstants::kLocal0Offset;
+ }
+ return MemOperand(fp, offset);
+}
+
+
+MemOperand FullCodeGenerator::VarOperand(Variable* var, Register scratch) {
+ ASSERT(var->IsContextSlot() || var->IsStackAllocated());
+ if (var->IsContextSlot()) {
+ int context_chain_length = scope()->ContextChainLength(var->scope());
+ __ LoadContext(scratch, context_chain_length);
+ return ContextMemOperand(scratch, var->index());
+ } else {
+ return StackOperand(var);
+ }
+}
+
+
+void FullCodeGenerator::GetVar(Register dest, Variable* var) {
+ // Use destination as scratch.
+ MemOperand location = VarOperand(var, dest);
+ __ Ldr(dest, location);
+}
+
+
+void FullCodeGenerator::SetVar(Variable* var,
+ Register src,
+ Register scratch0,
+ Register scratch1) {
+ ASSERT(var->IsContextSlot() || var->IsStackAllocated());
+ ASSERT(!AreAliased(src, scratch0, scratch1));
+ MemOperand location = VarOperand(var, scratch0);
+ __ Str(src, location);
+
+ // Emit the write barrier code if the location is in the heap.
+ if (var->IsContextSlot()) {
+ // scratch0 contains the correct context.
+ __ RecordWriteContextSlot(scratch0,
+ location.offset(),
+ src,
+ scratch1,
+ kLRHasBeenSaved,
+ kDontSaveFPRegs);
+ }
+}
+
+
+void FullCodeGenerator::PrepareForBailoutBeforeSplit(Expression* expr,
+ bool should_normalize,
+ Label* if_true,
+ Label* if_false) {
+ // Only prepare for bailouts before splits if we're in a test
+ // context. Otherwise, we let the Visit function deal with the
+ // preparation to avoid preparing with the same AST id twice.
+ if (!context()->IsTest() || !info_->IsOptimizable()) return;
+
+ // TODO(all): Investigate to see if there is something to work on here.
+ Label skip;
+ if (should_normalize) {
+ __ B(&skip);
+ }
+ PrepareForBailout(expr, TOS_REG);
+ if (should_normalize) {
+ __ CompareRoot(x0, Heap::kTrueValueRootIndex);
+ Split(eq, if_true, if_false, NULL);
+ __ Bind(&skip);
+ }
+}
+
+
+void FullCodeGenerator::EmitDebugCheckDeclarationContext(Variable* variable) {
+ // The variable in the declaration always resides in the current function
+ // context.
+ ASSERT_EQ(0, scope()->ContextChainLength(variable->scope()));
+ if (generate_debug_code_) {
+ // Check that we're not inside a with or catch context.
+ __ Ldr(x1, FieldMemOperand(cp, HeapObject::kMapOffset));
+ __ CompareRoot(x1, Heap::kWithContextMapRootIndex);
+ __ Check(ne, kDeclarationInWithContext);
+ __ CompareRoot(x1, Heap::kCatchContextMapRootIndex);
+ __ Check(ne, kDeclarationInCatchContext);
+ }
+}
+
+
+void FullCodeGenerator::VisitVariableDeclaration(
+ VariableDeclaration* declaration) {
+ // If it was not possible to allocate the variable at compile time, we
+ // need to "declare" it at runtime to make sure it actually exists in the
+ // local context.
+ VariableProxy* proxy = declaration->proxy();
+ VariableMode mode = declaration->mode();
+ Variable* variable = proxy->var();
+ bool hole_init = mode == LET || mode == CONST || mode == CONST_LEGACY;
+
+ switch (variable->location()) {
+ case Variable::UNALLOCATED:
+ globals_->Add(variable->name(), zone());
+ globals_->Add(variable->binding_needs_init()
+ ? isolate()->factory()->the_hole_value()
+ : isolate()->factory()->undefined_value(),
+ zone());
+ break;
+
+ case Variable::PARAMETER:
+ case Variable::LOCAL:
+ if (hole_init) {
+ Comment cmnt(masm_, "[ VariableDeclaration");
+ __ LoadRoot(x10, Heap::kTheHoleValueRootIndex);
+ __ Str(x10, StackOperand(variable));
+ }
+ break;
+
+ case Variable::CONTEXT:
+ if (hole_init) {
+ Comment cmnt(masm_, "[ VariableDeclaration");
+ EmitDebugCheckDeclarationContext(variable);
+ __ LoadRoot(x10, Heap::kTheHoleValueRootIndex);
+ __ Str(x10, ContextMemOperand(cp, variable->index()));
+ // No write barrier since the_hole_value is in old space.
+ PrepareForBailoutForId(proxy->id(), NO_REGISTERS);
+ }
+ break;
+
+ case Variable::LOOKUP: {
+ Comment cmnt(masm_, "[ VariableDeclaration");
+ __ Mov(x2, Operand(variable->name()));
+ // Declaration nodes are always introduced in one of four modes.
+ ASSERT(IsDeclaredVariableMode(mode));
+ PropertyAttributes attr = IsImmutableVariableMode(mode) ? READ_ONLY
+ : NONE;
+ __ Mov(x1, Smi::FromInt(attr));
+ // Push initial value, if any.
+ // Note: For variables we must not push an initial value (such as
+ // 'undefined') because we may have a (legal) redeclaration and we
+ // must not destroy the current value.
+ if (hole_init) {
+ __ LoadRoot(x0, Heap::kTheHoleValueRootIndex);
+ __ Push(cp, x2, x1, x0);
+ } else {
+ // Pushing 0 (xzr) indicates no initial value.
+ __ Push(cp, x2, x1, xzr);
+ }
+ __ CallRuntime(Runtime::kHiddenDeclareContextSlot, 4);
+ break;
+ }
+ }
+}
+
+
+void FullCodeGenerator::VisitFunctionDeclaration(
+ FunctionDeclaration* declaration) {
+ VariableProxy* proxy = declaration->proxy();
+ Variable* variable = proxy->var();
+ switch (variable->location()) {
+ case Variable::UNALLOCATED: {
+ globals_->Add(variable->name(), zone());
+ Handle<SharedFunctionInfo> function =
+ Compiler::BuildFunctionInfo(declaration->fun(), script());
+ // Check for stack overflow exception.
+ if (function.is_null()) return SetStackOverflow();
+ globals_->Add(function, zone());
+ break;
+ }
+
+ case Variable::PARAMETER:
+ case Variable::LOCAL: {
+ Comment cmnt(masm_, "[ Function Declaration");
+ VisitForAccumulatorValue(declaration->fun());
+ __ Str(result_register(), StackOperand(variable));
+ break;
+ }
+
+ case Variable::CONTEXT: {
+ Comment cmnt(masm_, "[ Function Declaration");
+ EmitDebugCheckDeclarationContext(variable);
+ VisitForAccumulatorValue(declaration->fun());
+ __ Str(result_register(), ContextMemOperand(cp, variable->index()));
+ int offset = Context::SlotOffset(variable->index());
+ // We know that we have written a function, which is not a smi.
+ __ RecordWriteContextSlot(cp,
+ offset,
+ result_register(),
+ x2,
+ kLRHasBeenSaved,
+ kDontSaveFPRegs,
+ EMIT_REMEMBERED_SET,
+ OMIT_SMI_CHECK);
+ PrepareForBailoutForId(proxy->id(), NO_REGISTERS);
+ break;
+ }
+
+ case Variable::LOOKUP: {
+ Comment cmnt(masm_, "[ Function Declaration");
+ __ Mov(x2, Operand(variable->name()));
+ __ Mov(x1, Smi::FromInt(NONE));
+ __ Push(cp, x2, x1);
+ // Push initial value for function declaration.
+ VisitForStackValue(declaration->fun());
+ __ CallRuntime(Runtime::kHiddenDeclareContextSlot, 4);
+ break;
+ }
+ }
+}
+
+
+void FullCodeGenerator::VisitModuleDeclaration(ModuleDeclaration* declaration) {
+ Variable* variable = declaration->proxy()->var();
+ ASSERT(variable->location() == Variable::CONTEXT);
+ ASSERT(variable->interface()->IsFrozen());
+
+ Comment cmnt(masm_, "[ ModuleDeclaration");
+ EmitDebugCheckDeclarationContext(variable);
+
+ // Load instance object.
+ __ LoadContext(x1, scope_->ContextChainLength(scope_->GlobalScope()));
+ __ Ldr(x1, ContextMemOperand(x1, variable->interface()->Index()));
+ __ Ldr(x1, ContextMemOperand(x1, Context::EXTENSION_INDEX));
+
+ // Assign it.
+ __ Str(x1, ContextMemOperand(cp, variable->index()));
+ // We know that we have written a module, which is not a smi.
+ __ RecordWriteContextSlot(cp,
+ Context::SlotOffset(variable->index()),
+ x1,
+ x3,
+ kLRHasBeenSaved,
+ kDontSaveFPRegs,
+ EMIT_REMEMBERED_SET,
+ OMIT_SMI_CHECK);
+ PrepareForBailoutForId(declaration->proxy()->id(), NO_REGISTERS);
+
+  // Traverse the module body.
+ Visit(declaration->module());
+}
+
+
+void FullCodeGenerator::VisitImportDeclaration(ImportDeclaration* declaration) {
+ VariableProxy* proxy = declaration->proxy();
+ Variable* variable = proxy->var();
+ switch (variable->location()) {
+ case Variable::UNALLOCATED:
+ // TODO(rossberg)
+ break;
+
+ case Variable::CONTEXT: {
+ Comment cmnt(masm_, "[ ImportDeclaration");
+ EmitDebugCheckDeclarationContext(variable);
+ // TODO(rossberg)
+ break;
+ }
+
+ case Variable::PARAMETER:
+ case Variable::LOCAL:
+ case Variable::LOOKUP:
+ UNREACHABLE();
+ }
+}
+
+
+void FullCodeGenerator::VisitExportDeclaration(ExportDeclaration* declaration) {
+ // TODO(rossberg)
+}
+
+
+void FullCodeGenerator::DeclareGlobals(Handle<FixedArray> pairs) {
+ // Call the runtime to declare the globals.
+ __ Mov(x11, Operand(pairs));
+ Register flags = xzr;
+ if (Smi::FromInt(DeclareGlobalsFlags())) {
+ flags = x10;
+ __ Mov(flags, Smi::FromInt(DeclareGlobalsFlags()));
+ }
+ __ Push(cp, x11, flags);
+ __ CallRuntime(Runtime::kHiddenDeclareGlobals, 3);
+ // Return value is ignored.
+}
+
+
+void FullCodeGenerator::DeclareModules(Handle<FixedArray> descriptions) {
+ // Call the runtime to declare the modules.
+ __ Push(descriptions);
+ __ CallRuntime(Runtime::kHiddenDeclareModules, 1);
+ // Return value is ignored.
+}
+
+
+void FullCodeGenerator::VisitSwitchStatement(SwitchStatement* stmt) {
+ ASM_LOCATION("FullCodeGenerator::VisitSwitchStatement");
+ Comment cmnt(masm_, "[ SwitchStatement");
+ Breakable nested_statement(this, stmt);
+ SetStatementPosition(stmt);
+
+ // Keep the switch value on the stack until a case matches.
+ VisitForStackValue(stmt->tag());
+ PrepareForBailoutForId(stmt->EntryId(), NO_REGISTERS);
+
+ ZoneList<CaseClause*>* clauses = stmt->cases();
+ CaseClause* default_clause = NULL; // Can occur anywhere in the list.
+
+ Label next_test; // Recycled for each test.
+ // Compile all the tests with branches to their bodies.
+ for (int i = 0; i < clauses->length(); i++) {
+ CaseClause* clause = clauses->at(i);
+ clause->body_target()->Unuse();
+
+ // The default is not a test, but remember it as final fall through.
+ if (clause->is_default()) {
+ default_clause = clause;
+ continue;
+ }
+
+ Comment cmnt(masm_, "[ Case comparison");
+ __ Bind(&next_test);
+ next_test.Unuse();
+
+ // Compile the label expression.
+ VisitForAccumulatorValue(clause->label());
+
+ // Perform the comparison as if via '==='.
+ __ Peek(x1, 0); // Switch value.
+
+ JumpPatchSite patch_site(masm_);
+ if (ShouldInlineSmiCase(Token::EQ_STRICT)) {
+ Label slow_case;
+ patch_site.EmitJumpIfEitherNotSmi(x0, x1, &slow_case);
+ __ Cmp(x1, x0);
+ __ B(ne, &next_test);
+ __ Drop(1); // Switch value is no longer needed.
+ __ B(clause->body_target());
+ __ Bind(&slow_case);
+ }
+
+ // Record position before stub call for type feedback.
+ SetSourcePosition(clause->position());
+ Handle<Code> ic = CompareIC::GetUninitialized(isolate(), Token::EQ_STRICT);
+ CallIC(ic, clause->CompareId());
+ patch_site.EmitPatchInfo();
+
+ Label skip;
+ __ B(&skip);
+ PrepareForBailout(clause, TOS_REG);
+ __ JumpIfNotRoot(x0, Heap::kTrueValueRootIndex, &next_test);
+ __ Drop(1);
+ __ B(clause->body_target());
+ __ Bind(&skip);
+
+ __ Cbnz(x0, &next_test);
+ __ Drop(1); // Switch value is no longer needed.
+ __ B(clause->body_target());
+ }
+
+ // Discard the test value and jump to the default if present, otherwise to
+ // the end of the statement.
+ __ Bind(&next_test);
+ __ Drop(1); // Switch value is no longer needed.
+ if (default_clause == NULL) {
+ __ B(nested_statement.break_label());
+ } else {
+ __ B(default_clause->body_target());
+ }
+
+ // Compile all the case bodies.
+ for (int i = 0; i < clauses->length(); i++) {
+ Comment cmnt(masm_, "[ Case body");
+ CaseClause* clause = clauses->at(i);
+ __ Bind(clause->body_target());
+ PrepareForBailoutForId(clause->EntryId(), NO_REGISTERS);
+ VisitStatements(clause->statements());
+ }
+
+ __ Bind(nested_statement.break_label());
+ PrepareForBailoutForId(stmt->ExitId(), NO_REGISTERS);
+}
+
+
+void FullCodeGenerator::VisitForInStatement(ForInStatement* stmt) {
+ ASM_LOCATION("FullCodeGenerator::VisitForInStatement");
+ Comment cmnt(masm_, "[ ForInStatement");
+ int slot = stmt->ForInFeedbackSlot();
+ // TODO(all): This visitor probably needs better comments and a revisit.
+ SetStatementPosition(stmt);
+
+ Label loop, exit;
+ ForIn loop_statement(this, stmt);
+ increment_loop_depth();
+
+ // Get the object to enumerate over. If the object is null or undefined, skip
+ // over the loop. See ECMA-262 version 5, section 12.6.4.
+ VisitForAccumulatorValue(stmt->enumerable());
+ __ JumpIfRoot(x0, Heap::kUndefinedValueRootIndex, &exit);
+ Register null_value = x15;
+ __ LoadRoot(null_value, Heap::kNullValueRootIndex);
+ __ Cmp(x0, null_value);
+ __ B(eq, &exit);
+
+ PrepareForBailoutForId(stmt->PrepareId(), TOS_REG);
+
+ // Convert the object to a JS object.
+ Label convert, done_convert;
+ __ JumpIfSmi(x0, &convert);
+ __ JumpIfObjectType(x0, x10, x11, FIRST_SPEC_OBJECT_TYPE, &done_convert, ge);
+ __ Bind(&convert);
+ __ Push(x0);
+ __ InvokeBuiltin(Builtins::TO_OBJECT, CALL_FUNCTION);
+ __ Bind(&done_convert);
+ __ Push(x0);
+
+ // Check for proxies.
+ Label call_runtime;
+ STATIC_ASSERT(FIRST_JS_PROXY_TYPE == FIRST_SPEC_OBJECT_TYPE);
+ __ JumpIfObjectType(x0, x10, x11, LAST_JS_PROXY_TYPE, &call_runtime, le);
+
+ // Check cache validity in generated code. This is a fast case for
+ // the JSObject::IsSimpleEnum cache validity checks. If we cannot
+ // guarantee cache validity, call the runtime system to check cache
+ // validity or get the property names in a fixed array.
+ __ CheckEnumCache(x0, null_value, x10, x11, x12, x13, &call_runtime);
+
+ // The enum cache is valid. Load the map of the object being
+ // iterated over and use the cache for the iteration.
+ Label use_cache;
+ __ Ldr(x0, FieldMemOperand(x0, HeapObject::kMapOffset));
+ __ B(&use_cache);
+
+ // Get the set of properties to enumerate.
+ __ Bind(&call_runtime);
+ __ Push(x0); // Duplicate the enumerable object on the stack.
+ __ CallRuntime(Runtime::kGetPropertyNamesFast, 1);
+
+ // If we got a map from the runtime call, we can do a fast
+ // modification check. Otherwise, we got a fixed array, and we have
+ // to do a slow check.
+ Label fixed_array, no_descriptors;
+ __ Ldr(x2, FieldMemOperand(x0, HeapObject::kMapOffset));
+ __ JumpIfNotRoot(x2, Heap::kMetaMapRootIndex, &fixed_array);
+
+ // We got a map in register x0. Get the enumeration cache from it.
+ __ Bind(&use_cache);
+
+ __ EnumLengthUntagged(x1, x0);
+ __ Cbz(x1, &no_descriptors);
+
+ __ LoadInstanceDescriptors(x0, x2);
+ __ Ldr(x2, FieldMemOperand(x2, DescriptorArray::kEnumCacheOffset));
+ __ Ldr(x2,
+ FieldMemOperand(x2, DescriptorArray::kEnumCacheBridgeCacheOffset));
+
+ // Set up the four remaining stack slots.
+ __ Push(x0); // Map.
+ __ Mov(x0, Smi::FromInt(0));
+ // Push enumeration cache, enumeration cache length (as smi) and zero.
+ __ SmiTag(x1);
+ __ Push(x2, x1, x0);
+ __ B(&loop);
+
+ __ Bind(&no_descriptors);
+ __ Drop(1);
+ __ B(&exit);
+
+ // We got a fixed array in register x0. Iterate through that.
+ __ Bind(&fixed_array);
+
+ Handle<Object> feedback = Handle<Object>(
+ Smi::FromInt(TypeFeedbackInfo::kForInFastCaseMarker),
+ isolate());
+ StoreFeedbackVectorSlot(slot, feedback);
+ __ LoadObject(x1, FeedbackVector());
+ __ Mov(x10, Smi::FromInt(TypeFeedbackInfo::kForInSlowCaseMarker));
+ __ Str(x10, FieldMemOperand(x1, FixedArray::OffsetOfElementAt(slot)));
+
+ __ Mov(x1, Smi::FromInt(1)); // Smi indicates slow check.
+ __ Peek(x10, 0); // Get enumerated object.
+ STATIC_ASSERT(FIRST_JS_PROXY_TYPE == FIRST_SPEC_OBJECT_TYPE);
+ // TODO(all): similar check was done already. Can we avoid it here?
+ __ CompareObjectType(x10, x11, x12, LAST_JS_PROXY_TYPE);
+ ASSERT(Smi::FromInt(0) == 0);
+ __ CzeroX(x1, le); // Zero indicates proxy.
+ __ Push(x1, x0); // Smi and array
+ __ Ldr(x1, FieldMemOperand(x0, FixedArray::kLengthOffset));
+ __ Push(x1, xzr); // Fixed array length (as smi) and initial index.
+
+ // Generate code for doing the condition check.
+ PrepareForBailoutForId(stmt->BodyId(), NO_REGISTERS);
+ __ Bind(&loop);
+ // Load the current count to x0, load the length to x1.
+ __ PeekPair(x0, x1, 0);
+ __ Cmp(x0, x1); // Compare to the array length.
+ __ B(hs, loop_statement.break_label());
+
+  // Get the current entry of the array into register x3.
+ __ Peek(x10, 2 * kXRegSize);
+ __ Add(x10, x10, Operand::UntagSmiAndScale(x0, kPointerSizeLog2));
+ __ Ldr(x3, MemOperand(x10, FixedArray::kHeaderSize - kHeapObjectTag));
+
+ // Get the expected map from the stack or a smi in the
+ // permanent slow case into register x10.
+ __ Peek(x2, 3 * kXRegSize);
+
+ // Check if the expected map still matches that of the enumerable.
+ // If not, we may have to filter the key.
+ Label update_each;
+ __ Peek(x1, 4 * kXRegSize);
+ __ Ldr(x11, FieldMemOperand(x1, HeapObject::kMapOffset));
+ __ Cmp(x11, x2);
+ __ B(eq, &update_each);
+
+ // For proxies, no filtering is done.
+ // TODO(rossberg): What if only a prototype is a proxy? Not specified yet.
+ STATIC_ASSERT(kSmiTag == 0);
+ __ Cbz(x2, &update_each);
+
+ // Convert the entry to a string or (smi) 0 if it isn't a property
+ // any more. If the property has been removed while iterating, we
+ // just skip it.
+ __ Push(x1, x3);
+ __ InvokeBuiltin(Builtins::FILTER_KEY, CALL_FUNCTION);
+ __ Mov(x3, x0);
+ __ Cbz(x0, loop_statement.continue_label());
+
+ // Update the 'each' property or variable from the possibly filtered
+ // entry in register x3.
+ __ Bind(&update_each);
+ __ Mov(result_register(), x3);
+ // Perform the assignment as if via '='.
+ { EffectContext context(this);
+ EmitAssignment(stmt->each());
+ }
+
+ // Generate code for the body of the loop.
+ Visit(stmt->body());
+
+ // Generate code for going to the next element by incrementing
+ // the index (smi) stored on top of the stack.
+ __ Bind(loop_statement.continue_label());
+ // TODO(all): We could use a callee saved register to avoid popping.
+ __ Pop(x0);
+ __ Add(x0, x0, Smi::FromInt(1));
+ __ Push(x0);
+
+ EmitBackEdgeBookkeeping(stmt, &loop);
+ __ B(&loop);
+
+ // Remove the pointers stored on the stack.
+ __ Bind(loop_statement.break_label());
+ __ Drop(5);
+
+ // Exit and decrement the loop depth.
+ PrepareForBailoutForId(stmt->ExitId(), NO_REGISTERS);
+ __ Bind(&exit);
+ decrement_loop_depth();
+}
+
+
+void FullCodeGenerator::VisitForOfStatement(ForOfStatement* stmt) {
+ Comment cmnt(masm_, "[ ForOfStatement");
+ SetStatementPosition(stmt);
+
+ Iteration loop_statement(this, stmt);
+ increment_loop_depth();
+
+ // var iterator = iterable[@@iterator]()
+ VisitForAccumulatorValue(stmt->assign_iterator());
+
+ // As with for-in, skip the loop if the iterator is null or undefined.
+ Register iterator = x0;
+ __ JumpIfRoot(iterator, Heap::kUndefinedValueRootIndex,
+ loop_statement.break_label());
+ __ JumpIfRoot(iterator, Heap::kNullValueRootIndex,
+ loop_statement.break_label());
+
+ // Convert the iterator to a JS object.
+ Label convert, done_convert;
+ __ JumpIfSmi(iterator, &convert);
+ __ CompareObjectType(iterator, x1, x1, FIRST_SPEC_OBJECT_TYPE);
+ __ B(ge, &done_convert);
+ __ Bind(&convert);
+ __ Push(iterator);
+ __ InvokeBuiltin(Builtins::TO_OBJECT, CALL_FUNCTION);
+ __ Bind(&done_convert);
+ __ Push(iterator);
+
+ // Loop entry.
+ __ Bind(loop_statement.continue_label());
+
+ // result = iterator.next()
+ VisitForEffect(stmt->next_result());
+
+ // if (result.done) break;
+ Label result_not_done;
+ VisitForControl(stmt->result_done(),
+ loop_statement.break_label(),
+ &result_not_done,
+ &result_not_done);
+ __ Bind(&result_not_done);
+
+ // each = result.value
+ VisitForEffect(stmt->assign_each());
+
+ // Generate code for the body of the loop.
+ Visit(stmt->body());
+
+ // Check stack before looping.
+ PrepareForBailoutForId(stmt->BackEdgeId(), NO_REGISTERS);
+ EmitBackEdgeBookkeeping(stmt, loop_statement.continue_label());
+ __ B(loop_statement.continue_label());
+
+ // Exit and decrement the loop depth.
+ PrepareForBailoutForId(stmt->ExitId(), NO_REGISTERS);
+ __ Bind(loop_statement.break_label());
+ decrement_loop_depth();
+}
+
+
+void FullCodeGenerator::EmitNewClosure(Handle<SharedFunctionInfo> info,
+ bool pretenure) {
+ // Use the fast case closure allocation code that allocates in new space for
+ // nested functions that don't need literals cloning. If we're running with
+ // the --always-opt or the --prepare-always-opt flag, we need to use the
+ // runtime function so that the new function we are creating here gets a
+ // chance to have its code optimized and doesn't just get a copy of the
+ // existing unoptimized code.
+ if (!FLAG_always_opt &&
+ !FLAG_prepare_always_opt &&
+ !pretenure &&
+ scope()->is_function_scope() &&
+ info->num_literals() == 0) {
+ FastNewClosureStub stub(info->strict_mode(), info->is_generator());
+ __ Mov(x2, Operand(info));
+ __ CallStub(&stub);
+ } else {
+ __ Mov(x11, Operand(info));
+ __ LoadRoot(x10, pretenure ? Heap::kTrueValueRootIndex
+ : Heap::kFalseValueRootIndex);
+ __ Push(cp, x11, x10);
+ __ CallRuntime(Runtime::kHiddenNewClosure, 3);
+ }
+ context()->Plug(x0);
+}
+
+
+void FullCodeGenerator::VisitVariableProxy(VariableProxy* expr) {
+ Comment cmnt(masm_, "[ VariableProxy");
+ EmitVariableLoad(expr);
+}
+
+
+void FullCodeGenerator::EmitLoadGlobalCheckExtensions(Variable* var,
+ TypeofState typeof_state,
+ Label* slow) {
+ Register current = cp;
+ Register next = x10;
+ Register temp = x11;
+
+ Scope* s = scope();
+ while (s != NULL) {
+ if (s->num_heap_slots() > 0) {
+ if (s->calls_sloppy_eval()) {
+ // Check that extension is NULL.
+ __ Ldr(temp, ContextMemOperand(current, Context::EXTENSION_INDEX));
+ __ Cbnz(temp, slow);
+ }
+ // Load next context in chain.
+ __ Ldr(next, ContextMemOperand(current, Context::PREVIOUS_INDEX));
+ // Walk the rest of the chain without clobbering cp.
+ current = next;
+ }
+ // If no outer scope calls eval, we do not need to check more
+ // context extensions.
+ if (!s->outer_scope_calls_sloppy_eval() || s->is_eval_scope()) break;
+ s = s->outer_scope();
+ }
+
+ if (s->is_eval_scope()) {
+ Label loop, fast;
+ __ Mov(next, current);
+
+ __ Bind(&loop);
+ // Terminate at native context.
+ __ Ldr(temp, FieldMemOperand(next, HeapObject::kMapOffset));
+ __ JumpIfRoot(temp, Heap::kNativeContextMapRootIndex, &fast);
+ // Check that extension is NULL.
+ __ Ldr(temp, ContextMemOperand(next, Context::EXTENSION_INDEX));
+ __ Cbnz(temp, slow);
+ // Load next context in chain.
+ __ Ldr(next, ContextMemOperand(next, Context::PREVIOUS_INDEX));
+ __ B(&loop);
+ __ Bind(&fast);
+ }
+
+ __ Ldr(x0, GlobalObjectMemOperand());
+ __ Mov(x2, Operand(var->name()));
+ ContextualMode mode = (typeof_state == INSIDE_TYPEOF) ? NOT_CONTEXTUAL
+ : CONTEXTUAL;
+ CallLoadIC(mode);
+}
+
+
+MemOperand FullCodeGenerator::ContextSlotOperandCheckExtensions(Variable* var,
+ Label* slow) {
+ ASSERT(var->IsContextSlot());
+ Register context = cp;
+ Register next = x10;
+ Register temp = x11;
+
+ for (Scope* s = scope(); s != var->scope(); s = s->outer_scope()) {
+ if (s->num_heap_slots() > 0) {
+ if (s->calls_sloppy_eval()) {
+ // Check that extension is NULL.
+ __ Ldr(temp, ContextMemOperand(context, Context::EXTENSION_INDEX));
+ __ Cbnz(temp, slow);
+ }
+ __ Ldr(next, ContextMemOperand(context, Context::PREVIOUS_INDEX));
+ // Walk the rest of the chain without clobbering cp.
+ context = next;
+ }
+ }
+ // Check that last extension is NULL.
+ __ Ldr(temp, ContextMemOperand(context, Context::EXTENSION_INDEX));
+ __ Cbnz(temp, slow);
+
+ // This function is used only for loads, not stores, so it's safe to
+  // return a cp-based operand (the write barrier cannot be allowed to
+ // destroy the cp register).
+ return ContextMemOperand(context, var->index());
+}
+
+
+void FullCodeGenerator::EmitDynamicLookupFastCase(Variable* var,
+ TypeofState typeof_state,
+ Label* slow,
+ Label* done) {
+ // Generate fast-case code for variables that might be shadowed by
+ // eval-introduced variables. Eval is used a lot without
+ // introducing variables. In those cases, we do not want to
+ // perform a runtime call for all variables in the scope
+ // containing the eval.
+ if (var->mode() == DYNAMIC_GLOBAL) {
+ EmitLoadGlobalCheckExtensions(var, typeof_state, slow);
+ __ B(done);
+ } else if (var->mode() == DYNAMIC_LOCAL) {
+ Variable* local = var->local_if_not_shadowed();
+ __ Ldr(x0, ContextSlotOperandCheckExtensions(local, slow));
+ if (local->mode() == LET || local->mode() == CONST ||
+ local->mode() == CONST_LEGACY) {
+ __ JumpIfNotRoot(x0, Heap::kTheHoleValueRootIndex, done);
+ if (local->mode() == CONST_LEGACY) {
+ __ LoadRoot(x0, Heap::kUndefinedValueRootIndex);
+ } else { // LET || CONST
+ __ Mov(x0, Operand(var->name()));
+ __ Push(x0);
+ __ CallRuntime(Runtime::kHiddenThrowReferenceError, 1);
+ }
+ }
+ __ B(done);
+ }
+}
+
+
+void FullCodeGenerator::EmitVariableLoad(VariableProxy* proxy) {
+ // Record position before possible IC call.
+ SetSourcePosition(proxy->position());
+ Variable* var = proxy->var();
+
+ // Three cases: global variables, lookup variables, and all other types of
+ // variables.
+ switch (var->location()) {
+ case Variable::UNALLOCATED: {
+ Comment cmnt(masm_, "Global variable");
+ // Use inline caching. Variable name is passed in x2 and the global
+ // object (receiver) in x0.
+ __ Ldr(x0, GlobalObjectMemOperand());
+ __ Mov(x2, Operand(var->name()));
+ CallLoadIC(CONTEXTUAL);
+ context()->Plug(x0);
+ break;
+ }
+
+ case Variable::PARAMETER:
+ case Variable::LOCAL:
+ case Variable::CONTEXT: {
+ Comment cmnt(masm_, var->IsContextSlot()
+ ? "Context variable"
+ : "Stack variable");
+ if (var->binding_needs_init()) {
+ // var->scope() may be NULL when the proxy is located in eval code and
+ // refers to a potential outside binding. Currently those bindings are
+ // always looked up dynamically, i.e. in that case
+        //     var->location() == LOOKUP
+ // always holds.
+ ASSERT(var->scope() != NULL);
+
+ // Check if the binding really needs an initialization check. The check
+ // can be skipped in the following situation: we have a LET or CONST
+ // binding in harmony mode, both the Variable and the VariableProxy have
+ // the same declaration scope (i.e. they are both in global code, in the
+ // same function or in the same eval code) and the VariableProxy is in
+ // the source physically located after the initializer of the variable.
+ //
+ // We cannot skip any initialization checks for CONST in non-harmony
+ // mode because const variables may be declared but never initialized:
+ // if (false) { const x; }; var y = x;
+ //
+ // The condition on the declaration scopes is a conservative check for
+ // nested functions that access a binding and are called before the
+ // binding is initialized:
+ // function() { f(); let x = 1; function f() { x = 2; } }
+ //
+ bool skip_init_check;
+ if (var->scope()->DeclarationScope() != scope()->DeclarationScope()) {
+ skip_init_check = false;
+ } else {
+ // Check that we always have valid source position.
+ ASSERT(var->initializer_position() != RelocInfo::kNoPosition);
+ ASSERT(proxy->position() != RelocInfo::kNoPosition);
+ skip_init_check = var->mode() != CONST_LEGACY &&
+ var->initializer_position() < proxy->position();
+ }
+
+ if (!skip_init_check) {
+ // Let and const need a read barrier.
+ GetVar(x0, var);
+ Label done;
+ __ JumpIfNotRoot(x0, Heap::kTheHoleValueRootIndex, &done);
+ if (var->mode() == LET || var->mode() == CONST) {
+ // Throw a reference error when using an uninitialized let/const
+ // binding in harmony mode.
+ __ Mov(x0, Operand(var->name()));
+ __ Push(x0);
+ __ CallRuntime(Runtime::kHiddenThrowReferenceError, 1);
+ __ Bind(&done);
+ } else {
+          // Uninitialized const bindings outside of harmony mode are unholed.
+ ASSERT(var->mode() == CONST_LEGACY);
+ __ LoadRoot(x0, Heap::kUndefinedValueRootIndex);
+ __ Bind(&done);
+ }
+ context()->Plug(x0);
+ break;
+ }
+ }
+ context()->Plug(var);
+ break;
+ }
+
+ case Variable::LOOKUP: {
+ Label done, slow;
+ // Generate code for loading from variables potentially shadowed by
+ // eval-introduced variables.
+ EmitDynamicLookupFastCase(var, NOT_INSIDE_TYPEOF, &slow, &done);
+ __ Bind(&slow);
+ Comment cmnt(masm_, "Lookup variable");
+ __ Mov(x1, Operand(var->name()));
+ __ Push(cp, x1); // Context and name.
+ __ CallRuntime(Runtime::kHiddenLoadContextSlot, 2);
+ __ Bind(&done);
+ context()->Plug(x0);
+ break;
+ }
+ }
+}
+
+
+void FullCodeGenerator::VisitRegExpLiteral(RegExpLiteral* expr) {
+ Comment cmnt(masm_, "[ RegExpLiteral");
+ Label materialized;
+ // Registers will be used as follows:
+ // x5 = materialized value (RegExp literal)
+ // x4 = JS function, literals array
+ // x3 = literal index
+ // x2 = RegExp pattern
+ // x1 = RegExp flags
+ // x0 = RegExp literal clone
+ __ Ldr(x10, MemOperand(fp, JavaScriptFrameConstants::kFunctionOffset));
+ __ Ldr(x4, FieldMemOperand(x10, JSFunction::kLiteralsOffset));
+ int literal_offset =
+ FixedArray::kHeaderSize + expr->literal_index() * kPointerSize;
+ __ Ldr(x5, FieldMemOperand(x4, literal_offset));
+ __ JumpIfNotRoot(x5, Heap::kUndefinedValueRootIndex, &materialized);
+
+ // Create regexp literal using runtime function.
+ // Result will be in x0.
+ __ Mov(x3, Smi::FromInt(expr->literal_index()));
+ __ Mov(x2, Operand(expr->pattern()));
+ __ Mov(x1, Operand(expr->flags()));
+ __ Push(x4, x3, x2, x1);
+ __ CallRuntime(Runtime::kHiddenMaterializeRegExpLiteral, 4);
+ __ Mov(x5, x0);
+
+ __ Bind(&materialized);
+ int size = JSRegExp::kSize + JSRegExp::kInObjectFieldCount * kPointerSize;
+ Label allocated, runtime_allocate;
+ __ Allocate(size, x0, x2, x3, &runtime_allocate, TAG_OBJECT);
+ __ B(&allocated);
+
+ __ Bind(&runtime_allocate);
+ __ Mov(x10, Smi::FromInt(size));
+ __ Push(x5, x10);
+ __ CallRuntime(Runtime::kHiddenAllocateInNewSpace, 1);
+ __ Pop(x5);
+
+ __ Bind(&allocated);
+ // After this, registers are used as follows:
+ // x0: Newly allocated regexp.
+ // x5: Materialized regexp.
+ // x10, x11, x12: temps.
+ __ CopyFields(x0, x5, CPURegList(x10, x11, x12), size / kPointerSize);
+ context()->Plug(x0);
+}
+
+
+void FullCodeGenerator::EmitAccessor(Expression* expression) {
+ if (expression == NULL) {
+ __ LoadRoot(x10, Heap::kNullValueRootIndex);
+ __ Push(x10);
+ } else {
+ VisitForStackValue(expression);
+ }
+}
+
+
+void FullCodeGenerator::VisitObjectLiteral(ObjectLiteral* expr) {
+ Comment cmnt(masm_, "[ ObjectLiteral");
+
+ expr->BuildConstantProperties(isolate());
+ Handle<FixedArray> constant_properties = expr->constant_properties();
+ __ Ldr(x3, MemOperand(fp, JavaScriptFrameConstants::kFunctionOffset));
+ __ Ldr(x3, FieldMemOperand(x3, JSFunction::kLiteralsOffset));
+ __ Mov(x2, Smi::FromInt(expr->literal_index()));
+ __ Mov(x1, Operand(constant_properties));
+ int flags = expr->fast_elements()
+ ? ObjectLiteral::kFastElements
+ : ObjectLiteral::kNoFlags;
+ flags |= expr->has_function()
+ ? ObjectLiteral::kHasFunction
+ : ObjectLiteral::kNoFlags;
+ __ Mov(x0, Smi::FromInt(flags));
+ int properties_count = constant_properties->length() / 2;
+ const int max_cloned_properties =
+ FastCloneShallowObjectStub::kMaximumClonedProperties;
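+  // Fall back to the runtime when FastCloneShallowObjectStub cannot be used:
+  // literals that may store doubles, nested literals, non-fast elements, too
+  // many properties, or when the serializer is enabled.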
+ if (expr->may_store_doubles() || expr->depth() > 1 || Serializer::enabled() ||
+ flags != ObjectLiteral::kFastElements ||
+ properties_count > max_cloned_properties) {
+ __ Push(x3, x2, x1, x0);
+ __ CallRuntime(Runtime::kHiddenCreateObjectLiteral, 4);
+ } else {
+ FastCloneShallowObjectStub stub(properties_count);
+ __ CallStub(&stub);
+ }
+
+  // If result_saved is true, the result is on top of the stack. If
+  // result_saved is false, the result is in x0.
+ bool result_saved = false;
+
+ // Mark all computed expressions that are bound to a key that
+ // is shadowed by a later occurrence of the same key. For the
+ // marked expressions, no store code is emitted.
+ expr->CalculateEmitStore(zone());
+
+ AccessorTable accessor_table(zone());
+ for (int i = 0; i < expr->properties()->length(); i++) {
+ ObjectLiteral::Property* property = expr->properties()->at(i);
+ if (property->IsCompileTimeValue()) continue;
+
+ Literal* key = property->key();
+ Expression* value = property->value();
+ if (!result_saved) {
+ __ Push(x0); // Save result on stack
+ result_saved = true;
+ }
+ switch (property->kind()) {
+ case ObjectLiteral::Property::CONSTANT:
+ UNREACHABLE();
+ case ObjectLiteral::Property::MATERIALIZED_LITERAL:
+ ASSERT(!CompileTimeValue::IsCompileTimeValue(property->value()));
+ // Fall through.
+ case ObjectLiteral::Property::COMPUTED:
+ if (key->value()->IsInternalizedString()) {
+ if (property->emit_store()) {
+ VisitForAccumulatorValue(value);
+ __ Mov(x2, Operand(key->value()));
+ __ Peek(x1, 0);
+ CallStoreIC(key->LiteralFeedbackId());
+ PrepareForBailoutForId(key->id(), NO_REGISTERS);
+ } else {
+ VisitForEffect(value);
+ }
+ break;
+ }
+ if (property->emit_store()) {
+ // Duplicate receiver on stack.
+ __ Peek(x0, 0);
+ __ Push(x0);
+ VisitForStackValue(key);
+ VisitForStackValue(value);
+ __ Mov(x0, Smi::FromInt(NONE)); // PropertyAttributes
+ __ Push(x0);
+ __ CallRuntime(Runtime::kSetProperty, 4);
+ } else {
+ VisitForEffect(key);
+ VisitForEffect(value);
+ }
+ break;
+ case ObjectLiteral::Property::PROTOTYPE:
+ if (property->emit_store()) {
+ // Duplicate receiver on stack.
+ __ Peek(x0, 0);
+ __ Push(x0);
+ VisitForStackValue(value);
+ __ CallRuntime(Runtime::kSetPrototype, 2);
+ } else {
+ VisitForEffect(value);
+ }
+ break;
+ case ObjectLiteral::Property::GETTER:
+ accessor_table.lookup(key)->second->getter = value;
+ break;
+ case ObjectLiteral::Property::SETTER:
+ accessor_table.lookup(key)->second->setter = value;
+ break;
+ }
+ }
+
+ // Emit code to define accessors, using only a single call to the runtime for
+ // each pair of corresponding getters and setters.
+ for (AccessorTable::Iterator it = accessor_table.begin();
+ it != accessor_table.end();
+ ++it) {
+ __ Peek(x10, 0); // Duplicate receiver.
+ __ Push(x10);
+ VisitForStackValue(it->first);
+ EmitAccessor(it->second->getter);
+ EmitAccessor(it->second->setter);
+ __ Mov(x10, Smi::FromInt(NONE));
+ __ Push(x10);
+ __ CallRuntime(Runtime::kDefineOrRedefineAccessorProperty, 5);
+ }
+
+ if (expr->has_function()) {
+ ASSERT(result_saved);
+ __ Peek(x0, 0);
+ __ Push(x0);
+ __ CallRuntime(Runtime::kToFastProperties, 1);
+ }
+
+ if (result_saved) {
+ context()->PlugTOS();
+ } else {
+ context()->Plug(x0);
+ }
+}
+
+
+void FullCodeGenerator::VisitArrayLiteral(ArrayLiteral* expr) {
+ Comment cmnt(masm_, "[ ArrayLiteral");
+
+ expr->BuildConstantElements(isolate());
+ int flags = (expr->depth() == 1) ? ArrayLiteral::kShallowElements
+ : ArrayLiteral::kNoFlags;
+
+ ZoneList<Expression*>* subexprs = expr->values();
+ int length = subexprs->length();
+ Handle<FixedArray> constant_elements = expr->constant_elements();
+ ASSERT_EQ(2, constant_elements->length());
+ ElementsKind constant_elements_kind =
+ static_cast<ElementsKind>(Smi::cast(constant_elements->get(0))->value());
+ bool has_fast_elements = IsFastObjectElementsKind(constant_elements_kind);
+ Handle<FixedArrayBase> constant_elements_values(
+ FixedArrayBase::cast(constant_elements->get(1)));
+
+ AllocationSiteMode allocation_site_mode = TRACK_ALLOCATION_SITE;
+ if (has_fast_elements && !FLAG_allocation_site_pretenuring) {
+    // If transitioning is the only customer of allocation sites (pretenuring
+    // is disabled) and the elements are already fast, there is nothing left
+    // to transition to, so allocation site tracking can be turned off.
+ allocation_site_mode = DONT_TRACK_ALLOCATION_SITE;
+ }
+
+ __ Ldr(x3, MemOperand(fp, JavaScriptFrameConstants::kFunctionOffset));
+ __ Ldr(x3, FieldMemOperand(x3, JSFunction::kLiteralsOffset));
+ __ Mov(x2, Smi::FromInt(expr->literal_index()));
+ __ Mov(x1, Operand(constant_elements));
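+  // If the boilerplate's elements are a copy-on-write array, the stub can
+  // share them with the clone instead of copying them.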
+ if (has_fast_elements && constant_elements_values->map() ==
+ isolate()->heap()->fixed_cow_array_map()) {
+ FastCloneShallowArrayStub stub(
+ FastCloneShallowArrayStub::COPY_ON_WRITE_ELEMENTS,
+ allocation_site_mode,
+ length);
+ __ CallStub(&stub);
+ __ IncrementCounter(
+ isolate()->counters()->cow_arrays_created_stub(), 1, x10, x11);
+ } else if ((expr->depth() > 1) || Serializer::enabled() ||
+ length > FastCloneShallowArrayStub::kMaximumClonedLength) {
+ __ Mov(x0, Smi::FromInt(flags));
+ __ Push(x3, x2, x1, x0);
+ __ CallRuntime(Runtime::kHiddenCreateArrayLiteral, 4);
+ } else {
+ ASSERT(IsFastSmiOrObjectElementsKind(constant_elements_kind) ||
+ FLAG_smi_only_arrays);
+ FastCloneShallowArrayStub::Mode mode =
+ FastCloneShallowArrayStub::CLONE_ANY_ELEMENTS;
+
+ if (has_fast_elements) {
+ mode = FastCloneShallowArrayStub::CLONE_ELEMENTS;
+ }
+
+ FastCloneShallowArrayStub stub(mode, allocation_site_mode, length);
+ __ CallStub(&stub);
+ }
+
+ bool result_saved = false; // Is the result saved to the stack?
+
+ // Emit code to evaluate all the non-constant subexpressions and to store
+ // them into the newly cloned array.
+ for (int i = 0; i < length; i++) {
+ Expression* subexpr = subexprs->at(i);
+    // If the subexpression is a literal or a simple materialized literal, it
+    // is already set in the cloned array.
+ if (CompileTimeValue::IsCompileTimeValue(subexpr)) continue;
+
+ if (!result_saved) {
+ __ Push(x0);
+ __ Push(Smi::FromInt(expr->literal_index()));
+ result_saved = true;
+ }
+ VisitForAccumulatorValue(subexpr);
+
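+    // For fast object elements, store the value directly into the cloned
+    // backing store and emit the write barrier; other elements kinds go
+    // through StoreArrayLiteralElementStub.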
+ if (IsFastObjectElementsKind(constant_elements_kind)) {
+ int offset = FixedArray::kHeaderSize + (i * kPointerSize);
+ __ Peek(x6, kPointerSize); // Copy of array literal.
+ __ Ldr(x1, FieldMemOperand(x6, JSObject::kElementsOffset));
+ __ Str(result_register(), FieldMemOperand(x1, offset));
+ // Update the write barrier for the array store.
+ __ RecordWriteField(x1, offset, result_register(), x10,
+ kLRHasBeenSaved, kDontSaveFPRegs,
+ EMIT_REMEMBERED_SET, INLINE_SMI_CHECK);
+ } else {
+ __ Mov(x3, Smi::FromInt(i));
+ StoreArrayLiteralElementStub stub;
+ __ CallStub(&stub);
+ }
+
+ PrepareForBailoutForId(expr->GetIdForElement(i), NO_REGISTERS);
+ }
+
+ if (result_saved) {
+ __ Drop(1); // literal index
+ context()->PlugTOS();
+ } else {
+ context()->Plug(x0);
+ }
+}
+
+
+void FullCodeGenerator::VisitAssignment(Assignment* expr) {
+ ASSERT(expr->target()->IsValidLeftHandSide());
+
+ Comment cmnt(masm_, "[ Assignment");
+
+ // Left-hand side can only be a property, a global or a (parameter or local)
+ // slot.
+ enum LhsKind { VARIABLE, NAMED_PROPERTY, KEYED_PROPERTY };
+ LhsKind assign_type = VARIABLE;
+ Property* property = expr->target()->AsProperty();
+ if (property != NULL) {
+ assign_type = (property->key()->IsPropertyName())
+ ? NAMED_PROPERTY
+ : KEYED_PROPERTY;
+ }
+
+ // Evaluate LHS expression.
+ switch (assign_type) {
+ case VARIABLE:
+ // Nothing to do here.
+ break;
+ case NAMED_PROPERTY:
+ if (expr->is_compound()) {
+ // We need the receiver both on the stack and in the accumulator.
+ VisitForAccumulatorValue(property->obj());
+ __ Push(result_register());
+ } else {
+ VisitForStackValue(property->obj());
+ }
+ break;
+ case KEYED_PROPERTY:
+ if (expr->is_compound()) {
+ VisitForStackValue(property->obj());
+ VisitForAccumulatorValue(property->key());
+ __ Peek(x1, 0);
+ __ Push(x0);
+ } else {
+ VisitForStackValue(property->obj());
+ VisitForStackValue(property->key());
+ }
+ break;
+ }
+
+ // For compound assignments we need another deoptimization point after the
+ // variable/property load.
+ if (expr->is_compound()) {
+ { AccumulatorValueContext context(this);
+ switch (assign_type) {
+ case VARIABLE:
+ EmitVariableLoad(expr->target()->AsVariableProxy());
+ PrepareForBailout(expr->target(), TOS_REG);
+ break;
+ case NAMED_PROPERTY:
+ EmitNamedPropertyLoad(property);
+ PrepareForBailoutForId(property->LoadId(), TOS_REG);
+ break;
+ case KEYED_PROPERTY:
+ EmitKeyedPropertyLoad(property);
+ PrepareForBailoutForId(property->LoadId(), TOS_REG);
+ break;
+ }
+ }
+
+ Token::Value op = expr->binary_op();
+ __ Push(x0); // Left operand goes on the stack.
+ VisitForAccumulatorValue(expr->value());
+
+ OverwriteMode mode = expr->value()->ResultOverwriteAllowed()
+ ? OVERWRITE_RIGHT
+ : NO_OVERWRITE;
+ SetSourcePosition(expr->position() + 1);
+ AccumulatorValueContext context(this);
+ if (ShouldInlineSmiCase(op)) {
+ EmitInlineSmiBinaryOp(expr->binary_operation(),
+ op,
+ mode,
+ expr->target(),
+ expr->value());
+ } else {
+ EmitBinaryOp(expr->binary_operation(), op, mode);
+ }
+
+ // Deoptimization point in case the binary operation may have side effects.
+ PrepareForBailout(expr->binary_operation(), TOS_REG);
+ } else {
+ VisitForAccumulatorValue(expr->value());
+ }
+
+ // Record source position before possible IC call.
+ SetSourcePosition(expr->position());
+
+ // Store the value.
+ switch (assign_type) {
+ case VARIABLE:
+ EmitVariableAssignment(expr->target()->AsVariableProxy()->var(),
+ expr->op());
+ PrepareForBailoutForId(expr->AssignmentId(), TOS_REG);
+ context()->Plug(x0);
+ break;
+ case NAMED_PROPERTY:
+ EmitNamedPropertyAssignment(expr);
+ break;
+ case KEYED_PROPERTY:
+ EmitKeyedPropertyAssignment(expr);
+ break;
+ }
+}
+
+
+void FullCodeGenerator::EmitNamedPropertyLoad(Property* prop) {
+ SetSourcePosition(prop->position());
+ Literal* key = prop->key()->AsLiteral();
+ __ Mov(x2, Operand(key->value()));
+  // Call the load IC, which expects the receiver in x0 and the name in x2.
+ CallLoadIC(NOT_CONTEXTUAL, prop->PropertyFeedbackId());
+}
+
+
+void FullCodeGenerator::EmitKeyedPropertyLoad(Property* prop) {
+ SetSourcePosition(prop->position());
+  // Call the keyed load IC. It expects the key in x0 and the receiver in x1.
+ Handle<Code> ic = isolate()->builtins()->KeyedLoadIC_Initialize();
+ CallIC(ic, prop->PropertyFeedbackId());
+}
+
+
+void FullCodeGenerator::EmitInlineSmiBinaryOp(BinaryOperation* expr,
+ Token::Value op,
+ OverwriteMode mode,
+ Expression* left_expr,
+ Expression* right_expr) {
+ Label done, both_smis, stub_call;
+
+ // Get the arguments.
+ Register left = x1;
+ Register right = x0;
+ Register result = x0;
+ __ Pop(left);
+
+ // Perform combined smi check on both operands.
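+  // Because the smi tag is zero, the OR of both operands has its tag bits
+  // clear only if both operands are smis, so a single check covers both.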
+ __ Orr(x10, left, right);
+ JumpPatchSite patch_site(masm_);
+ patch_site.EmitJumpIfSmi(x10, &both_smis);
+
+ __ Bind(&stub_call);
+ BinaryOpICStub stub(op, mode);
+ {
+ Assembler::BlockPoolsScope scope(masm_);
+ CallIC(stub.GetCode(isolate()), expr->BinaryOperationFeedbackId());
+ patch_site.EmitPatchInfo();
+ }
+ __ B(&done);
+
+ __ Bind(&both_smis);
+ // Smi case. This code works in the same way as the smi-smi case in the type
+ // recording binary operation stub, see
+ // BinaryOpStub::GenerateSmiSmiOperation for comments.
+ // TODO(all): That doesn't exist any more. Where are the comments?
+ //
+ // The set of operations that needs to be supported here is controlled by
+ // FullCodeGenerator::ShouldInlineSmiCase().
+ switch (op) {
+ case Token::SAR:
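+      // Shift counts are taken modulo 32, so extract only the low five bits
+      // of the untagged shift amount.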
+ __ Ubfx(right, right, kSmiShift, 5);
+ __ Asr(result, left, right);
+ __ Bic(result, result, kSmiShiftMask);
+ break;
+ case Token::SHL:
+ __ Ubfx(right, right, kSmiShift, 5);
+ __ Lsl(result, left, right);
+ break;
+ case Token::SHR: {
+ Label right_not_zero;
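+      // A zero shift of a negative value would give an unsigned result that
+      // does not fit in a smi, so that case is left to the stub.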
+ __ Cbnz(right, &right_not_zero);
+ __ Tbnz(left, kXSignBit, &stub_call);
+ __ Bind(&right_not_zero);
+ __ Ubfx(right, right, kSmiShift, 5);
+ __ Lsr(result, left, right);
+ __ Bic(result, result, kSmiShiftMask);
+ break;
+ }
+ case Token::ADD:
+ __ Adds(x10, left, right);
+ __ B(vs, &stub_call);
+ __ Mov(result, x10);
+ break;
+ case Token::SUB:
+ __ Subs(x10, left, right);
+ __ B(vs, &stub_call);
+ __ Mov(result, x10);
+ break;
+ case Token::MUL: {
+ Label not_minus_zero, done;
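+      // Both operands are tagged (shifted left by 32), so the high 64 bits
+      // of the 128-bit product computed by Smulh are the untagged product. A
+      // zero product is -0 if exactly one operand was negative; that case is
+      // also left to the stub.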
+ __ Smulh(x10, left, right);
+ __ Cbnz(x10, &not_minus_zero);
+ __ Eor(x11, left, right);
+ __ Tbnz(x11, kXSignBit, &stub_call);
+ STATIC_ASSERT(kSmiTag == 0);
+ __ Mov(result, x10);
+ __ B(&done);
+ __ Bind(&not_minus_zero);
+ __ Cls(x11, x10);
+ __ Cmp(x11, kXRegSizeInBits - kSmiShift);
+ __ B(lt, &stub_call);
+ __ SmiTag(result, x10);
+ __ Bind(&done);
+ break;
+ }
+ case Token::BIT_OR:
+ __ Orr(result, left, right);
+ break;
+ case Token::BIT_AND:
+ __ And(result, left, right);
+ break;
+ case Token::BIT_XOR:
+ __ Eor(result, left, right);
+ break;
+ default:
+ UNREACHABLE();
+ }
+
+ __ Bind(&done);
+ context()->Plug(x0);
+}
+
+
+void FullCodeGenerator::EmitBinaryOp(BinaryOperation* expr,
+ Token::Value op,
+ OverwriteMode mode) {
+ __ Pop(x1);
+ BinaryOpICStub stub(op, mode);
+ JumpPatchSite patch_site(masm_); // Unbound, signals no inlined smi code.
+ {
+ Assembler::BlockPoolsScope scope(masm_);
+ CallIC(stub.GetCode(isolate()), expr->BinaryOperationFeedbackId());
+ patch_site.EmitPatchInfo();
+ }
+ context()->Plug(x0);
+}
+
+
+void FullCodeGenerator::EmitAssignment(Expression* expr) {
+ ASSERT(expr->IsValidLeftHandSide());
+
+ // Left-hand side can only be a property, a global or a (parameter or local)
+ // slot.
+ enum LhsKind { VARIABLE, NAMED_PROPERTY, KEYED_PROPERTY };
+ LhsKind assign_type = VARIABLE;
+ Property* prop = expr->AsProperty();
+ if (prop != NULL) {
+ assign_type = (prop->key()->IsPropertyName())
+ ? NAMED_PROPERTY
+ : KEYED_PROPERTY;
+ }
+
+ switch (assign_type) {
+ case VARIABLE: {
+ Variable* var = expr->AsVariableProxy()->var();
+ EffectContext context(this);
+ EmitVariableAssignment(var, Token::ASSIGN);
+ break;
+ }
+ case NAMED_PROPERTY: {
+ __ Push(x0); // Preserve value.
+ VisitForAccumulatorValue(prop->obj());
+ // TODO(all): We could introduce a VisitForRegValue(reg, expr) to avoid
+ // this copy.
+ __ Mov(x1, x0);
+ __ Pop(x0); // Restore value.
+ __ Mov(x2, Operand(prop->key()->AsLiteral()->value()));
+ CallStoreIC();
+ break;
+ }
+ case KEYED_PROPERTY: {
+ __ Push(x0); // Preserve value.
+ VisitForStackValue(prop->obj());
+ VisitForAccumulatorValue(prop->key());
+ __ Mov(x1, x0);
+ __ Pop(x2, x0);
+ Handle<Code> ic = strict_mode() == SLOPPY
+ ? isolate()->builtins()->KeyedStoreIC_Initialize()
+ : isolate()->builtins()->KeyedStoreIC_Initialize_Strict();
+ CallIC(ic);
+ break;
+ }
+ }
+ context()->Plug(x0);
+}
+
+
+void FullCodeGenerator::EmitStoreToStackLocalOrContextSlot(
+ Variable* var, MemOperand location) {
+ __ Str(result_register(), location);
+ if (var->IsContextSlot()) {
+ // RecordWrite may destroy all its register arguments.
+ __ Mov(x10, result_register());
+ int offset = Context::SlotOffset(var->index());
+ __ RecordWriteContextSlot(
+ x1, offset, x10, x11, kLRHasBeenSaved, kDontSaveFPRegs);
+ }
+}
+
+
+void FullCodeGenerator::EmitCallStoreContextSlot(
+ Handle<String> name, StrictMode strict_mode) {
+ __ Mov(x11, Operand(name));
+ __ Mov(x10, Smi::FromInt(strict_mode));
+ // jssp[0] : mode.
+ // jssp[8] : name.
+ // jssp[16] : context.
+ // jssp[24] : value.
+ __ Push(x0, cp, x11, x10);
+ __ CallRuntime(Runtime::kHiddenStoreContextSlot, 4);
+}
+
+
+void FullCodeGenerator::EmitVariableAssignment(Variable* var,
+ Token::Value op) {
+ ASM_LOCATION("FullCodeGenerator::EmitVariableAssignment");
+ if (var->IsUnallocated()) {
+ // Global var, const, or let.
+ __ Mov(x2, Operand(var->name()));
+ __ Ldr(x1, GlobalObjectMemOperand());
+ CallStoreIC();
+
+ } else if (op == Token::INIT_CONST_LEGACY) {
+ // Const initializers need a write barrier.
+ ASSERT(!var->IsParameter()); // No const parameters.
+ if (var->IsLookupSlot()) {
+ __ Push(x0);
+ __ Mov(x0, Operand(var->name()));
+ __ Push(cp, x0); // Context and name.
+ __ CallRuntime(Runtime::kHiddenInitializeConstContextSlot, 3);
+ } else {
+ ASSERT(var->IsStackLocal() || var->IsContextSlot());
+ Label skip;
+ MemOperand location = VarOperand(var, x1);
+ __ Ldr(x10, location);
+ __ JumpIfNotRoot(x10, Heap::kTheHoleValueRootIndex, &skip);
+ EmitStoreToStackLocalOrContextSlot(var, location);
+ __ Bind(&skip);
+ }
+
+ } else if (var->mode() == LET && op != Token::INIT_LET) {
+ // Non-initializing assignment to let variable needs a write barrier.
+ if (var->IsLookupSlot()) {
+ EmitCallStoreContextSlot(var->name(), strict_mode());
+ } else {
+ ASSERT(var->IsStackAllocated() || var->IsContextSlot());
+ Label assign;
+ MemOperand location = VarOperand(var, x1);
+ __ Ldr(x10, location);
+ __ JumpIfNotRoot(x10, Heap::kTheHoleValueRootIndex, &assign);
+ __ Mov(x10, Operand(var->name()));
+ __ Push(x10);
+ __ CallRuntime(Runtime::kHiddenThrowReferenceError, 1);
+ // Perform the assignment.
+ __ Bind(&assign);
+ EmitStoreToStackLocalOrContextSlot(var, location);
+ }
+
+ } else if (!var->is_const_mode() || op == Token::INIT_CONST) {
+ // Assignment to var or initializing assignment to let/const
+ // in harmony mode.
+ if (var->IsLookupSlot()) {
+ EmitCallStoreContextSlot(var->name(), strict_mode());
+ } else {
+ ASSERT(var->IsStackAllocated() || var->IsContextSlot());
+ MemOperand location = VarOperand(var, x1);
+ if (FLAG_debug_code && op == Token::INIT_LET) {
+ __ Ldr(x10, location);
+ __ CompareRoot(x10, Heap::kTheHoleValueRootIndex);
+ __ Check(eq, kLetBindingReInitialization);
+ }
+ EmitStoreToStackLocalOrContextSlot(var, location);
+ }
+ }
+ // Non-initializing assignments to consts are ignored.
+}
+
+
+void FullCodeGenerator::EmitNamedPropertyAssignment(Assignment* expr) {
+ ASM_LOCATION("FullCodeGenerator::EmitNamedPropertyAssignment");
+ // Assignment to a property, using a named store IC.
+ Property* prop = expr->target()->AsProperty();
+ ASSERT(prop != NULL);
+ ASSERT(prop->key()->AsLiteral() != NULL);
+
+ // Record source code position before IC call.
+ SetSourcePosition(expr->position());
+ __ Mov(x2, Operand(prop->key()->AsLiteral()->value()));
+ __ Pop(x1);
+
+ CallStoreIC(expr->AssignmentFeedbackId());
+
+ PrepareForBailoutForId(expr->AssignmentId(), TOS_REG);
+ context()->Plug(x0);
+}
+
+
+void FullCodeGenerator::EmitKeyedPropertyAssignment(Assignment* expr) {
+ ASM_LOCATION("FullCodeGenerator::EmitKeyedPropertyAssignment");
+ // Assignment to a property, using a keyed store IC.
+
+ // Record source code position before IC call.
+ SetSourcePosition(expr->position());
+ // TODO(all): Could we pass this in registers rather than on the stack?
+ __ Pop(x1, x2); // Key and object holding the property.
+
+ Handle<Code> ic = strict_mode() == SLOPPY
+ ? isolate()->builtins()->KeyedStoreIC_Initialize()
+ : isolate()->builtins()->KeyedStoreIC_Initialize_Strict();
+ CallIC(ic, expr->AssignmentFeedbackId());
+
+ PrepareForBailoutForId(expr->AssignmentId(), TOS_REG);
+ context()->Plug(x0);
+}
+
+
+void FullCodeGenerator::VisitProperty(Property* expr) {
+ Comment cmnt(masm_, "[ Property");
+ Expression* key = expr->key();
+
+ if (key->IsPropertyName()) {
+ VisitForAccumulatorValue(expr->obj());
+ EmitNamedPropertyLoad(expr);
+ PrepareForBailoutForId(expr->LoadId(), TOS_REG);
+ context()->Plug(x0);
+ } else {
+ VisitForStackValue(expr->obj());
+ VisitForAccumulatorValue(expr->key());
+ __ Pop(x1);
+ EmitKeyedPropertyLoad(expr);
+ context()->Plug(x0);
+ }
+}
+
+
+void FullCodeGenerator::CallIC(Handle<Code> code,
+ TypeFeedbackId ast_id) {
+ ic_total_count_++;
+ // All calls must have a predictable size in full-codegen code to ensure that
+ // the debugger can patch them correctly.
+ __ Call(code, RelocInfo::CODE_TARGET, ast_id);
+}
+
+
+// Code common for calls using the IC.
+void FullCodeGenerator::EmitCallWithIC(Call* expr) {
+ ASM_LOCATION("EmitCallWithIC");
+
+ Expression* callee = expr->expression();
+ ZoneList<Expression*>* args = expr->arguments();
+ int arg_count = args->length();
+
+ CallFunctionFlags flags;
+ // Get the target function.
+ if (callee->IsVariableProxy()) {
+ { StackValueContext context(this);
+ EmitVariableLoad(callee->AsVariableProxy());
+ PrepareForBailout(callee, NO_REGISTERS);
+ }
+ // Push undefined as receiver. This is patched in the method prologue if it
+ // is a sloppy mode method.
+ __ Push(isolate()->factory()->undefined_value());
+ flags = NO_CALL_FUNCTION_FLAGS;
+ } else {
+ // Load the function from the receiver.
+ ASSERT(callee->IsProperty());
+ __ Peek(x0, 0);
+ EmitNamedPropertyLoad(callee->AsProperty());
+ PrepareForBailoutForId(callee->AsProperty()->LoadId(), TOS_REG);
+ // Push the target function under the receiver.
+ __ Pop(x10);
+ __ Push(x0, x10);
+ flags = CALL_AS_METHOD;
+ }
+
+ // Load the arguments.
+ { PreservePositionScope scope(masm()->positions_recorder());
+ for (int i = 0; i < arg_count; i++) {
+ VisitForStackValue(args->at(i));
+ }
+ }
+
+ // Record source position for debugger.
+ SetSourcePosition(expr->position());
+ CallFunctionStub stub(arg_count, flags);
+ __ Peek(x1, (arg_count + 1) * kPointerSize);
+ __ CallStub(&stub);
+
+ RecordJSReturnSite(expr);
+
+ // Restore context register.
+ __ Ldr(cp, MemOperand(fp, StandardFrameConstants::kContextOffset));
+
+ context()->DropAndPlug(1, x0);
+}
+
+
+// Code common for calls using the IC.
+void FullCodeGenerator::EmitKeyedCallWithIC(Call* expr,
+ Expression* key) {
+ // Load the key.
+ VisitForAccumulatorValue(key);
+
+ Expression* callee = expr->expression();
+ ZoneList<Expression*>* args = expr->arguments();
+ int arg_count = args->length();
+
+ // Load the function from the receiver.
+ ASSERT(callee->IsProperty());
+ __ Peek(x1, 0);
+ EmitKeyedPropertyLoad(callee->AsProperty());
+ PrepareForBailoutForId(callee->AsProperty()->LoadId(), TOS_REG);
+
+ // Push the target function under the receiver.
+ __ Pop(x10);
+ __ Push(x0, x10);
+
+ { PreservePositionScope scope(masm()->positions_recorder());
+ for (int i = 0; i < arg_count; i++) {
+ VisitForStackValue(args->at(i));
+ }
+ }
+
+ // Record source position for debugger.
+ SetSourcePosition(expr->position());
+ CallFunctionStub stub(arg_count, CALL_AS_METHOD);
+ __ Peek(x1, (arg_count + 1) * kPointerSize);
+ __ CallStub(&stub);
+
+ RecordJSReturnSite(expr);
+ // Restore context register.
+ __ Ldr(cp, MemOperand(fp, StandardFrameConstants::kContextOffset));
+
+ context()->DropAndPlug(1, x0);
+}
+
+
+void FullCodeGenerator::EmitCallWithStub(Call* expr) {
+ // Code common for calls using the call stub.
+ ZoneList<Expression*>* args = expr->arguments();
+ int arg_count = args->length();
+ { PreservePositionScope scope(masm()->positions_recorder());
+ for (int i = 0; i < arg_count; i++) {
+ VisitForStackValue(args->at(i));
+ }
+ }
+ // Record source position for debugger.
+ SetSourcePosition(expr->position());
+
+ Handle<Object> uninitialized =
+ TypeFeedbackInfo::UninitializedSentinel(isolate());
+ StoreFeedbackVectorSlot(expr->CallFeedbackSlot(), uninitialized);
+ __ LoadObject(x2, FeedbackVector());
+ __ Mov(x3, Smi::FromInt(expr->CallFeedbackSlot()));
+
+ // Record call targets in unoptimized code.
+ CallFunctionStub stub(arg_count, RECORD_CALL_TARGET);
+ __ Peek(x1, (arg_count + 1) * kXRegSize);
+ __ CallStub(&stub);
+ RecordJSReturnSite(expr);
+ // Restore context register.
+ __ Ldr(cp, MemOperand(fp, StandardFrameConstants::kContextOffset));
+ context()->DropAndPlug(1, x0);
+}
+
+
+void FullCodeGenerator::EmitResolvePossiblyDirectEval(int arg_count) {
+ ASM_LOCATION("FullCodeGenerator::EmitResolvePossiblyDirectEval");
+ // Prepare to push a copy of the first argument or undefined if it doesn't
+ // exist.
+ if (arg_count > 0) {
+ __ Peek(x10, arg_count * kXRegSize);
+ } else {
+ __ LoadRoot(x10, Heap::kUndefinedValueRootIndex);
+ }
+
+ // Prepare to push the receiver of the enclosing function.
+ int receiver_offset = 2 + info_->scope()->num_parameters();
+ __ Ldr(x11, MemOperand(fp, receiver_offset * kPointerSize));
+
+ // Push.
+ __ Push(x10, x11);
+
+ // Prepare to push the language mode.
+ __ Mov(x10, Smi::FromInt(strict_mode()));
+  // Prepare to push the start position of the scope the call resides in.
+ __ Mov(x11, Smi::FromInt(scope()->start_position()));
+
+ // Push.
+ __ Push(x10, x11);
+
+ // Do the runtime call.
+ __ CallRuntime(Runtime::kHiddenResolvePossiblyDirectEval, 5);
+}
+
+
+void FullCodeGenerator::VisitCall(Call* expr) {
+#ifdef DEBUG
+ // We want to verify that RecordJSReturnSite gets called on all paths
+ // through this function. Avoid early returns.
+ expr->return_is_recorded_ = false;
+#endif
+
+ Comment cmnt(masm_, "[ Call");
+ Expression* callee = expr->expression();
+ Call::CallType call_type = expr->GetCallType(isolate());
+
+ if (call_type == Call::POSSIBLY_EVAL_CALL) {
+    // In a call to eval, we first call
+    // Runtime::kHiddenResolvePossiblyDirectEval to resolve the function we
+    // need to call and the receiver of the call. Then we call the resolved
+    // function using the given arguments.
+ ZoneList<Expression*>* args = expr->arguments();
+ int arg_count = args->length();
+
+ {
+ PreservePositionScope pos_scope(masm()->positions_recorder());
+ VisitForStackValue(callee);
+ __ LoadRoot(x10, Heap::kUndefinedValueRootIndex);
+ __ Push(x10); // Reserved receiver slot.
+
+ // Push the arguments.
+ for (int i = 0; i < arg_count; i++) {
+ VisitForStackValue(args->at(i));
+ }
+
+ // Push a copy of the function (found below the arguments) and
+ // resolve eval.
+ __ Peek(x10, (arg_count + 1) * kPointerSize);
+ __ Push(x10);
+ EmitResolvePossiblyDirectEval(arg_count);
+
+ // The runtime call returns a pair of values in x0 (function) and
+ // x1 (receiver). Touch up the stack with the right values.
+ __ PokePair(x1, x0, arg_count * kPointerSize);
+ }
+
+ // Record source position for debugger.
+ SetSourcePosition(expr->position());
+
+ // Call the evaluated function.
+ CallFunctionStub stub(arg_count, NO_CALL_FUNCTION_FLAGS);
+ __ Peek(x1, (arg_count + 1) * kXRegSize);
+ __ CallStub(&stub);
+ RecordJSReturnSite(expr);
+ // Restore context register.
+ __ Ldr(cp, MemOperand(fp, StandardFrameConstants::kContextOffset));
+ context()->DropAndPlug(1, x0);
+
+ } else if (call_type == Call::GLOBAL_CALL) {
+ EmitCallWithIC(expr);
+
+ } else if (call_type == Call::LOOKUP_SLOT_CALL) {
+ // Call to a lookup slot (dynamically introduced variable).
+ VariableProxy* proxy = callee->AsVariableProxy();
+ Label slow, done;
+
+ { PreservePositionScope scope(masm()->positions_recorder());
+ // Generate code for loading from variables potentially shadowed
+ // by eval-introduced variables.
+ EmitDynamicLookupFastCase(proxy->var(), NOT_INSIDE_TYPEOF, &slow, &done);
+ }
+
+ __ Bind(&slow);
+ // Call the runtime to find the function to call (returned in x0)
+ // and the object holding it (returned in x1).
+ __ Push(context_register());
+ __ Mov(x10, Operand(proxy->name()));
+ __ Push(x10);
+ __ CallRuntime(Runtime::kHiddenLoadContextSlot, 2);
+ __ Push(x0, x1); // Receiver, function.
+
+ // If fast case code has been generated, emit code to push the
+ // function and receiver and have the slow path jump around this
+ // code.
+ if (done.is_linked()) {
+ Label call;
+ __ B(&call);
+ __ Bind(&done);
+ // Push function.
+ __ Push(x0);
+ // The receiver is implicitly the global receiver. Indicate this
+      // by passing undefined to the call function stub.
+ __ LoadRoot(x1, Heap::kUndefinedValueRootIndex);
+ __ Push(x1);
+ __ Bind(&call);
+ }
+
+ // The receiver is either the global receiver or an object found
+ // by LoadContextSlot.
+ EmitCallWithStub(expr);
+ } else if (call_type == Call::PROPERTY_CALL) {
+ Property* property = callee->AsProperty();
+ { PreservePositionScope scope(masm()->positions_recorder());
+ VisitForStackValue(property->obj());
+ }
+ if (property->key()->IsPropertyName()) {
+ EmitCallWithIC(expr);
+ } else {
+ EmitKeyedCallWithIC(expr, property->key());
+ }
+
+ } else {
+ ASSERT(call_type == Call::OTHER_CALL);
+ // Call to an arbitrary expression not handled specially above.
+ { PreservePositionScope scope(masm()->positions_recorder());
+ VisitForStackValue(callee);
+ }
+ __ LoadRoot(x1, Heap::kUndefinedValueRootIndex);
+ __ Push(x1);
+ // Emit function call.
+ EmitCallWithStub(expr);
+ }
+
+#ifdef DEBUG
+ // RecordJSReturnSite should have been called.
+ ASSERT(expr->return_is_recorded_);
+#endif
+}
+
+
+void FullCodeGenerator::VisitCallNew(CallNew* expr) {
+ Comment cmnt(masm_, "[ CallNew");
+ // According to ECMA-262, section 11.2.2, page 44, the function
+ // expression in new calls must be evaluated before the
+ // arguments.
+
+  // Push the constructor on the stack. If it's not a function, it's used as
+  // the receiver for CALL_NON_FUNCTION; otherwise the value on the stack is
+  // ignored.
+ VisitForStackValue(expr->expression());
+
+ // Push the arguments ("left-to-right") on the stack.
+ ZoneList<Expression*>* args = expr->arguments();
+ int arg_count = args->length();
+ for (int i = 0; i < arg_count; i++) {
+ VisitForStackValue(args->at(i));
+ }
+
+ // Call the construct call builtin that handles allocation and
+ // constructor invocation.
+ SetSourcePosition(expr->position());
+
+ // Load function and argument count into x1 and x0.
+ __ Mov(x0, arg_count);
+ __ Peek(x1, arg_count * kXRegSize);
+
+ // Record call targets in unoptimized code.
+ Handle<Object> uninitialized =
+ TypeFeedbackInfo::UninitializedSentinel(isolate());
+ StoreFeedbackVectorSlot(expr->CallNewFeedbackSlot(), uninitialized);
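+  // When pretenuring call-new sites, the slot after the call-new feedback
+  // slot holds a fresh AllocationSite (asserted below).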
+ if (FLAG_pretenuring_call_new) {
+ StoreFeedbackVectorSlot(expr->AllocationSiteFeedbackSlot(),
+ isolate()->factory()->NewAllocationSite());
+ ASSERT(expr->AllocationSiteFeedbackSlot() ==
+ expr->CallNewFeedbackSlot() + 1);
+ }
+
+ __ LoadObject(x2, FeedbackVector());
+ __ Mov(x3, Smi::FromInt(expr->CallNewFeedbackSlot()));
+
+ CallConstructStub stub(RECORD_CALL_TARGET);
+ __ Call(stub.GetCode(isolate()), RelocInfo::CONSTRUCT_CALL);
+ PrepareForBailoutForId(expr->ReturnId(), TOS_REG);
+ context()->Plug(x0);
+}
+
+
+void FullCodeGenerator::EmitIsSmi(CallRuntime* expr) {
+ ZoneList<Expression*>* args = expr->arguments();
+ ASSERT(args->length() == 1);
+
+ VisitForAccumulatorValue(args->at(0));
+
+ Label materialize_true, materialize_false;
+ Label* if_true = NULL;
+ Label* if_false = NULL;
+ Label* fall_through = NULL;
+ context()->PrepareTest(&materialize_true, &materialize_false,
+ &if_true, &if_false, &fall_through);
+
+ PrepareForBailoutBeforeSplit(expr, true, if_true, if_false);
+ __ TestAndSplit(x0, kSmiTagMask, if_true, if_false, fall_through);
+
+ context()->Plug(if_true, if_false);
+}
+
+
+void FullCodeGenerator::EmitIsNonNegativeSmi(CallRuntime* expr) {
+ ZoneList<Expression*>* args = expr->arguments();
+ ASSERT(args->length() == 1);
+
+ VisitForAccumulatorValue(args->at(0));
+
+ Label materialize_true, materialize_false;
+ Label* if_true = NULL;
+ Label* if_false = NULL;
+ Label* fall_through = NULL;
+ context()->PrepareTest(&materialize_true, &materialize_false,
+ &if_true, &if_false, &fall_through);
+
+ PrepareForBailoutBeforeSplit(expr, true, if_true, if_false);
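+  // A single mask tests both the smi tag and the sign bit of the 32-bit smi
+  // payload; if either bit is set, the value is not a non-negative smi.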
+ __ TestAndSplit(x0, kSmiTagMask | (0x80000000UL << kSmiShift), if_true,
+ if_false, fall_through);
+
+ context()->Plug(if_true, if_false);
+}
+
+
+void FullCodeGenerator::EmitIsObject(CallRuntime* expr) {
+ ZoneList<Expression*>* args = expr->arguments();
+ ASSERT(args->length() == 1);
+
+ VisitForAccumulatorValue(args->at(0));
+
+ Label materialize_true, materialize_false;
+ Label* if_true = NULL;
+ Label* if_false = NULL;
+ Label* fall_through = NULL;
+ context()->PrepareTest(&materialize_true, &materialize_false,
+ &if_true, &if_false, &fall_through);
+
+ __ JumpIfSmi(x0, if_false);
+ __ JumpIfRoot(x0, Heap::kNullValueRootIndex, if_true);
+ __ Ldr(x10, FieldMemOperand(x0, HeapObject::kMapOffset));
+ // Undetectable objects behave like undefined when tested with typeof.
+ __ Ldrb(x11, FieldMemOperand(x10, Map::kBitFieldOffset));
+ __ Tbnz(x11, Map::kIsUndetectable, if_false);
+ __ Ldrb(x12, FieldMemOperand(x10, Map::kInstanceTypeOffset));
+ __ Cmp(x12, FIRST_NONCALLABLE_SPEC_OBJECT_TYPE);
+ __ B(lt, if_false);
+ __ Cmp(x12, LAST_NONCALLABLE_SPEC_OBJECT_TYPE);
+ PrepareForBailoutBeforeSplit(expr, true, if_true, if_false);
+ Split(le, if_true, if_false, fall_through);
+
+ context()->Plug(if_true, if_false);
+}
+
+
+void FullCodeGenerator::EmitIsSpecObject(CallRuntime* expr) {
+ ZoneList<Expression*>* args = expr->arguments();
+ ASSERT(args->length() == 1);
+
+ VisitForAccumulatorValue(args->at(0));
+
+ Label materialize_true, materialize_false;
+ Label* if_true = NULL;
+ Label* if_false = NULL;
+ Label* fall_through = NULL;
+ context()->PrepareTest(&materialize_true, &materialize_false,
+ &if_true, &if_false, &fall_through);
+
+ __ JumpIfSmi(x0, if_false);
+ __ CompareObjectType(x0, x10, x11, FIRST_SPEC_OBJECT_TYPE);
+ PrepareForBailoutBeforeSplit(expr, true, if_true, if_false);
+ Split(ge, if_true, if_false, fall_through);
+
+ context()->Plug(if_true, if_false);
+}
+
+
+void FullCodeGenerator::EmitIsUndetectableObject(CallRuntime* expr) {
+ ASM_LOCATION("FullCodeGenerator::EmitIsUndetectableObject");
+ ZoneList<Expression*>* args = expr->arguments();
+ ASSERT(args->length() == 1);
+
+ VisitForAccumulatorValue(args->at(0));
+
+ Label materialize_true, materialize_false;
+ Label* if_true = NULL;
+ Label* if_false = NULL;
+ Label* fall_through = NULL;
+ context()->PrepareTest(&materialize_true, &materialize_false,
+ &if_true, &if_false, &fall_through);
+
+ __ JumpIfSmi(x0, if_false);
+ __ Ldr(x10, FieldMemOperand(x0, HeapObject::kMapOffset));
+ __ Ldrb(x11, FieldMemOperand(x10, Map::kBitFieldOffset));
+ __ Tst(x11, 1 << Map::kIsUndetectable);
+ PrepareForBailoutBeforeSplit(expr, true, if_true, if_false);
+ Split(ne, if_true, if_false, fall_through);
+
+ context()->Plug(if_true, if_false);
+}
+
+
+void FullCodeGenerator::EmitIsStringWrapperSafeForDefaultValueOf(
+ CallRuntime* expr) {
+ ZoneList<Expression*>* args = expr->arguments();
+ ASSERT(args->length() == 1);
+ VisitForAccumulatorValue(args->at(0));
+
+ Label materialize_true, materialize_false, skip_lookup;
+ Label* if_true = NULL;
+ Label* if_false = NULL;
+ Label* fall_through = NULL;
+ context()->PrepareTest(&materialize_true, &materialize_false,
+ &if_true, &if_false, &fall_through);
+
+ Register object = x0;
+ __ AssertNotSmi(object);
+
+ Register map = x10;
+ Register bitfield2 = x11;
+ __ Ldr(map, FieldMemOperand(object, HeapObject::kMapOffset));
+ __ Ldrb(bitfield2, FieldMemOperand(map, Map::kBitField2Offset));
+ __ Tbnz(bitfield2, Map::kStringWrapperSafeForDefaultValueOf, &skip_lookup);
+
+ // Check for fast case object. Generate false result for slow case object.
+ Register props = x12;
+ Register props_map = x12;
+ Register hash_table_map = x13;
+ __ Ldr(props, FieldMemOperand(object, JSObject::kPropertiesOffset));
+ __ Ldr(props_map, FieldMemOperand(props, HeapObject::kMapOffset));
+ __ LoadRoot(hash_table_map, Heap::kHashTableMapRootIndex);
+ __ Cmp(props_map, hash_table_map);
+ __ B(eq, if_false);
+
+  // Look for the valueOf name in the descriptor array and indicate false if
+  // it is found. Since we omit an enumeration index check, a valueOf added
+  // via a transition that shares its descriptor array is a false positive.
+ Label loop, done;
+
+ // Skip loop if no descriptors are valid.
+ Register descriptors = x12;
+ Register descriptors_length = x13;
+ __ NumberOfOwnDescriptors(descriptors_length, map);
+ __ Cbz(descriptors_length, &done);
+
+ __ LoadInstanceDescriptors(map, descriptors);
+
+  // Calculate the number of fields occupied by the valid descriptors.
+ Register descriptors_end = x14;
+ __ Mov(x15, DescriptorArray::kDescriptorSize);
+ __ Mul(descriptors_length, descriptors_length, x15);
+ // Calculate location of the first key name.
+ __ Add(descriptors, descriptors,
+ DescriptorArray::kFirstOffset - kHeapObjectTag);
+ // Calculate the end of the descriptor array.
+ __ Add(descriptors_end, descriptors,
+ Operand(descriptors_length, LSL, kPointerSizeLog2));
+
+ // Loop through all the keys in the descriptor array. If one of these is the
+ // string "valueOf" the result is false.
+ Register valueof_string = x1;
+ int descriptor_size = DescriptorArray::kDescriptorSize * kPointerSize;
+ __ Mov(valueof_string, Operand(isolate()->factory()->value_of_string()));
+ __ Bind(&loop);
+ __ Ldr(x15, MemOperand(descriptors, descriptor_size, PostIndex));
+ __ Cmp(x15, valueof_string);
+ __ B(eq, if_false);
+ __ Cmp(descriptors, descriptors_end);
+ __ B(ne, &loop);
+
+ __ Bind(&done);
+
+ // Set the bit in the map to indicate that there is no local valueOf field.
+ __ Ldrb(x2, FieldMemOperand(map, Map::kBitField2Offset));
+ __ Orr(x2, x2, 1 << Map::kStringWrapperSafeForDefaultValueOf);
+ __ Strb(x2, FieldMemOperand(map, Map::kBitField2Offset));
+
+ __ Bind(&skip_lookup);
+
+  // If a valueOf property is not found on the object, check that its
+  // prototype is the unmodified String prototype. If not, the result is
+  // false.
+ Register prototype = x1;
+ Register global_idx = x2;
+ Register native_context = x2;
+ Register string_proto = x3;
+ Register proto_map = x4;
+ __ Ldr(prototype, FieldMemOperand(map, Map::kPrototypeOffset));
+ __ JumpIfSmi(prototype, if_false);
+ __ Ldr(proto_map, FieldMemOperand(prototype, HeapObject::kMapOffset));
+ __ Ldr(global_idx, GlobalObjectMemOperand());
+ __ Ldr(native_context,
+ FieldMemOperand(global_idx, GlobalObject::kNativeContextOffset));
+ __ Ldr(string_proto,
+ ContextMemOperand(native_context,
+ Context::STRING_FUNCTION_PROTOTYPE_MAP_INDEX));
+ __ Cmp(proto_map, string_proto);
+
+ PrepareForBailoutBeforeSplit(expr, true, if_true, if_false);
+ Split(eq, if_true, if_false, fall_through);
+
+ context()->Plug(if_true, if_false);
+}
+
+
+void FullCodeGenerator::EmitIsFunction(CallRuntime* expr) {
+ ZoneList<Expression*>* args = expr->arguments();
+ ASSERT(args->length() == 1);
+
+ VisitForAccumulatorValue(args->at(0));
+
+ Label materialize_true, materialize_false;
+ Label* if_true = NULL;
+ Label* if_false = NULL;
+ Label* fall_through = NULL;
+ context()->PrepareTest(&materialize_true, &materialize_false,
+ &if_true, &if_false, &fall_through);
+
+ __ JumpIfSmi(x0, if_false);
+ __ CompareObjectType(x0, x10, x11, JS_FUNCTION_TYPE);
+ PrepareForBailoutBeforeSplit(expr, true, if_true, if_false);
+ Split(eq, if_true, if_false, fall_through);
+
+ context()->Plug(if_true, if_false);
+}
+
+
+void FullCodeGenerator::EmitIsMinusZero(CallRuntime* expr) {
+ ZoneList<Expression*>* args = expr->arguments();
+ ASSERT(args->length() == 1);
+
+ VisitForAccumulatorValue(args->at(0));
+
+ Label materialize_true, materialize_false;
+ Label* if_true = NULL;
+ Label* if_false = NULL;
+ Label* fall_through = NULL;
+ context()->PrepareTest(&materialize_true, &materialize_false,
+ &if_true, &if_false, &fall_through);
+
+ // Only a HeapNumber can be -0.0, so return false if we have something else.
+ __ CheckMap(x0, x1, Heap::kHeapNumberMapRootIndex, if_false, DO_SMI_CHECK);
+
+ // Test the bit pattern.
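+  // -0.0 is the only double whose bit pattern is 0x8000000000000000 (the
+  // minimum int64), so subtracting 1 sets the V flag only for that value.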
+ __ Ldr(x10, FieldMemOperand(x0, HeapNumber::kValueOffset));
+ __ Cmp(x10, 1); // Set V on 0x8000000000000000.
+
+ PrepareForBailoutBeforeSplit(expr, true, if_true, if_false);
+ Split(vs, if_true, if_false, fall_through);
+
+ context()->Plug(if_true, if_false);
+}
+
+
+void FullCodeGenerator::EmitIsArray(CallRuntime* expr) {
+ ZoneList<Expression*>* args = expr->arguments();
+ ASSERT(args->length() == 1);
+
+ VisitForAccumulatorValue(args->at(0));
+
+ Label materialize_true, materialize_false;
+ Label* if_true = NULL;
+ Label* if_false = NULL;
+ Label* fall_through = NULL;
+ context()->PrepareTest(&materialize_true, &materialize_false,
+ &if_true, &if_false, &fall_through);
+
+ __ JumpIfSmi(x0, if_false);
+ __ CompareObjectType(x0, x10, x11, JS_ARRAY_TYPE);
+ PrepareForBailoutBeforeSplit(expr, true, if_true, if_false);
+ Split(eq, if_true, if_false, fall_through);
+
+ context()->Plug(if_true, if_false);
+}
+
+
+void FullCodeGenerator::EmitIsRegExp(CallRuntime* expr) {
+ ZoneList<Expression*>* args = expr->arguments();
+ ASSERT(args->length() == 1);
+
+ VisitForAccumulatorValue(args->at(0));
+
+ Label materialize_true, materialize_false;
+ Label* if_true = NULL;
+ Label* if_false = NULL;
+ Label* fall_through = NULL;
+ context()->PrepareTest(&materialize_true, &materialize_false,
+ &if_true, &if_false, &fall_through);
+
+ __ JumpIfSmi(x0, if_false);
+ __ CompareObjectType(x0, x10, x11, JS_REGEXP_TYPE);
+ PrepareForBailoutBeforeSplit(expr, true, if_true, if_false);
+ Split(eq, if_true, if_false, fall_through);
+
+ context()->Plug(if_true, if_false);
+}
+
+
+
+void FullCodeGenerator::EmitIsConstructCall(CallRuntime* expr) {
+ ASSERT(expr->arguments()->length() == 0);
+
+ Label materialize_true, materialize_false;
+ Label* if_true = NULL;
+ Label* if_false = NULL;
+ Label* fall_through = NULL;
+ context()->PrepareTest(&materialize_true, &materialize_false,
+ &if_true, &if_false, &fall_through);
+
+ // Get the frame pointer for the calling frame.
+ __ Ldr(x2, MemOperand(fp, StandardFrameConstants::kCallerFPOffset));
+
+ // Skip the arguments adaptor frame if it exists.
+ Label check_frame_marker;
+ __ Ldr(x1, MemOperand(x2, StandardFrameConstants::kContextOffset));
+ __ Cmp(x1, Smi::FromInt(StackFrame::ARGUMENTS_ADAPTOR));
+ __ B(ne, &check_frame_marker);
+ __ Ldr(x2, MemOperand(x2, StandardFrameConstants::kCallerFPOffset));
+
+ // Check the marker in the calling frame.
+ __ Bind(&check_frame_marker);
+ __ Ldr(x1, MemOperand(x2, StandardFrameConstants::kMarkerOffset));
+ __ Cmp(x1, Smi::FromInt(StackFrame::CONSTRUCT));
+ PrepareForBailoutBeforeSplit(expr, true, if_true, if_false);
+ Split(eq, if_true, if_false, fall_through);
+
+ context()->Plug(if_true, if_false);
+}
+
+
+void FullCodeGenerator::EmitObjectEquals(CallRuntime* expr) {
+ ZoneList<Expression*>* args = expr->arguments();
+ ASSERT(args->length() == 2);
+
+ // Load the two objects into registers and perform the comparison.
+ VisitForStackValue(args->at(0));
+ VisitForAccumulatorValue(args->at(1));
+
+ Label materialize_true, materialize_false;
+ Label* if_true = NULL;
+ Label* if_false = NULL;
+ Label* fall_through = NULL;
+ context()->PrepareTest(&materialize_true, &materialize_false,
+ &if_true, &if_false, &fall_through);
+
+ __ Pop(x1);
+ __ Cmp(x0, x1);
+ PrepareForBailoutBeforeSplit(expr, true, if_true, if_false);
+ Split(eq, if_true, if_false, fall_through);
+
+ context()->Plug(if_true, if_false);
+}
+
+
+void FullCodeGenerator::EmitArguments(CallRuntime* expr) {
+ ZoneList<Expression*>* args = expr->arguments();
+ ASSERT(args->length() == 1);
+
+ // ArgumentsAccessStub expects the key in x1.
+ VisitForAccumulatorValue(args->at(0));
+ __ Mov(x1, x0);
+ __ Mov(x0, Smi::FromInt(info_->scope()->num_parameters()));
+ ArgumentsAccessStub stub(ArgumentsAccessStub::READ_ELEMENT);
+ __ CallStub(&stub);
+ context()->Plug(x0);
+}
+
+
+void FullCodeGenerator::EmitArgumentsLength(CallRuntime* expr) {
+ ASSERT(expr->arguments()->length() == 0);
+ Label exit;
+ // Get the number of formal parameters.
+ __ Mov(x0, Smi::FromInt(info_->scope()->num_parameters()));
+
+ // Check if the calling frame is an arguments adaptor frame.
+ __ Ldr(x12, MemOperand(fp, StandardFrameConstants::kCallerFPOffset));
+ __ Ldr(x13, MemOperand(x12, StandardFrameConstants::kContextOffset));
+ __ Cmp(x13, Smi::FromInt(StackFrame::ARGUMENTS_ADAPTOR));
+ __ B(ne, &exit);
+
+ // Arguments adaptor case: Read the arguments length from the
+ // adaptor frame.
+ __ Ldr(x0, MemOperand(x12, ArgumentsAdaptorFrameConstants::kLengthOffset));
+
+ __ Bind(&exit);
+ context()->Plug(x0);
+}
+
+
+void FullCodeGenerator::EmitClassOf(CallRuntime* expr) {
+ ASM_LOCATION("FullCodeGenerator::EmitClassOf");
+ ZoneList<Expression*>* args = expr->arguments();
+ ASSERT(args->length() == 1);
+ Label done, null, function, non_function_constructor;
+
+ VisitForAccumulatorValue(args->at(0));
+
+ // If the object is a smi, we return null.
+ __ JumpIfSmi(x0, &null);
+
+ // Check that the object is a JS object but take special care of JS
+ // functions to make sure they have 'Function' as their class.
+  // Assume that there are only two callable types, one at each end of the
+  // type range for JS object types. This saves extra comparisons.
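+  // After the range check below, equality with either end of the spec-object
+  // type range identifies one of the two callable types, which are given
+  // class 'Function'.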
+ STATIC_ASSERT(NUM_OF_CALLABLE_SPEC_OBJECT_TYPES == 2);
+ __ CompareObjectType(x0, x10, x11, FIRST_SPEC_OBJECT_TYPE);
+ // x10: object's map.
+ // x11: object's type.
+ __ B(lt, &null);
+ STATIC_ASSERT(FIRST_NONCALLABLE_SPEC_OBJECT_TYPE ==
+ FIRST_SPEC_OBJECT_TYPE + 1);
+ __ B(eq, &function);
+
+ __ Cmp(x11, LAST_SPEC_OBJECT_TYPE);
+ STATIC_ASSERT(LAST_NONCALLABLE_SPEC_OBJECT_TYPE ==
+ LAST_SPEC_OBJECT_TYPE - 1);
+ __ B(eq, &function);
+ // Assume that there is no larger type.
+ STATIC_ASSERT(LAST_NONCALLABLE_SPEC_OBJECT_TYPE == LAST_TYPE - 1);
+
+ // Check if the constructor in the map is a JS function.
+ __ Ldr(x12, FieldMemOperand(x10, Map::kConstructorOffset));
+ __ JumpIfNotObjectType(x12, x13, x14, JS_FUNCTION_TYPE,
+ &non_function_constructor);
+
+ // x12 now contains the constructor function. Grab the
+ // instance class name from there.
+ __ Ldr(x13, FieldMemOperand(x12, JSFunction::kSharedFunctionInfoOffset));
+ __ Ldr(x0,
+ FieldMemOperand(x13, SharedFunctionInfo::kInstanceClassNameOffset));
+ __ B(&done);
+
+ // Functions have class 'Function'.
+ __ Bind(&function);
+ __ LoadRoot(x0, Heap::kfunction_class_stringRootIndex);
+ __ B(&done);
+
+ // Objects with a non-function constructor have class 'Object'.
+ __ Bind(&non_function_constructor);
+ __ LoadRoot(x0, Heap::kObject_stringRootIndex);
+ __ B(&done);
+
+ // Non-JS objects have class null.
+ __ Bind(&null);
+ __ LoadRoot(x0, Heap::kNullValueRootIndex);
+
+ // All done.
+ __ Bind(&done);
+
+ context()->Plug(x0);
+}
+
+
+void FullCodeGenerator::EmitLog(CallRuntime* expr) {
+ // Conditionally generate a log call.
+ // Args:
+ // 0 (literal string): The type of logging (corresponds to the flags).
+ // This is used to determine whether or not to generate the log call.
+ // 1 (string): Format string. Access the string at argument index 2
+ // with '%2s' (see Logger::LogRuntime for all the formats).
+ // 2 (array): Arguments to the format string.
+ ZoneList<Expression*>* args = expr->arguments();
+ ASSERT_EQ(args->length(), 3);
+ if (CodeGenerator::ShouldGenerateLog(isolate(), args->at(0))) {
+ VisitForStackValue(args->at(1));
+ VisitForStackValue(args->at(2));
+ __ CallRuntime(Runtime::kHiddenLog, 2);
+ }
+
+ // Finally, we're expected to leave a value on the top of the stack.
+ __ LoadRoot(x0, Heap::kUndefinedValueRootIndex);
+ context()->Plug(x0);
+}
+
+
+void FullCodeGenerator::EmitSubString(CallRuntime* expr) {
+ // Load the arguments on the stack and call the stub.
+ SubStringStub stub;
+ ZoneList<Expression*>* args = expr->arguments();
+ ASSERT(args->length() == 3);
+ VisitForStackValue(args->at(0));
+ VisitForStackValue(args->at(1));
+ VisitForStackValue(args->at(2));
+ __ CallStub(&stub);
+ context()->Plug(x0);
+}
+
+
+void FullCodeGenerator::EmitRegExpExec(CallRuntime* expr) {
+ // Load the arguments on the stack and call the stub.
+ RegExpExecStub stub;
+ ZoneList<Expression*>* args = expr->arguments();
+ ASSERT(args->length() == 4);
+ VisitForStackValue(args->at(0));
+ VisitForStackValue(args->at(1));
+ VisitForStackValue(args->at(2));
+ VisitForStackValue(args->at(3));
+ __ CallStub(&stub);
+ context()->Plug(x0);
+}
+
+
+void FullCodeGenerator::EmitValueOf(CallRuntime* expr) {
+ ASM_LOCATION("FullCodeGenerator::EmitValueOf");
+ ZoneList<Expression*>* args = expr->arguments();
+ ASSERT(args->length() == 1);
+ VisitForAccumulatorValue(args->at(0)); // Load the object.
+
+ Label done;
+ // If the object is a smi return the object.
+ __ JumpIfSmi(x0, &done);
+ // If the object is not a value type, return the object.
+ __ JumpIfNotObjectType(x0, x10, x11, JS_VALUE_TYPE, &done);
+ __ Ldr(x0, FieldMemOperand(x0, JSValue::kValueOffset));
+
+ __ Bind(&done);
+ context()->Plug(x0);
+}
+
+
+void FullCodeGenerator::EmitDateField(CallRuntime* expr) {
+ ZoneList<Expression*>* args = expr->arguments();
+ ASSERT(args->length() == 2);
+ ASSERT_NE(NULL, args->at(1)->AsLiteral());
+ Smi* index = Smi::cast(*(args->at(1)->AsLiteral()->value()));
+
+ VisitForAccumulatorValue(args->at(0)); // Load the object.
+
+ Label runtime, done, not_date_object;
+ Register object = x0;
+ Register result = x0;
+ Register stamp_addr = x10;
+ Register stamp_cache = x11;
+
+ __ JumpIfSmi(object, &not_date_object);
+ __ JumpIfNotObjectType(object, x10, x10, JS_DATE_TYPE, &not_date_object);
+
+ if (index->value() == 0) {
+ __ Ldr(result, FieldMemOperand(object, JSDate::kValueOffset));
+ __ B(&done);
+ } else {
+ if (index->value() < JSDate::kFirstUncachedField) {
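+      // Cached date fields can be read directly as long as the object's
+      // cache stamp matches the isolate's date cache stamp; otherwise fall
+      // back to the runtime helper.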
+ ExternalReference stamp = ExternalReference::date_cache_stamp(isolate());
+ __ Mov(x10, stamp);
+ __ Ldr(stamp_addr, MemOperand(x10));
+ __ Ldr(stamp_cache, FieldMemOperand(object, JSDate::kCacheStampOffset));
+ __ Cmp(stamp_addr, stamp_cache);
+ __ B(ne, &runtime);
+ __ Ldr(result, FieldMemOperand(object, JSDate::kValueOffset +
+ kPointerSize * index->value()));
+ __ B(&done);
+ }
+
+ __ Bind(&runtime);
+ __ Mov(x1, index);
+ __ CallCFunction(ExternalReference::get_date_field_function(isolate()), 2);
+ __ B(&done);
+ }
+
+ __ Bind(&not_date_object);
+ __ CallRuntime(Runtime::kHiddenThrowNotDateError, 0);
+ __ Bind(&done);
+ context()->Plug(x0);
+}
+
+
+void FullCodeGenerator::EmitOneByteSeqStringSetChar(CallRuntime* expr) {
+ ZoneList<Expression*>* args = expr->arguments();
+ ASSERT_EQ(3, args->length());
+
+ Register string = x0;
+ Register index = x1;
+ Register value = x2;
+ Register scratch = x10;
+
+ VisitForStackValue(args->at(1)); // index
+ VisitForStackValue(args->at(2)); // value
+ VisitForAccumulatorValue(args->at(0)); // string
+ __ Pop(value, index);
+
+ if (FLAG_debug_code) {
+ __ AssertSmi(value, kNonSmiValue);
+ __ AssertSmi(index, kNonSmiIndex);
+ static const uint32_t one_byte_seq_type = kSeqStringTag | kOneByteStringTag;
+ __ EmitSeqStringSetCharCheck(string, index, kIndexIsSmi, scratch,
+ one_byte_seq_type);
+ }
+
+ __ Add(scratch, string, SeqOneByteString::kHeaderSize - kHeapObjectTag);
+ __ SmiUntag(value);
+ __ SmiUntag(index);
+ __ Strb(value, MemOperand(scratch, index));
+ context()->Plug(string);
+}
+
+
+void FullCodeGenerator::EmitTwoByteSeqStringSetChar(CallRuntime* expr) {
+ ZoneList<Expression*>* args = expr->arguments();
+ ASSERT_EQ(3, args->length());
+
+ Register string = x0;
+ Register index = x1;
+ Register value = x2;
+ Register scratch = x10;
+
+ VisitForStackValue(args->at(1)); // index
+ VisitForStackValue(args->at(2)); // value
+ VisitForAccumulatorValue(args->at(0)); // string
+ __ Pop(value, index);
+
+ if (FLAG_debug_code) {
+ __ AssertSmi(value, kNonSmiValue);
+ __ AssertSmi(index, kNonSmiIndex);
+ static const uint32_t two_byte_seq_type = kSeqStringTag | kTwoByteStringTag;
+ __ EmitSeqStringSetCharCheck(string, index, kIndexIsSmi, scratch,
+ two_byte_seq_type);
+ }
+
+ __ Add(scratch, string, SeqTwoByteString::kHeaderSize - kHeapObjectTag);
+ __ SmiUntag(value);
+ __ SmiUntag(index);
+ __ Strh(value, MemOperand(scratch, index, LSL, 1));
+ context()->Plug(string);
+}
+
+
+void FullCodeGenerator::EmitMathPow(CallRuntime* expr) {
+ // Load the arguments on the stack and call the MathPow stub.
+ ZoneList<Expression*>* args = expr->arguments();
+ ASSERT(args->length() == 2);
+ VisitForStackValue(args->at(0));
+ VisitForStackValue(args->at(1));
+ MathPowStub stub(MathPowStub::ON_STACK);
+ __ CallStub(&stub);
+ context()->Plug(x0);
+}
+
+
+void FullCodeGenerator::EmitSetValueOf(CallRuntime* expr) {
+ ZoneList<Expression*>* args = expr->arguments();
+ ASSERT(args->length() == 2);
+ VisitForStackValue(args->at(0)); // Load the object.
+ VisitForAccumulatorValue(args->at(1)); // Load the value.
+ __ Pop(x1);
+ // x0 = value.
+ // x1 = object.
+
+ Label done;
+ // If the object is a smi, return the value.
+ __ JumpIfSmi(x1, &done);
+
+ // If the object is not a value type, return the value.
+ __ JumpIfNotObjectType(x1, x10, x11, JS_VALUE_TYPE, &done);
+
+ // Store the value.
+ __ Str(x0, FieldMemOperand(x1, JSValue::kValueOffset));
+ // Update the write barrier. Save the value as it will be
+ // overwritten by the write barrier code and is needed afterward.
+ __ Mov(x10, x0);
+ __ RecordWriteField(
+ x1, JSValue::kValueOffset, x10, x11, kLRHasBeenSaved, kDontSaveFPRegs);
+
+ __ Bind(&done);
+ context()->Plug(x0);
+}
+
+
+void FullCodeGenerator::EmitNumberToString(CallRuntime* expr) {
+ ZoneList<Expression*>* args = expr->arguments();
+ ASSERT_EQ(args->length(), 1);
+
+ // Load the argument into x0 and call the stub.
+ VisitForAccumulatorValue(args->at(0));
+
+ NumberToStringStub stub;
+ __ CallStub(&stub);
+ context()->Plug(x0);
+}
+
+
+void FullCodeGenerator::EmitStringCharFromCode(CallRuntime* expr) {
+ ZoneList<Expression*>* args = expr->arguments();
+ ASSERT(args->length() == 1);
+
+ VisitForAccumulatorValue(args->at(0));
+
+ Label done;
+ Register code = x0;
+ Register result = x1;
+
+ StringCharFromCodeGenerator generator(code, result);
+ generator.GenerateFast(masm_);
+ __ B(&done);
+
+ NopRuntimeCallHelper call_helper;
+ generator.GenerateSlow(masm_, call_helper);
+
+ __ Bind(&done);
+ context()->Plug(result);
+}
+
+
+void FullCodeGenerator::EmitStringCharCodeAt(CallRuntime* expr) {
+ ZoneList<Expression*>* args = expr->arguments();
+ ASSERT(args->length() == 2);
+
+ VisitForStackValue(args->at(0));
+ VisitForAccumulatorValue(args->at(1));
+
+ Register object = x1;
+ Register index = x0;
+ Register result = x3;
+
+ __ Pop(object);
+
+ Label need_conversion;
+ Label index_out_of_range;
+ Label done;
+ StringCharCodeAtGenerator generator(object,
+ index,
+ result,
+ &need_conversion,
+ &need_conversion,
+ &index_out_of_range,
+ STRING_INDEX_IS_NUMBER);
+ generator.GenerateFast(masm_);
+ __ B(&done);
+
+ __ Bind(&index_out_of_range);
+ // When the index is out of range, the spec requires us to return NaN.
+ __ LoadRoot(result, Heap::kNanValueRootIndex);
+ __ B(&done);
+
+ __ Bind(&need_conversion);
+ // Load the undefined value into the result register, which will
+ // trigger conversion.
+ __ LoadRoot(result, Heap::kUndefinedValueRootIndex);
+ __ B(&done);
+
+ NopRuntimeCallHelper call_helper;
+ generator.GenerateSlow(masm_, call_helper);
+
+ __ Bind(&done);
+ context()->Plug(result);
+}
+
+
+void FullCodeGenerator::EmitStringCharAt(CallRuntime* expr) {
+ ZoneList<Expression*>* args = expr->arguments();
+ ASSERT(args->length() == 2);
+
+ VisitForStackValue(args->at(0));
+ VisitForAccumulatorValue(args->at(1));
+
+ Register object = x1;
+ Register index = x0;
+ Register result = x0;
+
+ __ Pop(object);
+
+ Label need_conversion;
+ Label index_out_of_range;
+ Label done;
+ StringCharAtGenerator generator(object,
+ index,
+ x3,
+ result,
+ &need_conversion,
+ &need_conversion,
+ &index_out_of_range,
+ STRING_INDEX_IS_NUMBER);
+ generator.GenerateFast(masm_);
+ __ B(&done);
+
+ __ Bind(&index_out_of_range);
+ // When the index is out of range, the spec requires us to return
+ // the empty string.
+ __ LoadRoot(result, Heap::kempty_stringRootIndex);
+ __ B(&done);
+
+ __ Bind(&need_conversion);
+ // Move smi zero into the result register, which will trigger conversion.
+ __ Mov(result, Smi::FromInt(0));
+ __ B(&done);
+
+ NopRuntimeCallHelper call_helper;
+ generator.GenerateSlow(masm_, call_helper);
+
+ __ Bind(&done);
+ context()->Plug(result);
+}
+
+
+void FullCodeGenerator::EmitStringAdd(CallRuntime* expr) {
+ ASM_LOCATION("FullCodeGenerator::EmitStringAdd");
+ ZoneList<Expression*>* args = expr->arguments();
+ ASSERT_EQ(2, args->length());
+
+ VisitForStackValue(args->at(0));
+ VisitForAccumulatorValue(args->at(1));
+
+ __ Pop(x1);
+ StringAddStub stub(STRING_ADD_CHECK_BOTH, NOT_TENURED);
+ __ CallStub(&stub);
+
+ context()->Plug(x0);
+}
+
+
+void FullCodeGenerator::EmitStringCompare(CallRuntime* expr) {
+ ZoneList<Expression*>* args = expr->arguments();
+ ASSERT_EQ(2, args->length());
+ VisitForStackValue(args->at(0));
+ VisitForStackValue(args->at(1));
+
+ StringCompareStub stub;
+ __ CallStub(&stub);
+ context()->Plug(x0);
+}
+
+
+void FullCodeGenerator::EmitMathLog(CallRuntime* expr) {
+ // Load the argument on the stack and call the runtime function.
+ ZoneList<Expression*>* args = expr->arguments();
+ ASSERT(args->length() == 1);
+ VisitForStackValue(args->at(0));
+ __ CallRuntime(Runtime::kMath_log, 1);
+ context()->Plug(x0);
+}
+
+
+void FullCodeGenerator::EmitMathSqrt(CallRuntime* expr) {
+ // Load the argument on the stack and call the runtime function.
+ ZoneList<Expression*>* args = expr->arguments();
+ ASSERT(args->length() == 1);
+ VisitForStackValue(args->at(0));
+ __ CallRuntime(Runtime::kMath_sqrt, 1);
+ context()->Plug(x0);
+}
+
+
+void FullCodeGenerator::EmitCallFunction(CallRuntime* expr) {
+ ASM_LOCATION("FullCodeGenerator::EmitCallFunction");
+ ZoneList<Expression*>* args = expr->arguments();
+ ASSERT(args->length() >= 2);
+
+ int arg_count = args->length() - 2; // 2 ~ receiver and function.
+ for (int i = 0; i < arg_count + 1; i++) {
+ VisitForStackValue(args->at(i));
+ }
+ VisitForAccumulatorValue(args->last()); // Function.
+
+ Label runtime, done;
+ // Check for non-function argument (including proxy).
+ __ JumpIfSmi(x0, &runtime);
+ __ JumpIfNotObjectType(x0, x1, x1, JS_FUNCTION_TYPE, &runtime);
+
+ // InvokeFunction requires the function in x1. Move it in there.
+ __ Mov(x1, x0);
+ ParameterCount count(arg_count);
+ __ InvokeFunction(x1, count, CALL_FUNCTION, NullCallWrapper());
+ __ Ldr(cp, MemOperand(fp, StandardFrameConstants::kContextOffset));
+ __ B(&done);
+
+ __ Bind(&runtime);
+ __ Push(x0);
+ __ CallRuntime(Runtime::kCall, args->length());
+ __ Bind(&done);
+
+ context()->Plug(x0);
+}
+
+
+void FullCodeGenerator::EmitRegExpConstructResult(CallRuntime* expr) {
+ RegExpConstructResultStub stub;
+ ZoneList<Expression*>* args = expr->arguments();
+ ASSERT(args->length() == 3);
+ VisitForStackValue(args->at(0));
+ VisitForStackValue(args->at(1));
+ VisitForAccumulatorValue(args->at(2));
+ __ Pop(x1, x2);
+ __ CallStub(&stub);
+ context()->Plug(x0);
+}
+
+
+void FullCodeGenerator::EmitGetFromCache(CallRuntime* expr) {
+ ZoneList<Expression*>* args = expr->arguments();
+ ASSERT_EQ(2, args->length());
+ ASSERT_NE(NULL, args->at(0)->AsLiteral());
+ int cache_id = Smi::cast(*(args->at(0)->AsLiteral()->value()))->value();
+
+ Handle<FixedArray> jsfunction_result_caches(
+ isolate()->native_context()->jsfunction_result_caches());
+ if (jsfunction_result_caches->length() <= cache_id) {
+ __ Abort(kAttemptToUseUndefinedCache);
+ __ LoadRoot(x0, Heap::kUndefinedValueRootIndex);
+ context()->Plug(x0);
+ return;
+ }
+
+ VisitForAccumulatorValue(args->at(1));
+
+ Register key = x0;
+ Register cache = x1;
+ __ Ldr(cache, GlobalObjectMemOperand());
+ __ Ldr(cache, FieldMemOperand(cache, GlobalObject::kNativeContextOffset));
+ __ Ldr(cache, ContextMemOperand(cache,
+ Context::JSFUNCTION_RESULT_CACHES_INDEX));
+ __ Ldr(cache,
+ FieldMemOperand(cache, FixedArray::OffsetOfElementAt(cache_id)));
+
+ Label done;
+ __ Ldrsw(x2, UntagSmiFieldMemOperand(cache,
+ JSFunctionResultCache::kFingerOffset));
+ __ Add(x3, cache, FixedArray::kHeaderSize - kHeapObjectTag);
+ __ Add(x3, x3, Operand(x2, LSL, kPointerSizeLog2));
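+  // x3 now points at the cache entry selected by the finger; each entry
+  // stores a key followed by its cached value.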
+
+ // Load the key and data from the cache.
+ __ Ldp(x2, x3, MemOperand(x3));
+
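+  // On a hit the key at the finger matches; conditionally move the cached
+  // value into x0 and skip the runtime call.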
+ __ Cmp(key, x2);
+ __ CmovX(x0, x3, eq);
+ __ B(eq, &done);
+
+ // Call runtime to perform the lookup.
+ __ Push(cache, key);
+ __ CallRuntime(Runtime::kHiddenGetFromCache, 2);
+
+ __ Bind(&done);
+ context()->Plug(x0);
+}
+
+
+void FullCodeGenerator::EmitHasCachedArrayIndex(CallRuntime* expr) {
+ ZoneList<Expression*>* args = expr->arguments();
+ VisitForAccumulatorValue(args->at(0));
+
+ Label materialize_true, materialize_false;
+ Label* if_true = NULL;
+ Label* if_false = NULL;
+ Label* fall_through = NULL;
+ context()->PrepareTest(&materialize_true, &materialize_false,
+ &if_true, &if_false, &fall_through);
+
+ __ Ldr(x10, FieldMemOperand(x0, String::kHashFieldOffset));
+ __ Tst(x10, String::kContainsCachedArrayIndexMask);
+ PrepareForBailoutBeforeSplit(expr, true, if_true, if_false);
+ Split(eq, if_true, if_false, fall_through);
+
+ context()->Plug(if_true, if_false);
+}
+
+
+void FullCodeGenerator::EmitGetCachedArrayIndex(CallRuntime* expr) {
+ ZoneList<Expression*>* args = expr->arguments();
+ ASSERT(args->length() == 1);
+ VisitForAccumulatorValue(args->at(0));
+
+ __ AssertString(x0);
+
+ __ Ldr(x10, FieldMemOperand(x0, String::kHashFieldOffset));
+ __ IndexFromHash(x10, x0);
+
+ context()->Plug(x0);
+}
+
+
+void FullCodeGenerator::EmitFastAsciiArrayJoin(CallRuntime* expr) {
+ ASM_LOCATION("FullCodeGenerator::EmitFastAsciiArrayJoin");
+
+ ZoneList<Expression*>* args = expr->arguments();
+ ASSERT(args->length() == 2);
+ VisitForStackValue(args->at(1));
+ VisitForAccumulatorValue(args->at(0));
+
+ Register array = x0;
+ Register result = x0;
+ Register elements = x1;
+ Register element = x2;
+ Register separator = x3;
+ Register array_length = x4;
+ Register result_pos = x5;
+ Register map = x6;
+ Register string_length = x10;
+ Register elements_end = x11;
+ Register string = x12;
+ Register scratch1 = x13;
+ Register scratch2 = x14;
+ Register scratch3 = x7;
+ Register separator_length = x15;
+
+ Label bailout, done, one_char_separator, long_separator,
+ non_trivial_array, not_size_one_array, loop,
+ empty_separator_loop, one_char_separator_loop,
+ one_char_separator_loop_entry, long_separator_loop;
+
+ // The separator operand is on the stack.
+ __ Pop(separator);
+
+ // Check that the array is a JSArray.
+ __ JumpIfSmi(array, &bailout);
+ __ JumpIfNotObjectType(array, map, scratch1, JS_ARRAY_TYPE, &bailout);
+
+ // Check that the array has fast elements.
+ __ CheckFastElements(map, scratch1, &bailout);
+
+ // If the array has length zero, return the empty string.
+ // Load and untag the length of the array.
+ // It is an unsigned value, so we can skip sign extension.
+ // We assume little endianness.
+ __ Ldrsw(array_length,
+ UntagSmiFieldMemOperand(array, JSArray::kLengthOffset));
+ __ Cbnz(array_length, &non_trivial_array);
+ __ LoadRoot(result, Heap::kempty_stringRootIndex);
+ __ B(&done);
+
+ __ Bind(&non_trivial_array);
+ // Get the FixedArray containing array's elements.
+ __ Ldr(elements, FieldMemOperand(array, JSArray::kElementsOffset));
+
+ // Check that all array elements are sequential ASCII strings, and
+ // accumulate the sum of their lengths.
+ __ Mov(string_length, 0);
+ __ Add(element, elements, FixedArray::kHeaderSize - kHeapObjectTag);
+ __ Add(elements_end, element, Operand(array_length, LSL, kPointerSizeLog2));
+ // Loop condition: while (element < elements_end).
+ // Live values in registers:
+ // elements: Fixed array of strings.
+ // array_length: Length of the fixed array of strings (not smi)
+ // separator: Separator string
+ // string_length: Accumulated sum of string lengths (not smi).
+ // element: Current array element.
+ // elements_end: Array end.
+ if (FLAG_debug_code) {
+ __ Cmp(array_length, 0);
+ __ Assert(gt, kNoEmptyArraysHereInEmitFastAsciiArrayJoin);
+ }
+ __ Bind(&loop);
+ __ Ldr(string, MemOperand(element, kPointerSize, PostIndex));
+ __ JumpIfSmi(string, &bailout);
+ __ Ldr(scratch1, FieldMemOperand(string, HeapObject::kMapOffset));
+ __ Ldrb(scratch1, FieldMemOperand(scratch1, Map::kInstanceTypeOffset));
+ __ JumpIfInstanceTypeIsNotSequentialAscii(scratch1, scratch2, &bailout);
+ __ Ldrsw(scratch1,
+ UntagSmiFieldMemOperand(string, SeqOneByteString::kLengthOffset));
+ __ Adds(string_length, string_length, scratch1);
+ __ B(vs, &bailout);
+ __ Cmp(element, elements_end);
+ __ B(lt, &loop);
+
+ // If array_length is 1, return elements[0], a string.
+ __ Cmp(array_length, 1);
+ __ B(ne, &not_size_one_array);
+ __ Ldr(result, FieldMemOperand(elements, FixedArray::kHeaderSize));
+ __ B(&done);
+
+ __ Bind(&not_size_one_array);
+
+ // Live values in registers:
+ // separator: Separator string
+ // array_length: Length of the array (not smi).
+ // string_length: Sum of string lengths (not smi).
+ // elements: FixedArray of strings.
+
+ // Check that the separator is a flat ASCII string.
+ __ JumpIfSmi(separator, &bailout);
+ __ Ldr(scratch1, FieldMemOperand(separator, HeapObject::kMapOffset));
+ __ Ldrb(scratch1, FieldMemOperand(scratch1, Map::kInstanceTypeOffset));
+ __ JumpIfInstanceTypeIsNotSequentialAscii(scratch1, scratch2, &bailout);
+
+ // Add (separator length times array_length) - separator length to the
+ // string_length to get the length of the result string.
+ // Load the separator length as untagged.
+ // We assume little endianness, and that the length is positive.
+ __ Ldrsw(separator_length,
+ UntagSmiFieldMemOperand(separator,
+ SeqOneByteString::kLengthOffset));
+ __ Sub(string_length, string_length, separator_length);
+ __ Umaddl(string_length, array_length.W(), separator_length.W(),
+ string_length);
+
+ // Get first element in the array.
+ __ Add(element, elements, FixedArray::kHeaderSize - kHeapObjectTag);
+ // Live values in registers:
+ // element: First array element
+ // separator: Separator string
+ // string_length: Length of result string (not smi)
+ // array_length: Length of the array (not smi).
+ __ AllocateAsciiString(result, string_length, scratch1, scratch2, scratch3,
+ &bailout);
+
+ // Prepare for looping. Set up elements_end to end of the array. Set
+ // result_pos to the position of the result where to write the first
+ // character.
+ // TODO(all): useless unless AllocateAsciiString trashes the register.
+ __ Add(elements_end, element, Operand(array_length, LSL, kPointerSizeLog2));
+ __ Add(result_pos, result, SeqOneByteString::kHeaderSize - kHeapObjectTag);
+
+ // Check the length of the separator.
+ __ Cmp(separator_length, 1);
+ __ B(eq, &one_char_separator);
+ __ B(gt, &long_separator);
+
+ // Empty separator case
+ __ Bind(&empty_separator_loop);
+ // Live values in registers:
+ // result_pos: the position to which we are currently copying characters.
+ // element: Current array element.
+ // elements_end: Array end.
+
+ // Copy next array element to the result.
+ __ Ldr(string, MemOperand(element, kPointerSize, PostIndex));
+ __ Ldrsw(string_length,
+ UntagSmiFieldMemOperand(string, String::kLengthOffset));
+ __ Add(string, string, SeqOneByteString::kHeaderSize - kHeapObjectTag);
+ __ CopyBytes(result_pos, string, string_length, scratch1);
+ __ Cmp(element, elements_end);
+ __ B(lt, &empty_separator_loop); // End while (element < elements_end).
+ __ B(&done);
+
+ // One-character separator case
+ __ Bind(&one_char_separator);
+ // Replace separator with its ASCII character value.
+ __ Ldrb(separator, FieldMemOperand(separator, SeqOneByteString::kHeaderSize));
+ // Jump into the loop after the code that copies the separator, so the first
+  // element is not preceded by a separator.
+ __ B(&one_char_separator_loop_entry);
+
+ __ Bind(&one_char_separator_loop);
+ // Live values in registers:
+ // result_pos: the position to which we are currently copying characters.
+ // element: Current array element.
+ // elements_end: Array end.
+ // separator: Single separator ASCII char (in lower byte).
+
+ // Copy the separator character to the result.
+ __ Strb(separator, MemOperand(result_pos, 1, PostIndex));
+
+ // Copy next array element to the result.
+ __ Bind(&one_char_separator_loop_entry);
+ __ Ldr(string, MemOperand(element, kPointerSize, PostIndex));
+ __ Ldrsw(string_length,
+ UntagSmiFieldMemOperand(string, String::kLengthOffset));
+ __ Add(string, string, SeqOneByteString::kHeaderSize - kHeapObjectTag);
+ __ CopyBytes(result_pos, string, string_length, scratch1);
+ __ Cmp(element, elements_end);
+ __ B(lt, &one_char_separator_loop); // End while (element < elements_end).
+ __ B(&done);
+
+ // Long separator case (separator is more than one character). Entry is at the
+ // label long_separator below.
+ __ Bind(&long_separator_loop);
+ // Live values in registers:
+ // result_pos: the position to which we are currently copying characters.
+ // element: Current array element.
+ // elements_end: Array end.
+ // separator: Separator string.
+
+ // Copy the separator to the result.
+ // TODO(all): hoist next two instructions.
+ __ Ldrsw(string_length,
+ UntagSmiFieldMemOperand(separator, String::kLengthOffset));
+ __ Add(string, separator, SeqOneByteString::kHeaderSize - kHeapObjectTag);
+ __ CopyBytes(result_pos, string, string_length, scratch1);
+
+ __ Bind(&long_separator);
+ __ Ldr(string, MemOperand(element, kPointerSize, PostIndex));
+ __ Ldrsw(string_length,
+ UntagSmiFieldMemOperand(string, String::kLengthOffset));
+ __ Add(string, string, SeqOneByteString::kHeaderSize - kHeapObjectTag);
+ __ CopyBytes(result_pos, string, string_length, scratch1);
+ __ Cmp(element, elements_end);
+ __ B(lt, &long_separator_loop); // End while (element < elements_end).
+ __ B(&done);
+
+ __ Bind(&bailout);
+ // Returning undefined will force slower code to handle it.
+ __ LoadRoot(result, Heap::kUndefinedValueRootIndex);
+ __ Bind(&done);
+ context()->Plug(result);
+}
+
+
+void FullCodeGenerator::VisitCallRuntime(CallRuntime* expr) {
+ if (expr->function() != NULL &&
+ expr->function()->intrinsic_type == Runtime::INLINE) {
+ Comment cmnt(masm_, "[ InlineRuntimeCall");
+ EmitInlineRuntimeCall(expr);
+ return;
+ }
+
+ Comment cmnt(masm_, "[ CallRunTime");
+ ZoneList<Expression*>* args = expr->arguments();
+ int arg_count = args->length();
+
+ if (expr->is_jsruntime()) {
+ // Push the builtins object as the receiver.
+ __ Ldr(x10, GlobalObjectMemOperand());
+ __ Ldr(x0, FieldMemOperand(x10, GlobalObject::kBuiltinsOffset));
+ __ Push(x0);
+
+ // Load the function from the receiver.
+ Handle<String> name = expr->name();
+ __ Mov(x2, Operand(name));
+ CallLoadIC(NOT_CONTEXTUAL, expr->CallRuntimeFeedbackId());
+
+ // Push the target function under the receiver.
+ __ Pop(x10);
+ __ Push(x0, x10);
+
+    // Push the arguments ("left-to-right").
+ for (int i = 0; i < arg_count; i++) {
+ VisitForStackValue(args->at(i));
+ }
+
+ // Record source position of the IC call.
+ SetSourcePosition(expr->position());
+ CallFunctionStub stub(arg_count, NO_CALL_FUNCTION_FLAGS);
+ __ Peek(x1, (arg_count + 1) * kPointerSize);
+ __ CallStub(&stub);
+
+ // Restore context register.
+ __ Ldr(cp, MemOperand(fp, StandardFrameConstants::kContextOffset));
+
+ context()->DropAndPlug(1, x0);
+ } else {
+ // Push the arguments ("left-to-right").
+ for (int i = 0; i < arg_count; i++) {
+ VisitForStackValue(args->at(i));
+ }
+
+ // Call the C runtime function.
+ __ CallRuntime(expr->function(), arg_count);
+ context()->Plug(x0);
+ }
+}
+
+
+void FullCodeGenerator::VisitUnaryOperation(UnaryOperation* expr) {
+ switch (expr->op()) {
+ case Token::DELETE: {
+ Comment cmnt(masm_, "[ UnaryOperation (DELETE)");
+ Property* property = expr->expression()->AsProperty();
+ VariableProxy* proxy = expr->expression()->AsVariableProxy();
+
+ if (property != NULL) {
+ VisitForStackValue(property->obj());
+ VisitForStackValue(property->key());
+ __ Mov(x10, Smi::FromInt(strict_mode()));
+ __ Push(x10);
+ __ InvokeBuiltin(Builtins::DELETE, CALL_FUNCTION);
+ context()->Plug(x0);
+ } else if (proxy != NULL) {
+ Variable* var = proxy->var();
+ // Delete of an unqualified identifier is disallowed in strict mode
+ // but "delete this" is allowed.
+ ASSERT(strict_mode() == SLOPPY || var->is_this());
+ if (var->IsUnallocated()) {
+ __ Ldr(x12, GlobalObjectMemOperand());
+ __ Mov(x11, Operand(var->name()));
+ __ Mov(x10, Smi::FromInt(SLOPPY));
+ __ Push(x12, x11, x10);
+ __ InvokeBuiltin(Builtins::DELETE, CALL_FUNCTION);
+ context()->Plug(x0);
+ } else if (var->IsStackAllocated() || var->IsContextSlot()) {
+ // Result of deleting non-global, non-dynamic variables is false.
+ // The subexpression does not have side effects.
+ context()->Plug(var->is_this());
+ } else {
+ // Non-global variable. Call the runtime to try to delete from the
+ // context where the variable was introduced.
+ __ Mov(x2, Operand(var->name()));
+ __ Push(context_register(), x2);
+ __ CallRuntime(Runtime::kHiddenDeleteContextSlot, 2);
+ context()->Plug(x0);
+ }
+ } else {
+ // Result of deleting non-property, non-variable reference is true.
+ // The subexpression may have side effects.
+ VisitForEffect(expr->expression());
+ context()->Plug(true);
+ }
+      break;
+ }
+ case Token::VOID: {
+ Comment cmnt(masm_, "[ UnaryOperation (VOID)");
+ VisitForEffect(expr->expression());
+ context()->Plug(Heap::kUndefinedValueRootIndex);
+ break;
+ }
+ case Token::NOT: {
+ Comment cmnt(masm_, "[ UnaryOperation (NOT)");
+ if (context()->IsEffect()) {
+ // Unary NOT has no side effects so it's only necessary to visit the
+ // subexpression. Match the optimizing compiler by not branching.
+ VisitForEffect(expr->expression());
+ } else if (context()->IsTest()) {
+ const TestContext* test = TestContext::cast(context());
+ // The labels are swapped for the recursive call.
+ VisitForControl(expr->expression(),
+ test->false_label(),
+ test->true_label(),
+ test->fall_through());
+ context()->Plug(test->true_label(), test->false_label());
+ } else {
+ ASSERT(context()->IsAccumulatorValue() || context()->IsStackValue());
+ // TODO(jbramley): This could be much more efficient using (for
+ // example) the CSEL instruction.
+ Label materialize_true, materialize_false, done;
+ VisitForControl(expr->expression(),
+ &materialize_false,
+ &materialize_true,
+ &materialize_true);
+
+ __ Bind(&materialize_true);
+ PrepareForBailoutForId(expr->MaterializeTrueId(), NO_REGISTERS);
+ __ LoadRoot(result_register(), Heap::kTrueValueRootIndex);
+ __ B(&done);
+
+ __ Bind(&materialize_false);
+ PrepareForBailoutForId(expr->MaterializeFalseId(), NO_REGISTERS);
+ __ LoadRoot(result_register(), Heap::kFalseValueRootIndex);
+ __ B(&done);
+
+ __ Bind(&done);
+ if (context()->IsStackValue()) {
+ __ Push(result_register());
+ }
+ }
+ break;
+ }
+ case Token::TYPEOF: {
+ Comment cmnt(masm_, "[ UnaryOperation (TYPEOF)");
+ {
+ StackValueContext context(this);
+ VisitForTypeofValue(expr->expression());
+ }
+ __ CallRuntime(Runtime::kTypeof, 1);
+ context()->Plug(x0);
+ break;
+ }
+ default:
+ UNREACHABLE();
+ }
+}
+
+
+void FullCodeGenerator::VisitCountOperation(CountOperation* expr) {
+ ASSERT(expr->expression()->IsValidLeftHandSide());
+
+ Comment cmnt(masm_, "[ CountOperation");
+ SetSourcePosition(expr->position());
+
+ // Expression can only be a property, a global or a (parameter or local)
+ // slot.
+ enum LhsKind { VARIABLE, NAMED_PROPERTY, KEYED_PROPERTY };
+ LhsKind assign_type = VARIABLE;
+ Property* prop = expr->expression()->AsProperty();
+ // In case of a property we use the uninitialized expression context
+ // of the key to detect a named property.
+ if (prop != NULL) {
+ assign_type =
+ (prop->key()->IsPropertyName()) ? NAMED_PROPERTY : KEYED_PROPERTY;
+ }
+
+ // Evaluate expression and get value.
+ if (assign_type == VARIABLE) {
+ ASSERT(expr->expression()->AsVariableProxy()->var() != NULL);
+ AccumulatorValueContext context(this);
+ EmitVariableLoad(expr->expression()->AsVariableProxy());
+ } else {
+ // Reserve space for result of postfix operation.
+ if (expr->is_postfix() && !context()->IsEffect()) {
+ __ Push(xzr);
+ }
+ if (assign_type == NAMED_PROPERTY) {
+ // Put the object both on the stack and in the accumulator.
+ VisitForAccumulatorValue(prop->obj());
+ __ Push(x0);
+ EmitNamedPropertyLoad(prop);
+ } else {
+ // KEYED_PROPERTY
+ VisitForStackValue(prop->obj());
+ VisitForAccumulatorValue(prop->key());
+ __ Peek(x1, 0);
+ __ Push(x0);
+ EmitKeyedPropertyLoad(prop);
+ }
+ }
+
+ // We need a second deoptimization point after loading the value
+  // in case evaluating the property load may have a side effect.
+ if (assign_type == VARIABLE) {
+ PrepareForBailout(expr->expression(), TOS_REG);
+ } else {
+ PrepareForBailoutForId(prop->LoadId(), TOS_REG);
+ }
+
+ // Inline smi case if we are in a loop.
+ Label stub_call, done;
+ JumpPatchSite patch_site(masm_);
+
+ int count_value = expr->op() == Token::INC ? 1 : -1;
+ if (ShouldInlineSmiCase(expr->op())) {
+ Label slow;
+ patch_site.EmitJumpIfNotSmi(x0, &slow);
+
+ // Save result for postfix expressions.
+ if (expr->is_postfix()) {
+ if (!context()->IsEffect()) {
+ // Save the result on the stack. If we have a named or keyed property we
+ // store the result under the receiver that is currently on top of the
+ // stack.
+ switch (assign_type) {
+ case VARIABLE:
+ __ Push(x0);
+ break;
+ case NAMED_PROPERTY:
+ __ Poke(x0, kPointerSize);
+ break;
+ case KEYED_PROPERTY:
+ __ Poke(x0, kPointerSize * 2);
+ break;
+ }
+ }
+ }
+
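+    // Adds sets the overflow flag; if the smi addition did not overflow we
+    // are done. Otherwise fall through to undo it and call the stub.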
+ __ Adds(x0, x0, Smi::FromInt(count_value));
+ __ B(vc, &done);
+ // Call stub. Undo operation first.
+ __ Sub(x0, x0, Smi::FromInt(count_value));
+ __ B(&stub_call);
+ __ Bind(&slow);
+ }
+ ToNumberStub convert_stub;
+ __ CallStub(&convert_stub);
+
+ // Save result for postfix expressions.
+ if (expr->is_postfix()) {
+ if (!context()->IsEffect()) {
+ // Save the result on the stack. If we have a named or keyed property
+ // we store the result under the receiver that is currently on top
+ // of the stack.
+ switch (assign_type) {
+ case VARIABLE:
+ __ Push(x0);
+ break;
+ case NAMED_PROPERTY:
+ __ Poke(x0, kXRegSize);
+ break;
+ case KEYED_PROPERTY:
+ __ Poke(x0, 2 * kXRegSize);
+ break;
+ }
+ }
+ }
+
+ __ Bind(&stub_call);
+ __ Mov(x1, x0);
+ __ Mov(x0, Smi::FromInt(count_value));
+
+ // Record position before stub call.
+ SetSourcePosition(expr->position());
+
+ {
+ Assembler::BlockPoolsScope scope(masm_);
+ BinaryOpICStub stub(Token::ADD, NO_OVERWRITE);
+ CallIC(stub.GetCode(isolate()), expr->CountBinOpFeedbackId());
+ patch_site.EmitPatchInfo();
+ }
+ __ Bind(&done);
+
+ // Store the value returned in x0.
+ switch (assign_type) {
+ case VARIABLE:
+ if (expr->is_postfix()) {
+ { EffectContext context(this);
+ EmitVariableAssignment(expr->expression()->AsVariableProxy()->var(),
+ Token::ASSIGN);
+ PrepareForBailoutForId(expr->AssignmentId(), TOS_REG);
+ context.Plug(x0);
+ }
+        // For all contexts except EffectContext we have the result on
+        // top of the stack.
+ if (!context()->IsEffect()) {
+ context()->PlugTOS();
+ }
+ } else {
+ EmitVariableAssignment(expr->expression()->AsVariableProxy()->var(),
+ Token::ASSIGN);
+ PrepareForBailoutForId(expr->AssignmentId(), TOS_REG);
+ context()->Plug(x0);
+ }
+ break;
+ case NAMED_PROPERTY: {
+ __ Mov(x2, Operand(prop->key()->AsLiteral()->value()));
+ __ Pop(x1);
+ CallStoreIC(expr->CountStoreFeedbackId());
+ PrepareForBailoutForId(expr->AssignmentId(), TOS_REG);
+ if (expr->is_postfix()) {
+ if (!context()->IsEffect()) {
+ context()->PlugTOS();
+ }
+ } else {
+ context()->Plug(x0);
+ }
+ break;
+ }
+ case KEYED_PROPERTY: {
+ __ Pop(x1); // Key.
+ __ Pop(x2); // Receiver.
+ Handle<Code> ic = strict_mode() == SLOPPY
+ ? isolate()->builtins()->KeyedStoreIC_Initialize()
+ : isolate()->builtins()->KeyedStoreIC_Initialize_Strict();
+ CallIC(ic, expr->CountStoreFeedbackId());
+ PrepareForBailoutForId(expr->AssignmentId(), TOS_REG);
+ if (expr->is_postfix()) {
+ if (!context()->IsEffect()) {
+ context()->PlugTOS();
+ }
+ } else {
+ context()->Plug(x0);
+ }
+ break;
+ }
+ }
+}
+
+
+void FullCodeGenerator::VisitForTypeofValue(Expression* expr) {
+ ASSERT(!context()->IsEffect());
+ ASSERT(!context()->IsTest());
+ VariableProxy* proxy = expr->AsVariableProxy();
+ if (proxy != NULL && proxy->var()->IsUnallocated()) {
+ Comment cmnt(masm_, "Global variable");
+ __ Ldr(x0, GlobalObjectMemOperand());
+ __ Mov(x2, Operand(proxy->name()));
+ // Use a regular load, not a contextual load, to avoid a reference
+ // error.
+ CallLoadIC(NOT_CONTEXTUAL);
+ PrepareForBailout(expr, TOS_REG);
+ context()->Plug(x0);
+ } else if (proxy != NULL && proxy->var()->IsLookupSlot()) {
+ Label done, slow;
+
+ // Generate code for loading from variables potentially shadowed
+ // by eval-introduced variables.
+ EmitDynamicLookupFastCase(proxy->var(), INSIDE_TYPEOF, &slow, &done);
+
+ __ Bind(&slow);
+ __ Mov(x0, Operand(proxy->name()));
+ __ Push(cp, x0);
+ __ CallRuntime(Runtime::kHiddenLoadContextSlotNoReferenceError, 2);
+ PrepareForBailout(expr, TOS_REG);
+ __ Bind(&done);
+
+ context()->Plug(x0);
+ } else {
+ // This expression cannot throw a reference error at the top level.
+ VisitInDuplicateContext(expr);
+ }
+}
+
+
+void FullCodeGenerator::EmitLiteralCompareTypeof(Expression* expr,
+ Expression* sub_expr,
+ Handle<String> check) {
+ ASM_LOCATION("FullCodeGenerator::EmitLiteralCompareTypeof");
+ Comment cmnt(masm_, "[ EmitLiteralCompareTypeof");
+ Label materialize_true, materialize_false;
+ Label* if_true = NULL;
+ Label* if_false = NULL;
+ Label* fall_through = NULL;
+ context()->PrepareTest(&materialize_true, &materialize_false,
+ &if_true, &if_false, &fall_through);
+
+ { AccumulatorValueContext context(this);
+ VisitForTypeofValue(sub_expr);
+ }
+ PrepareForBailoutBeforeSplit(expr, true, if_true, if_false);
+
+ if (check->Equals(isolate()->heap()->number_string())) {
+ ASM_LOCATION("FullCodeGenerator::EmitLiteralCompareTypeof number_string");
+ __ JumpIfSmi(x0, if_true);
+ __ Ldr(x0, FieldMemOperand(x0, HeapObject::kMapOffset));
+ __ CompareRoot(x0, Heap::kHeapNumberMapRootIndex);
+ Split(eq, if_true, if_false, fall_through);
+ } else if (check->Equals(isolate()->heap()->string_string())) {
+ ASM_LOCATION("FullCodeGenerator::EmitLiteralCompareTypeof string_string");
+ __ JumpIfSmi(x0, if_false);
+ // Check for undetectable objects => false.
+ __ JumpIfObjectType(x0, x0, x1, FIRST_NONSTRING_TYPE, if_false, ge);
+ __ Ldrb(x1, FieldMemOperand(x0, Map::kBitFieldOffset));
+ __ TestAndSplit(x1, 1 << Map::kIsUndetectable, if_true, if_false,
+ fall_through);
+ } else if (check->Equals(isolate()->heap()->symbol_string())) {
+ ASM_LOCATION("FullCodeGenerator::EmitLiteralCompareTypeof symbol_string");
+ __ JumpIfSmi(x0, if_false);
+ __ CompareObjectType(x0, x0, x1, SYMBOL_TYPE);
+ Split(eq, if_true, if_false, fall_through);
+ } else if (check->Equals(isolate()->heap()->boolean_string())) {
+ ASM_LOCATION("FullCodeGenerator::EmitLiteralCompareTypeof boolean_string");
+ __ JumpIfRoot(x0, Heap::kTrueValueRootIndex, if_true);
+ __ CompareRoot(x0, Heap::kFalseValueRootIndex);
+ Split(eq, if_true, if_false, fall_through);
+ } else if (FLAG_harmony_typeof &&
+ check->Equals(isolate()->heap()->null_string())) {
+ ASM_LOCATION("FullCodeGenerator::EmitLiteralCompareTypeof null_string");
+ __ CompareRoot(x0, Heap::kNullValueRootIndex);
+ Split(eq, if_true, if_false, fall_through);
+ } else if (check->Equals(isolate()->heap()->undefined_string())) {
+ ASM_LOCATION(
+ "FullCodeGenerator::EmitLiteralCompareTypeof undefined_string");
+ __ JumpIfRoot(x0, Heap::kUndefinedValueRootIndex, if_true);
+ __ JumpIfSmi(x0, if_false);
+ // Check for undetectable objects => true.
+ __ Ldr(x0, FieldMemOperand(x0, HeapObject::kMapOffset));
+ __ Ldrb(x1, FieldMemOperand(x0, Map::kBitFieldOffset));
+ __ TestAndSplit(x1, 1 << Map::kIsUndetectable, if_false, if_true,
+ fall_through);
+ } else if (check->Equals(isolate()->heap()->function_string())) {
+ ASM_LOCATION("FullCodeGenerator::EmitLiteralCompareTypeof function_string");
+ __ JumpIfSmi(x0, if_false);
+ STATIC_ASSERT(NUM_OF_CALLABLE_SPEC_OBJECT_TYPES == 2);
+ __ JumpIfObjectType(x0, x10, x11, JS_FUNCTION_TYPE, if_true);
+ __ CompareAndSplit(x11, JS_FUNCTION_PROXY_TYPE, eq, if_true, if_false,
+ fall_through);
+
+ } else if (check->Equals(isolate()->heap()->object_string())) {
+ ASM_LOCATION("FullCodeGenerator::EmitLiteralCompareTypeof object_string");
+ __ JumpIfSmi(x0, if_false);
+ if (!FLAG_harmony_typeof) {
+ __ JumpIfRoot(x0, Heap::kNullValueRootIndex, if_true);
+ }
+ // Check for JS objects => true.
+ Register map = x10;
+ __ JumpIfObjectType(x0, map, x11, FIRST_NONCALLABLE_SPEC_OBJECT_TYPE,
+ if_false, lt);
+ __ CompareInstanceType(map, x11, LAST_NONCALLABLE_SPEC_OBJECT_TYPE);
+ __ B(gt, if_false);
+ // Check for undetectable objects => false.
+ __ Ldrb(x10, FieldMemOperand(map, Map::kBitFieldOffset));
+
+ __ TestAndSplit(x10, 1 << Map::kIsUndetectable, if_true, if_false,
+ fall_through);
+
+ } else {
+ ASM_LOCATION("FullCodeGenerator::EmitLiteralCompareTypeof other");
+ if (if_false != fall_through) __ B(if_false);
+ }
+ context()->Plug(if_true, if_false);
+}
+
+
+void FullCodeGenerator::VisitCompareOperation(CompareOperation* expr) {
+ Comment cmnt(masm_, "[ CompareOperation");
+ SetSourcePosition(expr->position());
+
+ // Try to generate an optimized comparison with a literal value.
+ // TODO(jbramley): This only checks common values like NaN or undefined.
+ // Should it also handle ARM64 immediate operands?
+ if (TryLiteralCompare(expr)) {
+ return;
+ }
+
+ // Assign labels according to context()->PrepareTest.
+ Label materialize_true;
+ Label materialize_false;
+ Label* if_true = NULL;
+ Label* if_false = NULL;
+ Label* fall_through = NULL;
+ context()->PrepareTest(&materialize_true, &materialize_false,
+ &if_true, &if_false, &fall_through);
+
+ Token::Value op = expr->op();
+ VisitForStackValue(expr->left());
+ switch (op) {
+ case Token::IN:
+ VisitForStackValue(expr->right());
+ __ InvokeBuiltin(Builtins::IN, CALL_FUNCTION);
+ PrepareForBailoutBeforeSplit(expr, false, NULL, NULL);
+ __ CompareRoot(x0, Heap::kTrueValueRootIndex);
+ Split(eq, if_true, if_false, fall_through);
+ break;
+
+ case Token::INSTANCEOF: {
+ VisitForStackValue(expr->right());
+ InstanceofStub stub(InstanceofStub::kNoFlags);
+ __ CallStub(&stub);
+ PrepareForBailoutBeforeSplit(expr, true, if_true, if_false);
+ // The stub returns 0 for true.
+ __ CompareAndSplit(x0, 0, eq, if_true, if_false, fall_through);
+ break;
+ }
+
+ default: {
+ VisitForAccumulatorValue(expr->right());
+ Condition cond = CompareIC::ComputeCondition(op);
+
+ // Pop the stack value.
+ __ Pop(x1);
+
+ JumpPatchSite patch_site(masm_);
+ if (ShouldInlineSmiCase(op)) {
+ Label slow_case;
+ patch_site.EmitJumpIfEitherNotSmi(x0, x1, &slow_case);
+ __ Cmp(x1, x0);
+ Split(cond, if_true, if_false, NULL);
+ __ Bind(&slow_case);
+ }
+
+ // Record position and call the compare IC.
+ SetSourcePosition(expr->position());
+ Handle<Code> ic = CompareIC::GetUninitialized(isolate(), op);
+ CallIC(ic, expr->CompareOperationFeedbackId());
+ patch_site.EmitPatchInfo();
+ PrepareForBailoutBeforeSplit(expr, true, if_true, if_false);
+ __ CompareAndSplit(x0, 0, cond, if_true, if_false, fall_through);
+ }
+ }
+
+ // Convert the result of the comparison into one expected for this
+ // expression's context.
+ context()->Plug(if_true, if_false);
+}
+
+
+void FullCodeGenerator::EmitLiteralCompareNil(CompareOperation* expr,
+ Expression* sub_expr,
+ NilValue nil) {
+ ASM_LOCATION("FullCodeGenerator::EmitLiteralCompareNil");
+ Label materialize_true, materialize_false;
+ Label* if_true = NULL;
+ Label* if_false = NULL;
+ Label* fall_through = NULL;
+ context()->PrepareTest(&materialize_true, &materialize_false,
+ &if_true, &if_false, &fall_through);
+
+ VisitForAccumulatorValue(sub_expr);
+ PrepareForBailoutBeforeSplit(expr, true, if_true, if_false);
+
+ if (expr->op() == Token::EQ_STRICT) {
+ Heap::RootListIndex nil_value = nil == kNullValue ?
+ Heap::kNullValueRootIndex :
+ Heap::kUndefinedValueRootIndex;
+ __ CompareRoot(x0, nil_value);
+ Split(eq, if_true, if_false, fall_through);
+ } else {
+ Handle<Code> ic = CompareNilICStub::GetUninitialized(isolate(), nil);
+ CallIC(ic, expr->CompareOperationFeedbackId());
+ __ CompareAndSplit(x0, 0, ne, if_true, if_false, fall_through);
+ }
+
+ context()->Plug(if_true, if_false);
+}
+
+
+void FullCodeGenerator::VisitThisFunction(ThisFunction* expr) {
+ __ Ldr(x0, MemOperand(fp, JavaScriptFrameConstants::kFunctionOffset));
+ context()->Plug(x0);
+}
+
+
+void FullCodeGenerator::VisitYield(Yield* expr) {
+ Comment cmnt(masm_, "[ Yield");
+ // Evaluate yielded value first; the initial iterator definition depends on
+ // this. It stays on the stack while we update the iterator.
+ VisitForStackValue(expr->expression());
+
+ // TODO(jbramley): Tidy this up once the merge is done, using named registers
+  // and suchlike. The implementation has changed a little on bleeding_edge,
+  // so I don't want to spend too much time on it now.
+
+ switch (expr->yield_kind()) {
+ case Yield::SUSPEND:
+ // Pop value from top-of-stack slot; box result into result register.
+ EmitCreateIteratorResult(false);
+ __ Push(result_register());
+ // Fall through.
+ case Yield::INITIAL: {
+ Label suspend, continuation, post_runtime, resume;
+
+ __ B(&suspend);
+
+ // TODO(jbramley): This label is bound here because the following code
+ // looks at its pos(). Is it possible to do something more efficient here,
+ // perhaps using Adr?
+ __ Bind(&continuation);
+ __ B(&resume);
+
+ __ Bind(&suspend);
+ VisitForAccumulatorValue(expr->generator_object());
+ ASSERT((continuation.pos() > 0) && Smi::IsValid(continuation.pos()));
+ __ Mov(x1, Smi::FromInt(continuation.pos()));
+ __ Str(x1, FieldMemOperand(x0, JSGeneratorObject::kContinuationOffset));
+ __ Str(cp, FieldMemOperand(x0, JSGeneratorObject::kContextOffset));
+ __ Mov(x1, cp);
+ __ RecordWriteField(x0, JSGeneratorObject::kContextOffset, x1, x2,
+ kLRHasBeenSaved, kDontSaveFPRegs);
+ __ Add(x1, fp, StandardFrameConstants::kExpressionsOffset);
+ __ Cmp(__ StackPointer(), x1);
+ __ B(eq, &post_runtime);
+ __ Push(x0); // generator object
+ __ CallRuntime(Runtime::kHiddenSuspendJSGeneratorObject, 1);
+ __ Ldr(cp, MemOperand(fp, StandardFrameConstants::kContextOffset));
+ __ Bind(&post_runtime);
+ __ Pop(result_register());
+ EmitReturnSequence();
+
+ __ Bind(&resume);
+ context()->Plug(result_register());
+ break;
+ }
+
+ case Yield::FINAL: {
+ VisitForAccumulatorValue(expr->generator_object());
+ __ Mov(x1, Smi::FromInt(JSGeneratorObject::kGeneratorClosed));
+ __ Str(x1, FieldMemOperand(result_register(),
+ JSGeneratorObject::kContinuationOffset));
+ // Pop value from top-of-stack slot, box result into result register.
+ EmitCreateIteratorResult(true);
+ EmitUnwindBeforeReturn();
+ EmitReturnSequence();
+ break;
+ }
+
+ case Yield::DELEGATING: {
+ VisitForStackValue(expr->generator_object());
+
+ // Initial stack layout is as follows:
+ // [sp + 1 * kPointerSize] iter
+ // [sp + 0 * kPointerSize] g
+
+ Label l_catch, l_try, l_suspend, l_continuation, l_resume;
+ Label l_next, l_call, l_loop;
+ // Initial send value is undefined.
+ __ LoadRoot(x0, Heap::kUndefinedValueRootIndex);
+ __ B(&l_next);
+
+ // catch (e) { receiver = iter; f = 'throw'; arg = e; goto l_call; }
+ __ Bind(&l_catch);
+ handler_table()->set(expr->index(), Smi::FromInt(l_catch.pos()));
+ __ LoadRoot(x2, Heap::kthrow_stringRootIndex); // "throw"
+ __ Peek(x3, 1 * kPointerSize); // iter
+ __ Push(x2, x3, x0); // "throw", iter, except
+ __ B(&l_call);
+
+ // try { received = %yield result }
+ // Shuffle the received result above a try handler and yield it without
+ // re-boxing.
+ __ Bind(&l_try);
+ __ Pop(x0); // result
+ __ PushTryHandler(StackHandler::CATCH, expr->index());
+ const int handler_size = StackHandlerConstants::kSize;
+ __ Push(x0); // result
+ __ B(&l_suspend);
+
+ // TODO(jbramley): This label is bound here because the following code
+ // looks at its pos(). Is it possible to do something more efficient here,
+ // perhaps using Adr?
+ __ Bind(&l_continuation);
+ __ B(&l_resume);
+
+ __ Bind(&l_suspend);
+ const int generator_object_depth = kPointerSize + handler_size;
+ __ Peek(x0, generator_object_depth);
+ __ Push(x0); // g
+ ASSERT((l_continuation.pos() > 0) && Smi::IsValid(l_continuation.pos()));
+ __ Mov(x1, Smi::FromInt(l_continuation.pos()));
+ __ Str(x1, FieldMemOperand(x0, JSGeneratorObject::kContinuationOffset));
+ __ Str(cp, FieldMemOperand(x0, JSGeneratorObject::kContextOffset));
+ __ Mov(x1, cp);
+ __ RecordWriteField(x0, JSGeneratorObject::kContextOffset, x1, x2,
+ kLRHasBeenSaved, kDontSaveFPRegs);
+ __ CallRuntime(Runtime::kHiddenSuspendJSGeneratorObject, 1);
+ __ Ldr(cp, MemOperand(fp, StandardFrameConstants::kContextOffset));
+ __ Pop(x0); // result
+ EmitReturnSequence();
+ __ Bind(&l_resume); // received in x0
+ __ PopTryHandler();
+
+ // receiver = iter; f = 'next'; arg = received;
+ __ Bind(&l_next);
+ __ LoadRoot(x2, Heap::knext_stringRootIndex); // "next"
+ __ Peek(x3, 1 * kPointerSize); // iter
+ __ Push(x2, x3, x0); // "next", iter, received
+
+ // result = receiver[f](arg);
+ __ Bind(&l_call);
+ __ Peek(x1, 1 * kPointerSize);
+ __ Peek(x0, 2 * kPointerSize);
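+      // The stack holds the method name, the iterator and the argument. Load
+      // the iterator (receiver) into x1 and the name (key) into x0 so the
+      // keyed load below fetches iterator[name].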
+ Handle<Code> ic = isolate()->builtins()->KeyedLoadIC_Initialize();
+ CallIC(ic, TypeFeedbackId::None());
+ __ Mov(x1, x0);
+ __ Poke(x1, 2 * kPointerSize);
+ CallFunctionStub stub(1, CALL_AS_METHOD);
+ __ CallStub(&stub);
+
+ __ Ldr(cp, MemOperand(fp, StandardFrameConstants::kContextOffset));
+ __ Drop(1); // The function is still on the stack; drop it.
+
+ // if (!result.done) goto l_try;
+ __ Bind(&l_loop);
+ __ Push(x0); // save result
+ __ LoadRoot(x2, Heap::kdone_stringRootIndex); // "done"
+ CallLoadIC(NOT_CONTEXTUAL); // result.done in x0
+ // The ToBooleanStub argument (result.done) is in x0.
+ Handle<Code> bool_ic = ToBooleanStub::GetUninitialized(isolate());
+ CallIC(bool_ic);
+ __ Cbz(x0, &l_try);
+
+ // result.value
+ __ Pop(x0); // result
+ __ LoadRoot(x2, Heap::kvalue_stringRootIndex); // "value"
+ CallLoadIC(NOT_CONTEXTUAL); // result.value in x0
+ context()->DropAndPlug(2, x0); // drop iter and g
+ break;
+ }
+ }
+}
+
+
+void FullCodeGenerator::EmitGeneratorResume(Expression *generator,
+ Expression *value,
+ JSGeneratorObject::ResumeMode resume_mode) {
+ ASM_LOCATION("FullCodeGenerator::EmitGeneratorResume");
+ Register value_reg = x0;
+ Register generator_object = x1;
+ Register the_hole = x2;
+ Register operand_stack_size = w3;
+ Register function = x4;
+
+ // The value stays in x0, and is ultimately read by the resumed generator, as
+ // if CallRuntime(Runtime::kHiddenSuspendJSGeneratorObject) returned it. Or it
+  // is read to throw the value when the resumed generator is already closed.
+  // x1 will hold the generator object until the activation has been resumed.
+ VisitForStackValue(generator);
+ VisitForAccumulatorValue(value);
+ __ Pop(generator_object);
+
+ // Check generator state.
+ Label wrong_state, closed_state, done;
+ __ Ldr(x10, FieldMemOperand(generator_object,
+ JSGeneratorObject::kContinuationOffset));
+ STATIC_ASSERT(JSGeneratorObject::kGeneratorExecuting < 0);
+ STATIC_ASSERT(JSGeneratorObject::kGeneratorClosed == 0);
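+  // A continuation of zero means the generator is closed, and a negative
+  // value means it is currently executing, so it cannot be resumed.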
+ __ CompareAndBranch(x10, Smi::FromInt(0), eq, &closed_state);
+ __ CompareAndBranch(x10, Smi::FromInt(0), lt, &wrong_state);
+
+ // Load suspended function and context.
+ __ Ldr(cp, FieldMemOperand(generator_object,
+ JSGeneratorObject::kContextOffset));
+ __ Ldr(function, FieldMemOperand(generator_object,
+ JSGeneratorObject::kFunctionOffset));
+
+ // Load receiver and store as the first argument.
+ __ Ldr(x10, FieldMemOperand(generator_object,
+ JSGeneratorObject::kReceiverOffset));
+ __ Push(x10);
+
+ // Push holes for the rest of the arguments to the generator function.
+ __ Ldr(x10, FieldMemOperand(function, JSFunction::kSharedFunctionInfoOffset));
+
+ // The number of arguments is stored as an int32_t, and -1 is a marker
+ // (SharedFunctionInfo::kDontAdaptArgumentsSentinel), so we need sign
+ // extension to correctly handle it. However, in this case, we operate on
+ // 32-bit W registers, so extension isn't required.
+ __ Ldr(w10, FieldMemOperand(x10,
+ SharedFunctionInfo::kFormalParameterCountOffset));
+ __ LoadRoot(the_hole, Heap::kTheHoleValueRootIndex);
+ __ PushMultipleTimes(the_hole, w10);
+
+ // Enter a new JavaScript frame, and initialize its slots as they were when
+ // the generator was suspended.
+ Label resume_frame;
+ __ Bl(&resume_frame);
+ __ B(&done);
+
+ __ Bind(&resume_frame);
+ __ Push(lr, // Return address.
+ fp, // Caller's frame pointer.
+ cp, // Callee's context.
+ function); // Callee's JS Function.
+ __ Add(fp, __ StackPointer(), kPointerSize * 2);
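+  // fp now points at the saved caller fp, as in a standard JavaScript frame.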
+
+ // Load and untag the operand stack size.
+ __ Ldr(x10, FieldMemOperand(generator_object,
+ JSGeneratorObject::kOperandStackOffset));
+ __ Ldr(operand_stack_size,
+ UntagSmiFieldMemOperand(x10, FixedArray::kLengthOffset));
+
+ // If we are sending a value and there is no operand stack, we can jump back
+ // in directly.
+ if (resume_mode == JSGeneratorObject::NEXT) {
+ Label slow_resume;
+ __ Cbnz(operand_stack_size, &slow_resume);
+ __ Ldr(x10, FieldMemOperand(function, JSFunction::kCodeEntryOffset));
+ __ Ldrsw(x11,
+ UntagSmiFieldMemOperand(generator_object,
+ JSGeneratorObject::kContinuationOffset));
+ __ Add(x10, x10, x11);
+ __ Mov(x12, Smi::FromInt(JSGeneratorObject::kGeneratorExecuting));
+ __ Str(x12, FieldMemOperand(generator_object,
+ JSGeneratorObject::kContinuationOffset));
+ __ Br(x10);
+
+ __ Bind(&slow_resume);
+ }
+
+ // Otherwise, we push holes for the operand stack and call the runtime to fix
+ // up the stack and the handlers.
+ __ PushMultipleTimes(the_hole, operand_stack_size);
+
+ __ Mov(x10, Smi::FromInt(resume_mode));
+ __ Push(generator_object, result_register(), x10);
+ __ CallRuntime(Runtime::kHiddenResumeJSGeneratorObject, 3);
+ // Not reached: the runtime call returns elsewhere.
+ __ Unreachable();
+
+ // Reach here when generator is closed.
+ __ Bind(&closed_state);
+ if (resume_mode == JSGeneratorObject::NEXT) {
+ // Return completed iterator result when generator is closed.
+ __ LoadRoot(x10, Heap::kUndefinedValueRootIndex);
+ __ Push(x10);
+ // Pop value from top-of-stack slot; box result into result register.
+ EmitCreateIteratorResult(true);
+ } else {
+ // Throw the provided value.
+ __ Push(value_reg);
+ __ CallRuntime(Runtime::kHiddenThrow, 1);
+ }
+ __ B(&done);
+
+ // Throw error if we attempt to operate on a running generator.
+ __ Bind(&wrong_state);
+ __ Push(generator_object);
+ __ CallRuntime(Runtime::kHiddenThrowGeneratorStateError, 1);
+
+ __ Bind(&done);
+ context()->Plug(result_register());
+}
+
+
+void FullCodeGenerator::EmitCreateIteratorResult(bool done) {
+ Label gc_required;
+ Label allocated;
+
+ Handle<Map> map(isolate()->native_context()->generator_result_map());
+
+ // Allocate and populate an object with this form: { value: VAL, done: DONE }
+
+ Register result = x0;
+ __ Allocate(map->instance_size(), result, x10, x11, &gc_required, TAG_OBJECT);
+ __ B(&allocated);
+
+ __ Bind(&gc_required);
+ __ Push(Smi::FromInt(map->instance_size()));
+ __ CallRuntime(Runtime::kHiddenAllocateInNewSpace, 1);
+ __ Ldr(context_register(),
+ MemOperand(fp, StandardFrameConstants::kContextOffset));
+
+ __ Bind(&allocated);
+ Register map_reg = x1;
+ Register result_value = x2;
+ Register boolean_done = x3;
+ Register empty_fixed_array = x4;
+ __ Mov(map_reg, Operand(map));
+ __ Pop(result_value);
+ __ Mov(boolean_done, Operand(isolate()->factory()->ToBoolean(done)));
+ __ Mov(empty_fixed_array, Operand(isolate()->factory()->empty_fixed_array()));
+ ASSERT_EQ(map->instance_size(), 5 * kPointerSize);
+ // TODO(jbramley): Use Stp if possible.
+ __ Str(map_reg, FieldMemOperand(result, HeapObject::kMapOffset));
+ __ Str(empty_fixed_array,
+ FieldMemOperand(result, JSObject::kPropertiesOffset));
+ __ Str(empty_fixed_array, FieldMemOperand(result, JSObject::kElementsOffset));
+ __ Str(result_value,
+ FieldMemOperand(result,
+ JSGeneratorObject::kResultValuePropertyOffset));
+ __ Str(boolean_done,
+ FieldMemOperand(result,
+ JSGeneratorObject::kResultDonePropertyOffset));
+
+ // Only the value field needs a write barrier, as the other values are in the
+ // root set.
+ __ RecordWriteField(result, JSGeneratorObject::kResultValuePropertyOffset,
+ x10, x11, kLRHasBeenSaved, kDontSaveFPRegs);
+}
+
+
+// TODO(all): I don't like this method.
+// It seems to me that in too many places x0 is used in place of this.
+// Also, this function is not suitable for all places where x0 should be
+// abstracted (e.g. when used as an argument). But some places assume that the
+// first argument register is x0, and use this function instead.
+// Considering that most of the register allocation is hard-coded in the
+// FullCodeGen, that it is unlikely we will need to change it extensively, and
+// that abstracting the allocation through functions would not yield any
+// performance benefit, I think the existence of this function is debatable.
+Register FullCodeGenerator::result_register() {
+ return x0;
+}
+
+
+Register FullCodeGenerator::context_register() {
+ return cp;
+}
+
+
+void FullCodeGenerator::StoreToFrameField(int frame_offset, Register value) {
+ ASSERT(POINTER_SIZE_ALIGN(frame_offset) == frame_offset);
+ __ Str(value, MemOperand(fp, frame_offset));
+}
+
+
+void FullCodeGenerator::LoadContextField(Register dst, int context_index) {
+ __ Ldr(dst, ContextMemOperand(cp, context_index));
+}
+
+
+void FullCodeGenerator::PushFunctionArgumentForContextAllocation() {
+ Scope* declaration_scope = scope()->DeclarationScope();
+ if (declaration_scope->is_global_scope() ||
+ declaration_scope->is_module_scope()) {
+ // Contexts nested in the native context have a canonical empty function
+ // as their closure, not the anonymous closure containing the global
+ // code. Pass a smi sentinel and let the runtime look up the empty
+ // function.
+ ASSERT(kSmiTag == 0);
+ __ Push(xzr);
+ } else if (declaration_scope->is_eval_scope()) {
+ // Contexts created by a call to eval have the same closure as the
+ // context calling eval, not the anonymous closure containing the eval
+ // code. Fetch it from the context.
+ __ Ldr(x10, ContextMemOperand(cp, Context::CLOSURE_INDEX));
+ __ Push(x10);
+ } else {
+ ASSERT(declaration_scope->is_function_scope());
+ __ Ldr(x10, MemOperand(fp, JavaScriptFrameConstants::kFunctionOffset));
+ __ Push(x10);
+ }
+}
+
+
+void FullCodeGenerator::EnterFinallyBlock() {
+ ASM_LOCATION("FullCodeGenerator::EnterFinallyBlock");
+ ASSERT(!result_register().is(x10));
+ // Preserve the result register while executing finally block.
+  // Also cook the return address in lr: push it on the stack as a
+  // smi-encoded delta from the code object.
+ __ Sub(x10, lr, Operand(masm_->CodeObject()));
+ __ SmiTag(x10);
+ __ Push(result_register(), x10);
+
+ // Store pending message while executing finally block.
+ ExternalReference pending_message_obj =
+ ExternalReference::address_of_pending_message_obj(isolate());
+ __ Mov(x10, pending_message_obj);
+ __ Ldr(x10, MemOperand(x10));
+
+ ExternalReference has_pending_message =
+ ExternalReference::address_of_has_pending_message(isolate());
+ __ Mov(x11, has_pending_message);
+ __ Ldr(x11, MemOperand(x11));
+ __ SmiTag(x11);
+
+ __ Push(x10, x11);
+
+ ExternalReference pending_message_script =
+ ExternalReference::address_of_pending_message_script(isolate());
+ __ Mov(x10, pending_message_script);
+ __ Ldr(x10, MemOperand(x10));
+ __ Push(x10);
+}
+
+
+void FullCodeGenerator::ExitFinallyBlock() {
+ ASM_LOCATION("FullCodeGenerator::ExitFinallyBlock");
+ ASSERT(!result_register().is(x10));
+
+ // Restore pending message from stack.
+ __ Pop(x10, x11, x12);
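+  // x10 is the pending message script, x11 the has-pending-message flag (as
+  // a smi) and x12 the pending message object, mirroring EnterFinallyBlock.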
+ ExternalReference pending_message_script =
+ ExternalReference::address_of_pending_message_script(isolate());
+ __ Mov(x13, pending_message_script);
+ __ Str(x10, MemOperand(x13));
+
+ __ SmiUntag(x11);
+ ExternalReference has_pending_message =
+ ExternalReference::address_of_has_pending_message(isolate());
+ __ Mov(x13, has_pending_message);
+ __ Str(x11, MemOperand(x13));
+
+ ExternalReference pending_message_obj =
+ ExternalReference::address_of_pending_message_obj(isolate());
+ __ Mov(x13, pending_message_obj);
+ __ Str(x12, MemOperand(x13));
+
+ // Restore result register and cooked return address from the stack.
+ __ Pop(x10, result_register());
+
+ // Uncook the return address (see EnterFinallyBlock).
+ __ SmiUntag(x10);
+ __ Add(x11, x10, Operand(masm_->CodeObject()));
+ __ Br(x11);
+}
+
+
+#undef __
+
+
+void BackEdgeTable::PatchAt(Code* unoptimized_code,
+ Address pc,
+ BackEdgeState target_state,
+ Code* replacement_code) {
+  // Patch the jump or nop at the start of the back edge sequence.
+ Address branch_address = pc - 3 * kInstructionSize;
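+  // The back edge sequence ends with "ldr x16, <address>; blr x16", so the
+  // branch or nop to patch is three instructions before the recorded pc.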
+ PatchingAssembler patcher(branch_address, 1);
+
+ ASSERT(Instruction::Cast(branch_address)
+ ->IsNop(Assembler::INTERRUPT_CODE_NOP) ||
+ (Instruction::Cast(branch_address)->IsCondBranchImm() &&
+ Instruction::Cast(branch_address)->ImmPCOffset() ==
+ 6 * kInstructionSize));
+
+ switch (target_state) {
+ case INTERRUPT:
+ // <decrement profiling counter>
+ // .. .. .. .. b.pl ok
+ // .. .. .. .. ldr x16, pc+<interrupt stub address>
+ // .. .. .. .. blr x16
+ // ... more instructions.
+ // ok-label
+ // Jump offset is 6 instructions.
+ patcher.b(6, pl);
+ break;
+ case ON_STACK_REPLACEMENT:
+ case OSR_AFTER_STACK_CHECK:
+ // <decrement profiling counter>
+ // .. .. .. .. mov x0, x0 (NOP)
+ // .. .. .. .. ldr x16, pc+<on-stack replacement address>
+ // .. .. .. .. blr x16
+ patcher.nop(Assembler::INTERRUPT_CODE_NOP);
+ break;
+ }
+
+ // Replace the call address.
+ Instruction* load = Instruction::Cast(pc)->preceding(2);
+ Address interrupt_address_pointer =
+ reinterpret_cast<Address>(load) + load->ImmPCOffset();
+ ASSERT((Memory::uint64_at(interrupt_address_pointer) ==
+ reinterpret_cast<uint64_t>(unoptimized_code->GetIsolate()
+ ->builtins()
+ ->OnStackReplacement()
+ ->entry())) ||
+ (Memory::uint64_at(interrupt_address_pointer) ==
+ reinterpret_cast<uint64_t>(unoptimized_code->GetIsolate()
+ ->builtins()
+ ->InterruptCheck()
+ ->entry())) ||
+ (Memory::uint64_at(interrupt_address_pointer) ==
+ reinterpret_cast<uint64_t>(unoptimized_code->GetIsolate()
+ ->builtins()
+ ->OsrAfterStackCheck()
+                                     ->entry())));
+ Memory::uint64_at(interrupt_address_pointer) =
+ reinterpret_cast<uint64_t>(replacement_code->entry());
+
+ unoptimized_code->GetHeap()->incremental_marking()->RecordCodeTargetPatch(
+ unoptimized_code, reinterpret_cast<Address>(load), replacement_code);
+}
+
+
+BackEdgeTable::BackEdgeState BackEdgeTable::GetBackEdgeState(
+ Isolate* isolate,
+ Code* unoptimized_code,
+ Address pc) {
+ // TODO(jbramley): There should be some extra assertions here (as in the ARM
+ // back-end), but this function is gone in bleeding_edge so it might not
+ // matter anyway.
+ Instruction* jump_or_nop = Instruction::Cast(pc)->preceding(3);
+
+ if (jump_or_nop->IsNop(Assembler::INTERRUPT_CODE_NOP)) {
+ Instruction* load = Instruction::Cast(pc)->preceding(2);
+ uint64_t entry = Memory::uint64_at(reinterpret_cast<Address>(load) +
+ load->ImmPCOffset());
+ if (entry == reinterpret_cast<uint64_t>(
+ isolate->builtins()->OnStackReplacement()->entry())) {
+ return ON_STACK_REPLACEMENT;
+ } else if (entry == reinterpret_cast<uint64_t>(
+ isolate->builtins()->OsrAfterStackCheck()->entry())) {
+ return OSR_AFTER_STACK_CHECK;
+ } else {
+ UNREACHABLE();
+ }
+ }
+
+ return INTERRUPT;
+}
+
+
+#define __ ACCESS_MASM(masm())
+
+
+FullCodeGenerator::NestedStatement* FullCodeGenerator::TryFinally::Exit(
+ int* stack_depth,
+ int* context_length) {
+ ASM_LOCATION("FullCodeGenerator::TryFinally::Exit");
+ // The macros used here must preserve the result register.
+
+  // Because the handler block contains the context of the finally code, we
+  // can restore it directly from there rather than iteratively unwinding
+  // contexts via their previous links.
+ __ Drop(*stack_depth); // Down to the handler block.
+ if (*context_length > 0) {
+ // Restore the context to its dedicated register and the stack.
+ __ Peek(cp, StackHandlerConstants::kContextOffset);
+ __ Str(cp, MemOperand(fp, StandardFrameConstants::kContextOffset));
+ }
+ __ PopTryHandler();
+ __ Bl(finally_entry_);
+
+ *stack_depth = 0;
+ *context_length = 0;
+ return previous_;
+}
+
+
+#undef __
+
+
+} } // namespace v8::internal
+
+#endif // V8_TARGET_ARCH_ARM64
diff --git a/deps/v8/src/arm64/ic-arm64.cc b/deps/v8/src/arm64/ic-arm64.cc
new file mode 100644
index 0000000000..5fb7d633fd
--- /dev/null
+++ b/deps/v8/src/arm64/ic-arm64.cc
@@ -0,0 +1,1407 @@
+// Copyright 2013 the V8 project authors. All rights reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following
+// disclaimer in the documentation and/or other materials provided
+// with the distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived
+// from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+#include "v8.h"
+
+#if V8_TARGET_ARCH_ARM64
+
+#include "arm64/assembler-arm64.h"
+#include "code-stubs.h"
+#include "codegen.h"
+#include "disasm.h"
+#include "ic-inl.h"
+#include "runtime.h"
+#include "stub-cache.h"
+
+namespace v8 {
+namespace internal {
+
+
+#define __ ACCESS_MASM(masm)
+
+
+// "type" holds an instance type on entry and is not clobbered.
+// Generated code branches to "global_object" if type is any kind of global
+// JS object.
+static void GenerateGlobalInstanceTypeCheck(MacroAssembler* masm,
+ Register type,
+ Label* global_object) {
+ __ Cmp(type, JS_GLOBAL_OBJECT_TYPE);
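+  // The conditional compares chain with the Cmp above so that the final "eq"
+  // branch is taken if the type matches any of the three global object types.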
+ __ Ccmp(type, JS_BUILTINS_OBJECT_TYPE, ZFlag, ne);
+ __ Ccmp(type, JS_GLOBAL_PROXY_TYPE, ZFlag, ne);
+ __ B(eq, global_object);
+}
+
+
+// Generated code falls through if the receiver is a regular non-global
+// JS object with slow properties and no interceptors.
+//
+// "receiver" holds the receiver on entry and is unchanged.
+// "elements" holds the property dictionary on fall through.
+static void GenerateNameDictionaryReceiverCheck(MacroAssembler* masm,
+ Register receiver,
+ Register elements,
+ Register scratch0,
+ Register scratch1,
+ Label* miss) {
+ ASSERT(!AreAliased(receiver, elements, scratch0, scratch1));
+
+ // Check that the receiver isn't a smi.
+ __ JumpIfSmi(receiver, miss);
+
+ // Check that the receiver is a valid JS object.
+ // Let t be the object instance type, we want:
+ // FIRST_SPEC_OBJECT_TYPE <= t <= LAST_SPEC_OBJECT_TYPE.
+ // Since LAST_SPEC_OBJECT_TYPE is the last possible instance type we only
+ // check the lower bound.
+ STATIC_ASSERT(LAST_TYPE == LAST_SPEC_OBJECT_TYPE);
+
+ __ JumpIfObjectType(receiver, scratch0, scratch1, FIRST_SPEC_OBJECT_TYPE,
+ miss, lt);
+
+ // scratch0 now contains the map of the receiver and scratch1 the object type.
+ Register map = scratch0;
+ Register type = scratch1;
+
+ // Check if the receiver is a global JS object.
+ GenerateGlobalInstanceTypeCheck(masm, type, miss);
+
+ // Check that the object does not require access checks.
+ __ Ldrb(scratch1, FieldMemOperand(map, Map::kBitFieldOffset));
+ __ Tbnz(scratch1, Map::kIsAccessCheckNeeded, miss);
+ __ Tbnz(scratch1, Map::kHasNamedInterceptor, miss);
+
+ // Check that the properties dictionary is valid.
+ __ Ldr(elements, FieldMemOperand(receiver, JSObject::kPropertiesOffset));
+ __ Ldr(scratch1, FieldMemOperand(elements, HeapObject::kMapOffset));
+ __ JumpIfNotRoot(scratch1, Heap::kHashTableMapRootIndex, miss);
+}
+
+
+// Helper function used from LoadIC GenerateNormal.
+//
+// elements: Property dictionary. It is not clobbered if a jump to the miss
+// label is done.
+// name: Property name. It is not clobbered if a jump to the miss label is
+// done
+// result: Register for the result. It is only updated if a jump to the miss
+// label is not done.
+// The scratch registers need to be different from elements, name and result.
+// The generated code assumes that the receiver has slow properties,
+// is not a global object and does not have interceptors.
+static void GenerateDictionaryLoad(MacroAssembler* masm,
+ Label* miss,
+ Register elements,
+ Register name,
+ Register result,
+ Register scratch1,
+ Register scratch2) {
+ ASSERT(!AreAliased(elements, name, scratch1, scratch2));
+ ASSERT(!AreAliased(result, scratch1, scratch2));
+
+ Label done;
+
+ // Probe the dictionary.
+ NameDictionaryLookupStub::GeneratePositiveLookup(masm,
+ miss,
+ &done,
+ elements,
+ name,
+ scratch1,
+ scratch2);
+
+ // If probing finds an entry check that the value is a normal property.
+ __ Bind(&done);
+
+ static const int kElementsStartOffset = NameDictionary::kHeaderSize +
+ NameDictionary::kElementsStartIndex * kPointerSize;
+ static const int kDetailsOffset = kElementsStartOffset + 2 * kPointerSize;
+ __ Ldr(scratch1, FieldMemOperand(scratch2, kDetailsOffset));
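+ // A normal property has a zero type field in its details word, so any set
+ // type bits send us to the miss label.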
+ __ Tst(scratch1, Smi::FromInt(PropertyDetails::TypeField::kMask));
+ __ B(ne, miss);
+
+ // Get the value at the masked, scaled index and return.
+ __ Ldr(result,
+ FieldMemOperand(scratch2, kElementsStartOffset + 1 * kPointerSize));
+}
+
+
+// Helper function used from StoreIC::GenerateNormal.
+//
+// elements: Property dictionary. It is not clobbered if a jump to the miss
+// label is done.
+// name: Property name. It is not clobbered if a jump to the miss label is
+// done.
+// value: The value to store (never clobbered).
+//
+// The generated code assumes that the receiver has slow properties,
+// is not a global object and does not have interceptors.
+static void GenerateDictionaryStore(MacroAssembler* masm,
+ Label* miss,
+ Register elements,
+ Register name,
+ Register value,
+ Register scratch1,
+ Register scratch2) {
+ ASSERT(!AreAliased(elements, name, value, scratch1, scratch2));
+
+ Label done;
+
+ // Probe the dictionary.
+ NameDictionaryLookupStub::GeneratePositiveLookup(masm,
+ miss,
+ &done,
+ elements,
+ name,
+ scratch1,
+ scratch2);
+
+ // If probing finds an entry in the dictionary check that the value
+ // is a normal property that is not read only.
+ __ Bind(&done);
+
+ static const int kElementsStartOffset = NameDictionary::kHeaderSize +
+ NameDictionary::kElementsStartIndex * kPointerSize;
+ static const int kDetailsOffset = kElementsStartOffset + 2 * kPointerSize;
+ static const int kTypeAndReadOnlyMask =
+ PropertyDetails::TypeField::kMask |
+ PropertyDetails::AttributesField::encode(READ_ONLY);
+ __ Ldrsw(scratch1, UntagSmiFieldMemOperand(scratch2, kDetailsOffset));
+ __ Tst(scratch1, kTypeAndReadOnlyMask);
+ __ B(ne, miss);
+
+ // Store the value at the masked, scaled index and return.
+ static const int kValueOffset = kElementsStartOffset + kPointerSize;
+ __ Add(scratch2, scratch2, kValueOffset - kHeapObjectTag);
+ __ Str(value, MemOperand(scratch2));
+
+ // Update the write barrier. Make sure not to clobber the value.
+ __ Mov(scratch1, value);
+ __ RecordWrite(
+ elements, scratch2, scratch1, kLRHasNotBeenSaved, kDontSaveFPRegs);
+}
+
+
+// Checks the receiver for special cases (value type, slow case bits).
+// Falls through for regular JS objects and returns the map of the
+// receiver in 'map_scratch' if the receiver is not a smi.
+static void GenerateKeyedLoadReceiverCheck(MacroAssembler* masm,
+ Register receiver,
+ Register map_scratch,
+ Register scratch,
+ int interceptor_bit,
+ Label* slow) {
+ ASSERT(!AreAliased(map_scratch, scratch));
+
+ // Check that the object isn't a smi.
+ __ JumpIfSmi(receiver, slow);
+ // Get the map of the receiver.
+ __ Ldr(map_scratch, FieldMemOperand(receiver, HeapObject::kMapOffset));
+ // Check bit field.
+ __ Ldrb(scratch, FieldMemOperand(map_scratch, Map::kBitFieldOffset));
+ __ Tbnz(scratch, Map::kIsAccessCheckNeeded, slow);
+ __ Tbnz(scratch, interceptor_bit, slow);
+
+ // Check that the object is some kind of JS object EXCEPT JS Value type.
+ // In the case that the object is a value-wrapper object, we enter the
+ // runtime system to make sure that indexing into string objects works
+ // as intended.
+ STATIC_ASSERT(JS_OBJECT_TYPE > JS_VALUE_TYPE);
+ __ Ldrb(scratch, FieldMemOperand(map_scratch, Map::kInstanceTypeOffset));
+ __ Cmp(scratch, JS_OBJECT_TYPE);
+ __ B(lt, slow);
+}
+
+
+// Loads an indexed element from a fast case array.
+// If not_fast_array is NULL, doesn't perform the elements map check.
+//
+// receiver - holds the receiver on entry.
+// Unchanged unless 'result' is the same register.
+//
+// key - holds the smi key on entry.
+// Unchanged unless 'result' is the same register.
+//
+// elements - holds the elements of the receiver on exit.
+//
+// elements_map - holds the elements map on exit if the not_fast_array branch is
+// taken. Otherwise, this is used as a scratch register.
+//
+// result - holds the result on exit if the load succeeded.
+// Allowed to be the same as 'receiver' or 'key'.
+// Unchanged on bailout so 'receiver' and 'key' can be safely
+// used by further computation.
+static void GenerateFastArrayLoad(MacroAssembler* masm,
+ Register receiver,
+ Register key,
+ Register elements,
+ Register elements_map,
+ Register scratch2,
+ Register result,
+ Label* not_fast_array,
+ Label* slow) {
+ ASSERT(!AreAliased(receiver, key, elements, elements_map, scratch2));
+
+ // Check for fast array.
+ __ Ldr(elements, FieldMemOperand(receiver, JSObject::kElementsOffset));
+ if (not_fast_array != NULL) {
+ // Check that the object is in fast mode and writable.
+ __ Ldr(elements_map, FieldMemOperand(elements, HeapObject::kMapOffset));
+ __ JumpIfNotRoot(elements_map, Heap::kFixedArrayMapRootIndex,
+ not_fast_array);
+ } else {
+ __ AssertFastElements(elements);
+ }
+
+ // The elements_map register is only used for the not_fast_array path, which
+ // was handled above. From this point onward it is a scratch register.
+ Register scratch1 = elements_map;
+
+ // Check that the key (index) is within bounds.
+ __ Ldr(scratch1, FieldMemOperand(elements, FixedArray::kLengthOffset));
+ __ Cmp(key, scratch1);
+ __ B(hs, slow);
+
+ // Fast case: Do the load.
+ __ Add(scratch1, elements, FixedArray::kHeaderSize - kHeapObjectTag);
+ __ SmiUntag(scratch2, key);
+ __ Ldr(scratch2, MemOperand(scratch1, scratch2, LSL, kPointerSizeLog2));
+
+ // In case the loaded value is the_hole we have to consult GetProperty
+ // to ensure the prototype chain is searched.
+ __ JumpIfRoot(scratch2, Heap::kTheHoleValueRootIndex, slow);
+
+ // Move the value to the result register.
+ // 'result' can alias with 'receiver' or 'key' but these two must be
+ // preserved if we jump to 'slow'.
+ __ Mov(result, scratch2);
+}
+
+
+// Checks whether a key is an array index string or a unique name.
+// Falls through if the key is a unique name.
+// The map of the key is returned in 'map_scratch'.
+// If the jump to 'index_string' is taken, the hash of the key is left
+// in 'hash_scratch'.
+static void GenerateKeyNameCheck(MacroAssembler* masm,
+ Register key,
+ Register map_scratch,
+ Register hash_scratch,
+ Label* index_string,
+ Label* not_unique) {
+ ASSERT(!AreAliased(key, map_scratch, hash_scratch));
+
+ // Is the key a name?
+ Label unique;
+ __ JumpIfObjectType(key, map_scratch, hash_scratch, LAST_UNIQUE_NAME_TYPE,
+ not_unique, hi);
+ STATIC_ASSERT(LAST_UNIQUE_NAME_TYPE == FIRST_NONSTRING_TYPE);
+ __ B(eq, &unique);
+
+ // Is the string an array index with cached numeric value?
+ __ Ldr(hash_scratch.W(), FieldMemOperand(key, Name::kHashFieldOffset));
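+ // When all bits under kContainsCachedArrayIndexMask are clear, the hash
+ // field holds a cached array index, so the key is handled as an index.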
+ __ TestAndBranchIfAllClear(hash_scratch,
+ Name::kContainsCachedArrayIndexMask,
+ index_string);
+
+ // Is the string internalized? We know it's a string, so a single bit test is
+ // enough.
+ __ Ldrb(hash_scratch, FieldMemOperand(map_scratch, Map::kInstanceTypeOffset));
+ STATIC_ASSERT(kInternalizedTag == 0);
+ __ TestAndBranchIfAnySet(hash_scratch, kIsNotInternalizedMask, not_unique);
+
+ __ Bind(&unique);
+ // Fall through if the key is a unique name.
+}
+
+
+// Neither 'object' nor 'key' are modified by this function.
+//
+// If the 'unmapped_case' or 'slow_case' exit is taken, the 'map' register is
+// left with the object's elements map. Otherwise, it is used as a scratch
+// register.
+static MemOperand GenerateMappedArgumentsLookup(MacroAssembler* masm,
+ Register object,
+ Register key,
+ Register map,
+ Register scratch1,
+ Register scratch2,
+ Label* unmapped_case,
+ Label* slow_case) {
+ ASSERT(!AreAliased(object, key, map, scratch1, scratch2));
+
+ Heap* heap = masm->isolate()->heap();
+
+ // Check that the receiver is a JSObject. Because of the elements
+ // map check later, we do not need to check for interceptors or
+ // whether it requires access checks.
+ __ JumpIfSmi(object, slow_case);
+ // Check that the object is some kind of JSObject.
+ __ JumpIfObjectType(object, map, scratch1, FIRST_JS_RECEIVER_TYPE,
+ slow_case, lt);
+
+ // Check that the key is a positive smi.
+ __ JumpIfNotSmi(key, slow_case);
+ __ Tbnz(key, kXSignBit, slow_case);
+
+ // Load the elements object and check its map.
+ Handle<Map> arguments_map(heap->sloppy_arguments_elements_map());
+ __ Ldr(map, FieldMemOperand(object, JSObject::kElementsOffset));
+ __ CheckMap(map, scratch1, arguments_map, slow_case, DONT_DO_SMI_CHECK);
+
+ // Check if element is in the range of mapped arguments. If not, jump
+ // to the unmapped lookup.
+ __ Ldr(scratch1, FieldMemOperand(map, FixedArray::kLengthOffset));
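+ // The first two elements of the parameter map hold the context and the
+ // arguments backing store, so the number of mapped parameters is length - 2.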
+ __ Sub(scratch1, scratch1, Smi::FromInt(2));
+ __ Cmp(key, scratch1);
+ __ B(hs, unmapped_case);
+
+ // Load element index and check whether it is the hole.
+ static const int offset =
+ FixedArray::kHeaderSize + 2 * kPointerSize - kHeapObjectTag;
+
+ __ Add(scratch1, map, offset);
+ __ SmiUntag(scratch2, key);
+ __ Ldr(scratch1, MemOperand(scratch1, scratch2, LSL, kPointerSizeLog2));
+ __ JumpIfRoot(scratch1, Heap::kTheHoleValueRootIndex, unmapped_case);
+
+ // Load value from context and return it.
+ __ Ldr(scratch2, FieldMemOperand(map, FixedArray::kHeaderSize));
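+ // scratch2 now holds the context, stored in the first element of the
+ // parameter map.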
+ __ SmiUntag(scratch1);
+ __ Lsl(scratch1, scratch1, kPointerSizeLog2);
+ __ Add(scratch1, scratch1, Context::kHeaderSize - kHeapObjectTag);
+ // The base of the result (scratch2) is passed to RecordWrite in
+ // KeyedStoreIC::GenerateSloppyArguments and it must be a HeapObject.
+ return MemOperand(scratch2, scratch1);
+}
+
+
+// The 'parameter_map' register must be loaded with the parameter map of the
+// arguments object and is overwritten.
+static MemOperand GenerateUnmappedArgumentsLookup(MacroAssembler* masm,
+ Register key,
+ Register parameter_map,
+ Register scratch,
+ Label* slow_case) {
+ ASSERT(!AreAliased(key, parameter_map, scratch));
+
+ // The element is in the arguments backing store, which is referenced by the
+ // second element of the parameter_map.
+ const int kBackingStoreOffset = FixedArray::kHeaderSize + kPointerSize;
+ Register backing_store = parameter_map;
+ __ Ldr(backing_store, FieldMemOperand(parameter_map, kBackingStoreOffset));
+ Handle<Map> fixed_array_map(masm->isolate()->heap()->fixed_array_map());
+ __ CheckMap(
+ backing_store, scratch, fixed_array_map, slow_case, DONT_DO_SMI_CHECK);
+ __ Ldr(scratch, FieldMemOperand(backing_store, FixedArray::kLengthOffset));
+ __ Cmp(key, scratch);
+ __ B(hs, slow_case);
+
+ __ Add(backing_store,
+ backing_store,
+ FixedArray::kHeaderSize - kHeapObjectTag);
+ __ SmiUntag(scratch, key);
+ return MemOperand(backing_store, scratch, LSL, kPointerSizeLog2);
+}
+
+
+void LoadIC::GenerateMegamorphic(MacroAssembler* masm) {
+ // ----------- S t a t e -------------
+ // -- x2 : name
+ // -- lr : return address
+ // -- x0 : receiver
+ // -----------------------------------
+
+ // Probe the stub cache.
+ Code::Flags flags = Code::ComputeHandlerFlags(Code::LOAD_IC);
+ masm->isolate()->stub_cache()->GenerateProbe(
+ masm, flags, x0, x2, x3, x4, x5, x6);
+
+ // Cache miss: Jump to runtime.
+ GenerateMiss(masm);
+}
+
+
+void LoadIC::GenerateNormal(MacroAssembler* masm) {
+ // ----------- S t a t e -------------
+ // -- x2 : name
+ // -- lr : return address
+ // -- x0 : receiver
+ // -----------------------------------
+ Label miss;
+
+ GenerateNameDictionaryReceiverCheck(masm, x0, x1, x3, x4, &miss);
+
+ // x1 now holds the property dictionary.
+ GenerateDictionaryLoad(masm, &miss, x1, x2, x0, x3, x4);
+ __ Ret();
+
+ // Cache miss: Jump to runtime.
+ __ Bind(&miss);
+ GenerateMiss(masm);
+}
+
+
+void LoadIC::GenerateMiss(MacroAssembler* masm) {
+ // ----------- S t a t e -------------
+ // -- x2 : name
+ // -- lr : return address
+ // -- x0 : receiver
+ // -----------------------------------
+ Isolate* isolate = masm->isolate();
+ ASM_LOCATION("LoadIC::GenerateMiss");
+
+ __ IncrementCounter(isolate->counters()->load_miss(), 1, x3, x4);
+
+ // Perform tail call to the entry.
+ __ Push(x0, x2);
+ ExternalReference ref =
+ ExternalReference(IC_Utility(kLoadIC_Miss), isolate);
+ __ TailCallExternalReference(ref, 2, 1);
+}
+
+
+void LoadIC::GenerateRuntimeGetProperty(MacroAssembler* masm) {
+ // ---------- S t a t e --------------
+ // -- x2 : name
+ // -- lr : return address
+ // -- x0 : receiver
+ // -----------------------------------
+
+ __ Push(x0, x2);
+ __ TailCallRuntime(Runtime::kGetProperty, 2, 1);
+}
+
+
+void KeyedLoadIC::GenerateSloppyArguments(MacroAssembler* masm) {
+ // ---------- S t a t e --------------
+ // -- lr : return address
+ // -- x0 : key
+ // -- x1 : receiver
+ // -----------------------------------
+ Register result = x0;
+ Register key = x0;
+ Register receiver = x1;
+ Label miss, unmapped;
+
+ Register map_scratch = x2;
+ MemOperand mapped_location = GenerateMappedArgumentsLookup(
+ masm, receiver, key, map_scratch, x3, x4, &unmapped, &miss);
+ __ Ldr(result, mapped_location);
+ __ Ret();
+
+ __ Bind(&unmapped);
+ // The parameter map is left in map_scratch when the unmapped branch is taken.
+ MemOperand unmapped_location =
+ GenerateUnmappedArgumentsLookup(masm, key, map_scratch, x3, &miss);
+ __ Ldr(x2, unmapped_location);
+ __ JumpIfRoot(x2, Heap::kTheHoleValueRootIndex, &miss);
+ // Move the result to x0. x0 must be preserved on miss.
+ __ Mov(result, x2);
+ __ Ret();
+
+ __ Bind(&miss);
+ GenerateMiss(masm);
+}
+
+
+void KeyedStoreIC::GenerateSloppyArguments(MacroAssembler* masm) {
+ ASM_LOCATION("KeyedStoreIC::GenerateSloppyArguments");
+ // ---------- S t a t e --------------
+ // -- lr : return address
+ // -- x0 : value
+ // -- x1 : key
+ // -- x2 : receiver
+ // -----------------------------------
+
+ Label slow, notin;
+
+ Register value = x0;
+ Register key = x1;
+ Register receiver = x2;
+ Register map = x3;
+
+ // These registers are used by GenerateMappedArgumentsLookup to build a
+ // MemOperand. They are live for as long as the MemOperand is live.
+ Register mapped1 = x4;
+ Register mapped2 = x5;
+
+ MemOperand mapped =
+ GenerateMappedArgumentsLookup(masm, receiver, key, map,
+ mapped1, mapped2,
+ &notin, &slow);
+ Operand mapped_offset = mapped.OffsetAsOperand();
+ __ Str(value, mapped);
+ __ Add(x10, mapped.base(), mapped_offset);
+ __ Mov(x11, value);
+ __ RecordWrite(mapped.base(), x10, x11, kLRHasNotBeenSaved, kDontSaveFPRegs);
+ __ Ret();
+
+ __ Bind(&notin);
+
+ // These registers are used by GenerateUnmappedArgumentsLookup to build a
+ // MemOperand. They are live for as long as the MemOperand is live.
+ Register unmapped1 = map; // This is assumed to alias 'map'.
+ Register unmapped2 = x4;
+ MemOperand unmapped =
+ GenerateUnmappedArgumentsLookup(masm, key, unmapped1, unmapped2, &slow);
+ Operand unmapped_offset = unmapped.OffsetAsOperand();
+ __ Str(value, unmapped);
+ __ Add(x10, unmapped.base(), unmapped_offset);
+ __ Mov(x11, value);
+ __ RecordWrite(unmapped.base(), x10, x11,
+ kLRHasNotBeenSaved, kDontSaveFPRegs);
+ __ Ret();
+ __ Bind(&slow);
+ GenerateMiss(masm);
+}
+
+
+void KeyedLoadIC::GenerateMiss(MacroAssembler* masm) {
+ // ---------- S t a t e --------------
+ // -- lr : return address
+ // -- x0 : key
+ // -- x1 : receiver
+ // -----------------------------------
+ Isolate* isolate = masm->isolate();
+
+ __ IncrementCounter(isolate->counters()->keyed_load_miss(), 1, x10, x11);
+
+ __ Push(x1, x0);
+
+ // Perform tail call to the entry.
+ ExternalReference ref =
+ ExternalReference(IC_Utility(kKeyedLoadIC_Miss), isolate);
+
+ __ TailCallExternalReference(ref, 2, 1);
+}
+
+
+void KeyedLoadIC::GenerateRuntimeGetProperty(MacroAssembler* masm) {
+ // ---------- S t a t e --------------
+ // -- lr : return address
+ // -- x0 : key
+ // -- x1 : receiver
+ // -----------------------------------
+ Register key = x0;
+ Register receiver = x1;
+
+ __ Push(receiver, key);
+ __ TailCallRuntime(Runtime::kKeyedGetProperty, 2, 1);
+}
+
+
+static void GenerateKeyedLoadWithSmiKey(MacroAssembler* masm,
+ Register key,
+ Register receiver,
+ Register scratch1,
+ Register scratch2,
+ Register scratch3,
+ Register scratch4,
+ Register scratch5,
+ Label *slow) {
+ ASSERT(!AreAliased(
+ key, receiver, scratch1, scratch2, scratch3, scratch4, scratch5));
+
+ Isolate* isolate = masm->isolate();
+ Label check_number_dictionary;
+ // If we can load the value, it should be returned in x0.
+ Register result = x0;
+
+ GenerateKeyedLoadReceiverCheck(
+ masm, receiver, scratch1, scratch2, Map::kHasIndexedInterceptor, slow);
+
+ // Check the receiver's map to see if it has fast elements.
+ __ CheckFastElements(scratch1, scratch2, &check_number_dictionary);
+
+ GenerateFastArrayLoad(
+ masm, receiver, key, scratch3, scratch2, scratch1, result, NULL, slow);
+ __ IncrementCounter(
+ isolate->counters()->keyed_load_generic_smi(), 1, scratch1, scratch2);
+ __ Ret();
+
+ __ Bind(&check_number_dictionary);
+ __ Ldr(scratch3, FieldMemOperand(receiver, JSObject::kElementsOffset));
+ __ Ldr(scratch2, FieldMemOperand(scratch3, JSObject::kMapOffset));
+
+ // Check whether we have a number dictionary.
+ __ JumpIfNotRoot(scratch2, Heap::kHashTableMapRootIndex, slow);
+
+ __ LoadFromNumberDictionary(
+ slow, scratch3, key, result, scratch1, scratch2, scratch4, scratch5);
+ __ Ret();
+}
+
+static void GenerateKeyedLoadWithNameKey(MacroAssembler* masm,
+ Register key,
+ Register receiver,
+ Register scratch1,
+ Register scratch2,
+ Register scratch3,
+ Register scratch4,
+ Register scratch5,
+ Label *slow) {
+ ASSERT(!AreAliased(
+ key, receiver, scratch1, scratch2, scratch3, scratch4, scratch5));
+
+ Isolate* isolate = masm->isolate();
+ Label probe_dictionary, property_array_property;
+ // If we can load the value, it should be returned in x0.
+ Register result = x0;
+
+ GenerateKeyedLoadReceiverCheck(
+ masm, receiver, scratch1, scratch2, Map::kHasNamedInterceptor, slow);
+
+ // If the receiver is a fast-case object, check the keyed lookup cache.
+ // Otherwise probe the dictionary.
+ __ Ldr(scratch2, FieldMemOperand(receiver, JSObject::kPropertiesOffset));
+ __ Ldr(scratch3, FieldMemOperand(scratch2, HeapObject::kMapOffset));
+ __ JumpIfRoot(scratch3, Heap::kHashTableMapRootIndex, &probe_dictionary);
+
+ // We keep the map of the receiver in scratch1.
+ Register receiver_map = scratch1;
+
+ // Load the map of the receiver, compute the keyed lookup cache hash
+ // based on 32 bits of the map pointer and the name hash.
+ __ Ldr(receiver_map, FieldMemOperand(receiver, HeapObject::kMapOffset));
+ __ Mov(scratch2, Operand(receiver_map, ASR, KeyedLookupCache::kMapHashShift));
+ __ Ldr(scratch3.W(), FieldMemOperand(key, Name::kHashFieldOffset));
+ __ Eor(scratch2, scratch2, Operand(scratch3, ASR, Name::kHashShift));
+ int mask = KeyedLookupCache::kCapacityMask & KeyedLookupCache::kHashMask;
+ __ And(scratch2, scratch2, mask);
+
+ // Load the key (consisting of map and unique name) from the cache and
+ // check for match.
+ Label load_in_object_property;
+ static const int kEntriesPerBucket = KeyedLookupCache::kEntriesPerBucket;
+ Label hit_on_nth_entry[kEntriesPerBucket];
+ ExternalReference cache_keys =
+ ExternalReference::keyed_lookup_cache_keys(isolate);
+
+ __ Mov(scratch3, cache_keys);
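+ // Each cache entry is a (map, name) pair, two pointers wide, hence the
+ // scaling of the hash index by 2 * kPointerSize.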
+ __ Add(scratch3, scratch3, Operand(scratch2, LSL, kPointerSizeLog2 + 1));
+
+ for (int i = 0; i < kEntriesPerBucket - 1; i++) {
+ Label try_next_entry;
+ // Load the map and advance scratch3 to the next entry.
+ __ Ldr(scratch4, MemOperand(scratch3, kPointerSize * 2, PostIndex));
+ __ Cmp(receiver_map, scratch4);
+ __ B(ne, &try_next_entry);
+ __ Ldr(scratch4, MemOperand(scratch3, -kPointerSize)); // Load name
+ __ Cmp(key, scratch4);
+ __ B(eq, &hit_on_nth_entry[i]);
+ __ Bind(&try_next_entry);
+ }
+
+ // Last entry.
+ __ Ldr(scratch4, MemOperand(scratch3, kPointerSize, PostIndex));
+ __ Cmp(receiver_map, scratch4);
+ __ B(ne, slow);
+ __ Ldr(scratch4, MemOperand(scratch3));
+ __ Cmp(key, scratch4);
+ __ B(ne, slow);
+
+ // Get field offset.
+ ExternalReference cache_field_offsets =
+ ExternalReference::keyed_lookup_cache_field_offsets(isolate);
+
+ // Hit on nth entry.
+ for (int i = kEntriesPerBucket - 1; i >= 0; i--) {
+ __ Bind(&hit_on_nth_entry[i]);
+ __ Mov(scratch3, cache_field_offsets);
+ if (i != 0) {
+ __ Add(scratch2, scratch2, i);
+ }
+ __ Ldr(scratch4.W(), MemOperand(scratch3, scratch2, LSL, 2));
+ __ Ldrb(scratch5,
+ FieldMemOperand(receiver_map, Map::kInObjectPropertiesOffset));
+ __ Subs(scratch4, scratch4, scratch5);
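+ // A non-negative result means the property is stored in the property
+ // array rather than inside the object itself.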
+ __ B(ge, &property_array_property);
+ if (i != 0) {
+ __ B(&load_in_object_property);
+ }
+ }
+
+ // Load in-object property.
+ __ Bind(&load_in_object_property);
+ __ Ldrb(scratch5, FieldMemOperand(receiver_map, Map::kInstanceSizeOffset));
+ __ Add(scratch5, scratch5, scratch4); // Index from start of object.
+ __ Sub(receiver, receiver, kHeapObjectTag); // Remove the heap tag.
+ __ Ldr(result, MemOperand(receiver, scratch5, LSL, kPointerSizeLog2));
+ __ IncrementCounter(isolate->counters()->keyed_load_generic_lookup_cache(),
+ 1, scratch1, scratch2);
+ __ Ret();
+
+ // Load property array property.
+ __ Bind(&property_array_property);
+ __ Ldr(scratch1, FieldMemOperand(receiver, JSObject::kPropertiesOffset));
+ __ Add(scratch1, scratch1, FixedArray::kHeaderSize - kHeapObjectTag);
+ __ Ldr(result, MemOperand(scratch1, scratch4, LSL, kPointerSizeLog2));
+ __ IncrementCounter(isolate->counters()->keyed_load_generic_lookup_cache(),
+ 1, scratch1, scratch2);
+ __ Ret();
+
+ // Do a quick inline probe of the receiver's dictionary, if it exists.
+ __ Bind(&probe_dictionary);
+ __ Ldr(scratch1, FieldMemOperand(receiver, HeapObject::kMapOffset));
+ __ Ldrb(scratch1, FieldMemOperand(scratch1, Map::kInstanceTypeOffset));
+ GenerateGlobalInstanceTypeCheck(masm, scratch1, slow);
+ // Load the property.
+ GenerateDictionaryLoad(masm, slow, scratch2, key, result, scratch1, scratch3);
+ __ IncrementCounter(isolate->counters()->keyed_load_generic_symbol(),
+ 1, scratch1, scratch2);
+ __ Ret();
+}
+
+
+void KeyedLoadIC::GenerateGeneric(MacroAssembler* masm) {
+ // ---------- S t a t e --------------
+ // -- lr : return address
+ // -- x0 : key
+ // -- x1 : receiver
+ // -----------------------------------
+ Label slow, check_name, index_smi, index_name;
+
+ Register key = x0;
+ Register receiver = x1;
+
+ __ JumpIfNotSmi(key, &check_name);
+ __ Bind(&index_smi);
+ // Now the key is known to be a smi. This place is also jumped to from below
+ // where a numeric string is converted to a smi.
+ GenerateKeyedLoadWithSmiKey(masm, key, receiver, x2, x3, x4, x5, x6, &slow);
+
+ // Slow case, key and receiver still in x0 and x1.
+ __ Bind(&slow);
+ __ IncrementCounter(
+ masm->isolate()->counters()->keyed_load_generic_slow(), 1, x2, x3);
+ GenerateRuntimeGetProperty(masm);
+
+ __ Bind(&check_name);
+ GenerateKeyNameCheck(masm, key, x2, x3, &index_name, &slow);
+
+ GenerateKeyedLoadWithNameKey(masm, key, receiver, x2, x3, x4, x5, x6, &slow);
+
+ __ Bind(&index_name);
+ __ IndexFromHash(x3, key);
+ // Now jump to the place where smi keys are handled.
+ __ B(&index_smi);
+}
+
+
+void KeyedLoadIC::GenerateString(MacroAssembler* masm) {
+ // ---------- S t a t e --------------
+ // -- lr : return address
+ // -- x0 : key (index)
+ // -- x1 : receiver
+ // -----------------------------------
+ Label miss;
+
+ Register index = x0;
+ Register receiver = x1;
+ Register result = x0;
+ Register scratch = x3;
+
+ StringCharAtGenerator char_at_generator(receiver,
+ index,
+ scratch,
+ result,
+ &miss, // When not a string.
+ &miss, // When not a number.
+ &miss, // When index out of range.
+ STRING_INDEX_IS_ARRAY_INDEX);
+ char_at_generator.GenerateFast(masm);
+ __ Ret();
+
+ StubRuntimeCallHelper call_helper;
+ char_at_generator.GenerateSlow(masm, call_helper);
+
+ __ Bind(&miss);
+ GenerateMiss(masm);
+}
+
+
+void KeyedLoadIC::GenerateIndexedInterceptor(MacroAssembler* masm) {
+ // ---------- S t a t e --------------
+ // -- lr : return address
+ // -- x0 : key
+ // -- x1 : receiver
+ // -----------------------------------
+ Label slow;
+ Register key = x0;
+ Register receiver = x1;
+
+ // Check that the receiver isn't a smi.
+ __ JumpIfSmi(receiver, &slow);
+
+ // Check that the key is an array index, that is Uint32.
+ __ TestAndBranchIfAnySet(key, kSmiTagMask | kSmiSignMask, &slow);
+
+ // Get the map of the receiver.
+ Register map = x2;
+ __ Ldr(map, FieldMemOperand(receiver, HeapObject::kMapOffset));
+
+ // Check that it has indexed interceptor and access checks
+ // are not enabled for this object.
+ __ Ldrb(x3, FieldMemOperand(map, Map::kBitFieldOffset));
+ ASSERT(kSlowCaseBitFieldMask ==
+ ((1 << Map::kIsAccessCheckNeeded) | (1 << Map::kHasIndexedInterceptor)));
+ __ Tbnz(x3, Map::kIsAccessCheckNeeded, &slow);
+ __ Tbz(x3, Map::kHasIndexedInterceptor, &slow);
+
+ // Everything is fine, call runtime.
+ __ Push(receiver, key);
+ __ TailCallExternalReference(
+ ExternalReference(IC_Utility(kKeyedLoadPropertyWithInterceptor),
+ masm->isolate()),
+ 2,
+ 1);
+
+ __ Bind(&slow);
+ GenerateMiss(masm);
+}
+
+
+void KeyedStoreIC::GenerateMiss(MacroAssembler* masm) {
+ ASM_LOCATION("KeyedStoreIC::GenerateMiss");
+ // ---------- S t a t e --------------
+ // -- x0 : value
+ // -- x1 : key
+ // -- x2 : receiver
+ // -- lr : return address
+ // -----------------------------------
+
+ // Push receiver, key and value for runtime call.
+ __ Push(x2, x1, x0);
+
+ ExternalReference ref =
+ ExternalReference(IC_Utility(kKeyedStoreIC_Miss), masm->isolate());
+ __ TailCallExternalReference(ref, 3, 1);
+}
+
+
+void KeyedStoreIC::GenerateSlow(MacroAssembler* masm) {
+ ASM_LOCATION("KeyedStoreIC::GenerateSlow");
+ // ---------- S t a t e --------------
+ // -- lr : return address
+ // -- x0 : value
+ // -- x1 : key
+ // -- x2 : receiver
+ // -----------------------------------
+
+ // Push receiver, key and value for runtime call.
+ __ Push(x2, x1, x0);
+
+ // The slow case calls into the runtime to complete the store without causing
+ // an IC miss that would otherwise cause a transition to the generic stub.
+ ExternalReference ref =
+ ExternalReference(IC_Utility(kKeyedStoreIC_Slow), masm->isolate());
+ __ TailCallExternalReference(ref, 3, 1);
+}
+
+
+void KeyedStoreIC::GenerateRuntimeSetProperty(MacroAssembler* masm,
+ StrictMode strict_mode) {
+ ASM_LOCATION("KeyedStoreIC::GenerateRuntimeSetProperty");
+ // ---------- S t a t e --------------
+ // -- x0 : value
+ // -- x1 : key
+ // -- x2 : receiver
+ // -- lr : return address
+ // -----------------------------------
+
+ // Push receiver, key and value for runtime call.
+ __ Push(x2, x1, x0);
+
+ // Push PropertyAttributes(NONE) and strict_mode for runtime call.
+ STATIC_ASSERT(NONE == 0);
+ __ Mov(x10, Smi::FromInt(strict_mode));
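+ // Pushing xzr stores a zero word, which is the smi encoding of NONE.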
+ __ Push(xzr, x10);
+
+ __ TailCallRuntime(Runtime::kSetProperty, 5, 1);
+}
+
+
+static void KeyedStoreGenerateGenericHelper(
+ MacroAssembler* masm,
+ Label* fast_object,
+ Label* fast_double,
+ Label* slow,
+ KeyedStoreCheckMap check_map,
+ KeyedStoreIncrementLength increment_length,
+ Register value,
+ Register key,
+ Register receiver,
+ Register receiver_map,
+ Register elements_map,
+ Register elements) {
+ ASSERT(!AreAliased(
+ value, key, receiver, receiver_map, elements_map, elements, x10, x11));
+
+ Label transition_smi_elements;
+ Label transition_double_elements;
+ Label fast_double_without_map_check;
+ Label non_double_value;
+ Label finish_store;
+
+ __ Bind(fast_object);
+ if (check_map == kCheckMap) {
+ __ Ldr(elements_map, FieldMemOperand(elements, HeapObject::kMapOffset));
+ __ Cmp(elements_map,
+ Operand(masm->isolate()->factory()->fixed_array_map()));
+ __ B(ne, fast_double);
+ }
+
+ // HOLECHECK: guards "A[i] = V"
+ // We have to go to the runtime if the current value is the hole because there
+ // may be a callback on the element.
+ Label holecheck_passed;
+ __ Add(x10, elements, FixedArray::kHeaderSize - kHeapObjectTag);
+ __ Add(x10, x10, Operand::UntagSmiAndScale(key, kPointerSizeLog2));
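+ // x10 now addresses the element at 'key'; load it to check for the hole.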
+ __ Ldr(x11, MemOperand(x10));
+ __ JumpIfNotRoot(x11, Heap::kTheHoleValueRootIndex, &holecheck_passed);
+ __ JumpIfDictionaryInPrototypeChain(receiver, elements_map, x10, slow);
+ __ bind(&holecheck_passed);
+
+ // Smi stores don't require further checks.
+ __ JumpIfSmi(value, &finish_store);
+
+ // Escape to elements kind transition case.
+ __ CheckFastObjectElements(receiver_map, x10, &transition_smi_elements);
+
+ __ Bind(&finish_store);
+ if (increment_length == kIncrementLength) {
+ // Add 1 to receiver->length.
+ __ Add(x10, key, Smi::FromInt(1));
+ __ Str(x10, FieldMemOperand(receiver, JSArray::kLengthOffset));
+ }
+
+ Register address = x11;
+ __ Add(address, elements, FixedArray::kHeaderSize - kHeapObjectTag);
+ __ Add(address, address, Operand::UntagSmiAndScale(key, kPointerSizeLog2));
+ __ Str(value, MemOperand(address));
+
+ Label dont_record_write;
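+ // Smis need no write barrier, so skip the RecordWrite below for them.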
+ __ JumpIfSmi(value, &dont_record_write);
+
+ // Update write barrier for the elements array address.
+ __ Mov(x10, value); // Preserve the value which is returned.
+ __ RecordWrite(elements,
+ address,
+ x10,
+ kLRHasNotBeenSaved,
+ kDontSaveFPRegs,
+ EMIT_REMEMBERED_SET,
+ OMIT_SMI_CHECK);
+
+ __ Bind(&dont_record_write);
+ __ Ret();
+
+
+ __ Bind(fast_double);
+ if (check_map == kCheckMap) {
+ // Check for fast double array case. If this fails, call through to the
+ // runtime.
+ __ JumpIfNotRoot(elements_map, Heap::kFixedDoubleArrayMapRootIndex, slow);
+ }
+
+ // HOLECHECK: guards "A[i] double hole?"
+ // We have to see if the double version of the hole is present. If so, go to
+ // the runtime.
+ __ Add(x10, elements, FixedDoubleArray::kHeaderSize - kHeapObjectTag);
+ __ Add(x10, x10, Operand::UntagSmiAndScale(key, kPointerSizeLog2));
+ __ Ldr(x11, MemOperand(x10));
+ __ CompareAndBranch(x11, kHoleNanInt64, ne, &fast_double_without_map_check);
+ __ JumpIfDictionaryInPrototypeChain(receiver, elements_map, x10, slow);
+
+ __ Bind(&fast_double_without_map_check);
+ __ StoreNumberToDoubleElements(value,
+ key,
+ elements,
+ x10,
+ d0,
+ d1,
+ &transition_double_elements);
+ if (increment_length == kIncrementLength) {
+ // Add 1 to receiver->length.
+ __ Add(x10, key, Smi::FromInt(1));
+ __ Str(x10, FieldMemOperand(receiver, JSArray::kLengthOffset));
+ }
+ __ Ret();
+
+
+ __ Bind(&transition_smi_elements);
+ // Transition the array appropriately depending on the value type.
+ __ Ldr(x10, FieldMemOperand(value, HeapObject::kMapOffset));
+ __ JumpIfNotRoot(x10, Heap::kHeapNumberMapRootIndex, &non_double_value);
+
+ // Value is a double. Transition FAST_SMI_ELEMENTS ->
+ // FAST_DOUBLE_ELEMENTS and complete the store.
+ __ LoadTransitionedArrayMapConditional(FAST_SMI_ELEMENTS,
+ FAST_DOUBLE_ELEMENTS,
+ receiver_map,
+ x10,
+ x11,
+ slow);
+ ASSERT(receiver_map.Is(x3)); // Transition code expects map in x3.
+ AllocationSiteMode mode = AllocationSite::GetMode(FAST_SMI_ELEMENTS,
+ FAST_DOUBLE_ELEMENTS);
+ ElementsTransitionGenerator::GenerateSmiToDouble(masm, mode, slow);
+ __ Ldr(elements, FieldMemOperand(receiver, JSObject::kElementsOffset));
+ __ B(&fast_double_without_map_check);
+
+ __ Bind(&non_double_value);
+ // Value is not a double, FAST_SMI_ELEMENTS -> FAST_ELEMENTS.
+ __ LoadTransitionedArrayMapConditional(FAST_SMI_ELEMENTS,
+ FAST_ELEMENTS,
+ receiver_map,
+ x10,
+ x11,
+ slow);
+ ASSERT(receiver_map.Is(x3)); // Transition code expects map in x3.
+ mode = AllocationSite::GetMode(FAST_SMI_ELEMENTS, FAST_ELEMENTS);
+ ElementsTransitionGenerator::GenerateMapChangeElementsTransition(masm, mode,
+ slow);
+ __ Ldr(elements, FieldMemOperand(receiver, JSObject::kElementsOffset));
+ __ B(&finish_store);
+
+ __ Bind(&transition_double_elements);
+ // Elements are FAST_DOUBLE_ELEMENTS, but value is an Object that's not a
+ // HeapNumber. Make sure that the receiver is an array with FAST_ELEMENTS and
+ // transition the array from FAST_DOUBLE_ELEMENTS to FAST_ELEMENTS.
+ __ LoadTransitionedArrayMapConditional(FAST_DOUBLE_ELEMENTS,
+ FAST_ELEMENTS,
+ receiver_map,
+ x10,
+ x11,
+ slow);
+ ASSERT(receiver_map.Is(x3)); // Transition code expects map in x3.
+ mode = AllocationSite::GetMode(FAST_DOUBLE_ELEMENTS, FAST_ELEMENTS);
+ ElementsTransitionGenerator::GenerateDoubleToObject(masm, mode, slow);
+ __ Ldr(elements, FieldMemOperand(receiver, JSObject::kElementsOffset));
+ __ B(&finish_store);
+}
+
+
+void KeyedStoreIC::GenerateGeneric(MacroAssembler* masm,
+ StrictMode strict_mode) {
+ ASM_LOCATION("KeyedStoreIC::GenerateGeneric");
+ // ---------- S t a t e --------------
+ // -- x0 : value
+ // -- x1 : key
+ // -- x2 : receiver
+ // -- lr : return address
+ // -----------------------------------
+ Label slow;
+ Label array;
+ Label fast_object;
+ Label extra;
+ Label fast_object_grow;
+ Label fast_double_grow;
+ Label fast_double;
+
+ Register value = x0;
+ Register key = x1;
+ Register receiver = x2;
+ Register receiver_map = x3;
+ Register elements = x4;
+ Register elements_map = x5;
+
+ __ JumpIfNotSmi(key, &slow);
+ __ JumpIfSmi(receiver, &slow);
+ __ Ldr(receiver_map, FieldMemOperand(receiver, HeapObject::kMapOffset));
+
+ // Check that the receiver does not require access checks and is not observed.
+ // The generic stub does not perform map checks or handle observed objects.
+ __ Ldrb(x10, FieldMemOperand(receiver_map, Map::kBitFieldOffset));
+ __ TestAndBranchIfAnySet(
+ x10, (1 << Map::kIsAccessCheckNeeded) | (1 << Map::kIsObserved), &slow);
+
+ // Check if the object is a JS array or not.
+ Register instance_type = x10;
+ __ CompareInstanceType(receiver_map, instance_type, JS_ARRAY_TYPE);
+ __ B(eq, &array);
+ // Check that the object is some kind of JSObject.
+ __ Cmp(instance_type, FIRST_JS_OBJECT_TYPE);
+ __ B(lt, &slow);
+
+ // Object case: Check key against length in the elements array.
+ __ Ldr(elements, FieldMemOperand(receiver, JSObject::kElementsOffset));
+ // Check array bounds. Both the key and the length of FixedArray are smis.
+ __ Ldrsw(x10, UntagSmiFieldMemOperand(elements, FixedArray::kLengthOffset));
+ __ Cmp(x10, Operand::UntagSmi(key));
+ __ B(hi, &fast_object);
+
+
+ __ Bind(&slow);
+ // Slow case, handle jump to runtime.
+ // Live values:
+ // x0: value
+ // x1: key
+ // x2: receiver
+ GenerateRuntimeSetProperty(masm, strict_mode);
+
+
+ __ Bind(&extra);
+ // Extra capacity case: Check if there is extra capacity to
+ // perform the store and update the length. Used for adding one
+ // element to the array by writing to array[array.length].
+
+ // Check for room in the elements backing store.
+ // Both the key and the length of FixedArray are smis.
+ __ Ldrsw(x10, UntagSmiFieldMemOperand(elements, FixedArray::kLengthOffset));
+ __ Cmp(x10, Operand::UntagSmi(key));
+ __ B(ls, &slow);
+
+ __ Ldr(elements_map, FieldMemOperand(elements, HeapObject::kMapOffset));
+ __ Cmp(elements_map, Operand(masm->isolate()->factory()->fixed_array_map()));
+ __ B(eq, &fast_object_grow);
+ __ Cmp(elements_map,
+ Operand(masm->isolate()->factory()->fixed_double_array_map()));
+ __ B(eq, &fast_double_grow);
+ __ B(&slow);
+
+
+ __ Bind(&array);
+ // Array case: Get the length and the elements array from the JS
+ // array. Check that the array is in fast mode (and writable); if it
+ // is the length is always a smi.
+
+ __ Ldr(elements, FieldMemOperand(receiver, JSObject::kElementsOffset));
+
+ // Check the key against the length in the array.
+ __ Ldrsw(x10, UntagSmiFieldMemOperand(receiver, JSArray::kLengthOffset));
+ __ Cmp(x10, Operand::UntagSmi(key));
+ __ B(eq, &extra); // We can handle the case where we are appending 1 element.
+ __ B(lo, &slow);
+
+ KeyedStoreGenerateGenericHelper(masm, &fast_object, &fast_double,
+ &slow, kCheckMap, kDontIncrementLength,
+ value, key, receiver, receiver_map,
+ elements_map, elements);
+ KeyedStoreGenerateGenericHelper(masm, &fast_object_grow, &fast_double_grow,
+ &slow, kDontCheckMap, kIncrementLength,
+ value, key, receiver, receiver_map,
+ elements_map, elements);
+}
+
+
+void StoreIC::GenerateMegamorphic(MacroAssembler* masm) {
+ // ----------- S t a t e -------------
+ // -- x0 : value
+ // -- x1 : receiver
+ // -- x2 : name
+ // -- lr : return address
+ // -----------------------------------
+
+ // Probe the stub cache.
+ Code::Flags flags = Code::ComputeHandlerFlags(Code::STORE_IC);
+ masm->isolate()->stub_cache()->GenerateProbe(
+ masm, flags, x1, x2, x3, x4, x5, x6);
+
+ // Cache miss: Jump to runtime.
+ GenerateMiss(masm);
+}
+
+
+void StoreIC::GenerateMiss(MacroAssembler* masm) {
+ // ----------- S t a t e -------------
+ // -- x0 : value
+ // -- x1 : receiver
+ // -- x2 : name
+ // -- lr : return address
+ // -----------------------------------
+
+ __ Push(x1, x2, x0);
+
+ // Tail call to the entry.
+ ExternalReference ref =
+ ExternalReference(IC_Utility(kStoreIC_Miss), masm->isolate());
+ __ TailCallExternalReference(ref, 3, 1);
+}
+
+
+void StoreIC::GenerateNormal(MacroAssembler* masm) {
+ // ----------- S t a t e -------------
+ // -- x0 : value
+ // -- x1 : receiver
+ // -- x2 : name
+ // -- lr : return address
+ // -----------------------------------
+ Label miss;
+ Register value = x0;
+ Register receiver = x1;
+ Register name = x2;
+ Register dictionary = x3;
+
+ GenerateNameDictionaryReceiverCheck(
+ masm, receiver, dictionary, x4, x5, &miss);
+
+ GenerateDictionaryStore(masm, &miss, dictionary, name, value, x4, x5);
+ Counters* counters = masm->isolate()->counters();
+ __ IncrementCounter(counters->store_normal_hit(), 1, x4, x5);
+ __ Ret();
+
+ // Cache miss: Jump to runtime.
+ __ Bind(&miss);
+ __ IncrementCounter(counters->store_normal_miss(), 1, x4, x5);
+ GenerateMiss(masm);
+}
+
+
+void StoreIC::GenerateRuntimeSetProperty(MacroAssembler* masm,
+ StrictMode strict_mode) {
+ ASM_LOCATION("StoreIC::GenerateRuntimeSetProperty");
+ // ----------- S t a t e -------------
+ // -- x0 : value
+ // -- x1 : receiver
+ // -- x2 : name
+ // -- lr : return address
+ // -----------------------------------
+
+ __ Push(x1, x2, x0);
+
+ __ Mov(x11, Smi::FromInt(NONE)); // PropertyAttributes
+ __ Mov(x10, Smi::FromInt(strict_mode));
+ __ Push(x11, x10);
+
+ // Do tail-call to runtime routine.
+ __ TailCallRuntime(Runtime::kSetProperty, 5, 1);
+}
+
+
+void StoreIC::GenerateSlow(MacroAssembler* masm) {
+ // ---------- S t a t e --------------
+ // -- x0 : value
+ // -- x1 : receiver
+ // -- x2 : name
+ // -- lr : return address
+ // -----------------------------------
+
+ // Push receiver, name and value for runtime call.
+ __ Push(x1, x2, x0);
+
+ // The slow case calls into the runtime to complete the store without causing
+ // an IC miss that would otherwise cause a transition to the generic stub.
+ ExternalReference ref =
+ ExternalReference(IC_Utility(kStoreIC_Slow), masm->isolate());
+ __ TailCallExternalReference(ref, 3, 1);
+}
+
+
+Condition CompareIC::ComputeCondition(Token::Value op) {
+ switch (op) {
+ case Token::EQ_STRICT:
+ case Token::EQ:
+ return eq;
+ case Token::LT:
+ return lt;
+ case Token::GT:
+ return gt;
+ case Token::LTE:
+ return le;
+ case Token::GTE:
+ return ge;
+ default:
+ UNREACHABLE();
+ return al;
+ }
+}
+
+
+bool CompareIC::HasInlinedSmiCode(Address address) {
+ // The address of the instruction following the call.
+ Address info_address =
+ Assembler::return_address_from_call_start(address);
+
+ InstructionSequence* patch_info = InstructionSequence::At(info_address);
+ return patch_info->IsInlineData();
+}
+
+
+// Activate a SMI fast-path by patching the instructions generated by
+// JumpPatchSite::EmitJumpIf(Not)Smi(), using the information encoded by
+// JumpPatchSite::EmitPatchInfo().
+void PatchInlinedSmiCode(Address address, InlinedSmiCheck check) {
+ // The patch information is encoded in the instruction stream using
+ // instructions which have no side effects, so we can safely execute them.
+ // The patch information is encoded directly after the call to the helper
+ // function which is requesting this patch operation.
+ Address info_address =
+ Assembler::return_address_from_call_start(address);
+ InlineSmiCheckInfo info(info_address);
+
+ // Check and decode the patch information instruction.
+ if (!info.HasSmiCheck()) {
+ return;
+ }
+
+ if (FLAG_trace_ic) {
+ PrintF("[ Patching ic at %p, marker=%p, SMI check=%p\n",
+ address, info_address, reinterpret_cast<void*>(info.SmiCheck()));
+ }
+
+ // Patch and activate code generated by JumpPatchSite::EmitJumpIfNotSmi()
+ // and JumpPatchSite::EmitJumpIfSmi().
+ // Changing
+ // tb(n)z xzr, #0, <target>
+ // to
+ // tb(!n)z test_reg, #0, <target>
+ Instruction* to_patch = info.SmiCheck();
+ PatchingAssembler patcher(to_patch, 1);
+ ASSERT(to_patch->IsTestBranch());
+ ASSERT(to_patch->ImmTestBranchBit5() == 0);
+ ASSERT(to_patch->ImmTestBranchBit40() == 0);
+
+ STATIC_ASSERT(kSmiTag == 0);
+ STATIC_ASSERT(kSmiTagMask == 1);
+
+ int branch_imm = to_patch->ImmTestBranch();
+ Register smi_reg;
+ if (check == ENABLE_INLINED_SMI_CHECK) {
+ ASSERT(to_patch->Rt() == xzr.code());
+ smi_reg = info.SmiRegister();
+ } else {
+ ASSERT(check == DISABLE_INLINED_SMI_CHECK);
+ ASSERT(to_patch->Rt() != xzr.code());
+ smi_reg = xzr;
+ }
+
+ if (to_patch->Mask(TestBranchMask) == TBZ) {
+ // This is JumpIfNotSmi(smi_reg, branch_imm).
+ patcher.tbnz(smi_reg, 0, branch_imm);
+ } else {
+ ASSERT(to_patch->Mask(TestBranchMask) == TBNZ);
+ // This is JumpIfSmi(smi_reg, branch_imm).
+ patcher.tbz(smi_reg, 0, branch_imm);
+ }
+}
+
+
+} } // namespace v8::internal
+
+#endif // V8_TARGET_ARCH_ARM64
diff --git a/deps/v8/src/arm64/instructions-arm64.cc b/deps/v8/src/arm64/instructions-arm64.cc
new file mode 100644
index 0000000000..4d1428a150
--- /dev/null
+++ b/deps/v8/src/arm64/instructions-arm64.cc
@@ -0,0 +1,333 @@
+// Copyright 2013 the V8 project authors. All rights reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following
+// disclaimer in the documentation and/or other materials provided
+// with the distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived
+// from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+#include "v8.h"
+
+#if V8_TARGET_ARCH_ARM64
+
+#define ARM64_DEFINE_FP_STATICS
+
+#include "arm64/instructions-arm64.h"
+#include "arm64/assembler-arm64-inl.h"
+
+namespace v8 {
+namespace internal {
+
+
+bool Instruction::IsLoad() const {
+ if (Mask(LoadStoreAnyFMask) != LoadStoreAnyFixed) {
+ return false;
+ }
+
+ if (Mask(LoadStorePairAnyFMask) == LoadStorePairAnyFixed) {
+ return Mask(LoadStorePairLBit) != 0;
+ } else {
+ LoadStoreOp op = static_cast<LoadStoreOp>(Mask(LoadStoreOpMask));
+ switch (op) {
+ case LDRB_w:
+ case LDRH_w:
+ case LDR_w:
+ case LDR_x:
+ case LDRSB_w:
+ case LDRSB_x:
+ case LDRSH_w:
+ case LDRSH_x:
+ case LDRSW_x:
+ case LDR_s:
+ case LDR_d: return true;
+ default: return false;
+ }
+ }
+}
+
+
+bool Instruction::IsStore() const {
+ if (Mask(LoadStoreAnyFMask) != LoadStoreAnyFixed) {
+ return false;
+ }
+
+ if (Mask(LoadStorePairAnyFMask) == LoadStorePairAnyFixed) {
+ return Mask(LoadStorePairLBit) == 0;
+ } else {
+ LoadStoreOp op = static_cast<LoadStoreOp>(Mask(LoadStoreOpMask));
+ switch (op) {
+ case STRB_w:
+ case STRH_w:
+ case STR_w:
+ case STR_x:
+ case STR_s:
+ case STR_d: return true;
+ default: return false;
+ }
+ }
+}
+
+
+static uint64_t RotateRight(uint64_t value,
+ unsigned int rotate,
+ unsigned int width) {
+ ASSERT(width <= 64);
+ rotate &= 63;
+ return ((value & ((1UL << rotate) - 1UL)) << (width - rotate)) |
+ (value >> rotate);
+}
+
+
+static uint64_t RepeatBitsAcrossReg(unsigned reg_size,
+ uint64_t value,
+ unsigned width) {
+ ASSERT((width == 2) || (width == 4) || (width == 8) || (width == 16) ||
+ (width == 32));
+ ASSERT((reg_size == kWRegSizeInBits) || (reg_size == kXRegSizeInBits));
+ uint64_t result = value & ((1UL << width) - 1UL);
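+ // Keep doubling the pattern until it fills the whole register.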
+ for (unsigned i = width; i < reg_size; i *= 2) {
+ result |= (result << i);
+ }
+ return result;
+}
+
+
+// Logical immediates can't encode zero, so a return value of zero is used to
+// indicate a failure case, specifically where the constraints on imm_s are
+// not met.
+uint64_t Instruction::ImmLogical() {
+ unsigned reg_size = SixtyFourBits() ? kXRegSizeInBits : kWRegSizeInBits;
+ int64_t n = BitN();
+ int64_t imm_s = ImmSetBits();
+ int64_t imm_r = ImmRotate();
+
+ // An integer is constructed from the n, imm_s and imm_r bits according to
+ // the following table:
+ //
+ // N imms immr size S R
+ // 1 ssssss rrrrrr 64 UInt(ssssss) UInt(rrrrrr)
+ // 0 0sssss xrrrrr 32 UInt(sssss) UInt(rrrrr)
+ // 0 10ssss xxrrrr 16 UInt(ssss) UInt(rrrr)
+ // 0 110sss xxxrrr 8 UInt(sss) UInt(rrr)
+ // 0 1110ss xxxxrr 4 UInt(ss) UInt(rr)
+ // 0 11110s xxxxxr 2 UInt(s) UInt(r)
+ // (s bits must not be all set)
+ //
+ // A pattern is constructed of size bits, where the least significant S+1
+ // bits are set. The pattern is rotated right by R, and repeated across a
+ // 32 or 64-bit value, depending on destination register width.
+ //
+
+ if (n == 1) {
+ if (imm_s == 0x3F) {
+ return 0;
+ }
+ uint64_t bits = (1UL << (imm_s + 1)) - 1;
+ return RotateRight(bits, imm_r, 64);
+ } else {
+ if ((imm_s >> 1) == 0x1F) {
+ return 0;
+ }
+ for (int width = 0x20; width >= 0x2; width >>= 1) {
+ if ((imm_s & width) == 0) {
+ int mask = width - 1;
+ if ((imm_s & mask) == mask) {
+ return 0;
+ }
+ uint64_t bits = (1UL << ((imm_s & mask) + 1)) - 1;
+ return RepeatBitsAcrossReg(reg_size,
+ RotateRight(bits, imm_r & mask, width),
+ width);
+ }
+ }
+ }
+ UNREACHABLE();
+ return 0;
+}
+
+
+float Instruction::ImmFP32() {
+ // ImmFP: abcdefgh (8 bits)
+ // Single: aBbb.bbbc.defg.h000.0000.0000.0000.0000 (32 bits)
+ // where B is b ^ 1
+ uint32_t bits = ImmFP();
+ uint32_t bit7 = (bits >> 7) & 0x1;
+ uint32_t bit6 = (bits >> 6) & 0x1;
+ uint32_t bit5_to_0 = bits & 0x3f;
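+ // (32 - bit6) yields a six-bit value whose top bit is the inverse of bit 6
+ // and whose lower five bits are copies of it (0b100000 or 0b011111).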
+ uint32_t result = (bit7 << 31) | ((32 - bit6) << 25) | (bit5_to_0 << 19);
+
+ return rawbits_to_float(result);
+}
+
+
+double Instruction::ImmFP64() {
+ // ImmFP: abcdefgh (8 bits)
+ // Double: aBbb.bbbb.bbcd.efgh.0000.0000.0000.0000
+ // 0000.0000.0000.0000.0000.0000.0000.0000 (64 bits)
+ // where B is b ^ 1
+ uint32_t bits = ImmFP();
+ uint64_t bit7 = (bits >> 7) & 0x1;
+ uint64_t bit6 = (bits >> 6) & 0x1;
+ uint64_t bit5_to_0 = bits & 0x3f;
+ uint64_t result = (bit7 << 63) | ((256 - bit6) << 54) | (bit5_to_0 << 48);
+
+ return rawbits_to_double(result);
+}
+
+
+LSDataSize CalcLSPairDataSize(LoadStorePairOp op) {
+ switch (op) {
+ case STP_x:
+ case LDP_x:
+ case STP_d:
+ case LDP_d: return LSDoubleWord;
+ default: return LSWord;
+ }
+}
+
+
+ptrdiff_t Instruction::ImmPCOffset() {
+ ptrdiff_t offset;
+ if (IsPCRelAddressing()) {
+ // PC-relative addressing. Only ADR is supported.
+ offset = ImmPCRel();
+ } else if (BranchType() != UnknownBranchType) {
+ // All PC-relative branches.
+ // Relative branch offsets are instruction-size-aligned.
+ offset = ImmBranch() << kInstructionSizeLog2;
+ } else {
+ // Load literal (offset from PC).
+ ASSERT(IsLdrLiteral());
+ // The offset is always shifted by 2 bits, even for loads to 64-bit
+ // registers.
+ offset = ImmLLiteral() << kInstructionSizeLog2;
+ }
+ return offset;
+}
+
+
+Instruction* Instruction::ImmPCOffsetTarget() {
+ return InstructionAtOffset(ImmPCOffset());
+}
+
+
+bool Instruction::IsValidImmPCOffset(ImmBranchType branch_type,
+ int32_t offset) {
+ return is_intn(offset, ImmBranchRangeBitwidth(branch_type));
+}
+
+
+bool Instruction::IsTargetInImmPCOffsetRange(Instruction* target) {
+ return IsValidImmPCOffset(BranchType(), DistanceTo(target));
+}
+
+
+void Instruction::SetImmPCOffsetTarget(Instruction* target) {
+ if (IsPCRelAddressing()) {
+ SetPCRelImmTarget(target);
+ } else if (BranchType() != UnknownBranchType) {
+ SetBranchImmTarget(target);
+ } else {
+ SetImmLLiteral(target);
+ }
+}
+
+
+void Instruction::SetPCRelImmTarget(Instruction* target) {
+ // ADRP is not supported, so 'this' must point to an ADR instruction.
+ ASSERT(Mask(PCRelAddressingMask) == ADR);
+
+ Instr imm = Assembler::ImmPCRelAddress(DistanceTo(target));
+
+ SetInstructionBits(Mask(~ImmPCRel_mask) | imm);
+}
+
+
+void Instruction::SetBranchImmTarget(Instruction* target) {
+ ASSERT(IsAligned(DistanceTo(target), kInstructionSize));
+ Instr branch_imm = 0;
+ uint32_t imm_mask = 0;
+ ptrdiff_t offset = DistanceTo(target) >> kInstructionSizeLog2;
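+ // Branch immediates are encoded in units of instructions, so convert the
+ // byte offset to an instruction count.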
+ switch (BranchType()) {
+ case CondBranchType: {
+ branch_imm = Assembler::ImmCondBranch(offset);
+ imm_mask = ImmCondBranch_mask;
+ break;
+ }
+ case UncondBranchType: {
+ branch_imm = Assembler::ImmUncondBranch(offset);
+ imm_mask = ImmUncondBranch_mask;
+ break;
+ }
+ case CompareBranchType: {
+ branch_imm = Assembler::ImmCmpBranch(offset);
+ imm_mask = ImmCmpBranch_mask;
+ break;
+ }
+ case TestBranchType: {
+ branch_imm = Assembler::ImmTestBranch(offset);
+ imm_mask = ImmTestBranch_mask;
+ break;
+ }
+ default: UNREACHABLE();
+ }
+ SetInstructionBits(Mask(~imm_mask) | branch_imm);
+}
+
+
+void Instruction::SetImmLLiteral(Instruction* source) {
+ ASSERT(IsAligned(DistanceTo(source), kInstructionSize));
+ ptrdiff_t offset = DistanceTo(source) >> kLiteralEntrySizeLog2;
+ Instr imm = Assembler::ImmLLiteral(offset);
+ Instr mask = ImmLLiteral_mask;
+
+ SetInstructionBits(Mask(~mask) | imm);
+}
+
+
+// TODO(jbramley): We can't put this inline in the class because things like
+// xzr and Register are not defined in that header. Consider adding
+// instructions-arm64-inl.h to work around this.
+bool InstructionSequence::IsInlineData() const {
+ // Inline data is encoded as a single movz instruction which writes to xzr
+ // (x31).
+ return IsMovz() && SixtyFourBits() && (Rd() == xzr.code());
+ // TODO(all): If we extend ::InlineData() to support bigger data, we need
+ // to update this method too.
+}
+
+
+// TODO(jbramley): We can't put this inline in the class because things like
+// xzr and Register are not defined in that header. Consider adding
+// instructions-arm64-inl.h to work around this.
+uint64_t InstructionSequence::InlineData() const {
+ ASSERT(IsInlineData());
+ uint64_t payload = ImmMoveWide();
+ // TODO(all): If we extend ::InlineData() to support bigger data, we need
+ // to update this method too.
+ return payload;
+}
+
+
+} } // namespace v8::internal
+
+#endif // V8_TARGET_ARCH_ARM64
diff --git a/deps/v8/src/arm64/instructions-arm64.h b/deps/v8/src/arm64/instructions-arm64.h
new file mode 100644
index 0000000000..ab64cb2bf0
--- /dev/null
+++ b/deps/v8/src/arm64/instructions-arm64.h
@@ -0,0 +1,501 @@
+// Copyright 2013 the V8 project authors. All rights reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following
+// disclaimer in the documentation and/or other materials provided
+// with the distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived
+// from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+#ifndef V8_ARM64_INSTRUCTIONS_ARM64_H_
+#define V8_ARM64_INSTRUCTIONS_ARM64_H_
+
+#include "globals.h"
+#include "utils.h"
+#include "arm64/constants-arm64.h"
+#include "arm64/utils-arm64.h"
+
+namespace v8 {
+namespace internal {
+
+
+// ISA constants. --------------------------------------------------------------
+
+typedef uint32_t Instr;
+
+// The following macros initialize a float/double variable with a bit pattern
+// without using static initializers: If ARM64_DEFINE_FP_STATICS is defined, the
+// symbol is defined as uint32_t/uint64_t initialized with the desired bit
+// pattern. Otherwise, the same symbol is declared as an external float/double.
+#if defined(ARM64_DEFINE_FP_STATICS)
+#define DEFINE_FLOAT(name, value) extern const uint32_t name = value
+#define DEFINE_DOUBLE(name, value) extern const uint64_t name = value
+#else
+#define DEFINE_FLOAT(name, value) extern const float name
+#define DEFINE_DOUBLE(name, value) extern const double name
+#endif // defined(ARM64_DEFINE_FP_STATICS)
+
+DEFINE_FLOAT(kFP32PositiveInfinity, 0x7f800000);
+DEFINE_FLOAT(kFP32NegativeInfinity, 0xff800000);
+DEFINE_DOUBLE(kFP64PositiveInfinity, 0x7ff0000000000000UL);
+DEFINE_DOUBLE(kFP64NegativeInfinity, 0xfff0000000000000UL);
+
+// This value is a signalling NaN as both a double and as a float (taking the
+// least-significant word).
+DEFINE_DOUBLE(kFP64SignallingNaN, 0x7ff000007f800001);
+DEFINE_FLOAT(kFP32SignallingNaN, 0x7f800001);
+
+// A similar value, but as a quiet NaN.
+DEFINE_DOUBLE(kFP64QuietNaN, 0x7ff800007fc00001);
+DEFINE_FLOAT(kFP32QuietNaN, 0x7fc00001);
+
+// The default NaN values (for FPCR.DN=1).
+DEFINE_DOUBLE(kFP64DefaultNaN, 0x7ff8000000000000UL);
+DEFINE_FLOAT(kFP32DefaultNaN, 0x7fc00000);
+
+#undef DEFINE_FLOAT
+#undef DEFINE_DOUBLE
+
+
+enum LSDataSize {
+ LSByte = 0,
+ LSHalfword = 1,
+ LSWord = 2,
+ LSDoubleWord = 3
+};
+
+LSDataSize CalcLSPairDataSize(LoadStorePairOp op);
+
+enum ImmBranchType {
+ UnknownBranchType = 0,
+ CondBranchType = 1,
+ UncondBranchType = 2,
+ CompareBranchType = 3,
+ TestBranchType = 4
+};
+
+enum AddrMode {
+ Offset,
+ PreIndex,
+ PostIndex
+};
+
+enum FPRounding {
+ // The first four values are encodable directly by FPCR<RMode>.
+ FPTieEven = 0x0,
+ FPPositiveInfinity = 0x1,
+ FPNegativeInfinity = 0x2,
+ FPZero = 0x3,
+
+ // The final rounding mode is only available when explicitly specified by the
+ // instruction (such as with fcvta). It cannot be set in FPCR.
+ FPTieAway
+};
+
+enum Reg31Mode {
+ Reg31IsStackPointer,
+ Reg31IsZeroRegister
+};
+
+// Instructions. ---------------------------------------------------------------
+
+class Instruction {
+ public:
+ V8_INLINE Instr InstructionBits() const {
+ return *reinterpret_cast<const Instr*>(this);
+ }
+
+ V8_INLINE void SetInstructionBits(Instr new_instr) {
+ *reinterpret_cast<Instr*>(this) = new_instr;
+ }
+
+ int Bit(int pos) const {
+ return (InstructionBits() >> pos) & 1;
+ }
+
+ uint32_t Bits(int msb, int lsb) const {
+ return unsigned_bitextract_32(msb, lsb, InstructionBits());
+ }
+
+ int32_t SignedBits(int msb, int lsb) const {
+ int32_t bits = *(reinterpret_cast<const int32_t*>(this));
+ return signed_bitextract_32(msb, lsb, bits);
+ }
+
+ Instr Mask(uint32_t mask) const {
+ return InstructionBits() & mask;
+ }
+
+ V8_INLINE Instruction* following(int count = 1) {
+ return InstructionAtOffset(count * static_cast<int>(kInstructionSize));
+ }
+
+ V8_INLINE Instruction* preceding(int count = 1) {
+ return following(-count);
+ }
+
+ #define DEFINE_GETTER(Name, HighBit, LowBit, Func) \
+ int64_t Name() const { return Func(HighBit, LowBit); }
+ INSTRUCTION_FIELDS_LIST(DEFINE_GETTER)
+ #undef DEFINE_GETTER
+
+ // ImmPCRel is a compound field (not present in INSTRUCTION_FIELDS_LIST),
+ // formed from ImmPCRelLo and ImmPCRelHi.
+ int ImmPCRel() const {
+ int const offset = ((ImmPCRelHi() << ImmPCRelLo_width) | ImmPCRelLo());
+ int const width = ImmPCRelLo_width + ImmPCRelHi_width;
+ return signed_bitextract_32(width-1, 0, offset);
+ }
+
+ uint64_t ImmLogical();
+ float ImmFP32();
+ double ImmFP64();
+
+ LSDataSize SizeLSPair() const {
+ return CalcLSPairDataSize(
+ static_cast<LoadStorePairOp>(Mask(LoadStorePairMask)));
+ }
+
+ // Helpers.
+ bool IsCondBranchImm() const {
+ return Mask(ConditionalBranchFMask) == ConditionalBranchFixed;
+ }
+
+ bool IsUncondBranchImm() const {
+ return Mask(UnconditionalBranchFMask) == UnconditionalBranchFixed;
+ }
+
+ bool IsCompareBranch() const {
+ return Mask(CompareBranchFMask) == CompareBranchFixed;
+ }
+
+ bool IsTestBranch() const {
+ return Mask(TestBranchFMask) == TestBranchFixed;
+ }
+
+ bool IsLdrLiteral() const {
+ return Mask(LoadLiteralFMask) == LoadLiteralFixed;
+ }
+
+ bool IsLdrLiteralX() const {
+ return Mask(LoadLiteralMask) == LDR_x_lit;
+ }
+
+ bool IsPCRelAddressing() const {
+ return Mask(PCRelAddressingFMask) == PCRelAddressingFixed;
+ }
+
+ bool IsLogicalImmediate() const {
+ return Mask(LogicalImmediateFMask) == LogicalImmediateFixed;
+ }
+
+ bool IsAddSubImmediate() const {
+ return Mask(AddSubImmediateFMask) == AddSubImmediateFixed;
+ }
+
+ bool IsAddSubExtended() const {
+ return Mask(AddSubExtendedFMask) == AddSubExtendedFixed;
+ }
+
+ // Match any loads or stores, including pairs.
+ bool IsLoadOrStore() const {
+ return Mask(LoadStoreAnyFMask) == LoadStoreAnyFixed;
+ }
+
+ // Match any loads, including pairs.
+ bool IsLoad() const;
+ // Match any stores, including pairs.
+ bool IsStore() const;
+
+ // Indicate whether Rd can be the stack pointer or the zero register. This
+ // does not check that the instruction actually has an Rd field.
+ Reg31Mode RdMode() const {
+ // The following instructions use csp or wsp as Rd:
+ // Add/sub (immediate) when not setting the flags.
+ // Add/sub (extended) when not setting the flags.
+ // Logical (immediate) when not setting the flags.
+ // Otherwise, r31 is the zero register.
+ if (IsAddSubImmediate() || IsAddSubExtended()) {
+ if (Mask(AddSubSetFlagsBit)) {
+ return Reg31IsZeroRegister;
+ } else {
+ return Reg31IsStackPointer;
+ }
+ }
+ if (IsLogicalImmediate()) {
+ // Of the logical (immediate) instructions, only ANDS (and its aliases)
+ // can set the flags. The others can all write into csp.
+ // Note that some logical operations are not available to
+ // immediate-operand instructions, so we have to combine two masks here.
+ if (Mask(LogicalImmediateMask & LogicalOpMask) == ANDS) {
+ return Reg31IsZeroRegister;
+ } else {
+ return Reg31IsStackPointer;
+ }
+ }
+ return Reg31IsZeroRegister;
+ }
+
+ // Indicate whether Rn can be the stack pointer or the zero register. This
+ // does not check that the instruction actually has an Rn field.
+ Reg31Mode RnMode() const {
+ // The following instructions use csp or wsp as Rn:
+ // All loads and stores.
+ // Add/sub (immediate).
+ // Add/sub (extended).
+ // Otherwise, r31 is the zero register.
+ if (IsLoadOrStore() || IsAddSubImmediate() || IsAddSubExtended()) {
+ return Reg31IsStackPointer;
+ }
+ return Reg31IsZeroRegister;
+ }
+
+ ImmBranchType BranchType() const {
+ if (IsCondBranchImm()) {
+ return CondBranchType;
+ } else if (IsUncondBranchImm()) {
+ return UncondBranchType;
+ } else if (IsCompareBranch()) {
+ return CompareBranchType;
+ } else if (IsTestBranch()) {
+ return TestBranchType;
+ } else {
+ return UnknownBranchType;
+ }
+ }
+
+ static int ImmBranchRangeBitwidth(ImmBranchType branch_type) {
+ switch (branch_type) {
+ case UncondBranchType:
+ return ImmUncondBranch_width;
+ case CondBranchType:
+ return ImmCondBranch_width;
+ case CompareBranchType:
+ return ImmCmpBranch_width;
+ case TestBranchType:
+ return ImmTestBranch_width;
+ default:
+ UNREACHABLE();
+ return 0;
+ }
+ }
+
+ // The range of the branch instruction, expressed as 'instr +- range'.
+ static int32_t ImmBranchRange(ImmBranchType branch_type) {
+ return
+ (1 << (ImmBranchRangeBitwidth(branch_type) + kInstructionSizeLog2)) / 2 -
+ kInstructionSize;
+ }
+
+ int ImmBranch() const {
+ switch (BranchType()) {
+ case CondBranchType: return ImmCondBranch();
+ case UncondBranchType: return ImmUncondBranch();
+ case CompareBranchType: return ImmCmpBranch();
+ case TestBranchType: return ImmTestBranch();
+ default: UNREACHABLE();
+ }
+ return 0;
+ }
+
+ bool IsBranchAndLinkToRegister() const {
+ return Mask(UnconditionalBranchToRegisterMask) == BLR;
+ }
+
+ bool IsMovz() const {
+ return (Mask(MoveWideImmediateMask) == MOVZ_x) ||
+ (Mask(MoveWideImmediateMask) == MOVZ_w);
+ }
+
+ bool IsMovk() const {
+ return (Mask(MoveWideImmediateMask) == MOVK_x) ||
+ (Mask(MoveWideImmediateMask) == MOVK_w);
+ }
+
+ bool IsMovn() const {
+ return (Mask(MoveWideImmediateMask) == MOVN_x) ||
+ (Mask(MoveWideImmediateMask) == MOVN_w);
+ }
+
+ bool IsNop(int n) {
+ // A marking nop is an instruction
+ // mov r<n>, r<n>
+ // which is encoded as
+ // orr r<n>, xzr, r<n>
+ return (Mask(LogicalShiftedMask) == ORR_x) &&
+ (Rd() == Rm()) &&
+ (Rd() == n);
+ }
+
+ // Find the PC offset encoded in this instruction. 'this' may be a branch or
+ // a PC-relative addressing instruction.
+ // The offset returned is unscaled.
+ ptrdiff_t ImmPCOffset();
+
+ // Find the target of this instruction. 'this' may be a branch or a
+ // PC-relative addressing instruction.
+ Instruction* ImmPCOffsetTarget();
+
+ static bool IsValidImmPCOffset(ImmBranchType branch_type, int32_t offset);
+ bool IsTargetInImmPCOffsetRange(Instruction* target);
+ // Patch a PC-relative offset to refer to 'target'. 'this' may be a branch or
+ // a PC-relative addressing instruction.
+ void SetImmPCOffsetTarget(Instruction* target);
+ // Patch a literal load instruction to load from 'source'.
+ void SetImmLLiteral(Instruction* source);
+
+ uint8_t* LiteralAddress() {
+ int offset = ImmLLiteral() << kLiteralEntrySizeLog2;
+ return reinterpret_cast<uint8_t*>(this) + offset;
+ }
+
+ enum CheckAlignment { NO_CHECK, CHECK_ALIGNMENT };
+
+ V8_INLINE Instruction* InstructionAtOffset(
+ int64_t offset,
+ CheckAlignment check = CHECK_ALIGNMENT) {
+ Address addr = reinterpret_cast<Address>(this) + offset;
+ // The FUZZ_disasm test relies on no check being done.
+ ASSERT(check == NO_CHECK || IsAddressAligned(addr, kInstructionSize));
+ return Cast(addr);
+ }
+
+ template<typename T> V8_INLINE static Instruction* Cast(T src) {
+ return reinterpret_cast<Instruction*>(src);
+ }
+
+ V8_INLINE ptrdiff_t DistanceTo(Instruction* target) {
+ return reinterpret_cast<Address>(target) - reinterpret_cast<Address>(this);
+ }
+
+
+ void SetPCRelImmTarget(Instruction* target);
+ void SetBranchImmTarget(Instruction* target);
+};
+
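+// Typical use (illustrative sketch): code that inspects generated code casts
+// a raw pc to an Instruction* and queries it through the accessors above.
+//
+//   Instruction* instr = Instruction::Cast(pc);
+//   if (instr->BranchType() != UnknownBranchType) {
+//     Instruction* target = instr->ImmPCOffsetTarget();
+//     // ... examine or retarget the branch ...
+//   }
+//   instr = instr->following();  // Step to the next instruction.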
+
+// Where Instruction looks at instructions generated by the Assembler,
+// InstructionSequence looks at instruction sequences generated by the
+// MacroAssembler.
+class InstructionSequence : public Instruction {
+ public:
+ static InstructionSequence* At(Address address) {
+ return reinterpret_cast<InstructionSequence*>(address);
+ }
+
+ // Sequences generated by MacroAssembler::InlineData().
+ bool IsInlineData() const;
+ uint64_t InlineData() const;
+};
+
+
+// Simulator/Debugger debug instructions ---------------------------------------
+// Each debug marker is represented by a HLT instruction. The immediate comment
+// field in the instruction is used to identify the type of debug marker. Each
+// marker encodes arguments in a different way, as described below.
+
+// Indicate to the Debugger that the instruction is a redirected call.
+const Instr kImmExceptionIsRedirectedCall = 0xca11;
+
+// Represent unreachable code. This is used as a guard in parts of the code that
+// should not be reachable, such as in data encoded inline in the instructions.
+const Instr kImmExceptionIsUnreachable = 0xdebf;
+
+// A pseudo 'printf' instruction. The arguments will be passed to the platform
+// printf method.
+const Instr kImmExceptionIsPrintf = 0xdeb1;
+// Parameters are stored in ARM64 registers as if the printf pseudo-instruction
+// was a call to the real printf method:
+//
+// x0: The format string, then either of:
+// x1-x7: Optional arguments.
+// d0-d7: Optional arguments.
+//
+// Floating-point and integer arguments are passed in separate sets of
+// registers in AAPCS64 (even for varargs functions), so it is not possible to
+// determine the type or location of each argument without some information
+// about the values that were passed in. This information could be retrieved
+// from the printf format string, but the format string is not trivial to
+// parse so we encode the relevant information with the HLT instruction.
+// - Type
+// Either kRegister or kFPRegister, but stored as a uint32_t because there's
+// no way to guarantee the size of the CPURegister::RegisterType enum.
+const unsigned kPrintfTypeOffset = 1 * kInstructionSize;
+const unsigned kPrintfLength = 2 * kInstructionSize;
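+
+// A printf marker therefore occupies kPrintfLength bytes in the instruction
+// stream (illustrative layout, following the description above):
+//
+//   hlt #kImmExceptionIsPrintf
+//   <type word>    ; kRegister or kFPRegister, read from kPrintfTypeOffset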
+
+// A pseudo 'debug' instruction.
+const Instr kImmExceptionIsDebug = 0xdeb0;
+// Parameters are inlined in the code after a debug pseudo-instruction:
+// - Debug code.
+// - Debug parameters.
+// - Debug message string. This is a NULL-terminated ASCII string, padded to
+// kInstructionSize so that subsequent instructions are correctly aligned.
+// - A kImmExceptionIsUnreachable marker, to catch accidental execution of the
+// string data.
+const unsigned kDebugCodeOffset = 1 * kInstructionSize;
+const unsigned kDebugParamsOffset = 2 * kInstructionSize;
+const unsigned kDebugMessageOffset = 3 * kInstructionSize;
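+
+// A debug marker therefore looks like this in the instruction stream
+// (illustrative layout, following the description above):
+//
+//   hlt #kImmExceptionIsDebug
+//   <debug code>         ; at kDebugCodeOffset
+//   <debug parameters>   ; at kDebugParamsOffset
+//   "message\0" ...      ; from kDebugMessageOffset, padded to
+//                        ; kInstructionSize
+//   hlt #kImmExceptionIsUnreachable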
+
+// Debug parameters.
+// Used without a TRACE_ option, the Debugger will print the arguments only
+// once. Otherwise TRACE_ENABLE and TRACE_DISABLE will enable or disable tracing
+// before every instruction for the specified LOG_ parameters.
+//
+// TRACE_OVERRIDE enables the specified LOG_ parameters, and disables any
+// others that were not specified.
+//
+// For example:
+//
+// __ debug("print registers and fp registers", 0, LOG_REGS | LOG_FP_REGS);
+// will print the registers and fp registers only once.
+//
+// __ debug("trace disasm", 1, TRACE_ENABLE | LOG_DISASM);
+// starts disassembling the code.
+//
+// __ debug("trace rets", 2, TRACE_ENABLE | LOG_REGS);
+// adds the general purpose registers to the trace.
+//
+// __ debug("stop regs", 3, TRACE_DISABLE | LOG_REGS);
+// stops tracing the registers.
+const unsigned kDebuggerTracingDirectivesMask = 3 << 6;
+enum DebugParameters {
+ NO_PARAM = 0,
+ BREAK = 1 << 0,
+ LOG_DISASM = 1 << 1, // Use only with TRACE. Disassemble the code.
+ LOG_REGS = 1 << 2, // Log general purpose registers.
+ LOG_FP_REGS = 1 << 3, // Log floating-point registers.
+ LOG_SYS_REGS = 1 << 4, // Log the status flags.
+ LOG_WRITE = 1 << 5, // Log any memory write.
+
+ LOG_STATE = LOG_REGS | LOG_FP_REGS | LOG_SYS_REGS,
+ LOG_ALL = LOG_DISASM | LOG_STATE | LOG_WRITE,
+
+ // Trace control.
+ TRACE_ENABLE = 1 << 6,
+ TRACE_DISABLE = 2 << 6,
+ TRACE_OVERRIDE = 3 << 6
+};
+
+
+} } // namespace v8::internal
+
+
+#endif // V8_ARM64_INSTRUCTIONS_ARM64_H_
diff --git a/deps/v8/src/arm64/instrument-arm64.cc b/deps/v8/src/arm64/instrument-arm64.cc
new file mode 100644
index 0000000000..6744707fde
--- /dev/null
+++ b/deps/v8/src/arm64/instrument-arm64.cc
@@ -0,0 +1,618 @@
+// Copyright 2013 the V8 project authors. All rights reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following
+// disclaimer in the documentation and/or other materials provided
+// with the distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived
+// from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+#include "arm64/instrument-arm64.h"
+
+namespace v8 {
+namespace internal {
+
+Counter::Counter(const char* name, CounterType type)
+ : count_(0), enabled_(false), type_(type) {
+ ASSERT(name != NULL);
+ strncpy(name_, name, kCounterNameMaxLength);
+}
+
+
+void Counter::Enable() {
+ enabled_ = true;
+}
+
+
+void Counter::Disable() {
+ enabled_ = false;
+}
+
+
+bool Counter::IsEnabled() {
+ return enabled_;
+}
+
+
+void Counter::Increment() {
+ if (enabled_) {
+ count_++;
+ }
+}
+
+
+uint64_t Counter::count() {
+ uint64_t result = count_;
+ if (type_ == Gauge) {
+ // If the counter is a Gauge, reset the count after reading.
+ count_ = 0;
+ }
+ return result;
+}
+
+
+const char* Counter::name() {
+ return name_;
+}
+
+
+CounterType Counter::type() {
+ return type_;
+}
+
+
+struct CounterDescriptor {
+  const char* name;
+  CounterType type;
+};
+
+
+static const CounterDescriptor kCounterList[] = {
+ {"Instruction", Cumulative},
+
+ {"Move Immediate", Gauge},
+ {"Add/Sub DP", Gauge},
+ {"Logical DP", Gauge},
+ {"Other Int DP", Gauge},
+ {"FP DP", Gauge},
+
+ {"Conditional Select", Gauge},
+ {"Conditional Compare", Gauge},
+
+ {"Unconditional Branch", Gauge},
+ {"Compare and Branch", Gauge},
+ {"Test and Branch", Gauge},
+ {"Conditional Branch", Gauge},
+
+ {"Load Integer", Gauge},
+ {"Load FP", Gauge},
+ {"Load Pair", Gauge},
+ {"Load Literal", Gauge},
+
+ {"Store Integer", Gauge},
+ {"Store FP", Gauge},
+ {"Store Pair", Gauge},
+
+ {"PC Addressing", Gauge},
+ {"Other", Gauge},
+ {"SP Adjust", Gauge},
+};
+
+
+Instrument::Instrument(const char* datafile, uint64_t sample_period)
+ : output_stream_(stderr), sample_period_(sample_period) {
+
+ // Set up the output stream. If datafile is non-NULL, use that file. If it
+ // can't be opened, or datafile is NULL, use stderr.
+ if (datafile != NULL) {
+ output_stream_ = fopen(datafile, "w");
+ if (output_stream_ == NULL) {
+ fprintf(stderr, "Can't open output file %s. Using stderr.\n", datafile);
+ output_stream_ = stderr;
+ }
+ }
+
+ static const int num_counters =
+ sizeof(kCounterList) / sizeof(CounterDescriptor);
+
+ // Dump an instrumentation description comment at the top of the file.
+ fprintf(output_stream_, "# counters=%d\n", num_counters);
+ fprintf(output_stream_, "# sample_period=%" PRIu64 "\n", sample_period_);
+
+ // Construct Counter objects from counter description array.
+ for (int i = 0; i < num_counters; i++) {
+ Counter* counter = new Counter(kCounterList[i].name, kCounterList[i].type);
+ counters_.push_back(counter);
+ }
+
+ DumpCounterNames();
+}
+
+
+Instrument::~Instrument() {
+ // Dump any remaining instruction data to the output file.
+ DumpCounters();
+
+ // Free all the counter objects.
+ std::list<Counter*>::iterator it;
+ for (it = counters_.begin(); it != counters_.end(); it++) {
+ delete *it;
+ }
+
+ if (output_stream_ != stderr) {
+ fclose(output_stream_);
+ }
+}
+
+
+void Instrument::Update() {
+ // Increment the instruction counter, and dump all counters if a sample period
+ // has elapsed.
+ static Counter* counter = GetCounter("Instruction");
+ ASSERT(counter->type() == Cumulative);
+ counter->Increment();
+
+ if (counter->IsEnabled() && (counter->count() % sample_period_) == 0) {
+ DumpCounters();
+ }
+}
+
+
+void Instrument::DumpCounters() {
+ // Iterate through the counter objects, dumping their values to the output
+ // stream.
+ std::list<Counter*>::const_iterator it;
+ for (it = counters_.begin(); it != counters_.end(); it++) {
+ fprintf(output_stream_, "%" PRIu64 ",", (*it)->count());
+ }
+ fprintf(output_stream_, "\n");
+ fflush(output_stream_);
+}
+
+
+void Instrument::DumpCounterNames() {
+ // Iterate through the counter objects, dumping the counter names to the
+ // output stream.
+ std::list<Counter*>::const_iterator it;
+ for (it = counters_.begin(); it != counters_.end(); it++) {
+ fprintf(output_stream_, "%s,", (*it)->name());
+ }
+ fprintf(output_stream_, "\n");
+ fflush(output_stream_);
+}
+
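+// The constructor and the two Dump* methods above together produce a simple
+// CSV stream, e.g. (illustrative values, default sample period):
+//
+//   # counters=22
+//   # sample_period=4194304
+//   Instruction,Move Immediate,Add/Sub DP, ...
+//   4194304,1023,87, ...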
+
+void Instrument::HandleInstrumentationEvent(unsigned event) {
+ switch (event) {
+ case InstrumentStateEnable: Enable(); break;
+ case InstrumentStateDisable: Disable(); break;
+ default: DumpEventMarker(event);
+ }
+}
+
+
+void Instrument::DumpEventMarker(unsigned marker) {
+// Dump an event marker to the output stream as a specially formatted comment
+ // line.
+ static Counter* counter = GetCounter("Instruction");
+
+ fprintf(output_stream_, "# %c%c @ %" PRId64 "\n", marker & 0xff,
+ (marker >> 8) & 0xff, counter->count());
+}
+
+
+Counter* Instrument::GetCounter(const char* name) {
+ // Get a Counter object by name from the counter list.
+ std::list<Counter*>::const_iterator it;
+ for (it = counters_.begin(); it != counters_.end(); it++) {
+ if (strcmp((*it)->name(), name) == 0) {
+ return *it;
+ }
+ }
+
+ // A Counter by that name does not exist: print an error message to stderr
+ // and the output file, and exit.
+ static const char* error_message =
+ "# Error: Unknown counter \"%s\". Exiting.\n";
+ fprintf(stderr, error_message, name);
+ fprintf(output_stream_, error_message, name);
+ exit(1);
+}
+
+
+void Instrument::Enable() {
+ std::list<Counter*>::iterator it;
+ for (it = counters_.begin(); it != counters_.end(); it++) {
+ (*it)->Enable();
+ }
+}
+
+
+void Instrument::Disable() {
+ std::list<Counter*>::iterator it;
+ for (it = counters_.begin(); it != counters_.end(); it++) {
+ (*it)->Disable();
+ }
+}
+
+
+void Instrument::VisitPCRelAddressing(Instruction* instr) {
+ Update();
+ static Counter* counter = GetCounter("PC Addressing");
+ counter->Increment();
+}
+
+
+void Instrument::VisitAddSubImmediate(Instruction* instr) {
+ Update();
+ static Counter* sp_counter = GetCounter("SP Adjust");
+ static Counter* add_sub_counter = GetCounter("Add/Sub DP");
+ if (((instr->Mask(AddSubOpMask) == SUB) ||
+ (instr->Mask(AddSubOpMask) == ADD)) &&
+ (instr->Rd() == 31) && (instr->Rn() == 31)) {
+ // Count adjustments to the C stack pointer caused by V8 needing two SPs.
+ sp_counter->Increment();
+ } else {
+ add_sub_counter->Increment();
+ }
+}
+
+
+void Instrument::VisitLogicalImmediate(Instruction* instr) {
+ Update();
+ static Counter* counter = GetCounter("Logical DP");
+ counter->Increment();
+}
+
+
+void Instrument::VisitMoveWideImmediate(Instruction* instr) {
+ Update();
+ static Counter* counter = GetCounter("Move Immediate");
+
+ if (instr->IsMovn() && (instr->Rd() == kZeroRegCode)) {
+ unsigned imm = instr->ImmMoveWide();
+ HandleInstrumentationEvent(imm);
+ } else {
+ counter->Increment();
+ }
+}
+
+
+void Instrument::VisitBitfield(Instruction* instr) {
+ Update();
+ static Counter* counter = GetCounter("Other Int DP");
+ counter->Increment();
+}
+
+
+void Instrument::VisitExtract(Instruction* instr) {
+ Update();
+ static Counter* counter = GetCounter("Other Int DP");
+ counter->Increment();
+}
+
+
+void Instrument::VisitUnconditionalBranch(Instruction* instr) {
+ Update();
+ static Counter* counter = GetCounter("Unconditional Branch");
+ counter->Increment();
+}
+
+
+void Instrument::VisitUnconditionalBranchToRegister(Instruction* instr) {
+ Update();
+ static Counter* counter = GetCounter("Unconditional Branch");
+ counter->Increment();
+}
+
+
+void Instrument::VisitCompareBranch(Instruction* instr) {
+ Update();
+ static Counter* counter = GetCounter("Compare and Branch");
+ counter->Increment();
+}
+
+
+void Instrument::VisitTestBranch(Instruction* instr) {
+ Update();
+ static Counter* counter = GetCounter("Test and Branch");
+ counter->Increment();
+}
+
+
+void Instrument::VisitConditionalBranch(Instruction* instr) {
+ Update();
+ static Counter* counter = GetCounter("Conditional Branch");
+ counter->Increment();
+}
+
+
+void Instrument::VisitSystem(Instruction* instr) {
+ Update();
+ static Counter* counter = GetCounter("Other");
+ counter->Increment();
+}
+
+
+void Instrument::VisitException(Instruction* instr) {
+ Update();
+ static Counter* counter = GetCounter("Other");
+ counter->Increment();
+}
+
+
+void Instrument::InstrumentLoadStorePair(Instruction* instr) {
+ static Counter* load_pair_counter = GetCounter("Load Pair");
+ static Counter* store_pair_counter = GetCounter("Store Pair");
+ if (instr->Mask(LoadStorePairLBit) != 0) {
+ load_pair_counter->Increment();
+ } else {
+ store_pair_counter->Increment();
+ }
+}
+
+
+void Instrument::VisitLoadStorePairPostIndex(Instruction* instr) {
+ Update();
+ InstrumentLoadStorePair(instr);
+}
+
+
+void Instrument::VisitLoadStorePairOffset(Instruction* instr) {
+ Update();
+ InstrumentLoadStorePair(instr);
+}
+
+
+void Instrument::VisitLoadStorePairPreIndex(Instruction* instr) {
+ Update();
+ InstrumentLoadStorePair(instr);
+}
+
+
+void Instrument::VisitLoadStorePairNonTemporal(Instruction* instr) {
+ Update();
+ InstrumentLoadStorePair(instr);
+}
+
+
+void Instrument::VisitLoadLiteral(Instruction* instr) {
+ Update();
+ static Counter* counter = GetCounter("Load Literal");
+ counter->Increment();
+}
+
+
+void Instrument::InstrumentLoadStore(Instruction* instr) {
+ static Counter* load_int_counter = GetCounter("Load Integer");
+ static Counter* store_int_counter = GetCounter("Store Integer");
+ static Counter* load_fp_counter = GetCounter("Load FP");
+ static Counter* store_fp_counter = GetCounter("Store FP");
+
+ switch (instr->Mask(LoadStoreOpMask)) {
+ case STRB_w: // Fall through.
+ case STRH_w: // Fall through.
+ case STR_w: // Fall through.
+ case STR_x: store_int_counter->Increment(); break;
+ case STR_s: // Fall through.
+ case STR_d: store_fp_counter->Increment(); break;
+ case LDRB_w: // Fall through.
+ case LDRH_w: // Fall through.
+ case LDR_w: // Fall through.
+ case LDR_x: // Fall through.
+ case LDRSB_x: // Fall through.
+ case LDRSH_x: // Fall through.
+ case LDRSW_x: // Fall through.
+ case LDRSB_w: // Fall through.
+ case LDRSH_w: load_int_counter->Increment(); break;
+ case LDR_s: // Fall through.
+ case LDR_d: load_fp_counter->Increment(); break;
+ default: UNREACHABLE();
+ }
+}
+
+
+void Instrument::VisitLoadStoreUnscaledOffset(Instruction* instr) {
+ Update();
+ InstrumentLoadStore(instr);
+}
+
+
+void Instrument::VisitLoadStorePostIndex(Instruction* instr) {
+ Update();
+ InstrumentLoadStore(instr);
+}
+
+
+void Instrument::VisitLoadStorePreIndex(Instruction* instr) {
+ Update();
+ InstrumentLoadStore(instr);
+}
+
+
+void Instrument::VisitLoadStoreRegisterOffset(Instruction* instr) {
+ Update();
+ InstrumentLoadStore(instr);
+}
+
+
+void Instrument::VisitLoadStoreUnsignedOffset(Instruction* instr) {
+ Update();
+ InstrumentLoadStore(instr);
+}
+
+
+void Instrument::VisitLogicalShifted(Instruction* instr) {
+ Update();
+ static Counter* counter = GetCounter("Logical DP");
+ counter->Increment();
+}
+
+
+void Instrument::VisitAddSubShifted(Instruction* instr) {
+ Update();
+ static Counter* counter = GetCounter("Add/Sub DP");
+ counter->Increment();
+}
+
+
+void Instrument::VisitAddSubExtended(Instruction* instr) {
+ Update();
+ static Counter* sp_counter = GetCounter("SP Adjust");
+ static Counter* add_sub_counter = GetCounter("Add/Sub DP");
+ if (((instr->Mask(AddSubOpMask) == SUB) ||
+ (instr->Mask(AddSubOpMask) == ADD)) &&
+ (instr->Rd() == 31) && (instr->Rn() == 31)) {
+ // Count adjustments to the C stack pointer caused by V8 needing two SPs.
+ sp_counter->Increment();
+ } else {
+ add_sub_counter->Increment();
+ }
+}
+
+
+void Instrument::VisitAddSubWithCarry(Instruction* instr) {
+ Update();
+ static Counter* counter = GetCounter("Add/Sub DP");
+ counter->Increment();
+}
+
+
+void Instrument::VisitConditionalCompareRegister(Instruction* instr) {
+ Update();
+ static Counter* counter = GetCounter("Conditional Compare");
+ counter->Increment();
+}
+
+
+void Instrument::VisitConditionalCompareImmediate(Instruction* instr) {
+ Update();
+ static Counter* counter = GetCounter("Conditional Compare");
+ counter->Increment();
+}
+
+
+void Instrument::VisitConditionalSelect(Instruction* instr) {
+ Update();
+ static Counter* counter = GetCounter("Conditional Select");
+ counter->Increment();
+}
+
+
+void Instrument::VisitDataProcessing1Source(Instruction* instr) {
+ Update();
+ static Counter* counter = GetCounter("Other Int DP");
+ counter->Increment();
+}
+
+
+void Instrument::VisitDataProcessing2Source(Instruction* instr) {
+ Update();
+ static Counter* counter = GetCounter("Other Int DP");
+ counter->Increment();
+}
+
+
+void Instrument::VisitDataProcessing3Source(Instruction* instr) {
+ Update();
+ static Counter* counter = GetCounter("Other Int DP");
+ counter->Increment();
+}
+
+
+void Instrument::VisitFPCompare(Instruction* instr) {
+ Update();
+ static Counter* counter = GetCounter("FP DP");
+ counter->Increment();
+}
+
+
+void Instrument::VisitFPConditionalCompare(Instruction* instr) {
+ Update();
+ static Counter* counter = GetCounter("Conditional Compare");
+ counter->Increment();
+}
+
+
+void Instrument::VisitFPConditionalSelect(Instruction* instr) {
+ Update();
+ static Counter* counter = GetCounter("Conditional Select");
+ counter->Increment();
+}
+
+
+void Instrument::VisitFPImmediate(Instruction* instr) {
+ Update();
+ static Counter* counter = GetCounter("FP DP");
+ counter->Increment();
+}
+
+
+void Instrument::VisitFPDataProcessing1Source(Instruction* instr) {
+ Update();
+ static Counter* counter = GetCounter("FP DP");
+ counter->Increment();
+}
+
+
+void Instrument::VisitFPDataProcessing2Source(Instruction* instr) {
+ Update();
+ static Counter* counter = GetCounter("FP DP");
+ counter->Increment();
+}
+
+
+void Instrument::VisitFPDataProcessing3Source(Instruction* instr) {
+ Update();
+ static Counter* counter = GetCounter("FP DP");
+ counter->Increment();
+}
+
+
+void Instrument::VisitFPIntegerConvert(Instruction* instr) {
+ Update();
+ static Counter* counter = GetCounter("FP DP");
+ counter->Increment();
+}
+
+
+void Instrument::VisitFPFixedPointConvert(Instruction* instr) {
+ Update();
+ static Counter* counter = GetCounter("FP DP");
+ counter->Increment();
+}
+
+
+void Instrument::VisitUnallocated(Instruction* instr) {
+ Update();
+ static Counter* counter = GetCounter("Other");
+ counter->Increment();
+}
+
+
+void Instrument::VisitUnimplemented(Instruction* instr) {
+ Update();
+ static Counter* counter = GetCounter("Other");
+ counter->Increment();
+}
+
+
+} } // namespace v8::internal
diff --git a/deps/v8/src/arm64/instrument-arm64.h b/deps/v8/src/arm64/instrument-arm64.h
new file mode 100644
index 0000000000..996cc07acb
--- /dev/null
+++ b/deps/v8/src/arm64/instrument-arm64.h
@@ -0,0 +1,107 @@
+// Copyright 2013 the V8 project authors. All rights reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following
+// disclaimer in the documentation and/or other materials provided
+// with the distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived
+// from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+#ifndef V8_ARM64_INSTRUMENT_ARM64_H_
+#define V8_ARM64_INSTRUMENT_ARM64_H_
+
+#include "globals.h"
+#include "utils.h"
+#include "arm64/decoder-arm64.h"
+#include "arm64/constants-arm64.h"
+
+namespace v8 {
+namespace internal {
+
+const int kCounterNameMaxLength = 256;
+const uint64_t kDefaultInstrumentationSamplingPeriod = 1 << 22;
+
+
+enum InstrumentState {
+ InstrumentStateDisable = 0,
+ InstrumentStateEnable = 1
+};
+
+
+enum CounterType {
+ Gauge = 0, // Gauge counters reset themselves after reading.
+ Cumulative = 1 // Cumulative counters keep their value after reading.
+};
+
+
+class Counter {
+ public:
+ Counter(const char* name, CounterType type = Gauge);
+
+ void Increment();
+ void Enable();
+ void Disable();
+ bool IsEnabled();
+ uint64_t count();
+ const char* name();
+ CounterType type();
+
+ private:
+ char name_[kCounterNameMaxLength];
+ uint64_t count_;
+ bool enabled_;
+ CounterType type_;
+};
+
+
+class Instrument: public DecoderVisitor {
+ public:
+ explicit Instrument(const char* datafile = NULL,
+ uint64_t sample_period = kDefaultInstrumentationSamplingPeriod);
+ ~Instrument();
+
+ // Declare all Visitor functions.
+ #define DECLARE(A) void Visit##A(Instruction* instr);
+ VISITOR_LIST(DECLARE)
+ #undef DECLARE
+
+ private:
+ void Update();
+ void Enable();
+ void Disable();
+ void DumpCounters();
+ void DumpCounterNames();
+ void DumpEventMarker(unsigned marker);
+ void HandleInstrumentationEvent(unsigned event);
+ Counter* GetCounter(const char* name);
+
+ void InstrumentLoadStore(Instruction* instr);
+ void InstrumentLoadStorePair(Instruction* instr);
+
+ std::list<Counter*> counters_;
+
+  FILE* output_stream_;
+ uint64_t sample_period_;
+};
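+
+// Usage sketch (illustrative; assumes the decoder/visitor interface declared
+// in arm64/decoder-arm64.h): the simulator registers an Instrument as one of
+// its decoder visitors so that every decoded instruction is counted.
+//
+//   Instrument instrument("instrument.csv",
+//                         kDefaultInstrumentationSamplingPeriod);
+//   decoder->AppendVisitor(&instrument);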
+
+} } // namespace v8::internal
+
+#endif // V8_ARM64_INSTRUMENT_ARM64_H_
diff --git a/deps/v8/src/arm64/lithium-arm64.cc b/deps/v8/src/arm64/lithium-arm64.cc
new file mode 100644
index 0000000000..60bf51ebbd
--- /dev/null
+++ b/deps/v8/src/arm64/lithium-arm64.cc
@@ -0,0 +1,2576 @@
+// Copyright 2013 the V8 project authors. All rights reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following
+// disclaimer in the documentation and/or other materials provided
+// with the distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived
+// from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+#include "v8.h"
+
+#include "lithium-allocator-inl.h"
+#include "arm64/lithium-arm64.h"
+#include "arm64/lithium-codegen-arm64.h"
+#include "hydrogen-osr.h"
+
+namespace v8 {
+namespace internal {
+
+
+#define DEFINE_COMPILE(type) \
+ void L##type::CompileToNative(LCodeGen* generator) { \
+ generator->Do##type(this); \
+ }
+LITHIUM_CONCRETE_INSTRUCTION_LIST(DEFINE_COMPILE)
+#undef DEFINE_COMPILE
+
+#ifdef DEBUG
+void LInstruction::VerifyCall() {
+ // Call instructions can use only fixed registers as temporaries and
+ // outputs because all registers are blocked by the calling convention.
+  // Input operands must use a fixed register or use-at-start policy or
+ // a non-register policy.
+ ASSERT(Output() == NULL ||
+ LUnallocated::cast(Output())->HasFixedPolicy() ||
+ !LUnallocated::cast(Output())->HasRegisterPolicy());
+ for (UseIterator it(this); !it.Done(); it.Advance()) {
+ LUnallocated* operand = LUnallocated::cast(it.Current());
+ ASSERT(operand->HasFixedPolicy() ||
+ operand->IsUsedAtStart());
+ }
+ for (TempIterator it(this); !it.Done(); it.Advance()) {
+ LUnallocated* operand = LUnallocated::cast(it.Current());
+    ASSERT(operand->HasFixedPolicy() || !operand->HasRegisterPolicy());
+ }
+}
+#endif
+
+
+void LLabel::PrintDataTo(StringStream* stream) {
+ LGap::PrintDataTo(stream);
+ LLabel* rep = replacement();
+ if (rep != NULL) {
+ stream->Add(" Dead block replaced with B%d", rep->block_id());
+ }
+}
+
+
+void LAccessArgumentsAt::PrintDataTo(StringStream* stream) {
+ arguments()->PrintTo(stream);
+ stream->Add(" length ");
+ length()->PrintTo(stream);
+ stream->Add(" index ");
+ index()->PrintTo(stream);
+}
+
+
+void LBranch::PrintDataTo(StringStream* stream) {
+ stream->Add("B%d | B%d on ", true_block_id(), false_block_id());
+ value()->PrintTo(stream);
+}
+
+
+void LCallJSFunction::PrintDataTo(StringStream* stream) {
+ stream->Add("= ");
+ function()->PrintTo(stream);
+ stream->Add("#%d / ", arity());
+}
+
+
+void LCallWithDescriptor::PrintDataTo(StringStream* stream) {
+ for (int i = 0; i < InputCount(); i++) {
+ InputAt(i)->PrintTo(stream);
+ stream->Add(" ");
+ }
+ stream->Add("#%d / ", arity());
+}
+
+
+void LCallNew::PrintDataTo(StringStream* stream) {
+ stream->Add("= ");
+ constructor()->PrintTo(stream);
+ stream->Add(" #%d / ", arity());
+}
+
+
+void LCallNewArray::PrintDataTo(StringStream* stream) {
+ stream->Add("= ");
+ constructor()->PrintTo(stream);
+ stream->Add(" #%d / ", arity());
+ ElementsKind kind = hydrogen()->elements_kind();
+ stream->Add(" (%s) ", ElementsKindToString(kind));
+}
+
+
+void LClassOfTestAndBranch::PrintDataTo(StringStream* stream) {
+ stream->Add("if class_of_test(");
+ value()->PrintTo(stream);
+ stream->Add(", \"%o\") then B%d else B%d",
+ *hydrogen()->class_name(),
+ true_block_id(),
+ false_block_id());
+}
+
+
+void LCompareNumericAndBranch::PrintDataTo(StringStream* stream) {
+ stream->Add("if ");
+ left()->PrintTo(stream);
+ stream->Add(" %s ", Token::String(op()));
+ right()->PrintTo(stream);
+ stream->Add(" then B%d else B%d", true_block_id(), false_block_id());
+}
+
+
+void LHasCachedArrayIndexAndBranch::PrintDataTo(StringStream* stream) {
+ stream->Add("if has_cached_array_index(");
+ value()->PrintTo(stream);
+ stream->Add(") then B%d else B%d", true_block_id(), false_block_id());
+}
+
+
+bool LGoto::HasInterestingComment(LCodeGen* gen) const {
+ return !gen->IsNextEmittedBlock(block_id());
+}
+
+
+void LGoto::PrintDataTo(StringStream* stream) {
+ stream->Add("B%d", block_id());
+}
+
+
+void LInnerAllocatedObject::PrintDataTo(StringStream* stream) {
+ stream->Add(" = ");
+ base_object()->PrintTo(stream);
+ stream->Add(" + ");
+ offset()->PrintTo(stream);
+}
+
+
+void LInvokeFunction::PrintDataTo(StringStream* stream) {
+ stream->Add("= ");
+ function()->PrintTo(stream);
+ stream->Add(" #%d / ", arity());
+}
+
+
+void LInstruction::PrintTo(StringStream* stream) {
+ stream->Add("%s ", this->Mnemonic());
+
+ PrintOutputOperandTo(stream);
+
+ PrintDataTo(stream);
+
+ if (HasEnvironment()) {
+ stream->Add(" ");
+ environment()->PrintTo(stream);
+ }
+
+ if (HasPointerMap()) {
+ stream->Add(" ");
+ pointer_map()->PrintTo(stream);
+ }
+}
+
+
+void LInstruction::PrintDataTo(StringStream* stream) {
+ stream->Add("= ");
+ for (int i = 0; i < InputCount(); i++) {
+ if (i > 0) stream->Add(" ");
+ if (InputAt(i) == NULL) {
+ stream->Add("NULL");
+ } else {
+ InputAt(i)->PrintTo(stream);
+ }
+ }
+}
+
+
+void LInstruction::PrintOutputOperandTo(StringStream* stream) {
+ if (HasResult()) result()->PrintTo(stream);
+}
+
+
+void LHasInstanceTypeAndBranch::PrintDataTo(StringStream* stream) {
+ stream->Add("if has_instance_type(");
+ value()->PrintTo(stream);
+ stream->Add(") then B%d else B%d", true_block_id(), false_block_id());
+}
+
+
+void LIsObjectAndBranch::PrintDataTo(StringStream* stream) {
+ stream->Add("if is_object(");
+ value()->PrintTo(stream);
+ stream->Add(") then B%d else B%d", true_block_id(), false_block_id());
+}
+
+
+void LIsStringAndBranch::PrintDataTo(StringStream* stream) {
+ stream->Add("if is_string(");
+ value()->PrintTo(stream);
+ stream->Add(") then B%d else B%d", true_block_id(), false_block_id());
+}
+
+
+void LIsSmiAndBranch::PrintDataTo(StringStream* stream) {
+ stream->Add("if is_smi(");
+ value()->PrintTo(stream);
+ stream->Add(") then B%d else B%d", true_block_id(), false_block_id());
+}
+
+
+void LTypeofIsAndBranch::PrintDataTo(StringStream* stream) {
+ stream->Add("if typeof ");
+ value()->PrintTo(stream);
+ stream->Add(" == \"%s\" then B%d else B%d",
+ hydrogen()->type_literal()->ToCString().get(),
+ true_block_id(), false_block_id());
+}
+
+
+void LIsUndetectableAndBranch::PrintDataTo(StringStream* stream) {
+ stream->Add("if is_undetectable(");
+ value()->PrintTo(stream);
+ stream->Add(") then B%d else B%d", true_block_id(), false_block_id());
+}
+
+
+bool LGap::IsRedundant() const {
+ for (int i = 0; i < 4; i++) {
+ if ((parallel_moves_[i] != NULL) && !parallel_moves_[i]->IsRedundant()) {
+ return false;
+ }
+ }
+
+ return true;
+}
+
+
+void LGap::PrintDataTo(StringStream* stream) {
+ for (int i = 0; i < 4; i++) {
+ stream->Add("(");
+ if (parallel_moves_[i] != NULL) {
+ parallel_moves_[i]->PrintDataTo(stream);
+ }
+ stream->Add(") ");
+ }
+}
+
+
+void LLoadContextSlot::PrintDataTo(StringStream* stream) {
+ context()->PrintTo(stream);
+ stream->Add("[%d]", slot_index());
+}
+
+
+void LStoreCodeEntry::PrintDataTo(StringStream* stream) {
+ stream->Add(" = ");
+ function()->PrintTo(stream);
+ stream->Add(".code_entry = ");
+ code_object()->PrintTo(stream);
+}
+
+
+void LStoreContextSlot::PrintDataTo(StringStream* stream) {
+ context()->PrintTo(stream);
+ stream->Add("[%d] <- ", slot_index());
+ value()->PrintTo(stream);
+}
+
+
+void LStoreKeyedGeneric::PrintDataTo(StringStream* stream) {
+ object()->PrintTo(stream);
+ stream->Add("[");
+ key()->PrintTo(stream);
+ stream->Add("] <- ");
+ value()->PrintTo(stream);
+}
+
+
+void LStoreNamedField::PrintDataTo(StringStream* stream) {
+ object()->PrintTo(stream);
+ hydrogen()->access().PrintTo(stream);
+ stream->Add(" <- ");
+ value()->PrintTo(stream);
+}
+
+
+void LStoreNamedGeneric::PrintDataTo(StringStream* stream) {
+ object()->PrintTo(stream);
+ stream->Add(".");
+ stream->Add(String::cast(*name())->ToCString().get());
+ stream->Add(" <- ");
+ value()->PrintTo(stream);
+}
+
+
+void LStringCompareAndBranch::PrintDataTo(StringStream* stream) {
+ stream->Add("if string_compare(");
+ left()->PrintTo(stream);
+ right()->PrintTo(stream);
+ stream->Add(") then B%d else B%d", true_block_id(), false_block_id());
+}
+
+
+void LTransitionElementsKind::PrintDataTo(StringStream* stream) {
+ object()->PrintTo(stream);
+ stream->Add("%p -> %p", *original_map(), *transitioned_map());
+}
+
+
+template<int T>
+void LUnaryMathOperation<T>::PrintDataTo(StringStream* stream) {
+ value()->PrintTo(stream);
+}
+
+
+const char* LArithmeticD::Mnemonic() const {
+ switch (op()) {
+ case Token::ADD: return "add-d";
+ case Token::SUB: return "sub-d";
+ case Token::MUL: return "mul-d";
+ case Token::DIV: return "div-d";
+ case Token::MOD: return "mod-d";
+ default:
+ UNREACHABLE();
+ return NULL;
+ }
+}
+
+
+const char* LArithmeticT::Mnemonic() const {
+ switch (op()) {
+ case Token::ADD: return "add-t";
+ case Token::SUB: return "sub-t";
+ case Token::MUL: return "mul-t";
+ case Token::MOD: return "mod-t";
+ case Token::DIV: return "div-t";
+ case Token::BIT_AND: return "bit-and-t";
+ case Token::BIT_OR: return "bit-or-t";
+ case Token::BIT_XOR: return "bit-xor-t";
+ case Token::ROR: return "ror-t";
+ case Token::SHL: return "shl-t";
+ case Token::SAR: return "sar-t";
+ case Token::SHR: return "shr-t";
+ default:
+ UNREACHABLE();
+ return NULL;
+ }
+}
+
+
+void LChunkBuilder::Abort(BailoutReason reason) {
+ info()->set_bailout_reason(reason);
+ status_ = ABORTED;
+}
+
+
+LUnallocated* LChunkBuilder::ToUnallocated(Register reg) {
+ return new(zone()) LUnallocated(LUnallocated::FIXED_REGISTER,
+ Register::ToAllocationIndex(reg));
+}
+
+
+LUnallocated* LChunkBuilder::ToUnallocated(DoubleRegister reg) {
+ return new(zone()) LUnallocated(LUnallocated::FIXED_DOUBLE_REGISTER,
+ DoubleRegister::ToAllocationIndex(reg));
+}
+
+
+LOperand* LChunkBuilder::Use(HValue* value, LUnallocated* operand) {
+ if (value->EmitAtUses()) {
+ HInstruction* instr = HInstruction::cast(value);
+ VisitInstruction(instr);
+ }
+ operand->set_virtual_register(value->id());
+ return operand;
+}
+
+
+LOperand* LChunkBuilder::UseFixed(HValue* value, Register fixed_register) {
+ return Use(value, ToUnallocated(fixed_register));
+}
+
+
+LOperand* LChunkBuilder::UseFixedDouble(HValue* value,
+ DoubleRegister fixed_register) {
+ return Use(value, ToUnallocated(fixed_register));
+}
+
+
+LOperand* LChunkBuilder::UseRegister(HValue* value) {
+ return Use(value, new(zone()) LUnallocated(LUnallocated::MUST_HAVE_REGISTER));
+}
+
+
+LOperand* LChunkBuilder::UseRegisterAndClobber(HValue* value) {
+ return Use(value, new(zone()) LUnallocated(LUnallocated::WRITABLE_REGISTER));
+}
+
+
+LOperand* LChunkBuilder::UseRegisterAtStart(HValue* value) {
+ return Use(value,
+ new(zone()) LUnallocated(LUnallocated::MUST_HAVE_REGISTER,
+ LUnallocated::USED_AT_START));
+}
+
+
+LOperand* LChunkBuilder::UseRegisterOrConstant(HValue* value) {
+ return value->IsConstant() ? UseConstant(value) : UseRegister(value);
+}
+
+
+LOperand* LChunkBuilder::UseRegisterOrConstantAtStart(HValue* value) {
+ return value->IsConstant() ? UseConstant(value) : UseRegisterAtStart(value);
+}
+
+
+LConstantOperand* LChunkBuilder::UseConstant(HValue* value) {
+ return chunk_->DefineConstantOperand(HConstant::cast(value));
+}
+
+
+LOperand* LChunkBuilder::UseAny(HValue* value) {
+ return value->IsConstant()
+ ? UseConstant(value)
+ : Use(value, new(zone()) LUnallocated(LUnallocated::ANY));
+}
+
+
+LInstruction* LChunkBuilder::Define(LTemplateResultInstruction<1>* instr,
+ LUnallocated* result) {
+ result->set_virtual_register(current_instruction_->id());
+ instr->set_result(result);
+ return instr;
+}
+
+
+LInstruction* LChunkBuilder::DefineAsRegister(
+ LTemplateResultInstruction<1>* instr) {
+ return Define(instr,
+ new(zone()) LUnallocated(LUnallocated::MUST_HAVE_REGISTER));
+}
+
+
+LInstruction* LChunkBuilder::DefineAsSpilled(
+ LTemplateResultInstruction<1>* instr, int index) {
+ return Define(instr,
+ new(zone()) LUnallocated(LUnallocated::FIXED_SLOT, index));
+}
+
+
+LInstruction* LChunkBuilder::DefineSameAsFirst(
+ LTemplateResultInstruction<1>* instr) {
+ return Define(instr,
+ new(zone()) LUnallocated(LUnallocated::SAME_AS_FIRST_INPUT));
+}
+
+
+LInstruction* LChunkBuilder::DefineFixed(
+ LTemplateResultInstruction<1>* instr, Register reg) {
+ return Define(instr, ToUnallocated(reg));
+}
+
+
+LInstruction* LChunkBuilder::DefineFixedDouble(
+ LTemplateResultInstruction<1>* instr, DoubleRegister reg) {
+ return Define(instr, ToUnallocated(reg));
+}
+
+
+LInstruction* LChunkBuilder::MarkAsCall(LInstruction* instr,
+ HInstruction* hinstr,
+ CanDeoptimize can_deoptimize) {
+ info()->MarkAsNonDeferredCalling();
+#ifdef DEBUG
+ instr->VerifyCall();
+#endif
+ instr->MarkAsCall();
+ instr = AssignPointerMap(instr);
+
+  // If the instruction does not have side effects, lazy deoptimization
+  // after the call will try to deoptimize to the point before the call.
+  // Thus we still need to attach an environment to this call even if
+  // the call sequence cannot deoptimize eagerly.
+ bool needs_environment =
+ (can_deoptimize == CAN_DEOPTIMIZE_EAGERLY) ||
+ !hinstr->HasObservableSideEffects();
+ if (needs_environment && !instr->HasEnvironment()) {
+ instr = AssignEnvironment(instr);
+ }
+
+ return instr;
+}
+
+
+LInstruction* LChunkBuilder::AssignPointerMap(LInstruction* instr) {
+ ASSERT(!instr->HasPointerMap());
+ instr->set_pointer_map(new(zone()) LPointerMap(zone()));
+ return instr;
+}
+
+
+LUnallocated* LChunkBuilder::TempRegister() {
+ LUnallocated* operand =
+ new(zone()) LUnallocated(LUnallocated::MUST_HAVE_REGISTER);
+ int vreg = allocator_->GetVirtualRegister();
+ if (!allocator_->AllocationOk()) {
+ Abort(kOutOfVirtualRegistersWhileTryingToAllocateTempRegister);
+ vreg = 0;
+ }
+ operand->set_virtual_register(vreg);
+ return operand;
+}
+
+
+int LPlatformChunk::GetNextSpillIndex() {
+ return spill_slot_count_++;
+}
+
+
+LOperand* LPlatformChunk::GetNextSpillSlot(RegisterKind kind) {
+ int index = GetNextSpillIndex();
+ if (kind == DOUBLE_REGISTERS) {
+ return LDoubleStackSlot::Create(index, zone());
+ } else {
+ ASSERT(kind == GENERAL_REGISTERS);
+ return LStackSlot::Create(index, zone());
+ }
+}
+
+
+LOperand* LChunkBuilder::FixedTemp(DoubleRegister reg) {
+ LUnallocated* operand = ToUnallocated(reg);
+ ASSERT(operand->HasFixedPolicy());
+ return operand;
+}
+
+
+LPlatformChunk* LChunkBuilder::Build() {
+ ASSERT(is_unused());
+ chunk_ = new(zone()) LPlatformChunk(info_, graph_);
+ LPhase phase("L_Building chunk", chunk_);
+ status_ = BUILDING;
+
+ // If compiling for OSR, reserve space for the unoptimized frame,
+ // which will be subsumed into this frame.
+ if (graph()->has_osr()) {
+ // TODO(all): GetNextSpillIndex just increments a field. It has no other
+ // side effects, so we should get rid of this loop.
+ for (int i = graph()->osr()->UnoptimizedFrameSlots(); i > 0; i--) {
+ chunk_->GetNextSpillIndex();
+ }
+ }
+
+ const ZoneList<HBasicBlock*>* blocks = graph_->blocks();
+ for (int i = 0; i < blocks->length(); i++) {
+ DoBasicBlock(blocks->at(i));
+ if (is_aborted()) return NULL;
+ }
+ status_ = DONE;
+ return chunk_;
+}
+
+
+void LChunkBuilder::DoBasicBlock(HBasicBlock* block) {
+ ASSERT(is_building());
+ current_block_ = block;
+
+ if (block->IsStartBlock()) {
+ block->UpdateEnvironment(graph_->start_environment());
+ argument_count_ = 0;
+ } else if (block->predecessors()->length() == 1) {
+ // We have a single predecessor => copy environment and outgoing
+ // argument count from the predecessor.
+ ASSERT(block->phis()->length() == 0);
+ HBasicBlock* pred = block->predecessors()->at(0);
+ HEnvironment* last_environment = pred->last_environment();
+ ASSERT(last_environment != NULL);
+
+    // Only copy the environment if it is used again later.
+ if (pred->end()->SecondSuccessor() == NULL) {
+ ASSERT(pred->end()->FirstSuccessor() == block);
+ } else {
+ if ((pred->end()->FirstSuccessor()->block_id() > block->block_id()) ||
+ (pred->end()->SecondSuccessor()->block_id() > block->block_id())) {
+ last_environment = last_environment->Copy();
+ }
+ }
+ block->UpdateEnvironment(last_environment);
+ ASSERT(pred->argument_count() >= 0);
+ argument_count_ = pred->argument_count();
+ } else {
+ // We are at a state join => process phis.
+ HBasicBlock* pred = block->predecessors()->at(0);
+    // No need to copy the environment; it cannot be used later.
+ HEnvironment* last_environment = pred->last_environment();
+ for (int i = 0; i < block->phis()->length(); ++i) {
+ HPhi* phi = block->phis()->at(i);
+ if (phi->HasMergedIndex()) {
+ last_environment->SetValueAt(phi->merged_index(), phi);
+ }
+ }
+ for (int i = 0; i < block->deleted_phis()->length(); ++i) {
+ if (block->deleted_phis()->at(i) < last_environment->length()) {
+ last_environment->SetValueAt(block->deleted_phis()->at(i),
+ graph_->GetConstantUndefined());
+ }
+ }
+ block->UpdateEnvironment(last_environment);
+ // Pick up the outgoing argument count of one of the predecessors.
+ argument_count_ = pred->argument_count();
+ }
+
+ // Translate hydrogen instructions to lithium ones for the current block.
+ HInstruction* current = block->first();
+ int start = chunk_->instructions()->length();
+ while ((current != NULL) && !is_aborted()) {
+ // Code for constants in registers is generated lazily.
+ if (!current->EmitAtUses()) {
+ VisitInstruction(current);
+ }
+ current = current->next();
+ }
+ int end = chunk_->instructions()->length() - 1;
+ if (end >= start) {
+ block->set_first_instruction_index(start);
+ block->set_last_instruction_index(end);
+ }
+ block->set_argument_count(argument_count_);
+ current_block_ = NULL;
+}
+
+
+void LChunkBuilder::VisitInstruction(HInstruction* current) {
+ HInstruction* old_current = current_instruction_;
+ current_instruction_ = current;
+
+ LInstruction* instr = NULL;
+ if (current->CanReplaceWithDummyUses()) {
+ if (current->OperandCount() == 0) {
+ instr = DefineAsRegister(new(zone()) LDummy());
+ } else {
+ ASSERT(!current->OperandAt(0)->IsControlInstruction());
+ instr = DefineAsRegister(new(zone())
+ LDummyUse(UseAny(current->OperandAt(0))));
+ }
+ for (int i = 1; i < current->OperandCount(); ++i) {
+ if (current->OperandAt(i)->IsControlInstruction()) continue;
+ LInstruction* dummy =
+ new(zone()) LDummyUse(UseAny(current->OperandAt(i)));
+ dummy->set_hydrogen_value(current);
+ chunk_->AddInstruction(dummy, current_block_);
+ }
+ } else {
+ instr = current->CompileToLithium(this);
+ }
+
+ argument_count_ += current->argument_delta();
+ ASSERT(argument_count_ >= 0);
+
+ if (instr != NULL) {
+ // Associate the hydrogen instruction first, since we may need it for
+ // the ClobbersRegisters() or ClobbersDoubleRegisters() calls below.
+ instr->set_hydrogen_value(current);
+
+#ifdef DEBUG
+ // Make sure that the lithium instruction has either no fixed register
+ // constraints in temps or the result OR no uses that are only used at
+ // start. If this invariant doesn't hold, the register allocator can decide
+ // to insert a split of a range immediately before the instruction due to an
+ // already allocated register needing to be used for the instruction's fixed
+ // register constraint. In this case, the register allocator won't see an
+ // interference between the split child and the use-at-start (it would if
+    // it was just a plain use), so it is free to move the split child into
+ // the same register that is used for the use-at-start.
+ // See https://code.google.com/p/chromium/issues/detail?id=201590
+ if (!(instr->ClobbersRegisters() && instr->ClobbersDoubleRegisters())) {
+ int fixed = 0;
+ int used_at_start = 0;
+ for (UseIterator it(instr); !it.Done(); it.Advance()) {
+ LUnallocated* operand = LUnallocated::cast(it.Current());
+ if (operand->IsUsedAtStart()) ++used_at_start;
+ }
+ if (instr->Output() != NULL) {
+ if (LUnallocated::cast(instr->Output())->HasFixedPolicy()) ++fixed;
+ }
+ for (TempIterator it(instr); !it.Done(); it.Advance()) {
+ LUnallocated* operand = LUnallocated::cast(it.Current());
+ if (operand->HasFixedPolicy()) ++fixed;
+ }
+ ASSERT(fixed == 0 || used_at_start == 0);
+ }
+#endif
+
+ if (FLAG_stress_pointer_maps && !instr->HasPointerMap()) {
+ instr = AssignPointerMap(instr);
+ }
+ if (FLAG_stress_environments && !instr->HasEnvironment()) {
+ instr = AssignEnvironment(instr);
+ }
+ chunk_->AddInstruction(instr, current_block_);
+
+ if (instr->IsCall()) {
+ HValue* hydrogen_value_for_lazy_bailout = current;
+ LInstruction* instruction_needing_environment = NULL;
+ if (current->HasObservableSideEffects()) {
+ HSimulate* sim = HSimulate::cast(current->next());
+ instruction_needing_environment = instr;
+ sim->ReplayEnvironment(current_block_->last_environment());
+ hydrogen_value_for_lazy_bailout = sim;
+ }
+ LInstruction* bailout = AssignEnvironment(new(zone()) LLazyBailout());
+ bailout->set_hydrogen_value(hydrogen_value_for_lazy_bailout);
+ chunk_->AddInstruction(bailout, current_block_);
+ if (instruction_needing_environment != NULL) {
+ // Store the lazy deopt environment with the instruction if needed.
+ // Right now it is only used for LInstanceOfKnownGlobal.
+ instruction_needing_environment->
+ SetDeferredLazyDeoptimizationEnvironment(bailout->environment());
+ }
+ }
+ }
+ current_instruction_ = old_current;
+}
+
+
+LInstruction* LChunkBuilder::AssignEnvironment(LInstruction* instr) {
+ HEnvironment* hydrogen_env = current_block_->last_environment();
+ int argument_index_accumulator = 0;
+ ZoneList<HValue*> objects_to_materialize(0, zone());
+ instr->set_environment(CreateEnvironment(hydrogen_env,
+ &argument_index_accumulator,
+ &objects_to_materialize));
+ return instr;
+}
+
+
+LInstruction* LChunkBuilder::DoAbnormalExit(HAbnormalExit* instr) {
+ // The control instruction marking the end of a block that completed
+ // abruptly (e.g., threw an exception). There is nothing specific to do.
+ return NULL;
+}
+
+
+LInstruction* LChunkBuilder::DoArithmeticD(Token::Value op,
+ HArithmeticBinaryOperation* instr) {
+ ASSERT(instr->representation().IsDouble());
+ ASSERT(instr->left()->representation().IsDouble());
+ ASSERT(instr->right()->representation().IsDouble());
+
+ if (op == Token::MOD) {
+ LOperand* left = UseFixedDouble(instr->left(), d0);
+ LOperand* right = UseFixedDouble(instr->right(), d1);
+ LArithmeticD* result = new(zone()) LArithmeticD(Token::MOD, left, right);
+ return MarkAsCall(DefineFixedDouble(result, d0), instr);
+ } else {
+ LOperand* left = UseRegisterAtStart(instr->left());
+ LOperand* right = UseRegisterAtStart(instr->right());
+ LArithmeticD* result = new(zone()) LArithmeticD(op, left, right);
+ return DefineAsRegister(result);
+ }
+}
+
+
+LInstruction* LChunkBuilder::DoArithmeticT(Token::Value op,
+ HBinaryOperation* instr) {
+ ASSERT((op == Token::ADD) || (op == Token::SUB) || (op == Token::MUL) ||
+ (op == Token::DIV) || (op == Token::MOD) || (op == Token::SHR) ||
+ (op == Token::SHL) || (op == Token::SAR) || (op == Token::ROR) ||
+ (op == Token::BIT_OR) || (op == Token::BIT_AND) ||
+ (op == Token::BIT_XOR));
+ HValue* left = instr->left();
+ HValue* right = instr->right();
+
+ // TODO(jbramley): Once we've implemented smi support for all arithmetic
+ // operations, these assertions should check IsTagged().
+ ASSERT(instr->representation().IsSmiOrTagged());
+ ASSERT(left->representation().IsSmiOrTagged());
+ ASSERT(right->representation().IsSmiOrTagged());
+
+ LOperand* context = UseFixed(instr->context(), cp);
+ LOperand* left_operand = UseFixed(left, x1);
+ LOperand* right_operand = UseFixed(right, x0);
+ LArithmeticT* result =
+ new(zone()) LArithmeticT(op, context, left_operand, right_operand);
+ return MarkAsCall(DefineFixed(result, x0), instr);
+}
+
+
+LInstruction* LChunkBuilder::DoBoundsCheckBaseIndexInformation(
+ HBoundsCheckBaseIndexInformation* instr) {
+ UNREACHABLE();
+ return NULL;
+}
+
+
+LInstruction* LChunkBuilder::DoAccessArgumentsAt(HAccessArgumentsAt* instr) {
+ info()->MarkAsRequiresFrame();
+ LOperand* args = NULL;
+ LOperand* length = NULL;
+ LOperand* index = NULL;
+
+ if (instr->length()->IsConstant() && instr->index()->IsConstant()) {
+ args = UseRegisterAtStart(instr->arguments());
+ length = UseConstant(instr->length());
+ index = UseConstant(instr->index());
+ } else {
+ args = UseRegister(instr->arguments());
+ length = UseRegisterAtStart(instr->length());
+ index = UseRegisterOrConstantAtStart(instr->index());
+ }
+
+ return DefineAsRegister(new(zone()) LAccessArgumentsAt(args, length, index));
+}
+
+
+LInstruction* LChunkBuilder::DoAdd(HAdd* instr) {
+ if (instr->representation().IsSmiOrInteger32()) {
+ ASSERT(instr->left()->representation().Equals(instr->representation()));
+ ASSERT(instr->right()->representation().Equals(instr->representation()));
+ LOperand* left = UseRegisterAtStart(instr->BetterLeftOperand());
+ LOperand* right =
+ UseRegisterOrConstantAtStart(instr->BetterRightOperand());
+ LInstruction* result = instr->representation().IsSmi() ?
+ DefineAsRegister(new(zone()) LAddS(left, right)) :
+ DefineAsRegister(new(zone()) LAddI(left, right));
+ if (instr->CheckFlag(HValue::kCanOverflow)) {
+ result = AssignEnvironment(result);
+ }
+ return result;
+ } else if (instr->representation().IsExternal()) {
+ ASSERT(instr->left()->representation().IsExternal());
+ ASSERT(instr->right()->representation().IsInteger32());
+ ASSERT(!instr->CheckFlag(HValue::kCanOverflow));
+ LOperand* left = UseRegisterAtStart(instr->left());
+ LOperand* right = UseRegisterOrConstantAtStart(instr->right());
+ return DefineAsRegister(new(zone()) LAddE(left, right));
+ } else if (instr->representation().IsDouble()) {
+ return DoArithmeticD(Token::ADD, instr);
+ } else {
+ ASSERT(instr->representation().IsTagged());
+ return DoArithmeticT(Token::ADD, instr);
+ }
+}
+
+
+LInstruction* LChunkBuilder::DoAllocate(HAllocate* instr) {
+ info()->MarkAsDeferredCalling();
+ LOperand* context = UseAny(instr->context());
+ LOperand* size = UseRegisterOrConstant(instr->size());
+ LOperand* temp1 = TempRegister();
+ LOperand* temp2 = TempRegister();
+ LOperand* temp3 = instr->MustPrefillWithFiller() ? TempRegister() : NULL;
+ LAllocate* result = new(zone()) LAllocate(context, size, temp1, temp2, temp3);
+ return AssignPointerMap(DefineAsRegister(result));
+}
+
+
+LInstruction* LChunkBuilder::DoApplyArguments(HApplyArguments* instr) {
+ LOperand* function = UseFixed(instr->function(), x1);
+ LOperand* receiver = UseFixed(instr->receiver(), x0);
+ LOperand* length = UseFixed(instr->length(), x2);
+ LOperand* elements = UseFixed(instr->elements(), x3);
+ LApplyArguments* result = new(zone()) LApplyArguments(function,
+ receiver,
+ length,
+ elements);
+ return MarkAsCall(DefineFixed(result, x0), instr, CAN_DEOPTIMIZE_EAGERLY);
+}
+
+
+LInstruction* LChunkBuilder::DoArgumentsElements(HArgumentsElements* instr) {
+ info()->MarkAsRequiresFrame();
+ LOperand* temp = instr->from_inlined() ? NULL : TempRegister();
+ return DefineAsRegister(new(zone()) LArgumentsElements(temp));
+}
+
+
+LInstruction* LChunkBuilder::DoArgumentsLength(HArgumentsLength* instr) {
+ info()->MarkAsRequiresFrame();
+ LOperand* value = UseRegisterAtStart(instr->value());
+ return DefineAsRegister(new(zone()) LArgumentsLength(value));
+}
+
+
+LInstruction* LChunkBuilder::DoArgumentsObject(HArgumentsObject* instr) {
+ // There are no real uses of the arguments object.
+ // arguments.length and element access are supported directly on
+ // stack arguments, and any real arguments object use causes a bailout.
+ // So this value is never used.
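+  // (For reference, arguments.length and arguments[i] are lowered via
+  // DoArgumentsLength and DoAccessArgumentsAt in this file.)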
+ return NULL;
+}
+
+
+LInstruction* LChunkBuilder::DoBitwise(HBitwise* instr) {
+ if (instr->representation().IsSmiOrInteger32()) {
+ ASSERT(instr->left()->representation().Equals(instr->representation()));
+ ASSERT(instr->right()->representation().Equals(instr->representation()));
+ ASSERT(instr->CheckFlag(HValue::kTruncatingToInt32));
+
+ LOperand* left = UseRegisterAtStart(instr->BetterLeftOperand());
+ LOperand* right =
+ UseRegisterOrConstantAtStart(instr->BetterRightOperand());
+ return instr->representation().IsSmi() ?
+ DefineAsRegister(new(zone()) LBitS(left, right)) :
+ DefineAsRegister(new(zone()) LBitI(left, right));
+ } else {
+ return DoArithmeticT(instr->op(), instr);
+ }
+}
+
+
+LInstruction* LChunkBuilder::DoBlockEntry(HBlockEntry* instr) {
+ // V8 expects a label to be generated for each basic block.
+  // This is used in some places, such as LAllocator::IsBlockBoundary
+  // in lithium-allocator.cc.
+ return new(zone()) LLabel(instr->block());
+}
+
+
+LInstruction* LChunkBuilder::DoBoundsCheck(HBoundsCheck* instr) {
+ LOperand* value = UseRegisterOrConstantAtStart(instr->index());
+ LOperand* length = UseRegister(instr->length());
+ return AssignEnvironment(new(zone()) LBoundsCheck(value, length));
+}
+
+
+LInstruction* LChunkBuilder::DoBranch(HBranch* instr) {
+ LInstruction* goto_instr = CheckElideControlInstruction(instr);
+ if (goto_instr != NULL) return goto_instr;
+
+ HValue* value = instr->value();
+ Representation r = value->representation();
+ HType type = value->type();
+
+ if (r.IsInteger32() || r.IsSmi() || r.IsDouble()) {
+ // These representations have simple checks that cannot deoptimize.
+ return new(zone()) LBranch(UseRegister(value), NULL, NULL);
+ } else {
+ ASSERT(r.IsTagged());
+ if (type.IsBoolean() || type.IsSmi() || type.IsJSArray() ||
+ type.IsHeapNumber()) {
+ // These types have simple checks that cannot deoptimize.
+ return new(zone()) LBranch(UseRegister(value), NULL, NULL);
+ }
+
+ if (type.IsString()) {
+ // This type cannot deoptimize, but needs a scratch register.
+ return new(zone()) LBranch(UseRegister(value), TempRegister(), NULL);
+ }
+
+ ToBooleanStub::Types expected = instr->expected_input_types();
+ bool needs_temps = expected.NeedsMap() || expected.IsEmpty();
+ LOperand* temp1 = needs_temps ? TempRegister() : NULL;
+ LOperand* temp2 = needs_temps ? TempRegister() : NULL;
+
+ if (expected.IsGeneric() || expected.IsEmpty()) {
+ // The generic case cannot deoptimize because it already supports every
+ // possible input type.
+ ASSERT(needs_temps);
+ return new(zone()) LBranch(UseRegister(value), temp1, temp2);
+ } else {
+ return AssignEnvironment(
+ new(zone()) LBranch(UseRegister(value), temp1, temp2));
+ }
+ }
+}
+
+
+LInstruction* LChunkBuilder::DoCallJSFunction(
+ HCallJSFunction* instr) {
+ LOperand* function = UseFixed(instr->function(), x1);
+
+ LCallJSFunction* result = new(zone()) LCallJSFunction(function);
+
+ return MarkAsCall(DefineFixed(result, x0), instr);
+}
+
+
+LInstruction* LChunkBuilder::DoCallWithDescriptor(
+ HCallWithDescriptor* instr) {
+ const CallInterfaceDescriptor* descriptor = instr->descriptor();
+
+ LOperand* target = UseRegisterOrConstantAtStart(instr->target());
+ ZoneList<LOperand*> ops(instr->OperandCount(), zone());
+ ops.Add(target, zone());
+ for (int i = 1; i < instr->OperandCount(); i++) {
+ LOperand* op = UseFixed(instr->OperandAt(i),
+ descriptor->GetParameterRegister(i - 1));
+ ops.Add(op, zone());
+ }
+
+ LCallWithDescriptor* result = new(zone()) LCallWithDescriptor(descriptor,
+ ops,
+ zone());
+ return MarkAsCall(DefineFixed(result, x0), instr);
+}
+
+
+LInstruction* LChunkBuilder::DoCallFunction(HCallFunction* instr) {
+ LOperand* context = UseFixed(instr->context(), cp);
+ LOperand* function = UseFixed(instr->function(), x1);
+ LCallFunction* call = new(zone()) LCallFunction(context, function);
+ return MarkAsCall(DefineFixed(call, x0), instr);
+}
+
+
+LInstruction* LChunkBuilder::DoCallNew(HCallNew* instr) {
+ LOperand* context = UseFixed(instr->context(), cp);
+ // The call to CallConstructStub will expect the constructor to be in x1.
+ LOperand* constructor = UseFixed(instr->constructor(), x1);
+ LCallNew* result = new(zone()) LCallNew(context, constructor);
+ return MarkAsCall(DefineFixed(result, x0), instr);
+}
+
+
+LInstruction* LChunkBuilder::DoCallNewArray(HCallNewArray* instr) {
+ LOperand* context = UseFixed(instr->context(), cp);
+ // The call to ArrayConstructCode will expect the constructor to be in x1.
+ LOperand* constructor = UseFixed(instr->constructor(), x1);
+ LCallNewArray* result = new(zone()) LCallNewArray(context, constructor);
+ return MarkAsCall(DefineFixed(result, x0), instr);
+}
+
+
+LInstruction* LChunkBuilder::DoCallRuntime(HCallRuntime* instr) {
+ LOperand* context = UseFixed(instr->context(), cp);
+ return MarkAsCall(DefineFixed(new(zone()) LCallRuntime(context), x0), instr);
+}
+
+
+LInstruction* LChunkBuilder::DoCallStub(HCallStub* instr) {
+ LOperand* context = UseFixed(instr->context(), cp);
+ return MarkAsCall(DefineFixed(new(zone()) LCallStub(context), x0), instr);
+}
+
+
+LInstruction* LChunkBuilder::DoCapturedObject(HCapturedObject* instr) {
+ instr->ReplayEnvironment(current_block_->last_environment());
+
+ // There are no real uses of a captured object.
+ return NULL;
+}
+
+
+LInstruction* LChunkBuilder::DoChange(HChange* instr) {
+ Representation from = instr->from();
+ Representation to = instr->to();
+
+ if (from.IsSmi()) {
+ if (to.IsTagged()) {
+ LOperand* value = UseRegister(instr->value());
+ return DefineSameAsFirst(new(zone()) LDummyUse(value));
+ }
+ from = Representation::Tagged();
+ }
+
+ if (from.IsTagged()) {
+ if (to.IsDouble()) {
+ LOperand* value = UseRegister(instr->value());
+ LOperand* temp = TempRegister();
+ LNumberUntagD* res = new(zone()) LNumberUntagD(value, temp);
+ return AssignEnvironment(DefineAsRegister(res));
+ } else if (to.IsSmi()) {
+ LOperand* value = UseRegister(instr->value());
+ if (instr->value()->type().IsSmi()) {
+ return DefineSameAsFirst(new(zone()) LDummyUse(value));
+ }
+ return AssignEnvironment(DefineSameAsFirst(new(zone()) LCheckSmi(value)));
+ } else {
+ ASSERT(to.IsInteger32());
+ LInstruction* res = NULL;
+
+ if (instr->value()->type().IsSmi() ||
+ instr->value()->representation().IsSmi()) {
+ LOperand* value = UseRegisterAtStart(instr->value());
+ res = DefineAsRegister(new(zone()) LSmiUntag(value, false));
+ } else {
+ LOperand* value = UseRegister(instr->value());
+ LOperand* temp1 = TempRegister();
+ LOperand* temp2 = instr->CanTruncateToInt32() ? NULL : FixedTemp(d24);
+ res = DefineAsRegister(new(zone()) LTaggedToI(value, temp1, temp2));
+ res = AssignEnvironment(res);
+ }
+
+ return res;
+ }
+ } else if (from.IsDouble()) {
+ if (to.IsTagged()) {
+ info()->MarkAsDeferredCalling();
+ LOperand* value = UseRegister(instr->value());
+ LOperand* temp1 = TempRegister();
+ LOperand* temp2 = TempRegister();
+
+ LNumberTagD* result = new(zone()) LNumberTagD(value, temp1, temp2);
+ return AssignPointerMap(DefineAsRegister(result));
+ } else {
+ ASSERT(to.IsSmi() || to.IsInteger32());
+ LOperand* value = UseRegister(instr->value());
+
+ if (instr->CanTruncateToInt32()) {
+ LTruncateDoubleToIntOrSmi* result =
+ new(zone()) LTruncateDoubleToIntOrSmi(value);
+ return DefineAsRegister(result);
+ } else {
+ LDoubleToIntOrSmi* result = new(zone()) LDoubleToIntOrSmi(value);
+ return AssignEnvironment(DefineAsRegister(result));
+ }
+ }
+ } else if (from.IsInteger32()) {
+ info()->MarkAsDeferredCalling();
+ if (to.IsTagged()) {
+ if (instr->value()->CheckFlag(HInstruction::kUint32)) {
+ LOperand* value = UseRegister(instr->value());
+ LNumberTagU* result = new(zone()) LNumberTagU(value,
+ TempRegister(),
+ TempRegister());
+ return AssignEnvironment(AssignPointerMap(DefineAsRegister(result)));
+ } else {
+ STATIC_ASSERT((kMinInt == Smi::kMinValue) &&
+ (kMaxInt == Smi::kMaxValue));
+ LOperand* value = UseRegisterAtStart(instr->value());
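+        // Every int32 fits in a smi here (see the assertion above), so the
+        // tag cannot fail and no environment is needed.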
+ return DefineAsRegister(new(zone()) LSmiTag(value));
+ }
+ } else if (to.IsSmi()) {
+ LOperand* value = UseRegisterAtStart(instr->value());
+ LInstruction* result = DefineAsRegister(new(zone()) LSmiTag(value));
+ if (instr->value()->CheckFlag(HInstruction::kUint32)) {
+ result = AssignEnvironment(result);
+ }
+ return result;
+ } else {
+ ASSERT(to.IsDouble());
+ if (instr->value()->CheckFlag(HInstruction::kUint32)) {
+ return DefineAsRegister(
+ new(zone()) LUint32ToDouble(UseRegisterAtStart(instr->value())));
+ } else {
+ return DefineAsRegister(
+ new(zone()) LInteger32ToDouble(UseRegisterAtStart(instr->value())));
+ }
+ }
+ }
+
+ UNREACHABLE();
+ return NULL;
+}
+
+
+LInstruction* LChunkBuilder::DoCheckValue(HCheckValue* instr) {
+ LOperand* value = UseRegisterAtStart(instr->value());
+ return AssignEnvironment(new(zone()) LCheckValue(value));
+}
+
+
+LInstruction* LChunkBuilder::DoCheckInstanceType(HCheckInstanceType* instr) {
+ LOperand* value = UseRegisterAtStart(instr->value());
+ LOperand* temp = TempRegister();
+ LInstruction* result = new(zone()) LCheckInstanceType(value, temp);
+ return AssignEnvironment(result);
+}
+
+
+LInstruction* LChunkBuilder::DoCheckMaps(HCheckMaps* instr) {
+ if (instr->CanOmitMapChecks()) {
+ // LCheckMaps does nothing in this case.
+ return new(zone()) LCheckMaps(NULL);
+ } else {
+ LOperand* value = UseRegisterAtStart(instr->value());
+ LOperand* temp = TempRegister();
+
+ if (instr->has_migration_target()) {
+ info()->MarkAsDeferredCalling();
+ LInstruction* result = new(zone()) LCheckMaps(value, temp);
+ return AssignPointerMap(AssignEnvironment(result));
+ } else {
+ return AssignEnvironment(new(zone()) LCheckMaps(value, temp));
+ }
+ }
+}
+
+
+LInstruction* LChunkBuilder::DoCheckHeapObject(HCheckHeapObject* instr) {
+ LOperand* value = UseRegisterAtStart(instr->value());
+ return AssignEnvironment(new(zone()) LCheckNonSmi(value));
+}
+
+
+LInstruction* LChunkBuilder::DoCheckSmi(HCheckSmi* instr) {
+ LOperand* value = UseRegisterAtStart(instr->value());
+ return AssignEnvironment(new(zone()) LCheckSmi(value));
+}
+
+
+LInstruction* LChunkBuilder::DoClampToUint8(HClampToUint8* instr) {
+ HValue* value = instr->value();
+ Representation input_rep = value->representation();
+ LOperand* reg = UseRegister(value);
+ if (input_rep.IsDouble()) {
+ return DefineAsRegister(new(zone()) LClampDToUint8(reg));
+ } else if (input_rep.IsInteger32()) {
+ return DefineAsRegister(new(zone()) LClampIToUint8(reg));
+ } else {
+ ASSERT(input_rep.IsSmiOrTagged());
+ return AssignEnvironment(
+ DefineAsRegister(new(zone()) LClampTToUint8(reg,
+ TempRegister(),
+ FixedTemp(d24))));
+ }
+}
+
+
+LInstruction* LChunkBuilder::DoClassOfTestAndBranch(
+ HClassOfTestAndBranch* instr) {
+ ASSERT(instr->value()->representation().IsTagged());
+ LOperand* value = UseRegisterAtStart(instr->value());
+ return new(zone()) LClassOfTestAndBranch(value,
+ TempRegister(),
+ TempRegister());
+}
+
+
+LInstruction* LChunkBuilder::DoCompareNumericAndBranch(
+ HCompareNumericAndBranch* instr) {
+ Representation r = instr->representation();
+
+ if (r.IsSmiOrInteger32()) {
+ ASSERT(instr->left()->representation().Equals(r));
+ ASSERT(instr->right()->representation().Equals(r));
+ LOperand* left = UseRegisterOrConstantAtStart(instr->left());
+ LOperand* right = UseRegisterOrConstantAtStart(instr->right());
+ return new(zone()) LCompareNumericAndBranch(left, right);
+ } else {
+ ASSERT(r.IsDouble());
+ ASSERT(instr->left()->representation().IsDouble());
+ ASSERT(instr->right()->representation().IsDouble());
+    // TODO(all): In fact the only case that we can handle more efficiently is
+    // when one of the operands is the constant 0. Currently the MacroAssembler
+    // will be able to cope with any constant by loading it into an internal
+    // scratch register. This means that if the constant is used more than
+    // once, it will be loaded multiple times. Unfortunately crankshaft already
+    // duplicates constant loads, but we should modify the code below once this
+    // issue has been addressed in crankshaft.
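+    // (One likely candidate: on ARM64, fcmp can compare against an
+    // immediate +0.0 directly, so comparing with the constant 0 would not
+    // need a scratch register.)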
+ LOperand* left = UseRegisterOrConstantAtStart(instr->left());
+ LOperand* right = UseRegisterOrConstantAtStart(instr->right());
+ return new(zone()) LCompareNumericAndBranch(left, right);
+ }
+}
+
+
+LInstruction* LChunkBuilder::DoCompareGeneric(HCompareGeneric* instr) {
+ ASSERT(instr->left()->representation().IsTagged());
+ ASSERT(instr->right()->representation().IsTagged());
+ LOperand* context = UseFixed(instr->context(), cp);
+ LOperand* left = UseFixed(instr->left(), x1);
+ LOperand* right = UseFixed(instr->right(), x0);
+ LCmpT* result = new(zone()) LCmpT(context, left, right);
+ return MarkAsCall(DefineFixed(result, x0), instr);
+}
+
+
+LInstruction* LChunkBuilder::DoCompareHoleAndBranch(
+ HCompareHoleAndBranch* instr) {
+ LOperand* value = UseRegister(instr->value());
+ if (instr->representation().IsTagged()) {
+ return new(zone()) LCmpHoleAndBranchT(value);
+ } else {
+ LOperand* temp = TempRegister();
+ return new(zone()) LCmpHoleAndBranchD(value, temp);
+ }
+}
+
+
+LInstruction* LChunkBuilder::DoCompareObjectEqAndBranch(
+ HCompareObjectEqAndBranch* instr) {
+ LInstruction* goto_instr = CheckElideControlInstruction(instr);
+ if (goto_instr != NULL) return goto_instr;
+
+ LOperand* left = UseRegisterAtStart(instr->left());
+ LOperand* right = UseRegisterAtStart(instr->right());
+ return new(zone()) LCmpObjectEqAndBranch(left, right);
+}
+
+
+LInstruction* LChunkBuilder::DoCompareMap(HCompareMap* instr) {
+ LInstruction* goto_instr = CheckElideControlInstruction(instr);
+ if (goto_instr != NULL) return goto_instr;
+
+ ASSERT(instr->value()->representation().IsTagged());
+ LOperand* value = UseRegisterAtStart(instr->value());
+ LOperand* temp = TempRegister();
+ return new(zone()) LCmpMapAndBranch(value, temp);
+}
+
+
+LInstruction* LChunkBuilder::DoConstant(HConstant* instr) {
+ Representation r = instr->representation();
+ if (r.IsSmi()) {
+ return DefineAsRegister(new(zone()) LConstantS);
+ } else if (r.IsInteger32()) {
+ return DefineAsRegister(new(zone()) LConstantI);
+ } else if (r.IsDouble()) {
+ return DefineAsRegister(new(zone()) LConstantD);
+ } else if (r.IsExternal()) {
+ return DefineAsRegister(new(zone()) LConstantE);
+ } else if (r.IsTagged()) {
+ return DefineAsRegister(new(zone()) LConstantT);
+ } else {
+ UNREACHABLE();
+ return NULL;
+ }
+}
+
+
+LInstruction* LChunkBuilder::DoContext(HContext* instr) {
+ if (instr->HasNoUses()) return NULL;
+
+ if (info()->IsStub()) {
+ return DefineFixed(new(zone()) LContext, cp);
+ }
+
+ return DefineAsRegister(new(zone()) LContext);
+}
+
+
+LInstruction* LChunkBuilder::DoDateField(HDateField* instr) {
+ LOperand* object = UseFixed(instr->value(), x0);
+ LDateField* result = new(zone()) LDateField(object, instr->index());
+ return MarkAsCall(DefineFixed(result, x0), instr, CAN_DEOPTIMIZE_EAGERLY);
+}
+
+
+LInstruction* LChunkBuilder::DoDebugBreak(HDebugBreak* instr) {
+ return new(zone()) LDebugBreak();
+}
+
+
+LInstruction* LChunkBuilder::DoDeclareGlobals(HDeclareGlobals* instr) {
+ LOperand* context = UseFixed(instr->context(), cp);
+ return MarkAsCall(new(zone()) LDeclareGlobals(context), instr);
+}
+
+
+LInstruction* LChunkBuilder::DoDeoptimize(HDeoptimize* instr) {
+ return AssignEnvironment(new(zone()) LDeoptimize);
+}
+
+
+LInstruction* LChunkBuilder::DoDivByPowerOf2I(HDiv* instr) {
+ ASSERT(instr->representation().IsInteger32());
+ ASSERT(instr->left()->representation().Equals(instr->representation()));
+ ASSERT(instr->right()->representation().Equals(instr->representation()));
+ LOperand* dividend = UseRegister(instr->left());
+ int32_t divisor = instr->right()->GetInteger32Constant();
+ LInstruction* result = DefineAsRegister(new(zone()) LDivByPowerOf2I(
+ dividend, divisor));
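+  // Deopt cases, mirroring the checks below: a negative divisor can produce
+  // -0, a divisor of -1 can overflow (kMinInt / -1), and divisors other
+  // than 1 and -1 can give an inexact result when the uses do not truncate.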
+ if ((instr->CheckFlag(HValue::kBailoutOnMinusZero) && divisor < 0) ||
+ (instr->CheckFlag(HValue::kCanOverflow) && divisor == -1) ||
+ (!instr->CheckFlag(HInstruction::kAllUsesTruncatingToInt32) &&
+ divisor != 1 && divisor != -1)) {
+ result = AssignEnvironment(result);
+ }
+ return result;
+}
+
+
+LInstruction* LChunkBuilder::DoDivByConstI(HDiv* instr) {
+ ASSERT(instr->representation().IsInteger32());
+ ASSERT(instr->left()->representation().Equals(instr->representation()));
+ ASSERT(instr->right()->representation().Equals(instr->representation()));
+ LOperand* dividend = UseRegister(instr->left());
+ int32_t divisor = instr->right()->GetInteger32Constant();
+ LOperand* temp = instr->CheckFlag(HInstruction::kAllUsesTruncatingToInt32)
+ ? NULL : TempRegister();
+ LInstruction* result = DefineAsRegister(new(zone()) LDivByConstI(
+ dividend, divisor, temp));
+ if (divisor == 0 ||
+ (instr->CheckFlag(HValue::kBailoutOnMinusZero) && divisor < 0) ||
+ !instr->CheckFlag(HInstruction::kAllUsesTruncatingToInt32)) {
+ result = AssignEnvironment(result);
+ }
+ return result;
+}
+
+
+LInstruction* LChunkBuilder::DoDivI(HBinaryOperation* instr) {
+ ASSERT(instr->representation().IsSmiOrInteger32());
+ ASSERT(instr->left()->representation().Equals(instr->representation()));
+ ASSERT(instr->right()->representation().Equals(instr->representation()));
+ LOperand* dividend = UseRegister(instr->left());
+ LOperand* divisor = UseRegister(instr->right());
+ LOperand* temp = instr->CheckFlag(HInstruction::kAllUsesTruncatingToInt32)
+ ? NULL : TempRegister();
+ LDivI* div = new(zone()) LDivI(dividend, divisor, temp);
+ return AssignEnvironment(DefineAsRegister(div));
+}
+
+
+LInstruction* LChunkBuilder::DoDiv(HDiv* instr) {
+ if (instr->representation().IsSmiOrInteger32()) {
+ if (instr->RightIsPowerOf2()) {
+ return DoDivByPowerOf2I(instr);
+ } else if (instr->right()->IsConstant()) {
+ return DoDivByConstI(instr);
+ } else {
+ return DoDivI(instr);
+ }
+ } else if (instr->representation().IsDouble()) {
+ return DoArithmeticD(Token::DIV, instr);
+ } else {
+ return DoArithmeticT(Token::DIV, instr);
+ }
+}
+
+
+LInstruction* LChunkBuilder::DoDummyUse(HDummyUse* instr) {
+ return DefineAsRegister(new(zone()) LDummyUse(UseAny(instr->value())));
+}
+
+
+LInstruction* LChunkBuilder::DoEnterInlined(HEnterInlined* instr) {
+ HEnvironment* outer = current_block_->last_environment();
+ HConstant* undefined = graph()->GetConstantUndefined();
+ HEnvironment* inner = outer->CopyForInlining(instr->closure(),
+ instr->arguments_count(),
+ instr->function(),
+ undefined,
+ instr->inlining_kind());
+  // Only replay the binding of the arguments object if it wasn't removed
+  // from the graph.
+ if ((instr->arguments_var() != NULL) &&
+ instr->arguments_object()->IsLinked()) {
+ inner->Bind(instr->arguments_var(), instr->arguments_object());
+ }
+ inner->set_entry(instr);
+ current_block_->UpdateEnvironment(inner);
+ chunk_->AddInlinedClosure(instr->closure());
+ return NULL;
+}
+
+
+LInstruction* LChunkBuilder::DoEnvironmentMarker(HEnvironmentMarker* instr) {
+ UNREACHABLE();
+ return NULL;
+}
+
+
+LInstruction* LChunkBuilder::DoForceRepresentation(
+ HForceRepresentation* instr) {
+ // All HForceRepresentation instructions should be eliminated in the
+ // representation change phase of Hydrogen.
+ UNREACHABLE();
+ return NULL;
+}
+
+
+LInstruction* LChunkBuilder::DoFunctionLiteral(HFunctionLiteral* instr) {
+ LOperand* context = UseFixed(instr->context(), cp);
+ return MarkAsCall(
+ DefineFixed(new(zone()) LFunctionLiteral(context), x0), instr);
+}
+
+
+LInstruction* LChunkBuilder::DoGetCachedArrayIndex(
+ HGetCachedArrayIndex* instr) {
+ ASSERT(instr->value()->representation().IsTagged());
+ LOperand* value = UseRegisterAtStart(instr->value());
+ return DefineAsRegister(new(zone()) LGetCachedArrayIndex(value));
+}
+
+
+LInstruction* LChunkBuilder::DoGoto(HGoto* instr) {
+ return new(zone()) LGoto(instr->FirstSuccessor());
+}
+
+
+LInstruction* LChunkBuilder::DoHasCachedArrayIndexAndBranch(
+ HHasCachedArrayIndexAndBranch* instr) {
+ ASSERT(instr->value()->representation().IsTagged());
+ return new(zone()) LHasCachedArrayIndexAndBranch(
+ UseRegisterAtStart(instr->value()), TempRegister());
+}
+
+
+LInstruction* LChunkBuilder::DoHasInstanceTypeAndBranch(
+ HHasInstanceTypeAndBranch* instr) {
+ ASSERT(instr->value()->representation().IsTagged());
+ LOperand* value = UseRegisterAtStart(instr->value());
+ return new(zone()) LHasInstanceTypeAndBranch(value, TempRegister());
+}
+
+
+LInstruction* LChunkBuilder::DoInnerAllocatedObject(
+ HInnerAllocatedObject* instr) {
+ LOperand* base_object = UseRegisterAtStart(instr->base_object());
+ LOperand* offset = UseRegisterOrConstantAtStart(instr->offset());
+ return DefineAsRegister(
+ new(zone()) LInnerAllocatedObject(base_object, offset));
+}
+
+
+LInstruction* LChunkBuilder::DoInstanceOf(HInstanceOf* instr) {
+ LOperand* context = UseFixed(instr->context(), cp);
+ LInstanceOf* result = new(zone()) LInstanceOf(
+ context,
+ UseFixed(instr->left(), InstanceofStub::left()),
+ UseFixed(instr->right(), InstanceofStub::right()));
+ return MarkAsCall(DefineFixed(result, x0), instr);
+}
+
+
+LInstruction* LChunkBuilder::DoInstanceOfKnownGlobal(
+ HInstanceOfKnownGlobal* instr) {
+ LInstanceOfKnownGlobal* result = new(zone()) LInstanceOfKnownGlobal(
+ UseFixed(instr->context(), cp),
+ UseFixed(instr->left(), InstanceofStub::left()));
+ return MarkAsCall(DefineFixed(result, x0), instr);
+}
+
+
+LInstruction* LChunkBuilder::DoInvokeFunction(HInvokeFunction* instr) {
+ LOperand* context = UseFixed(instr->context(), cp);
+ // The function is required (by MacroAssembler::InvokeFunction) to be in x1.
+ LOperand* function = UseFixed(instr->function(), x1);
+ LInvokeFunction* result = new(zone()) LInvokeFunction(context, function);
+ return MarkAsCall(DefineFixed(result, x0), instr, CANNOT_DEOPTIMIZE_EAGERLY);
+}
+
+
+LInstruction* LChunkBuilder::DoIsConstructCallAndBranch(
+ HIsConstructCallAndBranch* instr) {
+ return new(zone()) LIsConstructCallAndBranch(TempRegister(), TempRegister());
+}
+
+
+LInstruction* LChunkBuilder::DoCompareMinusZeroAndBranch(
+ HCompareMinusZeroAndBranch* instr) {
+ LInstruction* goto_instr = CheckElideControlInstruction(instr);
+ if (goto_instr != NULL) return goto_instr;
+ LOperand* value = UseRegister(instr->value());
+ LOperand* scratch = TempRegister();
+ return new(zone()) LCompareMinusZeroAndBranch(value, scratch);
+}
+
+
+LInstruction* LChunkBuilder::DoIsObjectAndBranch(HIsObjectAndBranch* instr) {
+ ASSERT(instr->value()->representation().IsTagged());
+ LOperand* value = UseRegisterAtStart(instr->value());
+ LOperand* temp1 = TempRegister();
+ LOperand* temp2 = TempRegister();
+ return new(zone()) LIsObjectAndBranch(value, temp1, temp2);
+}
+
+
+LInstruction* LChunkBuilder::DoIsStringAndBranch(HIsStringAndBranch* instr) {
+ ASSERT(instr->value()->representation().IsTagged());
+ LOperand* value = UseRegisterAtStart(instr->value());
+ LOperand* temp = TempRegister();
+ return new(zone()) LIsStringAndBranch(value, temp);
+}
+
+
+LInstruction* LChunkBuilder::DoIsSmiAndBranch(HIsSmiAndBranch* instr) {
+ ASSERT(instr->value()->representation().IsTagged());
+ return new(zone()) LIsSmiAndBranch(UseRegisterAtStart(instr->value()));
+}
+
+
+LInstruction* LChunkBuilder::DoIsUndetectableAndBranch(
+ HIsUndetectableAndBranch* instr) {
+ ASSERT(instr->value()->representation().IsTagged());
+ LOperand* value = UseRegisterAtStart(instr->value());
+ return new(zone()) LIsUndetectableAndBranch(value, TempRegister());
+}
+
+
+LInstruction* LChunkBuilder::DoLeaveInlined(HLeaveInlined* instr) {
+ LInstruction* pop = NULL;
+ HEnvironment* env = current_block_->last_environment();
+
+ if (env->entry()->arguments_pushed()) {
+ int argument_count = env->arguments_environment()->parameter_count();
+ pop = new(zone()) LDrop(argument_count);
+ ASSERT(instr->argument_delta() == -argument_count);
+ }
+
+ HEnvironment* outer =
+ current_block_->last_environment()->DiscardInlined(false);
+ current_block_->UpdateEnvironment(outer);
+
+ return pop;
+}
+
+
+LInstruction* LChunkBuilder::DoLoadContextSlot(HLoadContextSlot* instr) {
+ LOperand* context = UseRegisterAtStart(instr->value());
+ LInstruction* result =
+ DefineAsRegister(new(zone()) LLoadContextSlot(context));
+ return instr->RequiresHoleCheck() ? AssignEnvironment(result) : result;
+}
+
+
+LInstruction* LChunkBuilder::DoLoadFunctionPrototype(
+ HLoadFunctionPrototype* instr) {
+ LOperand* function = UseRegister(instr->function());
+ LOperand* temp = TempRegister();
+ return AssignEnvironment(DefineAsRegister(
+ new(zone()) LLoadFunctionPrototype(function, temp)));
+}
+
+
+LInstruction* LChunkBuilder::DoLoadGlobalCell(HLoadGlobalCell* instr) {
+ LLoadGlobalCell* result = new(zone()) LLoadGlobalCell();
+ return instr->RequiresHoleCheck()
+ ? AssignEnvironment(DefineAsRegister(result))
+ : DefineAsRegister(result);
+}
+
+
+LInstruction* LChunkBuilder::DoLoadGlobalGeneric(HLoadGlobalGeneric* instr) {
+ LOperand* context = UseFixed(instr->context(), cp);
+ LOperand* global_object = UseFixed(instr->global_object(), x0);
+ LLoadGlobalGeneric* result =
+ new(zone()) LLoadGlobalGeneric(context, global_object);
+ return MarkAsCall(DefineFixed(result, x0), instr);
+}
+
+
+LInstruction* LChunkBuilder::DoLoadKeyed(HLoadKeyed* instr) {
+ ASSERT(instr->key()->representation().IsSmiOrInteger32());
+ ElementsKind elements_kind = instr->elements_kind();
+ LOperand* elements = UseRegister(instr->elements());
+ LOperand* key = UseRegisterOrConstantAtStart(instr->key());
+
+ if (!instr->is_typed_elements()) {
+ if (instr->representation().IsDouble()) {
+ LOperand* temp = (!instr->key()->IsConstant() ||
+ instr->RequiresHoleCheck())
+ ? TempRegister()
+ : NULL;
+
+ LLoadKeyedFixedDouble* result =
+ new(zone()) LLoadKeyedFixedDouble(elements, key, temp);
+ return instr->RequiresHoleCheck()
+ ? AssignEnvironment(DefineAsRegister(result))
+ : DefineAsRegister(result);
+ } else {
+ ASSERT(instr->representation().IsSmiOrTagged() ||
+ instr->representation().IsInteger32());
+ LOperand* temp = instr->key()->IsConstant() ? NULL : TempRegister();
+ LLoadKeyedFixed* result =
+ new(zone()) LLoadKeyedFixed(elements, key, temp);
+ return instr->RequiresHoleCheck()
+ ? AssignEnvironment(DefineAsRegister(result))
+ : DefineAsRegister(result);
+ }
+ } else {
+ ASSERT((instr->representation().IsInteger32() &&
+ !IsDoubleOrFloatElementsKind(instr->elements_kind())) ||
+ (instr->representation().IsDouble() &&
+ IsDoubleOrFloatElementsKind(instr->elements_kind())));
+
+ LOperand* temp = instr->key()->IsConstant() ? NULL : TempRegister();
+ LLoadKeyedExternal* result =
+ new(zone()) LLoadKeyedExternal(elements, key, temp);
+ // An unsigned int array load might overflow and cause a deopt. Make sure it
+ // has an environment.
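+    // (For example, a UINT32 element above kMaxInt has no int32
+    // representation, hence the possible deopt.)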
+ if (instr->RequiresHoleCheck() ||
+ elements_kind == EXTERNAL_UINT32_ELEMENTS ||
+ elements_kind == UINT32_ELEMENTS) {
+ return AssignEnvironment(DefineAsRegister(result));
+ } else {
+ return DefineAsRegister(result);
+ }
+ }
+}
+
+
+LInstruction* LChunkBuilder::DoLoadKeyedGeneric(HLoadKeyedGeneric* instr) {
+ LOperand* context = UseFixed(instr->context(), cp);
+ LOperand* object = UseFixed(instr->object(), x1);
+ LOperand* key = UseFixed(instr->key(), x0);
+
+ LInstruction* result =
+ DefineFixed(new(zone()) LLoadKeyedGeneric(context, object, key), x0);
+ return MarkAsCall(result, instr);
+}
+
+
+LInstruction* LChunkBuilder::DoLoadNamedField(HLoadNamedField* instr) {
+ LOperand* object = UseRegisterAtStart(instr->object());
+ return DefineAsRegister(new(zone()) LLoadNamedField(object));
+}
+
+
+LInstruction* LChunkBuilder::DoLoadNamedGeneric(HLoadNamedGeneric* instr) {
+ LOperand* context = UseFixed(instr->context(), cp);
+ LOperand* object = UseFixed(instr->object(), x0);
+ LInstruction* result =
+ DefineFixed(new(zone()) LLoadNamedGeneric(context, object), x0);
+ return MarkAsCall(result, instr);
+}
+
+
+LInstruction* LChunkBuilder::DoLoadRoot(HLoadRoot* instr) {
+ return DefineAsRegister(new(zone()) LLoadRoot);
+}
+
+
+LInstruction* LChunkBuilder::DoMapEnumLength(HMapEnumLength* instr) {
+ LOperand* map = UseRegisterAtStart(instr->value());
+ return DefineAsRegister(new(zone()) LMapEnumLength(map));
+}
+
+
+LInstruction* LChunkBuilder::DoFlooringDivByPowerOf2I(HMathFloorOfDiv* instr) {
+ ASSERT(instr->representation().IsInteger32());
+ ASSERT(instr->left()->representation().Equals(instr->representation()));
+ ASSERT(instr->right()->representation().Equals(instr->representation()));
+ LOperand* dividend = UseRegisterAtStart(instr->left());
+ int32_t divisor = instr->right()->GetInteger32Constant();
+ LInstruction* result = DefineAsRegister(new(zone()) LFlooringDivByPowerOf2I(
+ dividend, divisor));
+ if ((instr->CheckFlag(HValue::kBailoutOnMinusZero) && divisor < 0) ||
+ (instr->CheckFlag(HValue::kLeftCanBeMinInt) && divisor == -1)) {
+ result = AssignEnvironment(result);
+ }
+ return result;
+}
+
+
+LInstruction* LChunkBuilder::DoFlooringDivByConstI(HMathFloorOfDiv* instr) {
+ ASSERT(instr->representation().IsInteger32());
+ ASSERT(instr->left()->representation().Equals(instr->representation()));
+ ASSERT(instr->right()->representation().Equals(instr->representation()));
+ LOperand* dividend = UseRegister(instr->left());
+ int32_t divisor = instr->right()->GetInteger32Constant();
+ LOperand* temp =
+ ((divisor > 0 && !instr->CheckFlag(HValue::kLeftCanBeNegative)) ||
+ (divisor < 0 && !instr->CheckFlag(HValue::kLeftCanBePositive))) ?
+ NULL : TempRegister();
+ LInstruction* result = DefineAsRegister(
+ new(zone()) LFlooringDivByConstI(dividend, divisor, temp));
+ if (divisor == 0 ||
+ (instr->CheckFlag(HValue::kBailoutOnMinusZero) && divisor < 0)) {
+ result = AssignEnvironment(result);
+ }
+ return result;
+}
+
+
+LInstruction* LChunkBuilder::DoFlooringDivI(HMathFloorOfDiv* instr) {
+ LOperand* dividend = UseRegister(instr->left());
+ LOperand* divisor = UseRegister(instr->right());
+ LOperand* remainder = TempRegister();
+ LInstruction* result =
+ DefineAsRegister(new(zone()) LFlooringDivI(dividend, divisor, remainder));
+ return AssignEnvironment(result);
+}
+
+
+LInstruction* LChunkBuilder::DoMathFloorOfDiv(HMathFloorOfDiv* instr) {
+ if (instr->RightIsPowerOf2()) {
+ return DoFlooringDivByPowerOf2I(instr);
+ } else if (instr->right()->IsConstant()) {
+ return DoFlooringDivByConstI(instr);
+ } else {
+ return DoFlooringDivI(instr);
+ }
+}
+
+
+LInstruction* LChunkBuilder::DoMathMinMax(HMathMinMax* instr) {
+ LOperand* left = NULL;
+ LOperand* right = NULL;
+ if (instr->representation().IsSmiOrInteger32()) {
+ ASSERT(instr->left()->representation().Equals(instr->representation()));
+ ASSERT(instr->right()->representation().Equals(instr->representation()));
+ left = UseRegisterAtStart(instr->BetterLeftOperand());
+ right = UseRegisterOrConstantAtStart(instr->BetterRightOperand());
+ } else {
+ ASSERT(instr->representation().IsDouble());
+ ASSERT(instr->left()->representation().IsDouble());
+ ASSERT(instr->right()->representation().IsDouble());
+ left = UseRegisterAtStart(instr->left());
+ right = UseRegisterAtStart(instr->right());
+ }
+ return DefineAsRegister(new(zone()) LMathMinMax(left, right));
+}
+
+
+LInstruction* LChunkBuilder::DoModByPowerOf2I(HMod* instr) {
+ ASSERT(instr->representation().IsInteger32());
+ ASSERT(instr->left()->representation().Equals(instr->representation()));
+ ASSERT(instr->right()->representation().Equals(instr->representation()));
+ LOperand* dividend = UseRegisterAtStart(instr->left());
+ int32_t divisor = instr->right()->GetInteger32Constant();
+ LInstruction* result = DefineSameAsFirst(new(zone()) LModByPowerOf2I(
+ dividend, divisor));
+ if (instr->CheckFlag(HValue::kBailoutOnMinusZero)) {
+ result = AssignEnvironment(result);
+ }
+ return result;
+}
+
+
+LInstruction* LChunkBuilder::DoModByConstI(HMod* instr) {
+ ASSERT(instr->representation().IsInteger32());
+ ASSERT(instr->left()->representation().Equals(instr->representation()));
+ ASSERT(instr->right()->representation().Equals(instr->representation()));
+ LOperand* dividend = UseRegister(instr->left());
+ int32_t divisor = instr->right()->GetInteger32Constant();
+ LOperand* temp = TempRegister();
+ LInstruction* result = DefineAsRegister(new(zone()) LModByConstI(
+ dividend, divisor, temp));
+ if (divisor == 0 || instr->CheckFlag(HValue::kBailoutOnMinusZero)) {
+ result = AssignEnvironment(result);
+ }
+ return result;
+}
+
+
+LInstruction* LChunkBuilder::DoModI(HMod* instr) {
+ ASSERT(instr->representation().IsSmiOrInteger32());
+ ASSERT(instr->left()->representation().Equals(instr->representation()));
+ ASSERT(instr->right()->representation().Equals(instr->representation()));
+ LOperand* dividend = UseRegister(instr->left());
+ LOperand* divisor = UseRegister(instr->right());
+ LInstruction* result = DefineAsRegister(new(zone()) LModI(dividend, divisor));
+ if (instr->CheckFlag(HValue::kCanBeDivByZero) ||
+ instr->CheckFlag(HValue::kBailoutOnMinusZero)) {
+ result = AssignEnvironment(result);
+ }
+ return result;
+}
+
+
+LInstruction* LChunkBuilder::DoMod(HMod* instr) {
+ if (instr->representation().IsSmiOrInteger32()) {
+ if (instr->RightIsPowerOf2()) {
+ return DoModByPowerOf2I(instr);
+ } else if (instr->right()->IsConstant()) {
+ return DoModByConstI(instr);
+ } else {
+ return DoModI(instr);
+ }
+ } else if (instr->representation().IsDouble()) {
+ return DoArithmeticD(Token::MOD, instr);
+ } else {
+ return DoArithmeticT(Token::MOD, instr);
+ }
+}
+
+
+LInstruction* LChunkBuilder::DoMul(HMul* instr) {
+ if (instr->representation().IsSmiOrInteger32()) {
+ ASSERT(instr->left()->representation().Equals(instr->representation()));
+ ASSERT(instr->right()->representation().Equals(instr->representation()));
+
+ bool can_overflow = instr->CheckFlag(HValue::kCanOverflow);
+ bool bailout_on_minus_zero = instr->CheckFlag(HValue::kBailoutOnMinusZero);
+ bool needs_environment = can_overflow || bailout_on_minus_zero;
+
+ HValue* least_const = instr->BetterLeftOperand();
+ HValue* most_const = instr->BetterRightOperand();
+
+ LOperand* left;
+
+ // LMulConstI can handle a subset of constants:
+ // With support for overflow detection:
+ // -1, 0, 1, 2
+ // 2^n, -(2^n)
+ // Without support for overflow detection:
+ // 2^n + 1, -(2^n - 1)
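+    // Illustrative examples (not exhaustive): multiplying by 8 (2^3) can
+    // presumably be done with a single shift, and by 5 (2^2 + 1) with a
+    // shift plus an add, which is why the latter form is only supported
+    // when overflow detection is not required.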
+ if (most_const->IsConstant()) {
+ int32_t constant = HConstant::cast(most_const)->Integer32Value();
+ bool small_constant = (constant >= -1) && (constant <= 2);
+ bool end_range_constant = (constant <= -kMaxInt) || (constant == kMaxInt);
+ int32_t constant_abs = Abs(constant);
+
+ if (!end_range_constant &&
+ (small_constant ||
+ (IsPowerOf2(constant_abs)) ||
+ (!can_overflow && (IsPowerOf2(constant_abs + 1) ||
+ IsPowerOf2(constant_abs - 1))))) {
+ LConstantOperand* right = UseConstant(most_const);
+ bool need_register = IsPowerOf2(constant_abs) && !small_constant;
+ left = need_register ? UseRegister(least_const)
+ : UseRegisterAtStart(least_const);
+ LMulConstIS* mul = new(zone()) LMulConstIS(left, right);
+ if (needs_environment) AssignEnvironment(mul);
+ return DefineAsRegister(mul);
+ }
+ }
+
+ left = UseRegisterAtStart(least_const);
+ // LMulI/S can handle all cases, but it requires that a register is
+ // allocated for the second operand.
+ LInstruction* result;
+ if (instr->representation().IsSmi()) {
+ LOperand* right = UseRegisterAtStart(most_const);
+ result = DefineAsRegister(new(zone()) LMulS(left, right));
+ } else {
+ LOperand* right = UseRegisterAtStart(most_const);
+ result = DefineAsRegister(new(zone()) LMulI(left, right));
+ }
+ if (needs_environment) AssignEnvironment(result);
+ return result;
+ } else if (instr->representation().IsDouble()) {
+ return DoArithmeticD(Token::MUL, instr);
+ } else {
+ return DoArithmeticT(Token::MUL, instr);
+ }
+}
+
+
+LInstruction* LChunkBuilder::DoOsrEntry(HOsrEntry* instr) {
+ ASSERT(argument_count_ == 0);
+ allocator_->MarkAsOsrEntry();
+ current_block_->last_environment()->set_ast_id(instr->ast_id());
+ return AssignEnvironment(new(zone()) LOsrEntry);
+}
+
+
+LInstruction* LChunkBuilder::DoParameter(HParameter* instr) {
+ LParameter* result = new(zone()) LParameter;
+ if (instr->kind() == HParameter::STACK_PARAMETER) {
+ int spill_index = chunk_->GetParameterStackSlot(instr->index());
+ return DefineAsSpilled(result, spill_index);
+ } else {
+ ASSERT(info()->IsStub());
+ CodeStubInterfaceDescriptor* descriptor =
+ info()->code_stub()->GetInterfaceDescriptor(info()->isolate());
+ int index = static_cast<int>(instr->index());
+ Register reg = descriptor->GetParameterRegister(index);
+ return DefineFixed(result, reg);
+ }
+}
+
+
+LInstruction* LChunkBuilder::DoPower(HPower* instr) {
+ ASSERT(instr->representation().IsDouble());
+  // We call a C function for double power. It can't trigger a GC.
+  // We need to use a fixed result register for the call.
+ Representation exponent_type = instr->right()->representation();
+ ASSERT(instr->left()->representation().IsDouble());
+ LOperand* left = UseFixedDouble(instr->left(), d0);
+ LOperand* right = exponent_type.IsInteger32()
+ ? UseFixed(instr->right(), x12)
+ : exponent_type.IsDouble()
+ ? UseFixedDouble(instr->right(), d1)
+ : UseFixed(instr->right(), x11);
+ LPower* result = new(zone()) LPower(left, right);
+ return MarkAsCall(DefineFixedDouble(result, d0),
+ instr,
+ CAN_DEOPTIMIZE_EAGERLY);
+}
+
+
+LInstruction* LChunkBuilder::DoPushArgument(HPushArgument* instr) {
+ LOperand* argument = UseRegister(instr->argument());
+ return new(zone()) LPushArgument(argument);
+}
+
+
+LInstruction* LChunkBuilder::DoRegExpLiteral(HRegExpLiteral* instr) {
+ LOperand* context = UseFixed(instr->context(), cp);
+ return MarkAsCall(
+ DefineFixed(new(zone()) LRegExpLiteral(context), x0), instr);
+}
+
+
+LInstruction* LChunkBuilder::DoDoubleBits(HDoubleBits* instr) {
+ HValue* value = instr->value();
+ ASSERT(value->representation().IsDouble());
+ return DefineAsRegister(new(zone()) LDoubleBits(UseRegister(value)));
+}
+
+
+LInstruction* LChunkBuilder::DoConstructDouble(HConstructDouble* instr) {
+ LOperand* lo = UseRegister(instr->lo());
+ LOperand* hi = UseRegister(instr->hi());
+ LOperand* temp = TempRegister();
+ return DefineAsRegister(new(zone()) LConstructDouble(hi, lo, temp));
+}
+
+
+LInstruction* LChunkBuilder::DoReturn(HReturn* instr) {
+ LOperand* context = info()->IsStub()
+ ? UseFixed(instr->context(), cp)
+ : NULL;
+ LOperand* parameter_count = UseRegisterOrConstant(instr->parameter_count());
+ return new(zone()) LReturn(UseFixed(instr->value(), x0), context,
+ parameter_count);
+}
+
+
+LInstruction* LChunkBuilder::DoSeqStringGetChar(HSeqStringGetChar* instr) {
+ LOperand* string = UseRegisterAtStart(instr->string());
+ LOperand* index = UseRegisterOrConstantAtStart(instr->index());
+ LOperand* temp = TempRegister();
+ LSeqStringGetChar* result =
+ new(zone()) LSeqStringGetChar(string, index, temp);
+ return DefineAsRegister(result);
+}
+
+
+LInstruction* LChunkBuilder::DoSeqStringSetChar(HSeqStringSetChar* instr) {
+ LOperand* string = UseRegister(instr->string());
+ LOperand* index = FLAG_debug_code
+ ? UseRegister(instr->index())
+ : UseRegisterOrConstant(instr->index());
+ LOperand* value = UseRegister(instr->value());
+ LOperand* context = FLAG_debug_code ? UseFixed(instr->context(), cp) : NULL;
+ LOperand* temp = TempRegister();
+ LSeqStringSetChar* result =
+ new(zone()) LSeqStringSetChar(context, string, index, value, temp);
+ return DefineAsRegister(result);
+}
+
+
+LInstruction* LChunkBuilder::DoShift(Token::Value op,
+ HBitwiseBinaryOperation* instr) {
+ if (instr->representation().IsTagged()) {
+ return DoArithmeticT(op, instr);
+ }
+
+ ASSERT(instr->representation().IsInteger32() ||
+ instr->representation().IsSmi());
+ ASSERT(instr->left()->representation().Equals(instr->representation()));
+ ASSERT(instr->right()->representation().Equals(instr->representation()));
+
+ LOperand* left = instr->representation().IsSmi()
+ ? UseRegister(instr->left())
+ : UseRegisterAtStart(instr->left());
+
+ HValue* right_value = instr->right();
+ LOperand* right = NULL;
+ LOperand* temp = NULL;
+ int constant_value = 0;
+ if (right_value->IsConstant()) {
+ right = UseConstant(right_value);
+ HConstant* constant = HConstant::cast(right_value);
+ constant_value = constant->Integer32Value() & 0x1f;
+ } else {
+ right = UseRegisterAtStart(right_value);
+ if (op == Token::ROR) {
+ temp = TempRegister();
+ }
+ }
+
+ // Shift operations can only deoptimize if we do a logical shift by 0 and the
+ // result cannot be truncated to int32.
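+  // (For example, a logical right shift by 0 of a negative int32 produces
+  // a uint32 above kMaxInt, which has no int32 representation unless all
+  // uses truncate.)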
+ bool does_deopt = false;
+ if ((op == Token::SHR) && (constant_value == 0)) {
+ if (FLAG_opt_safe_uint32_operations) {
+ does_deopt = !instr->CheckFlag(HInstruction::kUint32);
+ } else {
+ does_deopt = !instr->CheckUsesForFlag(HValue::kTruncatingToInt32);
+ }
+ }
+
+ LInstruction* result;
+ if (instr->representation().IsInteger32()) {
+ result = DefineAsRegister(new(zone()) LShiftI(op, left, right, does_deopt));
+ } else {
+ ASSERT(instr->representation().IsSmi());
+ result = DefineAsRegister(
+ new(zone()) LShiftS(op, left, right, temp, does_deopt));
+ }
+
+ return does_deopt ? AssignEnvironment(result) : result;
+}
+
+
+LInstruction* LChunkBuilder::DoRor(HRor* instr) {
+ return DoShift(Token::ROR, instr);
+}
+
+
+LInstruction* LChunkBuilder::DoSar(HSar* instr) {
+ return DoShift(Token::SAR, instr);
+}
+
+
+LInstruction* LChunkBuilder::DoShl(HShl* instr) {
+ return DoShift(Token::SHL, instr);
+}
+
+
+LInstruction* LChunkBuilder::DoShr(HShr* instr) {
+ return DoShift(Token::SHR, instr);
+}
+
+
+LInstruction* LChunkBuilder::DoSimulate(HSimulate* instr) {
+ instr->ReplayEnvironment(current_block_->last_environment());
+ return NULL;
+}
+
+
+LInstruction* LChunkBuilder::DoStackCheck(HStackCheck* instr) {
+ if (instr->is_function_entry()) {
+ LOperand* context = UseFixed(instr->context(), cp);
+ return MarkAsCall(new(zone()) LStackCheck(context), instr);
+ } else {
+ ASSERT(instr->is_backwards_branch());
+ LOperand* context = UseAny(instr->context());
+ return AssignEnvironment(
+ AssignPointerMap(new(zone()) LStackCheck(context)));
+ }
+}
+
+
+LInstruction* LChunkBuilder::DoStoreCodeEntry(HStoreCodeEntry* instr) {
+ LOperand* function = UseRegister(instr->function());
+ LOperand* code_object = UseRegisterAtStart(instr->code_object());
+ LOperand* temp = TempRegister();
+ return new(zone()) LStoreCodeEntry(function, code_object, temp);
+}
+
+
+LInstruction* LChunkBuilder::DoStoreContextSlot(HStoreContextSlot* instr) {
+ LOperand* temp = TempRegister();
+ LOperand* context;
+ LOperand* value;
+ if (instr->NeedsWriteBarrier()) {
+ // TODO(all): Replace these constraints when RecordWriteStub has been
+ // rewritten.
+ context = UseRegisterAndClobber(instr->context());
+ value = UseRegisterAndClobber(instr->value());
+ } else {
+ context = UseRegister(instr->context());
+ value = UseRegister(instr->value());
+ }
+ LInstruction* result = new(zone()) LStoreContextSlot(context, value, temp);
+ return instr->RequiresHoleCheck() ? AssignEnvironment(result) : result;
+}
+
+
+LInstruction* LChunkBuilder::DoStoreGlobalCell(HStoreGlobalCell* instr) {
+ LOperand* value = UseRegister(instr->value());
+ if (instr->RequiresHoleCheck()) {
+ return AssignEnvironment(new(zone()) LStoreGlobalCell(value,
+ TempRegister(),
+ TempRegister()));
+ } else {
+ return new(zone()) LStoreGlobalCell(value, TempRegister(), NULL);
+ }
+}
+
+
+LInstruction* LChunkBuilder::DoStoreKeyed(HStoreKeyed* instr) {
+ LOperand* temp = NULL;
+ LOperand* elements = NULL;
+ LOperand* val = NULL;
+ LOperand* key = UseRegisterOrConstantAtStart(instr->key());
+
+ if (!instr->is_typed_elements() &&
+ instr->value()->representation().IsTagged() &&
+ instr->NeedsWriteBarrier()) {
+ // RecordWrite() will clobber all registers.
+ elements = UseRegisterAndClobber(instr->elements());
+ val = UseRegisterAndClobber(instr->value());
+ temp = TempRegister();
+ } else {
+ elements = UseRegister(instr->elements());
+ val = UseRegister(instr->value());
+ temp = instr->key()->IsConstant() ? NULL : TempRegister();
+ }
+
+ if (instr->is_typed_elements()) {
+ ASSERT((instr->value()->representation().IsInteger32() &&
+ !IsDoubleOrFloatElementsKind(instr->elements_kind())) ||
+ (instr->value()->representation().IsDouble() &&
+ IsDoubleOrFloatElementsKind(instr->elements_kind())));
+ ASSERT((instr->is_fixed_typed_array() &&
+ instr->elements()->representation().IsTagged()) ||
+ (instr->is_external() &&
+ instr->elements()->representation().IsExternal()));
+ return new(zone()) LStoreKeyedExternal(elements, key, val, temp);
+
+ } else if (instr->value()->representation().IsDouble()) {
+ ASSERT(instr->elements()->representation().IsTagged());
+ return new(zone()) LStoreKeyedFixedDouble(elements, key, val, temp);
+
+ } else {
+ ASSERT(instr->elements()->representation().IsTagged());
+ ASSERT(instr->value()->representation().IsSmiOrTagged() ||
+ instr->value()->representation().IsInteger32());
+ return new(zone()) LStoreKeyedFixed(elements, key, val, temp);
+ }
+}
+
+
+LInstruction* LChunkBuilder::DoStoreKeyedGeneric(HStoreKeyedGeneric* instr) {
+ LOperand* context = UseFixed(instr->context(), cp);
+ LOperand* object = UseFixed(instr->object(), x2);
+ LOperand* key = UseFixed(instr->key(), x1);
+ LOperand* value = UseFixed(instr->value(), x0);
+
+ ASSERT(instr->object()->representation().IsTagged());
+ ASSERT(instr->key()->representation().IsTagged());
+ ASSERT(instr->value()->representation().IsTagged());
+
+ return MarkAsCall(
+ new(zone()) LStoreKeyedGeneric(context, object, key, value), instr);
+}
+
+
+LInstruction* LChunkBuilder::DoStoreNamedField(HStoreNamedField* instr) {
+ // TODO(jbramley): It might be beneficial to allow value to be a constant in
+ // some cases. x64 makes use of this with FLAG_track_fields, for example.
+
+ LOperand* object = UseRegister(instr->object());
+ LOperand* value;
+ LOperand* temp0 = NULL;
+ LOperand* temp1 = NULL;
+
+ if (instr->access().IsExternalMemory() ||
+ instr->field_representation().IsDouble()) {
+ value = UseRegister(instr->value());
+ } else if (instr->NeedsWriteBarrier()) {
+ value = UseRegisterAndClobber(instr->value());
+ temp0 = TempRegister();
+ temp1 = TempRegister();
+ } else if (instr->NeedsWriteBarrierForMap()) {
+ value = UseRegister(instr->value());
+ temp0 = TempRegister();
+ temp1 = TempRegister();
+ } else {
+ value = UseRegister(instr->value());
+ temp0 = TempRegister();
+ }
+
+ LStoreNamedField* result =
+ new(zone()) LStoreNamedField(object, value, temp0, temp1);
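+  // A field tracked as HeapObject cannot hold a smi, so if the value is
+  // not statically known to be a heap object the store needs an
+  // environment for the smi check.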
+ if (instr->field_representation().IsHeapObject() &&
+ !instr->value()->type().IsHeapObject()) {
+ return AssignEnvironment(result);
+ }
+ return result;
+}
+
+
+LInstruction* LChunkBuilder::DoStoreNamedGeneric(HStoreNamedGeneric* instr) {
+ LOperand* context = UseFixed(instr->context(), cp);
+ LOperand* object = UseFixed(instr->object(), x1);
+ LOperand* value = UseFixed(instr->value(), x0);
+ LInstruction* result = new(zone()) LStoreNamedGeneric(context, object, value);
+ return MarkAsCall(result, instr);
+}
+
+
+LInstruction* LChunkBuilder::DoStringAdd(HStringAdd* instr) {
+ LOperand* context = UseFixed(instr->context(), cp);
+ LOperand* left = UseFixed(instr->left(), x1);
+ LOperand* right = UseFixed(instr->right(), x0);
+
+ LStringAdd* result = new(zone()) LStringAdd(context, left, right);
+ return MarkAsCall(DefineFixed(result, x0), instr);
+}
+
+
+LInstruction* LChunkBuilder::DoStringCharCodeAt(HStringCharCodeAt* instr) {
+ LOperand* string = UseRegisterAndClobber(instr->string());
+ LOperand* index = UseRegisterAndClobber(instr->index());
+ LOperand* context = UseAny(instr->context());
+ LStringCharCodeAt* result =
+ new(zone()) LStringCharCodeAt(context, string, index);
+ return AssignEnvironment(AssignPointerMap(DefineAsRegister(result)));
+}
+
+
+LInstruction* LChunkBuilder::DoStringCharFromCode(HStringCharFromCode* instr) {
+ LOperand* char_code = UseRegister(instr->value());
+ LOperand* context = UseAny(instr->context());
+ LStringCharFromCode* result =
+ new(zone()) LStringCharFromCode(context, char_code);
+ return AssignPointerMap(DefineAsRegister(result));
+}
+
+
+LInstruction* LChunkBuilder::DoStringCompareAndBranch(
+ HStringCompareAndBranch* instr) {
+ ASSERT(instr->left()->representation().IsTagged());
+ ASSERT(instr->right()->representation().IsTagged());
+ LOperand* context = UseFixed(instr->context(), cp);
+ LOperand* left = UseFixed(instr->left(), x1);
+ LOperand* right = UseFixed(instr->right(), x0);
+ LStringCompareAndBranch* result =
+ new(zone()) LStringCompareAndBranch(context, left, right);
+ return MarkAsCall(result, instr);
+}
+
+
+LInstruction* LChunkBuilder::DoSub(HSub* instr) {
+ if (instr->representation().IsSmiOrInteger32()) {
+ ASSERT(instr->left()->representation().Equals(instr->representation()));
+ ASSERT(instr->right()->representation().Equals(instr->representation()));
+ LOperand *left;
+ if (instr->left()->IsConstant() &&
+ (HConstant::cast(instr->left())->Integer32Value() == 0)) {
+ left = UseConstant(instr->left());
+ } else {
+ left = UseRegisterAtStart(instr->left());
+ }
+ LOperand* right = UseRegisterOrConstantAtStart(instr->right());
+ LInstruction* result = instr->representation().IsSmi() ?
+ DefineAsRegister(new(zone()) LSubS(left, right)) :
+ DefineAsRegister(new(zone()) LSubI(left, right));
+ if (instr->CheckFlag(HValue::kCanOverflow)) {
+ result = AssignEnvironment(result);
+ }
+ return result;
+ } else if (instr->representation().IsDouble()) {
+ return DoArithmeticD(Token::SUB, instr);
+ } else {
+ return DoArithmeticT(Token::SUB, instr);
+ }
+}
+
+
+LInstruction* LChunkBuilder::DoThisFunction(HThisFunction* instr) {
+ if (instr->HasNoUses()) {
+ return NULL;
+ } else {
+ return DefineAsRegister(new(zone()) LThisFunction);
+ }
+}
+
+
+LInstruction* LChunkBuilder::DoToFastProperties(HToFastProperties* instr) {
+ LOperand* object = UseFixed(instr->value(), x0);
+ LToFastProperties* result = new(zone()) LToFastProperties(object);
+ return MarkAsCall(DefineFixed(result, x0), instr);
+}
+
+
+LInstruction* LChunkBuilder::DoTransitionElementsKind(
+ HTransitionElementsKind* instr) {
+ LOperand* object = UseRegister(instr->object());
+ if (IsSimpleMapChangeTransition(instr->from_kind(), instr->to_kind())) {
+ LTransitionElementsKind* result =
+ new(zone()) LTransitionElementsKind(object, NULL,
+ TempRegister(), TempRegister());
+ return result;
+ } else {
+ LOperand* context = UseFixed(instr->context(), cp);
+ LTransitionElementsKind* result =
+ new(zone()) LTransitionElementsKind(object, context, TempRegister());
+ return AssignPointerMap(result);
+ }
+}
+
+
+LInstruction* LChunkBuilder::DoTrapAllocationMemento(
+ HTrapAllocationMemento* instr) {
+ LOperand* object = UseRegister(instr->object());
+ LOperand* temp1 = TempRegister();
+ LOperand* temp2 = TempRegister();
+ LTrapAllocationMemento* result =
+ new(zone()) LTrapAllocationMemento(object, temp1, temp2);
+ return AssignEnvironment(result);
+}
+
+
+LInstruction* LChunkBuilder::DoTypeof(HTypeof* instr) {
+ LOperand* context = UseFixed(instr->context(), cp);
+ // TODO(jbramley): In ARM, this uses UseFixed to force the input to x0.
+ // However, LCodeGen::DoTypeof just pushes it to the stack (for CallRuntime)
+ // anyway, so the input doesn't have to be in x0. We might be able to improve
+ // the ARM back-end a little by relaxing this restriction.
+ LTypeof* result =
+ new(zone()) LTypeof(context, UseRegisterAtStart(instr->value()));
+ return MarkAsCall(DefineFixed(result, x0), instr);
+}
+
+
+LInstruction* LChunkBuilder::DoTypeofIsAndBranch(HTypeofIsAndBranch* instr) {
+ LInstruction* goto_instr = CheckElideControlInstruction(instr);
+ if (goto_instr != NULL) return goto_instr;
+
+ // We only need temp registers in some cases, but we can't dereference the
+ // instr->type_literal() handle to test that here.
+ LOperand* temp1 = TempRegister();
+ LOperand* temp2 = TempRegister();
+
+ return new(zone()) LTypeofIsAndBranch(
+ UseRegister(instr->value()), temp1, temp2);
+}
+
+
+LInstruction* LChunkBuilder::DoUnaryMathOperation(HUnaryMathOperation* instr) {
+ switch (instr->op()) {
+ case kMathAbs: {
+ Representation r = instr->representation();
+ if (r.IsTagged()) {
+ // The tagged case might need to allocate a HeapNumber for the result,
+ // so it is handled by a separate LInstruction.
+ LOperand* context = UseFixed(instr->context(), cp);
+ LOperand* input = UseRegister(instr->value());
+ LOperand* temp1 = TempRegister();
+ LOperand* temp2 = TempRegister();
+ LOperand* temp3 = TempRegister();
+ LMathAbsTagged* result =
+ new(zone()) LMathAbsTagged(context, input, temp1, temp2, temp3);
+ return AssignEnvironment(AssignPointerMap(DefineAsRegister(result)));
+ } else {
+ LOperand* input = UseRegisterAtStart(instr->value());
+ LMathAbs* result = new(zone()) LMathAbs(input);
+ if (r.IsDouble()) {
+ // The Double case can never fail so it doesn't need an environment.
+ return DefineAsRegister(result);
+ } else {
+ ASSERT(r.IsInteger32() || r.IsSmi());
+        // The Integer32 and Smi cases need an environment because they can
+        // deoptimize on the minimum representable number.
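+        // (For example, the absolute value of kMinInt does not fit in an
+        // int32.)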
+ return AssignEnvironment(DefineAsRegister(result));
+ }
+ }
+ }
+ case kMathExp: {
+ ASSERT(instr->representation().IsDouble());
+ ASSERT(instr->value()->representation().IsDouble());
+ LOperand* input = UseRegister(instr->value());
+ // TODO(all): Implement TempFPRegister.
+ LOperand* double_temp1 = FixedTemp(d24); // This was chosen arbitrarily.
+ LOperand* temp1 = TempRegister();
+ LOperand* temp2 = TempRegister();
+ LOperand* temp3 = TempRegister();
+ LMathExp* result = new(zone()) LMathExp(input, double_temp1,
+ temp1, temp2, temp3);
+ return DefineAsRegister(result);
+ }
+ case kMathFloor: {
+ ASSERT(instr->representation().IsInteger32());
+ ASSERT(instr->value()->representation().IsDouble());
+ // TODO(jbramley): ARM64 can easily handle a double argument with frintm,
+ // but we're never asked for it here. At the moment, we fall back to the
+ // runtime if the result doesn't fit, like the other architectures.
+ LOperand* input = UseRegisterAtStart(instr->value());
+ LMathFloor* result = new(zone()) LMathFloor(input);
+ return AssignEnvironment(AssignPointerMap(DefineAsRegister(result)));
+ }
+ case kMathLog: {
+ ASSERT(instr->representation().IsDouble());
+ ASSERT(instr->value()->representation().IsDouble());
+ LOperand* input = UseFixedDouble(instr->value(), d0);
+ LMathLog* result = new(zone()) LMathLog(input);
+ return MarkAsCall(DefineFixedDouble(result, d0), instr);
+ }
+ case kMathPowHalf: {
+ ASSERT(instr->representation().IsDouble());
+ ASSERT(instr->value()->representation().IsDouble());
+ LOperand* input = UseRegister(instr->value());
+ return DefineAsRegister(new(zone()) LMathPowHalf(input));
+ }
+ case kMathRound: {
+ ASSERT(instr->representation().IsInteger32());
+ ASSERT(instr->value()->representation().IsDouble());
+ // TODO(jbramley): As with kMathFloor, we can probably handle double
+ // results fairly easily, but we are never asked for them.
+ LOperand* input = UseRegister(instr->value());
+      LOperand* temp = FixedTemp(d24); // Chosen arbitrarily.
+ LMathRound* result = new(zone()) LMathRound(input, temp);
+ return AssignEnvironment(DefineAsRegister(result));
+ }
+ case kMathSqrt: {
+ ASSERT(instr->representation().IsDouble());
+ ASSERT(instr->value()->representation().IsDouble());
+ LOperand* input = UseRegisterAtStart(instr->value());
+ return DefineAsRegister(new(zone()) LMathSqrt(input));
+ }
+ case kMathClz32: {
+ ASSERT(instr->representation().IsInteger32());
+ ASSERT(instr->value()->representation().IsInteger32());
+ LOperand* input = UseRegisterAtStart(instr->value());
+ return DefineAsRegister(new(zone()) LMathClz32(input));
+ }
+ default:
+ UNREACHABLE();
+ return NULL;
+ }
+}
+
+
+LInstruction* LChunkBuilder::DoUnknownOSRValue(HUnknownOSRValue* instr) {
+ // Use an index that corresponds to the location in the unoptimized frame,
+ // which the optimized frame will subsume.
+ int env_index = instr->index();
+ int spill_index = 0;
+ if (instr->environment()->is_parameter_index(env_index)) {
+ spill_index = chunk_->GetParameterStackSlot(env_index);
+ } else {
+ spill_index = env_index - instr->environment()->first_local_index();
+ if (spill_index > LUnallocated::kMaxFixedSlotIndex) {
+ Abort(kTooManySpillSlotsNeededForOSR);
+ spill_index = 0;
+ }
+ }
+ return DefineAsSpilled(new(zone()) LUnknownOSRValue, spill_index);
+}
+
+
+LInstruction* LChunkBuilder::DoUseConst(HUseConst* instr) {
+ return NULL;
+}
+
+
+LInstruction* LChunkBuilder::DoForInPrepareMap(HForInPrepareMap* instr) {
+ LOperand* context = UseFixed(instr->context(), cp);
+ // Assign object to a fixed register different from those already used in
+ // LForInPrepareMap.
+ LOperand* object = UseFixed(instr->enumerable(), x0);
+ LForInPrepareMap* result = new(zone()) LForInPrepareMap(context, object);
+ return MarkAsCall(DefineFixed(result, x0), instr, CAN_DEOPTIMIZE_EAGERLY);
+}
+
+
+LInstruction* LChunkBuilder::DoForInCacheArray(HForInCacheArray* instr) {
+ LOperand* map = UseRegister(instr->map());
+ return AssignEnvironment(DefineAsRegister(new(zone()) LForInCacheArray(map)));
+}
+
+
+LInstruction* LChunkBuilder::DoCheckMapValue(HCheckMapValue* instr) {
+ LOperand* value = UseRegisterAtStart(instr->value());
+ LOperand* map = UseRegister(instr->map());
+ LOperand* temp = TempRegister();
+ return AssignEnvironment(new(zone()) LCheckMapValue(value, map, temp));
+}
+
+
+LInstruction* LChunkBuilder::DoLoadFieldByIndex(HLoadFieldByIndex* instr) {
+ LOperand* object = UseRegisterAtStart(instr->object());
+ LOperand* index = UseRegister(instr->index());
+ return DefineAsRegister(new(zone()) LLoadFieldByIndex(object, index));
+}
+
+
+LInstruction* LChunkBuilder::DoWrapReceiver(HWrapReceiver* instr) {
+ LOperand* receiver = UseRegister(instr->receiver());
+ LOperand* function = UseRegister(instr->function());
+ LWrapReceiver* result = new(zone()) LWrapReceiver(receiver, function);
+ return AssignEnvironment(DefineAsRegister(result));
+}
+
+
+} } // namespace v8::internal
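
The builder methods above all follow the same shape: pick an operand-placement policy for each input (UseFixed, UseRegister, UseRegisterAtStart, TempRegister, ...), allocate the L-instruction in the zone, then compose result and side-effect policies around it (DefineAsRegister, DefineFixed, AssignEnvironment, AssignPointerMap, MarkAsCall). A minimal sketch of that pattern follows; LFoo, HFoo and DoFoo are invented names for illustration, not instructions from this patch, and the composition mirrors the real methods above (e.g. DoTypeof):

    LInstruction* LChunkBuilder::DoFoo(HFoo* instr) {
      // Constrain where the register allocator may place each operand.
      LOperand* context = UseFixed(instr->context(), cp);  // must live in cp
      LOperand* value = UseRegister(instr->value());       // any register
      LOperand* temp = TempRegister();                     // scratch register
      LFoo* result = new(zone()) LFoo(context, value, temp);
      // Fix the result to x0 and record that this instruction calls out,
      // so the allocator treats caller-saved registers as clobbered.
      return MarkAsCall(DefineFixed(result, x0), instr);
    }
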
diff --git a/deps/v8/src/arm64/lithium-arm64.h b/deps/v8/src/arm64/lithium-arm64.h
new file mode 100644
index 0000000000..da3c5f17b5
--- /dev/null
+++ b/deps/v8/src/arm64/lithium-arm64.h
@@ -0,0 +1,3100 @@
+// Copyright 2013 the V8 project authors. All rights reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following
+// disclaimer in the documentation and/or other materials provided
+// with the distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived
+// from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+#ifndef V8_ARM64_LITHIUM_ARM64_H_
+#define V8_ARM64_LITHIUM_ARM64_H_
+
+#include "hydrogen.h"
+#include "lithium-allocator.h"
+#include "lithium.h"
+#include "safepoint-table.h"
+#include "utils.h"
+
+namespace v8 {
+namespace internal {
+
+// Forward declarations.
+class LCodeGen;
+
+#define LITHIUM_CONCRETE_INSTRUCTION_LIST(V) \
+ V(AccessArgumentsAt) \
+ V(AddE) \
+ V(AddI) \
+ V(AddS) \
+ V(Allocate) \
+ V(ApplyArguments) \
+ V(ArgumentsElements) \
+ V(ArgumentsLength) \
+ V(ArithmeticD) \
+ V(ArithmeticT) \
+ V(BitI) \
+ V(BitS) \
+ V(BoundsCheck) \
+ V(Branch) \
+ V(CallFunction) \
+ V(CallJSFunction) \
+ V(CallNew) \
+ V(CallNewArray) \
+ V(CallRuntime) \
+ V(CallStub) \
+ V(CallWithDescriptor) \
+ V(CheckInstanceType) \
+ V(CheckMapValue) \
+ V(CheckMaps) \
+ V(CheckNonSmi) \
+ V(CheckSmi) \
+ V(CheckValue) \
+ V(ClampDToUint8) \
+ V(ClampIToUint8) \
+ V(ClampTToUint8) \
+ V(ClassOfTestAndBranch) \
+ V(CmpHoleAndBranchD) \
+ V(CmpHoleAndBranchT) \
+ V(CmpMapAndBranch) \
+ V(CmpObjectEqAndBranch) \
+ V(CmpT) \
+ V(CompareMinusZeroAndBranch) \
+ V(CompareNumericAndBranch) \
+ V(ConstantD) \
+ V(ConstantE) \
+ V(ConstantI) \
+ V(ConstantS) \
+ V(ConstantT) \
+ V(ConstructDouble) \
+ V(Context) \
+ V(DateField) \
+ V(DebugBreak) \
+ V(DeclareGlobals) \
+ V(Deoptimize) \
+ V(DivByConstI) \
+ V(DivByPowerOf2I) \
+ V(DivI) \
+ V(DoubleBits) \
+ V(DoubleToIntOrSmi) \
+ V(Drop) \
+ V(Dummy) \
+ V(DummyUse) \
+ V(FlooringDivByConstI) \
+ V(FlooringDivByPowerOf2I) \
+ V(FlooringDivI) \
+ V(ForInCacheArray) \
+ V(ForInPrepareMap) \
+ V(FunctionLiteral) \
+ V(GetCachedArrayIndex) \
+ V(Goto) \
+ V(HasCachedArrayIndexAndBranch) \
+ V(HasInstanceTypeAndBranch) \
+ V(InnerAllocatedObject) \
+ V(InstanceOf) \
+ V(InstanceOfKnownGlobal) \
+ V(InstructionGap) \
+ V(Integer32ToDouble) \
+ V(InvokeFunction) \
+ V(IsConstructCallAndBranch) \
+ V(IsObjectAndBranch) \
+ V(IsSmiAndBranch) \
+ V(IsStringAndBranch) \
+ V(IsUndetectableAndBranch) \
+ V(Label) \
+ V(LazyBailout) \
+ V(LoadContextSlot) \
+ V(LoadFieldByIndex) \
+ V(LoadFunctionPrototype) \
+ V(LoadGlobalCell) \
+ V(LoadGlobalGeneric) \
+ V(LoadKeyedExternal) \
+ V(LoadKeyedFixed) \
+ V(LoadKeyedFixedDouble) \
+ V(LoadKeyedGeneric) \
+ V(LoadNamedField) \
+ V(LoadNamedGeneric) \
+ V(LoadRoot) \
+ V(MapEnumLength) \
+ V(MathAbs) \
+ V(MathAbsTagged) \
+ V(MathClz32) \
+ V(MathExp) \
+ V(MathFloor) \
+ V(MathLog) \
+ V(MathMinMax) \
+ V(MathPowHalf) \
+ V(MathRound) \
+ V(MathSqrt) \
+ V(ModByConstI) \
+ V(ModByPowerOf2I) \
+ V(ModI) \
+ V(MulConstIS) \
+ V(MulI) \
+ V(MulS) \
+ V(NumberTagD) \
+ V(NumberTagU) \
+ V(NumberUntagD) \
+ V(OsrEntry) \
+ V(Parameter) \
+ V(Power) \
+ V(PushArgument) \
+ V(RegExpLiteral) \
+ V(Return) \
+ V(SeqStringGetChar) \
+ V(SeqStringSetChar) \
+ V(ShiftI) \
+ V(ShiftS) \
+ V(SmiTag) \
+ V(SmiUntag) \
+ V(StackCheck) \
+ V(StoreCodeEntry) \
+ V(StoreContextSlot) \
+ V(StoreGlobalCell) \
+ V(StoreKeyedExternal) \
+ V(StoreKeyedFixed) \
+ V(StoreKeyedFixedDouble) \
+ V(StoreKeyedGeneric) \
+ V(StoreNamedField) \
+ V(StoreNamedGeneric) \
+ V(StringAdd) \
+ V(StringCharCodeAt) \
+ V(StringCharFromCode) \
+ V(StringCompareAndBranch) \
+ V(SubI) \
+ V(SubS) \
+ V(TaggedToI) \
+ V(ThisFunction) \
+ V(ToFastProperties) \
+ V(TransitionElementsKind) \
+ V(TrapAllocationMemento) \
+ V(TruncateDoubleToIntOrSmi) \
+ V(Typeof) \
+ V(TypeofIsAndBranch) \
+ V(Uint32ToDouble) \
+ V(UnknownOSRValue) \
+ V(WrapReceiver)
+
+
+#define DECLARE_CONCRETE_INSTRUCTION(type, mnemonic) \
+ virtual Opcode opcode() const V8_FINAL V8_OVERRIDE { \
+ return LInstruction::k##type; \
+ } \
+ virtual void CompileToNative(LCodeGen* generator) V8_FINAL V8_OVERRIDE; \
+ virtual const char* Mnemonic() const V8_FINAL V8_OVERRIDE { \
+ return mnemonic; \
+ } \
+ static L##type* cast(LInstruction* instr) { \
+ ASSERT(instr->Is##type()); \
+ return reinterpret_cast<L##type*>(instr); \
+ }
+
+
+#define DECLARE_HYDROGEN_ACCESSOR(type) \
+ H##type* hydrogen() const { \
+ return H##type::cast(this->hydrogen_value()); \
+ }
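
To make the macro machinery above concrete: for a class such as LAddI (declared further down in this header), DECLARE_CONCRETE_INSTRUCTION(AddI, "add-i") together with DECLARE_HYDROGEN_ACCESSOR(Add) expands roughly to the following members (a sketch of the preprocessor output, not code from the patch):

    virtual Opcode opcode() const V8_FINAL V8_OVERRIDE {
      return LInstruction::kAddI;
    }
    virtual void CompileToNative(LCodeGen* generator) V8_FINAL V8_OVERRIDE;
    virtual const char* Mnemonic() const V8_FINAL V8_OVERRIDE {
      return "add-i";
    }
    static LAddI* cast(LInstruction* instr) {
      ASSERT(instr->IsAddI());
      return reinterpret_cast<LAddI*>(instr);
    }
    HAdd* hydrogen() const {
      return HAdd::cast(this->hydrogen_value());
    }

The kAddI enumerator and the IsAddI() predicate come from the LITHIUM_CONCRETE_INSTRUCTION_LIST expansions inside LInstruction below.
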
+
+
+class LInstruction : public ZoneObject {
+ public:
+ LInstruction()
+ : environment_(NULL),
+ hydrogen_value_(NULL),
+ bit_field_(IsCallBits::encode(false)) { }
+
+ virtual ~LInstruction() { }
+
+ virtual void CompileToNative(LCodeGen* generator) = 0;
+ virtual const char* Mnemonic() const = 0;
+ virtual void PrintTo(StringStream* stream);
+ virtual void PrintDataTo(StringStream* stream);
+ virtual void PrintOutputOperandTo(StringStream* stream);
+
+ enum Opcode {
+ // Declare a unique enum value for each instruction.
+#define DECLARE_OPCODE(type) k##type,
+ LITHIUM_CONCRETE_INSTRUCTION_LIST(DECLARE_OPCODE)
+ kNumberOfInstructions
+#undef DECLARE_OPCODE
+ };
+
+ virtual Opcode opcode() const = 0;
+
+ // Declare non-virtual type testers for all leaf IR classes.
+#define DECLARE_PREDICATE(type) \
+ bool Is##type() const { return opcode() == k##type; }
+ LITHIUM_CONCRETE_INSTRUCTION_LIST(DECLARE_PREDICATE)
+#undef DECLARE_PREDICATE
+
+ // Declare virtual predicates for instructions that don't have
+ // an opcode.
+ virtual bool IsGap() const { return false; }
+
+ virtual bool IsControl() const { return false; }
+
+ void set_environment(LEnvironment* env) { environment_ = env; }
+ LEnvironment* environment() const { return environment_; }
+ bool HasEnvironment() const { return environment_ != NULL; }
+
+ void set_pointer_map(LPointerMap* p) { pointer_map_.set(p); }
+ LPointerMap* pointer_map() const { return pointer_map_.get(); }
+ bool HasPointerMap() const { return pointer_map_.is_set(); }
+
+ void set_hydrogen_value(HValue* value) { hydrogen_value_ = value; }
+ HValue* hydrogen_value() const { return hydrogen_value_; }
+
+ virtual void SetDeferredLazyDeoptimizationEnvironment(LEnvironment* env) { }
+
+ void MarkAsCall() { bit_field_ = IsCallBits::update(bit_field_, true); }
+ bool IsCall() const { return IsCallBits::decode(bit_field_); }
+
+ // Interface to the register allocator and iterators.
+ bool ClobbersTemps() const { return IsCall(); }
+ bool ClobbersRegisters() const { return IsCall(); }
+ virtual bool ClobbersDoubleRegisters() const { return IsCall(); }
+ bool IsMarkedAsCall() const { return IsCall(); }
+
+ virtual bool HasResult() const = 0;
+ virtual LOperand* result() const = 0;
+
+ virtual int InputCount() = 0;
+ virtual LOperand* InputAt(int i) = 0;
+ virtual int TempCount() = 0;
+ virtual LOperand* TempAt(int i) = 0;
+
+ LOperand* FirstInput() { return InputAt(0); }
+ LOperand* Output() { return HasResult() ? result() : NULL; }
+
+ virtual bool HasInterestingComment(LCodeGen* gen) const { return true; }
+
+#ifdef DEBUG
+ void VerifyCall();
+#endif
+
+ private:
+ class IsCallBits: public BitField<bool, 0, 1> {};
+
+ LEnvironment* environment_;
+ SetOncePointer<LPointerMap> pointer_map_;
+ HValue* hydrogen_value_;
+ int32_t bit_field_;
+};
+
+
+// R = number of result operands (0 or 1).
+template<int R>
+class LTemplateResultInstruction : public LInstruction {
+ public:
+ // Allow 0 or 1 output operands.
+ STATIC_ASSERT(R == 0 || R == 1);
+ virtual bool HasResult() const V8_FINAL V8_OVERRIDE {
+ return (R != 0) && (result() != NULL);
+ }
+ void set_result(LOperand* operand) { results_[0] = operand; }
+ LOperand* result() const { return results_[0]; }
+
+ protected:
+ EmbeddedContainer<LOperand*, R> results_;
+};
+
+
+// R = number of result operands (0 or 1).
+// I = number of input operands.
+// T = number of temporary operands.
+template<int R, int I, int T>
+class LTemplateInstruction : public LTemplateResultInstruction<R> {
+ protected:
+ EmbeddedContainer<LOperand*, I> inputs_;
+ EmbeddedContainer<LOperand*, T> temps_;
+
+ private:
+ // Iterator support.
+ virtual int InputCount() V8_FINAL V8_OVERRIDE { return I; }
+ virtual LOperand* InputAt(int i) V8_FINAL V8_OVERRIDE { return inputs_[i]; }
+
+ virtual int TempCount() V8_FINAL V8_OVERRIDE { return T; }
+ virtual LOperand* TempAt(int i) V8_FINAL V8_OVERRIDE { return temps_[i]; }
+};
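
The three template parameters fix the sizes of the embedded operand arrays, and each concrete instruction simply fills them in from its constructor; LAddI below, for instance, is an LTemplateInstruction<1, 2, 0> (one result, two inputs, no temps). As an illustrative sketch only (LNegI is invented here and is not part of the patch; a real instruction would also need an entry in LITHIUM_CONCRETE_INSTRUCTION_LIST):

    // <R=1, I=1, T=1>: one result operand, one input, one temp register.
    class LNegI V8_FINAL : public LTemplateInstruction<1, 1, 1> {
     public:
      LNegI(LOperand* value, LOperand* temp) {
        inputs_[0] = value;  // operands are chosen by the LChunkBuilder
        temps_[0] = temp;
      }
      LOperand* value() { return inputs_[0]; }
      LOperand* temp() { return temps_[0]; }
      DECLARE_CONCRETE_INSTRUCTION(NegI, "neg-i")
    };
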
+
+
+class LUnknownOSRValue V8_FINAL : public LTemplateInstruction<1, 0, 0> {
+ public:
+ virtual bool HasInterestingComment(LCodeGen* gen) const V8_OVERRIDE {
+ return false;
+ }
+ DECLARE_CONCRETE_INSTRUCTION(UnknownOSRValue, "unknown-osr-value")
+};
+
+
+template<int I, int T>
+class LControlInstruction : public LTemplateInstruction<0, I, T> {
+ public:
+ LControlInstruction() : false_label_(NULL), true_label_(NULL) { }
+
+ virtual bool IsControl() const V8_FINAL V8_OVERRIDE { return true; }
+
+ int SuccessorCount() { return hydrogen()->SuccessorCount(); }
+ HBasicBlock* SuccessorAt(int i) { return hydrogen()->SuccessorAt(i); }
+
+ int TrueDestination(LChunk* chunk) {
+ return chunk->LookupDestination(true_block_id());
+ }
+
+ int FalseDestination(LChunk* chunk) {
+ return chunk->LookupDestination(false_block_id());
+ }
+
+ Label* TrueLabel(LChunk* chunk) {
+ if (true_label_ == NULL) {
+ true_label_ = chunk->GetAssemblyLabel(TrueDestination(chunk));
+ }
+ return true_label_;
+ }
+
+ Label* FalseLabel(LChunk* chunk) {
+ if (false_label_ == NULL) {
+ false_label_ = chunk->GetAssemblyLabel(FalseDestination(chunk));
+ }
+ return false_label_;
+ }
+
+ protected:
+ int true_block_id() { return SuccessorAt(0)->block_id(); }
+ int false_block_id() { return SuccessorAt(1)->block_id(); }
+
+ private:
+ DECLARE_HYDROGEN_ACCESSOR(ControlInstruction);
+
+ Label* false_label_;
+ Label* true_label_;
+};
+
+
+class LGap : public LTemplateInstruction<0, 0, 0> {
+ public:
+ explicit LGap(HBasicBlock* block)
+ : block_(block) {
+ parallel_moves_[BEFORE] = NULL;
+ parallel_moves_[START] = NULL;
+ parallel_moves_[END] = NULL;
+ parallel_moves_[AFTER] = NULL;
+ }
+
+ // Can't use the DECLARE-macro here because of sub-classes.
+ virtual bool IsGap() const V8_OVERRIDE { return true; }
+ virtual void PrintDataTo(StringStream* stream) V8_OVERRIDE;
+ static LGap* cast(LInstruction* instr) {
+ ASSERT(instr->IsGap());
+ return reinterpret_cast<LGap*>(instr);
+ }
+
+ bool IsRedundant() const;
+
+ HBasicBlock* block() const { return block_; }
+
+ enum InnerPosition {
+ BEFORE,
+ START,
+ END,
+ AFTER,
+ FIRST_INNER_POSITION = BEFORE,
+ LAST_INNER_POSITION = AFTER
+ };
+
+ LParallelMove* GetOrCreateParallelMove(InnerPosition pos, Zone* zone) {
+ if (parallel_moves_[pos] == NULL) {
+ parallel_moves_[pos] = new(zone) LParallelMove(zone);
+ }
+ return parallel_moves_[pos];
+ }
+
+ LParallelMove* GetParallelMove(InnerPosition pos) {
+ return parallel_moves_[pos];
+ }
+
+ private:
+ LParallelMove* parallel_moves_[LAST_INNER_POSITION + 1];
+ HBasicBlock* block_;
+};
+
+
+class LInstructionGap V8_FINAL : public LGap {
+ public:
+ explicit LInstructionGap(HBasicBlock* block) : LGap(block) { }
+
+ virtual bool HasInterestingComment(LCodeGen* gen) const V8_OVERRIDE {
+ return !IsRedundant();
+ }
+
+ DECLARE_CONCRETE_INSTRUCTION(InstructionGap, "gap")
+};
+
+
+class LDrop V8_FINAL : public LTemplateInstruction<0, 0, 0> {
+ public:
+ explicit LDrop(int count) : count_(count) { }
+
+ int count() const { return count_; }
+
+ DECLARE_CONCRETE_INSTRUCTION(Drop, "drop")
+
+ private:
+ int count_;
+};
+
+
+class LDummy V8_FINAL : public LTemplateInstruction<1, 0, 0> {
+ public:
+ explicit LDummy() { }
+ DECLARE_CONCRETE_INSTRUCTION(Dummy, "dummy")
+};
+
+
+class LDummyUse V8_FINAL : public LTemplateInstruction<1, 1, 0> {
+ public:
+ explicit LDummyUse(LOperand* value) {
+ inputs_[0] = value;
+ }
+ DECLARE_CONCRETE_INSTRUCTION(DummyUse, "dummy-use")
+};
+
+
+class LGoto V8_FINAL : public LTemplateInstruction<0, 0, 0> {
+ public:
+ explicit LGoto(HBasicBlock* block) : block_(block) { }
+
+ virtual bool HasInterestingComment(LCodeGen* gen) const V8_OVERRIDE;
+ DECLARE_CONCRETE_INSTRUCTION(Goto, "goto")
+ virtual void PrintDataTo(StringStream* stream) V8_OVERRIDE;
+ virtual bool IsControl() const V8_OVERRIDE { return true; }
+
+ int block_id() const { return block_->block_id(); }
+
+ private:
+ HBasicBlock* block_;
+};
+
+
+class LLazyBailout V8_FINAL : public LTemplateInstruction<0, 0, 0> {
+ public:
+ LLazyBailout() : gap_instructions_size_(0) { }
+
+ DECLARE_CONCRETE_INSTRUCTION(LazyBailout, "lazy-bailout")
+
+ void set_gap_instructions_size(int gap_instructions_size) {
+ gap_instructions_size_ = gap_instructions_size;
+ }
+ int gap_instructions_size() { return gap_instructions_size_; }
+
+ private:
+ int gap_instructions_size_;
+};
+
+
+class LLabel V8_FINAL : public LGap {
+ public:
+ explicit LLabel(HBasicBlock* block)
+ : LGap(block), replacement_(NULL) { }
+
+ virtual bool HasInterestingComment(LCodeGen* gen) const V8_OVERRIDE {
+ return false;
+ }
+ DECLARE_CONCRETE_INSTRUCTION(Label, "label")
+
+ virtual void PrintDataTo(StringStream* stream) V8_OVERRIDE;
+
+ int block_id() const { return block()->block_id(); }
+ bool is_loop_header() const { return block()->IsLoopHeader(); }
+ bool is_osr_entry() const { return block()->is_osr_entry(); }
+ Label* label() { return &label_; }
+ LLabel* replacement() const { return replacement_; }
+ void set_replacement(LLabel* label) { replacement_ = label; }
+ bool HasReplacement() const { return replacement_ != NULL; }
+
+ private:
+ Label label_;
+ LLabel* replacement_;
+};
+
+
+class LOsrEntry V8_FINAL : public LTemplateInstruction<0, 0, 0> {
+ public:
+ LOsrEntry() {}
+
+ virtual bool HasInterestingComment(LCodeGen* gen) const V8_OVERRIDE {
+ return false;
+ }
+ DECLARE_CONCRETE_INSTRUCTION(OsrEntry, "osr-entry")
+};
+
+
+class LAccessArgumentsAt V8_FINAL : public LTemplateInstruction<1, 3, 0> {
+ public:
+ LAccessArgumentsAt(LOperand* arguments,
+ LOperand* length,
+ LOperand* index) {
+ inputs_[0] = arguments;
+ inputs_[1] = length;
+ inputs_[2] = index;
+ }
+
+ DECLARE_CONCRETE_INSTRUCTION(AccessArgumentsAt, "access-arguments-at")
+
+ LOperand* arguments() { return inputs_[0]; }
+ LOperand* length() { return inputs_[1]; }
+ LOperand* index() { return inputs_[2]; }
+
+ virtual void PrintDataTo(StringStream* stream) V8_OVERRIDE;
+};
+
+
+class LAddE V8_FINAL : public LTemplateInstruction<1, 2, 0> {
+ public:
+ LAddE(LOperand* left, LOperand* right) {
+ inputs_[0] = left;
+ inputs_[1] = right;
+ }
+
+ LOperand* left() { return inputs_[0]; }
+ LOperand* right() { return inputs_[1]; }
+
+ DECLARE_CONCRETE_INSTRUCTION(AddE, "add-e")
+ DECLARE_HYDROGEN_ACCESSOR(Add)
+};
+
+
+class LAddI V8_FINAL : public LTemplateInstruction<1, 2, 0> {
+ public:
+ LAddI(LOperand* left, LOperand* right) {
+ inputs_[0] = left;
+ inputs_[1] = right;
+ }
+
+ LOperand* left() { return inputs_[0]; }
+ LOperand* right() { return inputs_[1]; }
+
+ DECLARE_CONCRETE_INSTRUCTION(AddI, "add-i")
+ DECLARE_HYDROGEN_ACCESSOR(Add)
+};
+
+
+class LAddS V8_FINAL : public LTemplateInstruction<1, 2, 0> {
+ public:
+ LAddS(LOperand* left, LOperand* right) {
+ inputs_[0] = left;
+ inputs_[1] = right;
+ }
+
+ LOperand* left() { return inputs_[0]; }
+ LOperand* right() { return inputs_[1]; }
+
+ DECLARE_CONCRETE_INSTRUCTION(AddS, "add-s")
+ DECLARE_HYDROGEN_ACCESSOR(Add)
+};
+
+
+class LAllocate V8_FINAL : public LTemplateInstruction<1, 2, 3> {
+ public:
+ LAllocate(LOperand* context,
+ LOperand* size,
+ LOperand* temp1,
+ LOperand* temp2,
+ LOperand* temp3) {
+ inputs_[0] = context;
+ inputs_[1] = size;
+ temps_[0] = temp1;
+ temps_[1] = temp2;
+ temps_[2] = temp3;
+ }
+
+ LOperand* context() { return inputs_[0]; }
+ LOperand* size() { return inputs_[1]; }
+ LOperand* temp1() { return temps_[0]; }
+ LOperand* temp2() { return temps_[1]; }
+ LOperand* temp3() { return temps_[2]; }
+
+ DECLARE_CONCRETE_INSTRUCTION(Allocate, "allocate")
+ DECLARE_HYDROGEN_ACCESSOR(Allocate)
+};
+
+
+class LApplyArguments V8_FINAL : public LTemplateInstruction<1, 4, 0> {
+ public:
+ LApplyArguments(LOperand* function,
+ LOperand* receiver,
+ LOperand* length,
+ LOperand* elements) {
+ inputs_[0] = function;
+ inputs_[1] = receiver;
+ inputs_[2] = length;
+ inputs_[3] = elements;
+ }
+
+ DECLARE_CONCRETE_INSTRUCTION(ApplyArguments, "apply-arguments")
+
+ LOperand* function() { return inputs_[0]; }
+ LOperand* receiver() { return inputs_[1]; }
+ LOperand* length() { return inputs_[2]; }
+ LOperand* elements() { return inputs_[3]; }
+};
+
+
+class LArgumentsElements V8_FINAL : public LTemplateInstruction<1, 0, 1> {
+ public:
+ explicit LArgumentsElements(LOperand* temp) {
+ temps_[0] = temp;
+ }
+
+ LOperand* temp() { return temps_[0]; }
+
+ DECLARE_CONCRETE_INSTRUCTION(ArgumentsElements, "arguments-elements")
+ DECLARE_HYDROGEN_ACCESSOR(ArgumentsElements)
+};
+
+
+class LArgumentsLength V8_FINAL : public LTemplateInstruction<1, 1, 0> {
+ public:
+ explicit LArgumentsLength(LOperand* elements) {
+ inputs_[0] = elements;
+ }
+
+ LOperand* elements() { return inputs_[0]; }
+
+ DECLARE_CONCRETE_INSTRUCTION(ArgumentsLength, "arguments-length")
+};
+
+
+class LArithmeticD V8_FINAL : public LTemplateInstruction<1, 2, 0> {
+ public:
+ LArithmeticD(Token::Value op,
+ LOperand* left,
+ LOperand* right)
+ : op_(op) {
+ inputs_[0] = left;
+ inputs_[1] = right;
+ }
+
+ Token::Value op() const { return op_; }
+ LOperand* left() { return inputs_[0]; }
+ LOperand* right() { return inputs_[1]; }
+
+ virtual Opcode opcode() const V8_OVERRIDE {
+ return LInstruction::kArithmeticD;
+ }
+ virtual void CompileToNative(LCodeGen* generator) V8_OVERRIDE;
+ virtual const char* Mnemonic() const V8_OVERRIDE;
+
+ private:
+ Token::Value op_;
+};
+
+
+class LArithmeticT V8_FINAL : public LTemplateInstruction<1, 3, 0> {
+ public:
+ LArithmeticT(Token::Value op,
+ LOperand* context,
+ LOperand* left,
+ LOperand* right)
+ : op_(op) {
+ inputs_[0] = context;
+ inputs_[1] = left;
+ inputs_[2] = right;
+ }
+
+ LOperand* context() { return inputs_[0]; }
+ LOperand* left() { return inputs_[1]; }
+ LOperand* right() { return inputs_[2]; }
+ Token::Value op() const { return op_; }
+
+ virtual Opcode opcode() const V8_OVERRIDE {
+ return LInstruction::kArithmeticT;
+ }
+ virtual void CompileToNative(LCodeGen* generator) V8_OVERRIDE;
+ virtual const char* Mnemonic() const V8_OVERRIDE;
+
+ private:
+ Token::Value op_;
+};
+
+
+class LBoundsCheck V8_FINAL : public LTemplateInstruction<0, 2, 0> {
+ public:
+ explicit LBoundsCheck(LOperand* index, LOperand* length) {
+ inputs_[0] = index;
+ inputs_[1] = length;
+ }
+
+ LOperand* index() { return inputs_[0]; }
+ LOperand* length() { return inputs_[1]; }
+
+ DECLARE_CONCRETE_INSTRUCTION(BoundsCheck, "bounds-check")
+ DECLARE_HYDROGEN_ACCESSOR(BoundsCheck)
+};
+
+
+class LBitI V8_FINAL : public LTemplateInstruction<1, 2, 0> {
+ public:
+ LBitI(LOperand* left, LOperand* right) {
+ inputs_[0] = left;
+ inputs_[1] = right;
+ }
+
+ LOperand* left() { return inputs_[0]; }
+ LOperand* right() { return inputs_[1]; }
+
+ Token::Value op() const { return hydrogen()->op(); }
+
+ DECLARE_CONCRETE_INSTRUCTION(BitI, "bit-i")
+ DECLARE_HYDROGEN_ACCESSOR(Bitwise)
+};
+
+
+class LBitS V8_FINAL : public LTemplateInstruction<1, 2, 0> {
+ public:
+ LBitS(LOperand* left, LOperand* right) {
+ inputs_[0] = left;
+ inputs_[1] = right;
+ }
+
+ LOperand* left() { return inputs_[0]; }
+ LOperand* right() { return inputs_[1]; }
+
+ Token::Value op() const { return hydrogen()->op(); }
+
+ DECLARE_CONCRETE_INSTRUCTION(BitS, "bit-s")
+ DECLARE_HYDROGEN_ACCESSOR(Bitwise)
+};
+
+
+class LBranch V8_FINAL : public LControlInstruction<1, 2> {
+ public:
+  LBranch(LOperand* value, LOperand* temp1, LOperand* temp2) {
+ inputs_[0] = value;
+ temps_[0] = temp1;
+ temps_[1] = temp2;
+ }
+
+ LOperand* value() { return inputs_[0]; }
+ LOperand* temp1() { return temps_[0]; }
+ LOperand* temp2() { return temps_[1]; }
+
+ DECLARE_CONCRETE_INSTRUCTION(Branch, "branch")
+ DECLARE_HYDROGEN_ACCESSOR(Branch)
+
+ virtual void PrintDataTo(StringStream* stream) V8_OVERRIDE;
+};
+
+
+class LCallJSFunction V8_FINAL : public LTemplateInstruction<1, 1, 0> {
+ public:
+ explicit LCallJSFunction(LOperand* function) {
+ inputs_[0] = function;
+ }
+
+ LOperand* function() { return inputs_[0]; }
+
+ DECLARE_CONCRETE_INSTRUCTION(CallJSFunction, "call-js-function")
+ DECLARE_HYDROGEN_ACCESSOR(CallJSFunction)
+
+ virtual void PrintDataTo(StringStream* stream) V8_OVERRIDE;
+
+ int arity() const { return hydrogen()->argument_count() - 1; }
+};
+
+
+class LCallFunction V8_FINAL : public LTemplateInstruction<1, 2, 0> {
+ public:
+ LCallFunction(LOperand* context, LOperand* function) {
+ inputs_[0] = context;
+ inputs_[1] = function;
+ }
+
+ LOperand* context() { return inputs_[0]; }
+ LOperand* function() { return inputs_[1]; }
+
+ DECLARE_CONCRETE_INSTRUCTION(CallFunction, "call-function")
+ DECLARE_HYDROGEN_ACCESSOR(CallFunction)
+
+ int arity() const { return hydrogen()->argument_count() - 1; }
+};
+
+
+class LCallNew V8_FINAL : public LTemplateInstruction<1, 2, 0> {
+ public:
+ LCallNew(LOperand* context, LOperand* constructor) {
+ inputs_[0] = context;
+ inputs_[1] = constructor;
+ }
+
+ LOperand* context() { return inputs_[0]; }
+ LOperand* constructor() { return inputs_[1]; }
+
+ DECLARE_CONCRETE_INSTRUCTION(CallNew, "call-new")
+ DECLARE_HYDROGEN_ACCESSOR(CallNew)
+
+ virtual void PrintDataTo(StringStream* stream) V8_OVERRIDE;
+
+ int arity() const { return hydrogen()->argument_count() - 1; }
+};
+
+
+class LCallNewArray V8_FINAL : public LTemplateInstruction<1, 2, 0> {
+ public:
+ LCallNewArray(LOperand* context, LOperand* constructor) {
+ inputs_[0] = context;
+ inputs_[1] = constructor;
+ }
+
+ LOperand* context() { return inputs_[0]; }
+ LOperand* constructor() { return inputs_[1]; }
+
+ DECLARE_CONCRETE_INSTRUCTION(CallNewArray, "call-new-array")
+ DECLARE_HYDROGEN_ACCESSOR(CallNewArray)
+
+ virtual void PrintDataTo(StringStream* stream) V8_OVERRIDE;
+
+ int arity() const { return hydrogen()->argument_count() - 1; }
+};
+
+
+class LCallRuntime V8_FINAL : public LTemplateInstruction<1, 1, 0> {
+ public:
+ explicit LCallRuntime(LOperand* context) {
+ inputs_[0] = context;
+ }
+
+ LOperand* context() { return inputs_[0]; }
+
+ DECLARE_CONCRETE_INSTRUCTION(CallRuntime, "call-runtime")
+ DECLARE_HYDROGEN_ACCESSOR(CallRuntime)
+
+ virtual bool ClobbersDoubleRegisters() const V8_OVERRIDE {
+ return save_doubles() == kDontSaveFPRegs;
+ }
+
+ const Runtime::Function* function() const { return hydrogen()->function(); }
+ int arity() const { return hydrogen()->argument_count(); }
+ SaveFPRegsMode save_doubles() const { return hydrogen()->save_doubles(); }
+};
+
+
+class LCallStub V8_FINAL : public LTemplateInstruction<1, 1, 0> {
+ public:
+ explicit LCallStub(LOperand* context) {
+ inputs_[0] = context;
+ }
+
+ LOperand* context() { return inputs_[0]; }
+
+ DECLARE_CONCRETE_INSTRUCTION(CallStub, "call-stub")
+ DECLARE_HYDROGEN_ACCESSOR(CallStub)
+};
+
+
+class LCheckInstanceType V8_FINAL : public LTemplateInstruction<0, 1, 1> {
+ public:
+ explicit LCheckInstanceType(LOperand* value, LOperand* temp) {
+ inputs_[0] = value;
+ temps_[0] = temp;
+ }
+
+ LOperand* value() { return inputs_[0]; }
+ LOperand* temp() { return temps_[0]; }
+
+ DECLARE_CONCRETE_INSTRUCTION(CheckInstanceType, "check-instance-type")
+ DECLARE_HYDROGEN_ACCESSOR(CheckInstanceType)
+};
+
+
+class LCheckMaps V8_FINAL : public LTemplateInstruction<0, 1, 1> {
+ public:
+ explicit LCheckMaps(LOperand* value, LOperand* temp = NULL) {
+ inputs_[0] = value;
+ temps_[0] = temp;
+ }
+
+ LOperand* value() { return inputs_[0]; }
+ LOperand* temp() { return temps_[0]; }
+
+ DECLARE_CONCRETE_INSTRUCTION(CheckMaps, "check-maps")
+ DECLARE_HYDROGEN_ACCESSOR(CheckMaps)
+};
+
+
+class LCheckNonSmi V8_FINAL : public LTemplateInstruction<0, 1, 0> {
+ public:
+ explicit LCheckNonSmi(LOperand* value) {
+ inputs_[0] = value;
+ }
+
+ LOperand* value() { return inputs_[0]; }
+
+ DECLARE_CONCRETE_INSTRUCTION(CheckNonSmi, "check-non-smi")
+ DECLARE_HYDROGEN_ACCESSOR(CheckHeapObject)
+};
+
+
+class LCheckSmi V8_FINAL : public LTemplateInstruction<1, 1, 0> {
+ public:
+ explicit LCheckSmi(LOperand* value) {
+ inputs_[0] = value;
+ }
+
+ LOperand* value() { return inputs_[0]; }
+
+ DECLARE_CONCRETE_INSTRUCTION(CheckSmi, "check-smi")
+};
+
+
+class LCheckValue V8_FINAL : public LTemplateInstruction<0, 1, 0> {
+ public:
+ explicit LCheckValue(LOperand* value) {
+ inputs_[0] = value;
+ }
+
+ LOperand* value() { return inputs_[0]; }
+
+ DECLARE_CONCRETE_INSTRUCTION(CheckValue, "check-value")
+ DECLARE_HYDROGEN_ACCESSOR(CheckValue)
+};
+
+
+class LClampDToUint8 V8_FINAL : public LTemplateInstruction<1, 1, 0> {
+ public:
+ explicit LClampDToUint8(LOperand* unclamped) {
+ inputs_[0] = unclamped;
+ }
+
+ LOperand* unclamped() { return inputs_[0]; }
+
+ DECLARE_CONCRETE_INSTRUCTION(ClampDToUint8, "clamp-d-to-uint8")
+};
+
+
+class LClampIToUint8 V8_FINAL : public LTemplateInstruction<1, 1, 0> {
+ public:
+ explicit LClampIToUint8(LOperand* unclamped) {
+ inputs_[0] = unclamped;
+ }
+
+ LOperand* unclamped() { return inputs_[0]; }
+
+ DECLARE_CONCRETE_INSTRUCTION(ClampIToUint8, "clamp-i-to-uint8")
+};
+
+
+class LClampTToUint8 V8_FINAL : public LTemplateInstruction<1, 1, 2> {
+ public:
+ LClampTToUint8(LOperand* unclamped, LOperand* temp1, LOperand* temp2) {
+ inputs_[0] = unclamped;
+ temps_[0] = temp1;
+ temps_[1] = temp2;
+ }
+
+ LOperand* unclamped() { return inputs_[0]; }
+ LOperand* temp1() { return temps_[0]; }
+ LOperand* temp2() { return temps_[1]; }
+
+ DECLARE_CONCRETE_INSTRUCTION(ClampTToUint8, "clamp-t-to-uint8")
+};
+
+
+class LDoubleBits V8_FINAL : public LTemplateInstruction<1, 1, 0> {
+ public:
+ explicit LDoubleBits(LOperand* value) {
+ inputs_[0] = value;
+ }
+
+ LOperand* value() { return inputs_[0]; }
+
+ DECLARE_CONCRETE_INSTRUCTION(DoubleBits, "double-bits")
+ DECLARE_HYDROGEN_ACCESSOR(DoubleBits)
+};
+
+
+class LConstructDouble V8_FINAL : public LTemplateInstruction<1, 2, 1> {
+ public:
+ LConstructDouble(LOperand* hi, LOperand* lo, LOperand* temp) {
+ inputs_[0] = hi;
+ inputs_[1] = lo;
+ temps_[0] = temp;
+ }
+
+ LOperand* hi() { return inputs_[0]; }
+ LOperand* lo() { return inputs_[1]; }
+ LOperand* temp() { return temps_[0]; }
+
+ DECLARE_CONCRETE_INSTRUCTION(ConstructDouble, "construct-double")
+};
+
+
+class LClassOfTestAndBranch V8_FINAL : public LControlInstruction<1, 2> {
+ public:
+ LClassOfTestAndBranch(LOperand* value, LOperand* temp1, LOperand* temp2) {
+ inputs_[0] = value;
+ temps_[0] = temp1;
+ temps_[1] = temp2;
+ }
+
+ LOperand* value() { return inputs_[0]; }
+ LOperand* temp1() { return temps_[0]; }
+ LOperand* temp2() { return temps_[1]; }
+
+ DECLARE_CONCRETE_INSTRUCTION(ClassOfTestAndBranch,
+ "class-of-test-and-branch")
+ DECLARE_HYDROGEN_ACCESSOR(ClassOfTestAndBranch)
+
+ virtual void PrintDataTo(StringStream* stream) V8_OVERRIDE;
+};
+
+
+class LCmpHoleAndBranchD V8_FINAL : public LControlInstruction<1, 1> {
+ public:
+ explicit LCmpHoleAndBranchD(LOperand* object, LOperand* temp) {
+ inputs_[0] = object;
+ temps_[0] = temp;
+ }
+
+ LOperand* object() { return inputs_[0]; }
+ LOperand* temp() { return temps_[0]; }
+
+ DECLARE_CONCRETE_INSTRUCTION(CmpHoleAndBranchD, "cmp-hole-and-branch-d")
+ DECLARE_HYDROGEN_ACCESSOR(CompareHoleAndBranch)
+};
+
+
+class LCmpHoleAndBranchT V8_FINAL : public LControlInstruction<1, 0> {
+ public:
+ explicit LCmpHoleAndBranchT(LOperand* object) {
+ inputs_[0] = object;
+ }
+
+ LOperand* object() { return inputs_[0]; }
+
+ DECLARE_CONCRETE_INSTRUCTION(CmpHoleAndBranchT, "cmp-hole-and-branch-t")
+ DECLARE_HYDROGEN_ACCESSOR(CompareHoleAndBranch)
+};
+
+
+class LCmpMapAndBranch V8_FINAL : public LControlInstruction<1, 1> {
+ public:
+ LCmpMapAndBranch(LOperand* value, LOperand* temp) {
+ inputs_[0] = value;
+ temps_[0] = temp;
+ }
+
+ LOperand* value() { return inputs_[0]; }
+ LOperand* temp() { return temps_[0]; }
+
+ DECLARE_CONCRETE_INSTRUCTION(CmpMapAndBranch, "cmp-map-and-branch")
+ DECLARE_HYDROGEN_ACCESSOR(CompareMap)
+
+ Handle<Map> map() const { return hydrogen()->map().handle(); }
+};
+
+
+class LCmpObjectEqAndBranch V8_FINAL : public LControlInstruction<2, 0> {
+ public:
+ LCmpObjectEqAndBranch(LOperand* left, LOperand* right) {
+ inputs_[0] = left;
+ inputs_[1] = right;
+ }
+
+ LOperand* left() { return inputs_[0]; }
+ LOperand* right() { return inputs_[1]; }
+
+ DECLARE_CONCRETE_INSTRUCTION(CmpObjectEqAndBranch, "cmp-object-eq-and-branch")
+ DECLARE_HYDROGEN_ACCESSOR(CompareObjectEqAndBranch)
+};
+
+
+class LCmpT V8_FINAL : public LTemplateInstruction<1, 3, 0> {
+ public:
+ LCmpT(LOperand* context, LOperand* left, LOperand* right) {
+ inputs_[0] = context;
+ inputs_[1] = left;
+ inputs_[2] = right;
+ }
+
+ LOperand* context() { return inputs_[0]; }
+ LOperand* left() { return inputs_[1]; }
+ LOperand* right() { return inputs_[2]; }
+
+ DECLARE_CONCRETE_INSTRUCTION(CmpT, "cmp-t")
+ DECLARE_HYDROGEN_ACCESSOR(CompareGeneric)
+
+ Token::Value op() const { return hydrogen()->token(); }
+};
+
+
+class LCompareMinusZeroAndBranch V8_FINAL : public LControlInstruction<1, 1> {
+ public:
+ LCompareMinusZeroAndBranch(LOperand* value, LOperand* temp) {
+ inputs_[0] = value;
+ temps_[0] = temp;
+ }
+
+ LOperand* value() { return inputs_[0]; }
+ LOperand* temp() { return temps_[0]; }
+
+ DECLARE_CONCRETE_INSTRUCTION(CompareMinusZeroAndBranch,
+ "cmp-minus-zero-and-branch")
+ DECLARE_HYDROGEN_ACCESSOR(CompareMinusZeroAndBranch)
+};
+
+
+class LCompareNumericAndBranch V8_FINAL : public LControlInstruction<2, 0> {
+ public:
+ LCompareNumericAndBranch(LOperand* left, LOperand* right) {
+ inputs_[0] = left;
+ inputs_[1] = right;
+ }
+
+ LOperand* left() { return inputs_[0]; }
+ LOperand* right() { return inputs_[1]; }
+
+ DECLARE_CONCRETE_INSTRUCTION(CompareNumericAndBranch,
+ "compare-numeric-and-branch")
+ DECLARE_HYDROGEN_ACCESSOR(CompareNumericAndBranch)
+
+ Token::Value op() const { return hydrogen()->token(); }
+ bool is_double() const {
+ return hydrogen()->representation().IsDouble();
+ }
+
+ virtual void PrintDataTo(StringStream* stream) V8_OVERRIDE;
+};
+
+
+class LConstantD V8_FINAL : public LTemplateInstruction<1, 0, 0> {
+ public:
+ DECLARE_CONCRETE_INSTRUCTION(ConstantD, "constant-d")
+ DECLARE_HYDROGEN_ACCESSOR(Constant)
+
+ double value() const { return hydrogen()->DoubleValue(); }
+};
+
+
+class LConstantE V8_FINAL : public LTemplateInstruction<1, 0, 0> {
+ public:
+ DECLARE_CONCRETE_INSTRUCTION(ConstantE, "constant-e")
+ DECLARE_HYDROGEN_ACCESSOR(Constant)
+
+ ExternalReference value() const {
+ return hydrogen()->ExternalReferenceValue();
+ }
+};
+
+
+class LConstantI V8_FINAL : public LTemplateInstruction<1, 0, 0> {
+ public:
+ DECLARE_CONCRETE_INSTRUCTION(ConstantI, "constant-i")
+ DECLARE_HYDROGEN_ACCESSOR(Constant)
+
+ int32_t value() const { return hydrogen()->Integer32Value(); }
+};
+
+
+class LConstantS V8_FINAL : public LTemplateInstruction<1, 0, 0> {
+ public:
+ DECLARE_CONCRETE_INSTRUCTION(ConstantS, "constant-s")
+ DECLARE_HYDROGEN_ACCESSOR(Constant)
+
+ Smi* value() const { return Smi::FromInt(hydrogen()->Integer32Value()); }
+};
+
+
+class LConstantT V8_FINAL : public LTemplateInstruction<1, 0, 0> {
+ public:
+ DECLARE_CONCRETE_INSTRUCTION(ConstantT, "constant-t")
+ DECLARE_HYDROGEN_ACCESSOR(Constant)
+
+ Handle<Object> value(Isolate* isolate) const {
+ return hydrogen()->handle(isolate);
+ }
+};
+
+
+class LContext V8_FINAL : public LTemplateInstruction<1, 0, 0> {
+ public:
+ DECLARE_CONCRETE_INSTRUCTION(Context, "context")
+ DECLARE_HYDROGEN_ACCESSOR(Context)
+};
+
+
+class LDateField V8_FINAL : public LTemplateInstruction<1, 1, 0> {
+ public:
+ LDateField(LOperand* date, Smi* index) : index_(index) {
+ inputs_[0] = date;
+ }
+
+ LOperand* date() { return inputs_[0]; }
+ Smi* index() const { return index_; }
+
+ DECLARE_CONCRETE_INSTRUCTION(DateField, "date-field")
+ DECLARE_HYDROGEN_ACCESSOR(DateField)
+
+ private:
+ Smi* index_;
+};
+
+
+class LDebugBreak V8_FINAL : public LTemplateInstruction<0, 0, 0> {
+ public:
+ DECLARE_CONCRETE_INSTRUCTION(DebugBreak, "break")
+};
+
+
+class LDeclareGlobals V8_FINAL : public LTemplateInstruction<0, 1, 0> {
+ public:
+ explicit LDeclareGlobals(LOperand* context) {
+ inputs_[0] = context;
+ }
+
+ LOperand* context() { return inputs_[0]; }
+
+ DECLARE_CONCRETE_INSTRUCTION(DeclareGlobals, "declare-globals")
+ DECLARE_HYDROGEN_ACCESSOR(DeclareGlobals)
+};
+
+
+class LDeoptimize V8_FINAL : public LTemplateInstruction<0, 0, 0> {
+ public:
+ DECLARE_CONCRETE_INSTRUCTION(Deoptimize, "deoptimize")
+ DECLARE_HYDROGEN_ACCESSOR(Deoptimize)
+};
+
+
+class LDivByPowerOf2I V8_FINAL : public LTemplateInstruction<1, 1, 0> {
+ public:
+ LDivByPowerOf2I(LOperand* dividend, int32_t divisor) {
+ inputs_[0] = dividend;
+ divisor_ = divisor;
+ }
+
+ LOperand* dividend() { return inputs_[0]; }
+ int32_t divisor() const { return divisor_; }
+
+ DECLARE_CONCRETE_INSTRUCTION(DivByPowerOf2I, "div-by-power-of-2-i")
+ DECLARE_HYDROGEN_ACCESSOR(Div)
+
+ private:
+ int32_t divisor_;
+};
+
+
+class LDivByConstI V8_FINAL : public LTemplateInstruction<1, 1, 1> {
+ public:
+ LDivByConstI(LOperand* dividend, int32_t divisor, LOperand* temp) {
+ inputs_[0] = dividend;
+ divisor_ = divisor;
+ temps_[0] = temp;
+ }
+
+ LOperand* dividend() { return inputs_[0]; }
+ int32_t divisor() const { return divisor_; }
+ LOperand* temp() { return temps_[0]; }
+
+ DECLARE_CONCRETE_INSTRUCTION(DivByConstI, "div-by-const-i")
+ DECLARE_HYDROGEN_ACCESSOR(Div)
+
+ private:
+ int32_t divisor_;
+};
+
+
+class LDivI V8_FINAL : public LTemplateInstruction<1, 2, 1> {
+ public:
+ LDivI(LOperand* left, LOperand* right, LOperand* temp) {
+ inputs_[0] = left;
+ inputs_[1] = right;
+ temps_[0] = temp;
+ }
+
+ LOperand* left() { return inputs_[0]; }
+ LOperand* right() { return inputs_[1]; }
+ LOperand* temp() { return temps_[0]; }
+
+ DECLARE_CONCRETE_INSTRUCTION(DivI, "div-i")
+ DECLARE_HYDROGEN_ACCESSOR(BinaryOperation)
+};
+
+
+class LDoubleToIntOrSmi V8_FINAL : public LTemplateInstruction<1, 1, 0> {
+ public:
+ explicit LDoubleToIntOrSmi(LOperand* value) {
+ inputs_[0] = value;
+ }
+
+ LOperand* value() { return inputs_[0]; }
+
+ DECLARE_CONCRETE_INSTRUCTION(DoubleToIntOrSmi, "double-to-int-or-smi")
+ DECLARE_HYDROGEN_ACCESSOR(UnaryOperation)
+
+ bool tag_result() { return hydrogen()->representation().IsSmi(); }
+};
+
+
+class LForInCacheArray V8_FINAL : public LTemplateInstruction<1, 1, 0> {
+ public:
+ explicit LForInCacheArray(LOperand* map) {
+ inputs_[0] = map;
+ }
+
+ LOperand* map() { return inputs_[0]; }
+
+ DECLARE_CONCRETE_INSTRUCTION(ForInCacheArray, "for-in-cache-array")
+
+ int idx() {
+ return HForInCacheArray::cast(this->hydrogen_value())->idx();
+ }
+};
+
+
+class LForInPrepareMap V8_FINAL : public LTemplateInstruction<1, 2, 0> {
+ public:
+ LForInPrepareMap(LOperand* context, LOperand* object) {
+ inputs_[0] = context;
+ inputs_[1] = object;
+ }
+
+ LOperand* context() { return inputs_[0]; }
+ LOperand* object() { return inputs_[1]; }
+
+ DECLARE_CONCRETE_INSTRUCTION(ForInPrepareMap, "for-in-prepare-map")
+};
+
+
+class LGetCachedArrayIndex V8_FINAL : public LTemplateInstruction<1, 1, 0> {
+ public:
+ explicit LGetCachedArrayIndex(LOperand* value) {
+ inputs_[0] = value;
+ }
+
+ LOperand* value() { return inputs_[0]; }
+
+ DECLARE_CONCRETE_INSTRUCTION(GetCachedArrayIndex, "get-cached-array-index")
+ DECLARE_HYDROGEN_ACCESSOR(GetCachedArrayIndex)
+};
+
+
+class LHasCachedArrayIndexAndBranch V8_FINAL
+ : public LControlInstruction<1, 1> {
+ public:
+ LHasCachedArrayIndexAndBranch(LOperand* value, LOperand* temp) {
+ inputs_[0] = value;
+ temps_[0] = temp;
+ }
+
+ LOperand* value() { return inputs_[0]; }
+ LOperand* temp() { return temps_[0]; }
+
+ DECLARE_CONCRETE_INSTRUCTION(HasCachedArrayIndexAndBranch,
+ "has-cached-array-index-and-branch")
+ DECLARE_HYDROGEN_ACCESSOR(HasCachedArrayIndexAndBranch)
+
+ virtual void PrintDataTo(StringStream* stream) V8_OVERRIDE;
+};
+
+
+class LHasInstanceTypeAndBranch V8_FINAL : public LControlInstruction<1, 1> {
+ public:
+ LHasInstanceTypeAndBranch(LOperand* value, LOperand* temp) {
+ inputs_[0] = value;
+ temps_[0] = temp;
+ }
+
+ LOperand* value() { return inputs_[0]; }
+ LOperand* temp() { return temps_[0]; }
+
+ DECLARE_CONCRETE_INSTRUCTION(HasInstanceTypeAndBranch,
+ "has-instance-type-and-branch")
+ DECLARE_HYDROGEN_ACCESSOR(HasInstanceTypeAndBranch)
+
+ virtual void PrintDataTo(StringStream* stream) V8_OVERRIDE;
+};
+
+
+class LInnerAllocatedObject V8_FINAL : public LTemplateInstruction<1, 2, 0> {
+ public:
+ LInnerAllocatedObject(LOperand* base_object, LOperand* offset) {
+ inputs_[0] = base_object;
+ inputs_[1] = offset;
+ }
+
+ LOperand* base_object() const { return inputs_[0]; }
+ LOperand* offset() const { return inputs_[1]; }
+
+ virtual void PrintDataTo(StringStream* stream) V8_OVERRIDE;
+
+ DECLARE_CONCRETE_INSTRUCTION(InnerAllocatedObject, "inner-allocated-object")
+};
+
+
+class LInstanceOf V8_FINAL : public LTemplateInstruction<1, 3, 0> {
+ public:
+ LInstanceOf(LOperand* context, LOperand* left, LOperand* right) {
+ inputs_[0] = context;
+ inputs_[1] = left;
+ inputs_[2] = right;
+ }
+
+ LOperand* context() { return inputs_[0]; }
+ LOperand* left() { return inputs_[1]; }
+ LOperand* right() { return inputs_[2]; }
+
+ DECLARE_CONCRETE_INSTRUCTION(InstanceOf, "instance-of")
+};
+
+
+class LInstanceOfKnownGlobal V8_FINAL : public LTemplateInstruction<1, 2, 0> {
+ public:
+ LInstanceOfKnownGlobal(LOperand* context, LOperand* value) {
+ inputs_[0] = context;
+ inputs_[1] = value;
+ }
+
+ LOperand* context() { return inputs_[0]; }
+ LOperand* value() { return inputs_[1]; }
+
+ DECLARE_CONCRETE_INSTRUCTION(InstanceOfKnownGlobal,
+ "instance-of-known-global")
+ DECLARE_HYDROGEN_ACCESSOR(InstanceOfKnownGlobal)
+
+ Handle<JSFunction> function() const { return hydrogen()->function(); }
+ LEnvironment* GetDeferredLazyDeoptimizationEnvironment() {
+ return lazy_deopt_env_;
+ }
+ virtual void SetDeferredLazyDeoptimizationEnvironment(
+ LEnvironment* env) V8_OVERRIDE {
+ lazy_deopt_env_ = env;
+ }
+
+ private:
+ LEnvironment* lazy_deopt_env_;
+};
+
+
+class LInteger32ToDouble V8_FINAL : public LTemplateInstruction<1, 1, 0> {
+ public:
+ explicit LInteger32ToDouble(LOperand* value) {
+ inputs_[0] = value;
+ }
+
+ LOperand* value() { return inputs_[0]; }
+
+ DECLARE_CONCRETE_INSTRUCTION(Integer32ToDouble, "int32-to-double")
+};
+
+
+class LCallWithDescriptor V8_FINAL : public LTemplateResultInstruction<1> {
+ public:
+ LCallWithDescriptor(const CallInterfaceDescriptor* descriptor,
+ ZoneList<LOperand*>& operands,
+ Zone* zone)
+ : descriptor_(descriptor),
+ inputs_(descriptor->environment_length() + 1, zone) {
+ ASSERT(descriptor->environment_length() + 1 == operands.length());
+ inputs_.AddAll(operands, zone);
+ }
+
+ LOperand* target() const { return inputs_[0]; }
+
+ const CallInterfaceDescriptor* descriptor() { return descriptor_; }
+
+ private:
+ DECLARE_CONCRETE_INSTRUCTION(CallWithDescriptor, "call-with-descriptor")
+ DECLARE_HYDROGEN_ACCESSOR(CallWithDescriptor)
+
+ virtual void PrintDataTo(StringStream* stream) V8_OVERRIDE;
+
+ int arity() const { return hydrogen()->argument_count() - 1; }
+
+ const CallInterfaceDescriptor* descriptor_;
+ ZoneList<LOperand*> inputs_;
+
+ // Iterator support.
+ virtual int InputCount() V8_FINAL V8_OVERRIDE { return inputs_.length(); }
+ virtual LOperand* InputAt(int i) V8_FINAL V8_OVERRIDE { return inputs_[i]; }
+
+ virtual int TempCount() V8_FINAL V8_OVERRIDE { return 0; }
+ virtual LOperand* TempAt(int i) V8_FINAL V8_OVERRIDE { return NULL; }
+};
+
+
+class LInvokeFunction V8_FINAL : public LTemplateInstruction<1, 2, 0> {
+ public:
+ LInvokeFunction(LOperand* context, LOperand* function) {
+ inputs_[0] = context;
+ inputs_[1] = function;
+ }
+
+ LOperand* context() { return inputs_[0]; }
+ LOperand* function() { return inputs_[1]; }
+
+ DECLARE_CONCRETE_INSTRUCTION(InvokeFunction, "invoke-function")
+ DECLARE_HYDROGEN_ACCESSOR(InvokeFunction)
+
+ virtual void PrintDataTo(StringStream* stream) V8_OVERRIDE;
+
+ int arity() const { return hydrogen()->argument_count() - 1; }
+};
+
+
+class LIsConstructCallAndBranch V8_FINAL : public LControlInstruction<0, 2> {
+ public:
+ LIsConstructCallAndBranch(LOperand* temp1, LOperand* temp2) {
+ temps_[0] = temp1;
+ temps_[1] = temp2;
+ }
+
+ LOperand* temp1() { return temps_[0]; }
+ LOperand* temp2() { return temps_[1]; }
+
+ DECLARE_CONCRETE_INSTRUCTION(IsConstructCallAndBranch,
+ "is-construct-call-and-branch")
+};
+
+
+class LIsObjectAndBranch V8_FINAL : public LControlInstruction<1, 2> {
+ public:
+ LIsObjectAndBranch(LOperand* value, LOperand* temp1, LOperand* temp2) {
+ inputs_[0] = value;
+ temps_[0] = temp1;
+ temps_[1] = temp2;
+ }
+
+ LOperand* value() { return inputs_[0]; }
+ LOperand* temp1() { return temps_[0]; }
+ LOperand* temp2() { return temps_[1]; }
+
+ DECLARE_CONCRETE_INSTRUCTION(IsObjectAndBranch, "is-object-and-branch")
+ DECLARE_HYDROGEN_ACCESSOR(IsObjectAndBranch)
+
+ virtual void PrintDataTo(StringStream* stream) V8_OVERRIDE;
+};
+
+
+class LIsStringAndBranch V8_FINAL : public LControlInstruction<1, 1> {
+ public:
+ LIsStringAndBranch(LOperand* value, LOperand* temp) {
+ inputs_[0] = value;
+ temps_[0] = temp;
+ }
+
+ LOperand* value() { return inputs_[0]; }
+ LOperand* temp() { return temps_[0]; }
+
+ DECLARE_CONCRETE_INSTRUCTION(IsStringAndBranch, "is-string-and-branch")
+ DECLARE_HYDROGEN_ACCESSOR(IsStringAndBranch)
+
+ virtual void PrintDataTo(StringStream* stream) V8_OVERRIDE;
+};
+
+
+class LIsSmiAndBranch V8_FINAL : public LControlInstruction<1, 0> {
+ public:
+ explicit LIsSmiAndBranch(LOperand* value) {
+ inputs_[0] = value;
+ }
+
+ LOperand* value() { return inputs_[0]; }
+
+ DECLARE_CONCRETE_INSTRUCTION(IsSmiAndBranch, "is-smi-and-branch")
+ DECLARE_HYDROGEN_ACCESSOR(IsSmiAndBranch)
+
+ virtual void PrintDataTo(StringStream* stream) V8_OVERRIDE;
+};
+
+
+class LIsUndetectableAndBranch V8_FINAL : public LControlInstruction<1, 1> {
+ public:
+ explicit LIsUndetectableAndBranch(LOperand* value, LOperand* temp) {
+ inputs_[0] = value;
+ temps_[0] = temp;
+ }
+
+ LOperand* value() { return inputs_[0]; }
+ LOperand* temp() { return temps_[0]; }
+
+ DECLARE_CONCRETE_INSTRUCTION(IsUndetectableAndBranch,
+ "is-undetectable-and-branch")
+ DECLARE_HYDROGEN_ACCESSOR(IsUndetectableAndBranch)
+
+ virtual void PrintDataTo(StringStream* stream) V8_OVERRIDE;
+};
+
+
+class LLoadContextSlot V8_FINAL : public LTemplateInstruction<1, 1, 0> {
+ public:
+ explicit LLoadContextSlot(LOperand* context) {
+ inputs_[0] = context;
+ }
+
+ LOperand* context() { return inputs_[0]; }
+
+ DECLARE_CONCRETE_INSTRUCTION(LoadContextSlot, "load-context-slot")
+ DECLARE_HYDROGEN_ACCESSOR(LoadContextSlot)
+
+ int slot_index() const { return hydrogen()->slot_index(); }
+
+ virtual void PrintDataTo(StringStream* stream) V8_OVERRIDE;
+};
+
+
+class LLoadNamedField V8_FINAL : public LTemplateInstruction<1, 1, 0> {
+ public:
+ explicit LLoadNamedField(LOperand* object) {
+ inputs_[0] = object;
+ }
+
+ LOperand* object() { return inputs_[0]; }
+
+ DECLARE_CONCRETE_INSTRUCTION(LoadNamedField, "load-named-field")
+ DECLARE_HYDROGEN_ACCESSOR(LoadNamedField)
+};
+
+
+class LFunctionLiteral V8_FINAL : public LTemplateInstruction<1, 1, 0> {
+ public:
+ explicit LFunctionLiteral(LOperand* context) {
+ inputs_[0] = context;
+ }
+
+ LOperand* context() { return inputs_[0]; }
+
+ DECLARE_CONCRETE_INSTRUCTION(FunctionLiteral, "function-literal")
+ DECLARE_HYDROGEN_ACCESSOR(FunctionLiteral)
+};
+
+
+class LLoadFunctionPrototype V8_FINAL : public LTemplateInstruction<1, 1, 1> {
+ public:
+ LLoadFunctionPrototype(LOperand* function, LOperand* temp) {
+ inputs_[0] = function;
+ temps_[0] = temp;
+ }
+
+ LOperand* function() { return inputs_[0]; }
+ LOperand* temp() { return temps_[0]; }
+
+ DECLARE_CONCRETE_INSTRUCTION(LoadFunctionPrototype, "load-function-prototype")
+ DECLARE_HYDROGEN_ACCESSOR(LoadFunctionPrototype)
+};
+
+
+class LLoadGlobalCell V8_FINAL : public LTemplateInstruction<1, 0, 0> {
+ public:
+ DECLARE_CONCRETE_INSTRUCTION(LoadGlobalCell, "load-global-cell")
+ DECLARE_HYDROGEN_ACCESSOR(LoadGlobalCell)
+};
+
+
+class LLoadGlobalGeneric V8_FINAL : public LTemplateInstruction<1, 2, 0> {
+ public:
+ LLoadGlobalGeneric(LOperand* context, LOperand* global_object) {
+ inputs_[0] = context;
+ inputs_[1] = global_object;
+ }
+
+ LOperand* context() { return inputs_[0]; }
+ LOperand* global_object() { return inputs_[1]; }
+
+ DECLARE_CONCRETE_INSTRUCTION(LoadGlobalGeneric, "load-global-generic")
+ DECLARE_HYDROGEN_ACCESSOR(LoadGlobalGeneric)
+
+ Handle<Object> name() const { return hydrogen()->name(); }
+ bool for_typeof() const { return hydrogen()->for_typeof(); }
+};
+
+
+template<int T>
+class LLoadKeyed : public LTemplateInstruction<1, 2, T> {
+ public:
+ LLoadKeyed(LOperand* elements, LOperand* key) {
+ this->inputs_[0] = elements;
+ this->inputs_[1] = key;
+ }
+
+ LOperand* elements() { return this->inputs_[0]; }
+ LOperand* key() { return this->inputs_[1]; }
+ ElementsKind elements_kind() const {
+ return this->hydrogen()->elements_kind();
+ }
+ bool is_external() const {
+ return this->hydrogen()->is_external();
+ }
+ bool is_fixed_typed_array() const {
+ return hydrogen()->is_fixed_typed_array();
+ }
+ bool is_typed_elements() const {
+ return is_external() || is_fixed_typed_array();
+ }
+ uint32_t additional_index() const {
+ return this->hydrogen()->index_offset();
+ }
+ void PrintDataTo(StringStream* stream) V8_OVERRIDE {
+ this->elements()->PrintTo(stream);
+ stream->Add("[");
+ this->key()->PrintTo(stream);
+ if (this->hydrogen()->IsDehoisted()) {
+ stream->Add(" + %d]", this->additional_index());
+ } else {
+ stream->Add("]");
+ }
+ }
+
+ DECLARE_HYDROGEN_ACCESSOR(LoadKeyed)
+};
+
+
+class LLoadKeyedExternal: public LLoadKeyed<1> {
+ public:
+ LLoadKeyedExternal(LOperand* elements, LOperand* key, LOperand* temp) :
+ LLoadKeyed<1>(elements, key) {
+ temps_[0] = temp;
+ }
+
+ LOperand* temp() { return temps_[0]; }
+
+  DECLARE_CONCRETE_INSTRUCTION(LoadKeyedExternal, "load-keyed-external")
+};
+
+
+class LLoadKeyedFixed: public LLoadKeyed<1> {
+ public:
+ LLoadKeyedFixed(LOperand* elements, LOperand* key, LOperand* temp) :
+ LLoadKeyed<1>(elements, key) {
+ temps_[0] = temp;
+ }
+
+ LOperand* temp() { return temps_[0]; }
+
+  DECLARE_CONCRETE_INSTRUCTION(LoadKeyedFixed, "load-keyed-fixed")
+};
+
+
+class LLoadKeyedFixedDouble: public LLoadKeyed<1> {
+ public:
+ LLoadKeyedFixedDouble(LOperand* elements, LOperand* key, LOperand* temp) :
+ LLoadKeyed<1>(elements, key) {
+ temps_[0] = temp;
+ }
+
+ LOperand* temp() { return temps_[0]; }
+
+  DECLARE_CONCRETE_INSTRUCTION(LoadKeyedFixedDouble, "load-keyed-fixed-double")
+};
+
+
+class LLoadKeyedGeneric V8_FINAL : public LTemplateInstruction<1, 3, 0> {
+ public:
+ LLoadKeyedGeneric(LOperand* context, LOperand* object, LOperand* key) {
+ inputs_[0] = context;
+ inputs_[1] = object;
+ inputs_[2] = key;
+ }
+
+ LOperand* context() { return inputs_[0]; }
+ LOperand* object() { return inputs_[1]; }
+ LOperand* key() { return inputs_[2]; }
+
+ DECLARE_CONCRETE_INSTRUCTION(LoadKeyedGeneric, "load-keyed-generic")
+};
+
+
+class LLoadNamedGeneric V8_FINAL : public LTemplateInstruction<1, 2, 0> {
+ public:
+ LLoadNamedGeneric(LOperand* context, LOperand* object) {
+ inputs_[0] = context;
+ inputs_[1] = object;
+ }
+
+ LOperand* context() { return inputs_[0]; }
+ LOperand* object() { return inputs_[1]; }
+
+ DECLARE_CONCRETE_INSTRUCTION(LoadNamedGeneric, "load-named-generic")
+ DECLARE_HYDROGEN_ACCESSOR(LoadNamedGeneric)
+
+ Handle<Object> name() const { return hydrogen()->name(); }
+};
+
+
+class LLoadRoot V8_FINAL : public LTemplateInstruction<1, 0, 0> {
+ public:
+ DECLARE_CONCRETE_INSTRUCTION(LoadRoot, "load-root")
+ DECLARE_HYDROGEN_ACCESSOR(LoadRoot)
+
+ Heap::RootListIndex index() const { return hydrogen()->index(); }
+};
+
+
+class LMapEnumLength V8_FINAL : public LTemplateInstruction<1, 1, 0> {
+ public:
+ explicit LMapEnumLength(LOperand* value) {
+ inputs_[0] = value;
+ }
+
+ LOperand* value() { return inputs_[0]; }
+
+ DECLARE_CONCRETE_INSTRUCTION(MapEnumLength, "map-enum-length")
+};
+
+
+template<int T>
+class LUnaryMathOperation : public LTemplateInstruction<1, 1, T> {
+ public:
+ explicit LUnaryMathOperation(LOperand* value) {
+ this->inputs_[0] = value;
+ }
+
+ LOperand* value() { return this->inputs_[0]; }
+ BuiltinFunctionId op() const { return this->hydrogen()->op(); }
+
+ void PrintDataTo(StringStream* stream) V8_OVERRIDE;
+
+ DECLARE_HYDROGEN_ACCESSOR(UnaryMathOperation)
+};
+
+
+class LMathAbs V8_FINAL : public LUnaryMathOperation<0> {
+ public:
+ explicit LMathAbs(LOperand* value) : LUnaryMathOperation<0>(value) {}
+
+ DECLARE_CONCRETE_INSTRUCTION(MathAbs, "math-abs")
+};
+
+
+class LMathAbsTagged: public LTemplateInstruction<1, 2, 3> {
+ public:
+ LMathAbsTagged(LOperand* context, LOperand* value,
+ LOperand* temp1, LOperand* temp2, LOperand* temp3) {
+ inputs_[0] = context;
+ inputs_[1] = value;
+ temps_[0] = temp1;
+ temps_[1] = temp2;
+ temps_[2] = temp3;
+ }
+
+ LOperand* context() { return inputs_[0]; }
+ LOperand* value() { return inputs_[1]; }
+ LOperand* temp1() { return temps_[0]; }
+ LOperand* temp2() { return temps_[1]; }
+ LOperand* temp3() { return temps_[2]; }
+
+ DECLARE_CONCRETE_INSTRUCTION(MathAbsTagged, "math-abs-tagged")
+ DECLARE_HYDROGEN_ACCESSOR(UnaryMathOperation)
+};
+
+
+class LMathExp V8_FINAL : public LUnaryMathOperation<4> {
+ public:
+ LMathExp(LOperand* value,
+ LOperand* double_temp1,
+ LOperand* temp1,
+ LOperand* temp2,
+ LOperand* temp3)
+ : LUnaryMathOperation<4>(value) {
+ temps_[0] = double_temp1;
+ temps_[1] = temp1;
+ temps_[2] = temp2;
+ temps_[3] = temp3;
+ ExternalReference::InitializeMathExpData();
+ }
+
+ LOperand* double_temp1() { return temps_[0]; }
+ LOperand* temp1() { return temps_[1]; }
+ LOperand* temp2() { return temps_[2]; }
+ LOperand* temp3() { return temps_[3]; }
+
+ DECLARE_CONCRETE_INSTRUCTION(MathExp, "math-exp")
+};
+
+
+class LMathFloor V8_FINAL : public LUnaryMathOperation<0> {
+ public:
+ explicit LMathFloor(LOperand* value) : LUnaryMathOperation<0>(value) { }
+ DECLARE_CONCRETE_INSTRUCTION(MathFloor, "math-floor")
+};
+
+
+class LFlooringDivByPowerOf2I V8_FINAL : public LTemplateInstruction<1, 1, 0> {
+ public:
+ LFlooringDivByPowerOf2I(LOperand* dividend, int32_t divisor) {
+ inputs_[0] = dividend;
+ divisor_ = divisor;
+ }
+
+ LOperand* dividend() { return inputs_[0]; }
+ int32_t divisor() const { return divisor_; }
+
+ DECLARE_CONCRETE_INSTRUCTION(FlooringDivByPowerOf2I,
+ "flooring-div-by-power-of-2-i")
+ DECLARE_HYDROGEN_ACCESSOR(MathFloorOfDiv)
+
+ private:
+ int32_t divisor_;
+};
+
+
+class LFlooringDivByConstI V8_FINAL : public LTemplateInstruction<1, 1, 2> {
+ public:
+ LFlooringDivByConstI(LOperand* dividend, int32_t divisor, LOperand* temp) {
+ inputs_[0] = dividend;
+ divisor_ = divisor;
+ temps_[0] = temp;
+ }
+
+ LOperand* dividend() { return inputs_[0]; }
+ int32_t divisor() const { return divisor_; }
+ LOperand* temp() { return temps_[0]; }
+
+ DECLARE_CONCRETE_INSTRUCTION(FlooringDivByConstI, "flooring-div-by-const-i")
+ DECLARE_HYDROGEN_ACCESSOR(MathFloorOfDiv)
+
+ private:
+ int32_t divisor_;
+};
+
+
+class LFlooringDivI V8_FINAL : public LTemplateInstruction<1, 2, 1> {
+ public:
+ LFlooringDivI(LOperand* dividend, LOperand* divisor, LOperand* temp) {
+ inputs_[0] = dividend;
+ inputs_[1] = divisor;
+ temps_[0] = temp;
+ }
+
+ LOperand* dividend() { return inputs_[0]; }
+ LOperand* divisor() { return inputs_[1]; }
+ LOperand* temp() { return temps_[0]; }
+
+ DECLARE_CONCRETE_INSTRUCTION(FlooringDivI, "flooring-div-i")
+ DECLARE_HYDROGEN_ACCESSOR(MathFloorOfDiv)
+};
+
+
+class LMathLog V8_FINAL : public LUnaryMathOperation<0> {
+ public:
+ explicit LMathLog(LOperand* value) : LUnaryMathOperation<0>(value) { }
+ DECLARE_CONCRETE_INSTRUCTION(MathLog, "math-log")
+};
+
+
+class LMathClz32 V8_FINAL : public LUnaryMathOperation<0> {
+ public:
+ explicit LMathClz32(LOperand* value) : LUnaryMathOperation<0>(value) { }
+ DECLARE_CONCRETE_INSTRUCTION(MathClz32, "math-clz32")
+};
+
+
+class LMathMinMax V8_FINAL : public LTemplateInstruction<1, 2, 0> {
+ public:
+ LMathMinMax(LOperand* left, LOperand* right) {
+ inputs_[0] = left;
+ inputs_[1] = right;
+ }
+
+ LOperand* left() { return inputs_[0]; }
+ LOperand* right() { return inputs_[1]; }
+
+ DECLARE_CONCRETE_INSTRUCTION(MathMinMax, "math-min-max")
+ DECLARE_HYDROGEN_ACCESSOR(MathMinMax)
+};
+
+
+class LMathPowHalf V8_FINAL : public LUnaryMathOperation<0> {
+ public:
+ explicit LMathPowHalf(LOperand* value) : LUnaryMathOperation<0>(value) { }
+ DECLARE_CONCRETE_INSTRUCTION(MathPowHalf, "math-pow-half")
+};
+
+
+class LMathRound V8_FINAL : public LUnaryMathOperation<1> {
+ public:
+ LMathRound(LOperand* value, LOperand* temp1)
+ : LUnaryMathOperation<1>(value) {
+ temps_[0] = temp1;
+ }
+
+ LOperand* temp1() { return temps_[0]; }
+
+ DECLARE_CONCRETE_INSTRUCTION(MathRound, "math-round")
+};
+
+
+class LMathSqrt V8_FINAL : public LUnaryMathOperation<0> {
+ public:
+ explicit LMathSqrt(LOperand* value) : LUnaryMathOperation<0>(value) { }
+ DECLARE_CONCRETE_INSTRUCTION(MathSqrt, "math-sqrt")
+};
+
+
+class LModByPowerOf2I V8_FINAL : public LTemplateInstruction<1, 1, 0> {
+ public:
+ LModByPowerOf2I(LOperand* dividend, int32_t divisor) {
+ inputs_[0] = dividend;
+ divisor_ = divisor;
+ }
+
+ LOperand* dividend() { return inputs_[0]; }
+ int32_t divisor() const { return divisor_; }
+
+ DECLARE_CONCRETE_INSTRUCTION(ModByPowerOf2I, "mod-by-power-of-2-i")
+ DECLARE_HYDROGEN_ACCESSOR(Mod)
+
+ private:
+ int32_t divisor_;
+};
+
+
+class LModByConstI V8_FINAL : public LTemplateInstruction<1, 1, 1> {
+ public:
+ LModByConstI(LOperand* dividend, int32_t divisor, LOperand* temp) {
+ inputs_[0] = dividend;
+ divisor_ = divisor;
+ temps_[0] = temp;
+ }
+
+ LOperand* dividend() { return inputs_[0]; }
+ int32_t divisor() const { return divisor_; }
+ LOperand* temp() { return temps_[0]; }
+
+ DECLARE_CONCRETE_INSTRUCTION(ModByConstI, "mod-by-const-i")
+ DECLARE_HYDROGEN_ACCESSOR(Mod)
+
+ private:
+ int32_t divisor_;
+};
+
+
+class LModI V8_FINAL : public LTemplateInstruction<1, 2, 0> {
+ public:
+ LModI(LOperand* left, LOperand* right) {
+ inputs_[0] = left;
+ inputs_[1] = right;
+ }
+
+ LOperand* left() { return inputs_[0]; }
+ LOperand* right() { return inputs_[1]; }
+
+ DECLARE_CONCRETE_INSTRUCTION(ModI, "mod-i")
+ DECLARE_HYDROGEN_ACCESSOR(Mod)
+};
+
+
+class LMulConstIS V8_FINAL : public LTemplateInstruction<1, 2, 0> {
+ public:
+ LMulConstIS(LOperand* left, LConstantOperand* right) {
+ inputs_[0] = left;
+ inputs_[1] = right;
+ }
+
+ LOperand* left() { return inputs_[0]; }
+ LConstantOperand* right() { return LConstantOperand::cast(inputs_[1]); }
+
+ DECLARE_CONCRETE_INSTRUCTION(MulConstIS, "mul-const-i-s")
+ DECLARE_HYDROGEN_ACCESSOR(Mul)
+};
+
+
+class LMulI V8_FINAL : public LTemplateInstruction<1, 2, 0> {
+ public:
+ LMulI(LOperand* left, LOperand* right) {
+ inputs_[0] = left;
+ inputs_[1] = right;
+ }
+
+ LOperand* left() { return inputs_[0]; }
+ LOperand* right() { return inputs_[1]; }
+
+ DECLARE_CONCRETE_INSTRUCTION(MulI, "mul-i")
+ DECLARE_HYDROGEN_ACCESSOR(Mul)
+};
+
+
+class LMulS V8_FINAL : public LTemplateInstruction<1, 2, 0> {
+ public:
+ LMulS(LOperand* left, LOperand* right) {
+ inputs_[0] = left;
+ inputs_[1] = right;
+ }
+
+ LOperand* left() { return inputs_[0]; }
+ LOperand* right() { return inputs_[1]; }
+
+  DECLARE_CONCRETE_INSTRUCTION(MulS, "mul-s")
+ DECLARE_HYDROGEN_ACCESSOR(Mul)
+};
+
+
+class LNumberTagD V8_FINAL : public LTemplateInstruction<1, 1, 2> {
+ public:
+ LNumberTagD(LOperand* value, LOperand* temp1, LOperand* temp2) {
+ inputs_[0] = value;
+ temps_[0] = temp1;
+ temps_[1] = temp2;
+ }
+
+ LOperand* value() { return inputs_[0]; }
+ LOperand* temp1() { return temps_[0]; }
+ LOperand* temp2() { return temps_[1]; }
+
+ DECLARE_CONCRETE_INSTRUCTION(NumberTagD, "number-tag-d")
+ DECLARE_HYDROGEN_ACCESSOR(Change)
+};
+
+
+class LNumberTagU V8_FINAL : public LTemplateInstruction<1, 1, 2> {
+ public:
+ explicit LNumberTagU(LOperand* value,
+ LOperand* temp1,
+ LOperand* temp2) {
+ inputs_[0] = value;
+ temps_[0] = temp1;
+ temps_[1] = temp2;
+ }
+
+ LOperand* value() { return inputs_[0]; }
+ LOperand* temp1() { return temps_[0]; }
+ LOperand* temp2() { return temps_[1]; }
+
+ DECLARE_CONCRETE_INSTRUCTION(NumberTagU, "number-tag-u")
+};
+
+
+class LNumberUntagD V8_FINAL : public LTemplateInstruction<1, 1, 1> {
+ public:
+ LNumberUntagD(LOperand* value, LOperand* temp) {
+ inputs_[0] = value;
+ temps_[0] = temp;
+ }
+
+ LOperand* value() { return inputs_[0]; }
+
+ LOperand* temp() { return temps_[0]; }
+
+ DECLARE_CONCRETE_INSTRUCTION(NumberUntagD, "double-untag")
+ DECLARE_HYDROGEN_ACCESSOR(Change)
+};
+
+
+class LParameter V8_FINAL : public LTemplateInstruction<1, 0, 0> {
+ public:
+ virtual bool HasInterestingComment(LCodeGen* gen) const { return false; }
+ DECLARE_CONCRETE_INSTRUCTION(Parameter, "parameter")
+};
+
+
+class LPower V8_FINAL : public LTemplateInstruction<1, 2, 0> {
+ public:
+ LPower(LOperand* left, LOperand* right) {
+ inputs_[0] = left;
+ inputs_[1] = right;
+ }
+
+ LOperand* left() { return inputs_[0]; }
+ LOperand* right() { return inputs_[1]; }
+
+ DECLARE_CONCRETE_INSTRUCTION(Power, "power")
+ DECLARE_HYDROGEN_ACCESSOR(Power)
+};
+
+
+class LPushArgument V8_FINAL : public LTemplateInstruction<0, 1, 0> {
+ public:
+ explicit LPushArgument(LOperand* value) {
+ inputs_[0] = value;
+ }
+
+ LOperand* value() { return inputs_[0]; }
+
+ DECLARE_CONCRETE_INSTRUCTION(PushArgument, "push-argument")
+};
+
+
+class LRegExpLiteral V8_FINAL : public LTemplateInstruction<1, 1, 0> {
+ public:
+ explicit LRegExpLiteral(LOperand* context) {
+ inputs_[0] = context;
+ }
+
+ LOperand* context() { return inputs_[0]; }
+
+ DECLARE_CONCRETE_INSTRUCTION(RegExpLiteral, "regexp-literal")
+ DECLARE_HYDROGEN_ACCESSOR(RegExpLiteral)
+};
+
+
+class LReturn V8_FINAL : public LTemplateInstruction<0, 3, 0> {
+ public:
+ LReturn(LOperand* value, LOperand* context, LOperand* parameter_count) {
+ inputs_[0] = value;
+ inputs_[1] = context;
+ inputs_[2] = parameter_count;
+ }
+
+ LOperand* value() { return inputs_[0]; }
+ LOperand* parameter_count() { return inputs_[2]; }
+
+ bool has_constant_parameter_count() {
+ return parameter_count()->IsConstantOperand();
+ }
+ LConstantOperand* constant_parameter_count() {
+ ASSERT(has_constant_parameter_count());
+ return LConstantOperand::cast(parameter_count());
+ }
+
+ DECLARE_CONCRETE_INSTRUCTION(Return, "return")
+};
+
+
+class LSeqStringGetChar V8_FINAL : public LTemplateInstruction<1, 2, 1> {
+ public:
+ LSeqStringGetChar(LOperand* string,
+ LOperand* index,
+ LOperand* temp) {
+ inputs_[0] = string;
+ inputs_[1] = index;
+ temps_[0] = temp;
+ }
+
+ LOperand* string() { return inputs_[0]; }
+ LOperand* index() { return inputs_[1]; }
+ LOperand* temp() { return temps_[0]; }
+
+ DECLARE_CONCRETE_INSTRUCTION(SeqStringGetChar, "seq-string-get-char")
+ DECLARE_HYDROGEN_ACCESSOR(SeqStringGetChar)
+};
+
+
+class LSeqStringSetChar V8_FINAL : public LTemplateInstruction<1, 4, 1> {
+ public:
+ LSeqStringSetChar(LOperand* context,
+ LOperand* string,
+ LOperand* index,
+ LOperand* value,
+ LOperand* temp) {
+ inputs_[0] = context;
+ inputs_[1] = string;
+ inputs_[2] = index;
+ inputs_[3] = value;
+ temps_[0] = temp;
+ }
+
+ LOperand* context() { return inputs_[0]; }
+ LOperand* string() { return inputs_[1]; }
+ LOperand* index() { return inputs_[2]; }
+ LOperand* value() { return inputs_[3]; }
+ LOperand* temp() { return temps_[0]; }
+
+ DECLARE_CONCRETE_INSTRUCTION(SeqStringSetChar, "seq-string-set-char")
+ DECLARE_HYDROGEN_ACCESSOR(SeqStringSetChar)
+};
+
+
+class LSmiTag V8_FINAL : public LTemplateInstruction<1, 1, 0> {
+ public:
+ explicit LSmiTag(LOperand* value) {
+ inputs_[0] = value;
+ }
+
+ LOperand* value() { return inputs_[0]; }
+
+ DECLARE_CONCRETE_INSTRUCTION(SmiTag, "smi-tag")
+ DECLARE_HYDROGEN_ACCESSOR(Change)
+};
+
+
+class LSmiUntag V8_FINAL : public LTemplateInstruction<1, 1, 0> {
+ public:
+ LSmiUntag(LOperand* value, bool needs_check)
+ : needs_check_(needs_check) {
+ inputs_[0] = value;
+ }
+
+ LOperand* value() { return inputs_[0]; }
+ bool needs_check() const { return needs_check_; }
+
+ DECLARE_CONCRETE_INSTRUCTION(SmiUntag, "smi-untag")
+
+ private:
+ bool needs_check_;
+};
+
+
+class LStackCheck V8_FINAL : public LTemplateInstruction<0, 1, 0> {
+ public:
+ explicit LStackCheck(LOperand* context) {
+ inputs_[0] = context;
+ }
+
+ LOperand* context() { return inputs_[0]; }
+
+ DECLARE_CONCRETE_INSTRUCTION(StackCheck, "stack-check")
+ DECLARE_HYDROGEN_ACCESSOR(StackCheck)
+
+ Label* done_label() { return &done_label_; }
+
+ private:
+ Label done_label_;
+};
+
+
+template<int T>
+class LStoreKeyed : public LTemplateInstruction<0, 3, T> {
+ public:
+ LStoreKeyed(LOperand* elements, LOperand* key, LOperand* value) {
+ this->inputs_[0] = elements;
+ this->inputs_[1] = key;
+ this->inputs_[2] = value;
+ }
+
+ bool is_external() const { return this->hydrogen()->is_external(); }
+ bool is_fixed_typed_array() const {
+ return hydrogen()->is_fixed_typed_array();
+ }
+ bool is_typed_elements() const {
+ return is_external() || is_fixed_typed_array();
+ }
+ LOperand* elements() { return this->inputs_[0]; }
+ LOperand* key() { return this->inputs_[1]; }
+ LOperand* value() { return this->inputs_[2]; }
+ ElementsKind elements_kind() const {
+ return this->hydrogen()->elements_kind();
+ }
+
+ bool NeedsCanonicalization() {
+ return this->hydrogen()->NeedsCanonicalization();
+ }
+ uint32_t additional_index() const { return this->hydrogen()->index_offset(); }
+
+ void PrintDataTo(StringStream* stream) V8_OVERRIDE {
+ this->elements()->PrintTo(stream);
+ stream->Add("[");
+ this->key()->PrintTo(stream);
+ if (this->hydrogen()->IsDehoisted()) {
+ stream->Add(" + %d] <-", this->additional_index());
+ } else {
+ stream->Add("] <- ");
+ }
+
+ if (this->value() == NULL) {
+ ASSERT(hydrogen()->IsConstantHoleStore() &&
+ hydrogen()->value()->representation().IsDouble());
+ stream->Add("<the hole(nan)>");
+ } else {
+ this->value()->PrintTo(stream);
+ }
+ }
+
+ DECLARE_HYDROGEN_ACCESSOR(StoreKeyed)
+};
+
+
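+// Each concrete store-keyed variant below instantiates LStoreKeyed<1>, i.e. it
+// carries the three inputs (elements, key, value) plus a single temp register.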
+class LStoreKeyedExternal V8_FINAL : public LStoreKeyed<1> {
+ public:
+ LStoreKeyedExternal(LOperand* elements, LOperand* key, LOperand* value,
+ LOperand* temp) :
+ LStoreKeyed<1>(elements, key, value) {
+ temps_[0] = temp;
+  }
+
+ LOperand* temp() { return temps_[0]; }
+
+ DECLARE_CONCRETE_INSTRUCTION(StoreKeyedExternal, "store-keyed-external")
+};
+
+
+class LStoreKeyedFixed V8_FINAL : public LStoreKeyed<1> {
+ public:
+ LStoreKeyedFixed(LOperand* elements, LOperand* key, LOperand* value,
+ LOperand* temp) :
+ LStoreKeyed<1>(elements, key, value) {
+ temps_[0] = temp;
+  }
+
+ LOperand* temp() { return temps_[0]; }
+
+ DECLARE_CONCRETE_INSTRUCTION(StoreKeyedFixed, "store-keyed-fixed")
+};
+
+
+class LStoreKeyedFixedDouble V8_FINAL : public LStoreKeyed<1> {
+ public:
+ LStoreKeyedFixedDouble(LOperand* elements, LOperand* key, LOperand* value,
+ LOperand* temp) :
+ LStoreKeyed<1>(elements, key, value) {
+ temps_[0] = temp;
+  }
+
+ LOperand* temp() { return temps_[0]; }
+
+ DECLARE_CONCRETE_INSTRUCTION(StoreKeyedFixedDouble,
+ "store-keyed-fixed-double")
+};
+
+
+class LStoreKeyedGeneric V8_FINAL : public LTemplateInstruction<0, 4, 0> {
+ public:
+ LStoreKeyedGeneric(LOperand* context,
+ LOperand* obj,
+ LOperand* key,
+ LOperand* value) {
+ inputs_[0] = context;
+ inputs_[1] = obj;
+ inputs_[2] = key;
+ inputs_[3] = value;
+ }
+
+ LOperand* context() { return inputs_[0]; }
+ LOperand* object() { return inputs_[1]; }
+ LOperand* key() { return inputs_[2]; }
+ LOperand* value() { return inputs_[3]; }
+
+ DECLARE_CONCRETE_INSTRUCTION(StoreKeyedGeneric, "store-keyed-generic")
+ DECLARE_HYDROGEN_ACCESSOR(StoreKeyedGeneric)
+
+ virtual void PrintDataTo(StringStream* stream) V8_OVERRIDE;
+
+ StrictMode strict_mode() { return hydrogen()->strict_mode(); }
+};
+
+
+class LStoreNamedField V8_FINAL : public LTemplateInstruction<0, 2, 2> {
+ public:
+ LStoreNamedField(LOperand* object, LOperand* value,
+ LOperand* temp0, LOperand* temp1) {
+ inputs_[0] = object;
+ inputs_[1] = value;
+ temps_[0] = temp0;
+ temps_[1] = temp1;
+ }
+
+ LOperand* object() { return inputs_[0]; }
+ LOperand* value() { return inputs_[1]; }
+ LOperand* temp0() { return temps_[0]; }
+ LOperand* temp1() { return temps_[1]; }
+
+ DECLARE_CONCRETE_INSTRUCTION(StoreNamedField, "store-named-field")
+ DECLARE_HYDROGEN_ACCESSOR(StoreNamedField)
+
+ virtual void PrintDataTo(StringStream* stream) V8_OVERRIDE;
+
+ Handle<Map> transition() const { return hydrogen()->transition_map(); }
+ Representation representation() const {
+ return hydrogen()->field_representation();
+ }
+};
+
+
+class LStoreNamedGeneric V8_FINAL : public LTemplateInstruction<0, 3, 0> {
+ public:
+ LStoreNamedGeneric(LOperand* context, LOperand* object, LOperand* value) {
+ inputs_[0] = context;
+ inputs_[1] = object;
+ inputs_[2] = value;
+ }
+
+ LOperand* context() { return inputs_[0]; }
+ LOperand* object() { return inputs_[1]; }
+ LOperand* value() { return inputs_[2]; }
+
+ DECLARE_CONCRETE_INSTRUCTION(StoreNamedGeneric, "store-named-generic")
+ DECLARE_HYDROGEN_ACCESSOR(StoreNamedGeneric)
+
+ virtual void PrintDataTo(StringStream* stream) V8_OVERRIDE;
+
+ Handle<Object> name() const { return hydrogen()->name(); }
+ StrictMode strict_mode() { return hydrogen()->strict_mode(); }
+};
+
+
+class LStringAdd V8_FINAL : public LTemplateInstruction<1, 3, 0> {
+ public:
+ LStringAdd(LOperand* context, LOperand* left, LOperand* right) {
+ inputs_[0] = context;
+ inputs_[1] = left;
+ inputs_[2] = right;
+ }
+
+ LOperand* context() { return inputs_[0]; }
+ LOperand* left() { return inputs_[1]; }
+ LOperand* right() { return inputs_[2]; }
+
+ DECLARE_CONCRETE_INSTRUCTION(StringAdd, "string-add")
+ DECLARE_HYDROGEN_ACCESSOR(StringAdd)
+};
+
+
+class LStringCharCodeAt V8_FINAL : public LTemplateInstruction<1, 3, 0> {
+ public:
+ LStringCharCodeAt(LOperand* context, LOperand* string, LOperand* index) {
+ inputs_[0] = context;
+ inputs_[1] = string;
+ inputs_[2] = index;
+ }
+
+ LOperand* context() { return inputs_[0]; }
+ LOperand* string() { return inputs_[1]; }
+ LOperand* index() { return inputs_[2]; }
+
+ DECLARE_CONCRETE_INSTRUCTION(StringCharCodeAt, "string-char-code-at")
+ DECLARE_HYDROGEN_ACCESSOR(StringCharCodeAt)
+};
+
+
+class LStringCharFromCode V8_FINAL : public LTemplateInstruction<1, 2, 0> {
+ public:
+ LStringCharFromCode(LOperand* context, LOperand* char_code) {
+ inputs_[0] = context;
+ inputs_[1] = char_code;
+ }
+
+ LOperand* context() { return inputs_[0]; }
+ LOperand* char_code() { return inputs_[1]; }
+
+ DECLARE_CONCRETE_INSTRUCTION(StringCharFromCode, "string-char-from-code")
+ DECLARE_HYDROGEN_ACCESSOR(StringCharFromCode)
+};
+
+
+class LStringCompareAndBranch V8_FINAL : public LControlInstruction<3, 0> {
+ public:
+ LStringCompareAndBranch(LOperand* context, LOperand* left, LOperand* right) {
+ inputs_[0] = context;
+ inputs_[1] = left;
+ inputs_[2] = right;
+ }
+
+ LOperand* context() { return inputs_[0]; }
+ LOperand* left() { return inputs_[1]; }
+ LOperand* right() { return inputs_[2]; }
+
+ DECLARE_CONCRETE_INSTRUCTION(StringCompareAndBranch,
+ "string-compare-and-branch")
+ DECLARE_HYDROGEN_ACCESSOR(StringCompareAndBranch)
+
+ Token::Value op() const { return hydrogen()->token(); }
+
+ virtual void PrintDataTo(StringStream* stream) V8_OVERRIDE;
+};
+
+
+// Truncating conversion from a tagged value to an int32.
+class LTaggedToI V8_FINAL : public LTemplateInstruction<1, 1, 2> {
+ public:
+ explicit LTaggedToI(LOperand* value, LOperand* temp1, LOperand* temp2) {
+ inputs_[0] = value;
+ temps_[0] = temp1;
+ temps_[1] = temp2;
+ }
+
+ LOperand* value() { return inputs_[0]; }
+ LOperand* temp1() { return temps_[0]; }
+ LOperand* temp2() { return temps_[1]; }
+
+ DECLARE_CONCRETE_INSTRUCTION(TaggedToI, "tagged-to-i")
+ DECLARE_HYDROGEN_ACCESSOR(Change)
+
+ bool truncating() { return hydrogen()->CanTruncateToInt32(); }
+};
+
+
+class LShiftI V8_FINAL : public LTemplateInstruction<1, 2, 0> {
+ public:
+ LShiftI(Token::Value op, LOperand* left, LOperand* right, bool can_deopt)
+ : op_(op), can_deopt_(can_deopt) {
+ inputs_[0] = left;
+ inputs_[1] = right;
+ }
+
+ Token::Value op() const { return op_; }
+ LOperand* left() { return inputs_[0]; }
+ LOperand* right() { return inputs_[1]; }
+ bool can_deopt() const { return can_deopt_; }
+
+ DECLARE_CONCRETE_INSTRUCTION(ShiftI, "shift-i")
+
+ private:
+ Token::Value op_;
+ bool can_deopt_;
+};
+
+
+class LShiftS V8_FINAL : public LTemplateInstruction<1, 2, 1> {
+ public:
+ LShiftS(Token::Value op, LOperand* left, LOperand* right, LOperand* temp,
+ bool can_deopt) : op_(op), can_deopt_(can_deopt) {
+ inputs_[0] = left;
+ inputs_[1] = right;
+ temps_[0] = temp;
+ }
+
+ Token::Value op() const { return op_; }
+ LOperand* left() { return inputs_[0]; }
+ LOperand* right() { return inputs_[1]; }
+ LOperand* temp() { return temps_[0]; }
+ bool can_deopt() const { return can_deopt_; }
+
+ DECLARE_CONCRETE_INSTRUCTION(ShiftS, "shift-s")
+
+ private:
+ Token::Value op_;
+ bool can_deopt_;
+};
+
+
+class LStoreCodeEntry V8_FINAL : public LTemplateInstruction<0, 2, 1> {
+ public:
+ LStoreCodeEntry(LOperand* function, LOperand* code_object,
+ LOperand* temp) {
+ inputs_[0] = function;
+ inputs_[1] = code_object;
+ temps_[0] = temp;
+ }
+
+ LOperand* function() { return inputs_[0]; }
+ LOperand* code_object() { return inputs_[1]; }
+ LOperand* temp() { return temps_[0]; }
+
+ virtual void PrintDataTo(StringStream* stream) V8_OVERRIDE;
+
+ DECLARE_CONCRETE_INSTRUCTION(StoreCodeEntry, "store-code-entry")
+ DECLARE_HYDROGEN_ACCESSOR(StoreCodeEntry)
+};
+
+
+class LStoreContextSlot V8_FINAL : public LTemplateInstruction<0, 2, 1> {
+ public:
+ LStoreContextSlot(LOperand* context, LOperand* value, LOperand* temp) {
+ inputs_[0] = context;
+ inputs_[1] = value;
+ temps_[0] = temp;
+ }
+
+ LOperand* context() { return inputs_[0]; }
+ LOperand* value() { return inputs_[1]; }
+ LOperand* temp() { return temps_[0]; }
+
+ DECLARE_CONCRETE_INSTRUCTION(StoreContextSlot, "store-context-slot")
+ DECLARE_HYDROGEN_ACCESSOR(StoreContextSlot)
+
+ int slot_index() { return hydrogen()->slot_index(); }
+
+ virtual void PrintDataTo(StringStream* stream) V8_OVERRIDE;
+};
+
+
+class LStoreGlobalCell V8_FINAL : public LTemplateInstruction<0, 1, 2> {
+ public:
+ LStoreGlobalCell(LOperand* value, LOperand* temp1, LOperand* temp2) {
+ inputs_[0] = value;
+ temps_[0] = temp1;
+ temps_[1] = temp2;
+ }
+
+ LOperand* value() { return inputs_[0]; }
+ LOperand* temp1() { return temps_[0]; }
+ LOperand* temp2() { return temps_[1]; }
+
+ DECLARE_CONCRETE_INSTRUCTION(StoreGlobalCell, "store-global-cell")
+ DECLARE_HYDROGEN_ACCESSOR(StoreGlobalCell)
+};
+
+
+class LSubI V8_FINAL : public LTemplateInstruction<1, 2, 0> {
+ public:
+ LSubI(LOperand* left, LOperand* right) {
+ inputs_[0] = left;
+ inputs_[1] = right;
+ }
+
+ LOperand* left() { return inputs_[0]; }
+ LOperand* right() { return inputs_[1]; }
+
+ DECLARE_CONCRETE_INSTRUCTION(SubI, "sub-i")
+ DECLARE_HYDROGEN_ACCESSOR(Sub)
+};
+
+
+class LSubS V8_FINAL : public LTemplateInstruction<1, 2, 0> {
+ public:
+ LSubS(LOperand* left, LOperand* right) {
+ inputs_[0] = left;
+ inputs_[1] = right;
+ }
+
+ LOperand* left() { return inputs_[0]; }
+ LOperand* right() { return inputs_[1]; }
+
+ DECLARE_CONCRETE_INSTRUCTION(SubS, "sub-s")
+ DECLARE_HYDROGEN_ACCESSOR(Sub)
+};
+
+
+class LThisFunction V8_FINAL : public LTemplateInstruction<1, 0, 0> {
+ public:
+ DECLARE_CONCRETE_INSTRUCTION(ThisFunction, "this-function")
+ DECLARE_HYDROGEN_ACCESSOR(ThisFunction)
+};
+
+
+class LToFastProperties V8_FINAL : public LTemplateInstruction<1, 1, 0> {
+ public:
+ explicit LToFastProperties(LOperand* value) {
+ inputs_[0] = value;
+ }
+
+ LOperand* value() { return inputs_[0]; }
+
+ DECLARE_CONCRETE_INSTRUCTION(ToFastProperties, "to-fast-properties")
+ DECLARE_HYDROGEN_ACCESSOR(ToFastProperties)
+};
+
+
+class LTransitionElementsKind V8_FINAL : public LTemplateInstruction<0, 2, 2> {
+ public:
+ LTransitionElementsKind(LOperand* object,
+ LOperand* context,
+ LOperand* temp1,
+ LOperand* temp2 = NULL) {
+ inputs_[0] = object;
+ inputs_[1] = context;
+ temps_[0] = temp1;
+ temps_[1] = temp2;
+ }
+
+ LOperand* object() { return inputs_[0]; }
+ LOperand* context() { return inputs_[1]; }
+ LOperand* temp1() { return temps_[0]; }
+ LOperand* temp2() { return temps_[1]; }
+
+ DECLARE_CONCRETE_INSTRUCTION(TransitionElementsKind,
+ "transition-elements-kind")
+ DECLARE_HYDROGEN_ACCESSOR(TransitionElementsKind)
+
+ virtual void PrintDataTo(StringStream* stream) V8_OVERRIDE;
+
+ Handle<Map> original_map() { return hydrogen()->original_map().handle(); }
+ Handle<Map> transitioned_map() {
+ return hydrogen()->transitioned_map().handle();
+ }
+ ElementsKind from_kind() const { return hydrogen()->from_kind(); }
+ ElementsKind to_kind() const { return hydrogen()->to_kind(); }
+};
+
+
+class LTrapAllocationMemento V8_FINAL : public LTemplateInstruction<0, 1, 2> {
+ public:
+ LTrapAllocationMemento(LOperand* object, LOperand* temp1, LOperand* temp2) {
+ inputs_[0] = object;
+ temps_[0] = temp1;
+ temps_[1] = temp2;
+ }
+
+ LOperand* object() { return inputs_[0]; }
+ LOperand* temp1() { return temps_[0]; }
+ LOperand* temp2() { return temps_[1]; }
+
+ DECLARE_CONCRETE_INSTRUCTION(TrapAllocationMemento, "trap-allocation-memento")
+};
+
+
+class LTruncateDoubleToIntOrSmi V8_FINAL
+ : public LTemplateInstruction<1, 1, 0> {
+ public:
+ explicit LTruncateDoubleToIntOrSmi(LOperand* value) {
+ inputs_[0] = value;
+ }
+
+ LOperand* value() { return inputs_[0]; }
+
+ DECLARE_CONCRETE_INSTRUCTION(TruncateDoubleToIntOrSmi,
+ "truncate-double-to-int-or-smi")
+ DECLARE_HYDROGEN_ACCESSOR(UnaryOperation)
+
+ bool tag_result() { return hydrogen()->representation().IsSmi(); }
+};
+
+
+class LTypeof V8_FINAL : public LTemplateInstruction<1, 2, 0> {
+ public:
+ LTypeof(LOperand* context, LOperand* value) {
+ inputs_[0] = context;
+ inputs_[1] = value;
+ }
+
+ LOperand* context() { return inputs_[0]; }
+ LOperand* value() { return inputs_[1]; }
+
+ DECLARE_CONCRETE_INSTRUCTION(Typeof, "typeof")
+};
+
+
+class LTypeofIsAndBranch V8_FINAL : public LControlInstruction<1, 2> {
+ public:
+ LTypeofIsAndBranch(LOperand* value, LOperand* temp1, LOperand* temp2) {
+ inputs_[0] = value;
+ temps_[0] = temp1;
+ temps_[1] = temp2;
+ }
+
+ LOperand* value() { return inputs_[0]; }
+ LOperand* temp1() { return temps_[0]; }
+ LOperand* temp2() { return temps_[1]; }
+
+ DECLARE_CONCRETE_INSTRUCTION(TypeofIsAndBranch, "typeof-is-and-branch")
+ DECLARE_HYDROGEN_ACCESSOR(TypeofIsAndBranch)
+
+ Handle<String> type_literal() const { return hydrogen()->type_literal(); }
+
+ virtual void PrintDataTo(StringStream* stream) V8_OVERRIDE;
+};
+
+
+class LUint32ToDouble V8_FINAL : public LTemplateInstruction<1, 1, 0> {
+ public:
+ explicit LUint32ToDouble(LOperand* value) {
+ inputs_[0] = value;
+ }
+
+ LOperand* value() { return inputs_[0]; }
+
+ DECLARE_CONCRETE_INSTRUCTION(Uint32ToDouble, "uint32-to-double")
+};
+
+
+class LCheckMapValue V8_FINAL : public LTemplateInstruction<0, 2, 1> {
+ public:
+ LCheckMapValue(LOperand* value, LOperand* map, LOperand* temp) {
+ inputs_[0] = value;
+ inputs_[1] = map;
+ temps_[0] = temp;
+ }
+
+ LOperand* value() { return inputs_[0]; }
+ LOperand* map() { return inputs_[1]; }
+ LOperand* temp() { return temps_[0]; }
+
+ DECLARE_CONCRETE_INSTRUCTION(CheckMapValue, "check-map-value")
+};
+
+
+class LLoadFieldByIndex V8_FINAL : public LTemplateInstruction<1, 2, 0> {
+ public:
+ LLoadFieldByIndex(LOperand* object, LOperand* index) {
+ inputs_[0] = object;
+ inputs_[1] = index;
+ }
+
+ LOperand* object() { return inputs_[0]; }
+ LOperand* index() { return inputs_[1]; }
+
+ DECLARE_CONCRETE_INSTRUCTION(LoadFieldByIndex, "load-field-by-index")
+};
+
+
+class LWrapReceiver V8_FINAL : public LTemplateInstruction<1, 2, 0> {
+ public:
+ LWrapReceiver(LOperand* receiver, LOperand* function) {
+ inputs_[0] = receiver;
+ inputs_[1] = function;
+ }
+
+ DECLARE_CONCRETE_INSTRUCTION(WrapReceiver, "wrap-receiver")
+ DECLARE_HYDROGEN_ACCESSOR(WrapReceiver)
+
+ LOperand* receiver() { return inputs_[0]; }
+ LOperand* function() { return inputs_[1]; }
+};
+
+
+class LChunkBuilder;
+class LPlatformChunk V8_FINAL : public LChunk {
+ public:
+ LPlatformChunk(CompilationInfo* info, HGraph* graph)
+ : LChunk(info, graph) { }
+
+ int GetNextSpillIndex();
+ LOperand* GetNextSpillSlot(RegisterKind kind);
+};
+
+
+class LChunkBuilder V8_FINAL : public LChunkBuilderBase {
+ public:
+ LChunkBuilder(CompilationInfo* info, HGraph* graph, LAllocator* allocator)
+ : LChunkBuilderBase(graph->zone()),
+ chunk_(NULL),
+ info_(info),
+ graph_(graph),
+ status_(UNUSED),
+ current_instruction_(NULL),
+ current_block_(NULL),
+ allocator_(allocator) { }
+
+ // Build the sequence for the graph.
+ LPlatformChunk* Build();
+
+ LInstruction* CheckElideControlInstruction(HControlInstruction* instr);
+
+ // Declare methods that deal with the individual node types.
+#define DECLARE_DO(type) LInstruction* Do##type(H##type* node);
+ HYDROGEN_CONCRETE_INSTRUCTION_LIST(DECLARE_DO)
+#undef DECLARE_DO
+
+ LInstruction* DoDivByPowerOf2I(HDiv* instr);
+ LInstruction* DoDivByConstI(HDiv* instr);
+ LInstruction* DoDivI(HBinaryOperation* instr);
+ LInstruction* DoModByPowerOf2I(HMod* instr);
+ LInstruction* DoModByConstI(HMod* instr);
+ LInstruction* DoModI(HMod* instr);
+ LInstruction* DoFlooringDivByPowerOf2I(HMathFloorOfDiv* instr);
+ LInstruction* DoFlooringDivByConstI(HMathFloorOfDiv* instr);
+ LInstruction* DoFlooringDivI(HMathFloorOfDiv* instr);
+
+ static bool HasMagicNumberForDivision(int32_t divisor);
+
+ private:
+ enum Status {
+ UNUSED,
+ BUILDING,
+ DONE,
+ ABORTED
+ };
+
+ HGraph* graph() const { return graph_; }
+ Isolate* isolate() const { return info_->isolate(); }
+
+ bool is_unused() const { return status_ == UNUSED; }
+ bool is_building() const { return status_ == BUILDING; }
+ bool is_done() const { return status_ == DONE; }
+ bool is_aborted() const { return status_ == ABORTED; }
+
+ int argument_count() const { return argument_count_; }
+ CompilationInfo* info() const { return info_; }
+ Heap* heap() const { return isolate()->heap(); }
+
+ void Abort(BailoutReason reason);
+
+ // Methods for getting operands for Use / Define / Temp.
+ LUnallocated* ToUnallocated(Register reg);
+ LUnallocated* ToUnallocated(DoubleRegister reg);
+
+ // Methods for setting up define-use relationships.
+ MUST_USE_RESULT LOperand* Use(HValue* value, LUnallocated* operand);
+ MUST_USE_RESULT LOperand* UseFixed(HValue* value, Register fixed_register);
+ MUST_USE_RESULT LOperand* UseFixedDouble(HValue* value,
+ DoubleRegister fixed_register);
+
+ // A value that is guaranteed to be allocated to a register.
+ // The operand created by UseRegister is guaranteed to be live until the end
+  // of the instruction. This means that the register allocator will not reuse
+  // its register for any other operand inside the instruction.
+ MUST_USE_RESULT LOperand* UseRegister(HValue* value);
+
+ // The operand created by UseRegisterAndClobber is guaranteed to be live until
+  // the end of the instruction, and it may also be used as a scratch
+ // register by the instruction implementation.
+ //
+ // This behaves identically to ARM's UseTempRegister. However, it is renamed
+ // to discourage its use in ARM64, since in most cases it is better to
+ // allocate a temporary register for the Lithium instruction.
+ MUST_USE_RESULT LOperand* UseRegisterAndClobber(HValue* value);
+
+ // The operand created by UseRegisterAtStart is guaranteed to be live only at
+ // instruction start. The register allocator is free to assign the same
+  // register to some other operand used inside the instruction (i.e. temporary
+  // or output).
+ MUST_USE_RESULT LOperand* UseRegisterAtStart(HValue* value);
+
+ // An input operand in a register or a constant operand.
+ MUST_USE_RESULT LOperand* UseRegisterOrConstant(HValue* value);
+ MUST_USE_RESULT LOperand* UseRegisterOrConstantAtStart(HValue* value);
+
+ // A constant operand.
+ MUST_USE_RESULT LConstantOperand* UseConstant(HValue* value);
+
+  // An input operand in a register, a stack slot or a constant operand.
+ // Will not be moved to a register even if one is freely available.
+ virtual MUST_USE_RESULT LOperand* UseAny(HValue* value);
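+
+  // Roughly how these policies are combined by the Do* builder methods
+  // (illustrative only; the real builders live in lithium-arm64.cc):
+  //   LOperand* left = UseRegisterAtStart(instr->left());
+  //   LOperand* right = UseRegisterOrConstantAtStart(instr->right());
+  //   return DefineAsRegister(new(zone()) LSubI(left, right));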
+
+ // Temporary operand that must be in a register.
+ MUST_USE_RESULT LUnallocated* TempRegister();
+
+ // Temporary operand that must be in a fixed double register.
+ MUST_USE_RESULT LOperand* FixedTemp(DoubleRegister reg);
+
+ // Methods for setting up define-use relationships.
+ // Return the same instruction that they are passed.
+ LInstruction* Define(LTemplateResultInstruction<1>* instr,
+ LUnallocated* result);
+ LInstruction* DefineAsRegister(LTemplateResultInstruction<1>* instr);
+ LInstruction* DefineAsSpilled(LTemplateResultInstruction<1>* instr,
+ int index);
+
+ LInstruction* DefineSameAsFirst(LTemplateResultInstruction<1>* instr);
+ LInstruction* DefineFixed(LTemplateResultInstruction<1>* instr,
+ Register reg);
+ LInstruction* DefineFixedDouble(LTemplateResultInstruction<1>* instr,
+ DoubleRegister reg);
+
+ enum CanDeoptimize { CAN_DEOPTIMIZE_EAGERLY, CANNOT_DEOPTIMIZE_EAGERLY };
+
+ // By default we assume that instruction sequences generated for calls
+  // cannot deoptimize eagerly and we do not attach an environment to this
+ // instruction.
+ LInstruction* MarkAsCall(
+ LInstruction* instr,
+ HInstruction* hinstr,
+ CanDeoptimize can_deoptimize = CANNOT_DEOPTIMIZE_EAGERLY);
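+  // Call-like instructions (e.g. the LCallNew handled by DoCallNew in the
+  // codegen) are typically built as
+  //   MarkAsCall(DefineFixed(result, x0), instr)
+  // which is what the IsMarkedAsCall() / result-in-x0 asserts rely on.
+  // (Illustrative; the real builders are in lithium-arm64.cc.)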
+
+ LInstruction* AssignPointerMap(LInstruction* instr);
+ LInstruction* AssignEnvironment(LInstruction* instr);
+
+ void VisitInstruction(HInstruction* current);
+ void DoBasicBlock(HBasicBlock* block);
+
+ LInstruction* DoShift(Token::Value op, HBitwiseBinaryOperation* instr);
+ LInstruction* DoArithmeticD(Token::Value op,
+ HArithmeticBinaryOperation* instr);
+ LInstruction* DoArithmeticT(Token::Value op,
+ HBinaryOperation* instr);
+
+ LPlatformChunk* chunk_;
+ CompilationInfo* info_;
+ HGraph* const graph_;
+ Status status_;
+ HInstruction* current_instruction_;
+ HBasicBlock* current_block_;
+ LAllocator* allocator_;
+
+ DISALLOW_COPY_AND_ASSIGN(LChunkBuilder);
+};
+
+#undef DECLARE_HYDROGEN_ACCESSOR
+#undef DECLARE_CONCRETE_INSTRUCTION
+
+} } // namespace v8::internal
+
+#endif // V8_ARM64_LITHIUM_ARM64_H_
diff --git a/deps/v8/src/arm64/lithium-codegen-arm64.cc b/deps/v8/src/arm64/lithium-codegen-arm64.cc
new file mode 100644
index 0000000000..cd931e934c
--- /dev/null
+++ b/deps/v8/src/arm64/lithium-codegen-arm64.cc
@@ -0,0 +1,5901 @@
+// Copyright 2013 the V8 project authors. All rights reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following
+// disclaimer in the documentation and/or other materials provided
+// with the distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived
+// from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+#include "v8.h"
+
+#include "arm64/lithium-codegen-arm64.h"
+#include "arm64/lithium-gap-resolver-arm64.h"
+#include "code-stubs.h"
+#include "stub-cache.h"
+#include "hydrogen-osr.h"
+
+namespace v8 {
+namespace internal {
+
+
+class SafepointGenerator V8_FINAL : public CallWrapper {
+ public:
+ SafepointGenerator(LCodeGen* codegen,
+ LPointerMap* pointers,
+ Safepoint::DeoptMode mode)
+ : codegen_(codegen),
+ pointers_(pointers),
+ deopt_mode_(mode) { }
+ virtual ~SafepointGenerator() { }
+
+ virtual void BeforeCall(int call_size) const { }
+
+ virtual void AfterCall() const {
+ codegen_->RecordSafepoint(pointers_, deopt_mode_);
+ }
+
+ private:
+ LCodeGen* codegen_;
+ LPointerMap* pointers_;
+ Safepoint::DeoptMode deopt_mode_;
+};
+
+
+#define __ masm()->
+
+// Emit code to branch if the given condition holds.
+// The code generated here doesn't modify the flags and they must have
+// been set by some prior instructions.
+//
+// The EmitInverted function simply inverts the condition.
+class BranchOnCondition : public BranchGenerator {
+ public:
+ BranchOnCondition(LCodeGen* codegen, Condition cond)
+ : BranchGenerator(codegen),
+ cond_(cond) { }
+
+ virtual void Emit(Label* label) const {
+ __ B(cond_, label);
+ }
+
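+  // Note that when the condition is al ("always") the inverse is "never", so
+  // EmitInverted intentionally emits no branch at all.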
+ virtual void EmitInverted(Label* label) const {
+ if (cond_ != al) {
+ __ B(InvertCondition(cond_), label);
+ }
+ }
+
+ private:
+ Condition cond_;
+};
+
+
+// Emit code to compare lhs and rhs and branch if the condition holds.
+// This uses MacroAssembler's CompareAndBranch function so it will handle
+// converting the comparison to Cbz/Cbnz if the right-hand side is 0.
+//
+// EmitInverted still compares the two operands but inverts the condition.
+class CompareAndBranch : public BranchGenerator {
+ public:
+ CompareAndBranch(LCodeGen* codegen,
+ Condition cond,
+ const Register& lhs,
+ const Operand& rhs)
+ : BranchGenerator(codegen),
+ cond_(cond),
+ lhs_(lhs),
+ rhs_(rhs) { }
+
+ virtual void Emit(Label* label) const {
+ __ CompareAndBranch(lhs_, rhs_, cond_, label);
+ }
+
+ virtual void EmitInverted(Label* label) const {
+ __ CompareAndBranch(lhs_, rhs_, InvertCondition(cond_), label);
+ }
+
+ private:
+ Condition cond_;
+ const Register& lhs_;
+ const Operand& rhs_;
+};
+
+
+// Test the input with the given mask and branch if the condition holds.
+// If the condition is 'eq' or 'ne' this will use MacroAssembler's
+// TestAndBranchIfAllClear and TestAndBranchIfAnySet so it will handle the
+// conversion to Tbz/Tbnz when possible.
+class TestAndBranch : public BranchGenerator {
+ public:
+ TestAndBranch(LCodeGen* codegen,
+ Condition cond,
+ const Register& value,
+ uint64_t mask)
+ : BranchGenerator(codegen),
+ cond_(cond),
+ value_(value),
+ mask_(mask) { }
+
+ virtual void Emit(Label* label) const {
+ switch (cond_) {
+ case eq:
+ __ TestAndBranchIfAllClear(value_, mask_, label);
+ break;
+ case ne:
+ __ TestAndBranchIfAnySet(value_, mask_, label);
+ break;
+ default:
+ __ Tst(value_, mask_);
+ __ B(cond_, label);
+ }
+ }
+
+ virtual void EmitInverted(Label* label) const {
+ // The inverse of "all clear" is "any set" and vice versa.
+ switch (cond_) {
+ case eq:
+ __ TestAndBranchIfAnySet(value_, mask_, label);
+ break;
+ case ne:
+ __ TestAndBranchIfAllClear(value_, mask_, label);
+ break;
+ default:
+ __ Tst(value_, mask_);
+ __ B(InvertCondition(cond_), label);
+ }
+ }
+
+ private:
+ Condition cond_;
+ const Register& value_;
+ uint64_t mask_;
+};
+
+
+// Test the input and branch if it is non-zero and not a NaN.
+class BranchIfNonZeroNumber : public BranchGenerator {
+ public:
+ BranchIfNonZeroNumber(LCodeGen* codegen, const FPRegister& value,
+ const FPRegister& scratch)
+ : BranchGenerator(codegen), value_(value), scratch_(scratch) { }
+
+ virtual void Emit(Label* label) const {
+ __ Fabs(scratch_, value_);
+ // Compare with 0.0. Because scratch_ is positive, the result can be one of
+ // nZCv (equal), nzCv (greater) or nzCV (unordered).
+ __ Fcmp(scratch_, 0.0);
+ __ B(gt, label);
+ }
+
+ virtual void EmitInverted(Label* label) const {
+ __ Fabs(scratch_, value_);
+ __ Fcmp(scratch_, 0.0);
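+    // An unordered (NaN) comparison also satisfies le, so this single branch
+    // covers both the zero and the NaN cases.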
+ __ B(le, label);
+ }
+
+ private:
+ const FPRegister& value_;
+ const FPRegister& scratch_;
+};
+
+
+// Test the input and branch if it is a heap number.
+class BranchIfHeapNumber : public BranchGenerator {
+ public:
+ BranchIfHeapNumber(LCodeGen* codegen, const Register& value)
+ : BranchGenerator(codegen), value_(value) { }
+
+ virtual void Emit(Label* label) const {
+ __ JumpIfHeapNumber(value_, label);
+ }
+
+ virtual void EmitInverted(Label* label) const {
+ __ JumpIfNotHeapNumber(value_, label);
+ }
+
+ private:
+ const Register& value_;
+};
+
+
+// Test the input and branch if it is the specified root value.
+class BranchIfRoot : public BranchGenerator {
+ public:
+ BranchIfRoot(LCodeGen* codegen, const Register& value,
+ Heap::RootListIndex index)
+ : BranchGenerator(codegen), value_(value), index_(index) { }
+
+ virtual void Emit(Label* label) const {
+ __ JumpIfRoot(value_, index_, label);
+ }
+
+ virtual void EmitInverted(Label* label) const {
+ __ JumpIfNotRoot(value_, index_, label);
+ }
+
+ private:
+ const Register& value_;
+ const Heap::RootListIndex index_;
+};
+
+
+void LCodeGen::WriteTranslation(LEnvironment* environment,
+ Translation* translation) {
+ if (environment == NULL) return;
+
+ // The translation includes one command per value in the environment.
+ int translation_size = environment->translation_size();
+ // The output frame height does not include the parameters.
+ int height = translation_size - environment->parameter_count();
+
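+  // Outer environments are translated first, so frames appear in the
+  // translation from outermost to innermost.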
+ WriteTranslation(environment->outer(), translation);
+ bool has_closure_id = !info()->closure().is_null() &&
+ !info()->closure().is_identical_to(environment->closure());
+ int closure_id = has_closure_id
+ ? DefineDeoptimizationLiteral(environment->closure())
+ : Translation::kSelfLiteralId;
+
+ switch (environment->frame_type()) {
+ case JS_FUNCTION:
+ translation->BeginJSFrame(environment->ast_id(), closure_id, height);
+ break;
+ case JS_CONSTRUCT:
+ translation->BeginConstructStubFrame(closure_id, translation_size);
+ break;
+ case JS_GETTER:
+ ASSERT(translation_size == 1);
+ ASSERT(height == 0);
+ translation->BeginGetterStubFrame(closure_id);
+ break;
+ case JS_SETTER:
+ ASSERT(translation_size == 2);
+ ASSERT(height == 0);
+ translation->BeginSetterStubFrame(closure_id);
+ break;
+ case STUB:
+ translation->BeginCompiledStubFrame();
+ break;
+ case ARGUMENTS_ADAPTOR:
+ translation->BeginArgumentsAdaptorFrame(closure_id, translation_size);
+ break;
+ default:
+ UNREACHABLE();
+ }
+
+ int object_index = 0;
+ int dematerialized_index = 0;
+ for (int i = 0; i < translation_size; ++i) {
+ LOperand* value = environment->values()->at(i);
+
+ AddToTranslation(environment,
+ translation,
+ value,
+ environment->HasTaggedValueAt(i),
+ environment->HasUint32ValueAt(i),
+ &object_index,
+ &dematerialized_index);
+ }
+}
+
+
+void LCodeGen::AddToTranslation(LEnvironment* environment,
+ Translation* translation,
+ LOperand* op,
+ bool is_tagged,
+ bool is_uint32,
+ int* object_index_pointer,
+ int* dematerialized_index_pointer) {
+ if (op == LEnvironment::materialization_marker()) {
+ int object_index = (*object_index_pointer)++;
+ if (environment->ObjectIsDuplicateAt(object_index)) {
+ int dupe_of = environment->ObjectDuplicateOfAt(object_index);
+ translation->DuplicateObject(dupe_of);
+ return;
+ }
+ int object_length = environment->ObjectLengthAt(object_index);
+ if (environment->ObjectIsArgumentsAt(object_index)) {
+ translation->BeginArgumentsObject(object_length);
+ } else {
+ translation->BeginCapturedObject(object_length);
+ }
+ int dematerialized_index = *dematerialized_index_pointer;
+ int env_offset = environment->translation_size() + dematerialized_index;
+ *dematerialized_index_pointer += object_length;
+ for (int i = 0; i < object_length; ++i) {
+ LOperand* value = environment->values()->at(env_offset + i);
+ AddToTranslation(environment,
+ translation,
+ value,
+ environment->HasTaggedValueAt(env_offset + i),
+ environment->HasUint32ValueAt(env_offset + i),
+ object_index_pointer,
+ dematerialized_index_pointer);
+ }
+ return;
+ }
+
+ if (op->IsStackSlot()) {
+ if (is_tagged) {
+ translation->StoreStackSlot(op->index());
+ } else if (is_uint32) {
+ translation->StoreUint32StackSlot(op->index());
+ } else {
+ translation->StoreInt32StackSlot(op->index());
+ }
+ } else if (op->IsDoubleStackSlot()) {
+ translation->StoreDoubleStackSlot(op->index());
+ } else if (op->IsRegister()) {
+ Register reg = ToRegister(op);
+ if (is_tagged) {
+ translation->StoreRegister(reg);
+ } else if (is_uint32) {
+ translation->StoreUint32Register(reg);
+ } else {
+ translation->StoreInt32Register(reg);
+ }
+ } else if (op->IsDoubleRegister()) {
+ DoubleRegister reg = ToDoubleRegister(op);
+ translation->StoreDoubleRegister(reg);
+ } else if (op->IsConstantOperand()) {
+ HConstant* constant = chunk()->LookupConstant(LConstantOperand::cast(op));
+ int src_index = DefineDeoptimizationLiteral(constant->handle(isolate()));
+ translation->StoreLiteral(src_index);
+ } else {
+ UNREACHABLE();
+ }
+}
+
+
+int LCodeGen::DefineDeoptimizationLiteral(Handle<Object> literal) {
+ int result = deoptimization_literals_.length();
+ for (int i = 0; i < deoptimization_literals_.length(); ++i) {
+ if (deoptimization_literals_[i].is_identical_to(literal)) return i;
+ }
+ deoptimization_literals_.Add(literal, zone());
+ return result;
+}
+
+
+void LCodeGen::RegisterEnvironmentForDeoptimization(LEnvironment* environment,
+ Safepoint::DeoptMode mode) {
+ if (!environment->HasBeenRegistered()) {
+ int frame_count = 0;
+ int jsframe_count = 0;
+ for (LEnvironment* e = environment; e != NULL; e = e->outer()) {
+ ++frame_count;
+ if (e->frame_type() == JS_FUNCTION) {
+ ++jsframe_count;
+ }
+ }
+ Translation translation(&translations_, frame_count, jsframe_count, zone());
+ WriteTranslation(environment, &translation);
+ int deoptimization_index = deoptimizations_.length();
+ int pc_offset = masm()->pc_offset();
+ environment->Register(deoptimization_index,
+ translation.index(),
+ (mode == Safepoint::kLazyDeopt) ? pc_offset : -1);
+ deoptimizations_.Add(environment, zone());
+ }
+}
+
+
+void LCodeGen::CallCode(Handle<Code> code,
+ RelocInfo::Mode mode,
+ LInstruction* instr) {
+ CallCodeGeneric(code, mode, instr, RECORD_SIMPLE_SAFEPOINT);
+}
+
+
+void LCodeGen::CallCodeGeneric(Handle<Code> code,
+ RelocInfo::Mode mode,
+ LInstruction* instr,
+ SafepointMode safepoint_mode) {
+ ASSERT(instr != NULL);
+
+ Assembler::BlockPoolsScope scope(masm_);
+ __ Call(code, mode);
+ RecordSafepointWithLazyDeopt(instr, safepoint_mode);
+
+ if ((code->kind() == Code::BINARY_OP_IC) ||
+ (code->kind() == Code::COMPARE_IC)) {
+ // Signal that we don't inline smi code before these stubs in the
+ // optimizing code generator.
+ InlineSmiCheckInfo::EmitNotInlined(masm());
+ }
+}
+
+
+void LCodeGen::DoCallFunction(LCallFunction* instr) {
+ ASSERT(ToRegister(instr->context()).is(cp));
+ ASSERT(ToRegister(instr->function()).Is(x1));
+ ASSERT(ToRegister(instr->result()).Is(x0));
+
+ int arity = instr->arity();
+ CallFunctionStub stub(arity, instr->hydrogen()->function_flags());
+ CallCode(stub.GetCode(isolate()), RelocInfo::CODE_TARGET, instr);
+}
+
+
+void LCodeGen::DoCallNew(LCallNew* instr) {
+ ASSERT(ToRegister(instr->context()).is(cp));
+ ASSERT(instr->IsMarkedAsCall());
+ ASSERT(ToRegister(instr->constructor()).is(x1));
+
+ __ Mov(x0, instr->arity());
+ // No cell in x2 for construct type feedback in optimized code.
+ __ LoadRoot(x2, Heap::kUndefinedValueRootIndex);
+
+ CallConstructStub stub(NO_CALL_FUNCTION_FLAGS);
+ CallCode(stub.GetCode(isolate()), RelocInfo::CONSTRUCT_CALL, instr);
+
+ ASSERT(ToRegister(instr->result()).is(x0));
+}
+
+
+void LCodeGen::DoCallNewArray(LCallNewArray* instr) {
+ ASSERT(instr->IsMarkedAsCall());
+ ASSERT(ToRegister(instr->context()).is(cp));
+ ASSERT(ToRegister(instr->constructor()).is(x1));
+
+ __ Mov(x0, Operand(instr->arity()));
+ __ LoadRoot(x2, Heap::kUndefinedValueRootIndex);
+
+ ElementsKind kind = instr->hydrogen()->elements_kind();
+ AllocationSiteOverrideMode override_mode =
+ (AllocationSite::GetMode(kind) == TRACK_ALLOCATION_SITE)
+ ? DISABLE_ALLOCATION_SITES
+ : DONT_OVERRIDE;
+
+ if (instr->arity() == 0) {
+ ArrayNoArgumentConstructorStub stub(kind, override_mode);
+ CallCode(stub.GetCode(isolate()), RelocInfo::CONSTRUCT_CALL, instr);
+ } else if (instr->arity() == 1) {
+ Label done;
+ if (IsFastPackedElementsKind(kind)) {
+ Label packed_case;
+
+ // We might need to create a holey array; look at the first argument.
+ __ Peek(x10, 0);
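+      // The single argument is the requested length; a Smi zero keeps the
+      // packed kind, anything else falls through to the holey-kind stub.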
+ __ Cbz(x10, &packed_case);
+
+ ElementsKind holey_kind = GetHoleyElementsKind(kind);
+ ArraySingleArgumentConstructorStub stub(holey_kind, override_mode);
+ CallCode(stub.GetCode(isolate()), RelocInfo::CONSTRUCT_CALL, instr);
+ __ B(&done);
+ __ Bind(&packed_case);
+ }
+
+ ArraySingleArgumentConstructorStub stub(kind, override_mode);
+ CallCode(stub.GetCode(isolate()), RelocInfo::CONSTRUCT_CALL, instr);
+ __ Bind(&done);
+ } else {
+ ArrayNArgumentsConstructorStub stub(kind, override_mode);
+ CallCode(stub.GetCode(isolate()), RelocInfo::CONSTRUCT_CALL, instr);
+ }
+
+ ASSERT(ToRegister(instr->result()).is(x0));
+}
+
+
+void LCodeGen::CallRuntime(const Runtime::Function* function,
+ int num_arguments,
+ LInstruction* instr,
+ SaveFPRegsMode save_doubles) {
+ ASSERT(instr != NULL);
+
+ __ CallRuntime(function, num_arguments, save_doubles);
+
+ RecordSafepointWithLazyDeopt(instr, RECORD_SIMPLE_SAFEPOINT);
+}
+
+
+void LCodeGen::LoadContextFromDeferred(LOperand* context) {
+ if (context->IsRegister()) {
+ __ Mov(cp, ToRegister(context));
+ } else if (context->IsStackSlot()) {
+ __ Ldr(cp, ToMemOperand(context));
+ } else if (context->IsConstantOperand()) {
+ HConstant* constant =
+ chunk_->LookupConstant(LConstantOperand::cast(context));
+ __ LoadHeapObject(cp,
+ Handle<HeapObject>::cast(constant->handle(isolate())));
+ } else {
+ UNREACHABLE();
+ }
+}
+
+
+void LCodeGen::CallRuntimeFromDeferred(Runtime::FunctionId id,
+ int argc,
+ LInstruction* instr,
+ LOperand* context) {
+ LoadContextFromDeferred(context);
+ __ CallRuntimeSaveDoubles(id);
+ RecordSafepointWithRegisters(
+ instr->pointer_map(), argc, Safepoint::kNoLazyDeopt);
+}
+
+
+void LCodeGen::RecordAndWritePosition(int position) {
+ if (position == RelocInfo::kNoPosition) return;
+ masm()->positions_recorder()->RecordPosition(position);
+ masm()->positions_recorder()->WriteRecordedPositions();
+}
+
+
+void LCodeGen::RecordSafepointWithLazyDeopt(LInstruction* instr,
+ SafepointMode safepoint_mode) {
+ if (safepoint_mode == RECORD_SIMPLE_SAFEPOINT) {
+ RecordSafepoint(instr->pointer_map(), Safepoint::kLazyDeopt);
+ } else {
+ ASSERT(safepoint_mode == RECORD_SAFEPOINT_WITH_REGISTERS_AND_NO_ARGUMENTS);
+ RecordSafepointWithRegisters(
+ instr->pointer_map(), 0, Safepoint::kLazyDeopt);
+ }
+}
+
+
+void LCodeGen::RecordSafepoint(LPointerMap* pointers,
+ Safepoint::Kind kind,
+ int arguments,
+ Safepoint::DeoptMode deopt_mode) {
+ ASSERT(expected_safepoint_kind_ == kind);
+
+ const ZoneList<LOperand*>* operands = pointers->GetNormalizedOperands();
+ Safepoint safepoint = safepoints_.DefineSafepoint(
+ masm(), kind, arguments, deopt_mode);
+
+ for (int i = 0; i < operands->length(); i++) {
+ LOperand* pointer = operands->at(i);
+ if (pointer->IsStackSlot()) {
+ safepoint.DefinePointerSlot(pointer->index(), zone());
+ } else if (pointer->IsRegister() && (kind & Safepoint::kWithRegisters)) {
+ safepoint.DefinePointerRegister(ToRegister(pointer), zone());
+ }
+ }
+
+ if (kind & Safepoint::kWithRegisters) {
+ // Register cp always contains a pointer to the context.
+ safepoint.DefinePointerRegister(cp, zone());
+ }
+}
+
+
+void LCodeGen::RecordSafepoint(LPointerMap* pointers,
+ Safepoint::DeoptMode deopt_mode) {
+ RecordSafepoint(pointers, Safepoint::kSimple, 0, deopt_mode);
+}
+
+
+void LCodeGen::RecordSafepoint(Safepoint::DeoptMode deopt_mode) {
+ LPointerMap empty_pointers(zone());
+ RecordSafepoint(&empty_pointers, deopt_mode);
+}
+
+
+void LCodeGen::RecordSafepointWithRegisters(LPointerMap* pointers,
+ int arguments,
+ Safepoint::DeoptMode deopt_mode) {
+ RecordSafepoint(pointers, Safepoint::kWithRegisters, arguments, deopt_mode);
+}
+
+
+void LCodeGen::RecordSafepointWithRegistersAndDoubles(
+ LPointerMap* pointers, int arguments, Safepoint::DeoptMode deopt_mode) {
+ RecordSafepoint(
+ pointers, Safepoint::kWithRegistersAndDoubles, arguments, deopt_mode);
+}
+
+
+bool LCodeGen::GenerateCode() {
+ LPhase phase("Z_Code generation", chunk());
+ ASSERT(is_unused());
+ status_ = GENERATING;
+
+ // Open a frame scope to indicate that there is a frame on the stack. The
+ // NONE indicates that the scope shouldn't actually generate code to set up
+ // the frame (that is done in GeneratePrologue).
+ FrameScope frame_scope(masm_, StackFrame::NONE);
+
+ return GeneratePrologue() &&
+ GenerateBody() &&
+ GenerateDeferredCode() &&
+ GenerateDeoptJumpTable() &&
+ GenerateSafepointTable();
+}
+
+
+void LCodeGen::SaveCallerDoubles() {
+ ASSERT(info()->saves_caller_doubles());
+ ASSERT(NeedsEagerFrame());
+ Comment(";;; Save clobbered callee double registers");
+ BitVector* doubles = chunk()->allocated_double_registers();
+ BitVector::Iterator iterator(doubles);
+ int count = 0;
+ while (!iterator.Done()) {
+ // TODO(all): Is this supposed to save just the callee-saved doubles? It
+ // looks like it's saving all of them.
+ FPRegister value = FPRegister::FromAllocationIndex(iterator.Current());
+ __ Poke(value, count * kDoubleSize);
+ iterator.Advance();
+ count++;
+ }
+}
+
+
+void LCodeGen::RestoreCallerDoubles() {
+ ASSERT(info()->saves_caller_doubles());
+ ASSERT(NeedsEagerFrame());
+ Comment(";;; Restore clobbered callee double registers");
+ BitVector* doubles = chunk()->allocated_double_registers();
+ BitVector::Iterator iterator(doubles);
+ int count = 0;
+ while (!iterator.Done()) {
+ // TODO(all): Is this supposed to restore just the callee-saved doubles? It
+ // looks like it's restoring all of them.
+ FPRegister value = FPRegister::FromAllocationIndex(iterator.Current());
+ __ Peek(value, count * kDoubleSize);
+ iterator.Advance();
+ count++;
+ }
+}
+
+
+bool LCodeGen::GeneratePrologue() {
+ ASSERT(is_generating());
+
+ if (info()->IsOptimizing()) {
+ ProfileEntryHookStub::MaybeCallEntryHook(masm_);
+
+ // TODO(all): Add support for stop_t FLAG in DEBUG mode.
+
+ // Sloppy mode functions and builtins need to replace the receiver with the
+ // global proxy when called as functions (without an explicit receiver
+ // object).
+ if (info_->this_has_uses() &&
+ info_->strict_mode() == SLOPPY &&
+ !info_->is_native()) {
+ Label ok;
+ int receiver_offset = info_->scope()->num_parameters() * kXRegSize;
+ __ Peek(x10, receiver_offset);
+ __ JumpIfNotRoot(x10, Heap::kUndefinedValueRootIndex, &ok);
+
+ __ Ldr(x10, GlobalObjectMemOperand());
+ __ Ldr(x10, FieldMemOperand(x10, GlobalObject::kGlobalReceiverOffset));
+ __ Poke(x10, receiver_offset);
+
+ __ Bind(&ok);
+ }
+ }
+
+ ASSERT(__ StackPointer().Is(jssp));
+ info()->set_prologue_offset(masm_->pc_offset());
+ if (NeedsEagerFrame()) {
+ __ Prologue(info()->IsStub() ? BUILD_STUB_FRAME : BUILD_FUNCTION_FRAME);
+ frame_is_built_ = true;
+ info_->AddNoFrameRange(0, masm_->pc_offset());
+ }
+
+ // Reserve space for the stack slots needed by the code.
+ int slots = GetStackSlotCount();
+ if (slots > 0) {
+ __ Claim(slots, kPointerSize);
+ }
+
+ if (info()->saves_caller_doubles()) {
+ SaveCallerDoubles();
+ }
+
+ // Allocate a local context if needed.
+ int heap_slots = info()->num_heap_slots() - Context::MIN_CONTEXT_SLOTS;
+ if (heap_slots > 0) {
+ Comment(";;; Allocate local context");
+ // Argument to NewContext is the function, which is in x1.
+ if (heap_slots <= FastNewContextStub::kMaximumSlots) {
+ FastNewContextStub stub(heap_slots);
+ __ CallStub(&stub);
+ } else {
+ __ Push(x1);
+ __ CallRuntime(Runtime::kHiddenNewFunctionContext, 1);
+ }
+ RecordSafepoint(Safepoint::kNoLazyDeopt);
+ // Context is returned in x0. It replaces the context passed to us. It's
+ // saved in the stack and kept live in cp.
+ __ Mov(cp, x0);
+ __ Str(x0, MemOperand(fp, StandardFrameConstants::kContextOffset));
+ // Copy any necessary parameters into the context.
+ int num_parameters = scope()->num_parameters();
+ for (int i = 0; i < num_parameters; i++) {
+ Variable* var = scope()->parameter(i);
+ if (var->IsContextSlot()) {
+ Register value = x0;
+ Register scratch = x3;
+
+ int parameter_offset = StandardFrameConstants::kCallerSPOffset +
+ (num_parameters - 1 - i) * kPointerSize;
+ // Load parameter from stack.
+ __ Ldr(value, MemOperand(fp, parameter_offset));
+ // Store it in the context.
+ MemOperand target = ContextMemOperand(cp, var->index());
+ __ Str(value, target);
+ // Update the write barrier. This clobbers value and scratch.
+ __ RecordWriteContextSlot(cp, target.offset(), value, scratch,
+ GetLinkRegisterState(), kSaveFPRegs);
+ }
+ }
+ Comment(";;; End allocate local context");
+ }
+
+ // Trace the call.
+ if (FLAG_trace && info()->IsOptimizing()) {
+ // We have not executed any compiled code yet, so cp still holds the
+ // incoming context.
+ __ CallRuntime(Runtime::kTraceEnter, 0);
+ }
+
+ return !is_aborted();
+}
+
+
+void LCodeGen::GenerateOsrPrologue() {
+ // Generate the OSR entry prologue at the first unknown OSR value, or if there
+ // are none, at the OSR entrypoint instruction.
+ if (osr_pc_offset_ >= 0) return;
+
+ osr_pc_offset_ = masm()->pc_offset();
+
+ // Adjust the frame size, subsuming the unoptimized frame into the
+ // optimized frame.
+ int slots = GetStackSlotCount() - graph()->osr()->UnoptimizedFrameSlots();
+ ASSERT(slots >= 0);
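+  // The unoptimized frame slots are already on the stack, so only the
+  // difference needed for the optimized frame is claimed.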
+ __ Claim(slots);
+}
+
+
+void LCodeGen::GenerateBodyInstructionPre(LInstruction* instr) {
+ if (instr->IsCall()) {
+ EnsureSpaceForLazyDeopt(Deoptimizer::patch_size());
+ }
+ if (!instr->IsLazyBailout() && !instr->IsGap()) {
+ safepoints_.BumpLastLazySafepointIndex();
+ }
+}
+
+
+bool LCodeGen::GenerateDeferredCode() {
+ ASSERT(is_generating());
+ if (deferred_.length() > 0) {
+ for (int i = 0; !is_aborted() && (i < deferred_.length()); i++) {
+ LDeferredCode* code = deferred_[i];
+
+ HValue* value =
+ instructions_->at(code->instruction_index())->hydrogen_value();
+ RecordAndWritePosition(
+ chunk()->graph()->SourcePositionToScriptPosition(value->position()));
+
+ Comment(";;; <@%d,#%d> "
+ "-------------------- Deferred %s --------------------",
+ code->instruction_index(),
+ code->instr()->hydrogen_value()->id(),
+ code->instr()->Mnemonic());
+
+ __ Bind(code->entry());
+
+ if (NeedsDeferredFrame()) {
+ Comment(";;; Build frame");
+ ASSERT(!frame_is_built_);
+ ASSERT(info()->IsStub());
+ frame_is_built_ = true;
+ __ Push(lr, fp, cp);
+ __ Mov(fp, Smi::FromInt(StackFrame::STUB));
+ __ Push(fp);
+ __ Add(fp, __ StackPointer(),
+ StandardFrameConstants::kFixedFrameSizeFromFp);
+ Comment(";;; Deferred code");
+ }
+
+ code->Generate();
+
+ if (NeedsDeferredFrame()) {
+ Comment(";;; Destroy frame");
+ ASSERT(frame_is_built_);
+ __ Pop(xzr, cp, fp, lr);
+ frame_is_built_ = false;
+ }
+
+ __ B(code->exit());
+ }
+ }
+
+ // Force constant pool emission at the end of the deferred code to make
+ // sure that no constant pools are emitted after deferred code because
+  // deferred code generation is the last step that generates code. The two
+  // following steps will only output data used by Crankshaft.
+ masm()->CheckConstPool(true, false);
+
+ return !is_aborted();
+}
+
+
+bool LCodeGen::GenerateDeoptJumpTable() {
+ if (deopt_jump_table_.length() > 0) {
+ Comment(";;; -------------------- Jump table --------------------");
+ }
+ Label table_start;
+ __ bind(&table_start);
+ Label needs_frame;
+ for (int i = 0; i < deopt_jump_table_.length(); i++) {
+ __ Bind(&deopt_jump_table_[i]->label);
+ Address entry = deopt_jump_table_[i]->address;
+ Deoptimizer::BailoutType type = deopt_jump_table_[i]->bailout_type;
+ int id = Deoptimizer::GetDeoptimizationId(isolate(), entry, type);
+ if (id == Deoptimizer::kNotDeoptimizationEntry) {
+ Comment(";;; jump table entry %d.", i);
+ } else {
+ Comment(";;; jump table entry %d: deoptimization bailout %d.", i, id);
+ }
+ if (deopt_jump_table_[i]->needs_frame) {
+ ASSERT(!info()->saves_caller_doubles());
+
+ UseScratchRegisterScope temps(masm());
+ Register stub_deopt_entry = temps.AcquireX();
+ Register stub_marker = temps.AcquireX();
+
+ __ Mov(stub_deopt_entry, ExternalReference::ForDeoptEntry(entry));
+ if (needs_frame.is_bound()) {
+ __ B(&needs_frame);
+ } else {
+ __ Bind(&needs_frame);
+ // This variant of deopt can only be used with stubs. Since we don't
+ // have a function pointer to install in the stack frame that we're
+ // building, install a special marker there instead.
+ ASSERT(info()->IsStub());
+ __ Mov(stub_marker, Smi::FromInt(StackFrame::STUB));
+ __ Push(lr, fp, cp, stub_marker);
+ __ Add(fp, __ StackPointer(), 2 * kPointerSize);
+ __ Call(stub_deopt_entry);
+ }
+ } else {
+ if (info()->saves_caller_doubles()) {
+ ASSERT(info()->IsStub());
+ RestoreCallerDoubles();
+ }
+ __ Call(entry, RelocInfo::RUNTIME_ENTRY);
+ }
+ masm()->CheckConstPool(false, false);
+ }
+
+ // Force constant pool emission at the end of the deopt jump table to make
+ // sure that no constant pools are emitted after.
+ masm()->CheckConstPool(true, false);
+
+ // The deoptimization jump table is the last part of the instruction
+ // sequence. Mark the generated code as done unless we bailed out.
+ if (!is_aborted()) status_ = DONE;
+ return !is_aborted();
+}
+
+
+bool LCodeGen::GenerateSafepointTable() {
+ ASSERT(is_done());
+ // We do not know how much data will be emitted for the safepoint table, so
+ // force emission of the veneer pool.
+ masm()->CheckVeneerPool(true, true);
+ safepoints_.Emit(masm(), GetStackSlotCount());
+ return !is_aborted();
+}
+
+
+void LCodeGen::FinishCode(Handle<Code> code) {
+ ASSERT(is_done());
+ code->set_stack_slots(GetStackSlotCount());
+ code->set_safepoint_table_offset(safepoints_.GetCodeOffset());
+ if (code->is_optimized_code()) RegisterWeakObjectsInOptimizedCode(code);
+ PopulateDeoptimizationData(code);
+ info()->CommitDependencies(code);
+}
+
+
+void LCodeGen::Abort(BailoutReason reason) {
+ info()->set_bailout_reason(reason);
+ status_ = ABORTED;
+}
+
+
+void LCodeGen::PopulateDeoptimizationData(Handle<Code> code) {
+ int length = deoptimizations_.length();
+ if (length == 0) return;
+
+ Handle<DeoptimizationInputData> data =
+ factory()->NewDeoptimizationInputData(length, TENURED);
+
+ Handle<ByteArray> translations =
+ translations_.CreateByteArray(isolate()->factory());
+ data->SetTranslationByteArray(*translations);
+ data->SetInlinedFunctionCount(Smi::FromInt(inlined_function_count_));
+ data->SetOptimizationId(Smi::FromInt(info_->optimization_id()));
+ if (info_->IsOptimizing()) {
+ // Reference to shared function info does not change between phases.
+ AllowDeferredHandleDereference allow_handle_dereference;
+ data->SetSharedFunctionInfo(*info_->shared_info());
+ } else {
+ data->SetSharedFunctionInfo(Smi::FromInt(0));
+ }
+
+ Handle<FixedArray> literals =
+ factory()->NewFixedArray(deoptimization_literals_.length(), TENURED);
+ { AllowDeferredHandleDereference copy_handles;
+ for (int i = 0; i < deoptimization_literals_.length(); i++) {
+ literals->set(i, *deoptimization_literals_[i]);
+ }
+ data->SetLiteralArray(*literals);
+ }
+
+ data->SetOsrAstId(Smi::FromInt(info_->osr_ast_id().ToInt()));
+ data->SetOsrPcOffset(Smi::FromInt(osr_pc_offset_));
+
+ // Populate the deoptimization entries.
+ for (int i = 0; i < length; i++) {
+ LEnvironment* env = deoptimizations_[i];
+ data->SetAstId(i, env->ast_id());
+ data->SetTranslationIndex(i, Smi::FromInt(env->translation_index()));
+ data->SetArgumentsStackHeight(i,
+ Smi::FromInt(env->arguments_stack_height()));
+ data->SetPc(i, Smi::FromInt(env->pc_offset()));
+ }
+
+ code->set_deoptimization_data(*data);
+}
+
+
+void LCodeGen::PopulateDeoptimizationLiteralsWithInlinedFunctions() {
+ ASSERT(deoptimization_literals_.length() == 0);
+
+ const ZoneList<Handle<JSFunction> >* inlined_closures =
+ chunk()->inlined_closures();
+
+ for (int i = 0, length = inlined_closures->length(); i < length; i++) {
+ DefineDeoptimizationLiteral(inlined_closures->at(i));
+ }
+
+ inlined_function_count_ = deoptimization_literals_.length();
+}
+
+
+void LCodeGen::DeoptimizeBranch(
+ LEnvironment* environment,
+ BranchType branch_type, Register reg, int bit,
+ Deoptimizer::BailoutType* override_bailout_type) {
+ RegisterEnvironmentForDeoptimization(environment, Safepoint::kNoLazyDeopt);
+ Deoptimizer::BailoutType bailout_type =
+ info()->IsStub() ? Deoptimizer::LAZY : Deoptimizer::EAGER;
+
+ if (override_bailout_type != NULL) {
+ bailout_type = *override_bailout_type;
+ }
+
+ ASSERT(environment->HasBeenRegistered());
+ ASSERT(info()->IsOptimizing() || info()->IsStub());
+ int id = environment->deoptimization_index();
+ Address entry =
+ Deoptimizer::GetDeoptimizationEntry(isolate(), id, bailout_type);
+
+ if (entry == NULL) {
+ Abort(kBailoutWasNotPrepared);
+ }
+
+ if (FLAG_deopt_every_n_times != 0 && !info()->IsStub()) {
+ Label not_zero;
+ ExternalReference count = ExternalReference::stress_deopt_count(isolate());
+
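+ // Decrement the stress-deopt counter; when it reaches zero, reset it to
+ // FLAG_deopt_every_n_times and force a deopt, otherwise store the
+ // decremented value and carry on.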
+ __ Push(x0, x1, x2);
+ __ Mrs(x2, NZCV);
+ __ Mov(x0, count);
+ __ Ldr(w1, MemOperand(x0));
+ __ Subs(x1, x1, 1);
+ __ B(gt, &not_zero);
+ __ Mov(w1, FLAG_deopt_every_n_times);
+ __ Str(w1, MemOperand(x0));
+ __ Pop(x2, x1, x0);
+ ASSERT(frame_is_built_);
+ __ Call(entry, RelocInfo::RUNTIME_ENTRY);
+ __ Unreachable();
+
+ __ Bind(&not_zero);
+ __ Str(w1, MemOperand(x0));
+ __ Msr(NZCV, x2);
+ __ Pop(x2, x1, x0);
+ }
+
+ if (info()->ShouldTrapOnDeopt()) {
+ Label dont_trap;
+ __ B(&dont_trap, InvertBranchType(branch_type), reg, bit);
+ __ Debug("trap_on_deopt", __LINE__, BREAK);
+ __ Bind(&dont_trap);
+ }
+
+ ASSERT(info()->IsStub() || frame_is_built_);
+ // Go through the jump table if we need to build a frame or to restore caller doubles.
+ if (branch_type == always &&
+ frame_is_built_ && !info()->saves_caller_doubles()) {
+ __ Call(entry, RelocInfo::RUNTIME_ENTRY);
+ } else {
+ // We often have several deopts to the same entry; reuse the last jump
+ // entry if this is the case.
+ if (deopt_jump_table_.is_empty() ||
+ (deopt_jump_table_.last()->address != entry) ||
+ (deopt_jump_table_.last()->bailout_type != bailout_type) ||
+ (deopt_jump_table_.last()->needs_frame != !frame_is_built_)) {
+ Deoptimizer::JumpTableEntry* table_entry =
+ new(zone()) Deoptimizer::JumpTableEntry(entry,
+ bailout_type,
+ !frame_is_built_);
+ deopt_jump_table_.Add(table_entry, zone());
+ }
+ __ B(&deopt_jump_table_.last()->label,
+ branch_type, reg, bit);
+ }
+}
+
+
+void LCodeGen::Deoptimize(LEnvironment* environment,
+ Deoptimizer::BailoutType* override_bailout_type) {
+ DeoptimizeBranch(environment, always, NoReg, -1, override_bailout_type);
+}
+
+
+void LCodeGen::DeoptimizeIf(Condition cond, LEnvironment* environment) {
+ DeoptimizeBranch(environment, static_cast<BranchType>(cond));
+}
+
+
+void LCodeGen::DeoptimizeIfZero(Register rt, LEnvironment* environment) {
+ DeoptimizeBranch(environment, reg_zero, rt);
+}
+
+
+void LCodeGen::DeoptimizeIfNotZero(Register rt, LEnvironment* environment) {
+ DeoptimizeBranch(environment, reg_not_zero, rt);
+}
+
+
+void LCodeGen::DeoptimizeIfNegative(Register rt, LEnvironment* environment) {
+ int sign_bit = rt.Is64Bits() ? kXSignBit : kWSignBit;
+ DeoptimizeIfBitSet(rt, sign_bit, environment);
+}
+
+
+void LCodeGen::DeoptimizeIfSmi(Register rt,
+ LEnvironment* environment) {
+ DeoptimizeIfBitClear(rt, MaskToBit(kSmiTagMask), environment);
+}
+
+
+void LCodeGen::DeoptimizeIfNotSmi(Register rt, LEnvironment* environment) {
+ DeoptimizeIfBitSet(rt, MaskToBit(kSmiTagMask), environment);
+}
+
+
+void LCodeGen::DeoptimizeIfRoot(Register rt,
+ Heap::RootListIndex index,
+ LEnvironment* environment) {
+ __ CompareRoot(rt, index);
+ DeoptimizeIf(eq, environment);
+}
+
+
+void LCodeGen::DeoptimizeIfNotRoot(Register rt,
+ Heap::RootListIndex index,
+ LEnvironment* environment) {
+ __ CompareRoot(rt, index);
+ DeoptimizeIf(ne, environment);
+}
+
+
+void LCodeGen::DeoptimizeIfMinusZero(DoubleRegister input,
+ LEnvironment* environment) {
+ __ TestForMinusZero(input);
+ DeoptimizeIf(vs, environment);
+}
+
+
+void LCodeGen::DeoptimizeIfBitSet(Register rt,
+ int bit,
+ LEnvironment* environment) {
+ DeoptimizeBranch(environment, reg_bit_set, rt, bit);
+}
+
+
+void LCodeGen::DeoptimizeIfBitClear(Register rt,
+ int bit,
+ LEnvironment* environment) {
+ DeoptimizeBranch(environment, reg_bit_clear, rt, bit);
+}
+
+
+void LCodeGen::EnsureSpaceForLazyDeopt(int space_needed) {
+ if (!info()->IsStub()) {
+ // Ensure that we have enough space after the previous lazy-bailout
+ // instruction for patching the code here.
+ intptr_t current_pc = masm()->pc_offset();
+
+ if (current_pc < (last_lazy_deopt_pc_ + space_needed)) {
+ ptrdiff_t padding_size = last_lazy_deopt_pc_ + space_needed - current_pc;
+ ASSERT((padding_size % kInstructionSize) == 0);
+ InstructionAccurateScope instruction_accurate(
+ masm(), padding_size / kInstructionSize);
+
+ while (padding_size > 0) {
+ __ nop();
+ padding_size -= kInstructionSize;
+ }
+ }
+ }
+ last_lazy_deopt_pc_ = masm()->pc_offset();
+}
+
+
+Register LCodeGen::ToRegister(LOperand* op) const {
+ // TODO(all): support zero register results, as ToRegister32.
+ ASSERT((op != NULL) && op->IsRegister());
+ return Register::FromAllocationIndex(op->index());
+}
+
+
+Register LCodeGen::ToRegister32(LOperand* op) const {
+ ASSERT(op != NULL);
+ if (op->IsConstantOperand()) {
+ // If this is a constant operand, the result must be the zero register.
+ ASSERT(ToInteger32(LConstantOperand::cast(op)) == 0);
+ return wzr;
+ } else {
+ return ToRegister(op).W();
+ }
+}
+
+
+Smi* LCodeGen::ToSmi(LConstantOperand* op) const {
+ HConstant* constant = chunk_->LookupConstant(op);
+ return Smi::FromInt(constant->Integer32Value());
+}
+
+
+DoubleRegister LCodeGen::ToDoubleRegister(LOperand* op) const {
+ ASSERT((op != NULL) && op->IsDoubleRegister());
+ return DoubleRegister::FromAllocationIndex(op->index());
+}
+
+
+Operand LCodeGen::ToOperand(LOperand* op) {
+ ASSERT(op != NULL);
+ if (op->IsConstantOperand()) {
+ LConstantOperand* const_op = LConstantOperand::cast(op);
+ HConstant* constant = chunk()->LookupConstant(const_op);
+ Representation r = chunk_->LookupLiteralRepresentation(const_op);
+ if (r.IsSmi()) {
+ ASSERT(constant->HasSmiValue());
+ return Operand(Smi::FromInt(constant->Integer32Value()));
+ } else if (r.IsInteger32()) {
+ ASSERT(constant->HasInteger32Value());
+ return Operand(constant->Integer32Value());
+ } else if (r.IsDouble()) {
+ Abort(kToOperandUnsupportedDoubleImmediate);
+ }
+ ASSERT(r.IsTagged());
+ return Operand(constant->handle(isolate()));
+ } else if (op->IsRegister()) {
+ return Operand(ToRegister(op));
+ } else if (op->IsDoubleRegister()) {
+ Abort(kToOperandIsDoubleRegisterUnimplemented);
+ return Operand(0);
+ }
+ // Stack slots not implemented, use ToMemOperand instead.
+ UNREACHABLE();
+ return Operand(0);
+}
+
+
+Operand LCodeGen::ToOperand32I(LOperand* op) {
+ return ToOperand32(op, SIGNED_INT32);
+}
+
+
+Operand LCodeGen::ToOperand32U(LOperand* op) {
+ return ToOperand32(op, UNSIGNED_INT32);
+}
+
+
+Operand LCodeGen::ToOperand32(LOperand* op, IntegerSignedness signedness) {
+ ASSERT(op != NULL);
+ if (op->IsRegister()) {
+ return Operand(ToRegister32(op));
+ } else if (op->IsConstantOperand()) {
+ LConstantOperand* const_op = LConstantOperand::cast(op);
+ HConstant* constant = chunk()->LookupConstant(const_op);
+ Representation r = chunk_->LookupLiteralRepresentation(const_op);
+ if (r.IsInteger32()) {
+ ASSERT(constant->HasInteger32Value());
+ return Operand(signedness == SIGNED_INT32
+ ? constant->Integer32Value()
+ : static_cast<uint32_t>(constant->Integer32Value()));
+ } else {
+ // Other constants not implemented.
+ Abort(kToOperand32UnsupportedImmediate);
+ }
+ }
+ // Other cases are not implemented.
+ UNREACHABLE();
+ return Operand(0);
+}
+
+
+static ptrdiff_t ArgumentsOffsetWithoutFrame(ptrdiff_t index) {
+ ASSERT(index < 0);
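+ // Negative indices denote arguments: -1 maps to the slot closest to the
+ // stack pointer (offset 0), -2 to the next slot, and so on.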
+ return -(index + 1) * kPointerSize;
+}
+
+
+MemOperand LCodeGen::ToMemOperand(LOperand* op) const {
+ ASSERT(op != NULL);
+ ASSERT(!op->IsRegister());
+ ASSERT(!op->IsDoubleRegister());
+ ASSERT(op->IsStackSlot() || op->IsDoubleStackSlot());
+ if (NeedsEagerFrame()) {
+ return MemOperand(fp, StackSlotOffset(op->index()));
+ } else {
+ // Without an eager stack frame, retrieve the parameter relative to the
+ // stack pointer.
+ return MemOperand(masm()->StackPointer(),
+ ArgumentsOffsetWithoutFrame(op->index()));
+ }
+}
+
+
+Handle<Object> LCodeGen::ToHandle(LConstantOperand* op) const {
+ HConstant* constant = chunk_->LookupConstant(op);
+ ASSERT(chunk_->LookupLiteralRepresentation(op).IsSmiOrTagged());
+ return constant->handle(isolate());
+}
+
+
+bool LCodeGen::IsSmi(LConstantOperand* op) const {
+ return chunk_->LookupLiteralRepresentation(op).IsSmi();
+}
+
+
+bool LCodeGen::IsInteger32Constant(LConstantOperand* op) const {
+ return chunk_->LookupLiteralRepresentation(op).IsSmiOrInteger32();
+}
+
+
+int32_t LCodeGen::ToInteger32(LConstantOperand* op) const {
+ HConstant* constant = chunk_->LookupConstant(op);
+ return constant->Integer32Value();
+}
+
+
+double LCodeGen::ToDouble(LConstantOperand* op) const {
+ HConstant* constant = chunk_->LookupConstant(op);
+ ASSERT(constant->HasDoubleValue());
+ return constant->DoubleValue();
+}
+
+
+Condition LCodeGen::TokenToCondition(Token::Value op, bool is_unsigned) {
+ Condition cond = nv;
+ switch (op) {
+ case Token::EQ:
+ case Token::EQ_STRICT:
+ cond = eq;
+ break;
+ case Token::NE:
+ case Token::NE_STRICT:
+ cond = ne;
+ break;
+ case Token::LT:
+ cond = is_unsigned ? lo : lt;
+ break;
+ case Token::GT:
+ cond = is_unsigned ? hi : gt;
+ break;
+ case Token::LTE:
+ cond = is_unsigned ? ls : le;
+ break;
+ case Token::GTE:
+ cond = is_unsigned ? hs : ge;
+ break;
+ case Token::IN:
+ case Token::INSTANCEOF:
+ default:
+ UNREACHABLE();
+ }
+ return cond;
+}
+
+
+template<class InstrType>
+void LCodeGen::EmitBranchGeneric(InstrType instr,
+ const BranchGenerator& branch) {
+ int left_block = instr->TrueDestination(chunk_);
+ int right_block = instr->FalseDestination(chunk_);
+
+ int next_block = GetNextEmittedBlock();
+
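+ // Pick the cheapest branch sequence: fall through when a destination is
+ // the next emitted block, otherwise branch (inverting the condition where
+ // that saves an unconditional jump).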
+ if (right_block == left_block) {
+ EmitGoto(left_block);
+ } else if (left_block == next_block) {
+ branch.EmitInverted(chunk_->GetAssemblyLabel(right_block));
+ } else if (right_block == next_block) {
+ branch.Emit(chunk_->GetAssemblyLabel(left_block));
+ } else {
+ branch.Emit(chunk_->GetAssemblyLabel(left_block));
+ __ B(chunk_->GetAssemblyLabel(right_block));
+ }
+}
+
+
+template<class InstrType>
+void LCodeGen::EmitBranch(InstrType instr, Condition condition) {
+ ASSERT((condition != al) && (condition != nv));
+ BranchOnCondition branch(this, condition);
+ EmitBranchGeneric(instr, branch);
+}
+
+
+template<class InstrType>
+void LCodeGen::EmitCompareAndBranch(InstrType instr,
+ Condition condition,
+ const Register& lhs,
+ const Operand& rhs) {
+ ASSERT((condition != al) && (condition != nv));
+ CompareAndBranch branch(this, condition, lhs, rhs);
+ EmitBranchGeneric(instr, branch);
+}
+
+
+template<class InstrType>
+void LCodeGen::EmitTestAndBranch(InstrType instr,
+ Condition condition,
+ const Register& value,
+ uint64_t mask) {
+ ASSERT((condition != al) && (condition != nv));
+ TestAndBranch branch(this, condition, value, mask);
+ EmitBranchGeneric(instr, branch);
+}
+
+
+template<class InstrType>
+void LCodeGen::EmitBranchIfNonZeroNumber(InstrType instr,
+ const FPRegister& value,
+ const FPRegister& scratch) {
+ BranchIfNonZeroNumber branch(this, value, scratch);
+ EmitBranchGeneric(instr, branch);
+}
+
+
+template<class InstrType>
+void LCodeGen::EmitBranchIfHeapNumber(InstrType instr,
+ const Register& value) {
+ BranchIfHeapNumber branch(this, value);
+ EmitBranchGeneric(instr, branch);
+}
+
+
+template<class InstrType>
+void LCodeGen::EmitBranchIfRoot(InstrType instr,
+ const Register& value,
+ Heap::RootListIndex index) {
+ BranchIfRoot branch(this, value, index);
+ EmitBranchGeneric(instr, branch);
+}
+
+
+void LCodeGen::DoGap(LGap* gap) {
+ for (int i = LGap::FIRST_INNER_POSITION;
+ i <= LGap::LAST_INNER_POSITION;
+ i++) {
+ LGap::InnerPosition inner_pos = static_cast<LGap::InnerPosition>(i);
+ LParallelMove* move = gap->GetParallelMove(inner_pos);
+ if (move != NULL) {
+ resolver_.Resolve(move);
+ }
+ }
+}
+
+
+void LCodeGen::DoAccessArgumentsAt(LAccessArgumentsAt* instr) {
+ Register arguments = ToRegister(instr->arguments());
+ Register result = ToRegister(instr->result());
+
+ // The pointer to the arguments array comes from DoArgumentsElements.
+ // It does not point directly to the arguments and there is an offset of
+ // two words that we must take into account when accessing an argument.
+ // Subtracting the index from the length accounts for one, so we add one more.
+
+ if (instr->length()->IsConstantOperand() &&
+ instr->index()->IsConstantOperand()) {
+ int index = ToInteger32(LConstantOperand::cast(instr->index()));
+ int length = ToInteger32(LConstantOperand::cast(instr->length()));
+ int offset = ((length - index) + 1) * kPointerSize;
+ __ Ldr(result, MemOperand(arguments, offset));
+ } else if (instr->index()->IsConstantOperand()) {
+ Register length = ToRegister32(instr->length());
+ int index = ToInteger32(LConstantOperand::cast(instr->index()));
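+ // The address computed below is arguments + ((length - index) + 1) *
+ // kPointerSize; the extra word is folded into the constant subtracted from
+ // length.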
+ int loc = index - 1;
+ if (loc != 0) {
+ __ Sub(result.W(), length, loc);
+ __ Ldr(result, MemOperand(arguments, result, UXTW, kPointerSizeLog2));
+ } else {
+ __ Ldr(result, MemOperand(arguments, length, UXTW, kPointerSizeLog2));
+ }
+ } else {
+ Register length = ToRegister32(instr->length());
+ Operand index = ToOperand32I(instr->index());
+ __ Sub(result.W(), length, index);
+ __ Add(result.W(), result.W(), 1);
+ __ Ldr(result, MemOperand(arguments, result, UXTW, kPointerSizeLog2));
+ }
+}
+
+
+void LCodeGen::DoAddE(LAddE* instr) {
+ Register result = ToRegister(instr->result());
+ Register left = ToRegister(instr->left());
+ Operand right = (instr->right()->IsConstantOperand())
+ ? ToInteger32(LConstantOperand::cast(instr->right()))
+ : Operand(ToRegister32(instr->right()), SXTW);
+
+ ASSERT(!instr->hydrogen()->CheckFlag(HValue::kCanOverflow));
+ __ Add(result, left, right);
+}
+
+
+void LCodeGen::DoAddI(LAddI* instr) {
+ bool can_overflow = instr->hydrogen()->CheckFlag(HValue::kCanOverflow);
+ Register result = ToRegister32(instr->result());
+ Register left = ToRegister32(instr->left());
+ Operand right = ToOperand32I(instr->right());
+ if (can_overflow) {
+ __ Adds(result, left, right);
+ DeoptimizeIf(vs, instr->environment());
+ } else {
+ __ Add(result, left, right);
+ }
+}
+
+
+void LCodeGen::DoAddS(LAddS* instr) {
+ bool can_overflow = instr->hydrogen()->CheckFlag(HValue::kCanOverflow);
+ Register result = ToRegister(instr->result());
+ Register left = ToRegister(instr->left());
+ Operand right = ToOperand(instr->right());
+ if (can_overflow) {
+ __ Adds(result, left, right);
+ DeoptimizeIf(vs, instr->environment());
+ } else {
+ __ Add(result, left, right);
+ }
+}
+
+
+void LCodeGen::DoAllocate(LAllocate* instr) {
+ class DeferredAllocate: public LDeferredCode {
+ public:
+ DeferredAllocate(LCodeGen* codegen, LAllocate* instr)
+ : LDeferredCode(codegen), instr_(instr) { }
+ virtual void Generate() { codegen()->DoDeferredAllocate(instr_); }
+ virtual LInstruction* instr() { return instr_; }
+ private:
+ LAllocate* instr_;
+ };
+
+ DeferredAllocate* deferred = new(zone()) DeferredAllocate(this, instr);
+
+ Register result = ToRegister(instr->result());
+ Register temp1 = ToRegister(instr->temp1());
+ Register temp2 = ToRegister(instr->temp2());
+
+ // Allocate memory for the object.
+ AllocationFlags flags = TAG_OBJECT;
+ if (instr->hydrogen()->MustAllocateDoubleAligned()) {
+ flags = static_cast<AllocationFlags>(flags | DOUBLE_ALIGNMENT);
+ }
+
+ if (instr->hydrogen()->IsOldPointerSpaceAllocation()) {
+ ASSERT(!instr->hydrogen()->IsOldDataSpaceAllocation());
+ ASSERT(!instr->hydrogen()->IsNewSpaceAllocation());
+ flags = static_cast<AllocationFlags>(flags | PRETENURE_OLD_POINTER_SPACE);
+ } else if (instr->hydrogen()->IsOldDataSpaceAllocation()) {
+ ASSERT(!instr->hydrogen()->IsNewSpaceAllocation());
+ flags = static_cast<AllocationFlags>(flags | PRETENURE_OLD_DATA_SPACE);
+ }
+
+ if (instr->size()->IsConstantOperand()) {
+ int32_t size = ToInteger32(LConstantOperand::cast(instr->size()));
+ if (size <= Page::kMaxRegularHeapObjectSize) {
+ __ Allocate(size, result, temp1, temp2, deferred->entry(), flags);
+ } else {
+ __ B(deferred->entry());
+ }
+ } else {
+ Register size = ToRegister32(instr->size());
+ __ Sxtw(size.X(), size);
+ __ Allocate(size.X(), result, temp1, temp2, deferred->entry(), flags);
+ }
+
+ __ Bind(deferred->exit());
+
+ if (instr->hydrogen()->MustPrefillWithFiller()) {
+ Register filler_count = temp1;
+ Register filler = temp2;
+ Register untagged_result = ToRegister(instr->temp3());
+
+ if (instr->size()->IsConstantOperand()) {
+ int32_t size = ToInteger32(LConstantOperand::cast(instr->size()));
+ __ Mov(filler_count, size / kPointerSize);
+ } else {
+ __ Lsr(filler_count.W(), ToRegister32(instr->size()), kPointerSizeLog2);
+ }
+
+ __ Sub(untagged_result, result, kHeapObjectTag);
+ __ Mov(filler, Operand(isolate()->factory()->one_pointer_filler_map()));
+ __ FillFields(untagged_result, filler_count, filler);
+ } else {
+ ASSERT(instr->temp3() == NULL);
+ }
+}
+
+
+void LCodeGen::DoDeferredAllocate(LAllocate* instr) {
+ // TODO(3095996): Get rid of this. For now, we need to make the
+ // result register contain a valid pointer because it is already
+ // contained in the register pointer map.
+ __ Mov(ToRegister(instr->result()), Smi::FromInt(0));
+
+ PushSafepointRegistersScope scope(this, Safepoint::kWithRegisters);
+ // We're in a SafepointRegistersScope so we can use any scratch registers.
+ Register size = x0;
+ if (instr->size()->IsConstantOperand()) {
+ __ Mov(size, ToSmi(LConstantOperand::cast(instr->size())));
+ } else {
+ __ SmiTag(size, ToRegister32(instr->size()).X());
+ }
+ int flags = AllocateDoubleAlignFlag::encode(
+ instr->hydrogen()->MustAllocateDoubleAligned());
+ if (instr->hydrogen()->IsOldPointerSpaceAllocation()) {
+ ASSERT(!instr->hydrogen()->IsOldDataSpaceAllocation());
+ ASSERT(!instr->hydrogen()->IsNewSpaceAllocation());
+ flags = AllocateTargetSpace::update(flags, OLD_POINTER_SPACE);
+ } else if (instr->hydrogen()->IsOldDataSpaceAllocation()) {
+ ASSERT(!instr->hydrogen()->IsNewSpaceAllocation());
+ flags = AllocateTargetSpace::update(flags, OLD_DATA_SPACE);
+ } else {
+ flags = AllocateTargetSpace::update(flags, NEW_SPACE);
+ }
+ __ Mov(x10, Smi::FromInt(flags));
+ __ Push(size, x10);
+
+ CallRuntimeFromDeferred(
+ Runtime::kHiddenAllocateInTargetSpace, 2, instr, instr->context());
+ __ StoreToSafepointRegisterSlot(x0, ToRegister(instr->result()));
+}
+
+
+void LCodeGen::DoApplyArguments(LApplyArguments* instr) {
+ Register receiver = ToRegister(instr->receiver());
+ Register function = ToRegister(instr->function());
+ Register length = ToRegister32(instr->length());
+
+ Register elements = ToRegister(instr->elements());
+ Register scratch = x5;
+ ASSERT(receiver.Is(x0)); // Used for parameter count.
+ ASSERT(function.Is(x1)); // Required by InvokeFunction.
+ ASSERT(ToRegister(instr->result()).Is(x0));
+ ASSERT(instr->IsMarkedAsCall());
+
+ // Copy the arguments to this function possibly from the
+ // adaptor frame below it.
+ const uint32_t kArgumentsLimit = 1 * KB;
+ __ Cmp(length, kArgumentsLimit);
+ DeoptimizeIf(hi, instr->environment());
+
+ // Push the receiver and use the register to keep the original
+ // number of arguments.
+ __ Push(receiver);
+ Register argc = receiver;
+ receiver = NoReg;
+ __ Sxtw(argc, length);
+ // The arguments are located one pointer size above elements.
+ __ Add(elements, elements, 1 * kPointerSize);
+
+ // Loop through the arguments pushing them onto the execution
+ // stack.
+ Label invoke, loop;
+ // length is a small non-negative integer, due to the test above.
+ __ Cbz(length, &invoke);
+ __ Bind(&loop);
+ __ Ldr(scratch, MemOperand(elements, length, SXTW, kPointerSizeLog2));
+ __ Push(scratch);
+ __ Subs(length, length, 1);
+ __ B(ne, &loop);
+
+ __ Bind(&invoke);
+ ASSERT(instr->HasPointerMap());
+ LPointerMap* pointers = instr->pointer_map();
+ SafepointGenerator safepoint_generator(this, pointers, Safepoint::kLazyDeopt);
+ // The number of arguments is stored in argc (receiver) which is x0, as
+ // expected by InvokeFunction.
+ ParameterCount actual(argc);
+ __ InvokeFunction(function, actual, CALL_FUNCTION, safepoint_generator);
+}
+
+
+void LCodeGen::DoArgumentsElements(LArgumentsElements* instr) {
+ Register result = ToRegister(instr->result());
+
+ if (instr->hydrogen()->from_inlined()) {
+ // When we are inside an inlined function, the arguments are the last things
+ // that have been pushed on the stack. Therefore the arguments array can be
+ // accessed directly from jssp.
+ // However, in the normal case it is accessed via fp, and there are two words
+ // on the stack between fp and the arguments (the saved lr and fp) that the
+ // LAccessArgumentsAt implementation takes into account.
+ // In the inlined case we need to subtract the size of two words from jssp to
+ // get a pointer that works with LAccessArgumentsAt.
+ ASSERT(masm()->StackPointer().Is(jssp));
+ __ Sub(result, jssp, 2 * kPointerSize);
+ } else {
+ ASSERT(instr->temp() != NULL);
+ Register previous_fp = ToRegister(instr->temp());
+
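+ // If the caller frame is an arguments adaptor frame, the arguments live
+ // there; otherwise they are addressed relative to the current fp.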
+ __ Ldr(previous_fp,
+ MemOperand(fp, StandardFrameConstants::kCallerFPOffset));
+ __ Ldr(result,
+ MemOperand(previous_fp, StandardFrameConstants::kContextOffset));
+ __ Cmp(result, Smi::FromInt(StackFrame::ARGUMENTS_ADAPTOR));
+ __ Csel(result, fp, previous_fp, ne);
+ }
+}
+
+
+void LCodeGen::DoArgumentsLength(LArgumentsLength* instr) {
+ Register elements = ToRegister(instr->elements());
+ Register result = ToRegister32(instr->result());
+ Label done;
+
+ // If there is no arguments adaptor frame, the number of arguments is fixed.
+ __ Cmp(fp, elements);
+ __ Mov(result, scope()->num_parameters());
+ __ B(eq, &done);
+
+ // Arguments adaptor frame present. Get argument length from there.
+ __ Ldr(result.X(), MemOperand(fp, StandardFrameConstants::kCallerFPOffset));
+ __ Ldr(result,
+ UntagSmiMemOperand(result.X(),
+ ArgumentsAdaptorFrameConstants::kLengthOffset));
+
+ // Argument length is in result register.
+ __ Bind(&done);
+}
+
+
+void LCodeGen::DoArithmeticD(LArithmeticD* instr) {
+ DoubleRegister left = ToDoubleRegister(instr->left());
+ DoubleRegister right = ToDoubleRegister(instr->right());
+ DoubleRegister result = ToDoubleRegister(instr->result());
+
+ switch (instr->op()) {
+ case Token::ADD: __ Fadd(result, left, right); break;
+ case Token::SUB: __ Fsub(result, left, right); break;
+ case Token::MUL: __ Fmul(result, left, right); break;
+ case Token::DIV: __ Fdiv(result, left, right); break;
+ case Token::MOD: {
+ // The ECMA-262 remainder operator is the remainder from a truncating
+ // (round-towards-zero) division. Note that this differs from IEEE-754.
+ //
+ // TODO(jbramley): See if it's possible to do this inline, rather than by
+ // calling a helper function. With frintz (to produce the intermediate
+ // quotient) and fmsub (to calculate the remainder without loss of
+ // precision), it should be possible. However, we would need support for
+ // fdiv in round-towards-zero mode, and the ARM64 simulator doesn't
+ // support that yet.
+ ASSERT(left.Is(d0));
+ ASSERT(right.Is(d1));
+ __ CallCFunction(
+ ExternalReference::mod_two_doubles_operation(isolate()),
+ 0, 2);
+ ASSERT(result.Is(d0));
+ break;
+ }
+ default:
+ UNREACHABLE();
+ break;
+ }
+}
+
+
+void LCodeGen::DoArithmeticT(LArithmeticT* instr) {
+ ASSERT(ToRegister(instr->context()).is(cp));
+ ASSERT(ToRegister(instr->left()).is(x1));
+ ASSERT(ToRegister(instr->right()).is(x0));
+ ASSERT(ToRegister(instr->result()).is(x0));
+
+ BinaryOpICStub stub(instr->op(), NO_OVERWRITE);
+ CallCode(stub.GetCode(isolate()), RelocInfo::CODE_TARGET, instr);
+}
+
+
+void LCodeGen::DoBitI(LBitI* instr) {
+ Register result = ToRegister32(instr->result());
+ Register left = ToRegister32(instr->left());
+ Operand right = ToOperand32U(instr->right());
+
+ switch (instr->op()) {
+ case Token::BIT_AND: __ And(result, left, right); break;
+ case Token::BIT_OR: __ Orr(result, left, right); break;
+ case Token::BIT_XOR: __ Eor(result, left, right); break;
+ default:
+ UNREACHABLE();
+ break;
+ }
+}
+
+
+void LCodeGen::DoBitS(LBitS* instr) {
+ Register result = ToRegister(instr->result());
+ Register left = ToRegister(instr->left());
+ Operand right = ToOperand(instr->right());
+
+ switch (instr->op()) {
+ case Token::BIT_AND: __ And(result, left, right); break;
+ case Token::BIT_OR: __ Orr(result, left, right); break;
+ case Token::BIT_XOR: __ Eor(result, left, right); break;
+ default:
+ UNREACHABLE();
+ break;
+ }
+}
+
+
+void LCodeGen::ApplyCheckIf(Condition cc, LBoundsCheck* check) {
+ if (FLAG_debug_code && check->hydrogen()->skip_check()) {
+ __ Assert(InvertCondition(cc), kEliminatedBoundsCheckFailed);
+ } else {
+ DeoptimizeIf(cc, check->environment());
+ }
+}
+
+
+void LCodeGen::DoBoundsCheck(LBoundsCheck *instr) {
+ if (instr->hydrogen()->skip_check()) return;
+
+ ASSERT(instr->hydrogen()->length()->representation().IsInteger32());
+ Register length = ToRegister32(instr->length());
+
+ if (instr->index()->IsConstantOperand()) {
+ int constant_index =
+ ToInteger32(LConstantOperand::cast(instr->index()));
+
+ if (instr->hydrogen()->length()->representation().IsSmi()) {
+ __ Cmp(length, Smi::FromInt(constant_index));
+ } else {
+ __ Cmp(length, constant_index);
+ }
+ } else {
+ ASSERT(instr->hydrogen()->index()->representation().IsInteger32());
+ __ Cmp(length, ToRegister32(instr->index()));
+ }
+ Condition condition = instr->hydrogen()->allow_equality() ? lo : ls;
+ ApplyCheckIf(condition, instr);
+}
+
+
+void LCodeGen::DoBranch(LBranch* instr) {
+ Representation r = instr->hydrogen()->value()->representation();
+ Label* true_label = instr->TrueLabel(chunk_);
+ Label* false_label = instr->FalseLabel(chunk_);
+
+ if (r.IsInteger32()) {
+ ASSERT(!info()->IsStub());
+ EmitCompareAndBranch(instr, ne, ToRegister32(instr->value()), 0);
+ } else if (r.IsSmi()) {
+ ASSERT(!info()->IsStub());
+ STATIC_ASSERT(kSmiTag == 0);
+ EmitCompareAndBranch(instr, ne, ToRegister(instr->value()), 0);
+ } else if (r.IsDouble()) {
+ DoubleRegister value = ToDoubleRegister(instr->value());
+ // Test the double value. Zero and NaN are false.
+ EmitBranchIfNonZeroNumber(instr, value, double_scratch());
+ } else {
+ ASSERT(r.IsTagged());
+ Register value = ToRegister(instr->value());
+ HType type = instr->hydrogen()->value()->type();
+
+ if (type.IsBoolean()) {
+ ASSERT(!info()->IsStub());
+ __ CompareRoot(value, Heap::kTrueValueRootIndex);
+ EmitBranch(instr, eq);
+ } else if (type.IsSmi()) {
+ ASSERT(!info()->IsStub());
+ EmitCompareAndBranch(instr, ne, value, Smi::FromInt(0));
+ } else if (type.IsJSArray()) {
+ ASSERT(!info()->IsStub());
+ EmitGoto(instr->TrueDestination(chunk()));
+ } else if (type.IsHeapNumber()) {
+ ASSERT(!info()->IsStub());
+ __ Ldr(double_scratch(), FieldMemOperand(value,
+ HeapNumber::kValueOffset));
+ // Test the double value. Zero and NaN are false.
+ EmitBranchIfNonZeroNumber(instr, double_scratch(), double_scratch());
+ } else if (type.IsString()) {
+ ASSERT(!info()->IsStub());
+ Register temp = ToRegister(instr->temp1());
+ __ Ldr(temp, FieldMemOperand(value, String::kLengthOffset));
+ EmitCompareAndBranch(instr, ne, temp, 0);
+ } else {
+ ToBooleanStub::Types expected = instr->hydrogen()->expected_input_types();
+ // Avoid deopts in the case where we've never executed this path before.
+ if (expected.IsEmpty()) expected = ToBooleanStub::Types::Generic();
+
+ if (expected.Contains(ToBooleanStub::UNDEFINED)) {
+ // undefined -> false.
+ __ JumpIfRoot(
+ value, Heap::kUndefinedValueRootIndex, false_label);
+ }
+
+ if (expected.Contains(ToBooleanStub::BOOLEAN)) {
+ // Boolean -> its value.
+ __ JumpIfRoot(
+ value, Heap::kTrueValueRootIndex, true_label);
+ __ JumpIfRoot(
+ value, Heap::kFalseValueRootIndex, false_label);
+ }
+
+ if (expected.Contains(ToBooleanStub::NULL_TYPE)) {
+ // 'null' -> false.
+ __ JumpIfRoot(
+ value, Heap::kNullValueRootIndex, false_label);
+ }
+
+ if (expected.Contains(ToBooleanStub::SMI)) {
+ // Smis: 0 -> false, all others -> true.
+ ASSERT(Smi::FromInt(0) == 0);
+ __ Cbz(value, false_label);
+ __ JumpIfSmi(value, true_label);
+ } else if (expected.NeedsMap()) {
+ // If we need a map later and have a smi, deopt.
+ DeoptimizeIfSmi(value, instr->environment());
+ }
+
+ Register map = NoReg;
+ Register scratch = NoReg;
+
+ if (expected.NeedsMap()) {
+ ASSERT((instr->temp1() != NULL) && (instr->temp2() != NULL));
+ map = ToRegister(instr->temp1());
+ scratch = ToRegister(instr->temp2());
+
+ __ Ldr(map, FieldMemOperand(value, HeapObject::kMapOffset));
+
+ if (expected.CanBeUndetectable()) {
+ // Undetectable -> false.
+ __ Ldrb(scratch, FieldMemOperand(map, Map::kBitFieldOffset));
+ __ TestAndBranchIfAnySet(
+ scratch, 1 << Map::kIsUndetectable, false_label);
+ }
+ }
+
+ if (expected.Contains(ToBooleanStub::SPEC_OBJECT)) {
+ // spec object -> true.
+ __ CompareInstanceType(map, scratch, FIRST_SPEC_OBJECT_TYPE);
+ __ B(ge, true_label);
+ }
+
+ if (expected.Contains(ToBooleanStub::STRING)) {
+ // String value -> false iff empty.
+ Label not_string;
+ __ CompareInstanceType(map, scratch, FIRST_NONSTRING_TYPE);
+ __ B(ge, &not_string);
+ __ Ldr(scratch, FieldMemOperand(value, String::kLengthOffset));
+ __ Cbz(scratch, false_label);
+ __ B(true_label);
+ __ Bind(&not_string);
+ }
+
+ if (expected.Contains(ToBooleanStub::SYMBOL)) {
+ // Symbol value -> true.
+ __ CompareInstanceType(map, scratch, SYMBOL_TYPE);
+ __ B(eq, true_label);
+ }
+
+ if (expected.Contains(ToBooleanStub::HEAP_NUMBER)) {
+ Label not_heap_number;
+ __ JumpIfNotRoot(map, Heap::kHeapNumberMapRootIndex, &not_heap_number);
+
+ __ Ldr(double_scratch(),
+ FieldMemOperand(value, HeapNumber::kValueOffset));
+ __ Fcmp(double_scratch(), 0.0);
+ // If we got a NaN (overflow bit is set), jump to the false branch.
+ __ B(vs, false_label);
+ __ B(eq, false_label);
+ __ B(true_label);
+ __ Bind(&not_heap_number);
+ }
+
+ if (!expected.IsGeneric()) {
+ // We've seen something for the first time -> deopt.
+ // This can only happen if we are not generic already.
+ Deoptimize(instr->environment());
+ }
+ }
+ }
+}
+
+
+void LCodeGen::CallKnownFunction(Handle<JSFunction> function,
+ int formal_parameter_count,
+ int arity,
+ LInstruction* instr,
+ Register function_reg) {
+ bool dont_adapt_arguments =
+ formal_parameter_count == SharedFunctionInfo::kDontAdaptArgumentsSentinel;
+ bool can_invoke_directly =
+ dont_adapt_arguments || formal_parameter_count == arity;
+
+ // The function interface relies on the following register assignments.
+ ASSERT(function_reg.Is(x1) || function_reg.IsNone());
+ Register arity_reg = x0;
+
+ LPointerMap* pointers = instr->pointer_map();
+
+ // If necessary, load the function object.
+ if (function_reg.IsNone()) {
+ function_reg = x1;
+ __ LoadObject(function_reg, function);
+ }
+
+ if (FLAG_debug_code) {
+ Label is_not_smi;
+ // Try to confirm that function_reg (x1) is a tagged pointer.
+ __ JumpIfNotSmi(function_reg, &is_not_smi);
+ __ Abort(kExpectedFunctionObject);
+ __ Bind(&is_not_smi);
+ }
+
+ if (can_invoke_directly) {
+ // Change context.
+ __ Ldr(cp, FieldMemOperand(function_reg, JSFunction::kContextOffset));
+
+ // Set the arguments count if adaptation is not needed. Assumes that x0 is
+ // available to write to at this point.
+ if (dont_adapt_arguments) {
+ __ Mov(arity_reg, arity);
+ }
+
+ // Invoke function.
+ __ Ldr(x10, FieldMemOperand(function_reg, JSFunction::kCodeEntryOffset));
+ __ Call(x10);
+
+ // Set up deoptimization.
+ RecordSafepointWithLazyDeopt(instr, RECORD_SIMPLE_SAFEPOINT);
+ } else {
+ SafepointGenerator generator(this, pointers, Safepoint::kLazyDeopt);
+ ParameterCount count(arity);
+ ParameterCount expected(formal_parameter_count);
+ __ InvokeFunction(function_reg, expected, count, CALL_FUNCTION, generator);
+ }
+}
+
+
+void LCodeGen::DoCallWithDescriptor(LCallWithDescriptor* instr) {
+ ASSERT(instr->IsMarkedAsCall());
+ ASSERT(ToRegister(instr->result()).Is(x0));
+
+ LPointerMap* pointers = instr->pointer_map();
+ SafepointGenerator generator(this, pointers, Safepoint::kLazyDeopt);
+
+ if (instr->target()->IsConstantOperand()) {
+ LConstantOperand* target = LConstantOperand::cast(instr->target());
+ Handle<Code> code = Handle<Code>::cast(ToHandle(target));
+ generator.BeforeCall(__ CallSize(code, RelocInfo::CODE_TARGET));
+ // TODO(all): on ARM we use a call descriptor to specify a storage mode
+ // but on ARM64 we only have one storage mode so it isn't necessary. Check
+ // this understanding is correct.
+ __ Call(code, RelocInfo::CODE_TARGET, TypeFeedbackId::None());
+ } else {
+ ASSERT(instr->target()->IsRegister());
+ Register target = ToRegister(instr->target());
+ generator.BeforeCall(__ CallSize(target));
+ __ Add(target, target, Code::kHeaderSize - kHeapObjectTag);
+ __ Call(target);
+ }
+ generator.AfterCall();
+}
+
+
+void LCodeGen::DoCallJSFunction(LCallJSFunction* instr) {
+ ASSERT(instr->IsMarkedAsCall());
+ ASSERT(ToRegister(instr->function()).is(x1));
+
+ if (instr->hydrogen()->pass_argument_count()) {
+ __ Mov(x0, Operand(instr->arity()));
+ }
+
+ // Change context.
+ __ Ldr(cp, FieldMemOperand(x1, JSFunction::kContextOffset));
+
+ // Load the code entry address.
+ __ Ldr(x10, FieldMemOperand(x1, JSFunction::kCodeEntryOffset));
+ __ Call(x10);
+
+ RecordSafepointWithLazyDeopt(instr, RECORD_SIMPLE_SAFEPOINT);
+}
+
+
+void LCodeGen::DoCallRuntime(LCallRuntime* instr) {
+ CallRuntime(instr->function(), instr->arity(), instr);
+}
+
+
+void LCodeGen::DoCallStub(LCallStub* instr) {
+ ASSERT(ToRegister(instr->context()).is(cp));
+ ASSERT(ToRegister(instr->result()).is(x0));
+ switch (instr->hydrogen()->major_key()) {
+ case CodeStub::RegExpExec: {
+ RegExpExecStub stub;
+ CallCode(stub.GetCode(isolate()), RelocInfo::CODE_TARGET, instr);
+ break;
+ }
+ case CodeStub::SubString: {
+ SubStringStub stub;
+ CallCode(stub.GetCode(isolate()), RelocInfo::CODE_TARGET, instr);
+ break;
+ }
+ case CodeStub::StringCompare: {
+ StringCompareStub stub;
+ CallCode(stub.GetCode(isolate()), RelocInfo::CODE_TARGET, instr);
+ break;
+ }
+ default:
+ UNREACHABLE();
+ }
+}
+
+
+void LCodeGen::DoUnknownOSRValue(LUnknownOSRValue* instr) {
+ GenerateOsrPrologue();
+}
+
+
+void LCodeGen::DoDeferredInstanceMigration(LCheckMaps* instr, Register object) {
+ Register temp = ToRegister(instr->temp());
+ {
+ PushSafepointRegistersScope scope(this, Safepoint::kWithRegisters);
+ __ Push(object);
+ __ Mov(cp, 0);
+ __ CallRuntimeSaveDoubles(Runtime::kTryMigrateInstance);
+ RecordSafepointWithRegisters(
+ instr->pointer_map(), 1, Safepoint::kNoLazyDeopt);
+ __ StoreToSafepointRegisterSlot(x0, temp);
+ }
+ DeoptimizeIfSmi(temp, instr->environment());
+}
+
+
+void LCodeGen::DoCheckMaps(LCheckMaps* instr) {
+ class DeferredCheckMaps: public LDeferredCode {
+ public:
+ DeferredCheckMaps(LCodeGen* codegen, LCheckMaps* instr, Register object)
+ : LDeferredCode(codegen), instr_(instr), object_(object) {
+ SetExit(check_maps());
+ }
+ virtual void Generate() {
+ codegen()->DoDeferredInstanceMigration(instr_, object_);
+ }
+ Label* check_maps() { return &check_maps_; }
+ virtual LInstruction* instr() { return instr_; }
+ private:
+ LCheckMaps* instr_;
+ Label check_maps_;
+ Register object_;
+ };
+
+ if (instr->hydrogen()->CanOmitMapChecks()) {
+ ASSERT(instr->value() == NULL);
+ ASSERT(instr->temp() == NULL);
+ return;
+ }
+
+ Register object = ToRegister(instr->value());
+ Register map_reg = ToRegister(instr->temp());
+
+ __ Ldr(map_reg, FieldMemOperand(object, HeapObject::kMapOffset));
+
+ DeferredCheckMaps* deferred = NULL;
+ if (instr->hydrogen()->has_migration_target()) {
+ deferred = new(zone()) DeferredCheckMaps(this, instr, object);
+ __ Bind(deferred->check_maps());
+ }
+
+ UniqueSet<Map> map_set = instr->hydrogen()->map_set();
+ Label success;
+ for (int i = 0; i < map_set.size(); i++) {
+ Handle<Map> map = map_set.at(i).handle();
+ __ CompareMap(map_reg, map);
+ __ B(eq, &success);
+ }
+
+ // We didn't match a map.
+ if (instr->hydrogen()->has_migration_target()) {
+ __ B(deferred->entry());
+ } else {
+ Deoptimize(instr->environment());
+ }
+
+ __ Bind(&success);
+}
+
+
+void LCodeGen::DoCheckNonSmi(LCheckNonSmi* instr) {
+ if (!instr->hydrogen()->value()->IsHeapObject()) {
+ DeoptimizeIfSmi(ToRegister(instr->value()), instr->environment());
+ }
+}
+
+
+void LCodeGen::DoCheckSmi(LCheckSmi* instr) {
+ Register value = ToRegister(instr->value());
+ ASSERT(!instr->result() || ToRegister(instr->result()).Is(value));
+ DeoptimizeIfNotSmi(value, instr->environment());
+}
+
+
+void LCodeGen::DoCheckInstanceType(LCheckInstanceType* instr) {
+ Register input = ToRegister(instr->value());
+ Register scratch = ToRegister(instr->temp());
+
+ __ Ldr(scratch, FieldMemOperand(input, HeapObject::kMapOffset));
+ __ Ldrb(scratch, FieldMemOperand(scratch, Map::kInstanceTypeOffset));
+
+ if (instr->hydrogen()->is_interval_check()) {
+ InstanceType first, last;
+ instr->hydrogen()->GetCheckInterval(&first, &last);
+
+ __ Cmp(scratch, first);
+ if (first == last) {
+ // If there is only one type in the interval check for equality.
+ DeoptimizeIf(ne, instr->environment());
+ } else if (last == LAST_TYPE) {
+ // We don't need to compare with the higher bound of the interval.
+ DeoptimizeIf(lo, instr->environment());
+ } else {
+ // If we are below the lower bound, set the C flag and clear the Z flag
+ // to force a deopt.
+ __ Ccmp(scratch, last, CFlag, hs);
+ DeoptimizeIf(hi, instr->environment());
+ }
+ } else {
+ uint8_t mask;
+ uint8_t tag;
+ instr->hydrogen()->GetCheckMaskAndTag(&mask, &tag);
+
+ if (IsPowerOf2(mask)) {
+ ASSERT((tag == 0) || (tag == mask));
+ if (tag == 0) {
+ DeoptimizeIfBitSet(scratch, MaskToBit(mask), instr->environment());
+ } else {
+ DeoptimizeIfBitClear(scratch, MaskToBit(mask), instr->environment());
+ }
+ } else {
+ if (tag == 0) {
+ __ Tst(scratch, mask);
+ } else {
+ __ And(scratch, scratch, mask);
+ __ Cmp(scratch, tag);
+ }
+ DeoptimizeIf(ne, instr->environment());
+ }
+ }
+}
+
+
+void LCodeGen::DoClampDToUint8(LClampDToUint8* instr) {
+ DoubleRegister input = ToDoubleRegister(instr->unclamped());
+ Register result = ToRegister32(instr->result());
+ __ ClampDoubleToUint8(result, input, double_scratch());
+}
+
+
+void LCodeGen::DoClampIToUint8(LClampIToUint8* instr) {
+ Register input = ToRegister32(instr->unclamped());
+ Register result = ToRegister32(instr->result());
+ __ ClampInt32ToUint8(result, input);
+}
+
+
+void LCodeGen::DoClampTToUint8(LClampTToUint8* instr) {
+ Register input = ToRegister(instr->unclamped());
+ Register result = ToRegister32(instr->result());
+ Register scratch = ToRegister(instr->temp1());
+ Label done;
+
+ // Both smi and heap number cases are handled.
+ Label is_not_smi;
+ __ JumpIfNotSmi(input, &is_not_smi);
+ __ SmiUntag(result.X(), input);
+ __ ClampInt32ToUint8(result);
+ __ B(&done);
+
+ __ Bind(&is_not_smi);
+
+ // Check for heap number.
+ Label is_heap_number;
+ __ Ldr(scratch, FieldMemOperand(input, HeapObject::kMapOffset));
+ __ JumpIfRoot(scratch, Heap::kHeapNumberMapRootIndex, &is_heap_number);
+
+ // Check for undefined. Undefined is converted to zero for the clamping
+ // conversion.
+ DeoptimizeIfNotRoot(input, Heap::kUndefinedValueRootIndex,
+ instr->environment());
+ __ Mov(result, 0);
+ __ B(&done);
+
+ // Heap number case.
+ __ Bind(&is_heap_number);
+ DoubleRegister dbl_scratch = double_scratch();
+ DoubleRegister dbl_scratch2 = ToDoubleRegister(instr->temp2());
+ __ Ldr(dbl_scratch, FieldMemOperand(input, HeapNumber::kValueOffset));
+ __ ClampDoubleToUint8(result, dbl_scratch, dbl_scratch2);
+
+ __ Bind(&done);
+}
+
+
+void LCodeGen::DoDoubleBits(LDoubleBits* instr) {
+ DoubleRegister value_reg = ToDoubleRegister(instr->value());
+ Register result_reg = ToRegister(instr->result());
+ if (instr->hydrogen()->bits() == HDoubleBits::HIGH) {
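+ // Move the raw 64-bit representation of the double into a core register,
+ // then shift out the low word to leave the high 32 bits.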
+ __ Fmov(result_reg, value_reg);
+ __ Mov(result_reg, Operand(result_reg, LSR, 32));
+ } else {
+ __ Fmov(result_reg.W(), value_reg.S());
+ }
+}
+
+
+void LCodeGen::DoConstructDouble(LConstructDouble* instr) {
+ Register hi_reg = ToRegister(instr->hi());
+ Register lo_reg = ToRegister(instr->lo());
+ Register temp = ToRegister(instr->temp());
+ DoubleRegister result_reg = ToDoubleRegister(instr->result());
+
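+ // Assemble the 64-bit bit pattern (high word in bits 63-32, low word in
+ // bits 31-0) in a core register, then move it to the result FP register.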
+ __ And(temp, lo_reg, Operand(0xffffffff));
+ __ Orr(temp, temp, Operand(hi_reg, LSL, 32));
+ __ Fmov(result_reg, temp);
+}
+
+
+void LCodeGen::DoClassOfTestAndBranch(LClassOfTestAndBranch* instr) {
+ Handle<String> class_name = instr->hydrogen()->class_name();
+ Label* true_label = instr->TrueLabel(chunk_);
+ Label* false_label = instr->FalseLabel(chunk_);
+ Register input = ToRegister(instr->value());
+ Register scratch1 = ToRegister(instr->temp1());
+ Register scratch2 = ToRegister(instr->temp2());
+
+ __ JumpIfSmi(input, false_label);
+
+ Register map = scratch2;
+ if (class_name->IsUtf8EqualTo(CStrVector("Function"))) {
+ // Assuming the following assertions, we can use the same compares to test
+ // for both being a function type and being in the object type range.
+ STATIC_ASSERT(NUM_OF_CALLABLE_SPEC_OBJECT_TYPES == 2);
+ STATIC_ASSERT(FIRST_NONCALLABLE_SPEC_OBJECT_TYPE ==
+ FIRST_SPEC_OBJECT_TYPE + 1);
+ STATIC_ASSERT(LAST_NONCALLABLE_SPEC_OBJECT_TYPE ==
+ LAST_SPEC_OBJECT_TYPE - 1);
+ STATIC_ASSERT(LAST_SPEC_OBJECT_TYPE == LAST_TYPE);
+
+ // We expect CompareObjectType to load the object instance type in scratch1.
+ __ CompareObjectType(input, map, scratch1, FIRST_SPEC_OBJECT_TYPE);
+ __ B(lt, false_label);
+ __ B(eq, true_label);
+ __ Cmp(scratch1, LAST_SPEC_OBJECT_TYPE);
+ __ B(eq, true_label);
+ } else {
+ __ IsObjectJSObjectType(input, map, scratch1, false_label);
+ }
+
+ // Now we are in the FIRST-LAST_NONCALLABLE_SPEC_OBJECT_TYPE range.
+ // Check if the constructor in the map is a function.
+ __ Ldr(scratch1, FieldMemOperand(map, Map::kConstructorOffset));
+
+ // Objects with a non-function constructor have class 'Object'.
+ if (class_name->IsUtf8EqualTo(CStrVector("Object"))) {
+ __ JumpIfNotObjectType(
+ scratch1, scratch2, scratch2, JS_FUNCTION_TYPE, true_label);
+ } else {
+ __ JumpIfNotObjectType(
+ scratch1, scratch2, scratch2, JS_FUNCTION_TYPE, false_label);
+ }
+
+ // The constructor function is in scratch1. Get its instance class name.
+ __ Ldr(scratch1,
+ FieldMemOperand(scratch1, JSFunction::kSharedFunctionInfoOffset));
+ __ Ldr(scratch1,
+ FieldMemOperand(scratch1,
+ SharedFunctionInfo::kInstanceClassNameOffset));
+
+ // The class name we are testing against is internalized since it's a literal.
+ // The name in the constructor is internalized because of the way the context
+ // is booted. This routine isn't expected to work for random API-created
+ // classes and it doesn't have to because you can't access it with natives
+ // syntax. Since both sides are internalized it is sufficient to use an
+ // identity comparison.
+ EmitCompareAndBranch(instr, eq, scratch1, Operand(class_name));
+}
+
+
+void LCodeGen::DoCmpHoleAndBranchD(LCmpHoleAndBranchD* instr) {
+ ASSERT(instr->hydrogen()->representation().IsDouble());
+ FPRegister object = ToDoubleRegister(instr->object());
+ Register temp = ToRegister(instr->temp());
+
+ // If we don't have a NaN, we don't have the hole, so branch now to avoid the
+ // (relatively expensive) hole-NaN check.
+ __ Fcmp(object, object);
+ __ B(vc, instr->FalseLabel(chunk_));
+
+ // We have a NaN, but is it the hole?
+ __ Fmov(temp, object);
+ EmitCompareAndBranch(instr, eq, temp, kHoleNanInt64);
+}
+
+
+void LCodeGen::DoCmpHoleAndBranchT(LCmpHoleAndBranchT* instr) {
+ ASSERT(instr->hydrogen()->representation().IsTagged());
+ Register object = ToRegister(instr->object());
+
+ EmitBranchIfRoot(instr, object, Heap::kTheHoleValueRootIndex);
+}
+
+
+void LCodeGen::DoCmpMapAndBranch(LCmpMapAndBranch* instr) {
+ Register value = ToRegister(instr->value());
+ Register map = ToRegister(instr->temp());
+
+ __ Ldr(map, FieldMemOperand(value, HeapObject::kMapOffset));
+ EmitCompareAndBranch(instr, eq, map, Operand(instr->map()));
+}
+
+
+void LCodeGen::DoCompareMinusZeroAndBranch(LCompareMinusZeroAndBranch* instr) {
+ Representation rep = instr->hydrogen()->value()->representation();
+ ASSERT(!rep.IsInteger32());
+ Register scratch = ToRegister(instr->temp());
+
+ if (rep.IsDouble()) {
+ __ JumpIfMinusZero(ToDoubleRegister(instr->value()),
+ instr->TrueLabel(chunk()));
+ } else {
+ Register value = ToRegister(instr->value());
+ __ CheckMap(value, scratch, Heap::kHeapNumberMapRootIndex,
+ instr->FalseLabel(chunk()), DO_SMI_CHECK);
+ __ Ldr(double_scratch(), FieldMemOperand(value, HeapNumber::kValueOffset));
+ __ JumpIfMinusZero(double_scratch(), instr->TrueLabel(chunk()));
+ }
+ EmitGoto(instr->FalseDestination(chunk()));
+}
+
+
+void LCodeGen::DoCompareNumericAndBranch(LCompareNumericAndBranch* instr) {
+ LOperand* left = instr->left();
+ LOperand* right = instr->right();
+ Condition cond = TokenToCondition(instr->op(), false);
+
+ if (left->IsConstantOperand() && right->IsConstantOperand()) {
+ // We can statically evaluate the comparison.
+ double left_val = ToDouble(LConstantOperand::cast(left));
+ double right_val = ToDouble(LConstantOperand::cast(right));
+ int next_block = EvalComparison(instr->op(), left_val, right_val) ?
+ instr->TrueDestination(chunk_) : instr->FalseDestination(chunk_);
+ EmitGoto(next_block);
+ } else {
+ if (instr->is_double()) {
+ if (right->IsConstantOperand()) {
+ __ Fcmp(ToDoubleRegister(left),
+ ToDouble(LConstantOperand::cast(right)));
+ } else if (left->IsConstantOperand()) {
+ // Transpose the operands and reverse the condition.
+ __ Fcmp(ToDoubleRegister(right),
+ ToDouble(LConstantOperand::cast(left)));
+ cond = ReverseConditionForCmp(cond);
+ } else {
+ __ Fcmp(ToDoubleRegister(left), ToDoubleRegister(right));
+ }
+
+ // If a NaN is involved, i.e. the result is unordered (V set),
+ // jump to the false block label.
+ __ B(vs, instr->FalseLabel(chunk_));
+ EmitBranch(instr, cond);
+ } else {
+ if (instr->hydrogen_value()->representation().IsInteger32()) {
+ if (right->IsConstantOperand()) {
+ EmitCompareAndBranch(instr,
+ cond,
+ ToRegister32(left),
+ ToOperand32I(right));
+ } else {
+ // Transpose the operands and reverse the condition.
+ EmitCompareAndBranch(instr,
+ ReverseConditionForCmp(cond),
+ ToRegister32(right),
+ ToOperand32I(left));
+ }
+ } else {
+ ASSERT(instr->hydrogen_value()->representation().IsSmi());
+ if (right->IsConstantOperand()) {
+ int32_t value = ToInteger32(LConstantOperand::cast(right));
+ EmitCompareAndBranch(instr,
+ cond,
+ ToRegister(left),
+ Operand(Smi::FromInt(value)));
+ } else if (left->IsConstantOperand()) {
+ // Transpose the operands and reverse the condition.
+ int32_t value = ToInteger32(LConstantOperand::cast(left));
+ EmitCompareAndBranch(instr,
+ ReverseConditionForCmp(cond),
+ ToRegister(right),
+ Operand(Smi::FromInt(value)));
+ } else {
+ EmitCompareAndBranch(instr,
+ cond,
+ ToRegister(left),
+ ToRegister(right));
+ }
+ }
+ }
+ }
+}
+
+
+void LCodeGen::DoCmpObjectEqAndBranch(LCmpObjectEqAndBranch* instr) {
+ Register left = ToRegister(instr->left());
+ Register right = ToRegister(instr->right());
+ EmitCompareAndBranch(instr, eq, left, right);
+}
+
+
+void LCodeGen::DoCmpT(LCmpT* instr) {
+ ASSERT(ToRegister(instr->context()).is(cp));
+ Token::Value op = instr->op();
+ Condition cond = TokenToCondition(op, false);
+
+ ASSERT(ToRegister(instr->left()).Is(x1));
+ ASSERT(ToRegister(instr->right()).Is(x0));
+ Handle<Code> ic = CompareIC::GetUninitialized(isolate(), op);
+ CallCode(ic, RelocInfo::CODE_TARGET, instr);
+ // Signal that we don't inline smi code before this stub.
+ InlineSmiCheckInfo::EmitNotInlined(masm());
+
+ // Return true or false depending on CompareIC result.
+ // This instruction is marked as call. We can clobber any register.
+ ASSERT(instr->IsMarkedAsCall());
+ __ LoadTrueFalseRoots(x1, x2);
+ __ Cmp(x0, 0);
+ __ Csel(ToRegister(instr->result()), x1, x2, cond);
+}
+
+
+void LCodeGen::DoConstantD(LConstantD* instr) {
+ ASSERT(instr->result()->IsDoubleRegister());
+ DoubleRegister result = ToDoubleRegister(instr->result());
+ __ Fmov(result, instr->value());
+}
+
+
+void LCodeGen::DoConstantE(LConstantE* instr) {
+ __ Mov(ToRegister(instr->result()), Operand(instr->value()));
+}
+
+
+void LCodeGen::DoConstantI(LConstantI* instr) {
+ ASSERT(is_int32(instr->value()));
+ // Cast the value here to ensure that the value isn't sign extended by the
+ // implicit Operand constructor.
+ __ Mov(ToRegister32(instr->result()), static_cast<uint32_t>(instr->value()));
+}
+
+
+void LCodeGen::DoConstantS(LConstantS* instr) {
+ __ Mov(ToRegister(instr->result()), Operand(instr->value()));
+}
+
+
+void LCodeGen::DoConstantT(LConstantT* instr) {
+ Handle<Object> value = instr->value(isolate());
+ AllowDeferredHandleDereference smi_check;
+ __ LoadObject(ToRegister(instr->result()), value);
+}
+
+
+void LCodeGen::DoContext(LContext* instr) {
+ // If there is a non-return use, the context must be moved to a register.
+ Register result = ToRegister(instr->result());
+ if (info()->IsOptimizing()) {
+ __ Ldr(result, MemOperand(fp, StandardFrameConstants::kContextOffset));
+ } else {
+ // If there is no frame, the context must be in cp.
+ ASSERT(result.is(cp));
+ }
+}
+
+
+void LCodeGen::DoCheckValue(LCheckValue* instr) {
+ Register reg = ToRegister(instr->value());
+ Handle<HeapObject> object = instr->hydrogen()->object().handle();
+ AllowDeferredHandleDereference smi_check;
+ if (isolate()->heap()->InNewSpace(*object)) {
+ UseScratchRegisterScope temps(masm());
+ Register temp = temps.AcquireX();
+ Handle<Cell> cell = isolate()->factory()->NewCell(object);
+ __ Mov(temp, Operand(Handle<Object>(cell)));
+ __ Ldr(temp, FieldMemOperand(temp, Cell::kValueOffset));
+ __ Cmp(reg, temp);
+ } else {
+ __ Cmp(reg, Operand(object));
+ }
+ DeoptimizeIf(ne, instr->environment());
+}
+
+
+void LCodeGen::DoLazyBailout(LLazyBailout* instr) {
+ last_lazy_deopt_pc_ = masm()->pc_offset();
+ ASSERT(instr->HasEnvironment());
+ LEnvironment* env = instr->environment();
+ RegisterEnvironmentForDeoptimization(env, Safepoint::kLazyDeopt);
+ safepoints_.RecordLazyDeoptimizationIndex(env->deoptimization_index());
+}
+
+
+void LCodeGen::DoDateField(LDateField* instr) {
+ Register object = ToRegister(instr->date());
+ Register result = ToRegister(instr->result());
+ Register temp1 = x10;
+ Register temp2 = x11;
+ Smi* index = instr->index();
+ Label runtime, done, deopt, obj_ok;
+
+ ASSERT(object.is(result) && object.Is(x0));
+ ASSERT(instr->IsMarkedAsCall());
+
+ __ JumpIfSmi(object, &deopt);
+ __ CompareObjectType(object, temp1, temp1, JS_DATE_TYPE);
+ __ B(eq, &obj_ok);
+
+ __ Bind(&deopt);
+ Deoptimize(instr->environment());
+
+ __ Bind(&obj_ok);
+ if (index->value() == 0) {
+ __ Ldr(result, FieldMemOperand(object, JSDate::kValueOffset));
+ } else {
+ if (index->value() < JSDate::kFirstUncachedField) {
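+ // Cached fields are only valid while the object's cache stamp matches the
+ // global date cache stamp; otherwise fall back to the runtime call below.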
+ ExternalReference stamp = ExternalReference::date_cache_stamp(isolate());
+ __ Mov(temp1, Operand(stamp));
+ __ Ldr(temp1, MemOperand(temp1));
+ __ Ldr(temp2, FieldMemOperand(object, JSDate::kCacheStampOffset));
+ __ Cmp(temp1, temp2);
+ __ B(ne, &runtime);
+ __ Ldr(result, FieldMemOperand(object, JSDate::kValueOffset +
+ kPointerSize * index->value()));
+ __ B(&done);
+ }
+
+ __ Bind(&runtime);
+ __ Mov(x1, Operand(index));
+ __ CallCFunction(ExternalReference::get_date_field_function(isolate()), 2);
+ }
+
+ __ Bind(&done);
+}
+
+
+void LCodeGen::DoDeoptimize(LDeoptimize* instr) {
+ Deoptimizer::BailoutType type = instr->hydrogen()->type();
+ // TODO(danno): Stubs expect all deopts to be lazy for historical reasons (the
+ // needed return address), even though the implementation of LAZY and EAGER is
+ // now identical. When LAZY is eventually completely folded into EAGER, remove
+ // the special case below.
+ if (info()->IsStub() && (type == Deoptimizer::EAGER)) {
+ type = Deoptimizer::LAZY;
+ }
+
+ Comment(";;; deoptimize: %s", instr->hydrogen()->reason());
+ Deoptimize(instr->environment(), &type);
+}
+
+
+void LCodeGen::DoDivByPowerOf2I(LDivByPowerOf2I* instr) {
+ Register dividend = ToRegister32(instr->dividend());
+ int32_t divisor = instr->divisor();
+ Register result = ToRegister32(instr->result());
+ ASSERT(divisor == kMinInt || (divisor != 0 && IsPowerOf2(Abs(divisor))));
+ ASSERT(!result.is(dividend));
+
+ // Check for (0 / -x) that will produce negative zero.
+ HDiv* hdiv = instr->hydrogen();
+ if (hdiv->CheckFlag(HValue::kBailoutOnMinusZero) && divisor < 0) {
+ __ Cmp(dividend, 0);
+ DeoptimizeIf(eq, instr->environment());
+ }
+ // Check for (kMinInt / -1).
+ if (hdiv->CheckFlag(HValue::kCanOverflow) && divisor == -1) {
+ __ Cmp(dividend, kMinInt);
+ DeoptimizeIf(eq, instr->environment());
+ }
+ // Deoptimize if remainder will not be 0.
+ if (!hdiv->CheckFlag(HInstruction::kAllUsesTruncatingToInt32) &&
+ divisor != 1 && divisor != -1) {
+ int32_t mask = divisor < 0 ? -(divisor + 1) : (divisor - 1);
+ __ Tst(dividend, mask);
+ DeoptimizeIf(ne, instr->environment());
+ }
+
+ if (divisor == -1) { // Nice shortcut, not needed for correctness.
+ __ Neg(result, dividend);
+ return;
+ }
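+ // A plain arithmetic shift rounds towards -infinity, so negative dividends
+ // are biased by (2^shift - 1) first (derived from the sign bits via LSR),
+ // which makes the subsequent ASR round towards zero. For example, with
+ // shift == 2 and dividend == -5: (-5 + 3) >> 2 == -1, whereas -5 >> 2
+ // alone would give -2.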
+ int32_t shift = WhichPowerOf2Abs(divisor);
+ if (shift == 0) {
+ __ Mov(result, dividend);
+ } else if (shift == 1) {
+ __ Add(result, dividend, Operand(dividend, LSR, 31));
+ } else {
+ __ Mov(result, Operand(dividend, ASR, 31));
+ __ Add(result, dividend, Operand(result, LSR, 32 - shift));
+ }
+ if (shift > 0) __ Mov(result, Operand(result, ASR, shift));
+ if (divisor < 0) __ Neg(result, result);
+}
+
+
+void LCodeGen::DoDivByConstI(LDivByConstI* instr) {
+ Register dividend = ToRegister32(instr->dividend());
+ int32_t divisor = instr->divisor();
+ Register result = ToRegister32(instr->result());
+ ASSERT(!AreAliased(dividend, result));
+
+ if (divisor == 0) {
+ Deoptimize(instr->environment());
+ return;
+ }
+
+ // Check for (0 / -x) that will produce negative zero.
+ HDiv* hdiv = instr->hydrogen();
+ if (hdiv->CheckFlag(HValue::kBailoutOnMinusZero) && divisor < 0) {
+ DeoptimizeIfZero(dividend, instr->environment());
+ }
+
+ __ TruncatingDiv(result, dividend, Abs(divisor));
+ if (divisor < 0) __ Neg(result, result);
+
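+ // When the division is not known to be truncating, check that it was exact:
+ // Smsubl computes temp = dividend - result * divisor, and a non-zero
+ // remainder deoptimizes. For example, 7 / 3 leaves result == 2 and
+ // temp == 1 here, so it bails out rather than silently truncating.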
+ if (!hdiv->CheckFlag(HInstruction::kAllUsesTruncatingToInt32)) {
+ Register temp = ToRegister32(instr->temp());
+ ASSERT(!AreAliased(dividend, result, temp));
+ __ Sxtw(dividend.X(), dividend);
+ __ Mov(temp, divisor);
+ __ Smsubl(temp.X(), result, temp, dividend.X());
+ DeoptimizeIfNotZero(temp, instr->environment());
+ }
+}
+
+
+void LCodeGen::DoDivI(LDivI* instr) {
+ HBinaryOperation* hdiv = instr->hydrogen();
+ Register dividend = ToRegister32(instr->left());
+ Register divisor = ToRegister32(instr->right());
+ Register result = ToRegister32(instr->result());
+
+ // Issue the division first, and then check for any deopt cases whilst the
+ // result is computed.
+ __ Sdiv(result, dividend, divisor);
+
+ if (hdiv->CheckFlag(HValue::kAllUsesTruncatingToInt32)) {
+ ASSERT_EQ(NULL, instr->temp());
+ return;
+ }
+
+ Label deopt;
+ // Check for x / 0.
+ if (hdiv->CheckFlag(HValue::kCanBeDivByZero)) {
+ __ Cbz(divisor, &deopt);
+ }
+
+ // Check for (0 / -x) as that will produce negative zero.
+ if (hdiv->CheckFlag(HValue::kBailoutOnMinusZero)) {
+ __ Cmp(divisor, 0);
+
+ // If the divisor < 0 (mi), compare the dividend and deopt if it is
+ // zero, i.e. a zero dividend with a negative divisor deopts.
+ // If the divisor >= 0 (pl, the opposite of mi), set the flags to
+ // condition ne so we don't deopt, i.e. a non-negative divisor doesn't deopt.
+ __ Ccmp(dividend, 0, NoFlag, mi);
+ __ B(eq, &deopt);
+ }
+
+ // Check for (kMinInt / -1).
+ if (hdiv->CheckFlag(HValue::kCanOverflow)) {
+ // Test dividend for kMinInt by subtracting one (cmp) and checking for
+ // overflow.
+ __ Cmp(dividend, 1);
+ // If overflow is set, i.e. dividend == kMinInt, compare the divisor with
+ // -1. If overflow is clear, set the flags for condition ne, as the
+ // dividend isn't kMinInt, and thus we shouldn't deopt.
+ __ Ccmp(divisor, -1, NoFlag, vs);
+ __ B(eq, &deopt);
+ }
+
+ // Compute remainder and deopt if it's not zero.
+ Register remainder = ToRegister32(instr->temp());
+ __ Msub(remainder, result, divisor, dividend);
+ __ Cbnz(remainder, &deopt);
+
+ Label div_ok;
+ __ B(&div_ok);
+ __ Bind(&deopt);
+ Deoptimize(instr->environment());
+ __ Bind(&div_ok);
+}
+
+
+void LCodeGen::DoDoubleToIntOrSmi(LDoubleToIntOrSmi* instr) {
+ DoubleRegister input = ToDoubleRegister(instr->value());
+ Register result = ToRegister32(instr->result());
+
+ if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) {
+ DeoptimizeIfMinusZero(input, instr->environment());
+ }
+
+ __ TryConvertDoubleToInt32(result, input, double_scratch());
+ DeoptimizeIf(ne, instr->environment());
+
+ if (instr->tag_result()) {
+ __ SmiTag(result.X());
+ }
+}
+
+
+void LCodeGen::DoDrop(LDrop* instr) {
+ __ Drop(instr->count());
+}
+
+
+void LCodeGen::DoDummy(LDummy* instr) {
+ // Nothing to see here, move on!
+}
+
+
+void LCodeGen::DoDummyUse(LDummyUse* instr) {
+ // Nothing to see here, move on!
+}
+
+
+void LCodeGen::DoFunctionLiteral(LFunctionLiteral* instr) {
+ ASSERT(ToRegister(instr->context()).is(cp));
+ // The FunctionLiteral instruction is marked as a call, so we can trash any
+ // register.
+ ASSERT(instr->IsMarkedAsCall());
+
+ // Use the fast case closure allocation code that allocates in new
+ // space for nested functions that don't need literals cloning.
+ bool pretenure = instr->hydrogen()->pretenure();
+ if (!pretenure && instr->hydrogen()->has_no_literals()) {
+ FastNewClosureStub stub(instr->hydrogen()->strict_mode(),
+ instr->hydrogen()->is_generator());
+ __ Mov(x2, Operand(instr->hydrogen()->shared_info()));
+ CallCode(stub.GetCode(isolate()), RelocInfo::CODE_TARGET, instr);
+ } else {
+ __ Mov(x2, Operand(instr->hydrogen()->shared_info()));
+ __ Mov(x1, Operand(pretenure ? factory()->true_value()
+ : factory()->false_value()));
+ __ Push(cp, x2, x1);
+ CallRuntime(Runtime::kHiddenNewClosure, 3, instr);
+ }
+}
+
+
+void LCodeGen::DoForInCacheArray(LForInCacheArray* instr) {
+ Register map = ToRegister(instr->map());
+ Register result = ToRegister(instr->result());
+ Label load_cache, done;
+
+ __ EnumLengthUntagged(result, map);
+ __ Cbnz(result, &load_cache);
+
+ __ Mov(result, Operand(isolate()->factory()->empty_fixed_array()));
+ __ B(&done);
+
+ __ Bind(&load_cache);
+ __ LoadInstanceDescriptors(map, result);
+ __ Ldr(result, FieldMemOperand(result, DescriptorArray::kEnumCacheOffset));
+ __ Ldr(result, FieldMemOperand(result, FixedArray::SizeFor(instr->idx())));
+ DeoptimizeIfZero(result, instr->environment());
+
+ __ Bind(&done);
+}
+
+
+void LCodeGen::DoForInPrepareMap(LForInPrepareMap* instr) {
+ Register object = ToRegister(instr->object());
+ Register null_value = x5;
+
+ ASSERT(instr->IsMarkedAsCall());
+ ASSERT(object.Is(x0));
+
+ Label deopt;
+
+ __ JumpIfRoot(object, Heap::kUndefinedValueRootIndex, &deopt);
+
+ __ LoadRoot(null_value, Heap::kNullValueRootIndex);
+ __ Cmp(object, null_value);
+ __ B(eq, &deopt);
+
+ __ JumpIfSmi(object, &deopt);
+
+ STATIC_ASSERT(FIRST_JS_PROXY_TYPE == FIRST_SPEC_OBJECT_TYPE);
+ __ CompareObjectType(object, x1, x1, LAST_JS_PROXY_TYPE);
+ __ B(le, &deopt);
+
+ Label use_cache, call_runtime;
+ __ CheckEnumCache(object, null_value, x1, x2, x3, x4, &call_runtime);
+
+ __ Ldr(object, FieldMemOperand(object, HeapObject::kMapOffset));
+ __ B(&use_cache);
+
+ __ Bind(&deopt);
+ Deoptimize(instr->environment());
+
+ // Get the set of properties to enumerate.
+ __ Bind(&call_runtime);
+ __ Push(object);
+ CallRuntime(Runtime::kGetPropertyNamesFast, 1, instr);
+
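+ // kGetPropertyNamesFast returns either a map (when an enum cache is
+ // available) or a fixed array of property names. Only the map case is
+ // handled here, so deoptimize if the result's map is not the meta map.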
+ __ Ldr(x1, FieldMemOperand(object, HeapObject::kMapOffset));
+ __ JumpIfNotRoot(x1, Heap::kMetaMapRootIndex, &deopt);
+
+ __ Bind(&use_cache);
+}
+
+
+void LCodeGen::DoGetCachedArrayIndex(LGetCachedArrayIndex* instr) {
+ Register input = ToRegister(instr->value());
+ Register result = ToRegister(instr->result());
+
+ __ AssertString(input);
+
+ // Assert that we can use a W register load to get the hash.
+ ASSERT((String::kHashShift + String::kArrayIndexValueBits) < kWRegSizeInBits);
+ __ Ldr(result.W(), FieldMemOperand(input, String::kHashFieldOffset));
+ __ IndexFromHash(result, result);
+}
+
+
+void LCodeGen::EmitGoto(int block) {
+ // Do not emit jump if we are emitting a goto to the next block.
+ if (!IsNextEmittedBlock(block)) {
+ __ B(chunk_->GetAssemblyLabel(LookupDestination(block)));
+ }
+}
+
+
+void LCodeGen::DoGoto(LGoto* instr) {
+ EmitGoto(instr->block_id());
+}
+
+
+void LCodeGen::DoHasCachedArrayIndexAndBranch(
+ LHasCachedArrayIndexAndBranch* instr) {
+ Register input = ToRegister(instr->value());
+ Register temp = ToRegister32(instr->temp());
+
+ // Assert that the cache status bits fit in a W register.
+ ASSERT(is_uint32(String::kContainsCachedArrayIndexMask));
+ __ Ldr(temp, FieldMemOperand(input, String::kHashFieldOffset));
+ __ Tst(temp, String::kContainsCachedArrayIndexMask);
+ EmitBranch(instr, eq);
+}
+
+
+// The HHasInstanceTypeAndBranch instruction is built with an interval of
+// instance types to test, but is only used in very restricted ways. The only
+// possible kinds of intervals are:
+//  - [ FIRST_TYPE, instr->to() ]
+//  - [ instr->from(), LAST_TYPE ]
+//  - instr->from() == instr->to()
+//
+// These kinds of intervals can be checked with only one compare instruction,
+// provided the correct value and test condition are used.
+//
+// TestType() will return the value to use in the compare instruction and
+// BranchCondition() will return the condition to use depending on the kind
+// of interval actually specified in the instruction.
+static InstanceType TestType(HHasInstanceTypeAndBranch* instr) {
+ InstanceType from = instr->from();
+ InstanceType to = instr->to();
+ if (from == FIRST_TYPE) return to;
+ ASSERT((from == to) || (to == LAST_TYPE));
+ return from;
+}
+
+
+// See comment above TestType function for what this function does.
+static Condition BranchCondition(HHasInstanceTypeAndBranch* instr) {
+ InstanceType from = instr->from();
+ InstanceType to = instr->to();
+ if (from == to) return eq;
+ if (to == LAST_TYPE) return hs;
+ if (from == FIRST_TYPE) return ls;
+ UNREACHABLE();
+ return eq;
+}
+
+
+void LCodeGen::DoHasInstanceTypeAndBranch(LHasInstanceTypeAndBranch* instr) {
+ Register input = ToRegister(instr->value());
+ Register scratch = ToRegister(instr->temp());
+
+ if (!instr->hydrogen()->value()->IsHeapObject()) {
+ __ JumpIfSmi(input, instr->FalseLabel(chunk_));
+ }
+ __ CompareObjectType(input, scratch, scratch, TestType(instr->hydrogen()));
+ EmitBranch(instr, BranchCondition(instr->hydrogen()));
+}
+
+
+void LCodeGen::DoInnerAllocatedObject(LInnerAllocatedObject* instr) {
+ Register result = ToRegister(instr->result());
+ Register base = ToRegister(instr->base_object());
+ if (instr->offset()->IsConstantOperand()) {
+ __ Add(result, base, ToOperand32I(instr->offset()));
+ } else {
+ __ Add(result, base, Operand(ToRegister32(instr->offset()), SXTW));
+ }
+}
+
+
+void LCodeGen::DoInstanceOf(LInstanceOf* instr) {
+ ASSERT(ToRegister(instr->context()).is(cp));
+ // Assert that the arguments are in the registers expected by InstanceofStub.
+ ASSERT(ToRegister(instr->left()).Is(InstanceofStub::left()));
+ ASSERT(ToRegister(instr->right()).Is(InstanceofStub::right()));
+
+ InstanceofStub stub(InstanceofStub::kArgsInRegisters);
+ CallCode(stub.GetCode(isolate()), RelocInfo::CODE_TARGET, instr);
+
+ // InstanceofStub returns a result in x0:
+ // 0 => not an instance
+ // smi 1 => instance.
+ __ Cmp(x0, 0);
+ __ LoadTrueFalseRoots(x0, x1);
+ __ Csel(x0, x0, x1, eq);
+}
+
+
+void LCodeGen::DoInstanceOfKnownGlobal(LInstanceOfKnownGlobal* instr) {
+ class DeferredInstanceOfKnownGlobal: public LDeferredCode {
+ public:
+ DeferredInstanceOfKnownGlobal(LCodeGen* codegen,
+ LInstanceOfKnownGlobal* instr)
+ : LDeferredCode(codegen), instr_(instr) { }
+ virtual void Generate() {
+ codegen()->DoDeferredInstanceOfKnownGlobal(instr_);
+ }
+ virtual LInstruction* instr() { return instr_; }
+ private:
+ LInstanceOfKnownGlobal* instr_;
+ };
+
+ DeferredInstanceOfKnownGlobal* deferred =
+ new(zone()) DeferredInstanceOfKnownGlobal(this, instr);
+
+ Label map_check, return_false, cache_miss, done;
+ Register object = ToRegister(instr->value());
+ Register result = ToRegister(instr->result());
+ // x4 is expected in the associated deferred code and stub.
+ Register map_check_site = x4;
+ Register map = x5;
+
+ // This instruction is marked as call. We can clobber any register.
+ ASSERT(instr->IsMarkedAsCall());
+
+ // We must take into account that object is in x11.
+ ASSERT(object.Is(x11));
+ Register scratch = x10;
+
+ // A Smi is not instance of anything.
+ __ JumpIfSmi(object, &return_false);
+
+ // This is the inlined call site instanceof cache. The two occurrences of the
+ // hole value will be patched to the last map/result pair generated by the
+ // instanceof stub.
+ __ Ldr(map, FieldMemOperand(object, HeapObject::kMapOffset));
+ {
+ // Below we use Factory::the_hole_value() on purpose instead of loading from
+ // the root array to force relocation and later be able to patch with a
+ // custom value.
+ InstructionAccurateScope scope(masm(), 5);
+ __ bind(&map_check);
+ // Will be patched with the cached map.
+ Handle<Cell> cell = factory()->NewCell(factory()->the_hole_value());
+ __ LoadRelocated(scratch, Operand(Handle<Object>(cell)));
+ __ ldr(scratch, FieldMemOperand(scratch, PropertyCell::kValueOffset));
+ __ cmp(map, scratch);
+ __ b(&cache_miss, ne);
+ // The address of this instruction is computed relative to the map check
+ // above, so check the size of the code generated.
+ ASSERT(masm()->InstructionsGeneratedSince(&map_check) == 4);
+ // Will be patched with the cached result.
+ __ LoadRelocated(result, Operand(factory()->the_hole_value()));
+ }
+ __ B(&done);
+
+ // The inlined call site cache did not match.
+ // Check null and string before calling the deferred code.
+ __ Bind(&cache_miss);
+ // Compute the address of the map check. It must not be clobbered until the
+ // InstanceOfStub has used it.
+ __ Adr(map_check_site, &map_check);
+ // Null is not instance of anything.
+ __ JumpIfRoot(object, Heap::kNullValueRootIndex, &return_false);
+
+ // String values are not instances of anything.
+ // Return false if the object is a string. Otherwise, jump to the deferred
+ // code.
+ // Note that we can't jump directly to deferred code from
+ // IsObjectJSStringType, because it uses tbz for the jump and the deferred
+ // code can be out of range.
+ __ IsObjectJSStringType(object, scratch, NULL, &return_false);
+ __ B(deferred->entry());
+
+ __ Bind(&return_false);
+ __ LoadRoot(result, Heap::kFalseValueRootIndex);
+
+ // Here result is either true or false.
+ __ Bind(deferred->exit());
+ __ Bind(&done);
+}
+
+
+void LCodeGen::DoDeferredInstanceOfKnownGlobal(LInstanceOfKnownGlobal* instr) {
+ Register result = ToRegister(instr->result());
+ ASSERT(result.Is(x0)); // InstanceofStub returns its result in x0.
+ InstanceofStub::Flags flags = InstanceofStub::kNoFlags;
+ flags = static_cast<InstanceofStub::Flags>(
+ flags | InstanceofStub::kArgsInRegisters);
+ flags = static_cast<InstanceofStub::Flags>(
+ flags | InstanceofStub::kReturnTrueFalseObject);
+ flags = static_cast<InstanceofStub::Flags>(
+ flags | InstanceofStub::kCallSiteInlineCheck);
+
+ PushSafepointRegistersScope scope(this, Safepoint::kWithRegisters);
+ LoadContextFromDeferred(instr->context());
+
+ // Prepare InstanceofStub arguments.
+ ASSERT(ToRegister(instr->value()).Is(InstanceofStub::left()));
+ __ LoadObject(InstanceofStub::right(), instr->function());
+
+ InstanceofStub stub(flags);
+ CallCodeGeneric(stub.GetCode(isolate()),
+ RelocInfo::CODE_TARGET,
+ instr,
+ RECORD_SAFEPOINT_WITH_REGISTERS_AND_NO_ARGUMENTS);
+ LEnvironment* env = instr->GetDeferredLazyDeoptimizationEnvironment();
+ safepoints_.RecordLazyDeoptimizationIndex(env->deoptimization_index());
+
+ // Put the result value into the result register slot.
+ __ StoreToSafepointRegisterSlot(result, result);
+}
+
+
+void LCodeGen::DoInstructionGap(LInstructionGap* instr) {
+ DoGap(instr);
+}
+
+
+void LCodeGen::DoInteger32ToDouble(LInteger32ToDouble* instr) {
+ Register value = ToRegister32(instr->value());
+ DoubleRegister result = ToDoubleRegister(instr->result());
+ __ Scvtf(result, value);
+}
+
+
+void LCodeGen::DoInvokeFunction(LInvokeFunction* instr) {
+ ASSERT(ToRegister(instr->context()).is(cp));
+ // The function is required to be in x1.
+ ASSERT(ToRegister(instr->function()).is(x1));
+ ASSERT(instr->HasPointerMap());
+
+ Handle<JSFunction> known_function = instr->hydrogen()->known_function();
+ if (known_function.is_null()) {
+ LPointerMap* pointers = instr->pointer_map();
+ SafepointGenerator generator(this, pointers, Safepoint::kLazyDeopt);
+ ParameterCount count(instr->arity());
+ __ InvokeFunction(x1, count, CALL_FUNCTION, generator);
+ } else {
+ CallKnownFunction(known_function,
+ instr->hydrogen()->formal_parameter_count(),
+ instr->arity(),
+ instr,
+ x1);
+ }
+}
+
+
+void LCodeGen::DoIsConstructCallAndBranch(LIsConstructCallAndBranch* instr) {
+ Register temp1 = ToRegister(instr->temp1());
+ Register temp2 = ToRegister(instr->temp2());
+
+ // Get the frame pointer for the calling frame.
+ __ Ldr(temp1, MemOperand(fp, StandardFrameConstants::kCallerFPOffset));
+
+ // Skip the arguments adaptor frame if it exists.
+ Label check_frame_marker;
+ __ Ldr(temp2, MemOperand(temp1, StandardFrameConstants::kContextOffset));
+ __ Cmp(temp2, Smi::FromInt(StackFrame::ARGUMENTS_ADAPTOR));
+ __ B(ne, &check_frame_marker);
+ __ Ldr(temp1, MemOperand(temp1, StandardFrameConstants::kCallerFPOffset));
+
+ // Check the marker in the calling frame.
+ __ Bind(&check_frame_marker);
+ __ Ldr(temp1, MemOperand(temp1, StandardFrameConstants::kMarkerOffset));
+
+ EmitCompareAndBranch(
+ instr, eq, temp1, Operand(Smi::FromInt(StackFrame::CONSTRUCT)));
+}
+
+
+void LCodeGen::DoIsObjectAndBranch(LIsObjectAndBranch* instr) {
+ Label* is_object = instr->TrueLabel(chunk_);
+ Label* is_not_object = instr->FalseLabel(chunk_);
+ Register value = ToRegister(instr->value());
+ Register map = ToRegister(instr->temp1());
+ Register scratch = ToRegister(instr->temp2());
+
+ __ JumpIfSmi(value, is_not_object);
+ __ JumpIfRoot(value, Heap::kNullValueRootIndex, is_object);
+
+ __ Ldr(map, FieldMemOperand(value, HeapObject::kMapOffset));
+
+ // Check for undetectable objects.
+ __ Ldrb(scratch, FieldMemOperand(map, Map::kBitFieldOffset));
+ __ TestAndBranchIfAnySet(scratch, 1 << Map::kIsUndetectable, is_not_object);
+
+ // Check that instance type is in object type range.
+ __ IsInstanceJSObjectType(map, scratch, NULL);
+ // Flags have been updated by IsInstanceJSObjectType. We can now test the
+ // flags for "le" condition to check if the object's type is a valid
+ // JS object type.
+ EmitBranch(instr, le);
+}
+
+
+Condition LCodeGen::EmitIsString(Register input,
+ Register temp1,
+ Label* is_not_string,
+ SmiCheck check_needed = INLINE_SMI_CHECK) {
+ if (check_needed == INLINE_SMI_CHECK) {
+ __ JumpIfSmi(input, is_not_string);
+ }
+ __ CompareObjectType(input, temp1, temp1, FIRST_NONSTRING_TYPE);
+
+ return lt;
+}
+
+
+void LCodeGen::DoIsStringAndBranch(LIsStringAndBranch* instr) {
+ Register val = ToRegister(instr->value());
+ Register scratch = ToRegister(instr->temp());
+
+ SmiCheck check_needed =
+ instr->hydrogen()->value()->IsHeapObject()
+ ? OMIT_SMI_CHECK : INLINE_SMI_CHECK;
+ Condition true_cond =
+ EmitIsString(val, scratch, instr->FalseLabel(chunk_), check_needed);
+
+ EmitBranch(instr, true_cond);
+}
+
+
+void LCodeGen::DoIsSmiAndBranch(LIsSmiAndBranch* instr) {
+ Register value = ToRegister(instr->value());
+ STATIC_ASSERT(kSmiTag == 0);
+ EmitTestAndBranch(instr, eq, value, kSmiTagMask);
+}
+
+
+void LCodeGen::DoIsUndetectableAndBranch(LIsUndetectableAndBranch* instr) {
+ Register input = ToRegister(instr->value());
+ Register temp = ToRegister(instr->temp());
+
+ if (!instr->hydrogen()->value()->IsHeapObject()) {
+ __ JumpIfSmi(input, instr->FalseLabel(chunk_));
+ }
+ __ Ldr(temp, FieldMemOperand(input, HeapObject::kMapOffset));
+ __ Ldrb(temp, FieldMemOperand(temp, Map::kBitFieldOffset));
+
+ EmitTestAndBranch(instr, ne, temp, 1 << Map::kIsUndetectable);
+}
+
+
+static const char* LabelType(LLabel* label) {
+ if (label->is_loop_header()) return " (loop header)";
+ if (label->is_osr_entry()) return " (OSR entry)";
+ return "";
+}
+
+
+void LCodeGen::DoLabel(LLabel* label) {
+ Comment(";;; <@%d,#%d> -------------------- B%d%s --------------------",
+ current_instruction_,
+ label->hydrogen_value()->id(),
+ label->block_id(),
+ LabelType(label));
+
+ __ Bind(label->label());
+ current_block_ = label->block_id();
+ DoGap(label);
+}
+
+
+void LCodeGen::DoLoadContextSlot(LLoadContextSlot* instr) {
+ Register context = ToRegister(instr->context());
+ Register result = ToRegister(instr->result());
+ __ Ldr(result, ContextMemOperand(context, instr->slot_index()));
+ if (instr->hydrogen()->RequiresHoleCheck()) {
+ if (instr->hydrogen()->DeoptimizesOnHole()) {
+ DeoptimizeIfRoot(result, Heap::kTheHoleValueRootIndex,
+ instr->environment());
+ } else {
+ Label not_the_hole;
+ __ JumpIfNotRoot(result, Heap::kTheHoleValueRootIndex, &not_the_hole);
+ __ LoadRoot(result, Heap::kUndefinedValueRootIndex);
+ __ Bind(&not_the_hole);
+ }
+ }
+}
+
+
+void LCodeGen::DoLoadFunctionPrototype(LLoadFunctionPrototype* instr) {
+ Register function = ToRegister(instr->function());
+ Register result = ToRegister(instr->result());
+ Register temp = ToRegister(instr->temp());
+ Label deopt;
+
+ // Check that the function really is a function. Leaves map in the result
+ // register.
+ __ JumpIfNotObjectType(function, result, temp, JS_FUNCTION_TYPE, &deopt);
+
+ // Make sure that the function has an instance prototype.
+ Label non_instance;
+ __ Ldrb(temp, FieldMemOperand(result, Map::kBitFieldOffset));
+ __ Tbnz(temp, Map::kHasNonInstancePrototype, &non_instance);
+
+ // Get the prototype or initial map from the function.
+ __ Ldr(result, FieldMemOperand(function,
+ JSFunction::kPrototypeOrInitialMapOffset));
+
+ // Check that the function has a prototype or an initial map.
+ __ JumpIfRoot(result, Heap::kTheHoleValueRootIndex, &deopt);
+
+ // If the function does not have an initial map, we're done.
+ Label done;
+ __ CompareObjectType(result, temp, temp, MAP_TYPE);
+ __ B(ne, &done);
+
+ // Get the prototype from the initial map.
+ __ Ldr(result, FieldMemOperand(result, Map::kPrototypeOffset));
+ __ B(&done);
+
+ // Non-instance prototype: fetch prototype from constructor field in initial
+ // map.
+ __ Bind(&non_instance);
+ __ Ldr(result, FieldMemOperand(result, Map::kConstructorOffset));
+ __ B(&done);
+
+ // Deoptimize case.
+ __ Bind(&deopt);
+ Deoptimize(instr->environment());
+
+ // All done.
+ __ Bind(&done);
+}
+
+
+void LCodeGen::DoLoadGlobalCell(LLoadGlobalCell* instr) {
+ Register result = ToRegister(instr->result());
+ __ Mov(result, Operand(Handle<Object>(instr->hydrogen()->cell().handle())));
+ __ Ldr(result, FieldMemOperand(result, Cell::kValueOffset));
+ if (instr->hydrogen()->RequiresHoleCheck()) {
+ DeoptimizeIfRoot(
+ result, Heap::kTheHoleValueRootIndex, instr->environment());
+ }
+}
+
+
+void LCodeGen::DoLoadGlobalGeneric(LLoadGlobalGeneric* instr) {
+ ASSERT(ToRegister(instr->context()).is(cp));
+ ASSERT(ToRegister(instr->global_object()).Is(x0));
+ ASSERT(ToRegister(instr->result()).Is(x0));
+ __ Mov(x2, Operand(instr->name()));
+ ContextualMode mode = instr->for_typeof() ? NOT_CONTEXTUAL : CONTEXTUAL;
+ Handle<Code> ic = LoadIC::initialize_stub(isolate(), mode);
+ CallCode(ic, RelocInfo::CODE_TARGET, instr);
+}
+
+
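+// Compute a MemOperand addressing element (key + additional_index) of an
+// external or fixed typed array. The effective address is
+//   base + ((key + additional_index) << element_size_shift) + additional_offset
+// where additional_offset is non-zero only for on-heap (fixed) typed arrays,
+// and a smi key is untagged before scaling.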
+MemOperand LCodeGen::PrepareKeyedExternalArrayOperand(
+ Register key,
+ Register base,
+ Register scratch,
+ bool key_is_smi,
+ bool key_is_constant,
+ int constant_key,
+ ElementsKind elements_kind,
+ int additional_index) {
+ int element_size_shift = ElementsKindToShiftSize(elements_kind);
+ int additional_offset = IsFixedTypedArrayElementsKind(elements_kind)
+ ? FixedTypedArrayBase::kDataOffset - kHeapObjectTag
+ : 0;
+
+ if (key_is_constant) {
+ int base_offset = ((constant_key + additional_index) << element_size_shift);
+ return MemOperand(base, base_offset + additional_offset);
+ }
+
+ if (additional_index == 0) {
+ if (key_is_smi) {
+ // Key is smi: untag, and scale by element size.
+ __ Add(scratch, base, Operand::UntagSmiAndScale(key, element_size_shift));
+ return MemOperand(scratch, additional_offset);
+ } else {
+ // Key is not smi: sign-extend the key and scale by element size.
+ if (additional_offset == 0) {
+ return MemOperand(base, key, SXTW, element_size_shift);
+ } else {
+ __ Add(scratch, base, Operand(key, SXTW, element_size_shift));
+ return MemOperand(scratch, additional_offset);
+ }
+ }
+ } else {
+ // TODO(all): Try to combine these cases a bit more intelligently.
+ if (additional_offset == 0) {
+ if (key_is_smi) {
+ __ SmiUntag(scratch, key);
+ __ Add(scratch.W(), scratch.W(), additional_index);
+ } else {
+ __ Add(scratch.W(), key.W(), additional_index);
+ }
+ return MemOperand(base, scratch, LSL, element_size_shift);
+ } else {
+ if (key_is_smi) {
+ __ Add(scratch, base,
+ Operand::UntagSmiAndScale(key, element_size_shift));
+ } else {
+ __ Add(scratch, base, Operand(key, SXTW, element_size_shift));
+ }
+ return MemOperand(
+ scratch,
+ (additional_index << element_size_shift) + additional_offset);
+ }
+ }
+}
+
+
+void LCodeGen::DoLoadKeyedExternal(LLoadKeyedExternal* instr) {
+ Register ext_ptr = ToRegister(instr->elements());
+ Register scratch;
+ ElementsKind elements_kind = instr->elements_kind();
+
+ bool key_is_smi = instr->hydrogen()->key()->representation().IsSmi();
+ bool key_is_constant = instr->key()->IsConstantOperand();
+ Register key = no_reg;
+ int constant_key = 0;
+ if (key_is_constant) {
+ ASSERT(instr->temp() == NULL);
+ constant_key = ToInteger32(LConstantOperand::cast(instr->key()));
+ if (constant_key & 0xf0000000) {
+ Abort(kArrayIndexConstantValueTooBig);
+ }
+ } else {
+ scratch = ToRegister(instr->temp());
+ key = ToRegister(instr->key());
+ }
+
+ MemOperand mem_op =
+ PrepareKeyedExternalArrayOperand(key, ext_ptr, scratch, key_is_smi,
+ key_is_constant, constant_key,
+ elements_kind,
+ instr->additional_index());
+
+ if ((elements_kind == EXTERNAL_FLOAT32_ELEMENTS) ||
+ (elements_kind == FLOAT32_ELEMENTS)) {
+ DoubleRegister result = ToDoubleRegister(instr->result());
+ __ Ldr(result.S(), mem_op);
+ __ Fcvt(result, result.S());
+ } else if ((elements_kind == EXTERNAL_FLOAT64_ELEMENTS) ||
+ (elements_kind == FLOAT64_ELEMENTS)) {
+ DoubleRegister result = ToDoubleRegister(instr->result());
+ __ Ldr(result, mem_op);
+ } else {
+ Register result = ToRegister(instr->result());
+
+ switch (elements_kind) {
+ case EXTERNAL_INT8_ELEMENTS:
+ case INT8_ELEMENTS:
+ __ Ldrsb(result, mem_op);
+ break;
+ case EXTERNAL_UINT8_CLAMPED_ELEMENTS:
+ case EXTERNAL_UINT8_ELEMENTS:
+ case UINT8_ELEMENTS:
+ case UINT8_CLAMPED_ELEMENTS:
+ __ Ldrb(result, mem_op);
+ break;
+ case EXTERNAL_INT16_ELEMENTS:
+ case INT16_ELEMENTS:
+ __ Ldrsh(result, mem_op);
+ break;
+ case EXTERNAL_UINT16_ELEMENTS:
+ case UINT16_ELEMENTS:
+ __ Ldrh(result, mem_op);
+ break;
+ case EXTERNAL_INT32_ELEMENTS:
+ case INT32_ELEMENTS:
+ __ Ldrsw(result, mem_op);
+ break;
+ case EXTERNAL_UINT32_ELEMENTS:
+ case UINT32_ELEMENTS:
+ __ Ldr(result.W(), mem_op);
+ if (!instr->hydrogen()->CheckFlag(HInstruction::kUint32)) {
+ // Deopt if value >= 0x80000000, i.e. if it does not fit in a signed int32.
+ __ Tst(result, 0xFFFFFFFF80000000);
+ DeoptimizeIf(ne, instr->environment());
+ }
+ break;
+ case FLOAT32_ELEMENTS:
+ case FLOAT64_ELEMENTS:
+ case EXTERNAL_FLOAT32_ELEMENTS:
+ case EXTERNAL_FLOAT64_ELEMENTS:
+ case FAST_HOLEY_DOUBLE_ELEMENTS:
+ case FAST_HOLEY_ELEMENTS:
+ case FAST_HOLEY_SMI_ELEMENTS:
+ case FAST_DOUBLE_ELEMENTS:
+ case FAST_ELEMENTS:
+ case FAST_SMI_ELEMENTS:
+ case DICTIONARY_ELEMENTS:
+ case SLOPPY_ARGUMENTS_ELEMENTS:
+ UNREACHABLE();
+ break;
+ }
+ }
+}
+
+
+void LCodeGen::CalcKeyedArrayBaseRegister(Register base,
+ Register elements,
+ Register key,
+ bool key_is_tagged,
+ ElementsKind elements_kind) {
+ int element_size_shift = ElementsKindToShiftSize(elements_kind);
+
+ // Even though the HLoad/StoreKeyed instructions force the input
+ // representation for the key to be an integer, the input gets replaced during
+ // bounds check elimination with the index argument to the bounds check, which
+ // can be tagged, so that case must be handled here, too.
+ if (key_is_tagged) {
+ __ Add(base, elements, Operand::UntagSmiAndScale(key, element_size_shift));
+ } else {
+ // Sign extend key because it could be a 32-bit negative value or contain
+ // garbage in the top 32-bits. The address computation happens in 64-bit.
+ ASSERT((element_size_shift >= 0) && (element_size_shift <= 4));
+ __ Add(base, elements, Operand(key, SXTW, element_size_shift));
+ }
+}
+
+
+void LCodeGen::DoLoadKeyedFixedDouble(LLoadKeyedFixedDouble* instr) {
+ Register elements = ToRegister(instr->elements());
+ DoubleRegister result = ToDoubleRegister(instr->result());
+ Register load_base;
+ int offset = 0;
+
+ if (instr->key()->IsConstantOperand()) {
+ ASSERT(instr->hydrogen()->RequiresHoleCheck() ||
+ (instr->temp() == NULL));
+
+ int constant_key = ToInteger32(LConstantOperand::cast(instr->key()));
+ if (constant_key & 0xf0000000) {
+ Abort(kArrayIndexConstantValueTooBig);
+ }
+ offset = FixedDoubleArray::OffsetOfElementAt(constant_key +
+ instr->additional_index());
+ load_base = elements;
+ } else {
+ load_base = ToRegister(instr->temp());
+ Register key = ToRegister(instr->key());
+ bool key_is_tagged = instr->hydrogen()->key()->representation().IsSmi();
+ CalcKeyedArrayBaseRegister(load_base, elements, key, key_is_tagged,
+ instr->hydrogen()->elements_kind());
+ offset = FixedDoubleArray::OffsetOfElementAt(instr->additional_index());
+ }
+ __ Ldr(result, FieldMemOperand(load_base, offset));
+
+ if (instr->hydrogen()->RequiresHoleCheck()) {
+ Register scratch = ToRegister(instr->temp());
+
+ // TODO(all): Is it faster to reload this value to an integer register, or
+ // move from fp to integer?
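+ // The hole is a specific NaN bit pattern (kHoleNanInt64), so the comparison
+ // is done on the raw bits; an FP compare could not tell the hole apart from
+ // any other NaN.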
+ __ Fmov(scratch, result);
+ __ Cmp(scratch, kHoleNanInt64);
+ DeoptimizeIf(eq, instr->environment());
+ }
+}
+
+
+void LCodeGen::DoLoadKeyedFixed(LLoadKeyedFixed* instr) {
+ Register elements = ToRegister(instr->elements());
+ Register result = ToRegister(instr->result());
+ Register load_base;
+ int offset = 0;
+
+ if (instr->key()->IsConstantOperand()) {
+ ASSERT(instr->temp() == NULL);
+ LConstantOperand* const_operand = LConstantOperand::cast(instr->key());
+ offset = FixedArray::OffsetOfElementAt(ToInteger32(const_operand) +
+ instr->additional_index());
+ load_base = elements;
+ } else {
+ load_base = ToRegister(instr->temp());
+ Register key = ToRegister(instr->key());
+ bool key_is_tagged = instr->hydrogen()->key()->representation().IsSmi();
+ CalcKeyedArrayBaseRegister(load_base, elements, key, key_is_tagged,
+ instr->hydrogen()->elements_kind());
+ offset = FixedArray::OffsetOfElementAt(instr->additional_index());
+ }
+ Representation representation = instr->hydrogen()->representation();
+
+ if (representation.IsInteger32() &&
+ instr->hydrogen()->elements_kind() == FAST_SMI_ELEMENTS) {
+ STATIC_ASSERT(kSmiValueSize == 32 && kSmiShift == 32 && kSmiTag == 0);
+ __ Load(result, UntagSmiFieldMemOperand(load_base, offset),
+ Representation::Integer32());
+ } else {
+ __ Load(result, FieldMemOperand(load_base, offset),
+ representation);
+ }
+
+ if (instr->hydrogen()->RequiresHoleCheck()) {
+ if (IsFastSmiElementsKind(instr->hydrogen()->elements_kind())) {
+ DeoptimizeIfNotSmi(result, instr->environment());
+ } else {
+ DeoptimizeIfRoot(result, Heap::kTheHoleValueRootIndex,
+ instr->environment());
+ }
+ }
+}
+
+
+void LCodeGen::DoLoadKeyedGeneric(LLoadKeyedGeneric* instr) {
+ ASSERT(ToRegister(instr->context()).is(cp));
+ ASSERT(ToRegister(instr->object()).Is(x1));
+ ASSERT(ToRegister(instr->key()).Is(x0));
+
+ Handle<Code> ic = isolate()->builtins()->KeyedLoadIC_Initialize();
+ CallCode(ic, RelocInfo::CODE_TARGET, instr);
+
+ ASSERT(ToRegister(instr->result()).Is(x0));
+}
+
+
+void LCodeGen::DoLoadNamedField(LLoadNamedField* instr) {
+ HObjectAccess access = instr->hydrogen()->access();
+ int offset = access.offset();
+ Register object = ToRegister(instr->object());
+
+ if (access.IsExternalMemory()) {
+ Register result = ToRegister(instr->result());
+ __ Load(result, MemOperand(object, offset), access.representation());
+ return;
+ }
+
+ if (instr->hydrogen()->representation().IsDouble()) {
+ FPRegister result = ToDoubleRegister(instr->result());
+ __ Ldr(result, FieldMemOperand(object, offset));
+ return;
+ }
+
+ Register result = ToRegister(instr->result());
+ Register source;
+ if (access.IsInobject()) {
+ source = object;
+ } else {
+ // Load the properties array, using result as a scratch register.
+ __ Ldr(result, FieldMemOperand(object, JSObject::kPropertiesOffset));
+ source = result;
+ }
+
+ if (access.representation().IsSmi() &&
+ instr->hydrogen()->representation().IsInteger32()) {
+ // Read int value directly from upper half of the smi.
+ STATIC_ASSERT(kSmiValueSize == 32 && kSmiShift == 32 && kSmiTag == 0);
+ __ Load(result, UntagSmiFieldMemOperand(source, offset),
+ Representation::Integer32());
+ } else {
+ __ Load(result, FieldMemOperand(source, offset), access.representation());
+ }
+}
+
+
+void LCodeGen::DoLoadNamedGeneric(LLoadNamedGeneric* instr) {
+ ASSERT(ToRegister(instr->context()).is(cp));
+ // LoadIC expects x2 to hold the name, and x0 to hold the receiver.
+ ASSERT(ToRegister(instr->object()).is(x0));
+ __ Mov(x2, Operand(instr->name()));
+
+ Handle<Code> ic = LoadIC::initialize_stub(isolate(), NOT_CONTEXTUAL);
+ CallCode(ic, RelocInfo::CODE_TARGET, instr);
+
+ ASSERT(ToRegister(instr->result()).is(x0));
+}
+
+
+void LCodeGen::DoLoadRoot(LLoadRoot* instr) {
+ Register result = ToRegister(instr->result());
+ __ LoadRoot(result, instr->index());
+}
+
+
+void LCodeGen::DoMapEnumLength(LMapEnumLength* instr) {
+ Register result = ToRegister(instr->result());
+ Register map = ToRegister(instr->value());
+ __ EnumLengthSmi(result, map);
+}
+
+
+void LCodeGen::DoMathAbs(LMathAbs* instr) {
+ Representation r = instr->hydrogen()->value()->representation();
+ if (r.IsDouble()) {
+ DoubleRegister input = ToDoubleRegister(instr->value());
+ DoubleRegister result = ToDoubleRegister(instr->result());
+ __ Fabs(result, input);
+ } else if (r.IsSmi() || r.IsInteger32()) {
+ Register input = r.IsSmi() ? ToRegister(instr->value())
+ : ToRegister32(instr->value());
+ Register result = r.IsSmi() ? ToRegister(instr->result())
+ : ToRegister32(instr->result());
+ Label done;
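+ // Abs branches to "done" when the result is representable; the only value
+ // that falls through is kMinInt (or its smi equivalent), which deopts.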
+ __ Abs(result, input, NULL, &done);
+ Deoptimize(instr->environment());
+ __ Bind(&done);
+ }
+}
+
+
+void LCodeGen::DoDeferredMathAbsTagged(LMathAbsTagged* instr,
+ Label* exit,
+ Label* allocation_entry) {
+ // Handle the tricky cases of MathAbsTagged:
+ // - HeapNumber inputs.
+ // - Negative inputs produce a positive result, so a new HeapNumber is
+ // allocated to hold it.
+ // - Positive inputs are returned as-is, since there is no need to allocate
+ // a new HeapNumber for the result.
+ //  - The (smi) input -0x80000000 produces +0x80000000, which does not fit in
+ //    a smi. In this case, the inline code sets the result and jumps directly
+ // to the allocation_entry label.
+ ASSERT(instr->context() != NULL);
+ ASSERT(ToRegister(instr->context()).is(cp));
+ Register input = ToRegister(instr->value());
+ Register temp1 = ToRegister(instr->temp1());
+ Register temp2 = ToRegister(instr->temp2());
+ Register result_bits = ToRegister(instr->temp3());
+ Register result = ToRegister(instr->result());
+
+ Label runtime_allocation;
+
+ // Deoptimize if the input is not a HeapNumber.
+ __ Ldr(temp1, FieldMemOperand(input, HeapObject::kMapOffset));
+ DeoptimizeIfNotRoot(temp1, Heap::kHeapNumberMapRootIndex,
+ instr->environment());
+
+ // If the argument is positive, we can return it as-is, without any need to
+ // allocate a new HeapNumber for the result. We have to do this in integer
+ // registers (rather than with fabs) because we need to be able to distinguish
+ // the two zeroes.
+ __ Ldr(result_bits, FieldMemOperand(input, HeapNumber::kValueOffset));
+ __ Mov(result, input);
+ __ Tbz(result_bits, kXSignBit, exit);
+
+ // Calculate abs(input) by clearing the sign bit.
+ __ Bic(result_bits, result_bits, kXSignMask);
+
+ // Allocate a new HeapNumber to hold the result.
+ // result_bits The bit representation of the (double) result.
+ __ Bind(allocation_entry);
+ __ AllocateHeapNumber(result, &runtime_allocation, temp1, temp2);
+ // The inline (non-deferred) code will store result_bits into result.
+ __ B(exit);
+
+ __ Bind(&runtime_allocation);
+ if (FLAG_debug_code) {
+ // Because result is in the pointer map, we need to make sure it has a valid
+ // tagged value before we call the runtime. We speculatively set it to the
+ // input (for abs(+x)) or to a smi (for abs(-SMI_MIN)), so it should already
+ // be valid.
+ Label result_ok;
+ Register input = ToRegister(instr->value());
+ __ JumpIfSmi(result, &result_ok);
+ __ Cmp(input, result);
+ __ Assert(eq, kUnexpectedValue);
+ __ Bind(&result_ok);
+ }
+
+ { PushSafepointRegistersScope scope(this, Safepoint::kWithRegisters);
+ CallRuntimeFromDeferred(Runtime::kHiddenAllocateHeapNumber, 0, instr,
+ instr->context());
+ __ StoreToSafepointRegisterSlot(x0, result);
+ }
+ // The inline (non-deferred) code will store result_bits into result.
+}
+
+
+void LCodeGen::DoMathAbsTagged(LMathAbsTagged* instr) {
+ // Class for deferred case.
+ class DeferredMathAbsTagged: public LDeferredCode {
+ public:
+ DeferredMathAbsTagged(LCodeGen* codegen, LMathAbsTagged* instr)
+ : LDeferredCode(codegen), instr_(instr) { }
+ virtual void Generate() {
+ codegen()->DoDeferredMathAbsTagged(instr_, exit(),
+ allocation_entry());
+ }
+ virtual LInstruction* instr() { return instr_; }
+ Label* allocation_entry() { return &allocation; }
+ private:
+ LMathAbsTagged* instr_;
+ Label allocation;
+ };
+
+ // TODO(jbramley): The early-exit mechanism would skip the new frame handling
+ // in GenerateDeferredCode. Tidy this up.
+ ASSERT(!NeedsDeferredFrame());
+
+ DeferredMathAbsTagged* deferred =
+ new(zone()) DeferredMathAbsTagged(this, instr);
+
+ ASSERT(instr->hydrogen()->value()->representation().IsTagged() ||
+ instr->hydrogen()->value()->representation().IsSmi());
+ Register input = ToRegister(instr->value());
+ Register result_bits = ToRegister(instr->temp3());
+ Register result = ToRegister(instr->result());
+ Label done;
+
+ // Handle smis inline.
+ // We can treat smis as 64-bit integers, since the (low-order) tag bits will
+ // never get set by the negation. This is therefore the same as the Integer32
+ // case in DoMathAbs, except that it operates on 64-bit values.
+ STATIC_ASSERT((kSmiValueSize == 32) && (kSmiShift == 32) && (kSmiTag == 0));
+
+ __ JumpIfNotSmi(input, deferred->entry());
+
+ __ Abs(result, input, NULL, &done);
+
+ // The result is the magnitude (abs) of the smallest value a smi can
+ // represent, encoded as a double.
+ __ Mov(result_bits, double_to_rawbits(0x80000000));
+ __ B(deferred->allocation_entry());
+
+ __ Bind(deferred->exit());
+ __ Str(result_bits, FieldMemOperand(result, HeapNumber::kValueOffset));
+
+ __ Bind(&done);
+}
+
+
+void LCodeGen::DoMathExp(LMathExp* instr) {
+ DoubleRegister input = ToDoubleRegister(instr->value());
+ DoubleRegister result = ToDoubleRegister(instr->result());
+ DoubleRegister double_temp1 = ToDoubleRegister(instr->double_temp1());
+ DoubleRegister double_temp2 = double_scratch();
+ Register temp1 = ToRegister(instr->temp1());
+ Register temp2 = ToRegister(instr->temp2());
+ Register temp3 = ToRegister(instr->temp3());
+
+ MathExpGenerator::EmitMathExp(masm(), input, result,
+ double_temp1, double_temp2,
+ temp1, temp2, temp3);
+}
+
+
+void LCodeGen::DoMathFloor(LMathFloor* instr) {
+ // TODO(jbramley): If we could provide a double result, we could use frintm
+ // and produce a valid double result in a single instruction.
+ DoubleRegister input = ToDoubleRegister(instr->value());
+ Register result = ToRegister(instr->result());
+
+ if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) {
+ DeoptimizeIfMinusZero(input, instr->environment());
+ }
+
+ __ Fcvtms(result, input);
+
+ // Check that the result fits into a 32-bit integer.
+ // - The result did not overflow.
+ __ Cmp(result, Operand(result, SXTW));
+ // - The input was not NaN.
+ __ Fccmp(input, input, NoFlag, eq);
+ DeoptimizeIf(ne, instr->environment());
+}
+
+
+void LCodeGen::DoFlooringDivByPowerOf2I(LFlooringDivByPowerOf2I* instr) {
+ Register dividend = ToRegister32(instr->dividend());
+ Register result = ToRegister32(instr->result());
+ int32_t divisor = instr->divisor();
+
+ // If the divisor is positive, things are easy: There can be no deopts and we
+ // can simply do an arithmetic right shift.
+ if (divisor == 1) return;
+ int32_t shift = WhichPowerOf2Abs(divisor);
+ if (divisor > 1) {
+ __ Mov(result, Operand(dividend, ASR, shift));
+ return;
+ }
+
+ // If the divisor is negative, we have to negate and handle edge cases.
+ Label not_kmin_int, done;
+ __ Negs(result, dividend);
+ if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) {
+ DeoptimizeIf(eq, instr->environment());
+ }
+ if (instr->hydrogen()->CheckFlag(HValue::kLeftCanBeMinInt)) {
+ // Note that we could emit branch-free code, but that would need one more
+ // register.
+ if (divisor == -1) {
+ DeoptimizeIf(vs, instr->environment());
+ } else {
+ __ B(vc, &not_kmin_int);
+ __ Mov(result, kMinInt / divisor);
+ __ B(&done);
+ }
+ }
+ __ bind(&not_kmin_int);
+ __ Mov(result, Operand(dividend, ASR, shift));
+ __ bind(&done);
+}
+
+
+void LCodeGen::DoFlooringDivByConstI(LFlooringDivByConstI* instr) {
+ Register dividend = ToRegister32(instr->dividend());
+ int32_t divisor = instr->divisor();
+ Register result = ToRegister32(instr->result());
+ ASSERT(!AreAliased(dividend, result));
+
+ if (divisor == 0) {
+ Deoptimize(instr->environment());
+ return;
+ }
+
+ // Check for (0 / -x) that will produce negative zero.
+ HMathFloorOfDiv* hdiv = instr->hydrogen();
+ if (hdiv->CheckFlag(HValue::kBailoutOnMinusZero) && divisor < 0) {
+ __ Cmp(dividend, 0);
+ DeoptimizeIf(eq, instr->environment());
+ }
+
+ // Easy case: We need no dynamic check for the dividend and the flooring
+ // division is the same as the truncating division.
+ if ((divisor > 0 && !hdiv->CheckFlag(HValue::kLeftCanBeNegative)) ||
+ (divisor < 0 && !hdiv->CheckFlag(HValue::kLeftCanBePositive))) {
+ __ TruncatingDiv(result, dividend, Abs(divisor));
+ if (divisor < 0) __ Neg(result, result);
+ return;
+ }
+
+ // In the general case we may need to adjust before and after the truncating
+ // division to get a flooring division.
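+ // For example, with dividend == -1 and divisor == 3 the truncating division
+ // yields 0 while the floored result is -1: the dividend is first moved one
+ // step towards zero (-1 + 1 == 0), divided (0), then decremented to -1.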
+ Register temp = ToRegister32(instr->temp());
+ ASSERT(!AreAliased(temp, dividend, result));
+ Label needs_adjustment, done;
+ __ Cmp(dividend, 0);
+ __ B(divisor > 0 ? lt : gt, &needs_adjustment);
+ __ TruncatingDiv(result, dividend, Abs(divisor));
+ if (divisor < 0) __ Neg(result, result);
+ __ B(&done);
+ __ bind(&needs_adjustment);
+ __ Add(temp, dividend, Operand(divisor > 0 ? 1 : -1));
+ __ TruncatingDiv(result, temp, Abs(divisor));
+ if (divisor < 0) __ Neg(result, result);
+ __ Sub(result, result, Operand(1));
+ __ bind(&done);
+}
+
+
+void LCodeGen::DoFlooringDivI(LFlooringDivI* instr) {
+ Register dividend = ToRegister32(instr->dividend());
+ Register divisor = ToRegister32(instr->divisor());
+ Register remainder = ToRegister32(instr->temp());
+ Register result = ToRegister32(instr->result());
+
+ // Sdiv can't cause an exception, so we can speculatively execute it now and
+ // perform the deopt checks afterwards.
+ __ Sdiv(result, dividend, divisor);
+
+ // Check for x / 0.
+ DeoptimizeIfZero(divisor, instr->environment());
+
+ // Check for (kMinInt / -1).
+ if (instr->hydrogen()->CheckFlag(HValue::kCanOverflow)) {
+ // The V flag will be set iff dividend == kMinInt.
+ __ Cmp(dividend, 1);
+ __ Ccmp(divisor, -1, NoFlag, vs);
+ DeoptimizeIf(eq, instr->environment());
+ }
+
+ // Check for (0 / -x) that will produce negative zero.
+ if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) {
+ __ Cmp(divisor, 0);
+ __ Ccmp(dividend, 0, ZFlag, mi);
+ // "divisor" can't be zero here because that case has already been
+ // deoptimized. The Z flag is set only if (divisor < 0) and (dividend == 0).
+ // In this case we need to deoptimize to produce a -0.
+ DeoptimizeIf(eq, instr->environment());
+ }
+
+ Label done;
+ // If both operands have the same sign then we are done.
+ __ Eor(remainder, dividend, divisor);
+ __ Tbz(remainder, kWSignBit, &done);
+
+ // Check if the result needs to be corrected.
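+ // The truncated quotient is one too large exactly when the operands have
+ // opposite signs and the division was inexact. For example, -7 / 2 gives
+ // Sdiv == -3 and remainder == -1, so the result is adjusted to -4, which
+ // is floor(-3.5).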
+ __ Msub(remainder, result, divisor, dividend);
+ __ Cbz(remainder, &done);
+ __ Sub(result, result, 1);
+
+ __ Bind(&done);
+}
+
+
+void LCodeGen::DoMathLog(LMathLog* instr) {
+ ASSERT(instr->IsMarkedAsCall());
+ ASSERT(ToDoubleRegister(instr->value()).is(d0));
+ __ CallCFunction(ExternalReference::math_log_double_function(isolate()),
+ 0, 1);
+ ASSERT(ToDoubleRegister(instr->result()).Is(d0));
+}
+
+
+void LCodeGen::DoMathClz32(LMathClz32* instr) {
+ Register input = ToRegister32(instr->value());
+ Register result = ToRegister32(instr->result());
+ __ Clz(result, input);
+}
+
+
+void LCodeGen::DoMathPowHalf(LMathPowHalf* instr) {
+ DoubleRegister input = ToDoubleRegister(instr->value());
+ DoubleRegister result = ToDoubleRegister(instr->result());
+ Label done;
+
+ // Math.pow(x, 0.5) differs from fsqrt(x) in the following cases:
+ // Math.pow(-Infinity, 0.5) == +Infinity
+ // Math.pow(-0.0, 0.5) == +0.0
+
+ // Catch -infinity inputs first.
+ // TODO(jbramley): A constant infinity register would be helpful here.
+ __ Fmov(double_scratch(), kFP64NegativeInfinity);
+ __ Fcmp(double_scratch(), input);
+ __ Fabs(result, input);
+ __ B(&done, eq);
+
+ // Add +0.0 to convert -0.0 to +0.0.
+ __ Fadd(double_scratch(), input, fp_zero);
+ __ Fsqrt(result, double_scratch());
+
+ __ Bind(&done);
+}
+
+
+void LCodeGen::DoPower(LPower* instr) {
+ Representation exponent_type = instr->hydrogen()->right()->representation();
+ // Having marked this as a call, we can use any registers.
+ // Just make sure that the input/output registers are the expected ones.
+ ASSERT(!instr->right()->IsDoubleRegister() ||
+ ToDoubleRegister(instr->right()).is(d1));
+ ASSERT(exponent_type.IsInteger32() || !instr->right()->IsRegister() ||
+ ToRegister(instr->right()).is(x11));
+ ASSERT(!exponent_type.IsInteger32() || ToRegister(instr->right()).is(x12));
+ ASSERT(ToDoubleRegister(instr->left()).is(d0));
+ ASSERT(ToDoubleRegister(instr->result()).is(d0));
+
+ if (exponent_type.IsSmi()) {
+ MathPowStub stub(MathPowStub::TAGGED);
+ __ CallStub(&stub);
+ } else if (exponent_type.IsTagged()) {
+ Label no_deopt;
+ __ JumpIfSmi(x11, &no_deopt);
+ __ Ldr(x0, FieldMemOperand(x11, HeapObject::kMapOffset));
+ DeoptimizeIfNotRoot(x0, Heap::kHeapNumberMapRootIndex,
+ instr->environment());
+ __ Bind(&no_deopt);
+ MathPowStub stub(MathPowStub::TAGGED);
+ __ CallStub(&stub);
+ } else if (exponent_type.IsInteger32()) {
+ // Ensure integer exponent has no garbage in top 32-bits, as MathPowStub
+ // supports large integer exponents.
+ Register exponent = ToRegister(instr->right());
+ __ Sxtw(exponent, exponent);
+ MathPowStub stub(MathPowStub::INTEGER);
+ __ CallStub(&stub);
+ } else {
+ ASSERT(exponent_type.IsDouble());
+ MathPowStub stub(MathPowStub::DOUBLE);
+ __ CallStub(&stub);
+ }
+}
+
+
+void LCodeGen::DoMathRound(LMathRound* instr) {
+ // TODO(jbramley): We could provide a double result here using frint.
+ DoubleRegister input = ToDoubleRegister(instr->value());
+ DoubleRegister temp1 = ToDoubleRegister(instr->temp1());
+ Register result = ToRegister(instr->result());
+ Label try_rounding;
+ Label done;
+
+ // Math.round() rounds to the nearest integer, with ties going towards
+ // +infinity. This does not match any IEEE-754 rounding mode.
+ // - Infinities and NaNs are propagated unchanged, but cause deopts because
+ // they can't be represented as integers.
+ // - The sign of the result is the same as the sign of the input. This means
+ // that -0.0 rounds to itself, and values -0.5 <= input < 0 also produce a
+ // result of -0.0.
+
+ DoubleRegister dot_five = double_scratch();
+ __ Fmov(dot_five, 0.5);
+ __ Fabs(temp1, input);
+ __ Fcmp(temp1, dot_five);
+ // If input is in [-0.5, -0], the result is -0.
+ // If input is in [+0, +0.5[, the result is +0.
+ // If the input is +0.5, the result is 1.
+ __ B(hi, &try_rounding); // hi so NaN will also branch.
+
+ if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) {
+ __ Fmov(result, input);
+ DeoptimizeIfNegative(result, instr->environment()); // [-0.5, -0.0].
+ }
+ __ Fcmp(input, dot_five);
+ __ Mov(result, 1); // +0.5.
+ // The remaining cases, [+0, +0.5[ or [-0.5, +0.5[ depending on the
+ // kBailoutOnMinusZero flag, return 0 (xzr).
+ __ Csel(result, result, xzr, eq);
+ __ B(&done);
+
+ __ Bind(&try_rounding);
+ // Since we're providing a 32-bit result, we can implement ties-to-infinity by
+ // adding 0.5 to the input, then taking the floor of the result. This does not
+ // work for very large positive doubles because adding 0.5 would cause an
+ // intermediate rounding stage, so a different approach will be necessary if a
+ // double result is needed.
+ __ Fadd(temp1, input, dot_five);
+ __ Fcvtms(result, temp1);
+
+ // Deopt if
+ // * the input was NaN
+ // * the result is not representable using a 32-bit integer.
+ __ Fcmp(input, 0.0);
+ __ Ccmp(result, Operand(result.W(), SXTW), NoFlag, vc);
+ DeoptimizeIf(ne, instr->environment());
+
+ __ Bind(&done);
+}
+
+
+void LCodeGen::DoMathSqrt(LMathSqrt* instr) {
+ DoubleRegister input = ToDoubleRegister(instr->value());
+ DoubleRegister result = ToDoubleRegister(instr->result());
+ __ Fsqrt(result, input);
+}
+
+
+void LCodeGen::DoMathMinMax(LMathMinMax* instr) {
+ HMathMinMax::Operation op = instr->hydrogen()->operation();
+ if (instr->hydrogen()->representation().IsInteger32()) {
+ Register result = ToRegister32(instr->result());
+ Register left = ToRegister32(instr->left());
+ Operand right = ToOperand32I(instr->right());
+
+ __ Cmp(left, right);
+ __ Csel(result, left, right, (op == HMathMinMax::kMathMax) ? ge : le);
+ } else if (instr->hydrogen()->representation().IsSmi()) {
+ Register result = ToRegister(instr->result());
+ Register left = ToRegister(instr->left());
+ Operand right = ToOperand(instr->right());
+
+ __ Cmp(left, right);
+ __ Csel(result, left, right, (op == HMathMinMax::kMathMax) ? ge : le);
+ } else {
+ ASSERT(instr->hydrogen()->representation().IsDouble());
+ DoubleRegister result = ToDoubleRegister(instr->result());
+ DoubleRegister left = ToDoubleRegister(instr->left());
+ DoubleRegister right = ToDoubleRegister(instr->right());
+
+ if (op == HMathMinMax::kMathMax) {
+ __ Fmax(result, left, right);
+ } else {
+ ASSERT(op == HMathMinMax::kMathMin);
+ __ Fmin(result, left, right);
+ }
+ }
+}
+
+
+void LCodeGen::DoModByPowerOf2I(LModByPowerOf2I* instr) {
+ Register dividend = ToRegister32(instr->dividend());
+ int32_t divisor = instr->divisor();
+ ASSERT(dividend.is(ToRegister32(instr->result())));
+
+ // Theoretically, a variation of the branch-free code for integer division by
+ // a power of 2 (calculating the remainder via an additional multiplication
+ // (which gets simplified to an 'and') and subtraction) should be faster, and
+ // this is exactly what GCC and clang emit. Nevertheless, benchmarks seem to
+ // indicate that positive dividends are heavily favored, so the branching
+ // version performs better.
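+ // JavaScript's % takes the sign of the dividend, so a negative dividend is
+ // negated, masked with (|divisor| - 1), and negated back. For example,
+ // -7 % 4: negate to 7, mask with 3 to get 3, negate back to -3.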
+ HMod* hmod = instr->hydrogen();
+ int32_t mask = divisor < 0 ? -(divisor + 1) : (divisor - 1);
+ Label dividend_is_not_negative, done;
+ if (hmod->CheckFlag(HValue::kLeftCanBeNegative)) {
+ __ Cmp(dividend, 0);
+ __ B(pl, &dividend_is_not_negative);
+ // Note that this is correct even for kMinInt operands.
+ __ Neg(dividend, dividend);
+ __ And(dividend, dividend, mask);
+ __ Negs(dividend, dividend);
+ if (hmod->CheckFlag(HValue::kBailoutOnMinusZero)) {
+ DeoptimizeIf(eq, instr->environment());
+ }
+ __ B(&done);
+ }
+
+ __ bind(&dividend_is_not_negative);
+ __ And(dividend, dividend, mask);
+ __ bind(&done);
+}
+
+
+void LCodeGen::DoModByConstI(LModByConstI* instr) {
+ Register dividend = ToRegister32(instr->dividend());
+ int32_t divisor = instr->divisor();
+ Register result = ToRegister32(instr->result());
+ Register temp = ToRegister32(instr->temp());
+ ASSERT(!AreAliased(dividend, result, temp));
+
+ if (divisor == 0) {
+ Deoptimize(instr->environment());
+ return;
+ }
+
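+ // remainder = dividend - (dividend / |divisor|) * |divisor|: TruncatingDiv
+ // produces the truncated quotient and Smsubl then forms the remainder, which
+ // keeps the sign of the dividend as required by JavaScript's %.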
+ __ TruncatingDiv(result, dividend, Abs(divisor));
+ __ Sxtw(dividend.X(), dividend);
+ __ Mov(temp, Abs(divisor));
+ __ Smsubl(result.X(), result, temp, dividend.X());
+
+ // Check for negative zero.
+ HMod* hmod = instr->hydrogen();
+ if (hmod->CheckFlag(HValue::kBailoutOnMinusZero)) {
+ Label remainder_not_zero;
+ __ Cbnz(result, &remainder_not_zero);
+ DeoptimizeIfNegative(dividend, instr->environment());
+ __ bind(&remainder_not_zero);
+ }
+}
+
+
+void LCodeGen::DoModI(LModI* instr) {
+ Register dividend = ToRegister32(instr->left());
+ Register divisor = ToRegister32(instr->right());
+ Register result = ToRegister32(instr->result());
+
+ Label deopt, done;
+ // modulo = dividend - quotient * divisor
+ __ Sdiv(result, dividend, divisor);
+ if (instr->hydrogen()->CheckFlag(HValue::kCanBeDivByZero)) {
+ // Combine the deoptimization sites.
+ Label ok;
+ __ Cbnz(divisor, &ok);
+ __ Bind(&deopt);
+ Deoptimize(instr->environment());
+ __ Bind(&ok);
+ }
+ __ Msub(result, result, divisor, dividend);
+ if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) {
+ __ Cbnz(result, &done);
+ if (deopt.is_bound()) { // TODO(all) This is a hack, remove this...
+ __ Tbnz(dividend, kWSignBit, &deopt);
+ } else {
+ DeoptimizeIfNegative(dividend, instr->environment());
+ }
+ }
+ __ Bind(&done);
+}
+
+
+void LCodeGen::DoMulConstIS(LMulConstIS* instr) {
+ ASSERT(instr->hydrogen()->representation().IsSmiOrInteger32());
+ bool is_smi = instr->hydrogen()->representation().IsSmi();
+ Register result =
+ is_smi ? ToRegister(instr->result()) : ToRegister32(instr->result());
+ Register left =
+ is_smi ? ToRegister(instr->left()) : ToRegister32(instr->left()) ;
+ int32_t right = ToInteger32(instr->right());
+ ASSERT((right > -kMaxInt) || (right < kMaxInt));
+
+ bool can_overflow = instr->hydrogen()->CheckFlag(HValue::kCanOverflow);
+ bool bailout_on_minus_zero =
+ instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero);
+
+ if (bailout_on_minus_zero) {
+ if (right < 0) {
+ // The result is -0 if right is negative and left is zero.
+ DeoptimizeIfZero(left, instr->environment());
+ } else if (right == 0) {
+ // The result is -0 if the right is zero and the left is negative.
+ DeoptimizeIfNegative(left, instr->environment());
+ }
+ }
+
+ switch (right) {
+ // Cases which can detect overflow.
+ case -1:
+ if (can_overflow) {
+ // Only 0x80000000 can overflow here.
+ __ Negs(result, left);
+ DeoptimizeIf(vs, instr->environment());
+ } else {
+ __ Neg(result, left);
+ }
+ break;
+ case 0:
+ // This case can never overflow.
+ __ Mov(result, 0);
+ break;
+ case 1:
+ // This case can never overflow.
+ __ Mov(result, left, kDiscardForSameWReg);
+ break;
+ case 2:
+ if (can_overflow) {
+ __ Adds(result, left, left);
+ DeoptimizeIf(vs, instr->environment());
+ } else {
+ __ Add(result, left, left);
+ }
+ break;
+
+ default:
+ // Multiplication by constant powers of two (and some related values)
+ // can be done efficiently with shifted operands.
+ int32_t right_abs = Abs(right);
+
+ if (IsPowerOf2(right_abs)) {
+ int right_log2 = WhichPowerOf2(right_abs);
+
+ if (can_overflow) {
+ Register scratch = result;
+ ASSERT(!AreAliased(scratch, left));
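+ // Cls counts the leading bits equal to the sign bit (excluding the sign bit
+ // itself). A left shift by right_log2 overflows exactly when fewer than
+ // right_log2 such redundant sign bits are available, hence the deopt on lt.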
+ __ Cls(scratch, left);
+ __ Cmp(scratch, right_log2);
+ DeoptimizeIf(lt, instr->environment());
+ }
+
+ if (right >= 0) {
+ // result = left << log2(right)
+ __ Lsl(result, left, right_log2);
+ } else {
+ // result = -left << log2(-right)
+ if (can_overflow) {
+ __ Negs(result, Operand(left, LSL, right_log2));
+ DeoptimizeIf(vs, instr->environment());
+ } else {
+ __ Neg(result, Operand(left, LSL, right_log2));
+ }
+ }
+ return;
+ }
+
+
+ // For the following cases, we could perform a conservative overflow check
+ // with CLS as above. However, the few cycles saved are likely not worth
+ // the risk of deoptimizing more often than required.
+ ASSERT(!can_overflow);
+
+ if (right >= 0) {
+ if (IsPowerOf2(right - 1)) {
+ // result = left + left << log2(right - 1)
+ __ Add(result, left, Operand(left, LSL, WhichPowerOf2(right - 1)));
+ } else if (IsPowerOf2(right + 1)) {
+ // result = -left + left << log2(right + 1)
+ __ Sub(result, left, Operand(left, LSL, WhichPowerOf2(right + 1)));
+ __ Neg(result, result);
+ } else {
+ UNREACHABLE();
+ }
+ } else {
+ if (IsPowerOf2(-right + 1)) {
+ // result = left - left << log2(-right + 1)
+ __ Sub(result, left, Operand(left, LSL, WhichPowerOf2(-right + 1)));
+ } else if (IsPowerOf2(-right - 1)) {
+ // result = -left - left << log2(-right - 1)
+ __ Add(result, left, Operand(left, LSL, WhichPowerOf2(-right - 1)));
+ __ Neg(result, result);
+ } else {
+ UNREACHABLE();
+ }
+ }
+ }
+}
+
+
+void LCodeGen::DoMulI(LMulI* instr) {
+ Register result = ToRegister32(instr->result());
+ Register left = ToRegister32(instr->left());
+ Register right = ToRegister32(instr->right());
+
+ bool can_overflow = instr->hydrogen()->CheckFlag(HValue::kCanOverflow);
+ bool bailout_on_minus_zero =
+ instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero);
+
+ if (bailout_on_minus_zero && !left.Is(right)) {
+ // If one operand is zero and the other is negative, the result is -0.
+ // - Set Z (eq) if either left or right, or both, are 0.
+ __ Cmp(left, 0);
+ __ Ccmp(right, 0, ZFlag, ne);
+ // - If so (eq), set N (mi) if left + right is negative.
+ // - Otherwise, clear N.
+ __ Ccmn(left, right, NoFlag, eq);
+ DeoptimizeIf(mi, instr->environment());
+ }
+
+ if (can_overflow) {
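+    // Smull produces the exact 64-bit product; if it differs from its own
+    // sign-extended low 32 bits, the 32-bit multiplication overflowed.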
+ __ Smull(result.X(), left, right);
+ __ Cmp(result.X(), Operand(result, SXTW));
+ DeoptimizeIf(ne, instr->environment());
+ } else {
+ __ Mul(result, left, right);
+ }
+}
+
+
+void LCodeGen::DoMulS(LMulS* instr) {
+ Register result = ToRegister(instr->result());
+ Register left = ToRegister(instr->left());
+ Register right = ToRegister(instr->right());
+
+ bool can_overflow = instr->hydrogen()->CheckFlag(HValue::kCanOverflow);
+ bool bailout_on_minus_zero =
+ instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero);
+
+ if (bailout_on_minus_zero && !left.Is(right)) {
+ // If one operand is zero and the other is negative, the result is -0.
+ // - Set Z (eq) if either left or right, or both, are 0.
+ __ Cmp(left, 0);
+ __ Ccmp(right, 0, ZFlag, ne);
+ // - If so (eq), set N (mi) if left + right is negative.
+ // - Otherwise, clear N.
+ __ Ccmn(left, right, NoFlag, eq);
+ DeoptimizeIf(mi, instr->environment());
+ }
+
+ STATIC_ASSERT((kSmiShift == 32) && (kSmiTag == 0));
+ if (can_overflow) {
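+    // Both operands are tagged smis (value << 32), so the high 64 bits of the
+    // 128-bit product computed by Smulh hold the untagged product. The SXTW
+    // comparison checks that it fits in 32 bits; otherwise we deoptimize.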
+ __ Smulh(result, left, right);
+ __ Cmp(result, Operand(result.W(), SXTW));
+ __ SmiTag(result);
+ DeoptimizeIf(ne, instr->environment());
+ } else {
+ if (AreAliased(result, left, right)) {
+ // All three registers are the same: half untag the input and then
+ // multiply, giving a tagged result.
+ STATIC_ASSERT((kSmiShift % 2) == 0);
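+      // With kSmiShift == 32, the tagged input is (value << 32). Shifting it
+      // right by 16 and squaring gives (value * value) << 32, which is the
+      // correctly tagged square.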
+ __ Asr(result, left, kSmiShift / 2);
+ __ Mul(result, result, result);
+ } else if (result.Is(left) && !left.Is(right)) {
+ // Registers result and left alias, right is distinct: untag left into
+ // result, and then multiply by right, giving a tagged result.
+ __ SmiUntag(result, left);
+ __ Mul(result, result, right);
+ } else {
+ ASSERT(!left.Is(result));
+ // Registers result and right alias, left is distinct, or all registers
+ // are distinct: untag right into result, and then multiply by left,
+ // giving a tagged result.
+ __ SmiUntag(result, right);
+ __ Mul(result, left, result);
+ }
+ }
+}
+
+
+void LCodeGen::DoDeferredNumberTagD(LNumberTagD* instr) {
+ // TODO(3095996): Get rid of this. For now, we need to make the
+ // result register contain a valid pointer because it is already
+ // contained in the register pointer map.
+ Register result = ToRegister(instr->result());
+ __ Mov(result, 0);
+
+ PushSafepointRegistersScope scope(this, Safepoint::kWithRegisters);
+ // NumberTagU and NumberTagD use the context from the frame, rather than
+ // the environment's HContext or HInlinedContext value.
+ // They only call Runtime::kHiddenAllocateHeapNumber.
+ // The corresponding HChange instructions are added in a phase that does
+ // not have easy access to the local context.
+ __ Ldr(cp, MemOperand(fp, StandardFrameConstants::kContextOffset));
+ __ CallRuntimeSaveDoubles(Runtime::kHiddenAllocateHeapNumber);
+ RecordSafepointWithRegisters(
+ instr->pointer_map(), 0, Safepoint::kNoLazyDeopt);
+ __ StoreToSafepointRegisterSlot(x0, result);
+}
+
+
+void LCodeGen::DoNumberTagD(LNumberTagD* instr) {
+ class DeferredNumberTagD: public LDeferredCode {
+ public:
+ DeferredNumberTagD(LCodeGen* codegen, LNumberTagD* instr)
+ : LDeferredCode(codegen), instr_(instr) { }
+ virtual void Generate() { codegen()->DoDeferredNumberTagD(instr_); }
+ virtual LInstruction* instr() { return instr_; }
+ private:
+ LNumberTagD* instr_;
+ };
+
+ DoubleRegister input = ToDoubleRegister(instr->value());
+ Register result = ToRegister(instr->result());
+ Register temp1 = ToRegister(instr->temp1());
+ Register temp2 = ToRegister(instr->temp2());
+
+ DeferredNumberTagD* deferred = new(zone()) DeferredNumberTagD(this, instr);
+ if (FLAG_inline_new) {
+ __ AllocateHeapNumber(result, deferred->entry(), temp1, temp2);
+ } else {
+ __ B(deferred->entry());
+ }
+
+ __ Bind(deferred->exit());
+ __ Str(input, FieldMemOperand(result, HeapNumber::kValueOffset));
+}
+
+
+void LCodeGen::DoDeferredNumberTagU(LInstruction* instr,
+ LOperand* value,
+ LOperand* temp1,
+ LOperand* temp2) {
+ Label slow, convert_and_store;
+ Register src = ToRegister32(value);
+ Register dst = ToRegister(instr->result());
+ Register scratch1 = ToRegister(temp1);
+
+ if (FLAG_inline_new) {
+ Register scratch2 = ToRegister(temp2);
+ __ AllocateHeapNumber(dst, &slow, scratch1, scratch2);
+ __ B(&convert_and_store);
+ }
+
+ // Slow case: call the runtime system to do the number allocation.
+ __ Bind(&slow);
+ // TODO(3095996): Put a valid pointer value in the stack slot where the result
+ // register is stored, as this register is in the pointer map, but contains an
+ // integer value.
+ __ Mov(dst, 0);
+ {
+ // Preserve the value of all registers.
+ PushSafepointRegistersScope scope(this, Safepoint::kWithRegisters);
+
+ // NumberTagU and NumberTagD use the context from the frame, rather than
+ // the environment's HContext or HInlinedContext value.
+ // They only call Runtime::kHiddenAllocateHeapNumber.
+ // The corresponding HChange instructions are added in a phase that does
+ // not have easy access to the local context.
+ __ Ldr(cp, MemOperand(fp, StandardFrameConstants::kContextOffset));
+ __ CallRuntimeSaveDoubles(Runtime::kHiddenAllocateHeapNumber);
+ RecordSafepointWithRegisters(
+ instr->pointer_map(), 0, Safepoint::kNoLazyDeopt);
+ __ StoreToSafepointRegisterSlot(x0, dst);
+ }
+
+ // Convert number to floating point and store in the newly allocated heap
+ // number.
+ __ Bind(&convert_and_store);
+ DoubleRegister dbl_scratch = double_scratch();
+ __ Ucvtf(dbl_scratch, src);
+ __ Str(dbl_scratch, FieldMemOperand(dst, HeapNumber::kValueOffset));
+}
+
+
+void LCodeGen::DoNumberTagU(LNumberTagU* instr) {
+ class DeferredNumberTagU: public LDeferredCode {
+ public:
+ DeferredNumberTagU(LCodeGen* codegen, LNumberTagU* instr)
+ : LDeferredCode(codegen), instr_(instr) { }
+ virtual void Generate() {
+ codegen()->DoDeferredNumberTagU(instr_,
+ instr_->value(),
+ instr_->temp1(),
+ instr_->temp2());
+ }
+ virtual LInstruction* instr() { return instr_; }
+ private:
+ LNumberTagU* instr_;
+ };
+
+ Register value = ToRegister32(instr->value());
+ Register result = ToRegister(instr->result());
+
+ DeferredNumberTagU* deferred = new(zone()) DeferredNumberTagU(this, instr);
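+  // Untagged uint32 values above Smi::kMaxValue cannot be represented as a
+  // smi; box them as heap numbers in the deferred code instead.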
+ __ Cmp(value, Smi::kMaxValue);
+ __ B(hi, deferred->entry());
+ __ SmiTag(result, value.X());
+ __ Bind(deferred->exit());
+}
+
+
+void LCodeGen::DoNumberUntagD(LNumberUntagD* instr) {
+ Register input = ToRegister(instr->value());
+ Register scratch = ToRegister(instr->temp());
+ DoubleRegister result = ToDoubleRegister(instr->result());
+ bool can_convert_undefined_to_nan =
+ instr->hydrogen()->can_convert_undefined_to_nan();
+
+ Label done, load_smi;
+
+ // Work out what untag mode we're working with.
+ HValue* value = instr->hydrogen()->value();
+ NumberUntagDMode mode = value->representation().IsSmi()
+ ? NUMBER_CANDIDATE_IS_SMI : NUMBER_CANDIDATE_IS_ANY_TAGGED;
+
+ if (mode == NUMBER_CANDIDATE_IS_ANY_TAGGED) {
+ __ JumpIfSmi(input, &load_smi);
+
+ Label convert_undefined;
+
+ // Heap number map check.
+ __ Ldr(scratch, FieldMemOperand(input, HeapObject::kMapOffset));
+ if (can_convert_undefined_to_nan) {
+ __ JumpIfNotRoot(scratch, Heap::kHeapNumberMapRootIndex,
+ &convert_undefined);
+ } else {
+ DeoptimizeIfNotRoot(scratch, Heap::kHeapNumberMapRootIndex,
+ instr->environment());
+ }
+
+ // Load heap number.
+ __ Ldr(result, FieldMemOperand(input, HeapNumber::kValueOffset));
+ if (instr->hydrogen()->deoptimize_on_minus_zero()) {
+ DeoptimizeIfMinusZero(result, instr->environment());
+ }
+ __ B(&done);
+
+ if (can_convert_undefined_to_nan) {
+ __ Bind(&convert_undefined);
+ DeoptimizeIfNotRoot(input, Heap::kUndefinedValueRootIndex,
+ instr->environment());
+
+ __ LoadRoot(scratch, Heap::kNanValueRootIndex);
+ __ Ldr(result, FieldMemOperand(scratch, HeapNumber::kValueOffset));
+ __ B(&done);
+ }
+
+ } else {
+ ASSERT(mode == NUMBER_CANDIDATE_IS_SMI);
+ // Fall through to load_smi.
+ }
+
+ // Smi to double register conversion.
+ __ Bind(&load_smi);
+ __ SmiUntagToDouble(result, input);
+
+ __ Bind(&done);
+}
+
+
+void LCodeGen::DoOsrEntry(LOsrEntry* instr) {
+ // This is a pseudo-instruction that ensures that the environment here is
+ // properly registered for deoptimization and records the assembler's PC
+ // offset.
+ LEnvironment* environment = instr->environment();
+
+ // If the environment were already registered, we would have no way of
+ // backpatching it with the spill slot operands.
+ ASSERT(!environment->HasBeenRegistered());
+ RegisterEnvironmentForDeoptimization(environment, Safepoint::kNoLazyDeopt);
+
+ GenerateOsrPrologue();
+}
+
+
+void LCodeGen::DoParameter(LParameter* instr) {
+ // Nothing to do.
+}
+
+
+void LCodeGen::DoPushArgument(LPushArgument* instr) {
+ LOperand* argument = instr->value();
+ if (argument->IsDoubleRegister() || argument->IsDoubleStackSlot()) {
+ Abort(kDoPushArgumentNotImplementedForDoubleType);
+ } else {
+ __ Push(ToRegister(argument));
+ }
+}
+
+
+void LCodeGen::DoReturn(LReturn* instr) {
+ if (FLAG_trace && info()->IsOptimizing()) {
+ // Push the return value on the stack as the parameter.
+    // Runtime::TraceExit returns its parameter in x0. Since we're leaving the
+    // code managed by the register allocator and tearing down the frame, it's
+    // safe to write to the context register.
+ __ Push(x0);
+ __ Ldr(cp, MemOperand(fp, StandardFrameConstants::kContextOffset));
+ __ CallRuntime(Runtime::kTraceExit, 1);
+ }
+
+ if (info()->saves_caller_doubles()) {
+ RestoreCallerDoubles();
+ }
+
+ int no_frame_start = -1;
+ if (NeedsEagerFrame()) {
+ Register stack_pointer = masm()->StackPointer();
+ __ Mov(stack_pointer, fp);
+ no_frame_start = masm_->pc_offset();
+ __ Pop(fp, lr);
+ }
+
+ if (instr->has_constant_parameter_count()) {
+ int parameter_count = ToInteger32(instr->constant_parameter_count());
+ __ Drop(parameter_count + 1);
+ } else {
+ Register parameter_count = ToRegister(instr->parameter_count());
+ __ DropBySMI(parameter_count);
+ }
+ __ Ret();
+
+ if (no_frame_start != -1) {
+ info_->AddNoFrameRange(no_frame_start, masm_->pc_offset());
+ }
+}
+
+
+MemOperand LCodeGen::BuildSeqStringOperand(Register string,
+ Register temp,
+ LOperand* index,
+ String::Encoding encoding) {
+ if (index->IsConstantOperand()) {
+ int offset = ToInteger32(LConstantOperand::cast(index));
+ if (encoding == String::TWO_BYTE_ENCODING) {
+ offset *= kUC16Size;
+ }
+ STATIC_ASSERT(kCharSize == 1);
+ return FieldMemOperand(string, SeqString::kHeaderSize + offset);
+ }
+
+ if (encoding == String::ONE_BYTE_ENCODING) {
+ __ Add(temp, string, Operand(ToRegister32(index), SXTW));
+ } else {
+ STATIC_ASSERT(kUC16Size == 2);
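+    // Each two-byte character occupies kUC16Size (2) bytes, so the
+    // sign-extended index is scaled by a left shift of 1 when added.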
+ __ Add(temp, string, Operand(ToRegister32(index), SXTW, 1));
+ }
+ return FieldMemOperand(temp, SeqString::kHeaderSize);
+}
+
+
+void LCodeGen::DoSeqStringGetChar(LSeqStringGetChar* instr) {
+ String::Encoding encoding = instr->hydrogen()->encoding();
+ Register string = ToRegister(instr->string());
+ Register result = ToRegister(instr->result());
+ Register temp = ToRegister(instr->temp());
+
+ if (FLAG_debug_code) {
+ // Even though this lithium instruction comes with a temp register, we
+ // can't use it here because we want to use "AtStart" constraints on the
+ // inputs and the debug code here needs a scratch register.
+ UseScratchRegisterScope temps(masm());
+ Register dbg_temp = temps.AcquireX();
+
+ __ Ldr(dbg_temp, FieldMemOperand(string, HeapObject::kMapOffset));
+ __ Ldrb(dbg_temp, FieldMemOperand(dbg_temp, Map::kInstanceTypeOffset));
+
+ __ And(dbg_temp, dbg_temp,
+ Operand(kStringRepresentationMask | kStringEncodingMask));
+ static const uint32_t one_byte_seq_type = kSeqStringTag | kOneByteStringTag;
+ static const uint32_t two_byte_seq_type = kSeqStringTag | kTwoByteStringTag;
+ __ Cmp(dbg_temp, Operand(encoding == String::ONE_BYTE_ENCODING
+ ? one_byte_seq_type : two_byte_seq_type));
+ __ Check(eq, kUnexpectedStringType);
+ }
+
+ MemOperand operand =
+ BuildSeqStringOperand(string, temp, instr->index(), encoding);
+ if (encoding == String::ONE_BYTE_ENCODING) {
+ __ Ldrb(result, operand);
+ } else {
+ __ Ldrh(result, operand);
+ }
+}
+
+
+void LCodeGen::DoSeqStringSetChar(LSeqStringSetChar* instr) {
+ String::Encoding encoding = instr->hydrogen()->encoding();
+ Register string = ToRegister(instr->string());
+ Register value = ToRegister(instr->value());
+ Register temp = ToRegister(instr->temp());
+
+ if (FLAG_debug_code) {
+ ASSERT(ToRegister(instr->context()).is(cp));
+ Register index = ToRegister(instr->index());
+ static const uint32_t one_byte_seq_type = kSeqStringTag | kOneByteStringTag;
+ static const uint32_t two_byte_seq_type = kSeqStringTag | kTwoByteStringTag;
+ int encoding_mask =
+ instr->hydrogen()->encoding() == String::ONE_BYTE_ENCODING
+ ? one_byte_seq_type : two_byte_seq_type;
+ __ EmitSeqStringSetCharCheck(string, index, kIndexIsInteger32, temp,
+ encoding_mask);
+ }
+ MemOperand operand =
+ BuildSeqStringOperand(string, temp, instr->index(), encoding);
+ if (encoding == String::ONE_BYTE_ENCODING) {
+ __ Strb(value, operand);
+ } else {
+ __ Strh(value, operand);
+ }
+}
+
+
+void LCodeGen::DoSmiTag(LSmiTag* instr) {
+ HChange* hchange = instr->hydrogen();
+ Register input = ToRegister(instr->value());
+ Register output = ToRegister(instr->result());
+ if (hchange->CheckFlag(HValue::kCanOverflow) &&
+ hchange->value()->CheckFlag(HValue::kUint32)) {
+ DeoptimizeIfNegative(input.W(), instr->environment());
+ }
+ __ SmiTag(output, input);
+}
+
+
+void LCodeGen::DoSmiUntag(LSmiUntag* instr) {
+ Register input = ToRegister(instr->value());
+ Register result = ToRegister(instr->result());
+ Label done, untag;
+
+ if (instr->needs_check()) {
+ DeoptimizeIfNotSmi(input, instr->environment());
+ }
+
+ __ Bind(&untag);
+ __ SmiUntag(result, input);
+ __ Bind(&done);
+}
+
+
+void LCodeGen::DoShiftI(LShiftI* instr) {
+ LOperand* right_op = instr->right();
+ Register left = ToRegister32(instr->left());
+ Register result = ToRegister32(instr->result());
+
+ if (right_op->IsRegister()) {
+ Register right = ToRegister32(instr->right());
+ switch (instr->op()) {
+ case Token::ROR: __ Ror(result, left, right); break;
+ case Token::SAR: __ Asr(result, left, right); break;
+ case Token::SHL: __ Lsl(result, left, right); break;
+ case Token::SHR:
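+        // SHR produces an unsigned result. Shifting by zero leaves the value
+        // unchanged, so a negative input cannot be represented; when
+        // deoptimization is allowed, check for that case.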
+ if (instr->can_deopt()) {
+ Label right_not_zero;
+ __ Cbnz(right, &right_not_zero);
+ DeoptimizeIfNegative(left, instr->environment());
+ __ Bind(&right_not_zero);
+ }
+ __ Lsr(result, left, right);
+ break;
+ default: UNREACHABLE();
+ }
+ } else {
+ ASSERT(right_op->IsConstantOperand());
+ int shift_count = ToInteger32(LConstantOperand::cast(right_op)) & 0x1f;
+ if (shift_count == 0) {
+ if ((instr->op() == Token::SHR) && instr->can_deopt()) {
+ DeoptimizeIfNegative(left, instr->environment());
+ }
+ __ Mov(result, left, kDiscardForSameWReg);
+ } else {
+ switch (instr->op()) {
+ case Token::ROR: __ Ror(result, left, shift_count); break;
+ case Token::SAR: __ Asr(result, left, shift_count); break;
+ case Token::SHL: __ Lsl(result, left, shift_count); break;
+ case Token::SHR: __ Lsr(result, left, shift_count); break;
+ default: UNREACHABLE();
+ }
+ }
+ }
+}
+
+
+void LCodeGen::DoShiftS(LShiftS* instr) {
+ LOperand* right_op = instr->right();
+ Register left = ToRegister(instr->left());
+ Register result = ToRegister(instr->result());
+
+ // Only ROR by register needs a temp.
+ ASSERT(((instr->op() == Token::ROR) && right_op->IsRegister()) ||
+ (instr->temp() == NULL));
+
+ if (right_op->IsRegister()) {
+ Register right = ToRegister(instr->right());
+ switch (instr->op()) {
+ case Token::ROR: {
+ Register temp = ToRegister(instr->temp());
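+        // The rotate amount is the low five bits of the untagged right
+        // operand; with kSmiShift == 32 they sit in bits 32-36 of the tagged
+        // smi, and Ubfx extracts them directly.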
+ __ Ubfx(temp, right, kSmiShift, 5);
+ __ SmiUntag(result, left);
+ __ Ror(result.W(), result.W(), temp.W());
+ __ SmiTag(result);
+ break;
+ }
+ case Token::SAR:
+ __ Ubfx(result, right, kSmiShift, 5);
+ __ Asr(result, left, result);
+ __ Bic(result, result, kSmiShiftMask);
+ break;
+ case Token::SHL:
+ __ Ubfx(result, right, kSmiShift, 5);
+ __ Lsl(result, left, result);
+ break;
+ case Token::SHR:
+ if (instr->can_deopt()) {
+ Label right_not_zero;
+ __ Cbnz(right, &right_not_zero);
+ DeoptimizeIfNegative(left, instr->environment());
+ __ Bind(&right_not_zero);
+ }
+ __ Ubfx(result, right, kSmiShift, 5);
+ __ Lsr(result, left, result);
+ __ Bic(result, result, kSmiShiftMask);
+ break;
+ default: UNREACHABLE();
+ }
+ } else {
+ ASSERT(right_op->IsConstantOperand());
+ int shift_count = ToInteger32(LConstantOperand::cast(right_op)) & 0x1f;
+ if (shift_count == 0) {
+ if ((instr->op() == Token::SHR) && instr->can_deopt()) {
+ DeoptimizeIfNegative(left, instr->environment());
+ }
+ __ Mov(result, left);
+ } else {
+ switch (instr->op()) {
+ case Token::ROR:
+ __ SmiUntag(result, left);
+ __ Ror(result.W(), result.W(), shift_count);
+ __ SmiTag(result);
+ break;
+ case Token::SAR:
+ __ Asr(result, left, shift_count);
+ __ Bic(result, result, kSmiShiftMask);
+ break;
+ case Token::SHL:
+ __ Lsl(result, left, shift_count);
+ break;
+ case Token::SHR:
+ __ Lsr(result, left, shift_count);
+ __ Bic(result, result, kSmiShiftMask);
+ break;
+ default: UNREACHABLE();
+ }
+ }
+ }
+}
+
+
+void LCodeGen::DoDebugBreak(LDebugBreak* instr) {
+ __ Debug("LDebugBreak", 0, BREAK);
+}
+
+
+void LCodeGen::DoDeclareGlobals(LDeclareGlobals* instr) {
+ ASSERT(ToRegister(instr->context()).is(cp));
+ Register scratch1 = x5;
+ Register scratch2 = x6;
+ ASSERT(instr->IsMarkedAsCall());
+
+ ASM_UNIMPLEMENTED_BREAK("DoDeclareGlobals");
+  // TODO(all): if Mov could handle objects in new space, then it could be
+  // used here.
+ __ LoadHeapObject(scratch1, instr->hydrogen()->pairs());
+ __ Mov(scratch2, Smi::FromInt(instr->hydrogen()->flags()));
+ __ Push(cp, scratch1, scratch2); // The context is the first argument.
+ CallRuntime(Runtime::kHiddenDeclareGlobals, 3, instr);
+}
+
+
+void LCodeGen::DoDeferredStackCheck(LStackCheck* instr) {
+ PushSafepointRegistersScope scope(this, Safepoint::kWithRegisters);
+ LoadContextFromDeferred(instr->context());
+ __ CallRuntimeSaveDoubles(Runtime::kHiddenStackGuard);
+ RecordSafepointWithLazyDeopt(
+ instr, RECORD_SAFEPOINT_WITH_REGISTERS_AND_NO_ARGUMENTS);
+ ASSERT(instr->HasEnvironment());
+ LEnvironment* env = instr->environment();
+ safepoints_.RecordLazyDeoptimizationIndex(env->deoptimization_index());
+}
+
+
+void LCodeGen::DoStackCheck(LStackCheck* instr) {
+ class DeferredStackCheck: public LDeferredCode {
+ public:
+ DeferredStackCheck(LCodeGen* codegen, LStackCheck* instr)
+ : LDeferredCode(codegen), instr_(instr) { }
+ virtual void Generate() { codegen()->DoDeferredStackCheck(instr_); }
+ virtual LInstruction* instr() { return instr_; }
+ private:
+ LStackCheck* instr_;
+ };
+
+ ASSERT(instr->HasEnvironment());
+ LEnvironment* env = instr->environment();
+ // There is no LLazyBailout instruction for stack-checks. We have to
+ // prepare for lazy deoptimization explicitly here.
+ if (instr->hydrogen()->is_function_entry()) {
+ // Perform stack overflow check.
+ Label done;
+ __ CompareRoot(masm()->StackPointer(), Heap::kStackLimitRootIndex);
+ __ B(hs, &done);
+
+ PredictableCodeSizeScope predictable(masm_,
+ Assembler::kCallSizeWithRelocation);
+ ASSERT(instr->context()->IsRegister());
+ ASSERT(ToRegister(instr->context()).is(cp));
+ CallCode(isolate()->builtins()->StackCheck(),
+ RelocInfo::CODE_TARGET,
+ instr);
+ __ Bind(&done);
+ } else {
+ ASSERT(instr->hydrogen()->is_backwards_branch());
+ // Perform stack overflow check if this goto needs it before jumping.
+ DeferredStackCheck* deferred_stack_check =
+ new(zone()) DeferredStackCheck(this, instr);
+ __ CompareRoot(masm()->StackPointer(), Heap::kStackLimitRootIndex);
+ __ B(lo, deferred_stack_check->entry());
+
+ EnsureSpaceForLazyDeopt(Deoptimizer::patch_size());
+ __ Bind(instr->done_label());
+ deferred_stack_check->SetExit(instr->done_label());
+ RegisterEnvironmentForDeoptimization(env, Safepoint::kLazyDeopt);
+ // Don't record a deoptimization index for the safepoint here.
+    // This will be done explicitly when emitting the call and the safepoint
+    // in the deferred code.
+ }
+}
+
+
+void LCodeGen::DoStoreCodeEntry(LStoreCodeEntry* instr) {
+ Register function = ToRegister(instr->function());
+ Register code_object = ToRegister(instr->code_object());
+ Register temp = ToRegister(instr->temp());
+ __ Add(temp, code_object, Code::kHeaderSize - kHeapObjectTag);
+ __ Str(temp, FieldMemOperand(function, JSFunction::kCodeEntryOffset));
+}
+
+
+void LCodeGen::DoStoreContextSlot(LStoreContextSlot* instr) {
+ Register context = ToRegister(instr->context());
+ Register value = ToRegister(instr->value());
+ Register scratch = ToRegister(instr->temp());
+ MemOperand target = ContextMemOperand(context, instr->slot_index());
+
+ Label skip_assignment;
+
+ if (instr->hydrogen()->RequiresHoleCheck()) {
+ __ Ldr(scratch, target);
+ if (instr->hydrogen()->DeoptimizesOnHole()) {
+ DeoptimizeIfRoot(scratch, Heap::kTheHoleValueRootIndex,
+ instr->environment());
+ } else {
+ __ JumpIfNotRoot(scratch, Heap::kTheHoleValueRootIndex, &skip_assignment);
+ }
+ }
+
+ __ Str(value, target);
+ if (instr->hydrogen()->NeedsWriteBarrier()) {
+ SmiCheck check_needed =
+ instr->hydrogen()->value()->IsHeapObject()
+ ? OMIT_SMI_CHECK : INLINE_SMI_CHECK;
+ __ RecordWriteContextSlot(context,
+ target.offset(),
+ value,
+ scratch,
+ GetLinkRegisterState(),
+ kSaveFPRegs,
+ EMIT_REMEMBERED_SET,
+ check_needed);
+ }
+ __ Bind(&skip_assignment);
+}
+
+
+void LCodeGen::DoStoreGlobalCell(LStoreGlobalCell* instr) {
+ Register value = ToRegister(instr->value());
+ Register cell = ToRegister(instr->temp1());
+
+ // Load the cell.
+ __ Mov(cell, Operand(instr->hydrogen()->cell().handle()));
+
+  // If the cell we are storing to contains the hole, it could have
+ // been deleted from the property dictionary. In that case, we need
+ // to update the property details in the property dictionary to mark
+ // it as no longer deleted. We deoptimize in that case.
+ if (instr->hydrogen()->RequiresHoleCheck()) {
+ Register payload = ToRegister(instr->temp2());
+ __ Ldr(payload, FieldMemOperand(cell, Cell::kValueOffset));
+ DeoptimizeIfRoot(
+ payload, Heap::kTheHoleValueRootIndex, instr->environment());
+ }
+
+ // Store the value.
+ __ Str(value, FieldMemOperand(cell, Cell::kValueOffset));
+ // Cells are always rescanned, so no write barrier here.
+}
+
+
+void LCodeGen::DoStoreKeyedExternal(LStoreKeyedExternal* instr) {
+ Register ext_ptr = ToRegister(instr->elements());
+ Register key = no_reg;
+ Register scratch;
+ ElementsKind elements_kind = instr->elements_kind();
+
+ bool key_is_smi = instr->hydrogen()->key()->representation().IsSmi();
+ bool key_is_constant = instr->key()->IsConstantOperand();
+ int constant_key = 0;
+ if (key_is_constant) {
+ ASSERT(instr->temp() == NULL);
+ constant_key = ToInteger32(LConstantOperand::cast(instr->key()));
+ if (constant_key & 0xf0000000) {
+ Abort(kArrayIndexConstantValueTooBig);
+ }
+ } else {
+ key = ToRegister(instr->key());
+ scratch = ToRegister(instr->temp());
+ }
+
+ MemOperand dst =
+ PrepareKeyedExternalArrayOperand(key, ext_ptr, scratch, key_is_smi,
+ key_is_constant, constant_key,
+ elements_kind,
+ instr->additional_index());
+
+ if ((elements_kind == EXTERNAL_FLOAT32_ELEMENTS) ||
+ (elements_kind == FLOAT32_ELEMENTS)) {
+ DoubleRegister value = ToDoubleRegister(instr->value());
+ DoubleRegister dbl_scratch = double_scratch();
+ __ Fcvt(dbl_scratch.S(), value);
+ __ Str(dbl_scratch.S(), dst);
+ } else if ((elements_kind == EXTERNAL_FLOAT64_ELEMENTS) ||
+ (elements_kind == FLOAT64_ELEMENTS)) {
+ DoubleRegister value = ToDoubleRegister(instr->value());
+ __ Str(value, dst);
+ } else {
+ Register value = ToRegister(instr->value());
+
+ switch (elements_kind) {
+ case EXTERNAL_UINT8_CLAMPED_ELEMENTS:
+ case EXTERNAL_INT8_ELEMENTS:
+ case EXTERNAL_UINT8_ELEMENTS:
+ case UINT8_ELEMENTS:
+ case UINT8_CLAMPED_ELEMENTS:
+ case INT8_ELEMENTS:
+ __ Strb(value, dst);
+ break;
+ case EXTERNAL_INT16_ELEMENTS:
+ case EXTERNAL_UINT16_ELEMENTS:
+ case INT16_ELEMENTS:
+ case UINT16_ELEMENTS:
+ __ Strh(value, dst);
+ break;
+ case EXTERNAL_INT32_ELEMENTS:
+ case EXTERNAL_UINT32_ELEMENTS:
+ case INT32_ELEMENTS:
+ case UINT32_ELEMENTS:
+ __ Str(value.W(), dst);
+ break;
+ case FLOAT32_ELEMENTS:
+ case FLOAT64_ELEMENTS:
+ case EXTERNAL_FLOAT32_ELEMENTS:
+ case EXTERNAL_FLOAT64_ELEMENTS:
+ case FAST_DOUBLE_ELEMENTS:
+ case FAST_ELEMENTS:
+ case FAST_SMI_ELEMENTS:
+ case FAST_HOLEY_DOUBLE_ELEMENTS:
+ case FAST_HOLEY_ELEMENTS:
+ case FAST_HOLEY_SMI_ELEMENTS:
+ case DICTIONARY_ELEMENTS:
+ case SLOPPY_ARGUMENTS_ELEMENTS:
+ UNREACHABLE();
+ break;
+ }
+ }
+}
+
+
+void LCodeGen::DoStoreKeyedFixedDouble(LStoreKeyedFixedDouble* instr) {
+ Register elements = ToRegister(instr->elements());
+ DoubleRegister value = ToDoubleRegister(instr->value());
+ Register store_base = no_reg;
+ int offset = 0;
+
+ if (instr->key()->IsConstantOperand()) {
+ int constant_key = ToInteger32(LConstantOperand::cast(instr->key()));
+ if (constant_key & 0xf0000000) {
+ Abort(kArrayIndexConstantValueTooBig);
+ }
+ offset = FixedDoubleArray::OffsetOfElementAt(constant_key +
+ instr->additional_index());
+ store_base = elements;
+ } else {
+ store_base = ToRegister(instr->temp());
+ Register key = ToRegister(instr->key());
+ bool key_is_tagged = instr->hydrogen()->key()->representation().IsSmi();
+ CalcKeyedArrayBaseRegister(store_base, elements, key, key_is_tagged,
+ instr->hydrogen()->elements_kind());
+ offset = FixedDoubleArray::OffsetOfElementAt(instr->additional_index());
+ }
+
+ if (instr->NeedsCanonicalization()) {
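+    // Canonicalize NaNs (in particular the hole NaN) before storing: Fmaxnm
+    // with the canonical NaN constant passes ordinary numbers through
+    // unchanged and replaces NaN inputs with a canonical NaN.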
+ DoubleRegister dbl_scratch = double_scratch();
+ __ Fmov(dbl_scratch,
+ FixedDoubleArray::canonical_not_the_hole_nan_as_double());
+ __ Fmaxnm(dbl_scratch, dbl_scratch, value);
+ __ Str(dbl_scratch, FieldMemOperand(store_base, offset));
+ } else {
+ __ Str(value, FieldMemOperand(store_base, offset));
+ }
+}
+
+
+void LCodeGen::DoStoreKeyedFixed(LStoreKeyedFixed* instr) {
+ Register value = ToRegister(instr->value());
+ Register elements = ToRegister(instr->elements());
+ Register scratch = no_reg;
+ Register store_base = no_reg;
+ Register key = no_reg;
+ int offset = 0;
+
+ if (!instr->key()->IsConstantOperand() ||
+ instr->hydrogen()->NeedsWriteBarrier()) {
+ scratch = ToRegister(instr->temp());
+ }
+
+ if (instr->key()->IsConstantOperand()) {
+ LConstantOperand* const_operand = LConstantOperand::cast(instr->key());
+ offset = FixedArray::OffsetOfElementAt(ToInteger32(const_operand) +
+ instr->additional_index());
+ store_base = elements;
+ } else {
+ store_base = scratch;
+ key = ToRegister(instr->key());
+ bool key_is_tagged = instr->hydrogen()->key()->representation().IsSmi();
+ CalcKeyedArrayBaseRegister(store_base, elements, key, key_is_tagged,
+ instr->hydrogen()->elements_kind());
+ offset = FixedArray::OffsetOfElementAt(instr->additional_index());
+ }
+ Representation representation = instr->hydrogen()->value()->representation();
+ if (representation.IsInteger32()) {
+ ASSERT(instr->hydrogen()->store_mode() == STORE_TO_INITIALIZED_ENTRY);
+ ASSERT(instr->hydrogen()->elements_kind() == FAST_SMI_ELEMENTS);
+ STATIC_ASSERT(kSmiValueSize == 32 && kSmiShift == 32 && kSmiTag == 0);
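+    // The entry already holds a smi (STORE_TO_INITIALIZED_ENTRY), so its low
+    // 32 bits are zero; writing the untagged int32 over the smi's payload half
+    // of the field yields a correctly tagged smi without an explicit SmiTag.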
+ __ Store(value, UntagSmiFieldMemOperand(store_base, offset),
+ Representation::Integer32());
+ } else {
+ __ Store(value, FieldMemOperand(store_base, offset), representation);
+ }
+
+ if (instr->hydrogen()->NeedsWriteBarrier()) {
+ ASSERT(representation.IsTagged());
+ // This assignment may cause element_addr to alias store_base.
+ Register element_addr = scratch;
+ SmiCheck check_needed =
+ instr->hydrogen()->value()->IsHeapObject()
+ ? OMIT_SMI_CHECK : INLINE_SMI_CHECK;
+    // Compute the address of the modified element and store it in the
+    // scratch register.
+ __ Add(element_addr, store_base, offset - kHeapObjectTag);
+ __ RecordWrite(elements, element_addr, value, GetLinkRegisterState(),
+ kSaveFPRegs, EMIT_REMEMBERED_SET, check_needed);
+ }
+}
+
+
+void LCodeGen::DoStoreKeyedGeneric(LStoreKeyedGeneric* instr) {
+ ASSERT(ToRegister(instr->context()).is(cp));
+ ASSERT(ToRegister(instr->object()).Is(x2));
+ ASSERT(ToRegister(instr->key()).Is(x1));
+ ASSERT(ToRegister(instr->value()).Is(x0));
+
+ Handle<Code> ic = instr->strict_mode() == STRICT
+ ? isolate()->builtins()->KeyedStoreIC_Initialize_Strict()
+ : isolate()->builtins()->KeyedStoreIC_Initialize();
+ CallCode(ic, RelocInfo::CODE_TARGET, instr);
+}
+
+
+void LCodeGen::DoStoreNamedField(LStoreNamedField* instr) {
+ Representation representation = instr->representation();
+
+ Register object = ToRegister(instr->object());
+ HObjectAccess access = instr->hydrogen()->access();
+ Handle<Map> transition = instr->transition();
+ int offset = access.offset();
+
+ if (access.IsExternalMemory()) {
+ ASSERT(transition.is_null());
+ ASSERT(!instr->hydrogen()->NeedsWriteBarrier());
+ Register value = ToRegister(instr->value());
+ __ Store(value, MemOperand(object, offset), representation);
+ return;
+ } else if (representation.IsDouble()) {
+ ASSERT(transition.is_null());
+ ASSERT(access.IsInobject());
+ ASSERT(!instr->hydrogen()->NeedsWriteBarrier());
+ FPRegister value = ToDoubleRegister(instr->value());
+ __ Str(value, FieldMemOperand(object, offset));
+ return;
+ }
+
+ Register value = ToRegister(instr->value());
+
+ SmiCheck check_needed = instr->hydrogen()->value()->IsHeapObject()
+ ? OMIT_SMI_CHECK : INLINE_SMI_CHECK;
+
+ ASSERT(!(representation.IsSmi() &&
+ instr->value()->IsConstantOperand() &&
+ !IsInteger32Constant(LConstantOperand::cast(instr->value()))));
+ if (representation.IsHeapObject() &&
+ !instr->hydrogen()->value()->type().IsHeapObject()) {
+ DeoptimizeIfSmi(value, instr->environment());
+
+    // We now know that value is not a smi, so we can omit the smi check below.
+ check_needed = OMIT_SMI_CHECK;
+ }
+
+ if (!transition.is_null()) {
+ // Store the new map value.
+ Register new_map_value = ToRegister(instr->temp0());
+ __ Mov(new_map_value, Operand(transition));
+ __ Str(new_map_value, FieldMemOperand(object, HeapObject::kMapOffset));
+ if (instr->hydrogen()->NeedsWriteBarrierForMap()) {
+ // Update the write barrier for the map field.
+ __ RecordWriteField(object,
+ HeapObject::kMapOffset,
+ new_map_value,
+ ToRegister(instr->temp1()),
+ GetLinkRegisterState(),
+ kSaveFPRegs,
+ OMIT_REMEMBERED_SET,
+ OMIT_SMI_CHECK);
+ }
+ }
+
+ // Do the store.
+ Register destination;
+ if (access.IsInobject()) {
+ destination = object;
+ } else {
+ Register temp0 = ToRegister(instr->temp0());
+ __ Ldr(temp0, FieldMemOperand(object, JSObject::kPropertiesOffset));
+ destination = temp0;
+ }
+
+ if (representation.IsSmi() &&
+ instr->hydrogen()->value()->representation().IsInteger32()) {
+ ASSERT(instr->hydrogen()->store_mode() == STORE_TO_INITIALIZED_ENTRY);
+#ifdef DEBUG
+ Register temp0 = ToRegister(instr->temp0());
+ __ Ldr(temp0, FieldMemOperand(destination, offset));
+ __ AssertSmi(temp0);
+ // If destination aliased temp0, restore it to the address calculated
+ // earlier.
+ if (destination.Is(temp0)) {
+ ASSERT(!access.IsInobject());
+ __ Ldr(destination, FieldMemOperand(object, JSObject::kPropertiesOffset));
+ }
+#endif
+ STATIC_ASSERT(kSmiValueSize == 32 && kSmiShift == 32 && kSmiTag == 0);
+ __ Store(value, UntagSmiFieldMemOperand(destination, offset),
+ Representation::Integer32());
+ } else {
+ __ Store(value, FieldMemOperand(destination, offset), representation);
+ }
+ if (instr->hydrogen()->NeedsWriteBarrier()) {
+ __ RecordWriteField(destination,
+ offset,
+ value, // Clobbered.
+ ToRegister(instr->temp1()), // Clobbered.
+ GetLinkRegisterState(),
+ kSaveFPRegs,
+ EMIT_REMEMBERED_SET,
+ check_needed);
+ }
+}
+
+
+void LCodeGen::DoStoreNamedGeneric(LStoreNamedGeneric* instr) {
+ ASSERT(ToRegister(instr->context()).is(cp));
+ ASSERT(ToRegister(instr->value()).is(x0));
+ ASSERT(ToRegister(instr->object()).is(x1));
+
+ // Name must be in x2.
+ __ Mov(x2, Operand(instr->name()));
+ Handle<Code> ic = StoreIC::initialize_stub(isolate(), instr->strict_mode());
+ CallCode(ic, RelocInfo::CODE_TARGET, instr);
+}
+
+
+void LCodeGen::DoStringAdd(LStringAdd* instr) {
+ ASSERT(ToRegister(instr->context()).is(cp));
+ ASSERT(ToRegister(instr->left()).Is(x1));
+ ASSERT(ToRegister(instr->right()).Is(x0));
+ StringAddStub stub(instr->hydrogen()->flags(),
+ instr->hydrogen()->pretenure_flag());
+ CallCode(stub.GetCode(isolate()), RelocInfo::CODE_TARGET, instr);
+}
+
+
+void LCodeGen::DoStringCharCodeAt(LStringCharCodeAt* instr) {
+ class DeferredStringCharCodeAt: public LDeferredCode {
+ public:
+ DeferredStringCharCodeAt(LCodeGen* codegen, LStringCharCodeAt* instr)
+ : LDeferredCode(codegen), instr_(instr) { }
+ virtual void Generate() { codegen()->DoDeferredStringCharCodeAt(instr_); }
+ virtual LInstruction* instr() { return instr_; }
+ private:
+ LStringCharCodeAt* instr_;
+ };
+
+ DeferredStringCharCodeAt* deferred =
+ new(zone()) DeferredStringCharCodeAt(this, instr);
+
+ StringCharLoadGenerator::Generate(masm(),
+ ToRegister(instr->string()),
+ ToRegister32(instr->index()),
+ ToRegister(instr->result()),
+ deferred->entry());
+ __ Bind(deferred->exit());
+}
+
+
+void LCodeGen::DoDeferredStringCharCodeAt(LStringCharCodeAt* instr) {
+ Register string = ToRegister(instr->string());
+ Register result = ToRegister(instr->result());
+
+ // TODO(3095996): Get rid of this. For now, we need to make the
+ // result register contain a valid pointer because it is already
+ // contained in the register pointer map.
+ __ Mov(result, 0);
+
+ PushSafepointRegistersScope scope(this, Safepoint::kWithRegisters);
+ __ Push(string);
+ // Push the index as a smi. This is safe because of the checks in
+ // DoStringCharCodeAt above.
+ Register index = ToRegister(instr->index());
+ __ SmiTag(index);
+ __ Push(index);
+
+ CallRuntimeFromDeferred(Runtime::kHiddenStringCharCodeAt, 2, instr,
+ instr->context());
+ __ AssertSmi(x0);
+ __ SmiUntag(x0);
+ __ StoreToSafepointRegisterSlot(x0, result);
+}
+
+
+void LCodeGen::DoStringCharFromCode(LStringCharFromCode* instr) {
+ class DeferredStringCharFromCode: public LDeferredCode {
+ public:
+ DeferredStringCharFromCode(LCodeGen* codegen, LStringCharFromCode* instr)
+ : LDeferredCode(codegen), instr_(instr) { }
+ virtual void Generate() { codegen()->DoDeferredStringCharFromCode(instr_); }
+ virtual LInstruction* instr() { return instr_; }
+ private:
+ LStringCharFromCode* instr_;
+ };
+
+ DeferredStringCharFromCode* deferred =
+ new(zone()) DeferredStringCharFromCode(this, instr);
+
+ ASSERT(instr->hydrogen()->value()->representation().IsInteger32());
+ Register char_code = ToRegister32(instr->char_code());
+ Register result = ToRegister(instr->result());
+
+ __ Cmp(char_code, String::kMaxOneByteCharCode);
+ __ B(hi, deferred->entry());
+ __ LoadRoot(result, Heap::kSingleCharacterStringCacheRootIndex);
+ __ Add(result, result, Operand(char_code, SXTW, kPointerSizeLog2));
+ __ Ldr(result, FieldMemOperand(result, FixedArray::kHeaderSize));
+ __ CompareRoot(result, Heap::kUndefinedValueRootIndex);
+ __ B(eq, deferred->entry());
+ __ Bind(deferred->exit());
+}
+
+
+void LCodeGen::DoDeferredStringCharFromCode(LStringCharFromCode* instr) {
+ Register char_code = ToRegister(instr->char_code());
+ Register result = ToRegister(instr->result());
+
+ // TODO(3095996): Get rid of this. For now, we need to make the
+ // result register contain a valid pointer because it is already
+ // contained in the register pointer map.
+ __ Mov(result, 0);
+
+ PushSafepointRegistersScope scope(this, Safepoint::kWithRegisters);
+ __ SmiTag(char_code);
+ __ Push(char_code);
+ CallRuntimeFromDeferred(Runtime::kCharFromCode, 1, instr, instr->context());
+ __ StoreToSafepointRegisterSlot(x0, result);
+}
+
+
+void LCodeGen::DoStringCompareAndBranch(LStringCompareAndBranch* instr) {
+ ASSERT(ToRegister(instr->context()).is(cp));
+ Token::Value op = instr->op();
+
+ Handle<Code> ic = CompareIC::GetUninitialized(isolate(), op);
+ CallCode(ic, RelocInfo::CODE_TARGET, instr);
+ InlineSmiCheckInfo::EmitNotInlined(masm());
+
+ Condition condition = TokenToCondition(op, false);
+
+ EmitCompareAndBranch(instr, condition, x0, 0);
+}
+
+
+void LCodeGen::DoSubI(LSubI* instr) {
+ bool can_overflow = instr->hydrogen()->CheckFlag(HValue::kCanOverflow);
+ Register result = ToRegister32(instr->result());
+ Register left = ToRegister32(instr->left());
+ Operand right = ToOperand32I(instr->right());
+ if (can_overflow) {
+ __ Subs(result, left, right);
+ DeoptimizeIf(vs, instr->environment());
+ } else {
+ __ Sub(result, left, right);
+ }
+}
+
+
+void LCodeGen::DoSubS(LSubS* instr) {
+ bool can_overflow = instr->hydrogen()->CheckFlag(HValue::kCanOverflow);
+ Register result = ToRegister(instr->result());
+ Register left = ToRegister(instr->left());
+ Operand right = ToOperand(instr->right());
+ if (can_overflow) {
+ __ Subs(result, left, right);
+ DeoptimizeIf(vs, instr->environment());
+ } else {
+ __ Sub(result, left, right);
+ }
+}
+
+
+void LCodeGen::DoDeferredTaggedToI(LTaggedToI* instr,
+ LOperand* value,
+ LOperand* temp1,
+ LOperand* temp2) {
+ Register input = ToRegister(value);
+ Register scratch1 = ToRegister(temp1);
+ DoubleRegister dbl_scratch1 = double_scratch();
+
+ Label done;
+
+ // Load heap object map.
+ __ Ldr(scratch1, FieldMemOperand(input, HeapObject::kMapOffset));
+
+ if (instr->truncating()) {
+ Register output = ToRegister(instr->result());
+ Label check_bools;
+
+ // If it's not a heap number, jump to undefined check.
+ __ JumpIfNotRoot(scratch1, Heap::kHeapNumberMapRootIndex, &check_bools);
+
+ // A heap number: load value and convert to int32 using truncating function.
+ __ TruncateHeapNumberToI(output, input);
+ __ B(&done);
+
+ __ Bind(&check_bools);
+
+ Register true_root = output;
+ Register false_root = scratch1;
+ __ LoadTrueFalseRoots(true_root, false_root);
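+    // output becomes 1 for true and 0 otherwise; the conditional compare
+    // leaves eq set iff the input is either boolean, so only non-boolean
+    // values fall through to the undefined check below.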
+ __ Cmp(input, true_root);
+ __ Cset(output, eq);
+ __ Ccmp(input, false_root, ZFlag, ne);
+ __ B(eq, &done);
+
+    // Output already contains zero; undefined is converted to zero for
+    // truncating conversions.
+ DeoptimizeIfNotRoot(input, Heap::kUndefinedValueRootIndex,
+ instr->environment());
+ } else {
+ Register output = ToRegister32(instr->result());
+
+ DoubleRegister dbl_scratch2 = ToDoubleRegister(temp2);
+
+    // Deoptimize if it's not a heap number.
+ DeoptimizeIfNotRoot(scratch1, Heap::kHeapNumberMapRootIndex,
+ instr->environment());
+
+ // A heap number: load value and convert to int32 using non-truncating
+ // function. If the result is out of range, branch to deoptimize.
+ __ Ldr(dbl_scratch1, FieldMemOperand(input, HeapNumber::kValueOffset));
+ __ TryConvertDoubleToInt32(output, dbl_scratch1, dbl_scratch2);
+ DeoptimizeIf(ne, instr->environment());
+
+ if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) {
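+      // A zero result may have come from -0.0. Check the sign bit of the
+      // original double via its raw bit pattern and deoptimize if it is set.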
+ __ Cmp(output, 0);
+ __ B(ne, &done);
+ __ Fmov(scratch1, dbl_scratch1);
+ DeoptimizeIfNegative(scratch1, instr->environment());
+ }
+ }
+ __ Bind(&done);
+}
+
+
+void LCodeGen::DoTaggedToI(LTaggedToI* instr) {
+ class DeferredTaggedToI: public LDeferredCode {
+ public:
+ DeferredTaggedToI(LCodeGen* codegen, LTaggedToI* instr)
+ : LDeferredCode(codegen), instr_(instr) { }
+ virtual void Generate() {
+ codegen()->DoDeferredTaggedToI(instr_, instr_->value(), instr_->temp1(),
+ instr_->temp2());
+ }
+
+ virtual LInstruction* instr() { return instr_; }
+ private:
+ LTaggedToI* instr_;
+ };
+
+ Register input = ToRegister(instr->value());
+ Register output = ToRegister(instr->result());
+
+ if (instr->hydrogen()->value()->representation().IsSmi()) {
+ __ SmiUntag(output, input);
+ } else {
+ DeferredTaggedToI* deferred = new(zone()) DeferredTaggedToI(this, instr);
+
+ __ JumpIfNotSmi(input, deferred->entry());
+ __ SmiUntag(output, input);
+ __ Bind(deferred->exit());
+ }
+}
+
+
+void LCodeGen::DoThisFunction(LThisFunction* instr) {
+ Register result = ToRegister(instr->result());
+ __ Ldr(result, MemOperand(fp, JavaScriptFrameConstants::kFunctionOffset));
+}
+
+
+void LCodeGen::DoToFastProperties(LToFastProperties* instr) {
+ ASSERT(ToRegister(instr->value()).Is(x0));
+ ASSERT(ToRegister(instr->result()).Is(x0));
+ __ Push(x0);
+ CallRuntime(Runtime::kToFastProperties, 1, instr);
+}
+
+
+void LCodeGen::DoRegExpLiteral(LRegExpLiteral* instr) {
+ ASSERT(ToRegister(instr->context()).is(cp));
+ Label materialized;
+ // Registers will be used as follows:
+ // x7 = literals array.
+ // x1 = regexp literal.
+ // x0 = regexp literal clone.
+ // x10-x12 are used as temporaries.
+ int literal_offset =
+ FixedArray::OffsetOfElementAt(instr->hydrogen()->literal_index());
+ __ LoadObject(x7, instr->hydrogen()->literals());
+ __ Ldr(x1, FieldMemOperand(x7, literal_offset));
+ __ JumpIfNotRoot(x1, Heap::kUndefinedValueRootIndex, &materialized);
+
+ // Create regexp literal using runtime function
+ // Result will be in x0.
+ __ Mov(x12, Operand(Smi::FromInt(instr->hydrogen()->literal_index())));
+ __ Mov(x11, Operand(instr->hydrogen()->pattern()));
+ __ Mov(x10, Operand(instr->hydrogen()->flags()));
+ __ Push(x7, x12, x11, x10);
+ CallRuntime(Runtime::kHiddenMaterializeRegExpLiteral, 4, instr);
+ __ Mov(x1, x0);
+
+ __ Bind(&materialized);
+ int size = JSRegExp::kSize + JSRegExp::kInObjectFieldCount * kPointerSize;
+ Label allocated, runtime_allocate;
+
+ __ Allocate(size, x0, x10, x11, &runtime_allocate, TAG_OBJECT);
+ __ B(&allocated);
+
+ __ Bind(&runtime_allocate);
+ __ Mov(x0, Smi::FromInt(size));
+ __ Push(x1, x0);
+ CallRuntime(Runtime::kHiddenAllocateInNewSpace, 1, instr);
+ __ Pop(x1);
+
+ __ Bind(&allocated);
+ // Copy the content into the newly allocated memory.
+ __ CopyFields(x0, x1, CPURegList(x10, x11, x12), size / kPointerSize);
+}
+
+
+void LCodeGen::DoTransitionElementsKind(LTransitionElementsKind* instr) {
+ Register object = ToRegister(instr->object());
+ Register temp1 = ToRegister(instr->temp1());
+
+ Handle<Map> from_map = instr->original_map();
+ Handle<Map> to_map = instr->transitioned_map();
+ ElementsKind from_kind = instr->from_kind();
+ ElementsKind to_kind = instr->to_kind();
+
+ Label not_applicable;
+ __ CheckMap(object, temp1, from_map, &not_applicable, DONT_DO_SMI_CHECK);
+
+ if (IsSimpleMapChangeTransition(from_kind, to_kind)) {
+ Register new_map = ToRegister(instr->temp2());
+ __ Mov(new_map, Operand(to_map));
+ __ Str(new_map, FieldMemOperand(object, HeapObject::kMapOffset));
+ // Write barrier.
+ __ RecordWriteField(object, HeapObject::kMapOffset, new_map, temp1,
+ GetLinkRegisterState(), kDontSaveFPRegs);
+ } else {
+ ASSERT(ToRegister(instr->context()).is(cp));
+ PushSafepointRegistersScope scope(
+ this, Safepoint::kWithRegistersAndDoubles);
+ __ Mov(x0, object);
+ __ Mov(x1, Operand(to_map));
+ bool is_js_array = from_map->instance_type() == JS_ARRAY_TYPE;
+ TransitionElementsKindStub stub(from_kind, to_kind, is_js_array);
+ __ CallStub(&stub);
+ RecordSafepointWithRegistersAndDoubles(
+ instr->pointer_map(), 0, Safepoint::kNoLazyDeopt);
+ }
+ __ Bind(&not_applicable);
+}
+
+
+void LCodeGen::DoTrapAllocationMemento(LTrapAllocationMemento* instr) {
+ Register object = ToRegister(instr->object());
+ Register temp1 = ToRegister(instr->temp1());
+ Register temp2 = ToRegister(instr->temp2());
+
+ Label no_memento_found;
+ __ JumpIfJSArrayHasAllocationMemento(object, temp1, temp2, &no_memento_found);
+ Deoptimize(instr->environment());
+ __ Bind(&no_memento_found);
+}
+
+
+void LCodeGen::DoTruncateDoubleToIntOrSmi(LTruncateDoubleToIntOrSmi* instr) {
+ DoubleRegister input = ToDoubleRegister(instr->value());
+ Register result = ToRegister(instr->result());
+ __ TruncateDoubleToI(result, input);
+ if (instr->tag_result()) {
+ __ SmiTag(result, result);
+ }
+}
+
+
+void LCodeGen::DoTypeof(LTypeof* instr) {
+ Register input = ToRegister(instr->value());
+ __ Push(input);
+ CallRuntime(Runtime::kTypeof, 1, instr);
+}
+
+
+void LCodeGen::DoTypeofIsAndBranch(LTypeofIsAndBranch* instr) {
+ Handle<String> type_name = instr->type_literal();
+ Label* true_label = instr->TrueLabel(chunk_);
+ Label* false_label = instr->FalseLabel(chunk_);
+ Register value = ToRegister(instr->value());
+
+ if (type_name->Equals(heap()->number_string())) {
+ ASSERT(instr->temp1() != NULL);
+ Register map = ToRegister(instr->temp1());
+
+ __ JumpIfSmi(value, true_label);
+ __ Ldr(map, FieldMemOperand(value, HeapObject::kMapOffset));
+ __ CompareRoot(map, Heap::kHeapNumberMapRootIndex);
+ EmitBranch(instr, eq);
+
+ } else if (type_name->Equals(heap()->string_string())) {
+ ASSERT((instr->temp1() != NULL) && (instr->temp2() != NULL));
+ Register map = ToRegister(instr->temp1());
+ Register scratch = ToRegister(instr->temp2());
+
+ __ JumpIfSmi(value, false_label);
+ __ JumpIfObjectType(
+ value, map, scratch, FIRST_NONSTRING_TYPE, false_label, ge);
+ __ Ldrb(scratch, FieldMemOperand(map, Map::kBitFieldOffset));
+ EmitTestAndBranch(instr, eq, scratch, 1 << Map::kIsUndetectable);
+
+ } else if (type_name->Equals(heap()->symbol_string())) {
+ ASSERT((instr->temp1() != NULL) && (instr->temp2() != NULL));
+ Register map = ToRegister(instr->temp1());
+ Register scratch = ToRegister(instr->temp2());
+
+ __ JumpIfSmi(value, false_label);
+ __ CompareObjectType(value, map, scratch, SYMBOL_TYPE);
+ EmitBranch(instr, eq);
+
+ } else if (type_name->Equals(heap()->boolean_string())) {
+ __ JumpIfRoot(value, Heap::kTrueValueRootIndex, true_label);
+ __ CompareRoot(value, Heap::kFalseValueRootIndex);
+ EmitBranch(instr, eq);
+
+ } else if (FLAG_harmony_typeof && type_name->Equals(heap()->null_string())) {
+ __ CompareRoot(value, Heap::kNullValueRootIndex);
+ EmitBranch(instr, eq);
+
+ } else if (type_name->Equals(heap()->undefined_string())) {
+ ASSERT(instr->temp1() != NULL);
+ Register scratch = ToRegister(instr->temp1());
+
+ __ JumpIfRoot(value, Heap::kUndefinedValueRootIndex, true_label);
+ __ JumpIfSmi(value, false_label);
+ // Check for undetectable objects and jump to the true branch in this case.
+ __ Ldr(scratch, FieldMemOperand(value, HeapObject::kMapOffset));
+ __ Ldrb(scratch, FieldMemOperand(scratch, Map::kBitFieldOffset));
+ EmitTestAndBranch(instr, ne, scratch, 1 << Map::kIsUndetectable);
+
+ } else if (type_name->Equals(heap()->function_string())) {
+ STATIC_ASSERT(NUM_OF_CALLABLE_SPEC_OBJECT_TYPES == 2);
+ ASSERT(instr->temp1() != NULL);
+ Register type = ToRegister(instr->temp1());
+
+ __ JumpIfSmi(value, false_label);
+ __ JumpIfObjectType(value, type, type, JS_FUNCTION_TYPE, true_label);
+ // HeapObject's type has been loaded into type register by JumpIfObjectType.
+ EmitCompareAndBranch(instr, eq, type, JS_FUNCTION_PROXY_TYPE);
+
+ } else if (type_name->Equals(heap()->object_string())) {
+ ASSERT((instr->temp1() != NULL) && (instr->temp2() != NULL));
+ Register map = ToRegister(instr->temp1());
+ Register scratch = ToRegister(instr->temp2());
+
+ __ JumpIfSmi(value, false_label);
+ if (!FLAG_harmony_typeof) {
+ __ JumpIfRoot(value, Heap::kNullValueRootIndex, true_label);
+ }
+ __ JumpIfObjectType(value, map, scratch,
+ FIRST_NONCALLABLE_SPEC_OBJECT_TYPE, false_label, lt);
+ __ CompareInstanceType(map, scratch, LAST_NONCALLABLE_SPEC_OBJECT_TYPE);
+ __ B(gt, false_label);
+ // Check for undetectable objects => false.
+ __ Ldrb(scratch, FieldMemOperand(value, Map::kBitFieldOffset));
+ EmitTestAndBranch(instr, eq, scratch, 1 << Map::kIsUndetectable);
+
+ } else {
+ __ B(false_label);
+ }
+}
+
+
+void LCodeGen::DoUint32ToDouble(LUint32ToDouble* instr) {
+ __ Ucvtf(ToDoubleRegister(instr->result()), ToRegister32(instr->value()));
+}
+
+
+void LCodeGen::DoCheckMapValue(LCheckMapValue* instr) {
+ Register object = ToRegister(instr->value());
+ Register map = ToRegister(instr->map());
+ Register temp = ToRegister(instr->temp());
+ __ Ldr(temp, FieldMemOperand(object, HeapObject::kMapOffset));
+ __ Cmp(map, temp);
+ DeoptimizeIf(ne, instr->environment());
+}
+
+
+void LCodeGen::DoWrapReceiver(LWrapReceiver* instr) {
+ Register receiver = ToRegister(instr->receiver());
+ Register function = ToRegister(instr->function());
+ Register result = ToRegister(instr->result());
+
+ // If the receiver is null or undefined, we have to pass the global object as
+ // a receiver to normal functions. Values have to be passed unchanged to
+ // builtins and strict-mode functions.
+ Label global_object, done, deopt;
+
+ if (!instr->hydrogen()->known_function()) {
+ __ Ldr(result, FieldMemOperand(function,
+ JSFunction::kSharedFunctionInfoOffset));
+
+ // CompilerHints is an int32 field. See objects.h.
+ __ Ldr(result.W(),
+ FieldMemOperand(result, SharedFunctionInfo::kCompilerHintsOffset));
+
+ // Do not transform the receiver to object for strict mode functions.
+ __ Tbnz(result, SharedFunctionInfo::kStrictModeFunction, &done);
+
+ // Do not transform the receiver to object for builtins.
+ __ Tbnz(result, SharedFunctionInfo::kNative, &done);
+ }
+
+ // Normal function. Replace undefined or null with global receiver.
+ __ JumpIfRoot(receiver, Heap::kNullValueRootIndex, &global_object);
+ __ JumpIfRoot(receiver, Heap::kUndefinedValueRootIndex, &global_object);
+
+ // Deoptimize if the receiver is not a JS object.
+ __ JumpIfSmi(receiver, &deopt);
+ __ CompareObjectType(receiver, result, result, FIRST_SPEC_OBJECT_TYPE);
+ __ Mov(result, receiver);
+ __ B(ge, &done);
+ // Otherwise, fall through to deopt.
+
+ __ Bind(&deopt);
+ Deoptimize(instr->environment());
+
+ __ Bind(&global_object);
+ __ Ldr(result, FieldMemOperand(function, JSFunction::kContextOffset));
+ __ Ldr(result, ContextMemOperand(result, Context::GLOBAL_OBJECT_INDEX));
+ __ Ldr(result, FieldMemOperand(result, GlobalObject::kGlobalReceiverOffset));
+
+ __ Bind(&done);
+}
+
+
+void LCodeGen::DoLoadFieldByIndex(LLoadFieldByIndex* instr) {
+ Register object = ToRegister(instr->object());
+ Register index = ToRegister(instr->index());
+ Register result = ToRegister(instr->result());
+
+ __ AssertSmi(index);
+
+ Label out_of_object, done;
+ __ Cmp(index, Smi::FromInt(0));
+ __ B(lt, &out_of_object);
+
+ STATIC_ASSERT(kPointerSizeLog2 > kSmiTagSize);
+ __ Add(result, object, Operand::UntagSmiAndScale(index, kPointerSizeLog2));
+ __ Ldr(result, FieldMemOperand(result, JSObject::kHeaderSize));
+
+ __ B(&done);
+
+ __ Bind(&out_of_object);
+ __ Ldr(result, FieldMemOperand(object, JSObject::kPropertiesOffset));
+  // For out-of-object properties, index == -(property index + 1).
+ __ Sub(result, result, Operand::UntagSmiAndScale(index, kPointerSizeLog2));
+ __ Ldr(result, FieldMemOperand(result,
+ FixedArray::kHeaderSize - kPointerSize));
+ __ Bind(&done);
+}
+
+} } // namespace v8::internal
diff --git a/deps/v8/src/arm64/lithium-codegen-arm64.h b/deps/v8/src/arm64/lithium-codegen-arm64.h
new file mode 100644
index 0000000000..b1d8b70d5b
--- /dev/null
+++ b/deps/v8/src/arm64/lithium-codegen-arm64.h
@@ -0,0 +1,490 @@
+// Copyright 2013 the V8 project authors. All rights reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following
+// disclaimer in the documentation and/or other materials provided
+// with the distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived
+// from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+#ifndef V8_ARM64_LITHIUM_CODEGEN_ARM64_H_
+#define V8_ARM64_LITHIUM_CODEGEN_ARM64_H_
+
+#include "arm64/lithium-arm64.h"
+
+#include "arm64/lithium-gap-resolver-arm64.h"
+#include "deoptimizer.h"
+#include "lithium-codegen.h"
+#include "safepoint-table.h"
+#include "scopes.h"
+#include "v8utils.h"
+
+namespace v8 {
+namespace internal {
+
+// Forward declarations.
+class LDeferredCode;
+class SafepointGenerator;
+class BranchGenerator;
+
+class LCodeGen: public LCodeGenBase {
+ public:
+ LCodeGen(LChunk* chunk, MacroAssembler* assembler, CompilationInfo* info)
+ : LCodeGenBase(chunk, assembler, info),
+ deoptimizations_(4, info->zone()),
+ deopt_jump_table_(4, info->zone()),
+ deoptimization_literals_(8, info->zone()),
+ inlined_function_count_(0),
+ scope_(info->scope()),
+ translations_(info->zone()),
+ deferred_(8, info->zone()),
+ osr_pc_offset_(-1),
+ frame_is_built_(false),
+ safepoints_(info->zone()),
+ resolver_(this),
+ expected_safepoint_kind_(Safepoint::kSimple) {
+ PopulateDeoptimizationLiteralsWithInlinedFunctions();
+ }
+
+ // Simple accessors.
+ Scope* scope() const { return scope_; }
+
+ int LookupDestination(int block_id) const {
+ return chunk()->LookupDestination(block_id);
+ }
+
+ bool IsNextEmittedBlock(int block_id) const {
+ return LookupDestination(block_id) == GetNextEmittedBlock();
+ }
+
+ bool NeedsEagerFrame() const {
+ return GetStackSlotCount() > 0 ||
+ info()->is_non_deferred_calling() ||
+ !info()->IsStub() ||
+ info()->requires_frame();
+ }
+ bool NeedsDeferredFrame() const {
+ return !NeedsEagerFrame() && info()->is_deferred_calling();
+ }
+
+ LinkRegisterStatus GetLinkRegisterState() const {
+ return frame_is_built_ ? kLRHasBeenSaved : kLRHasNotBeenSaved;
+ }
+
+ // Try to generate code for the entire chunk, but it may fail if the
+ // chunk contains constructs we cannot handle. Returns true if the
+ // code generation attempt succeeded.
+ bool GenerateCode();
+
+ // Finish the code by setting stack height, safepoint, and bailout
+ // information on it.
+ void FinishCode(Handle<Code> code);
+
+ // Support for converting LOperands to assembler types.
+ // LOperand must be a register.
+ Register ToRegister(LOperand* op) const;
+ Register ToRegister32(LOperand* op) const;
+ Operand ToOperand(LOperand* op);
+ Operand ToOperand32I(LOperand* op);
+ Operand ToOperand32U(LOperand* op);
+ MemOperand ToMemOperand(LOperand* op) const;
+ Handle<Object> ToHandle(LConstantOperand* op) const;
+
+ // TODO(jbramley): Examine these helpers and check that they make sense.
+ // IsInteger32Constant returns true for smi constants, for example.
+ bool IsInteger32Constant(LConstantOperand* op) const;
+ bool IsSmi(LConstantOperand* op) const;
+
+ int32_t ToInteger32(LConstantOperand* op) const;
+ Smi* ToSmi(LConstantOperand* op) const;
+ double ToDouble(LConstantOperand* op) const;
+ DoubleRegister ToDoubleRegister(LOperand* op) const;
+
+ // Declare methods that deal with the individual node types.
+#define DECLARE_DO(type) void Do##type(L##type* node);
+ LITHIUM_CONCRETE_INSTRUCTION_LIST(DECLARE_DO)
+#undef DECLARE_DO
+
+ private:
+ // Return a double scratch register which can be used locally
+ // when generating code for a lithium instruction.
+ DoubleRegister double_scratch() { return crankshaft_fp_scratch; }
+
+ // Deferred code support.
+ void DoDeferredNumberTagD(LNumberTagD* instr);
+ void DoDeferredStackCheck(LStackCheck* instr);
+ void DoDeferredStringCharCodeAt(LStringCharCodeAt* instr);
+ void DoDeferredStringCharFromCode(LStringCharFromCode* instr);
+ void DoDeferredMathAbsTagged(LMathAbsTagged* instr,
+ Label* exit,
+ Label* allocation_entry);
+
+ enum IntegerSignedness { SIGNED_INT32, UNSIGNED_INT32 };
+ void DoDeferredNumberTagU(LInstruction* instr,
+ LOperand* value,
+ LOperand* temp1,
+ LOperand* temp2);
+ void DoDeferredTaggedToI(LTaggedToI* instr,
+ LOperand* value,
+ LOperand* temp1,
+ LOperand* temp2);
+ void DoDeferredAllocate(LAllocate* instr);
+ void DoDeferredInstanceOfKnownGlobal(LInstanceOfKnownGlobal* instr);
+ void DoDeferredInstanceMigration(LCheckMaps* instr, Register object);
+
+ Operand ToOperand32(LOperand* op, IntegerSignedness signedness);
+
+ static Condition TokenToCondition(Token::Value op, bool is_unsigned);
+ void EmitGoto(int block);
+ void DoGap(LGap* instr);
+
+  // Generic version of EmitBranch. It contains some code to avoid emitting a
+  // branch to the next emitted basic block, where we could simply fall through.
+  // You shouldn't use this directly; consider one of the helpers instead, such
+  // as LCodeGen::EmitBranch or LCodeGen::EmitCompareAndBranch.
+ template<class InstrType>
+ void EmitBranchGeneric(InstrType instr,
+ const BranchGenerator& branch);
+
+ template<class InstrType>
+ void EmitBranch(InstrType instr, Condition condition);
+
+ template<class InstrType>
+ void EmitCompareAndBranch(InstrType instr,
+ Condition condition,
+ const Register& lhs,
+ const Operand& rhs);
+
+ template<class InstrType>
+ void EmitTestAndBranch(InstrType instr,
+ Condition condition,
+ const Register& value,
+ uint64_t mask);
+
+ template<class InstrType>
+ void EmitBranchIfNonZeroNumber(InstrType instr,
+ const FPRegister& value,
+ const FPRegister& scratch);
+
+ template<class InstrType>
+ void EmitBranchIfHeapNumber(InstrType instr,
+ const Register& value);
+
+ template<class InstrType>
+ void EmitBranchIfRoot(InstrType instr,
+ const Register& value,
+ Heap::RootListIndex index);
+
+ // Emits optimized code to deep-copy the contents of statically known object
+ // graphs (e.g. object literal boilerplate). Expects a pointer to the
+ // allocated destination object in the result register, and a pointer to the
+ // source object in the source register.
+ void EmitDeepCopy(Handle<JSObject> object,
+ Register result,
+ Register source,
+ Register scratch,
+ int* offset,
+ AllocationSiteMode mode);
+
+ // Emits optimized code for %_IsString(x). Preserves input register.
+  // Returns the condition on which a final split into the
+  // true and false labels should be made, to optimize fallthrough.
+ Condition EmitIsString(Register input, Register temp1, Label* is_not_string,
+ SmiCheck check_needed);
+
+ int DefineDeoptimizationLiteral(Handle<Object> literal);
+ void PopulateDeoptimizationData(Handle<Code> code);
+ void PopulateDeoptimizationLiteralsWithInlinedFunctions();
+
+ MemOperand BuildSeqStringOperand(Register string,
+ Register temp,
+ LOperand* index,
+ String::Encoding encoding);
+ void DeoptimizeBranch(
+ LEnvironment* environment,
+ BranchType branch_type, Register reg = NoReg, int bit = -1,
+ Deoptimizer::BailoutType* override_bailout_type = NULL);
+ void Deoptimize(LEnvironment* environment,
+ Deoptimizer::BailoutType* override_bailout_type = NULL);
+ void DeoptimizeIf(Condition cc, LEnvironment* environment);
+ void DeoptimizeIfZero(Register rt, LEnvironment* environment);
+ void DeoptimizeIfNotZero(Register rt, LEnvironment* environment);
+ void DeoptimizeIfNegative(Register rt, LEnvironment* environment);
+ void DeoptimizeIfSmi(Register rt, LEnvironment* environment);
+ void DeoptimizeIfNotSmi(Register rt, LEnvironment* environment);
+ void DeoptimizeIfRoot(Register rt,
+ Heap::RootListIndex index,
+ LEnvironment* environment);
+ void DeoptimizeIfNotRoot(Register rt,
+ Heap::RootListIndex index,
+ LEnvironment* environment);
+ void DeoptimizeIfMinusZero(DoubleRegister input, LEnvironment* environment);
+ void DeoptimizeIfBitSet(Register rt, int bit, LEnvironment* environment);
+ void DeoptimizeIfBitClear(Register rt, int bit, LEnvironment* environment);
+ void ApplyCheckIf(Condition cc, LBoundsCheck* check);
+
+ MemOperand PrepareKeyedExternalArrayOperand(Register key,
+ Register base,
+ Register scratch,
+ bool key_is_smi,
+ bool key_is_constant,
+ int constant_key,
+ ElementsKind elements_kind,
+ int additional_index);
+ void CalcKeyedArrayBaseRegister(Register base,
+ Register elements,
+ Register key,
+ bool key_is_tagged,
+ ElementsKind elements_kind);
+
+ void RegisterEnvironmentForDeoptimization(LEnvironment* environment,
+ Safepoint::DeoptMode mode);
+
+ int GetStackSlotCount() const { return chunk()->spill_slot_count(); }
+
+ void Abort(BailoutReason reason);
+
+ void AddDeferredCode(LDeferredCode* code) { deferred_.Add(code, zone()); }
+
+ // Emit frame translation commands for an environment.
+ void WriteTranslation(LEnvironment* environment, Translation* translation);
+
+ void AddToTranslation(LEnvironment* environment,
+ Translation* translation,
+ LOperand* op,
+ bool is_tagged,
+ bool is_uint32,
+ int* object_index_pointer,
+ int* dematerialized_index_pointer);
+
+ void SaveCallerDoubles();
+ void RestoreCallerDoubles();
+
+  // Code generation steps. Each bool-returning step returns true if code
+  // generation should continue.
+ void GenerateBodyInstructionPre(LInstruction* instr) V8_OVERRIDE;
+ bool GeneratePrologue();
+ bool GenerateDeferredCode();
+ bool GenerateDeoptJumpTable();
+ bool GenerateSafepointTable();
+
+ // Generates the custom OSR entrypoint and sets the osr_pc_offset.
+ void GenerateOsrPrologue();
+
+ enum SafepointMode {
+ RECORD_SIMPLE_SAFEPOINT,
+ RECORD_SAFEPOINT_WITH_REGISTERS_AND_NO_ARGUMENTS
+ };
+
+ void CallCode(Handle<Code> code,
+ RelocInfo::Mode mode,
+ LInstruction* instr);
+
+ void CallCodeGeneric(Handle<Code> code,
+ RelocInfo::Mode mode,
+ LInstruction* instr,
+ SafepointMode safepoint_mode);
+
+ void CallRuntime(const Runtime::Function* function,
+ int num_arguments,
+ LInstruction* instr,
+ SaveFPRegsMode save_doubles = kDontSaveFPRegs);
+
+ void CallRuntime(Runtime::FunctionId id,
+ int num_arguments,
+ LInstruction* instr) {
+ const Runtime::Function* function = Runtime::FunctionForId(id);
+ CallRuntime(function, num_arguments, instr);
+ }
+
+ void LoadContextFromDeferred(LOperand* context);
+ void CallRuntimeFromDeferred(Runtime::FunctionId id,
+ int argc,
+ LInstruction* instr,
+ LOperand* context);
+
+ // Generate a direct call to a known function.
+ // If the function is already loaded into x1 by the caller, function_reg may
+ // be set to x1. Otherwise, it must be NoReg, and CallKnownFunction will
+ // automatically load it.
+ void CallKnownFunction(Handle<JSFunction> function,
+ int formal_parameter_count,
+ int arity,
+ LInstruction* instr,
+ Register function_reg = NoReg);
+
+ // Support for recording safepoint and position information.
+ void RecordAndWritePosition(int position) V8_OVERRIDE;
+ void RecordSafepoint(LPointerMap* pointers,
+ Safepoint::Kind kind,
+ int arguments,
+ Safepoint::DeoptMode mode);
+ void RecordSafepoint(LPointerMap* pointers, Safepoint::DeoptMode mode);
+ void RecordSafepoint(Safepoint::DeoptMode mode);
+ void RecordSafepointWithRegisters(LPointerMap* pointers,
+ int arguments,
+ Safepoint::DeoptMode mode);
+ void RecordSafepointWithRegistersAndDoubles(LPointerMap* pointers,
+ int arguments,
+ Safepoint::DeoptMode mode);
+ void RecordSafepointWithLazyDeopt(LInstruction* instr,
+ SafepointMode safepoint_mode);
+
+ void EnsureSpaceForLazyDeopt(int space_needed) V8_OVERRIDE;
+
+ ZoneList<LEnvironment*> deoptimizations_;
+ ZoneList<Deoptimizer::JumpTableEntry*> deopt_jump_table_;
+ ZoneList<Handle<Object> > deoptimization_literals_;
+ int inlined_function_count_;
+ Scope* const scope_;
+ TranslationBuffer translations_;
+ ZoneList<LDeferredCode*> deferred_;
+ int osr_pc_offset_;
+ bool frame_is_built_;
+
+ // Builder that keeps track of safepoints in the code. The table itself is
+ // emitted at the end of the generated code.
+ SafepointTableBuilder safepoints_;
+
+  // Compiles a set of parallel moves into a sequential list of moves.
+ LGapResolver resolver_;
+
+ Safepoint::Kind expected_safepoint_kind_;
+
+ int old_position_;
+
+ class PushSafepointRegistersScope BASE_EMBEDDED {
+ public:
+ PushSafepointRegistersScope(LCodeGen* codegen,
+ Safepoint::Kind kind)
+ : codegen_(codegen) {
+ ASSERT(codegen_->info()->is_calling());
+ ASSERT(codegen_->expected_safepoint_kind_ == Safepoint::kSimple);
+ codegen_->expected_safepoint_kind_ = kind;
+
+ UseScratchRegisterScope temps(codegen_->masm_);
+ // Preserve the value of lr which must be saved on the stack (the call to
+ // the stub will clobber it).
+ Register to_be_pushed_lr =
+ temps.UnsafeAcquire(StoreRegistersStateStub::to_be_pushed_lr());
+ codegen_->masm_->Mov(to_be_pushed_lr, lr);
+ switch (codegen_->expected_safepoint_kind_) {
+ case Safepoint::kWithRegisters: {
+ StoreRegistersStateStub stub(kDontSaveFPRegs);
+ codegen_->masm_->CallStub(&stub);
+ break;
+ }
+ case Safepoint::kWithRegistersAndDoubles: {
+ StoreRegistersStateStub stub(kSaveFPRegs);
+ codegen_->masm_->CallStub(&stub);
+ break;
+ }
+ default:
+ UNREACHABLE();
+ }
+ }
+
+ ~PushSafepointRegistersScope() {
+ Safepoint::Kind kind = codegen_->expected_safepoint_kind_;
+ ASSERT((kind & Safepoint::kWithRegisters) != 0);
+ switch (kind) {
+ case Safepoint::kWithRegisters: {
+ RestoreRegistersStateStub stub(kDontSaveFPRegs);
+ codegen_->masm_->CallStub(&stub);
+ break;
+ }
+ case Safepoint::kWithRegistersAndDoubles: {
+ RestoreRegistersStateStub stub(kSaveFPRegs);
+ codegen_->masm_->CallStub(&stub);
+ break;
+ }
+ default:
+ UNREACHABLE();
+ }
+ codegen_->expected_safepoint_kind_ = Safepoint::kSimple;
+ }
+
+ private:
+ LCodeGen* codegen_;
+ };
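+
+  // Illustrative usage sketch for the scope above (assumes 'instr' is the
+  // lithium instruction currently being compiled; the body shown is a
+  // placeholder, not code from this file). The constructor saves lr and the
+  // register state via a stub, and the destructor restores them, so the body
+  // may clobber registers freely:
+  //
+  //   {
+  //     PushSafepointRegistersScope scope(this, Safepoint::kWithRegisters);
+  //     ...  // call into the runtime or a stub that may clobber registers
+  //     RecordSafepointWithRegisters(
+  //         instr->pointer_map(), 0, Safepoint::kNoLazyDeopt);
+  //   }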
+
+ friend class LDeferredCode;
+ friend class SafepointGenerator;
+ DISALLOW_COPY_AND_ASSIGN(LCodeGen);
+};
+
+
+class LDeferredCode: public ZoneObject {
+ public:
+ explicit LDeferredCode(LCodeGen* codegen)
+ : codegen_(codegen),
+ external_exit_(NULL),
+ instruction_index_(codegen->current_instruction_) {
+ codegen->AddDeferredCode(this);
+ }
+
+ virtual ~LDeferredCode() { }
+ virtual void Generate() = 0;
+ virtual LInstruction* instr() = 0;
+
+ void SetExit(Label* exit) { external_exit_ = exit; }
+ Label* entry() { return &entry_; }
+ Label* exit() { return (external_exit_ != NULL) ? external_exit_ : &exit_; }
+ int instruction_index() const { return instruction_index_; }
+
+ protected:
+ LCodeGen* codegen() const { return codegen_; }
+ MacroAssembler* masm() const { return codegen_->masm(); }
+
+ private:
+ LCodeGen* codegen_;
+ Label entry_;
+ Label exit_;
+ Label* external_exit_;
+ int instruction_index_;
+};
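+
+
+// Illustrative sketch of a concrete deferred code class. LSomeInstr and
+// DoDeferredSomeInstr() are placeholders for an actual lithium instruction
+// and its deferred helper; the real subclasses are defined in
+// lithium-codegen-arm64.cc.
+//
+//   class DeferredSomeInstr V8_FINAL : public LDeferredCode {
+//    public:
+//     DeferredSomeInstr(LCodeGen* codegen, LSomeInstr* instr)
+//         : LDeferredCode(codegen), instr_(instr) { }
+//     virtual void Generate() V8_OVERRIDE {
+//       codegen()->DoDeferredSomeInstr(instr_);
+//     }
+//     virtual LInstruction* instr() V8_OVERRIDE { return instr_; }
+//    private:
+//     LSomeInstr* instr_;
+//   };
+//
+// The code generator jumps to entry() on the slow path; Generate() emits the
+// deferred code and control returns through exit().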
+
+
+// This is the abstract class used by EmitBranchGeneric.
+// It is used to emit code for conditional branching. The Emit() function
+// emits code to branch when the condition holds, and EmitInverted() emits
+// the branch when the inverted condition holds.
+//
+// For concrete examples, see the implementations in
+// lithium-codegen-arm64.cc (e.g. BranchOnCondition, CompareAndBranch).
+class BranchGenerator BASE_EMBEDDED {
+ public:
+ explicit BranchGenerator(LCodeGen* codegen)
+ : codegen_(codegen) { }
+
+ virtual ~BranchGenerator() { }
+
+ virtual void Emit(Label* label) const = 0;
+ virtual void EmitInverted(Label* label) const = 0;
+
+ protected:
+ MacroAssembler* masm() const { return codegen_->masm(); }
+
+ LCodeGen* codegen_;
+};
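+
+
+// Illustrative sketch of a concrete branch generator, loosely modelled on the
+// BranchOnCondition helper mentioned above (the actual implementation in
+// lithium-codegen-arm64.cc may differ in detail):
+//
+//   class BranchOnCondition : public BranchGenerator {
+//    public:
+//     BranchOnCondition(LCodeGen* codegen, Condition cond)
+//         : BranchGenerator(codegen), cond_(cond) { }
+//     virtual void Emit(Label* label) const { masm()->B(cond_, label); }
+//     virtual void EmitInverted(Label* label) const {
+//       if (cond_ != al) masm()->B(NegateCondition(cond_), label);
+//     }
+//    private:
+//     Condition cond_;
+//   };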
+
+} } // namespace v8::internal
+
+#endif // V8_ARM64_LITHIUM_CODEGEN_ARM64_H_
diff --git a/deps/v8/src/arm64/lithium-gap-resolver-arm64.cc b/deps/v8/src/arm64/lithium-gap-resolver-arm64.cc
new file mode 100644
index 0000000000..f0a2e6bd0e
--- /dev/null
+++ b/deps/v8/src/arm64/lithium-gap-resolver-arm64.cc
@@ -0,0 +1,334 @@
+// Copyright 2013 the V8 project authors. All rights reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following
+// disclaimer in the documentation and/or other materials provided
+// with the distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived
+// from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+#include "v8.h"
+
+#include "arm64/lithium-gap-resolver-arm64.h"
+#include "arm64/lithium-codegen-arm64.h"
+
+namespace v8 {
+namespace internal {
+
+// We use the root register to spill a value while breaking a cycle in parallel
+// moves. We don't need access to roots while resolving the move list, and
+// using the root register has two advantages:
+//  - It is not in the Crankshaft allocatable register list, so it can't
+//    interfere with any of the moves we are resolving.
+//  - We don't need to push it on the stack, as we can reload it with its value
+//    once we have resolved a cycle.
+#define kSavedValue root
+
+// We use the MacroAssembler floating-point scratch register to break a cycle
+// involving double values as the MacroAssembler will not need it for the
+// operations performed by the gap resolver.
+#define kSavedDoubleValue fp_scratch
+
+
+LGapResolver::LGapResolver(LCodeGen* owner)
+ : cgen_(owner), moves_(32, owner->zone()), root_index_(0), in_cycle_(false),
+ saved_destination_(NULL), need_to_restore_root_(false) { }
+
+
+#define __ ACCESS_MASM(cgen_->masm())
+
+void LGapResolver::Resolve(LParallelMove* parallel_move) {
+ ASSERT(moves_.is_empty());
+
+ // Build up a worklist of moves.
+ BuildInitialMoveList(parallel_move);
+
+ for (int i = 0; i < moves_.length(); ++i) {
+ LMoveOperands move = moves_[i];
+
+ // Skip constants to perform them last. They don't block other moves
+ // and skipping such moves with register destinations keeps those
+ // registers free for the whole algorithm.
+ if (!move.IsEliminated() && !move.source()->IsConstantOperand()) {
+ root_index_ = i; // Any cycle is found when we reach this move again.
+ PerformMove(i);
+ if (in_cycle_) RestoreValue();
+ }
+ }
+
+ // Perform the moves with constant sources.
+ for (int i = 0; i < moves_.length(); ++i) {
+ LMoveOperands move = moves_[i];
+
+ if (!move.IsEliminated()) {
+ ASSERT(move.source()->IsConstantOperand());
+ EmitMove(i);
+ }
+ }
+
+ if (need_to_restore_root_) {
+ ASSERT(kSavedValue.Is(root));
+ __ InitializeRootRegister();
+ need_to_restore_root_ = false;
+ }
+
+ moves_.Rewind(0);
+}
+
+
+void LGapResolver::BuildInitialMoveList(LParallelMove* parallel_move) {
+ // Perform a linear sweep of the moves to add them to the initial list of
+ // moves to perform, ignoring any move that is redundant (the source is
+ // the same as the destination, the destination is ignored and
+ // unallocated, or the move was already eliminated).
+ const ZoneList<LMoveOperands>* moves = parallel_move->move_operands();
+ for (int i = 0; i < moves->length(); ++i) {
+ LMoveOperands move = moves->at(i);
+ if (!move.IsRedundant()) moves_.Add(move, cgen_->zone());
+ }
+ Verify();
+}
+
+
+void LGapResolver::PerformMove(int index) {
+ // Each call to this function performs a move and deletes it from the move
+ // graph. We first recursively perform any move blocking this one. We
+ // mark a move as "pending" on entry to PerformMove in order to detect
+ // cycles in the move graph.
+ LMoveOperands& current_move = moves_[index];
+
+ ASSERT(!current_move.IsPending());
+ ASSERT(!current_move.IsRedundant());
+
+ // Clear this move's destination to indicate a pending move. The actual
+ // destination is saved in a stack allocated local. Multiple moves can
+ // be pending because this function is recursive.
+ ASSERT(current_move.source() != NULL); // Otherwise it will look eliminated.
+ LOperand* destination = current_move.destination();
+ current_move.set_destination(NULL);
+
+ // Perform a depth-first traversal of the move graph to resolve
+ // dependencies. Any unperformed, unpending move with a source the same
+ // as this one's destination blocks this one so recursively perform all
+ // such moves.
+ for (int i = 0; i < moves_.length(); ++i) {
+ LMoveOperands other_move = moves_[i];
+ if (other_move.Blocks(destination) && !other_move.IsPending()) {
+ PerformMove(i);
+      // If there is a blocking, pending move it must be moves_[root_index_],
+      // and all other moves with the same source as moves_[root_index_] are
+      // successfully executed (because they are cycle-free) by this loop.
+ }
+ }
+
+ // We are about to resolve this move and don't need it marked as
+ // pending, so restore its destination.
+ current_move.set_destination(destination);
+
+ // The move may be blocked on a pending move, which must be the starting move.
+ // In this case, we have a cycle, and we save the source of this move to
+ // a scratch register to break it.
+ LMoveOperands other_move = moves_[root_index_];
+ if (other_move.Blocks(destination)) {
+ ASSERT(other_move.IsPending());
+ BreakCycle(index);
+ return;
+ }
+
+ // This move is no longer blocked.
+ EmitMove(index);
+}
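+
+
+// Worked example (illustrative): for the parallel move {x0 -> x1, x1 -> x0},
+// PerformMove(0) finds that x1 -> x0 blocks its destination and recurses into
+// it. That move is in turn blocked by the pending root move x0 -> x1, so
+// BreakCycle() saves the value of x1 in the root register and eliminates
+// x1 -> x0. The move x0 -> x1 is then emitted, and RestoreValue() writes the
+// saved value into x0. Since the root register was clobbered, Resolve()
+// reinitializes it once all moves are done.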
+
+
+void LGapResolver::Verify() {
+#ifdef ENABLE_SLOW_ASSERTS
+ // No operand should be the destination for more than one move.
+ for (int i = 0; i < moves_.length(); ++i) {
+ LOperand* destination = moves_[i].destination();
+ for (int j = i + 1; j < moves_.length(); ++j) {
+ SLOW_ASSERT(!destination->Equals(moves_[j].destination()));
+ }
+ }
+#endif
+}
+
+
+void LGapResolver::BreakCycle(int index) {
+ ASSERT(moves_[index].destination()->Equals(moves_[root_index_].source()));
+ ASSERT(!in_cycle_);
+
+  // We use registers that are not allocatable by Crankshaft to break the
+  // cycle, so they cannot interfere with the moves we are resolving.
+ ASSERT(!kSavedValue.IsAllocatable());
+ ASSERT(!kSavedDoubleValue.IsAllocatable());
+
+  // We save the source of this move in a scratch register and remember its
+  // destination. Then we mark the move as resolved, so the cycle is
+  // broken and we can perform the other moves.
+ in_cycle_ = true;
+ LOperand* source = moves_[index].source();
+ saved_destination_ = moves_[index].destination();
+
+ if (source->IsRegister()) {
+ need_to_restore_root_ = true;
+ __ Mov(kSavedValue, cgen_->ToRegister(source));
+ } else if (source->IsStackSlot()) {
+ need_to_restore_root_ = true;
+ __ Ldr(kSavedValue, cgen_->ToMemOperand(source));
+ } else if (source->IsDoubleRegister()) {
+ ASSERT(cgen_->masm()->FPTmpList()->IncludesAliasOf(kSavedDoubleValue));
+ cgen_->masm()->FPTmpList()->Remove(kSavedDoubleValue);
+ __ Fmov(kSavedDoubleValue, cgen_->ToDoubleRegister(source));
+ } else if (source->IsDoubleStackSlot()) {
+ ASSERT(cgen_->masm()->FPTmpList()->IncludesAliasOf(kSavedDoubleValue));
+ cgen_->masm()->FPTmpList()->Remove(kSavedDoubleValue);
+ __ Ldr(kSavedDoubleValue, cgen_->ToMemOperand(source));
+ } else {
+ UNREACHABLE();
+ }
+
+ // Mark this move as resolved.
+  // The move itself will actually be performed by copying the saved value to
+  // this move's destination in LGapResolver::RestoreValue().
+ moves_[index].Eliminate();
+}
+
+
+void LGapResolver::RestoreValue() {
+ ASSERT(in_cycle_);
+ ASSERT(saved_destination_ != NULL);
+
+ if (saved_destination_->IsRegister()) {
+ __ Mov(cgen_->ToRegister(saved_destination_), kSavedValue);
+ } else if (saved_destination_->IsStackSlot()) {
+ __ Str(kSavedValue, cgen_->ToMemOperand(saved_destination_));
+ } else if (saved_destination_->IsDoubleRegister()) {
+ __ Fmov(cgen_->ToDoubleRegister(saved_destination_), kSavedDoubleValue);
+ cgen_->masm()->FPTmpList()->Combine(kSavedDoubleValue);
+ } else if (saved_destination_->IsDoubleStackSlot()) {
+ __ Str(kSavedDoubleValue, cgen_->ToMemOperand(saved_destination_));
+ cgen_->masm()->FPTmpList()->Combine(kSavedDoubleValue);
+ } else {
+ UNREACHABLE();
+ }
+
+ in_cycle_ = false;
+ saved_destination_ = NULL;
+}
+
+
+void LGapResolver::EmitMove(int index) {
+ LOperand* source = moves_[index].source();
+ LOperand* destination = moves_[index].destination();
+
+ // Dispatch on the source and destination operand kinds. Not all
+ // combinations are possible.
+
+ if (source->IsRegister()) {
+ Register source_register = cgen_->ToRegister(source);
+ if (destination->IsRegister()) {
+ __ Mov(cgen_->ToRegister(destination), source_register);
+ } else {
+ ASSERT(destination->IsStackSlot());
+ __ Str(source_register, cgen_->ToMemOperand(destination));
+ }
+
+ } else if (source->IsStackSlot()) {
+ MemOperand source_operand = cgen_->ToMemOperand(source);
+ if (destination->IsRegister()) {
+ __ Ldr(cgen_->ToRegister(destination), source_operand);
+ } else {
+ ASSERT(destination->IsStackSlot());
+ EmitStackSlotMove(index);
+ }
+
+ } else if (source->IsConstantOperand()) {
+ LConstantOperand* constant_source = LConstantOperand::cast(source);
+ if (destination->IsRegister()) {
+ Register dst = cgen_->ToRegister(destination);
+ if (cgen_->IsSmi(constant_source)) {
+ __ Mov(dst, cgen_->ToSmi(constant_source));
+ } else if (cgen_->IsInteger32Constant(constant_source)) {
+ __ Mov(dst, cgen_->ToInteger32(constant_source));
+ } else {
+ __ LoadObject(dst, cgen_->ToHandle(constant_source));
+ }
+ } else if (destination->IsDoubleRegister()) {
+ DoubleRegister result = cgen_->ToDoubleRegister(destination);
+ __ Fmov(result, cgen_->ToDouble(constant_source));
+ } else {
+ ASSERT(destination->IsStackSlot());
+ ASSERT(!in_cycle_); // Constant moves happen after all cycles are gone.
+ need_to_restore_root_ = true;
+ if (cgen_->IsSmi(constant_source)) {
+ __ Mov(kSavedValue, cgen_->ToSmi(constant_source));
+ } else if (cgen_->IsInteger32Constant(constant_source)) {
+ __ Mov(kSavedValue, cgen_->ToInteger32(constant_source));
+ } else {
+ __ LoadObject(kSavedValue, cgen_->ToHandle(constant_source));
+ }
+ __ Str(kSavedValue, cgen_->ToMemOperand(destination));
+ }
+
+ } else if (source->IsDoubleRegister()) {
+ DoubleRegister src = cgen_->ToDoubleRegister(source);
+ if (destination->IsDoubleRegister()) {
+ __ Fmov(cgen_->ToDoubleRegister(destination), src);
+ } else {
+ ASSERT(destination->IsDoubleStackSlot());
+ __ Str(src, cgen_->ToMemOperand(destination));
+ }
+
+ } else if (source->IsDoubleStackSlot()) {
+ MemOperand src = cgen_->ToMemOperand(source);
+ if (destination->IsDoubleRegister()) {
+ __ Ldr(cgen_->ToDoubleRegister(destination), src);
+ } else {
+ ASSERT(destination->IsDoubleStackSlot());
+ EmitStackSlotMove(index);
+ }
+
+ } else {
+ UNREACHABLE();
+ }
+
+ // The move has been emitted, we can eliminate it.
+ moves_[index].Eliminate();
+}
+
+
+void LGapResolver::EmitStackSlotMove(int index) {
+ // We need a temp register to perform a stack slot to stack slot move, and
+ // the register must not be involved in breaking cycles.
+
+ // Use the Crankshaft double scratch register as the temporary.
+ DoubleRegister temp = crankshaft_fp_scratch;
+
+ LOperand* src = moves_[index].source();
+ LOperand* dst = moves_[index].destination();
+
+ ASSERT(src->IsStackSlot());
+ ASSERT(dst->IsStackSlot());
+ __ Ldr(temp, cgen_->ToMemOperand(src));
+ __ Str(temp, cgen_->ToMemOperand(dst));
+}
+
+} } // namespace v8::internal
diff --git a/deps/v8/src/arm64/lithium-gap-resolver-arm64.h b/deps/v8/src/arm64/lithium-gap-resolver-arm64.h
new file mode 100644
index 0000000000..d1637b65a9
--- /dev/null
+++ b/deps/v8/src/arm64/lithium-gap-resolver-arm64.h
@@ -0,0 +1,90 @@
+// Copyright 2013 the V8 project authors. All rights reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following
+// disclaimer in the documentation and/or other materials provided
+// with the distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived
+// from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+#ifndef V8_ARM64_LITHIUM_GAP_RESOLVER_ARM64_H_
+#define V8_ARM64_LITHIUM_GAP_RESOLVER_ARM64_H_
+
+#include "v8.h"
+
+#include "lithium.h"
+
+namespace v8 {
+namespace internal {
+
+class LCodeGen;
+class LGapResolver;
+
+class LGapResolver BASE_EMBEDDED {
+ public:
+ explicit LGapResolver(LCodeGen* owner);
+
+ // Resolve a set of parallel moves, emitting assembler instructions.
+ void Resolve(LParallelMove* parallel_move);
+
+ private:
+ // Build the initial list of moves.
+ void BuildInitialMoveList(LParallelMove* parallel_move);
+
+ // Perform the move at the moves_ index in question (possibly requiring
+ // other moves to satisfy dependencies).
+ void PerformMove(int index);
+
+ // If a cycle is found in the series of moves, save the blocking value to
+ // a scratch register. The cycle must be found by hitting the root of the
+ // depth-first search.
+ void BreakCycle(int index);
+
+ // After a cycle has been resolved, restore the value from the scratch
+ // register to its proper destination.
+ void RestoreValue();
+
+ // Emit a move and remove it from the move graph.
+ void EmitMove(int index);
+
+ // Emit a move from one stack slot to another.
+ void EmitStackSlotMove(int index);
+
+ // Verify the move list before performing moves.
+ void Verify();
+
+ LCodeGen* cgen_;
+
+ // List of moves not yet resolved.
+ ZoneList<LMoveOperands> moves_;
+
+ int root_index_;
+ bool in_cycle_;
+ LOperand* saved_destination_;
+
+ // We use the root register as a scratch in a few places. When that happens,
+ // this flag is set to indicate that it needs to be restored.
+ bool need_to_restore_root_;
+};
+
+} } // namespace v8::internal
+
+#endif // V8_ARM64_LITHIUM_GAP_RESOLVER_ARM64_H_
diff --git a/deps/v8/src/arm64/macro-assembler-arm64-inl.h b/deps/v8/src/arm64/macro-assembler-arm64-inl.h
new file mode 100644
index 0000000000..d660d36016
--- /dev/null
+++ b/deps/v8/src/arm64/macro-assembler-arm64-inl.h
@@ -0,0 +1,1677 @@
+// Copyright 2013 the V8 project authors. All rights reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following
+// disclaimer in the documentation and/or other materials provided
+// with the distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived
+// from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+#ifndef V8_ARM64_MACRO_ASSEMBLER_ARM64_INL_H_
+#define V8_ARM64_MACRO_ASSEMBLER_ARM64_INL_H_
+
+#include <ctype.h>
+
+#include "v8globals.h"
+#include "globals.h"
+
+#include "arm64/assembler-arm64.h"
+#include "arm64/assembler-arm64-inl.h"
+#include "arm64/macro-assembler-arm64.h"
+#include "arm64/instrument-arm64.h"
+
+
+namespace v8 {
+namespace internal {
+
+
+MemOperand FieldMemOperand(Register object, int offset) {
+ return MemOperand(object, offset - kHeapObjectTag);
+}
+
+
+MemOperand UntagSmiFieldMemOperand(Register object, int offset) {
+ return UntagSmiMemOperand(object, offset - kHeapObjectTag);
+}
+
+
+MemOperand UntagSmiMemOperand(Register object, int offset) {
+  // Assumes that Smis are shifted by 32 bits and that the target is
+  // little-endian.
+ STATIC_ASSERT(kSmiShift == 32);
+ return MemOperand(object, offset + (kSmiShift / kBitsPerByte));
+}
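+
+
+// For example (illustrative): with kSmiShift == 32, a 64-bit tagged Smi stored
+// at byte offset 8 keeps its payload in the upper 32 bits of the word, so on
+// little-endian ARM64 a W-sized load from offset 8 + 4 reads the untagged
+// value directly.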
+
+
+Handle<Object> MacroAssembler::CodeObject() {
+ ASSERT(!code_object_.is_null());
+ return code_object_;
+}
+
+
+void MacroAssembler::And(const Register& rd,
+ const Register& rn,
+ const Operand& operand) {
+ ASSERT(allow_macro_instructions_);
+ ASSERT(!rd.IsZero());
+ LogicalMacro(rd, rn, operand, AND);
+}
+
+
+void MacroAssembler::Ands(const Register& rd,
+ const Register& rn,
+ const Operand& operand) {
+ ASSERT(allow_macro_instructions_);
+ ASSERT(!rd.IsZero());
+ LogicalMacro(rd, rn, operand, ANDS);
+}
+
+
+void MacroAssembler::Tst(const Register& rn,
+ const Operand& operand) {
+ ASSERT(allow_macro_instructions_);
+ LogicalMacro(AppropriateZeroRegFor(rn), rn, operand, ANDS);
+}
+
+
+void MacroAssembler::Bic(const Register& rd,
+ const Register& rn,
+ const Operand& operand) {
+ ASSERT(allow_macro_instructions_);
+ ASSERT(!rd.IsZero());
+ LogicalMacro(rd, rn, operand, BIC);
+}
+
+
+void MacroAssembler::Bics(const Register& rd,
+ const Register& rn,
+ const Operand& operand) {
+ ASSERT(allow_macro_instructions_);
+ ASSERT(!rd.IsZero());
+ LogicalMacro(rd, rn, operand, BICS);
+}
+
+
+void MacroAssembler::Orr(const Register& rd,
+ const Register& rn,
+ const Operand& operand) {
+ ASSERT(allow_macro_instructions_);
+ ASSERT(!rd.IsZero());
+ LogicalMacro(rd, rn, operand, ORR);
+}
+
+
+void MacroAssembler::Orn(const Register& rd,
+ const Register& rn,
+ const Operand& operand) {
+ ASSERT(allow_macro_instructions_);
+ ASSERT(!rd.IsZero());
+ LogicalMacro(rd, rn, operand, ORN);
+}
+
+
+void MacroAssembler::Eor(const Register& rd,
+ const Register& rn,
+ const Operand& operand) {
+ ASSERT(allow_macro_instructions_);
+ ASSERT(!rd.IsZero());
+ LogicalMacro(rd, rn, operand, EOR);
+}
+
+
+void MacroAssembler::Eon(const Register& rd,
+ const Register& rn,
+ const Operand& operand) {
+ ASSERT(allow_macro_instructions_);
+ ASSERT(!rd.IsZero());
+ LogicalMacro(rd, rn, operand, EON);
+}
+
+
+void MacroAssembler::Ccmp(const Register& rn,
+ const Operand& operand,
+ StatusFlags nzcv,
+ Condition cond) {
+ ASSERT(allow_macro_instructions_);
+ if (operand.IsImmediate() && (operand.immediate() < 0)) {
+ ConditionalCompareMacro(rn, -operand.immediate(), nzcv, cond, CCMN);
+ } else {
+ ConditionalCompareMacro(rn, operand, nzcv, cond, CCMP);
+ }
+}
+
+
+void MacroAssembler::Ccmn(const Register& rn,
+ const Operand& operand,
+ StatusFlags nzcv,
+ Condition cond) {
+ ASSERT(allow_macro_instructions_);
+ if (operand.IsImmediate() && (operand.immediate() < 0)) {
+ ConditionalCompareMacro(rn, -operand.immediate(), nzcv, cond, CCMP);
+ } else {
+ ConditionalCompareMacro(rn, operand, nzcv, cond, CCMN);
+ }
+}
+
+
+void MacroAssembler::Add(const Register& rd,
+ const Register& rn,
+ const Operand& operand) {
+ ASSERT(allow_macro_instructions_);
+ if (operand.IsImmediate() && (operand.immediate() < 0)) {
+ AddSubMacro(rd, rn, -operand.immediate(), LeaveFlags, SUB);
+ } else {
+ AddSubMacro(rd, rn, operand, LeaveFlags, ADD);
+ }
+}
+
+void MacroAssembler::Adds(const Register& rd,
+ const Register& rn,
+ const Operand& operand) {
+ ASSERT(allow_macro_instructions_);
+ if (operand.IsImmediate() && (operand.immediate() < 0)) {
+ AddSubMacro(rd, rn, -operand.immediate(), SetFlags, SUB);
+ } else {
+ AddSubMacro(rd, rn, operand, SetFlags, ADD);
+ }
+}
+
+
+void MacroAssembler::Sub(const Register& rd,
+ const Register& rn,
+ const Operand& operand) {
+ ASSERT(allow_macro_instructions_);
+ if (operand.IsImmediate() && (operand.immediate() < 0)) {
+ AddSubMacro(rd, rn, -operand.immediate(), LeaveFlags, ADD);
+ } else {
+ AddSubMacro(rd, rn, operand, LeaveFlags, SUB);
+ }
+}
+
+
+void MacroAssembler::Subs(const Register& rd,
+ const Register& rn,
+ const Operand& operand) {
+ ASSERT(allow_macro_instructions_);
+ if (operand.IsImmediate() && (operand.immediate() < 0)) {
+ AddSubMacro(rd, rn, -operand.immediate(), SetFlags, ADD);
+ } else {
+ AddSubMacro(rd, rn, operand, SetFlags, SUB);
+ }
+}
+
+
+void MacroAssembler::Cmn(const Register& rn, const Operand& operand) {
+ ASSERT(allow_macro_instructions_);
+ Adds(AppropriateZeroRegFor(rn), rn, operand);
+}
+
+
+void MacroAssembler::Cmp(const Register& rn, const Operand& operand) {
+ ASSERT(allow_macro_instructions_);
+ Subs(AppropriateZeroRegFor(rn), rn, operand);
+}
+
+
+void MacroAssembler::Neg(const Register& rd,
+ const Operand& operand) {
+ ASSERT(allow_macro_instructions_);
+ ASSERT(!rd.IsZero());
+ if (operand.IsImmediate()) {
+ Mov(rd, -operand.immediate());
+ } else {
+ Sub(rd, AppropriateZeroRegFor(rd), operand);
+ }
+}
+
+
+void MacroAssembler::Negs(const Register& rd,
+ const Operand& operand) {
+ ASSERT(allow_macro_instructions_);
+ Subs(rd, AppropriateZeroRegFor(rd), operand);
+}
+
+
+void MacroAssembler::Adc(const Register& rd,
+ const Register& rn,
+ const Operand& operand) {
+ ASSERT(allow_macro_instructions_);
+ ASSERT(!rd.IsZero());
+ AddSubWithCarryMacro(rd, rn, operand, LeaveFlags, ADC);
+}
+
+
+void MacroAssembler::Adcs(const Register& rd,
+ const Register& rn,
+ const Operand& operand) {
+ ASSERT(allow_macro_instructions_);
+ ASSERT(!rd.IsZero());
+ AddSubWithCarryMacro(rd, rn, operand, SetFlags, ADC);
+}
+
+
+void MacroAssembler::Sbc(const Register& rd,
+ const Register& rn,
+ const Operand& operand) {
+ ASSERT(allow_macro_instructions_);
+ ASSERT(!rd.IsZero());
+ AddSubWithCarryMacro(rd, rn, operand, LeaveFlags, SBC);
+}
+
+
+void MacroAssembler::Sbcs(const Register& rd,
+ const Register& rn,
+ const Operand& operand) {
+ ASSERT(allow_macro_instructions_);
+ ASSERT(!rd.IsZero());
+ AddSubWithCarryMacro(rd, rn, operand, SetFlags, SBC);
+}
+
+
+void MacroAssembler::Ngc(const Register& rd,
+ const Operand& operand) {
+ ASSERT(allow_macro_instructions_);
+ ASSERT(!rd.IsZero());
+ Register zr = AppropriateZeroRegFor(rd);
+ Sbc(rd, zr, operand);
+}
+
+
+void MacroAssembler::Ngcs(const Register& rd,
+ const Operand& operand) {
+ ASSERT(allow_macro_instructions_);
+ ASSERT(!rd.IsZero());
+ Register zr = AppropriateZeroRegFor(rd);
+ Sbcs(rd, zr, operand);
+}
+
+
+void MacroAssembler::Mvn(const Register& rd, uint64_t imm) {
+ ASSERT(allow_macro_instructions_);
+ ASSERT(!rd.IsZero());
+ Mov(rd, ~imm);
+}
+
+
+#define DEFINE_FUNCTION(FN, REGTYPE, REG, OP) \
+void MacroAssembler::FN(const REGTYPE REG, const MemOperand& addr) { \
+ ASSERT(allow_macro_instructions_); \
+ LoadStoreMacro(REG, addr, OP); \
+}
+LS_MACRO_LIST(DEFINE_FUNCTION)
+#undef DEFINE_FUNCTION
+
+
+void MacroAssembler::Adr(const Register& rd, Label* label) {
+ ASSERT(allow_macro_instructions_);
+ ASSERT(!rd.IsZero());
+ adr(rd, label);
+}
+
+
+void MacroAssembler::Asr(const Register& rd,
+ const Register& rn,
+ unsigned shift) {
+ ASSERT(allow_macro_instructions_);
+ ASSERT(!rd.IsZero());
+ asr(rd, rn, shift);
+}
+
+
+void MacroAssembler::Asr(const Register& rd,
+ const Register& rn,
+ const Register& rm) {
+ ASSERT(allow_macro_instructions_);
+ ASSERT(!rd.IsZero());
+ asrv(rd, rn, rm);
+}
+
+
+void MacroAssembler::B(Label* label) {
+ b(label);
+ CheckVeneerPool(false, false);
+}
+
+
+void MacroAssembler::B(Condition cond, Label* label) {
+ ASSERT(allow_macro_instructions_);
+ B(label, cond);
+}
+
+
+void MacroAssembler::Bfi(const Register& rd,
+ const Register& rn,
+ unsigned lsb,
+ unsigned width) {
+ ASSERT(allow_macro_instructions_);
+ ASSERT(!rd.IsZero());
+ bfi(rd, rn, lsb, width);
+}
+
+
+void MacroAssembler::Bfxil(const Register& rd,
+ const Register& rn,
+ unsigned lsb,
+ unsigned width) {
+ ASSERT(allow_macro_instructions_);
+ ASSERT(!rd.IsZero());
+ bfxil(rd, rn, lsb, width);
+}
+
+
+void MacroAssembler::Bind(Label* label) {
+ ASSERT(allow_macro_instructions_);
+ bind(label);
+}
+
+
+void MacroAssembler::Bl(Label* label) {
+ ASSERT(allow_macro_instructions_);
+ bl(label);
+}
+
+
+void MacroAssembler::Blr(const Register& xn) {
+ ASSERT(allow_macro_instructions_);
+ ASSERT(!xn.IsZero());
+ blr(xn);
+}
+
+
+void MacroAssembler::Br(const Register& xn) {
+ ASSERT(allow_macro_instructions_);
+ ASSERT(!xn.IsZero());
+ br(xn);
+}
+
+
+void MacroAssembler::Brk(int code) {
+ ASSERT(allow_macro_instructions_);
+ brk(code);
+}
+
+
+void MacroAssembler::Cinc(const Register& rd,
+ const Register& rn,
+ Condition cond) {
+ ASSERT(allow_macro_instructions_);
+ ASSERT(!rd.IsZero());
+ ASSERT((cond != al) && (cond != nv));
+ cinc(rd, rn, cond);
+}
+
+
+void MacroAssembler::Cinv(const Register& rd,
+ const Register& rn,
+ Condition cond) {
+ ASSERT(allow_macro_instructions_);
+ ASSERT(!rd.IsZero());
+ ASSERT((cond != al) && (cond != nv));
+ cinv(rd, rn, cond);
+}
+
+
+void MacroAssembler::Cls(const Register& rd, const Register& rn) {
+ ASSERT(allow_macro_instructions_);
+ ASSERT(!rd.IsZero());
+ cls(rd, rn);
+}
+
+
+void MacroAssembler::Clz(const Register& rd, const Register& rn) {
+ ASSERT(allow_macro_instructions_);
+ ASSERT(!rd.IsZero());
+ clz(rd, rn);
+}
+
+
+void MacroAssembler::Cneg(const Register& rd,
+ const Register& rn,
+ Condition cond) {
+ ASSERT(allow_macro_instructions_);
+ ASSERT(!rd.IsZero());
+ ASSERT((cond != al) && (cond != nv));
+ cneg(rd, rn, cond);
+}
+
+
+// Conditionally zero the destination register. Only X registers are supported
+// due to the truncation side-effect when used on W registers.
+void MacroAssembler::CzeroX(const Register& rd,
+ Condition cond) {
+ ASSERT(allow_macro_instructions_);
+ ASSERT(!rd.IsSP() && rd.Is64Bits());
+ ASSERT((cond != al) && (cond != nv));
+ csel(rd, xzr, rd, cond);
+}
+
+
+// Conditionally move a value into the destination register. Only X registers
+// are supported due to the truncation side-effect when used on W registers.
+void MacroAssembler::CmovX(const Register& rd,
+ const Register& rn,
+ Condition cond) {
+ ASSERT(allow_macro_instructions_);
+ ASSERT(!rd.IsSP());
+ ASSERT(rd.Is64Bits() && rn.Is64Bits());
+ ASSERT((cond != al) && (cond != nv));
+ if (!rd.is(rn)) {
+ csel(rd, rn, rd, cond);
+ }
+}
+
+
+void MacroAssembler::Cset(const Register& rd, Condition cond) {
+ ASSERT(allow_macro_instructions_);
+ ASSERT(!rd.IsZero());
+ ASSERT((cond != al) && (cond != nv));
+ cset(rd, cond);
+}
+
+
+void MacroAssembler::Csetm(const Register& rd, Condition cond) {
+ ASSERT(allow_macro_instructions_);
+ ASSERT(!rd.IsZero());
+ ASSERT((cond != al) && (cond != nv));
+ csetm(rd, cond);
+}
+
+
+void MacroAssembler::Csinc(const Register& rd,
+ const Register& rn,
+ const Register& rm,
+ Condition cond) {
+ ASSERT(allow_macro_instructions_);
+ ASSERT(!rd.IsZero());
+ ASSERT((cond != al) && (cond != nv));
+ csinc(rd, rn, rm, cond);
+}
+
+
+void MacroAssembler::Csinv(const Register& rd,
+ const Register& rn,
+ const Register& rm,
+ Condition cond) {
+ ASSERT(allow_macro_instructions_);
+ ASSERT(!rd.IsZero());
+ ASSERT((cond != al) && (cond != nv));
+ csinv(rd, rn, rm, cond);
+}
+
+
+void MacroAssembler::Csneg(const Register& rd,
+ const Register& rn,
+ const Register& rm,
+ Condition cond) {
+ ASSERT(allow_macro_instructions_);
+ ASSERT(!rd.IsZero());
+ ASSERT((cond != al) && (cond != nv));
+ csneg(rd, rn, rm, cond);
+}
+
+
+void MacroAssembler::Dmb(BarrierDomain domain, BarrierType type) {
+ ASSERT(allow_macro_instructions_);
+ dmb(domain, type);
+}
+
+
+void MacroAssembler::Dsb(BarrierDomain domain, BarrierType type) {
+ ASSERT(allow_macro_instructions_);
+ dsb(domain, type);
+}
+
+
+void MacroAssembler::Debug(const char* message, uint32_t code, Instr params) {
+ ASSERT(allow_macro_instructions_);
+ debug(message, code, params);
+}
+
+
+void MacroAssembler::Extr(const Register& rd,
+ const Register& rn,
+ const Register& rm,
+ unsigned lsb) {
+ ASSERT(allow_macro_instructions_);
+ ASSERT(!rd.IsZero());
+ extr(rd, rn, rm, lsb);
+}
+
+
+void MacroAssembler::Fabs(const FPRegister& fd, const FPRegister& fn) {
+ ASSERT(allow_macro_instructions_);
+ fabs(fd, fn);
+}
+
+
+void MacroAssembler::Fadd(const FPRegister& fd,
+ const FPRegister& fn,
+ const FPRegister& fm) {
+ ASSERT(allow_macro_instructions_);
+ fadd(fd, fn, fm);
+}
+
+
+void MacroAssembler::Fccmp(const FPRegister& fn,
+ const FPRegister& fm,
+ StatusFlags nzcv,
+ Condition cond) {
+ ASSERT(allow_macro_instructions_);
+ ASSERT((cond != al) && (cond != nv));
+ fccmp(fn, fm, nzcv, cond);
+}
+
+
+void MacroAssembler::Fcmp(const FPRegister& fn, const FPRegister& fm) {
+ ASSERT(allow_macro_instructions_);
+ fcmp(fn, fm);
+}
+
+
+void MacroAssembler::Fcmp(const FPRegister& fn, double value) {
+ ASSERT(allow_macro_instructions_);
+ if (value != 0.0) {
+ UseScratchRegisterScope temps(this);
+ FPRegister tmp = temps.AcquireSameSizeAs(fn);
+ Fmov(tmp, value);
+ fcmp(fn, tmp);
+ } else {
+ fcmp(fn, value);
+ }
+}
+
+
+void MacroAssembler::Fcsel(const FPRegister& fd,
+ const FPRegister& fn,
+ const FPRegister& fm,
+ Condition cond) {
+ ASSERT(allow_macro_instructions_);
+ ASSERT((cond != al) && (cond != nv));
+ fcsel(fd, fn, fm, cond);
+}
+
+
+void MacroAssembler::Fcvt(const FPRegister& fd, const FPRegister& fn) {
+ ASSERT(allow_macro_instructions_);
+ fcvt(fd, fn);
+}
+
+
+void MacroAssembler::Fcvtas(const Register& rd, const FPRegister& fn) {
+ ASSERT(allow_macro_instructions_);
+ ASSERT(!rd.IsZero());
+ fcvtas(rd, fn);
+}
+
+
+void MacroAssembler::Fcvtau(const Register& rd, const FPRegister& fn) {
+ ASSERT(allow_macro_instructions_);
+ ASSERT(!rd.IsZero());
+ fcvtau(rd, fn);
+}
+
+
+void MacroAssembler::Fcvtms(const Register& rd, const FPRegister& fn) {
+ ASSERT(allow_macro_instructions_);
+ ASSERT(!rd.IsZero());
+ fcvtms(rd, fn);
+}
+
+
+void MacroAssembler::Fcvtmu(const Register& rd, const FPRegister& fn) {
+ ASSERT(allow_macro_instructions_);
+ ASSERT(!rd.IsZero());
+ fcvtmu(rd, fn);
+}
+
+
+void MacroAssembler::Fcvtns(const Register& rd, const FPRegister& fn) {
+ ASSERT(allow_macro_instructions_);
+ ASSERT(!rd.IsZero());
+ fcvtns(rd, fn);
+}
+
+
+void MacroAssembler::Fcvtnu(const Register& rd, const FPRegister& fn) {
+ ASSERT(allow_macro_instructions_);
+ ASSERT(!rd.IsZero());
+ fcvtnu(rd, fn);
+}
+
+
+void MacroAssembler::Fcvtzs(const Register& rd, const FPRegister& fn) {
+ ASSERT(allow_macro_instructions_);
+ ASSERT(!rd.IsZero());
+ fcvtzs(rd, fn);
+}
+void MacroAssembler::Fcvtzu(const Register& rd, const FPRegister& fn) {
+ ASSERT(allow_macro_instructions_);
+ ASSERT(!rd.IsZero());
+ fcvtzu(rd, fn);
+}
+
+
+void MacroAssembler::Fdiv(const FPRegister& fd,
+ const FPRegister& fn,
+ const FPRegister& fm) {
+ ASSERT(allow_macro_instructions_);
+ fdiv(fd, fn, fm);
+}
+
+
+void MacroAssembler::Fmadd(const FPRegister& fd,
+ const FPRegister& fn,
+ const FPRegister& fm,
+ const FPRegister& fa) {
+ ASSERT(allow_macro_instructions_);
+ fmadd(fd, fn, fm, fa);
+}
+
+
+void MacroAssembler::Fmax(const FPRegister& fd,
+ const FPRegister& fn,
+ const FPRegister& fm) {
+ ASSERT(allow_macro_instructions_);
+ fmax(fd, fn, fm);
+}
+
+
+void MacroAssembler::Fmaxnm(const FPRegister& fd,
+ const FPRegister& fn,
+ const FPRegister& fm) {
+ ASSERT(allow_macro_instructions_);
+ fmaxnm(fd, fn, fm);
+}
+
+
+void MacroAssembler::Fmin(const FPRegister& fd,
+ const FPRegister& fn,
+ const FPRegister& fm) {
+ ASSERT(allow_macro_instructions_);
+ fmin(fd, fn, fm);
+}
+
+
+void MacroAssembler::Fminnm(const FPRegister& fd,
+ const FPRegister& fn,
+ const FPRegister& fm) {
+ ASSERT(allow_macro_instructions_);
+ fminnm(fd, fn, fm);
+}
+
+
+void MacroAssembler::Fmov(FPRegister fd, FPRegister fn) {
+ ASSERT(allow_macro_instructions_);
+  // Only skip the instruction if fd and fn are the same, and they are both D
+  // registers. fmov(s0, s0) is not a no-op because it clears the top word of
+  // d0. Technically, fmov(d0, d0) is not a no-op either because it clears the
+  // top of q0, but FPRegister does not currently support Q registers.
+ if (!fd.Is(fn) || !fd.Is64Bits()) {
+ fmov(fd, fn);
+ }
+}
+
+
+void MacroAssembler::Fmov(FPRegister fd, Register rn) {
+ ASSERT(allow_macro_instructions_);
+ fmov(fd, rn);
+}
+
+
+void MacroAssembler::Fmov(FPRegister fd, double imm) {
+ ASSERT(allow_macro_instructions_);
+ if (fd.Is32Bits()) {
+ Fmov(fd, static_cast<float>(imm));
+ return;
+ }
+
+ ASSERT(fd.Is64Bits());
+ if (IsImmFP64(imm)) {
+ fmov(fd, imm);
+ } else if ((imm == 0.0) && (copysign(1.0, imm) == 1.0)) {
+ fmov(fd, xzr);
+ } else {
+ UseScratchRegisterScope temps(this);
+ Register tmp = temps.AcquireX();
+ // TODO(all): Use Assembler::ldr(const FPRegister& ft, double imm).
+ Mov(tmp, double_to_rawbits(imm));
+ Fmov(fd, tmp);
+ }
+}
+
+
+void MacroAssembler::Fmov(FPRegister fd, float imm) {
+ ASSERT(allow_macro_instructions_);
+ if (fd.Is64Bits()) {
+ Fmov(fd, static_cast<double>(imm));
+ return;
+ }
+
+ ASSERT(fd.Is32Bits());
+ if (IsImmFP32(imm)) {
+ fmov(fd, imm);
+ } else if ((imm == 0.0) && (copysign(1.0, imm) == 1.0)) {
+ fmov(fd, wzr);
+ } else {
+ UseScratchRegisterScope temps(this);
+ Register tmp = temps.AcquireW();
+ // TODO(all): Use Assembler::ldr(const FPRegister& ft, float imm).
+ Mov(tmp, float_to_rawbits(imm));
+ Fmov(fd, tmp);
+ }
+}
+
+
+void MacroAssembler::Fmov(Register rd, FPRegister fn) {
+ ASSERT(allow_macro_instructions_);
+ ASSERT(!rd.IsZero());
+ fmov(rd, fn);
+}
+
+
+void MacroAssembler::Fmsub(const FPRegister& fd,
+ const FPRegister& fn,
+ const FPRegister& fm,
+ const FPRegister& fa) {
+ ASSERT(allow_macro_instructions_);
+ fmsub(fd, fn, fm, fa);
+}
+
+
+void MacroAssembler::Fmul(const FPRegister& fd,
+ const FPRegister& fn,
+ const FPRegister& fm) {
+ ASSERT(allow_macro_instructions_);
+ fmul(fd, fn, fm);
+}
+
+
+void MacroAssembler::Fneg(const FPRegister& fd, const FPRegister& fn) {
+ ASSERT(allow_macro_instructions_);
+ fneg(fd, fn);
+}
+
+
+void MacroAssembler::Fnmadd(const FPRegister& fd,
+ const FPRegister& fn,
+ const FPRegister& fm,
+ const FPRegister& fa) {
+ ASSERT(allow_macro_instructions_);
+ fnmadd(fd, fn, fm, fa);
+}
+
+
+void MacroAssembler::Fnmsub(const FPRegister& fd,
+ const FPRegister& fn,
+ const FPRegister& fm,
+ const FPRegister& fa) {
+ ASSERT(allow_macro_instructions_);
+ fnmsub(fd, fn, fm, fa);
+}
+
+
+void MacroAssembler::Frinta(const FPRegister& fd, const FPRegister& fn) {
+ ASSERT(allow_macro_instructions_);
+ frinta(fd, fn);
+}
+
+
+void MacroAssembler::Frintn(const FPRegister& fd, const FPRegister& fn) {
+ ASSERT(allow_macro_instructions_);
+ frintn(fd, fn);
+}
+
+
+void MacroAssembler::Frintz(const FPRegister& fd, const FPRegister& fn) {
+ ASSERT(allow_macro_instructions_);
+ frintz(fd, fn);
+}
+
+
+void MacroAssembler::Fsqrt(const FPRegister& fd, const FPRegister& fn) {
+ ASSERT(allow_macro_instructions_);
+ fsqrt(fd, fn);
+}
+
+
+void MacroAssembler::Fsub(const FPRegister& fd,
+ const FPRegister& fn,
+ const FPRegister& fm) {
+ ASSERT(allow_macro_instructions_);
+ fsub(fd, fn, fm);
+}
+
+
+void MacroAssembler::Hint(SystemHint code) {
+ ASSERT(allow_macro_instructions_);
+ hint(code);
+}
+
+
+void MacroAssembler::Hlt(int code) {
+ ASSERT(allow_macro_instructions_);
+ hlt(code);
+}
+
+
+void MacroAssembler::Isb() {
+ ASSERT(allow_macro_instructions_);
+ isb();
+}
+
+
+void MacroAssembler::Ldnp(const CPURegister& rt,
+ const CPURegister& rt2,
+ const MemOperand& src) {
+ ASSERT(allow_macro_instructions_);
+ ASSERT(!AreAliased(rt, rt2));
+ ldnp(rt, rt2, src);
+}
+
+
+void MacroAssembler::Ldp(const CPURegister& rt,
+ const CPURegister& rt2,
+ const MemOperand& src) {
+ ASSERT(allow_macro_instructions_);
+ ASSERT(!AreAliased(rt, rt2));
+ ldp(rt, rt2, src);
+}
+
+
+void MacroAssembler::Ldpsw(const Register& rt,
+ const Register& rt2,
+ const MemOperand& src) {
+ ASSERT(allow_macro_instructions_);
+ ASSERT(!rt.IsZero());
+ ASSERT(!rt2.IsZero());
+ ldpsw(rt, rt2, src);
+}
+
+
+void MacroAssembler::Ldr(const FPRegister& ft, double imm) {
+ ASSERT(allow_macro_instructions_);
+ ldr(ft, imm);
+}
+
+
+void MacroAssembler::Ldr(const Register& rt, uint64_t imm) {
+ ASSERT(allow_macro_instructions_);
+ ASSERT(!rt.IsZero());
+ ldr(rt, imm);
+}
+
+
+void MacroAssembler::Lsl(const Register& rd,
+ const Register& rn,
+ unsigned shift) {
+ ASSERT(allow_macro_instructions_);
+ ASSERT(!rd.IsZero());
+ lsl(rd, rn, shift);
+}
+
+
+void MacroAssembler::Lsl(const Register& rd,
+ const Register& rn,
+ const Register& rm) {
+ ASSERT(allow_macro_instructions_);
+ ASSERT(!rd.IsZero());
+ lslv(rd, rn, rm);
+}
+
+
+void MacroAssembler::Lsr(const Register& rd,
+ const Register& rn,
+ unsigned shift) {
+ ASSERT(allow_macro_instructions_);
+ ASSERT(!rd.IsZero());
+ lsr(rd, rn, shift);
+}
+
+
+void MacroAssembler::Lsr(const Register& rd,
+ const Register& rn,
+ const Register& rm) {
+ ASSERT(allow_macro_instructions_);
+ ASSERT(!rd.IsZero());
+ lsrv(rd, rn, rm);
+}
+
+
+void MacroAssembler::Madd(const Register& rd,
+ const Register& rn,
+ const Register& rm,
+ const Register& ra) {
+ ASSERT(allow_macro_instructions_);
+ ASSERT(!rd.IsZero());
+ madd(rd, rn, rm, ra);
+}
+
+
+void MacroAssembler::Mneg(const Register& rd,
+ const Register& rn,
+ const Register& rm) {
+ ASSERT(allow_macro_instructions_);
+ ASSERT(!rd.IsZero());
+ mneg(rd, rn, rm);
+}
+
+
+void MacroAssembler::Mov(const Register& rd, const Register& rn) {
+ ASSERT(allow_macro_instructions_);
+ ASSERT(!rd.IsZero());
+ // Emit a register move only if the registers are distinct, or if they are
+ // not X registers. Note that mov(w0, w0) is not a no-op because it clears
+ // the top word of x0.
+ if (!rd.Is(rn) || !rd.Is64Bits()) {
+ Assembler::mov(rd, rn);
+ }
+}
+
+
+void MacroAssembler::Movk(const Register& rd, uint64_t imm, int shift) {
+ ASSERT(allow_macro_instructions_);
+ ASSERT(!rd.IsZero());
+ movk(rd, imm, shift);
+}
+
+
+void MacroAssembler::Mrs(const Register& rt, SystemRegister sysreg) {
+ ASSERT(allow_macro_instructions_);
+ ASSERT(!rt.IsZero());
+ mrs(rt, sysreg);
+}
+
+
+void MacroAssembler::Msr(SystemRegister sysreg, const Register& rt) {
+ ASSERT(allow_macro_instructions_);
+ ASSERT(!rt.IsZero());
+ msr(sysreg, rt);
+}
+
+
+void MacroAssembler::Msub(const Register& rd,
+ const Register& rn,
+ const Register& rm,
+ const Register& ra) {
+ ASSERT(allow_macro_instructions_);
+ ASSERT(!rd.IsZero());
+ msub(rd, rn, rm, ra);
+}
+
+
+void MacroAssembler::Mul(const Register& rd,
+ const Register& rn,
+ const Register& rm) {
+ ASSERT(allow_macro_instructions_);
+ ASSERT(!rd.IsZero());
+ mul(rd, rn, rm);
+}
+
+
+void MacroAssembler::Rbit(const Register& rd, const Register& rn) {
+ ASSERT(allow_macro_instructions_);
+ ASSERT(!rd.IsZero());
+ rbit(rd, rn);
+}
+
+
+void MacroAssembler::Ret(const Register& xn) {
+ ASSERT(allow_macro_instructions_);
+ ASSERT(!xn.IsZero());
+ ret(xn);
+ CheckVeneerPool(false, false);
+}
+
+
+void MacroAssembler::Rev(const Register& rd, const Register& rn) {
+ ASSERT(allow_macro_instructions_);
+ ASSERT(!rd.IsZero());
+ rev(rd, rn);
+}
+
+
+void MacroAssembler::Rev16(const Register& rd, const Register& rn) {
+ ASSERT(allow_macro_instructions_);
+ ASSERT(!rd.IsZero());
+ rev16(rd, rn);
+}
+
+
+void MacroAssembler::Rev32(const Register& rd, const Register& rn) {
+ ASSERT(allow_macro_instructions_);
+ ASSERT(!rd.IsZero());
+ rev32(rd, rn);
+}
+
+
+void MacroAssembler::Ror(const Register& rd,
+ const Register& rs,
+ unsigned shift) {
+ ASSERT(allow_macro_instructions_);
+ ASSERT(!rd.IsZero());
+ ror(rd, rs, shift);
+}
+
+
+void MacroAssembler::Ror(const Register& rd,
+ const Register& rn,
+ const Register& rm) {
+ ASSERT(allow_macro_instructions_);
+ ASSERT(!rd.IsZero());
+ rorv(rd, rn, rm);
+}
+
+
+void MacroAssembler::Sbfiz(const Register& rd,
+ const Register& rn,
+ unsigned lsb,
+ unsigned width) {
+ ASSERT(allow_macro_instructions_);
+ ASSERT(!rd.IsZero());
+ sbfiz(rd, rn, lsb, width);
+}
+
+
+void MacroAssembler::Sbfx(const Register& rd,
+ const Register& rn,
+ unsigned lsb,
+ unsigned width) {
+ ASSERT(allow_macro_instructions_);
+ ASSERT(!rd.IsZero());
+ sbfx(rd, rn, lsb, width);
+}
+
+
+void MacroAssembler::Scvtf(const FPRegister& fd,
+ const Register& rn,
+ unsigned fbits) {
+ ASSERT(allow_macro_instructions_);
+ scvtf(fd, rn, fbits);
+}
+
+
+void MacroAssembler::Sdiv(const Register& rd,
+ const Register& rn,
+ const Register& rm) {
+ ASSERT(allow_macro_instructions_);
+ ASSERT(!rd.IsZero());
+ sdiv(rd, rn, rm);
+}
+
+
+void MacroAssembler::Smaddl(const Register& rd,
+ const Register& rn,
+ const Register& rm,
+ const Register& ra) {
+ ASSERT(allow_macro_instructions_);
+ ASSERT(!rd.IsZero());
+ smaddl(rd, rn, rm, ra);
+}
+
+
+void MacroAssembler::Smsubl(const Register& rd,
+ const Register& rn,
+ const Register& rm,
+ const Register& ra) {
+ ASSERT(allow_macro_instructions_);
+ ASSERT(!rd.IsZero());
+ smsubl(rd, rn, rm, ra);
+}
+
+
+void MacroAssembler::Smull(const Register& rd,
+ const Register& rn,
+ const Register& rm) {
+ ASSERT(allow_macro_instructions_);
+ ASSERT(!rd.IsZero());
+ smull(rd, rn, rm);
+}
+
+
+void MacroAssembler::Smulh(const Register& rd,
+ const Register& rn,
+ const Register& rm) {
+ ASSERT(allow_macro_instructions_);
+ ASSERT(!rd.IsZero());
+ smulh(rd, rn, rm);
+}
+
+
+void MacroAssembler::Stnp(const CPURegister& rt,
+ const CPURegister& rt2,
+ const MemOperand& dst) {
+ ASSERT(allow_macro_instructions_);
+ stnp(rt, rt2, dst);
+}
+
+
+void MacroAssembler::Stp(const CPURegister& rt,
+ const CPURegister& rt2,
+ const MemOperand& dst) {
+ ASSERT(allow_macro_instructions_);
+ stp(rt, rt2, dst);
+}
+
+
+void MacroAssembler::Sxtb(const Register& rd, const Register& rn) {
+ ASSERT(allow_macro_instructions_);
+ ASSERT(!rd.IsZero());
+ sxtb(rd, rn);
+}
+
+
+void MacroAssembler::Sxth(const Register& rd, const Register& rn) {
+ ASSERT(allow_macro_instructions_);
+ ASSERT(!rd.IsZero());
+ sxth(rd, rn);
+}
+
+
+void MacroAssembler::Sxtw(const Register& rd, const Register& rn) {
+ ASSERT(allow_macro_instructions_);
+ ASSERT(!rd.IsZero());
+ sxtw(rd, rn);
+}
+
+
+void MacroAssembler::Ubfiz(const Register& rd,
+ const Register& rn,
+ unsigned lsb,
+ unsigned width) {
+ ASSERT(allow_macro_instructions_);
+ ASSERT(!rd.IsZero());
+ ubfiz(rd, rn, lsb, width);
+}
+
+
+void MacroAssembler::Ubfx(const Register& rd,
+ const Register& rn,
+ unsigned lsb,
+ unsigned width) {
+ ASSERT(allow_macro_instructions_);
+ ASSERT(!rd.IsZero());
+ ubfx(rd, rn, lsb, width);
+}
+
+
+void MacroAssembler::Ucvtf(const FPRegister& fd,
+ const Register& rn,
+ unsigned fbits) {
+ ASSERT(allow_macro_instructions_);
+ ucvtf(fd, rn, fbits);
+}
+
+
+void MacroAssembler::Udiv(const Register& rd,
+ const Register& rn,
+ const Register& rm) {
+ ASSERT(allow_macro_instructions_);
+ ASSERT(!rd.IsZero());
+ udiv(rd, rn, rm);
+}
+
+
+void MacroAssembler::Umaddl(const Register& rd,
+ const Register& rn,
+ const Register& rm,
+ const Register& ra) {
+ ASSERT(allow_macro_instructions_);
+ ASSERT(!rd.IsZero());
+ umaddl(rd, rn, rm, ra);
+}
+
+
+void MacroAssembler::Umsubl(const Register& rd,
+ const Register& rn,
+ const Register& rm,
+ const Register& ra) {
+ ASSERT(allow_macro_instructions_);
+ ASSERT(!rd.IsZero());
+ umsubl(rd, rn, rm, ra);
+}
+
+
+void MacroAssembler::Uxtb(const Register& rd, const Register& rn) {
+ ASSERT(allow_macro_instructions_);
+ ASSERT(!rd.IsZero());
+ uxtb(rd, rn);
+}
+
+
+void MacroAssembler::Uxth(const Register& rd, const Register& rn) {
+ ASSERT(allow_macro_instructions_);
+ ASSERT(!rd.IsZero());
+ uxth(rd, rn);
+}
+
+
+void MacroAssembler::Uxtw(const Register& rd, const Register& rn) {
+ ASSERT(allow_macro_instructions_);
+ ASSERT(!rd.IsZero());
+ uxtw(rd, rn);
+}
+
+
+void MacroAssembler::BumpSystemStackPointer(const Operand& space) {
+ ASSERT(!csp.Is(sp_));
+ // TODO(jbramley): Several callers rely on this not using scratch registers,
+ // so we use the assembler directly here. However, this means that large
+  // immediate values of 'space' cannot be handled cleanly. (Only 24-bit
+  // immediates or values of 'space' that can be encoded in one instruction are
+ // accepted.) Once we implement our flexible scratch register idea, we could
+ // greatly simplify this function.
+ InstructionAccurateScope scope(this);
+ if ((space.IsImmediate()) && !is_uint12(space.immediate())) {
+ // The subtract instruction supports a 12-bit immediate, shifted left by
+ // zero or 12 bits. So, in two instructions, we can subtract any immediate
+ // between zero and (1 << 24) - 1.
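+ // For example, an immediate 'space' of 0x12345 is handled here as
+ // sub(csp, StackPointer(), 0x12000) followed by sub(csp, csp, 0x345).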
+ int64_t imm = space.immediate();
+ ASSERT(is_uint24(imm));
+
+ int64_t imm_top_12_bits = imm >> 12;
+ sub(csp, StackPointer(), imm_top_12_bits << 12);
+ imm -= imm_top_12_bits << 12;
+ if (imm > 0) {
+ sub(csp, csp, imm);
+ }
+ } else {
+ sub(csp, StackPointer(), space);
+ }
+}
+
+
+void MacroAssembler::InitializeRootRegister() {
+ ExternalReference roots_array_start =
+ ExternalReference::roots_array_start(isolate());
+ Mov(root, Operand(roots_array_start));
+}
+
+
+void MacroAssembler::SmiTag(Register dst, Register src) {
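+ // Smis on this port keep their 32-bit value in the upper half of the
+ // register, so tagging is a single left shift by kSmiShift.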
+ ASSERT(dst.Is64Bits() && src.Is64Bits());
+ Lsl(dst, src, kSmiShift);
+}
+
+
+void MacroAssembler::SmiTag(Register smi) { SmiTag(smi, smi); }
+
+
+void MacroAssembler::SmiUntag(Register dst, Register src) {
+ ASSERT(dst.Is64Bits() && src.Is64Bits());
+ if (FLAG_enable_slow_asserts) {
+ AssertSmi(src);
+ }
+ Asr(dst, src, kSmiShift);
+}
+
+
+void MacroAssembler::SmiUntag(Register smi) { SmiUntag(smi, smi); }
+
+
+void MacroAssembler::SmiUntagToDouble(FPRegister dst,
+ Register src,
+ UntagMode mode) {
+ ASSERT(dst.Is64Bits() && src.Is64Bits());
+ if (FLAG_enable_slow_asserts && (mode == kNotSpeculativeUntag)) {
+ AssertSmi(src);
+ }
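+ // Converting with kSmiShift fractional bits folds the untag into the
+ // conversion: the result is the smi value (src >> kSmiShift) as a double.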
+ Scvtf(dst, src, kSmiShift);
+}
+
+
+void MacroAssembler::SmiUntagToFloat(FPRegister dst,
+ Register src,
+ UntagMode mode) {
+ ASSERT(dst.Is32Bits() && src.Is64Bits());
+ if (FLAG_enable_slow_asserts && (mode == kNotSpeculativeUntag)) {
+ AssertSmi(src);
+ }
+ Scvtf(dst, src, kSmiShift);
+}
+
+
+void MacroAssembler::JumpIfSmi(Register value,
+ Label* smi_label,
+ Label* not_smi_label) {
+ STATIC_ASSERT((kSmiTagSize == 1) && (kSmiTag == 0));
+ // Check if the tag bit is set.
+ if (smi_label) {
+ Tbz(value, 0, smi_label);
+ if (not_smi_label) {
+ B(not_smi_label);
+ }
+ } else {
+ ASSERT(not_smi_label);
+ Tbnz(value, 0, not_smi_label);
+ }
+}
+
+
+void MacroAssembler::JumpIfNotSmi(Register value, Label* not_smi_label) {
+ JumpIfSmi(value, NULL, not_smi_label);
+}
+
+
+void MacroAssembler::JumpIfBothSmi(Register value1,
+ Register value2,
+ Label* both_smi_label,
+ Label* not_smi_label) {
+ STATIC_ASSERT((kSmiTagSize == 1) && (kSmiTag == 0));
+ UseScratchRegisterScope temps(this);
+ Register tmp = temps.AcquireX();
+ // Check if both tag bits are clear.
+ Orr(tmp, value1, value2);
+ JumpIfSmi(tmp, both_smi_label, not_smi_label);
+}
+
+
+void MacroAssembler::JumpIfEitherSmi(Register value1,
+ Register value2,
+ Label* either_smi_label,
+ Label* not_smi_label) {
+ STATIC_ASSERT((kSmiTagSize == 1) && (kSmiTag == 0));
+ UseScratchRegisterScope temps(this);
+ Register tmp = temps.AcquireX();
+ // Check if either tag bit is clear.
+ And(tmp, value1, value2);
+ JumpIfSmi(tmp, either_smi_label, not_smi_label);
+}
+
+
+void MacroAssembler::JumpIfEitherNotSmi(Register value1,
+ Register value2,
+ Label* not_smi_label) {
+ JumpIfBothSmi(value1, value2, NULL, not_smi_label);
+}
+
+
+void MacroAssembler::JumpIfBothNotSmi(Register value1,
+ Register value2,
+ Label* not_smi_label) {
+ JumpIfEitherSmi(value1, value2, NULL, not_smi_label);
+}
+
+
+void MacroAssembler::IsObjectNameType(Register object,
+ Register type,
+ Label* fail) {
+ CompareObjectType(object, type, type, LAST_NAME_TYPE);
+ B(hi, fail);
+}
+
+
+void MacroAssembler::IsObjectJSObjectType(Register heap_object,
+ Register map,
+ Register scratch,
+ Label* fail) {
+ Ldr(map, FieldMemOperand(heap_object, HeapObject::kMapOffset));
+ IsInstanceJSObjectType(map, scratch, fail);
+}
+
+
+void MacroAssembler::IsInstanceJSObjectType(Register map,
+ Register scratch,
+ Label* fail) {
+ Ldrb(scratch, FieldMemOperand(map, Map::kInstanceTypeOffset));
+ // If the cmp result is lt, the following ccmp clears all flags; with all
+ // flags clear, Z == 0 and N == V, which satisfies the gt condition.
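+ // Together, the two checks form a range test: gt holds afterwards exactly
+ // when the instance type lies outside
+ // [FIRST_NONCALLABLE_SPEC_OBJECT_TYPE, LAST_NONCALLABLE_SPEC_OBJECT_TYPE].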
+ Cmp(scratch, FIRST_NONCALLABLE_SPEC_OBJECT_TYPE);
+ Ccmp(scratch, LAST_NONCALLABLE_SPEC_OBJECT_TYPE, NoFlag, ge);
+
+ // If we were not given a valid fail label, just fall through and leave the
+ // flags updated.
+ if (fail != NULL) {
+ B(gt, fail);
+ }
+}
+
+
+void MacroAssembler::IsObjectJSStringType(Register object,
+ Register type,
+ Label* not_string,
+ Label* string) {
+ Ldr(type, FieldMemOperand(object, HeapObject::kMapOffset));
+ Ldrb(type.W(), FieldMemOperand(type, Map::kInstanceTypeOffset));
+
+ STATIC_ASSERT(kStringTag == 0);
+ ASSERT((string != NULL) || (not_string != NULL));
+ if (string == NULL) {
+ TestAndBranchIfAnySet(type.W(), kIsNotStringMask, not_string);
+ } else if (not_string == NULL) {
+ TestAndBranchIfAllClear(type.W(), kIsNotStringMask, string);
+ } else {
+ TestAndBranchIfAnySet(type.W(), kIsNotStringMask, not_string);
+ B(string);
+ }
+}
+
+
+void MacroAssembler::Push(Handle<Object> handle) {
+ UseScratchRegisterScope temps(this);
+ Register tmp = temps.AcquireX();
+ Mov(tmp, Operand(handle));
+ Push(tmp);
+}
+
+
+void MacroAssembler::Claim(uint64_t count, uint64_t unit_size) {
+ uint64_t size = count * unit_size;
+
+ if (size == 0) {
+ return;
+ }
+
+ if (csp.Is(StackPointer())) {
+ ASSERT(size % 16 == 0);
+ } else {
+ BumpSystemStackPointer(size);
+ }
+
+ Sub(StackPointer(), StackPointer(), size);
+}
+
+
+void MacroAssembler::Claim(const Register& count, uint64_t unit_size) {
+ ASSERT(IsPowerOf2(unit_size));
+
+ if (unit_size == 0) {
+ return;
+ }
+
+ const int shift = CountTrailingZeros(unit_size, kXRegSizeInBits);
+ const Operand size(count, LSL, shift);
+
+ if (size.IsZero()) {
+ return;
+ }
+
+ if (!csp.Is(StackPointer())) {
+ BumpSystemStackPointer(size);
+ }
+
+ Sub(StackPointer(), StackPointer(), size);
+}
+
+
+void MacroAssembler::ClaimBySMI(const Register& count_smi, uint64_t unit_size) {
+ ASSERT(IsPowerOf2(unit_size));
+ const int shift = CountTrailingZeros(unit_size, kXRegSizeInBits) - kSmiShift;
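+ // For example, claiming kXRegSize-byte slots gives a negative shift, so the
+ // smi-tagged count is shifted right by (kSmiShift - 3), yielding the
+ // untagged count scaled by eight.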
+ const Operand size(count_smi,
+ (shift >= 0) ? (LSL) : (LSR),
+ (shift >= 0) ? (shift) : (-shift));
+
+ if (size.IsZero()) {
+ return;
+ }
+
+ if (!csp.Is(StackPointer())) {
+ BumpSystemStackPointer(size);
+ }
+
+ Sub(StackPointer(), StackPointer(), size);
+}
+
+
+void MacroAssembler::Drop(uint64_t count, uint64_t unit_size) {
+ uint64_t size = count * unit_size;
+
+ if (size == 0) {
+ return;
+ }
+
+ Add(StackPointer(), StackPointer(), size);
+
+ if (csp.Is(StackPointer())) {
+ ASSERT(size % 16 == 0);
+ } else if (emit_debug_code()) {
+ // It is safe to leave csp where it is when unwinding the JavaScript stack,
+ // but if we keep it matching StackPointer, the simulator can detect memory
+ // accesses in the now-free part of the stack.
+ Mov(csp, StackPointer());
+ }
+}
+
+
+void MacroAssembler::Drop(const Register& count, uint64_t unit_size) {
+ ASSERT(IsPowerOf2(unit_size));
+
+ if (unit_size == 0) {
+ return;
+ }
+
+ const int shift = CountTrailingZeros(unit_size, kXRegSizeInBits);
+ const Operand size(count, LSL, shift);
+
+ if (size.IsZero()) {
+ return;
+ }
+
+ Add(StackPointer(), StackPointer(), size);
+
+ if (!csp.Is(StackPointer()) && emit_debug_code()) {
+ // It is safe to leave csp where it is when unwinding the JavaScript stack,
+ // but if we keep it matching StackPointer, the simulator can detect memory
+ // accesses in the now-free part of the stack.
+ Mov(csp, StackPointer());
+ }
+}
+
+
+void MacroAssembler::DropBySMI(const Register& count_smi, uint64_t unit_size) {
+ ASSERT(IsPowerOf2(unit_size));
+ const int shift = CountTrailingZeros(unit_size, kXRegSizeInBits) - kSmiShift;
+ const Operand size(count_smi,
+ (shift >= 0) ? (LSL) : (LSR),
+ (shift >= 0) ? (shift) : (-shift));
+
+ if (size.IsZero()) {
+ return;
+ }
+
+ Add(StackPointer(), StackPointer(), size);
+
+ if (!csp.Is(StackPointer()) && emit_debug_code()) {
+ // It is safe to leave csp where it is when unwinding the JavaScript stack,
+ // but if we keep it matching StackPointer, the simulator can detect memory
+ // accesses in the now-free part of the stack.
+ Mov(csp, StackPointer());
+ }
+}
+
+
+void MacroAssembler::CompareAndBranch(const Register& lhs,
+ const Operand& rhs,
+ Condition cond,
+ Label* label) {
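+ // A comparison against an immediate zero with an eq or ne condition can be
+ // folded into a single cbz or cbnz.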
+ if (rhs.IsImmediate() && (rhs.immediate() == 0) &&
+ ((cond == eq) || (cond == ne))) {
+ if (cond == eq) {
+ Cbz(lhs, label);
+ } else {
+ Cbnz(lhs, label);
+ }
+ } else {
+ Cmp(lhs, rhs);
+ B(cond, label);
+ }
+}
+
+
+void MacroAssembler::TestAndBranchIfAnySet(const Register& reg,
+ const uint64_t bit_pattern,
+ Label* label) {
+ int bits = reg.SizeInBits();
+ ASSERT(CountSetBits(bit_pattern, bits) > 0);
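+ // If the pattern has a single set bit, a test-and-branch instruction is
+ // enough; e.g. a bit_pattern of kSmiTagMask (bit 0) becomes
+ // tbnz(reg, 0, label).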
+ if (CountSetBits(bit_pattern, bits) == 1) {
+ Tbnz(reg, MaskToBit(bit_pattern), label);
+ } else {
+ Tst(reg, bit_pattern);
+ B(ne, label);
+ }
+}
+
+
+void MacroAssembler::TestAndBranchIfAllClear(const Register& reg,
+ const uint64_t bit_pattern,
+ Label* label) {
+ int bits = reg.SizeInBits();
+ ASSERT(CountSetBits(bit_pattern, bits) > 0);
+ if (CountSetBits(bit_pattern, bits) == 1) {
+ Tbz(reg, MaskToBit(bit_pattern), label);
+ } else {
+ Tst(reg, bit_pattern);
+ B(eq, label);
+ }
+}
+
+
+void MacroAssembler::InlineData(uint64_t data) {
+ ASSERT(is_uint16(data));
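+ // Writing to xzr makes this an architectural no-op; the 16-bit payload is
+ // simply embedded in the instruction stream.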
+ InstructionAccurateScope scope(this, 1);
+ movz(xzr, data);
+}
+
+
+void MacroAssembler::EnableInstrumentation() {
+ InstructionAccurateScope scope(this, 1);
+ movn(xzr, InstrumentStateEnable);
+}
+
+
+void MacroAssembler::DisableInstrumentation() {
+ InstructionAccurateScope scope(this, 1);
+ movn(xzr, InstrumentStateDisable);
+}
+
+
+void MacroAssembler::AnnotateInstrumentation(const char* marker_name) {
+ ASSERT(strlen(marker_name) == 2);
+
+ // We allow only printable characters in the marker names. Unprintable
+ // characters are reserved for controlling features of the instrumentation.
+ ASSERT(isprint(marker_name[0]) && isprint(marker_name[1]));
+
+ InstructionAccurateScope scope(this, 1);
+ movn(xzr, (marker_name[1] << 8) | marker_name[0]);
+}
+
+} } // namespace v8::internal
+
+#endif // V8_ARM64_MACRO_ASSEMBLER_ARM64_INL_H_
diff --git a/deps/v8/src/arm64/macro-assembler-arm64.cc b/deps/v8/src/arm64/macro-assembler-arm64.cc
new file mode 100644
index 0000000000..08ddb8782a
--- /dev/null
+++ b/deps/v8/src/arm64/macro-assembler-arm64.cc
@@ -0,0 +1,5184 @@
+// Copyright 2013 the V8 project authors. All rights reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following
+// disclaimer in the documentation and/or other materials provided
+// with the distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived
+// from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+#include "v8.h"
+
+#if V8_TARGET_ARCH_ARM64
+
+#include "bootstrapper.h"
+#include "codegen.h"
+#include "cpu-profiler.h"
+#include "debug.h"
+#include "isolate-inl.h"
+#include "runtime.h"
+
+namespace v8 {
+namespace internal {
+
+// Define a fake double underscore to use with the ASM_UNIMPLEMENTED macros.
+#define __
+
+
+MacroAssembler::MacroAssembler(Isolate* arg_isolate,
+ byte* buffer,
+ unsigned buffer_size)
+ : Assembler(arg_isolate, buffer, buffer_size),
+ generating_stub_(false),
+#if DEBUG
+ allow_macro_instructions_(true),
+#endif
+ has_frame_(false),
+ use_real_aborts_(true),
+ sp_(jssp), tmp_list_(ip0, ip1), fptmp_list_(fp_scratch) {
+ if (isolate() != NULL) {
+ code_object_ = Handle<Object>(isolate()->heap()->undefined_value(),
+ isolate());
+ }
+}
+
+
+void MacroAssembler::LogicalMacro(const Register& rd,
+ const Register& rn,
+ const Operand& operand,
+ LogicalOp op) {
+ UseScratchRegisterScope temps(this);
+
+ if (operand.NeedsRelocation()) {
+ Register temp = temps.AcquireX();
+ LoadRelocated(temp, operand);
+ Logical(rd, rn, temp, op);
+
+ } else if (operand.IsImmediate()) {
+ int64_t immediate = operand.immediate();
+ unsigned reg_size = rd.SizeInBits();
+ ASSERT(rd.Is64Bits() || is_uint32(immediate));
+
+ // If the operation is NOT, invert the operation and immediate.
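+ // For example, Bic(w0, w1, 0xf) reaches this point and is rewritten as an
+ // AND with the inverted immediate 0xfffffff0.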
+ if ((op & NOT) == NOT) {
+ op = static_cast<LogicalOp>(op & ~NOT);
+ immediate = ~immediate;
+ if (rd.Is32Bits()) {
+ immediate &= kWRegMask;
+ }
+ }
+
+ // Special cases for all set or all clear immediates.
+ if (immediate == 0) {
+ switch (op) {
+ case AND:
+ Mov(rd, 0);
+ return;
+ case ORR: // Fall through.
+ case EOR:
+ Mov(rd, rn);
+ return;
+ case ANDS: // Fall through.
+ case BICS:
+ break;
+ default:
+ UNREACHABLE();
+ }
+ } else if ((rd.Is64Bits() && (immediate == -1L)) ||
+ (rd.Is32Bits() && (immediate == 0xffffffffL))) {
+ switch (op) {
+ case AND:
+ Mov(rd, rn);
+ return;
+ case ORR:
+ Mov(rd, immediate);
+ return;
+ case EOR:
+ Mvn(rd, rn);
+ return;
+ case ANDS: // Fall through.
+ case BICS:
+ break;
+ default:
+ UNREACHABLE();
+ }
+ }
+
+ unsigned n, imm_s, imm_r;
+ if (IsImmLogical(immediate, reg_size, &n, &imm_s, &imm_r)) {
+ // Immediate can be encoded in the instruction.
+ LogicalImmediate(rd, rn, n, imm_s, imm_r, op);
+ } else {
+ // Immediate can't be encoded: synthesize using move immediate.
+ Register temp = temps.AcquireSameSizeAs(rn);
+ Mov(temp, immediate);
+ if (rd.Is(csp)) {
+ // If rd is the stack pointer we cannot use it as the destination
+ // register so we use the temp register as an intermediate again.
+ Logical(temp, rn, temp, op);
+ Mov(csp, temp);
+ } else {
+ Logical(rd, rn, temp, op);
+ }
+ }
+
+ } else if (operand.IsExtendedRegister()) {
+ ASSERT(operand.reg().SizeInBits() <= rd.SizeInBits());
+ // Add/sub extended supports shift <= 4. We want to support exactly the
+ // same modes here.
+ ASSERT(operand.shift_amount() <= 4);
+ ASSERT(operand.reg().Is64Bits() ||
+ ((operand.extend() != UXTX) && (operand.extend() != SXTX)));
+ Register temp = temps.AcquireSameSizeAs(rn);
+ EmitExtendShift(temp, operand.reg(), operand.extend(),
+ operand.shift_amount());
+ Logical(rd, rn, temp, op);
+
+ } else {
+ // The operand can be encoded in the instruction.
+ ASSERT(operand.IsShiftedRegister());
+ Logical(rd, rn, operand, op);
+ }
+}
+
+
+void MacroAssembler::Mov(const Register& rd, uint64_t imm) {
+ ASSERT(allow_macro_instructions_);
+ ASSERT(is_uint32(imm) || is_int32(imm) || rd.Is64Bits());
+ ASSERT(!rd.IsZero());
+
+ // TODO(all) extend to support more immediates.
+ //
+ // Immediates on AArch64 can be produced using an initial value, and zero to
+ // three move keep operations.
+ //
+ // Initial values can be generated with:
+ // 1. 64-bit move zero (movz).
+ // 2. 32-bit move inverted (movn).
+ // 3. 64-bit move inverted.
+ // 4. 32-bit orr immediate.
+ // 5. 64-bit orr immediate.
+ // Move-keep may then be used to modify each of the 16-bit half-words.
+ //
+ // The code below supports all five initial value generators, and
+ // applying move-keep operations to move-zero and move-inverted initial
+ // values.
+
+ unsigned reg_size = rd.SizeInBits();
+ unsigned n, imm_s, imm_r;
+ if (IsImmMovz(imm, reg_size) && !rd.IsSP()) {
+ // Immediate can be represented in a move zero instruction. Movz can't
+ // write to the stack pointer.
+ movz(rd, imm);
+ } else if (IsImmMovn(imm, reg_size) && !rd.IsSP()) {
+ // Immediate can be represented in a move inverted instruction. Movn can't
+ // write to the stack pointer.
+ movn(rd, rd.Is64Bits() ? ~imm : (~imm & kWRegMask));
+ } else if (IsImmLogical(imm, reg_size, &n, &imm_s, &imm_r)) {
+ // Immediate can be represented in a logical orr instruction.
+ LogicalImmediate(rd, AppropriateZeroRegFor(rd), n, imm_s, imm_r, ORR);
+ } else {
+ // Generic immediate case. Imm will be represented by
+ // [imm3, imm2, imm1, imm0], where each imm is 16 bits.
+ // A move-zero or move-inverted is generated for the first non-zero or
+ // non-0xffff immX, and a move-keep for subsequent non-zero immX.
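+ // For example, 0x0000cafe00001234 is generated as movz(temp, 0x1234, 0)
+ // followed by movk(temp, 0xcafe, 32); the two zero halfwords are skipped.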
+
+ uint64_t ignored_halfword = 0;
+ bool invert_move = false;
+ // If the number of 0xffff halfwords is greater than the number of 0x0000
+ // halfwords, it's more efficient to use move-inverted.
+ if (CountClearHalfWords(~imm, reg_size) >
+ CountClearHalfWords(imm, reg_size)) {
+ ignored_halfword = 0xffffL;
+ invert_move = true;
+ }
+
+ // Mov instructions can't move immediate values into the stack pointer, so
+ // set up a temporary register, if needed.
+ UseScratchRegisterScope temps(this);
+ Register temp = rd.IsSP() ? temps.AcquireSameSizeAs(rd) : rd;
+
+ // Iterate through the halfwords. Use movn/movz for the first non-ignored
+ // halfword, and movk for subsequent halfwords.
+ ASSERT((reg_size % 16) == 0);
+ bool first_mov_done = false;
+ for (unsigned i = 0; i < (rd.SizeInBits() / 16); i++) {
+ uint64_t imm16 = (imm >> (16 * i)) & 0xffffL;
+ if (imm16 != ignored_halfword) {
+ if (!first_mov_done) {
+ if (invert_move) {
+ movn(temp, (~imm16) & 0xffffL, 16 * i);
+ } else {
+ movz(temp, imm16, 16 * i);
+ }
+ first_mov_done = true;
+ } else {
+ // Construct a wider constant.
+ movk(temp, imm16, 16 * i);
+ }
+ }
+ }
+ ASSERT(first_mov_done);
+
+ // Move the temporary if the original destination register was the stack
+ // pointer.
+ if (rd.IsSP()) {
+ mov(rd, temp);
+ }
+ }
+}
+
+
+void MacroAssembler::Mov(const Register& rd,
+ const Operand& operand,
+ DiscardMoveMode discard_mode) {
+ ASSERT(allow_macro_instructions_);
+ ASSERT(!rd.IsZero());
+
+ // Provide a swap register for instructions that need to write into the
+ // system stack pointer (and can't do this inherently).
+ UseScratchRegisterScope temps(this);
+ Register dst = (rd.IsSP()) ? temps.AcquireSameSizeAs(rd) : rd;
+
+ if (operand.NeedsRelocation()) {
+ LoadRelocated(dst, operand);
+
+ } else if (operand.IsImmediate()) {
+ // Call the macro assembler for generic immediates.
+ Mov(dst, operand.immediate());
+
+ } else if (operand.IsShiftedRegister() && (operand.shift_amount() != 0)) {
+ // Emit a shift instruction if moving a shifted register. This operation
+ // could also be achieved using an orr instruction (like orn used by Mvn),
+ // but using a shift instruction makes the disassembly clearer.
+ EmitShift(dst, operand.reg(), operand.shift(), operand.shift_amount());
+
+ } else if (operand.IsExtendedRegister()) {
+ // Emit an extend instruction if moving an extended register. This handles
+ // extend with post-shift operations, too.
+ EmitExtendShift(dst, operand.reg(), operand.extend(),
+ operand.shift_amount());
+
+ } else {
+ // Otherwise, emit a register move only if the registers are distinct, or
+ // if they are not X registers.
+ //
+ // Note that mov(w0, w0) is not a no-op because it clears the top word of
+ // x0. A flag is provided (kDiscardForSameWReg) if a move between the same W
+ // registers is not required to clear the top word of the X register. In
+ // this case, the instruction is discarded.
+ //
+ // If csp is an operand, add #0 is emitted, otherwise, orr #0.
+ if (!rd.Is(operand.reg()) || (rd.Is32Bits() &&
+ (discard_mode == kDontDiscardForSameWReg))) {
+ Assembler::mov(rd, operand.reg());
+ }
+ // This case can handle writes into the system stack pointer directly.
+ dst = rd;
+ }
+
+ // Copy the result to the system stack pointer.
+ if (!dst.Is(rd)) {
+ ASSERT(rd.IsSP());
+ Assembler::mov(rd, dst);
+ }
+}
+
+
+void MacroAssembler::Mvn(const Register& rd, const Operand& operand) {
+ ASSERT(allow_macro_instructions_);
+
+ if (operand.NeedsRelocation()) {
+ LoadRelocated(rd, operand);
+ mvn(rd, rd);
+
+ } else if (operand.IsImmediate()) {
+ // Call the macro assembler for generic immediates.
+ Mov(rd, ~operand.immediate());
+
+ } else if (operand.IsExtendedRegister()) {
+ // Emit two instructions for the extend case. This differs from Mov, as
+ // the extend and invert can't be achieved in one instruction.
+ EmitExtendShift(rd, operand.reg(), operand.extend(),
+ operand.shift_amount());
+ mvn(rd, rd);
+
+ } else {
+ mvn(rd, operand);
+ }
+}
+
+
+unsigned MacroAssembler::CountClearHalfWords(uint64_t imm, unsigned reg_size) {
+ ASSERT((reg_size % 8) == 0);
+ int count = 0;
+ for (unsigned i = 0; i < (reg_size / 16); i++) {
+ if ((imm & 0xffff) == 0) {
+ count++;
+ }
+ imm >>= 16;
+ }
+ return count;
+}
+
+
+// The movz instruction can generate immediates containing an arbitrary 16-bit
+// half-word, with remaining bits clear, eg. 0x00001234, 0x0000123400000000.
+bool MacroAssembler::IsImmMovz(uint64_t imm, unsigned reg_size) {
+ ASSERT((reg_size == kXRegSizeInBits) || (reg_size == kWRegSizeInBits));
+ return CountClearHalfWords(imm, reg_size) >= ((reg_size / 16) - 1);
+}
+
+
+// The movn instruction can generate immediates containing an arbitrary 16-bit
+// half-word, with remaining bits set, eg. 0xffff1234, 0xffff1234ffffffff.
+bool MacroAssembler::IsImmMovn(uint64_t imm, unsigned reg_size) {
+ return IsImmMovz(~imm, reg_size);
+}
+
+
+void MacroAssembler::ConditionalCompareMacro(const Register& rn,
+ const Operand& operand,
+ StatusFlags nzcv,
+ Condition cond,
+ ConditionalCompareOp op) {
+ ASSERT((cond != al) && (cond != nv));
+ if (operand.NeedsRelocation()) {
+ UseScratchRegisterScope temps(this);
+ Register temp = temps.AcquireX();
+ LoadRelocated(temp, operand);
+ ConditionalCompareMacro(rn, temp, nzcv, cond, op);
+
+ } else if ((operand.IsShiftedRegister() && (operand.shift_amount() == 0)) ||
+ (operand.IsImmediate() && IsImmConditionalCompare(operand.immediate()))) {
+ // The immediate can be encoded in the instruction, or the operand is an
+ // unshifted register: call the assembler.
+ ConditionalCompare(rn, operand, nzcv, cond, op);
+
+ } else {
+ // The operand isn't directly supported by the instruction: perform the
+ // operation on a temporary register.
+ UseScratchRegisterScope temps(this);
+ Register temp = temps.AcquireSameSizeAs(rn);
+ Mov(temp, operand);
+ ConditionalCompare(rn, temp, nzcv, cond, op);
+ }
+}
+
+
+void MacroAssembler::Csel(const Register& rd,
+ const Register& rn,
+ const Operand& operand,
+ Condition cond) {
+ ASSERT(allow_macro_instructions_);
+ ASSERT(!rd.IsZero());
+ ASSERT((cond != al) && (cond != nv));
+ if (operand.IsImmediate()) {
+ // Immediate argument. Handle special cases of 0, 1 and -1 using zero
+ // register.
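+ // (csinc with the zero register yields 1 and csinv yields -1 when the
+ // condition fails, so no scratch register is needed for those immediates.)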
+ int64_t imm = operand.immediate();
+ Register zr = AppropriateZeroRegFor(rn);
+ if (imm == 0) {
+ csel(rd, rn, zr, cond);
+ } else if (imm == 1) {
+ csinc(rd, rn, zr, cond);
+ } else if (imm == -1) {
+ csinv(rd, rn, zr, cond);
+ } else {
+ UseScratchRegisterScope temps(this);
+ Register temp = temps.AcquireSameSizeAs(rn);
+ Mov(temp, operand.immediate());
+ csel(rd, rn, temp, cond);
+ }
+ } else if (operand.IsShiftedRegister() && (operand.shift_amount() == 0)) {
+ // Unshifted register argument.
+ csel(rd, rn, operand.reg(), cond);
+ } else {
+ // All other arguments.
+ UseScratchRegisterScope temps(this);
+ Register temp = temps.AcquireSameSizeAs(rn);
+ Mov(temp, operand);
+ csel(rd, rn, temp, cond);
+ }
+}
+
+
+void MacroAssembler::AddSubMacro(const Register& rd,
+ const Register& rn,
+ const Operand& operand,
+ FlagsUpdate S,
+ AddSubOp op) {
+ if (operand.IsZero() && rd.Is(rn) && rd.Is64Bits() && rn.Is64Bits() &&
+ !operand.NeedsRelocation() && (S == LeaveFlags)) {
+ // The instruction would be a nop. Avoid generating useless code.
+ return;
+ }
+
+ if (operand.NeedsRelocation()) {
+ UseScratchRegisterScope temps(this);
+ Register temp = temps.AcquireX();
+ LoadRelocated(temp, operand);
+ AddSubMacro(rd, rn, temp, S, op);
+ } else if ((operand.IsImmediate() && !IsImmAddSub(operand.immediate())) ||
+ (rn.IsZero() && !operand.IsShiftedRegister()) ||
+ (operand.IsShiftedRegister() && (operand.shift() == ROR))) {
+ UseScratchRegisterScope temps(this);
+ Register temp = temps.AcquireSameSizeAs(rn);
+ Mov(temp, operand);
+ AddSub(rd, rn, temp, S, op);
+ } else {
+ AddSub(rd, rn, operand, S, op);
+ }
+}
+
+
+void MacroAssembler::AddSubWithCarryMacro(const Register& rd,
+ const Register& rn,
+ const Operand& operand,
+ FlagsUpdate S,
+ AddSubWithCarryOp op) {
+ ASSERT(rd.SizeInBits() == rn.SizeInBits());
+ UseScratchRegisterScope temps(this);
+
+ if (operand.NeedsRelocation()) {
+ Register temp = temps.AcquireX();
+ LoadRelocated(temp, operand);
+ AddSubWithCarryMacro(rd, rn, temp, S, op);
+
+ } else if (operand.IsImmediate() ||
+ (operand.IsShiftedRegister() && (operand.shift() == ROR))) {
+ // Add/sub with carry (immediate or ROR shifted register).
+ Register temp = temps.AcquireSameSizeAs(rn);
+ Mov(temp, operand);
+ AddSubWithCarry(rd, rn, temp, S, op);
+
+ } else if (operand.IsShiftedRegister() && (operand.shift_amount() != 0)) {
+ // Add/sub with carry (shifted register).
+ ASSERT(operand.reg().SizeInBits() == rd.SizeInBits());
+ ASSERT(operand.shift() != ROR);
+ ASSERT(is_uintn(operand.shift_amount(),
+ rd.SizeInBits() == kXRegSizeInBits ? kXRegSizeInBitsLog2
+ : kWRegSizeInBitsLog2));
+ Register temp = temps.AcquireSameSizeAs(rn);
+ EmitShift(temp, operand.reg(), operand.shift(), operand.shift_amount());
+ AddSubWithCarry(rd, rn, temp, S, op);
+
+ } else if (operand.IsExtendedRegister()) {
+ // Add/sub with carry (extended register).
+ ASSERT(operand.reg().SizeInBits() <= rd.SizeInBits());
+ // Add/sub extended supports a shift <= 4. We want to support exactly the
+ // same modes.
+ ASSERT(operand.shift_amount() <= 4);
+ ASSERT(operand.reg().Is64Bits() ||
+ ((operand.extend() != UXTX) && (operand.extend() != SXTX)));
+ Register temp = temps.AcquireSameSizeAs(rn);
+ EmitExtendShift(temp, operand.reg(), operand.extend(),
+ operand.shift_amount());
+ AddSubWithCarry(rd, rn, temp, S, op);
+
+ } else {
+ // The addressing mode is directly supported by the instruction.
+ AddSubWithCarry(rd, rn, operand, S, op);
+ }
+}
+
+
+void MacroAssembler::LoadStoreMacro(const CPURegister& rt,
+ const MemOperand& addr,
+ LoadStoreOp op) {
+ int64_t offset = addr.offset();
+ LSDataSize size = CalcLSDataSize(op);
+
+ // Check if an immediate offset fits in the immediate field of the
+ // appropriate instruction. If not, emit two instructions to perform
+ // the operation.
+ if (addr.IsImmediateOffset() && !IsImmLSScaled(offset, size) &&
+ !IsImmLSUnscaled(offset)) {
+ // Immediate offset that can't be encoded using unsigned or unscaled
+ // addressing modes.
+ UseScratchRegisterScope temps(this);
+ Register temp = temps.AcquireSameSizeAs(addr.base());
+ Mov(temp, addr.offset());
+ LoadStore(rt, MemOperand(addr.base(), temp), op);
+ } else if (addr.IsPostIndex() && !IsImmLSUnscaled(offset)) {
+ // Post-index beyond unscaled addressing range.
+ LoadStore(rt, MemOperand(addr.base()), op);
+ add(addr.base(), addr.base(), offset);
+ } else if (addr.IsPreIndex() && !IsImmLSUnscaled(offset)) {
+ // Pre-index beyond unscaled addressing range.
+ add(addr.base(), addr.base(), offset);
+ LoadStore(rt, MemOperand(addr.base()), op);
+ } else {
+ // Encodable in one load/store instruction.
+ LoadStore(rt, addr, op);
+ }
+}
+
+
+void MacroAssembler::Load(const Register& rt,
+ const MemOperand& addr,
+ Representation r) {
+ ASSERT(!r.IsDouble());
+
+ if (r.IsInteger8()) {
+ Ldrsb(rt, addr);
+ } else if (r.IsUInteger8()) {
+ Ldrb(rt, addr);
+ } else if (r.IsInteger16()) {
+ Ldrsh(rt, addr);
+ } else if (r.IsUInteger16()) {
+ Ldrh(rt, addr);
+ } else if (r.IsInteger32()) {
+ Ldr(rt.W(), addr);
+ } else {
+ ASSERT(rt.Is64Bits());
+ Ldr(rt, addr);
+ }
+}
+
+
+void MacroAssembler::Store(const Register& rt,
+ const MemOperand& addr,
+ Representation r) {
+ ASSERT(!r.IsDouble());
+
+ if (r.IsInteger8() || r.IsUInteger8()) {
+ Strb(rt, addr);
+ } else if (r.IsInteger16() || r.IsUInteger16()) {
+ Strh(rt, addr);
+ } else if (r.IsInteger32()) {
+ Str(rt.W(), addr);
+ } else {
+ ASSERT(rt.Is64Bits());
+ Str(rt, addr);
+ }
+}
+
+
+bool MacroAssembler::NeedExtraInstructionsOrRegisterBranch(
+ Label *label, ImmBranchType b_type) {
+ bool need_longer_range = false;
+ // There are two situations in which we care about the offset being out of
+ // range:
+ // - The label is bound but too far away.
+ // - The label is not bound but linked, and the previous branch
+ // instruction in the chain is too far away.
+ if (label->is_bound() || label->is_linked()) {
+ need_longer_range =
+ !Instruction::IsValidImmPCOffset(b_type, label->pos() - pc_offset());
+ }
+ if (!need_longer_range && !label->is_bound()) {
+ int max_reachable_pc = pc_offset() + Instruction::ImmBranchRange(b_type);
+ unresolved_branches_.insert(
+ std::pair<int, FarBranchInfo>(max_reachable_pc,
+ FarBranchInfo(pc_offset(), label)));
+ // Also maintain the next pool check.
+ next_veneer_pool_check_ =
+ Min(next_veneer_pool_check_,
+ max_reachable_pc - kVeneerDistanceCheckMargin);
+ }
+ return need_longer_range;
+}
+
+
+void MacroAssembler::B(Label* label, BranchType type, Register reg, int bit) {
+ ASSERT((reg.Is(NoReg) || type >= kBranchTypeFirstUsingReg) &&
+ (bit == -1 || type >= kBranchTypeFirstUsingBit));
+ if (kBranchTypeFirstCondition <= type && type <= kBranchTypeLastCondition) {
+ B(static_cast<Condition>(type), label);
+ } else {
+ switch (type) {
+ case always: B(label); break;
+ case never: break;
+ case reg_zero: Cbz(reg, label); break;
+ case reg_not_zero: Cbnz(reg, label); break;
+ case reg_bit_clear: Tbz(reg, bit, label); break;
+ case reg_bit_set: Tbnz(reg, bit, label); break;
+ default:
+ UNREACHABLE();
+ }
+ }
+}
+
+
+void MacroAssembler::B(Label* label, Condition cond) {
+ ASSERT(allow_macro_instructions_);
+ ASSERT((cond != al) && (cond != nv));
+
+ Label done;
+ bool need_extra_instructions =
+ NeedExtraInstructionsOrRegisterBranch(label, CondBranchType);
+
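+ // A conditional branch can only reach +/-1MB; if the target may be further
+ // away, branch over an unconditional B, which has a +/-128MB range.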
+ if (need_extra_instructions) {
+ b(&done, InvertCondition(cond));
+ B(label);
+ } else {
+ b(label, cond);
+ }
+ bind(&done);
+}
+
+
+void MacroAssembler::Tbnz(const Register& rt, unsigned bit_pos, Label* label) {
+ ASSERT(allow_macro_instructions_);
+
+ Label done;
+ bool need_extra_instructions =
+ NeedExtraInstructionsOrRegisterBranch(label, TestBranchType);
+
+ if (need_extra_instructions) {
+ tbz(rt, bit_pos, &done);
+ B(label);
+ } else {
+ tbnz(rt, bit_pos, label);
+ }
+ bind(&done);
+}
+
+
+void MacroAssembler::Tbz(const Register& rt, unsigned bit_pos, Label* label) {
+ ASSERT(allow_macro_instructions_);
+
+ Label done;
+ bool need_extra_instructions =
+ NeedExtraInstructionsOrRegisterBranch(label, TestBranchType);
+
+ if (need_extra_instructions) {
+ tbnz(rt, bit_pos, &done);
+ B(label);
+ } else {
+ tbz(rt, bit_pos, label);
+ }
+ bind(&done);
+}
+
+
+void MacroAssembler::Cbnz(const Register& rt, Label* label) {
+ ASSERT(allow_macro_instructions_);
+
+ Label done;
+ bool need_extra_instructions =
+ NeedExtraInstructionsOrRegisterBranch(label, CompareBranchType);
+
+ if (need_extra_instructions) {
+ cbz(rt, &done);
+ B(label);
+ } else {
+ cbnz(rt, label);
+ }
+ bind(&done);
+}
+
+
+void MacroAssembler::Cbz(const Register& rt, Label* label) {
+ ASSERT(allow_macro_instructions_);
+
+ Label done;
+ bool need_extra_instructions =
+ NeedExtraInstructionsOrRegisterBranch(label, CompareBranchType);
+
+ if (need_extra_instructions) {
+ cbnz(rt, &done);
+ B(label);
+ } else {
+ cbz(rt, label);
+ }
+ bind(&done);
+}
+
+
+// Pseudo-instructions.
+
+
+void MacroAssembler::Abs(const Register& rd, const Register& rm,
+ Label* is_not_representable,
+ Label* is_representable) {
+ ASSERT(allow_macro_instructions_);
+ ASSERT(AreSameSizeAndType(rd, rm));
+
+ Cmp(rm, 1);
+ Cneg(rd, rm, lt);
+
+ // If the comparison sets the v flag, the input was the smallest value
+ // representable by rm, and the mathematical result of abs(rm) is not
+ // representable using two's complement.
+ if ((is_not_representable != NULL) && (is_representable != NULL)) {
+ B(is_not_representable, vs);
+ B(is_representable);
+ } else if (is_not_representable != NULL) {
+ B(is_not_representable, vs);
+ } else if (is_representable != NULL) {
+ B(is_representable, vc);
+ }
+}
+
+
+// Abstracted stack operations.
+
+
+void MacroAssembler::Push(const CPURegister& src0, const CPURegister& src1,
+ const CPURegister& src2, const CPURegister& src3) {
+ ASSERT(AreSameSizeAndType(src0, src1, src2, src3));
+
+ int count = 1 + src1.IsValid() + src2.IsValid() + src3.IsValid();
+ int size = src0.SizeInBytes();
+
+ PrepareForPush(count, size);
+ PushHelper(count, size, src0, src1, src2, src3);
+}
+
+
+void MacroAssembler::Push(const CPURegister& src0, const CPURegister& src1,
+ const CPURegister& src2, const CPURegister& src3,
+ const CPURegister& src4, const CPURegister& src5,
+ const CPURegister& src6, const CPURegister& src7) {
+ ASSERT(AreSameSizeAndType(src0, src1, src2, src3, src4, src5, src6, src7));
+
+ int count = 5 + src5.IsValid() + src6.IsValid() + src7.IsValid();
+ int size = src0.SizeInBytes();
+
+ PrepareForPush(count, size);
+ PushHelper(4, size, src0, src1, src2, src3);
+ PushHelper(count - 4, size, src4, src5, src6, src7);
+}
+
+
+void MacroAssembler::Pop(const CPURegister& dst0, const CPURegister& dst1,
+ const CPURegister& dst2, const CPURegister& dst3) {
+ // It is not valid to pop into the same register more than once in one
+ // instruction, not even into the zero register.
+ ASSERT(!AreAliased(dst0, dst1, dst2, dst3));
+ ASSERT(AreSameSizeAndType(dst0, dst1, dst2, dst3));
+ ASSERT(dst0.IsValid());
+
+ int count = 1 + dst1.IsValid() + dst2.IsValid() + dst3.IsValid();
+ int size = dst0.SizeInBytes();
+
+ PrepareForPop(count, size);
+ PopHelper(count, size, dst0, dst1, dst2, dst3);
+
+ if (!csp.Is(StackPointer()) && emit_debug_code()) {
+ // It is safe to leave csp where it is when unwinding the JavaScript stack,
+ // but if we keep it matching StackPointer, the simulator can detect memory
+ // accesses in the now-free part of the stack.
+ Mov(csp, StackPointer());
+ }
+}
+
+
+void MacroAssembler::PushPopQueue::PushQueued() {
+ if (queued_.empty()) return;
+
+ masm_->PrepareForPush(size_);
+
+ int count = queued_.size();
+ int index = 0;
+ while (index < count) {
+ // PushHelper can only handle registers with the same size and type, and it
+ // can handle only four at a time. Batch them up accordingly.
+ CPURegister batch[4] = {NoReg, NoReg, NoReg, NoReg};
+ int batch_index = 0;
+ do {
+ batch[batch_index++] = queued_[index++];
+ } while ((batch_index < 4) && (index < count) &&
+ batch[0].IsSameSizeAndType(queued_[index]));
+
+ masm_->PushHelper(batch_index, batch[0].SizeInBytes(),
+ batch[0], batch[1], batch[2], batch[3]);
+ }
+
+ queued_.clear();
+}
+
+
+void MacroAssembler::PushPopQueue::PopQueued() {
+ if (queued_.empty()) return;
+
+ masm_->PrepareForPop(size_);
+
+ int count = queued_.size();
+ int index = 0;
+ while (index < count) {
+ // PopHelper can only handle registers with the same size and type, and it
+ // can handle only four at a time. Batch them up accordingly.
+ CPURegister batch[4] = {NoReg, NoReg, NoReg, NoReg};
+ int batch_index = 0;
+ do {
+ batch[batch_index++] = queued_[index++];
+ } while ((batch_index < 4) && (index < count) &&
+ batch[0].IsSameSizeAndType(queued_[index]));
+
+ masm_->PopHelper(batch_index, batch[0].SizeInBytes(),
+ batch[0], batch[1], batch[2], batch[3]);
+ }
+
+ queued_.clear();
+}
+
+
+void MacroAssembler::PushCPURegList(CPURegList registers) {
+ int size = registers.RegisterSizeInBytes();
+
+ PrepareForPush(registers.Count(), size);
+ // Push up to four registers at a time because if the current stack pointer is
+ // csp and reg_size is 32, registers must be pushed in blocks of four in order
+ // to maintain the 16-byte alignment for csp.
+ while (!registers.IsEmpty()) {
+ int count_before = registers.Count();
+ const CPURegister& src0 = registers.PopHighestIndex();
+ const CPURegister& src1 = registers.PopHighestIndex();
+ const CPURegister& src2 = registers.PopHighestIndex();
+ const CPURegister& src3 = registers.PopHighestIndex();
+ int count = count_before - registers.Count();
+ PushHelper(count, size, src0, src1, src2, src3);
+ }
+}
+
+
+void MacroAssembler::PopCPURegList(CPURegList registers) {
+ int size = registers.RegisterSizeInBytes();
+
+ PrepareForPop(registers.Count(), size);
+ // Pop up to four registers at a time because if the current stack pointer is
+ // csp and reg_size is 32, registers must be pushed in blocks of four in
+ // order to maintain the 16-byte alignment for csp.
+ while (!registers.IsEmpty()) {
+ int count_before = registers.Count();
+ const CPURegister& dst0 = registers.PopLowestIndex();
+ const CPURegister& dst1 = registers.PopLowestIndex();
+ const CPURegister& dst2 = registers.PopLowestIndex();
+ const CPURegister& dst3 = registers.PopLowestIndex();
+ int count = count_before - registers.Count();
+ PopHelper(count, size, dst0, dst1, dst2, dst3);
+ }
+
+ if (!csp.Is(StackPointer()) && emit_debug_code()) {
+ // It is safe to leave csp where it is when unwinding the JavaScript stack,
+ // but if we keep it matching StackPointer, the simulator can detect memory
+ // accesses in the now-free part of the stack.
+ Mov(csp, StackPointer());
+ }
+}
+
+
+void MacroAssembler::PushMultipleTimes(CPURegister src, int count) {
+ int size = src.SizeInBytes();
+
+ PrepareForPush(count, size);
+
+ if (FLAG_optimize_for_size && count > 8) {
+ UseScratchRegisterScope temps(this);
+ Register temp = temps.AcquireX();
+
+ Label loop;
+ __ Mov(temp, count / 2);
+ __ Bind(&loop);
+ PushHelper(2, size, src, src, NoReg, NoReg);
+ __ Subs(temp, temp, 1);
+ __ B(ne, &loop);
+
+ count %= 2;
+ }
+
+ // Push up to four registers at a time if possible because if the current
+ // stack pointer is csp and the register size is 32, registers must be pushed
+ // in blocks of four in order to maintain the 16-byte alignment for csp.
+ while (count >= 4) {
+ PushHelper(4, size, src, src, src, src);
+ count -= 4;
+ }
+ if (count >= 2) {
+ PushHelper(2, size, src, src, NoReg, NoReg);
+ count -= 2;
+ }
+ if (count == 1) {
+ PushHelper(1, size, src, NoReg, NoReg, NoReg);
+ count -= 1;
+ }
+ ASSERT(count == 0);
+}
+
+
+void MacroAssembler::PushMultipleTimes(CPURegister src, Register count) {
+ PrepareForPush(Operand(count, UXTW, WhichPowerOf2(src.SizeInBytes())));
+
+ UseScratchRegisterScope temps(this);
+ Register temp = temps.AcquireSameSizeAs(count);
+
+ if (FLAG_optimize_for_size) {
+ Label loop, done;
+
+ Subs(temp, count, 1);
+ B(mi, &done);
+
+ // Push all registers individually, to save code size.
+ Bind(&loop);
+ Subs(temp, temp, 1);
+ PushHelper(1, src.SizeInBytes(), src, NoReg, NoReg, NoReg);
+ B(pl, &loop);
+
+ Bind(&done);
+ } else {
+ Label loop, leftover2, leftover1, done;
+
+ Subs(temp, count, 4);
+ B(mi, &leftover2);
+
+ // Push groups of four first.
+ Bind(&loop);
+ Subs(temp, temp, 4);
+ PushHelper(4, src.SizeInBytes(), src, src, src, src);
+ B(pl, &loop);
+
+ // Push groups of two.
+ Bind(&leftover2);
+ Tbz(count, 1, &leftover1);
+ PushHelper(2, src.SizeInBytes(), src, src, NoReg, NoReg);
+
+ // Push the last one (if required).
+ Bind(&leftover1);
+ Tbz(count, 0, &done);
+ PushHelper(1, src.SizeInBytes(), src, NoReg, NoReg, NoReg);
+
+ Bind(&done);
+ }
+}
+
+
+void MacroAssembler::PushHelper(int count, int size,
+ const CPURegister& src0,
+ const CPURegister& src1,
+ const CPURegister& src2,
+ const CPURegister& src3) {
+ // Ensure that we don't unintentionally modify scratch or debug registers.
+ InstructionAccurateScope scope(this);
+
+ ASSERT(AreSameSizeAndType(src0, src1, src2, src3));
+ ASSERT(size == src0.SizeInBytes());
+
+ // When pushing multiple registers, the store order is chosen such that
+ // Push(a, b) is equivalent to Push(a) followed by Push(b).
+ switch (count) {
+ case 1:
+ ASSERT(src1.IsNone() && src2.IsNone() && src3.IsNone());
+ str(src0, MemOperand(StackPointer(), -1 * size, PreIndex));
+ break;
+ case 2:
+ ASSERT(src2.IsNone() && src3.IsNone());
+ stp(src1, src0, MemOperand(StackPointer(), -2 * size, PreIndex));
+ break;
+ case 3:
+ ASSERT(src3.IsNone());
+ stp(src2, src1, MemOperand(StackPointer(), -3 * size, PreIndex));
+ str(src0, MemOperand(StackPointer(), 2 * size));
+ break;
+ case 4:
+ // Skip over 4 * size, then fill in the gap. This allows four W registers
+ // to be pushed using csp, whilst maintaining 16-byte alignment for csp
+ // at all times.
+ stp(src3, src2, MemOperand(StackPointer(), -4 * size, PreIndex));
+ stp(src1, src0, MemOperand(StackPointer(), 2 * size));
+ break;
+ default:
+ UNREACHABLE();
+ }
+}
+
+
+void MacroAssembler::PopHelper(int count, int size,
+ const CPURegister& dst0,
+ const CPURegister& dst1,
+ const CPURegister& dst2,
+ const CPURegister& dst3) {
+ // Ensure that we don't unintentionally modify scratch or debug registers.
+ InstructionAccurateScope scope(this);
+
+ ASSERT(AreSameSizeAndType(dst0, dst1, dst2, dst3));
+ ASSERT(size == dst0.SizeInBytes());
+
+ // When popping multiple registers, the load order is chosen such that
+ // Pop(a, b) is equivalent to Pop(a) followed by Pop(b).
+ switch (count) {
+ case 1:
+ ASSERT(dst1.IsNone() && dst2.IsNone() && dst3.IsNone());
+ ldr(dst0, MemOperand(StackPointer(), 1 * size, PostIndex));
+ break;
+ case 2:
+ ASSERT(dst2.IsNone() && dst3.IsNone());
+ ldp(dst0, dst1, MemOperand(StackPointer(), 2 * size, PostIndex));
+ break;
+ case 3:
+ ASSERT(dst3.IsNone());
+ ldr(dst2, MemOperand(StackPointer(), 2 * size));
+ ldp(dst0, dst1, MemOperand(StackPointer(), 3 * size, PostIndex));
+ break;
+ case 4:
+ // Load the higher addresses first, then load the lower addresses and
+ // skip the whole block in the second instruction. This allows four W
+ // registers to be popped using csp, whilst maintaining 16-byte alignment
+ // for csp at all times.
+ ldp(dst2, dst3, MemOperand(StackPointer(), 2 * size));
+ ldp(dst0, dst1, MemOperand(StackPointer(), 4 * size, PostIndex));
+ break;
+ default:
+ UNREACHABLE();
+ }
+}
+
+
+void MacroAssembler::PrepareForPush(Operand total_size) {
+ // TODO(jbramley): This assertion generates too much code in some debug tests.
+ // AssertStackConsistency();
+ if (csp.Is(StackPointer())) {
+ // If the current stack pointer is csp, then it must be aligned to 16 bytes
+ // on entry and the total size of the specified registers must also be a
+ // multiple of 16 bytes.
+ if (total_size.IsImmediate()) {
+ ASSERT((total_size.immediate() % 16) == 0);
+ }
+
+ // Don't check access size for non-immediate sizes. It's difficult to do
+ // well, and it will be caught by hardware (or the simulator) anyway.
+ } else {
+ // Even if the current stack pointer is not the system stack pointer (csp),
+ // the system stack pointer will still be modified in order to comply with
+ // ABI rules about accessing memory below the system stack pointer.
+ BumpSystemStackPointer(total_size);
+ }
+}
+
+
+void MacroAssembler::PrepareForPop(Operand total_size) {
+ AssertStackConsistency();
+ if (csp.Is(StackPointer())) {
+ // If the current stack pointer is csp, then it must be aligned to 16 bytes
+ // on entry and the total size of the specified registers must also be a
+ // multiple of 16 bytes.
+ if (total_size.IsImmediate()) {
+ ASSERT((total_size.immediate() % 16) == 0);
+ }
+
+ // Don't check access size for non-immediate sizes. It's difficult to do
+ // well, and it will be caught by hardware (or the simulator) anyway.
+ }
+}
+
+
+void MacroAssembler::Poke(const CPURegister& src, const Operand& offset) {
+ if (offset.IsImmediate()) {
+ ASSERT(offset.immediate() >= 0);
+ } else if (emit_debug_code()) {
+ Cmp(xzr, offset);
+ Check(le, kStackAccessBelowStackPointer);
+ }
+
+ Str(src, MemOperand(StackPointer(), offset));
+}
+
+
+void MacroAssembler::Peek(const CPURegister& dst, const Operand& offset) {
+ if (offset.IsImmediate()) {
+ ASSERT(offset.immediate() >= 0);
+ } else if (emit_debug_code()) {
+ Cmp(xzr, offset);
+ Check(le, kStackAccessBelowStackPointer);
+ }
+
+ Ldr(dst, MemOperand(StackPointer(), offset));
+}
+
+
+void MacroAssembler::PokePair(const CPURegister& src1,
+ const CPURegister& src2,
+ int offset) {
+ ASSERT(AreSameSizeAndType(src1, src2));
+ ASSERT((offset >= 0) && ((offset % src1.SizeInBytes()) == 0));
+ Stp(src1, src2, MemOperand(StackPointer(), offset));
+}
+
+
+void MacroAssembler::PeekPair(const CPURegister& dst1,
+ const CPURegister& dst2,
+ int offset) {
+ ASSERT(AreSameSizeAndType(dst1, dst2));
+ ASSERT((offset >= 0) && ((offset % dst1.SizeInBytes()) == 0));
+ Ldp(dst1, dst2, MemOperand(StackPointer(), offset));
+}
+
+
+void MacroAssembler::PushCalleeSavedRegisters() {
+ // Ensure that the macro-assembler doesn't use any scratch registers.
+ InstructionAccurateScope scope(this);
+
+ // This method must not be called unless the current stack pointer is the
+ // system stack pointer (csp).
+ ASSERT(csp.Is(StackPointer()));
+
+ MemOperand tos(csp, -2 * kXRegSize, PreIndex);
+
+ stp(d14, d15, tos);
+ stp(d12, d13, tos);
+ stp(d10, d11, tos);
+ stp(d8, d9, tos);
+
+ stp(x29, x30, tos);
+ stp(x27, x28, tos); // x28 = jssp
+ stp(x25, x26, tos);
+ stp(x23, x24, tos);
+ stp(x21, x22, tos);
+ stp(x19, x20, tos);
+}
+
+
+void MacroAssembler::PopCalleeSavedRegisters() {
+ // Ensure that the macro-assembler doesn't use any scratch registers.
+ InstructionAccurateScope scope(this);
+
+ // This method must not be called unless the current stack pointer is the
+ // system stack pointer (csp).
+ ASSERT(csp.Is(StackPointer()));
+
+ MemOperand tos(csp, 2 * kXRegSize, PostIndex);
+
+ ldp(x19, x20, tos);
+ ldp(x21, x22, tos);
+ ldp(x23, x24, tos);
+ ldp(x25, x26, tos);
+ ldp(x27, x28, tos); // x28 = jssp
+ ldp(x29, x30, tos);
+
+ ldp(d8, d9, tos);
+ ldp(d10, d11, tos);
+ ldp(d12, d13, tos);
+ ldp(d14, d15, tos);
+}
+
+
+void MacroAssembler::AssertStackConsistency() {
+ if (emit_debug_code()) {
+ if (csp.Is(StackPointer())) {
+ // We can't check the alignment of csp without using a scratch register
+ // (or clobbering the flags), but the processor (or simulator) will abort
+ // if it is not properly aligned during a load.
+ ldr(xzr, MemOperand(csp, 0));
+ } else if (FLAG_enable_slow_asserts) {
+ Label ok;
+ // Check that csp <= StackPointer(), preserving all registers and NZCV.
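+ // Note that sub(StackPointer(), csp, StackPointer()) is its own inverse:
+ // csp - (csp - sp) == sp, so repeating it below restores StackPointer().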
+ sub(StackPointer(), csp, StackPointer());
+ cbz(StackPointer(), &ok); // Ok if csp == StackPointer().
+ tbnz(StackPointer(), kXSignBit, &ok); // Ok if csp < StackPointer().
+
+ Abort(kTheCurrentStackPointerIsBelowCsp);
+
+ bind(&ok);
+ // Restore StackPointer().
+ sub(StackPointer(), csp, StackPointer());
+ }
+ }
+}
+
+
+void MacroAssembler::LoadRoot(Register destination,
+ Heap::RootListIndex index) {
+ // TODO(jbramley): Most root values are constants, and can be synthesized
+ // without a load. Refer to the ARM back end for details.
+ Ldr(destination, MemOperand(root, index << kPointerSizeLog2));
+}
+
+
+void MacroAssembler::StoreRoot(Register source,
+ Heap::RootListIndex index) {
+ Str(source, MemOperand(root, index << kPointerSizeLog2));
+}
+
+
+void MacroAssembler::LoadTrueFalseRoots(Register true_root,
+ Register false_root) {
+ STATIC_ASSERT((Heap::kTrueValueRootIndex + 1) == Heap::kFalseValueRootIndex);
+ Ldp(true_root, false_root,
+ MemOperand(root, Heap::kTrueValueRootIndex << kPointerSizeLog2));
+}
+
+
+void MacroAssembler::LoadHeapObject(Register result,
+ Handle<HeapObject> object) {
+ AllowDeferredHandleDereference using_raw_address;
+ if (isolate()->heap()->InNewSpace(*object)) {
+ Handle<Cell> cell = isolate()->factory()->NewCell(object);
+ Mov(result, Operand(cell));
+ Ldr(result, FieldMemOperand(result, Cell::kValueOffset));
+ } else {
+ Mov(result, Operand(object));
+ }
+}
+
+
+void MacroAssembler::LoadInstanceDescriptors(Register map,
+ Register descriptors) {
+ Ldr(descriptors, FieldMemOperand(map, Map::kDescriptorsOffset));
+}
+
+
+void MacroAssembler::NumberOfOwnDescriptors(Register dst, Register map) {
+ Ldr(dst, FieldMemOperand(map, Map::kBitField3Offset));
+ DecodeField<Map::NumberOfOwnDescriptorsBits>(dst);
+}
+
+
+void MacroAssembler::EnumLengthUntagged(Register dst, Register map) {
+ STATIC_ASSERT(Map::EnumLengthBits::kShift == 0);
+ Ldrsw(dst, UntagSmiFieldMemOperand(map, Map::kBitField3Offset));
+ And(dst, dst, Map::EnumLengthBits::kMask);
+}
+
+
+void MacroAssembler::EnumLengthSmi(Register dst, Register map) {
+ STATIC_ASSERT(Map::EnumLengthBits::kShift == 0);
+ Ldr(dst, FieldMemOperand(map, Map::kBitField3Offset));
+ And(dst, dst, Smi::FromInt(Map::EnumLengthBits::kMask));
+}
+
+
+void MacroAssembler::CheckEnumCache(Register object,
+ Register null_value,
+ Register scratch0,
+ Register scratch1,
+ Register scratch2,
+ Register scratch3,
+ Label* call_runtime) {
+ ASSERT(!AreAliased(object, null_value, scratch0, scratch1, scratch2,
+ scratch3));
+
+ Register empty_fixed_array_value = scratch0;
+ Register current_object = scratch1;
+
+ LoadRoot(empty_fixed_array_value, Heap::kEmptyFixedArrayRootIndex);
+ Label next, start;
+
+ Mov(current_object, object);
+
+ // Check if the enum length field is properly initialized, indicating that
+ // there is an enum cache.
+ Register map = scratch2;
+ Register enum_length = scratch3;
+ Ldr(map, FieldMemOperand(current_object, HeapObject::kMapOffset));
+
+ EnumLengthUntagged(enum_length, map);
+ Cmp(enum_length, kInvalidEnumCacheSentinel);
+ B(eq, call_runtime);
+
+ B(&start);
+
+ Bind(&next);
+ Ldr(map, FieldMemOperand(current_object, HeapObject::kMapOffset));
+
+ // For all objects but the receiver, check that the cache is empty.
+ EnumLengthUntagged(enum_length, map);
+ Cbnz(enum_length, call_runtime);
+
+ Bind(&start);
+
+ // Check that there are no elements. Register current_object contains the
+ // current JS object we've reached through the prototype chain.
+ Label no_elements;
+ Ldr(current_object, FieldMemOperand(current_object,
+ JSObject::kElementsOffset));
+ Cmp(current_object, empty_fixed_array_value);
+ B(eq, &no_elements);
+
+ // Second chance, the object may be using the empty slow element dictionary.
+ CompareRoot(current_object, Heap::kEmptySlowElementDictionaryRootIndex);
+ B(ne, call_runtime);
+
+ Bind(&no_elements);
+ Ldr(current_object, FieldMemOperand(map, Map::kPrototypeOffset));
+ Cmp(current_object, null_value);
+ B(ne, &next);
+}
+
+
+void MacroAssembler::TestJSArrayForAllocationMemento(Register receiver,
+ Register scratch1,
+ Register scratch2,
+ Label* no_memento_found) {
+ ExternalReference new_space_start =
+ ExternalReference::new_space_start(isolate());
+ ExternalReference new_space_allocation_top =
+ ExternalReference::new_space_allocation_top_address(isolate());
+
+ Add(scratch1, receiver,
+ JSArray::kSize + AllocationMemento::kSize - kHeapObjectTag);
+ Cmp(scratch1, new_space_start);
+ B(lt, no_memento_found);
+
+ Mov(scratch2, new_space_allocation_top);
+ Ldr(scratch2, MemOperand(scratch2));
+ Cmp(scratch1, scratch2);
+ B(gt, no_memento_found);
+
+ Ldr(scratch1, MemOperand(scratch1, -AllocationMemento::kSize));
+ Cmp(scratch1,
+ Operand(isolate()->factory()->allocation_memento_map()));
+}
+
+
+void MacroAssembler::JumpToHandlerEntry(Register exception,
+ Register object,
+ Register state,
+ Register scratch1,
+ Register scratch2) {
+ // Handler expects argument in x0.
+ ASSERT(exception.Is(x0));
+
+ // Compute the handler entry address and jump to it. The handler table is
+ // a fixed array of (smi-tagged) code offsets.
+ Ldr(scratch1, FieldMemOperand(object, Code::kHandlerTableOffset));
+ Add(scratch1, scratch1, FixedArray::kHeaderSize - kHeapObjectTag);
+ STATIC_ASSERT(StackHandler::kKindWidth < kPointerSizeLog2);
+ Lsr(scratch2, state, StackHandler::kKindWidth);
+ Ldr(scratch2, MemOperand(scratch1, scratch2, LSL, kPointerSizeLog2));
+ Add(scratch1, object, Code::kHeaderSize - kHeapObjectTag);
+ Add(scratch1, scratch1, Operand::UntagSmi(scratch2));
+ Br(scratch1);
+}
+
+
+void MacroAssembler::InNewSpace(Register object,
+ Condition cond,
+ Label* branch) {
+ ASSERT(cond == eq || cond == ne);
+ UseScratchRegisterScope temps(this);
+ Register temp = temps.AcquireX();
+ And(temp, object, ExternalReference::new_space_mask(isolate()));
+ Cmp(temp, ExternalReference::new_space_start(isolate()));
+ B(cond, branch);
+}
+
+
+void MacroAssembler::Throw(Register value,
+ Register scratch1,
+ Register scratch2,
+ Register scratch3,
+ Register scratch4) {
+ // Adjust this code if not the case.
+ STATIC_ASSERT(StackHandlerConstants::kSize == 5 * kPointerSize);
+ STATIC_ASSERT(StackHandlerConstants::kNextOffset == 0);
+ STATIC_ASSERT(StackHandlerConstants::kCodeOffset == 1 * kPointerSize);
+ STATIC_ASSERT(StackHandlerConstants::kStateOffset == 2 * kPointerSize);
+ STATIC_ASSERT(StackHandlerConstants::kContextOffset == 3 * kPointerSize);
+ STATIC_ASSERT(StackHandlerConstants::kFPOffset == 4 * kPointerSize);
+
+ // The handler expects the exception in x0.
+ ASSERT(value.Is(x0));
+
+ // Drop the stack pointer to the top of the top handler.
+ ASSERT(jssp.Is(StackPointer()));
+ Mov(scratch1, Operand(ExternalReference(Isolate::kHandlerAddress,
+ isolate())));
+ Ldr(jssp, MemOperand(scratch1));
+ // Restore the next handler.
+ Pop(scratch2);
+ Str(scratch2, MemOperand(scratch1));
+
+ // Get the code object and state. Restore the context and frame pointer.
+ Register object = scratch1;
+ Register state = scratch2;
+ Pop(object, state, cp, fp);
+
+ // If the handler is a JS frame, restore the context to the frame.
+ // (kind == ENTRY) == (fp == 0) == (cp == 0), so we could test either fp
+ // or cp.
+ Label not_js_frame;
+ Cbz(cp, &not_js_frame);
+ Str(cp, MemOperand(fp, StandardFrameConstants::kContextOffset));
+ Bind(&not_js_frame);
+
+ JumpToHandlerEntry(value, object, state, scratch3, scratch4);
+}
+
+
+void MacroAssembler::ThrowUncatchable(Register value,
+ Register scratch1,
+ Register scratch2,
+ Register scratch3,
+ Register scratch4) {
+ // Adjust this code if not the case.
+ STATIC_ASSERT(StackHandlerConstants::kSize == 5 * kPointerSize);
+ STATIC_ASSERT(StackHandlerConstants::kNextOffset == 0 * kPointerSize);
+ STATIC_ASSERT(StackHandlerConstants::kCodeOffset == 1 * kPointerSize);
+ STATIC_ASSERT(StackHandlerConstants::kStateOffset == 2 * kPointerSize);
+ STATIC_ASSERT(StackHandlerConstants::kContextOffset == 3 * kPointerSize);
+ STATIC_ASSERT(StackHandlerConstants::kFPOffset == 4 * kPointerSize);
+
+ // The handler expects the exception in x0.
+ ASSERT(value.Is(x0));
+
+ // Drop the stack pointer to the top of the top stack handler.
+ ASSERT(jssp.Is(StackPointer()));
+ Mov(scratch1, Operand(ExternalReference(Isolate::kHandlerAddress,
+ isolate())));
+ Ldr(jssp, MemOperand(scratch1));
+
+ // Unwind the handlers until the ENTRY handler is found.
+ Label fetch_next, check_kind;
+ B(&check_kind);
+ Bind(&fetch_next);
+ Peek(jssp, StackHandlerConstants::kNextOffset);
+
+ Bind(&check_kind);
+ STATIC_ASSERT(StackHandler::JS_ENTRY == 0);
+ Peek(scratch2, StackHandlerConstants::kStateOffset);
+ TestAndBranchIfAnySet(scratch2, StackHandler::KindField::kMask, &fetch_next);
+
+ // Set the top handler address to next handler past the top ENTRY handler.
+ Pop(scratch2);
+ Str(scratch2, MemOperand(scratch1));
+
+ // Get the code object and state. Clear the context and frame pointer (0 was
+ // saved in the handler).
+ Register object = scratch1;
+ Register state = scratch2;
+ Pop(object, state, cp, fp);
+
+ JumpToHandlerEntry(value, object, state, scratch3, scratch4);
+}
+
+
+void MacroAssembler::Throw(BailoutReason reason) {
+ Label throw_start;
+ Bind(&throw_start);
+#ifdef DEBUG
+ const char* msg = GetBailoutReason(reason);
+ RecordComment("Throw message: ");
+ RecordComment((msg != NULL) ? msg : "UNKNOWN");
+#endif
+
+ Mov(x0, Smi::FromInt(reason));
+ Push(x0);
+
+ // Disable stub call restrictions to always allow calls to throw.
+ if (!has_frame_) {
+ // We don't actually want to generate a pile of code for this, so just
+ // claim there is a stack frame, without generating one.
+ FrameScope scope(this, StackFrame::NONE);
+ CallRuntime(Runtime::kHiddenThrowMessage, 1);
+ } else {
+ CallRuntime(Runtime::kHiddenThrowMessage, 1);
+ }
+ // ThrowMessage should not return here.
+ Unreachable();
+}
+
+
+void MacroAssembler::ThrowIf(Condition cc, BailoutReason reason) {
+ Label ok;
+ B(InvertCondition(cc), &ok);
+ Throw(reason);
+ Bind(&ok);
+}
+
+
+void MacroAssembler::ThrowIfSmi(const Register& value, BailoutReason reason) {
+ Label ok;
+ JumpIfNotSmi(value, &ok);
+ Throw(reason);
+ Bind(&ok);
+}
+
+
+void MacroAssembler::SmiAbs(const Register& smi, Label* slow) {
+ ASSERT(smi.Is64Bits());
+ Abs(smi, smi, slow);
+}
+
+
+void MacroAssembler::AssertSmi(Register object, BailoutReason reason) {
+ if (emit_debug_code()) {
+ STATIC_ASSERT(kSmiTag == 0);
+ Tst(object, kSmiTagMask);
+ Check(eq, reason);
+ }
+}
+
+
+void MacroAssembler::AssertNotSmi(Register object, BailoutReason reason) {
+ if (emit_debug_code()) {
+ STATIC_ASSERT(kSmiTag == 0);
+ Tst(object, kSmiTagMask);
+ Check(ne, reason);
+ }
+}
+
+
+void MacroAssembler::AssertName(Register object) {
+ if (emit_debug_code()) {
+ AssertNotSmi(object, kOperandIsASmiAndNotAName);
+
+ UseScratchRegisterScope temps(this);
+ Register temp = temps.AcquireX();
+
+ Ldr(temp, FieldMemOperand(object, HeapObject::kMapOffset));
+ CompareInstanceType(temp, temp, LAST_NAME_TYPE);
+ Check(ls, kOperandIsNotAName);
+ }
+}
+
+
+void MacroAssembler::AssertUndefinedOrAllocationSite(Register object,
+ Register scratch) {
+ if (emit_debug_code()) {
+ Label done_checking;
+ AssertNotSmi(object);
+ JumpIfRoot(object, Heap::kUndefinedValueRootIndex, &done_checking);
+ Ldr(scratch, FieldMemOperand(object, HeapObject::kMapOffset));
+ CompareRoot(scratch, Heap::kAllocationSiteMapRootIndex);
+ Assert(eq, kExpectedUndefinedOrCell);
+ Bind(&done_checking);
+ }
+}
+
+
+void MacroAssembler::AssertString(Register object) {
+ if (emit_debug_code()) {
+ UseScratchRegisterScope temps(this);
+ Register temp = temps.AcquireX();
+ STATIC_ASSERT(kSmiTag == 0);
+ Tst(object, kSmiTagMask);
+ Check(ne, kOperandIsASmiAndNotAString);
+ Ldr(temp, FieldMemOperand(object, HeapObject::kMapOffset));
+ CompareInstanceType(temp, temp, FIRST_NONSTRING_TYPE);
+ Check(lo, kOperandIsNotAString);
+ }
+}
+
+
+void MacroAssembler::CallStub(CodeStub* stub, TypeFeedbackId ast_id) {
+ ASSERT(AllowThisStubCall(stub)); // Stub calls are not allowed in some stubs.
+ Call(stub->GetCode(isolate()), RelocInfo::CODE_TARGET, ast_id);
+}
+
+
+void MacroAssembler::TailCallStub(CodeStub* stub) {
+ Jump(stub->GetCode(isolate()), RelocInfo::CODE_TARGET);
+}
+
+
+void MacroAssembler::CallRuntime(const Runtime::Function* f,
+ int num_arguments,
+ SaveFPRegsMode save_doubles) {
+ // All arguments must be on the stack before this function is called.
+ // x0 holds the return value after the call.
+
+ // Check that the number of arguments matches what the function expects.
+ // If f->nargs is -1, the function can accept a variable number of arguments.
+ if (f->nargs >= 0 && f->nargs != num_arguments) {
+ // Illegal operation: drop the stack arguments and return undefined.
+ if (num_arguments > 0) {
+ Drop(num_arguments);
+ }
+ LoadRoot(x0, Heap::kUndefinedValueRootIndex);
+ return;
+ }
+
+ // Place the necessary arguments.
+ Mov(x0, num_arguments);
+ Mov(x1, ExternalReference(f, isolate()));
+
+ CEntryStub stub(1, save_doubles);
+ CallStub(&stub);
+}
+
+
+static int AddressOffset(ExternalReference ref0, ExternalReference ref1) {
+ return ref0.address() - ref1.address();
+}
+
+
+void MacroAssembler::CallApiFunctionAndReturn(
+ Register function_address,
+ ExternalReference thunk_ref,
+ int stack_space,
+ int spill_offset,
+ MemOperand return_value_operand,
+ MemOperand* context_restore_operand) {
+ ASM_LOCATION("CallApiFunctionAndReturn");
+ ExternalReference next_address =
+ ExternalReference::handle_scope_next_address(isolate());
+ const int kNextOffset = 0;
+ const int kLimitOffset = AddressOffset(
+ ExternalReference::handle_scope_limit_address(isolate()),
+ next_address);
+ const int kLevelOffset = AddressOffset(
+ ExternalReference::handle_scope_level_address(isolate()),
+ next_address);
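+  // Note that next_address serves as the base for all three fields:
+  // kNextOffset is 0, and kLimitOffset and kLevelOffset are the distances of
+  // the limit and level addresses from it, so a single base register
+  // (handle_scope_base, below) can address all three handle scope fields.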
+
+ ASSERT(function_address.is(x1) || function_address.is(x2));
+
+ Label profiler_disabled;
+ Label end_profiler_check;
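+  // If the CPU profiler is running, route the call through the profiling
+  // thunk (thunk_ref) so the profiler can account for the external callback;
+  // otherwise call the API function directly.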
+ bool* is_profiling_flag = isolate()->cpu_profiler()->is_profiling_address();
+ STATIC_ASSERT(sizeof(*is_profiling_flag) == 1);
+ Mov(x10, reinterpret_cast<uintptr_t>(is_profiling_flag));
+ Ldrb(w10, MemOperand(x10));
+ Cbz(w10, &profiler_disabled);
+ Mov(x3, thunk_ref);
+ B(&end_profiler_check);
+
+ Bind(&profiler_disabled);
+ Mov(x3, function_address);
+ Bind(&end_profiler_check);
+
+ // Save the callee-save registers we are going to use.
+ // TODO(all): Is this necessary? ARM doesn't do it.
+ STATIC_ASSERT(kCallApiFunctionSpillSpace == 4);
+ Poke(x19, (spill_offset + 0) * kXRegSize);
+ Poke(x20, (spill_offset + 1) * kXRegSize);
+ Poke(x21, (spill_offset + 2) * kXRegSize);
+ Poke(x22, (spill_offset + 3) * kXRegSize);
+
+ // Allocate HandleScope in callee-save registers.
+  // We will need to restore the HandleScope after the call to the API
+  // function; by allocating it in callee-save registers it will be preserved
+  // by the C code.
+ Register handle_scope_base = x22;
+ Register next_address_reg = x19;
+ Register limit_reg = x20;
+ Register level_reg = w21;
+
+ Mov(handle_scope_base, next_address);
+ Ldr(next_address_reg, MemOperand(handle_scope_base, kNextOffset));
+ Ldr(limit_reg, MemOperand(handle_scope_base, kLimitOffset));
+ Ldr(level_reg, MemOperand(handle_scope_base, kLevelOffset));
+ Add(level_reg, level_reg, 1);
+ Str(level_reg, MemOperand(handle_scope_base, kLevelOffset));
+
+ if (FLAG_log_timer_events) {
+ FrameScope frame(this, StackFrame::MANUAL);
+ PushSafepointRegisters();
+ Mov(x0, ExternalReference::isolate_address(isolate()));
+ CallCFunction(ExternalReference::log_enter_external_function(isolate()), 1);
+ PopSafepointRegisters();
+ }
+
+ // Native call returns to the DirectCEntry stub which redirects to the
+ // return address pushed on stack (could have moved after GC).
+ // DirectCEntry stub itself is generated early and never moves.
+ DirectCEntryStub stub;
+ stub.GenerateCall(this, x3);
+
+ if (FLAG_log_timer_events) {
+ FrameScope frame(this, StackFrame::MANUAL);
+ PushSafepointRegisters();
+ Mov(x0, ExternalReference::isolate_address(isolate()));
+ CallCFunction(ExternalReference::log_leave_external_function(isolate()), 1);
+ PopSafepointRegisters();
+ }
+
+ Label promote_scheduled_exception;
+ Label exception_handled;
+ Label delete_allocated_handles;
+ Label leave_exit_frame;
+ Label return_value_loaded;
+
+ // Load value from ReturnValue.
+ Ldr(x0, return_value_operand);
+ Bind(&return_value_loaded);
+ // No more valid handles (the result handle was the last one). Restore
+ // previous handle scope.
+ Str(next_address_reg, MemOperand(handle_scope_base, kNextOffset));
+ if (emit_debug_code()) {
+ Ldr(w1, MemOperand(handle_scope_base, kLevelOffset));
+ Cmp(w1, level_reg);
+ Check(eq, kUnexpectedLevelAfterReturnFromApiCall);
+ }
+ Sub(level_reg, level_reg, 1);
+ Str(level_reg, MemOperand(handle_scope_base, kLevelOffset));
+ Ldr(x1, MemOperand(handle_scope_base, kLimitOffset));
+ Cmp(limit_reg, x1);
+ B(ne, &delete_allocated_handles);
+
+ Bind(&leave_exit_frame);
+ // Restore callee-saved registers.
+ Peek(x19, (spill_offset + 0) * kXRegSize);
+ Peek(x20, (spill_offset + 1) * kXRegSize);
+ Peek(x21, (spill_offset + 2) * kXRegSize);
+ Peek(x22, (spill_offset + 3) * kXRegSize);
+
+ // Check if the function scheduled an exception.
+ Mov(x5, ExternalReference::scheduled_exception_address(isolate()));
+ Ldr(x5, MemOperand(x5));
+ JumpIfNotRoot(x5, Heap::kTheHoleValueRootIndex, &promote_scheduled_exception);
+ Bind(&exception_handled);
+
+ bool restore_context = context_restore_operand != NULL;
+ if (restore_context) {
+ Ldr(cp, *context_restore_operand);
+ }
+
+ LeaveExitFrame(false, x1, !restore_context);
+ Drop(stack_space);
+ Ret();
+
+ Bind(&promote_scheduled_exception);
+ {
+ FrameScope frame(this, StackFrame::INTERNAL);
+ CallExternalReference(
+ ExternalReference(
+ Runtime::kHiddenPromoteScheduledException, isolate()), 0);
+ }
+ B(&exception_handled);
+
+ // HandleScope limit has changed. Delete allocated extensions.
+ Bind(&delete_allocated_handles);
+ Str(limit_reg, MemOperand(handle_scope_base, kLimitOffset));
+ // Save the return value in a callee-save register.
+ Register saved_result = x19;
+ Mov(saved_result, x0);
+ Mov(x0, ExternalReference::isolate_address(isolate()));
+ CallCFunction(
+ ExternalReference::delete_handle_scope_extensions(isolate()), 1);
+ Mov(x0, saved_result);
+ B(&leave_exit_frame);
+}
+
+
+void MacroAssembler::CallExternalReference(const ExternalReference& ext,
+ int num_arguments) {
+ Mov(x0, num_arguments);
+ Mov(x1, ext);
+
+ CEntryStub stub(1);
+ CallStub(&stub);
+}
+
+
+void MacroAssembler::JumpToExternalReference(const ExternalReference& builtin) {
+ Mov(x1, builtin);
+ CEntryStub stub(1);
+ Jump(stub.GetCode(isolate()), RelocInfo::CODE_TARGET);
+}
+
+
+void MacroAssembler::GetBuiltinFunction(Register target,
+ Builtins::JavaScript id) {
+ // Load the builtins object into target register.
+ Ldr(target, GlobalObjectMemOperand());
+ Ldr(target, FieldMemOperand(target, GlobalObject::kBuiltinsOffset));
+ // Load the JavaScript builtin function from the builtins object.
+ Ldr(target, FieldMemOperand(target,
+ JSBuiltinsObject::OffsetOfFunctionWithId(id)));
+}
+
+
+void MacroAssembler::GetBuiltinEntry(Register target,
+ Register function,
+ Builtins::JavaScript id) {
+ ASSERT(!AreAliased(target, function));
+ GetBuiltinFunction(function, id);
+ // Load the code entry point from the builtins object.
+ Ldr(target, FieldMemOperand(function, JSFunction::kCodeEntryOffset));
+}
+
+
+void MacroAssembler::InvokeBuiltin(Builtins::JavaScript id,
+ InvokeFlag flag,
+ const CallWrapper& call_wrapper) {
+ ASM_LOCATION("MacroAssembler::InvokeBuiltin");
+ // You can't call a builtin without a valid frame.
+ ASSERT(flag == JUMP_FUNCTION || has_frame());
+
+ // Get the builtin entry in x2 and setup the function object in x1.
+ GetBuiltinEntry(x2, x1, id);
+ if (flag == CALL_FUNCTION) {
+ call_wrapper.BeforeCall(CallSize(x2));
+ Call(x2);
+ call_wrapper.AfterCall();
+ } else {
+ ASSERT(flag == JUMP_FUNCTION);
+ Jump(x2);
+ }
+}
+
+
+void MacroAssembler::TailCallExternalReference(const ExternalReference& ext,
+ int num_arguments,
+ int result_size) {
+ // TODO(1236192): Most runtime routines don't need the number of
+ // arguments passed in because it is constant. At some point we
+ // should remove this need and make the runtime routine entry code
+ // smarter.
+ Mov(x0, num_arguments);
+ JumpToExternalReference(ext);
+}
+
+
+void MacroAssembler::TailCallRuntime(Runtime::FunctionId fid,
+ int num_arguments,
+ int result_size) {
+ TailCallExternalReference(ExternalReference(fid, isolate()),
+ num_arguments,
+ result_size);
+}
+
+
+void MacroAssembler::InitializeNewString(Register string,
+ Register length,
+ Heap::RootListIndex map_index,
+ Register scratch1,
+ Register scratch2) {
+ ASSERT(!AreAliased(string, length, scratch1, scratch2));
+ LoadRoot(scratch2, map_index);
+ SmiTag(scratch1, length);
+ Str(scratch2, FieldMemOperand(string, HeapObject::kMapOffset));
+
+ Mov(scratch2, String::kEmptyHashField);
+ Str(scratch1, FieldMemOperand(string, String::kLengthOffset));
+ Str(scratch2, FieldMemOperand(string, String::kHashFieldOffset));
+}
+
+
+int MacroAssembler::ActivationFrameAlignment() {
+#if V8_HOST_ARCH_ARM64
+ // Running on the real platform. Use the alignment as mandated by the local
+ // environment.
+ // Note: This will break if we ever start generating snapshots on one ARM
+ // platform for another ARM platform with a different alignment.
+ return OS::ActivationFrameAlignment();
+#else // V8_HOST_ARCH_ARM64
+ // If we are using the simulator then we should always align to the expected
+ // alignment. As the simulator is used to generate snapshots we do not know
+ // if the target platform will need alignment, so this is controlled from a
+ // flag.
+ return FLAG_sim_stack_alignment;
+#endif // V8_HOST_ARCH_ARM64
+}
+
+
+void MacroAssembler::CallCFunction(ExternalReference function,
+ int num_of_reg_args) {
+ CallCFunction(function, num_of_reg_args, 0);
+}
+
+
+void MacroAssembler::CallCFunction(ExternalReference function,
+ int num_of_reg_args,
+ int num_of_double_args) {
+ UseScratchRegisterScope temps(this);
+ Register temp = temps.AcquireX();
+ Mov(temp, function);
+ CallCFunction(temp, num_of_reg_args, num_of_double_args);
+}
+
+
+void MacroAssembler::CallCFunction(Register function,
+ int num_of_reg_args,
+ int num_of_double_args) {
+ ASSERT(has_frame());
+ // We can pass 8 integer arguments in registers. If we need to pass more than
+ // that, we'll need to implement support for passing them on the stack.
+ ASSERT(num_of_reg_args <= 8);
+
+ // If we're passing doubles, we're limited to the following prototypes
+ // (defined by ExternalReference::Type):
+ // BUILTIN_COMPARE_CALL: int f(double, double)
+ // BUILTIN_FP_FP_CALL: double f(double, double)
+ // BUILTIN_FP_CALL: double f(double)
+ // BUILTIN_FP_INT_CALL: double f(double, int)
+ if (num_of_double_args > 0) {
+ ASSERT(num_of_reg_args <= 1);
+ ASSERT((num_of_double_args + num_of_reg_args) <= 2);
+ }
+
+ // If the stack pointer is not csp, we need to derive an aligned csp from the
+ // current stack pointer.
+ const Register old_stack_pointer = StackPointer();
+ if (!csp.Is(old_stack_pointer)) {
+ AssertStackConsistency();
+
+ int sp_alignment = ActivationFrameAlignment();
+ // The ABI mandates at least 16-byte alignment.
+ ASSERT(sp_alignment >= 16);
+ ASSERT(IsPowerOf2(sp_alignment));
+
+    // The current stack pointer is a callee-saved register, and is preserved
+ // across the call.
+ ASSERT(kCalleeSaved.IncludesAliasOf(old_stack_pointer));
+
+ // Align and synchronize the system stack pointer with jssp.
+ Bic(csp, old_stack_pointer, sp_alignment - 1);
+ SetStackPointer(csp);
+ }
+
+ // Call directly. The function called cannot cause a GC, or allow preemption,
+ // so the return address in the link register stays correct.
+ Call(function);
+
+ if (!csp.Is(old_stack_pointer)) {
+ if (emit_debug_code()) {
+ // Because the stack pointer must be aligned on a 16-byte boundary, the
+ // aligned csp can be up to 12 bytes below the jssp. This is the case
+ // where we only pushed one W register on top of an aligned jssp.
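+      // For example, if jssp was 16-byte aligned and a single W register
+      // (4 bytes) was pushed, jssp sits 4 bytes below the alignment boundary
+      // and the earlier Bic rounded csp down to 12 bytes below jssp.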
+ UseScratchRegisterScope temps(this);
+ Register temp = temps.AcquireX();
+ ASSERT(ActivationFrameAlignment() == 16);
+ Sub(temp, csp, old_stack_pointer);
+ // We want temp <= 0 && temp >= -12.
+ Cmp(temp, 0);
+ Ccmp(temp, -12, NFlag, le);
+ Check(ge, kTheStackWasCorruptedByMacroAssemblerCall);
+ }
+ SetStackPointer(old_stack_pointer);
+ }
+}
+
+
+void MacroAssembler::Jump(Register target) {
+ Br(target);
+}
+
+
+void MacroAssembler::Jump(intptr_t target, RelocInfo::Mode rmode) {
+ UseScratchRegisterScope temps(this);
+ Register temp = temps.AcquireX();
+ Mov(temp, Operand(target, rmode));
+ Br(temp);
+}
+
+
+void MacroAssembler::Jump(Address target, RelocInfo::Mode rmode) {
+ ASSERT(!RelocInfo::IsCodeTarget(rmode));
+ Jump(reinterpret_cast<intptr_t>(target), rmode);
+}
+
+
+void MacroAssembler::Jump(Handle<Code> code, RelocInfo::Mode rmode) {
+ ASSERT(RelocInfo::IsCodeTarget(rmode));
+ AllowDeferredHandleDereference embedding_raw_address;
+ Jump(reinterpret_cast<intptr_t>(code.location()), rmode);
+}
+
+
+void MacroAssembler::Call(Register target) {
+ BlockPoolsScope scope(this);
+#ifdef DEBUG
+ Label start_call;
+ Bind(&start_call);
+#endif
+
+ Blr(target);
+
+#ifdef DEBUG
+ AssertSizeOfCodeGeneratedSince(&start_call, CallSize(target));
+#endif
+}
+
+
+void MacroAssembler::Call(Label* target) {
+ BlockPoolsScope scope(this);
+#ifdef DEBUG
+ Label start_call;
+ Bind(&start_call);
+#endif
+
+ Bl(target);
+
+#ifdef DEBUG
+ AssertSizeOfCodeGeneratedSince(&start_call, CallSize(target));
+#endif
+}
+
+
+// MacroAssembler::CallSize is sensitive to changes in this function, as it
+// requires to know how many instructions are used to branch to the target.
+void MacroAssembler::Call(Address target, RelocInfo::Mode rmode) {
+ BlockPoolsScope scope(this);
+#ifdef DEBUG
+ Label start_call;
+ Bind(&start_call);
+#endif
+ // Statement positions are expected to be recorded when the target
+ // address is loaded.
+ positions_recorder()->WriteRecordedPositions();
+
+ // Addresses always have 64 bits, so we shouldn't encounter NONE32.
+ ASSERT(rmode != RelocInfo::NONE32);
+
+ UseScratchRegisterScope temps(this);
+ Register temp = temps.AcquireX();
+
+ if (rmode == RelocInfo::NONE64) {
+ // Addresses are 48 bits so we never need to load the upper 16 bits.
+ uint64_t imm = reinterpret_cast<uint64_t>(target);
+    // Since we don't use ARM tagged addresses, the upper 16 bits must be 0.
+ ASSERT(((imm >> 48) & 0xffff) == 0);
+ movz(temp, (imm >> 0) & 0xffff, 0);
+ movk(temp, (imm >> 16) & 0xffff, 16);
+ movk(temp, (imm >> 32) & 0xffff, 32);
+ } else {
+ LoadRelocated(temp, Operand(reinterpret_cast<intptr_t>(target), rmode));
+ }
+ Blr(temp);
+#ifdef DEBUG
+ AssertSizeOfCodeGeneratedSince(&start_call, CallSize(target, rmode));
+#endif
+}
+
+
+void MacroAssembler::Call(Handle<Code> code,
+ RelocInfo::Mode rmode,
+ TypeFeedbackId ast_id) {
+#ifdef DEBUG
+ Label start_call;
+ Bind(&start_call);
+#endif
+
+ if ((rmode == RelocInfo::CODE_TARGET) && (!ast_id.IsNone())) {
+ SetRecordedAstId(ast_id);
+ rmode = RelocInfo::CODE_TARGET_WITH_ID;
+ }
+
+ AllowDeferredHandleDereference embedding_raw_address;
+ Call(reinterpret_cast<Address>(code.location()), rmode);
+
+#ifdef DEBUG
+ // Check the size of the code generated.
+ AssertSizeOfCodeGeneratedSince(&start_call, CallSize(code, rmode, ast_id));
+#endif
+}
+
+
+int MacroAssembler::CallSize(Register target) {
+ USE(target);
+ return kInstructionSize;
+}
+
+
+int MacroAssembler::CallSize(Label* target) {
+ USE(target);
+ return kInstructionSize;
+}
+
+
+int MacroAssembler::CallSize(Address target, RelocInfo::Mode rmode) {
+ USE(target);
+
+ // Addresses always have 64 bits, so we shouldn't encounter NONE32.
+ ASSERT(rmode != RelocInfo::NONE32);
+
+ if (rmode == RelocInfo::NONE64) {
+ return kCallSizeWithoutRelocation;
+ } else {
+ return kCallSizeWithRelocation;
+ }
+}
+
+
+int MacroAssembler::CallSize(Handle<Code> code,
+ RelocInfo::Mode rmode,
+ TypeFeedbackId ast_id) {
+ USE(code);
+ USE(ast_id);
+
+ // Addresses always have 64 bits, so we shouldn't encounter NONE32.
+ ASSERT(rmode != RelocInfo::NONE32);
+
+ if (rmode == RelocInfo::NONE64) {
+ return kCallSizeWithoutRelocation;
+ } else {
+ return kCallSizeWithRelocation;
+ }
+}
+
+
+
+void MacroAssembler::JumpForHeapNumber(Register object,
+ Register heap_number_map,
+ Label* on_heap_number,
+ Label* on_not_heap_number) {
+ ASSERT(on_heap_number || on_not_heap_number);
+ AssertNotSmi(object);
+
+ UseScratchRegisterScope temps(this);
+ Register temp = temps.AcquireX();
+
+ // Load the HeapNumber map if it is not passed.
+ if (heap_number_map.Is(NoReg)) {
+ heap_number_map = temps.AcquireX();
+ LoadRoot(heap_number_map, Heap::kHeapNumberMapRootIndex);
+ } else {
+ AssertRegisterIsRoot(heap_number_map, Heap::kHeapNumberMapRootIndex);
+ }
+
+ ASSERT(!AreAliased(temp, heap_number_map));
+
+ Ldr(temp, FieldMemOperand(object, HeapObject::kMapOffset));
+ Cmp(temp, heap_number_map);
+
+ if (on_heap_number) {
+ B(eq, on_heap_number);
+ }
+ if (on_not_heap_number) {
+ B(ne, on_not_heap_number);
+ }
+}
+
+
+void MacroAssembler::JumpIfHeapNumber(Register object,
+ Label* on_heap_number,
+ Register heap_number_map) {
+ JumpForHeapNumber(object,
+ heap_number_map,
+ on_heap_number,
+ NULL);
+}
+
+
+void MacroAssembler::JumpIfNotHeapNumber(Register object,
+ Label* on_not_heap_number,
+ Register heap_number_map) {
+ JumpForHeapNumber(object,
+ heap_number_map,
+ NULL,
+ on_not_heap_number);
+}
+
+
+void MacroAssembler::LookupNumberStringCache(Register object,
+ Register result,
+ Register scratch1,
+ Register scratch2,
+ Register scratch3,
+ Label* not_found) {
+ ASSERT(!AreAliased(object, result, scratch1, scratch2, scratch3));
+
+ // Use of registers. Register result is used as a temporary.
+ Register number_string_cache = result;
+ Register mask = scratch3;
+
+ // Load the number string cache.
+ LoadRoot(number_string_cache, Heap::kNumberStringCacheRootIndex);
+
+ // Make the hash mask from the length of the number string cache. It
+ // contains two elements (number and string) for each cache entry.
+ Ldrsw(mask, UntagSmiFieldMemOperand(number_string_cache,
+ FixedArray::kLengthOffset));
+ Asr(mask, mask, 1); // Divide length by two.
+ Sub(mask, mask, 1); // Make mask.
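+  // For example, a cache backed by a FixedArray of length 128 holds 64
+  // (number, string) pairs, so the mask is 63 and an entry index is computed
+  // as (hash & 63).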
+
+ // Calculate the entry in the number string cache. The hash value in the
+ // number string cache for smis is just the smi value, and the hash for
+ // doubles is the xor of the upper and lower words. See
+ // Heap::GetNumberStringCache.
+ Label is_smi;
+ Label load_result_from_cache;
+
+ JumpIfSmi(object, &is_smi);
+ CheckMap(object, scratch1, Heap::kHeapNumberMapRootIndex, not_found,
+ DONT_DO_SMI_CHECK);
+
+ STATIC_ASSERT(kDoubleSize == (kWRegSize * 2));
+ Add(scratch1, object, HeapNumber::kValueOffset - kHeapObjectTag);
+ Ldp(scratch1.W(), scratch2.W(), MemOperand(scratch1));
+ Eor(scratch1, scratch1, scratch2);
+ And(scratch1, scratch1, mask);
+
+ // Calculate address of entry in string cache: each entry consists of two
+ // pointer sized fields.
+ Add(scratch1, number_string_cache,
+ Operand(scratch1, LSL, kPointerSizeLog2 + 1));
+
+ Register probe = mask;
+ Ldr(probe, FieldMemOperand(scratch1, FixedArray::kHeaderSize));
+ JumpIfSmi(probe, not_found);
+ Ldr(d0, FieldMemOperand(object, HeapNumber::kValueOffset));
+ Ldr(d1, FieldMemOperand(probe, HeapNumber::kValueOffset));
+ Fcmp(d0, d1);
+ B(ne, not_found);
+ B(&load_result_from_cache);
+
+ Bind(&is_smi);
+ Register scratch = scratch1;
+ And(scratch, mask, Operand::UntagSmi(object));
+ // Calculate address of entry in string cache: each entry consists
+ // of two pointer sized fields.
+ Add(scratch, number_string_cache,
+ Operand(scratch, LSL, kPointerSizeLog2 + 1));
+
+ // Check if the entry is the smi we are looking for.
+ Ldr(probe, FieldMemOperand(scratch, FixedArray::kHeaderSize));
+ Cmp(object, probe);
+ B(ne, not_found);
+
+ // Get the result from the cache.
+ Bind(&load_result_from_cache);
+ Ldr(result, FieldMemOperand(scratch, FixedArray::kHeaderSize + kPointerSize));
+ IncrementCounter(isolate()->counters()->number_to_string_native(), 1,
+ scratch1, scratch2);
+}
+
+
+void MacroAssembler::TryConvertDoubleToInt(Register as_int,
+ FPRegister value,
+ FPRegister scratch_d,
+ Label* on_successful_conversion,
+ Label* on_failed_conversion) {
+ // Convert to an int and back again, then compare with the original value.
+ Fcvtzs(as_int, value);
+ Scvtf(scratch_d, as_int);
+ Fcmp(value, scratch_d);
+
+ if (on_successful_conversion) {
+ B(on_successful_conversion, eq);
+ }
+ if (on_failed_conversion) {
+ B(on_failed_conversion, ne);
+ }
+}
+
+
+void MacroAssembler::TestForMinusZero(DoubleRegister input) {
+ UseScratchRegisterScope temps(this);
+ Register temp = temps.AcquireX();
+ // Floating point -0.0 is kMinInt as an integer, so subtracting 1 (cmp) will
+ // cause overflow.
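+  // (The bit pattern of -0.0 is 0x8000000000000000, the most negative 64-bit
+  // integer; no other double has that pattern, so only -0.0 sets the V flag
+  // here.)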
+ Fmov(temp, input);
+ Cmp(temp, 1);
+}
+
+
+void MacroAssembler::JumpIfMinusZero(DoubleRegister input,
+ Label* on_negative_zero) {
+ TestForMinusZero(input);
+ B(vs, on_negative_zero);
+}
+
+
+void MacroAssembler::ClampInt32ToUint8(Register output, Register input) {
+ // Clamp the value to [0..255].
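+  // For example, -5 compares below (-5 & 0xff) = 251 and saturates to 0,
+  // 300 compares above (300 & 0xff) = 44 and saturates to 255, and 100
+  // compares equal to (100 & 0xff) and is left unchanged.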
+ Cmp(input.W(), Operand(input.W(), UXTB));
+ // If input < input & 0xff, it must be < 0, so saturate to 0.
+ Csel(output.W(), wzr, input.W(), lt);
+ // If input <= input & 0xff, it must be <= 255. Otherwise, saturate to 255.
+ Csel(output.W(), output.W(), 255, le);
+}
+
+
+void MacroAssembler::ClampInt32ToUint8(Register in_out) {
+ ClampInt32ToUint8(in_out, in_out);
+}
+
+
+void MacroAssembler::ClampDoubleToUint8(Register output,
+ DoubleRegister input,
+ DoubleRegister dbl_scratch) {
+ // This conversion follows the WebIDL "[Clamp]" rules for PIXEL types:
+ // - Inputs lower than 0 (including -infinity) produce 0.
+ // - Inputs higher than 255 (including +infinity) produce 255.
+ // Also, it seems that PIXEL types use round-to-nearest rather than
+ // round-towards-zero.
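+  // For example, -3.5 produces 0, 256.7 is clamped to 255, and 127.5 rounds
+  // to nearest-even, producing 128.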
+
+ // Squash +infinity before the conversion, since Fcvtnu will normally
+ // convert it to 0.
+ Fmov(dbl_scratch, 255);
+ Fmin(dbl_scratch, dbl_scratch, input);
+
+ // Convert double to unsigned integer. Values less than zero become zero.
+ // Values greater than 255 have already been clamped to 255.
+ Fcvtnu(output, dbl_scratch);
+}
+
+
+void MacroAssembler::CopyFieldsLoopPairsHelper(Register dst,
+ Register src,
+ unsigned count,
+ Register scratch1,
+ Register scratch2,
+ Register scratch3,
+ Register scratch4,
+ Register scratch5) {
+ // Untag src and dst into scratch registers.
+ // Copy src->dst in a tight loop.
+ ASSERT(!AreAliased(dst, src,
+ scratch1, scratch2, scratch3, scratch4, scratch5));
+ ASSERT(count >= 2);
+
+ const Register& remaining = scratch3;
+ Mov(remaining, count / 2);
+
+ const Register& dst_untagged = scratch1;
+ const Register& src_untagged = scratch2;
+ Sub(dst_untagged, dst, kHeapObjectTag);
+ Sub(src_untagged, src, kHeapObjectTag);
+
+ // Copy fields in pairs.
+ Label loop;
+ Bind(&loop);
+  Ldp(scratch4, scratch5,
+      MemOperand(src_untagged, kXRegSize * 2, PostIndex));
+  Stp(scratch4, scratch5,
+      MemOperand(dst_untagged, kXRegSize * 2, PostIndex));
+ Sub(remaining, remaining, 1);
+ Cbnz(remaining, &loop);
+
+ // Handle the leftovers.
+ if (count & 1) {
+ Ldr(scratch4, MemOperand(src_untagged));
+ Str(scratch4, MemOperand(dst_untagged));
+ }
+}
+
+
+void MacroAssembler::CopyFieldsUnrolledPairsHelper(Register dst,
+ Register src,
+ unsigned count,
+ Register scratch1,
+ Register scratch2,
+ Register scratch3,
+ Register scratch4) {
+ // Untag src and dst into scratch registers.
+ // Copy src->dst in an unrolled loop.
+ ASSERT(!AreAliased(dst, src, scratch1, scratch2, scratch3, scratch4));
+
+ const Register& dst_untagged = scratch1;
+ const Register& src_untagged = scratch2;
+  Sub(dst_untagged, dst, kHeapObjectTag);
+  Sub(src_untagged, src, kHeapObjectTag);
+
+ // Copy fields in pairs.
+ for (unsigned i = 0; i < count / 2; i++) {
+ Ldp(scratch3, scratch4, MemOperand(src_untagged, kXRegSize * 2, PostIndex));
+ Stp(scratch3, scratch4, MemOperand(dst_untagged, kXRegSize * 2, PostIndex));
+ }
+
+ // Handle the leftovers.
+ if (count & 1) {
+ Ldr(scratch3, MemOperand(src_untagged));
+ Str(scratch3, MemOperand(dst_untagged));
+ }
+}
+
+
+void MacroAssembler::CopyFieldsUnrolledHelper(Register dst,
+ Register src,
+ unsigned count,
+ Register scratch1,
+ Register scratch2,
+ Register scratch3) {
+ // Untag src and dst into scratch registers.
+ // Copy src->dst in an unrolled loop.
+ ASSERT(!AreAliased(dst, src, scratch1, scratch2, scratch3));
+
+ const Register& dst_untagged = scratch1;
+ const Register& src_untagged = scratch2;
+ Sub(dst_untagged, dst, kHeapObjectTag);
+ Sub(src_untagged, src, kHeapObjectTag);
+
+ // Copy fields one by one.
+ for (unsigned i = 0; i < count; i++) {
+ Ldr(scratch3, MemOperand(src_untagged, kXRegSize, PostIndex));
+ Str(scratch3, MemOperand(dst_untagged, kXRegSize, PostIndex));
+ }
+}
+
+
+void MacroAssembler::CopyFields(Register dst, Register src, CPURegList temps,
+ unsigned count) {
+ // One of two methods is used:
+ //
+ // For high 'count' values where many scratch registers are available:
+ // Untag src and dst into scratch registers.
+ // Copy src->dst in a tight loop.
+ //
+ // For low 'count' values or where few scratch registers are available:
+ // Untag src and dst into scratch registers.
+ // Copy src->dst in an unrolled loop.
+ //
+ // In both cases, fields are copied in pairs if possible, and left-overs are
+ // handled separately.
+ ASSERT(!AreAliased(dst, src));
+ ASSERT(!temps.IncludesAliasOf(dst));
+ ASSERT(!temps.IncludesAliasOf(src));
+ ASSERT(!temps.IncludesAliasOf(xzr));
+
+ if (emit_debug_code()) {
+ Cmp(dst, src);
+ Check(ne, kTheSourceAndDestinationAreTheSame);
+ }
+
+ // The value of 'count' at which a loop will be generated (if there are
+ // enough scratch registers).
+ static const unsigned kLoopThreshold = 8;
+
+ UseScratchRegisterScope masm_temps(this);
+ if ((temps.Count() >= 3) && (count >= kLoopThreshold)) {
+ CopyFieldsLoopPairsHelper(dst, src, count,
+ Register(temps.PopLowestIndex()),
+ Register(temps.PopLowestIndex()),
+ Register(temps.PopLowestIndex()),
+ masm_temps.AcquireX(),
+ masm_temps.AcquireX());
+ } else if (temps.Count() >= 2) {
+ CopyFieldsUnrolledPairsHelper(dst, src, count,
+ Register(temps.PopLowestIndex()),
+ Register(temps.PopLowestIndex()),
+ masm_temps.AcquireX(),
+ masm_temps.AcquireX());
+ } else if (temps.Count() == 1) {
+ CopyFieldsUnrolledHelper(dst, src, count,
+ Register(temps.PopLowestIndex()),
+ masm_temps.AcquireX(),
+ masm_temps.AcquireX());
+ } else {
+ UNREACHABLE();
+ }
+}
+
+
+void MacroAssembler::CopyBytes(Register dst,
+ Register src,
+ Register length,
+ Register scratch,
+ CopyHint hint) {
+ UseScratchRegisterScope temps(this);
+ Register tmp1 = temps.AcquireX();
+ Register tmp2 = temps.AcquireX();
+ ASSERT(!AreAliased(src, dst, length, scratch, tmp1, tmp2));
+ ASSERT(!AreAliased(src, dst, csp));
+
+ if (emit_debug_code()) {
+ // Check copy length.
+ Cmp(length, 0);
+ Assert(ge, kUnexpectedNegativeValue);
+
+ // Check src and dst buffers don't overlap.
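+    // They do not overlap when src + length <= dst or dst + length <= src;
+    // the Ccmp below folds these two checks into a single assertion.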
+ Add(scratch, src, length); // Calculate end of src buffer.
+ Cmp(scratch, dst);
+ Add(scratch, dst, length); // Calculate end of dst buffer.
+ Ccmp(scratch, src, ZFlag, gt);
+ Assert(le, kCopyBuffersOverlap);
+ }
+
+ Label short_copy, short_loop, bulk_loop, done;
+
+ if ((hint == kCopyLong || hint == kCopyUnknown) && !FLAG_optimize_for_size) {
+ Register bulk_length = scratch;
+ int pair_size = 2 * kXRegSize;
+ int pair_mask = pair_size - 1;
+
+ Bic(bulk_length, length, pair_mask);
+ Cbz(bulk_length, &short_copy);
+ Bind(&bulk_loop);
+ Sub(bulk_length, bulk_length, pair_size);
+ Ldp(tmp1, tmp2, MemOperand(src, pair_size, PostIndex));
+ Stp(tmp1, tmp2, MemOperand(dst, pair_size, PostIndex));
+ Cbnz(bulk_length, &bulk_loop);
+
+ And(length, length, pair_mask);
+ }
+
+ Bind(&short_copy);
+ Cbz(length, &done);
+ Bind(&short_loop);
+ Sub(length, length, 1);
+ Ldrb(tmp1, MemOperand(src, 1, PostIndex));
+ Strb(tmp1, MemOperand(dst, 1, PostIndex));
+ Cbnz(length, &short_loop);
+
+ Bind(&done);
+}
+
+
+void MacroAssembler::FillFields(Register dst,
+ Register field_count,
+ Register filler) {
+ ASSERT(!dst.Is(csp));
+ UseScratchRegisterScope temps(this);
+ Register field_ptr = temps.AcquireX();
+ Register counter = temps.AcquireX();
+ Label done;
+
+  // Decrement count. If the result is negative, count was zero and there's
+  // nothing to do. If count was one, the flags are set to fail the gt
+  // condition at the end of the pairs loop.
+ Subs(counter, field_count, 1);
+ B(lt, &done);
+
+ // There's at least one field to fill, so do this unconditionally.
+ Str(filler, MemOperand(dst, kPointerSize, PostIndex));
+
+ // If the bottom bit of counter is set, there are an even number of fields to
+ // fill, so pull the start pointer back by one field, allowing the pairs loop
+ // to overwrite the field that was stored above.
+ And(field_ptr, counter, 1);
+ Sub(field_ptr, dst, Operand(field_ptr, LSL, kPointerSizeLog2));
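+  // For example, with three fields the counter starts at 2: the store above
+  // fills field 0 and one pair store fills fields 1 and 2. With four fields
+  // the counter starts at 3 (odd), field_ptr is pulled back by one field, and
+  // two pair stores re-fill field 0 and then fill fields 1 to 3.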
+
+ // Store filler to memory in pairs.
+ Label entry, loop;
+ B(&entry);
+ Bind(&loop);
+ Stp(filler, filler, MemOperand(field_ptr, 2 * kPointerSize, PostIndex));
+ Subs(counter, counter, 2);
+ Bind(&entry);
+ B(gt, &loop);
+
+ Bind(&done);
+}
+
+
+void MacroAssembler::JumpIfEitherIsNotSequentialAsciiStrings(
+ Register first,
+ Register second,
+ Register scratch1,
+ Register scratch2,
+ Label* failure,
+ SmiCheckType smi_check) {
+
+ if (smi_check == DO_SMI_CHECK) {
+ JumpIfEitherSmi(first, second, failure);
+ } else if (emit_debug_code()) {
+ ASSERT(smi_check == DONT_DO_SMI_CHECK);
+ Label not_smi;
+ JumpIfEitherSmi(first, second, NULL, &not_smi);
+
+ // At least one input is a smi, but the flags indicated a smi check wasn't
+ // needed.
+ Abort(kUnexpectedSmi);
+
+ Bind(&not_smi);
+ }
+
+ // Test that both first and second are sequential ASCII strings.
+ Ldr(scratch1, FieldMemOperand(first, HeapObject::kMapOffset));
+ Ldr(scratch2, FieldMemOperand(second, HeapObject::kMapOffset));
+ Ldrb(scratch1, FieldMemOperand(scratch1, Map::kInstanceTypeOffset));
+ Ldrb(scratch2, FieldMemOperand(scratch2, Map::kInstanceTypeOffset));
+
+ JumpIfEitherInstanceTypeIsNotSequentialAscii(scratch1,
+ scratch2,
+ scratch1,
+ scratch2,
+ failure);
+}
+
+
+void MacroAssembler::JumpIfEitherInstanceTypeIsNotSequentialAscii(
+ Register first,
+ Register second,
+ Register scratch1,
+ Register scratch2,
+ Label* failure) {
+ ASSERT(!AreAliased(scratch1, second));
+ ASSERT(!AreAliased(scratch1, scratch2));
+ static const int kFlatAsciiStringMask =
+ kIsNotStringMask | kStringEncodingMask | kStringRepresentationMask;
+ static const int kFlatAsciiStringTag = ASCII_STRING_TYPE;
+ And(scratch1, first, kFlatAsciiStringMask);
+ And(scratch2, second, kFlatAsciiStringMask);
+ Cmp(scratch1, kFlatAsciiStringTag);
+ Ccmp(scratch2, kFlatAsciiStringTag, NoFlag, eq);
+ B(ne, failure);
+}
+
+
+void MacroAssembler::JumpIfInstanceTypeIsNotSequentialAscii(Register type,
+ Register scratch,
+ Label* failure) {
+ const int kFlatAsciiStringMask =
+ kIsNotStringMask | kStringEncodingMask | kStringRepresentationMask;
+ const int kFlatAsciiStringTag =
+ kStringTag | kOneByteStringTag | kSeqStringTag;
+ And(scratch, type, kFlatAsciiStringMask);
+ Cmp(scratch, kFlatAsciiStringTag);
+ B(ne, failure);
+}
+
+
+void MacroAssembler::JumpIfBothInstanceTypesAreNotSequentialAscii(
+ Register first,
+ Register second,
+ Register scratch1,
+ Register scratch2,
+ Label* failure) {
+ ASSERT(!AreAliased(first, second, scratch1, scratch2));
+ const int kFlatAsciiStringMask =
+ kIsNotStringMask | kStringEncodingMask | kStringRepresentationMask;
+ const int kFlatAsciiStringTag =
+ kStringTag | kOneByteStringTag | kSeqStringTag;
+ And(scratch1, first, kFlatAsciiStringMask);
+ And(scratch2, second, kFlatAsciiStringMask);
+ Cmp(scratch1, kFlatAsciiStringTag);
+ Ccmp(scratch2, kFlatAsciiStringTag, NoFlag, eq);
+ B(ne, failure);
+}
+
+
+void MacroAssembler::JumpIfNotUniqueName(Register type,
+ Label* not_unique_name) {
+ STATIC_ASSERT((kInternalizedTag == 0) && (kStringTag == 0));
+ // if ((type is string && type is internalized) || type == SYMBOL_TYPE) {
+ // continue
+ // } else {
+ // goto not_unique_name
+ // }
+ Tst(type, kIsNotStringMask | kIsNotInternalizedMask);
+ Ccmp(type, SYMBOL_TYPE, ZFlag, ne);
+ B(ne, not_unique_name);
+}
+
+
+void MacroAssembler::InvokePrologue(const ParameterCount& expected,
+ const ParameterCount& actual,
+ Handle<Code> code_constant,
+ Register code_reg,
+ Label* done,
+ InvokeFlag flag,
+ bool* definitely_mismatches,
+ const CallWrapper& call_wrapper) {
+ bool definitely_matches = false;
+ *definitely_mismatches = false;
+ Label regular_invoke;
+
+ // Check whether the expected and actual arguments count match. If not,
+  // set up registers according to contract with ArgumentsAdaptorTrampoline:
+ // x0: actual arguments count.
+ // x1: function (passed through to callee).
+ // x2: expected arguments count.
+
+ // The code below is made a lot easier because the calling code already sets
+ // up actual and expected registers according to the contract if values are
+ // passed in registers.
+ ASSERT(actual.is_immediate() || actual.reg().is(x0));
+ ASSERT(expected.is_immediate() || expected.reg().is(x2));
+ ASSERT((!code_constant.is_null() && code_reg.is(no_reg)) || code_reg.is(x3));
+
+ if (expected.is_immediate()) {
+ ASSERT(actual.is_immediate());
+ if (expected.immediate() == actual.immediate()) {
+ definitely_matches = true;
+
+ } else {
+ Mov(x0, actual.immediate());
+ if (expected.immediate() ==
+ SharedFunctionInfo::kDontAdaptArgumentsSentinel) {
+ // Don't worry about adapting arguments for builtins that
+        // don't want that done. Skip adaptation code by making it look
+ // like we have a match between expected and actual number of
+ // arguments.
+ definitely_matches = true;
+ } else {
+ *definitely_mismatches = true;
+ // Set up x2 for the argument adaptor.
+ Mov(x2, expected.immediate());
+ }
+ }
+
+ } else { // expected is a register.
+ Operand actual_op = actual.is_immediate() ? Operand(actual.immediate())
+ : Operand(actual.reg());
+ // If actual == expected perform a regular invocation.
+ Cmp(expected.reg(), actual_op);
+ B(eq, &regular_invoke);
+ // Otherwise set up x0 for the argument adaptor.
+ Mov(x0, actual_op);
+ }
+
+ // If the argument counts may mismatch, generate a call to the argument
+ // adaptor.
+ if (!definitely_matches) {
+ if (!code_constant.is_null()) {
+ Mov(x3, Operand(code_constant));
+ Add(x3, x3, Code::kHeaderSize - kHeapObjectTag);
+ }
+
+ Handle<Code> adaptor =
+ isolate()->builtins()->ArgumentsAdaptorTrampoline();
+ if (flag == CALL_FUNCTION) {
+ call_wrapper.BeforeCall(CallSize(adaptor));
+ Call(adaptor);
+ call_wrapper.AfterCall();
+ if (!*definitely_mismatches) {
+ // If the arg counts don't match, no extra code is emitted by
+ // MAsm::InvokeCode and we can just fall through.
+ B(done);
+ }
+ } else {
+ Jump(adaptor, RelocInfo::CODE_TARGET);
+ }
+ }
+ Bind(&regular_invoke);
+}
+
+
+void MacroAssembler::InvokeCode(Register code,
+ const ParameterCount& expected,
+ const ParameterCount& actual,
+ InvokeFlag flag,
+ const CallWrapper& call_wrapper) {
+ // You can't call a function without a valid frame.
+ ASSERT(flag == JUMP_FUNCTION || has_frame());
+
+ Label done;
+
+ bool definitely_mismatches = false;
+ InvokePrologue(expected, actual, Handle<Code>::null(), code, &done, flag,
+ &definitely_mismatches, call_wrapper);
+
+ // If we are certain that actual != expected, then we know InvokePrologue will
+ // have handled the call through the argument adaptor mechanism.
+ // The called function expects the call kind in x5.
+ if (!definitely_mismatches) {
+ if (flag == CALL_FUNCTION) {
+ call_wrapper.BeforeCall(CallSize(code));
+ Call(code);
+ call_wrapper.AfterCall();
+ } else {
+ ASSERT(flag == JUMP_FUNCTION);
+ Jump(code);
+ }
+ }
+
+ // Continue here if InvokePrologue does handle the invocation due to
+ // mismatched parameter counts.
+ Bind(&done);
+}
+
+
+void MacroAssembler::InvokeFunction(Register function,
+ const ParameterCount& actual,
+ InvokeFlag flag,
+ const CallWrapper& call_wrapper) {
+ // You can't call a function without a valid frame.
+ ASSERT(flag == JUMP_FUNCTION || has_frame());
+
+ // Contract with called JS functions requires that function is passed in x1.
+ // (See FullCodeGenerator::Generate().)
+ ASSERT(function.is(x1));
+
+ Register expected_reg = x2;
+ Register code_reg = x3;
+
+ Ldr(cp, FieldMemOperand(function, JSFunction::kContextOffset));
+ // The number of arguments is stored as an int32_t, and -1 is a marker
+ // (SharedFunctionInfo::kDontAdaptArgumentsSentinel), so we need sign
+ // extension to correctly handle it.
+ Ldr(expected_reg, FieldMemOperand(function,
+ JSFunction::kSharedFunctionInfoOffset));
+ Ldrsw(expected_reg,
+ FieldMemOperand(expected_reg,
+ SharedFunctionInfo::kFormalParameterCountOffset));
+ Ldr(code_reg,
+ FieldMemOperand(function, JSFunction::kCodeEntryOffset));
+
+ ParameterCount expected(expected_reg);
+ InvokeCode(code_reg, expected, actual, flag, call_wrapper);
+}
+
+
+void MacroAssembler::InvokeFunction(Register function,
+ const ParameterCount& expected,
+ const ParameterCount& actual,
+ InvokeFlag flag,
+ const CallWrapper& call_wrapper) {
+ // You can't call a function without a valid frame.
+ ASSERT(flag == JUMP_FUNCTION || has_frame());
+
+ // Contract with called JS functions requires that function is passed in x1.
+ // (See FullCodeGenerator::Generate().)
+ ASSERT(function.Is(x1));
+
+ Register code_reg = x3;
+
+ // Set up the context.
+ Ldr(cp, FieldMemOperand(function, JSFunction::kContextOffset));
+
+ // We call indirectly through the code field in the function to
+ // allow recompilation to take effect without changing any of the
+ // call sites.
+ Ldr(code_reg, FieldMemOperand(function, JSFunction::kCodeEntryOffset));
+ InvokeCode(code_reg, expected, actual, flag, call_wrapper);
+}
+
+
+void MacroAssembler::InvokeFunction(Handle<JSFunction> function,
+ const ParameterCount& expected,
+ const ParameterCount& actual,
+ InvokeFlag flag,
+ const CallWrapper& call_wrapper) {
+ // Contract with called JS functions requires that function is passed in x1.
+ // (See FullCodeGenerator::Generate().)
+ __ LoadObject(x1, function);
+ InvokeFunction(x1, expected, actual, flag, call_wrapper);
+}
+
+
+void MacroAssembler::TryConvertDoubleToInt64(Register result,
+ DoubleRegister double_input,
+ Label* done) {
+ // Try to convert with an FPU convert instruction. It's trivial to compute
+ // the modulo operation on an integer register so we convert to a 64-bit
+ // integer.
+ //
+ // Fcvtzs will saturate to INT64_MIN (0x800...00) or INT64_MAX (0x7ff...ff)
+ // when the double is out of range. NaNs and infinities will be converted to 0
+ // (as ECMA-262 requires).
+ Fcvtzs(result.X(), double_input);
+
+ // The values INT64_MIN (0x800...00) or INT64_MAX (0x7ff...ff) are not
+ // representable using a double, so if the result is one of those then we know
+  // that saturation occurred, and we need to manually handle the conversion.
+ //
+ // It is easy to detect INT64_MIN and INT64_MAX because adding or subtracting
+ // 1 will cause signed overflow.
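+  // For example, if Fcvtzs saturated to INT64_MAX, the Cmp below does not
+  // overflow but the Ccmp computes INT64_MAX + 1 and sets the V flag, so the
+  // B(vc, done) below is not taken and the caller falls back to its slow path.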
+ Cmp(result.X(), 1);
+ Ccmp(result.X(), -1, VFlag, vc);
+
+ B(vc, done);
+}
+
+
+void MacroAssembler::TruncateDoubleToI(Register result,
+ DoubleRegister double_input) {
+ Label done;
+ ASSERT(jssp.Is(StackPointer()));
+
+ // Try to convert the double to an int64. If successful, the bottom 32 bits
+ // contain our truncated int32 result.
+ TryConvertDoubleToInt64(result, double_input, &done);
+
+  // If we fell through then the inline version didn't succeed - call the stub.
+ Push(lr);
+ Push(double_input); // Put input on stack.
+
+ DoubleToIStub stub(jssp,
+ result,
+ 0,
+ true, // is_truncating
+ true); // skip_fastpath
+ CallStub(&stub); // DoubleToIStub preserves any registers it needs to clobber
+
+ Drop(1, kDoubleSize); // Drop the double input on the stack.
+ Pop(lr);
+
+ Bind(&done);
+}
+
+
+void MacroAssembler::TruncateHeapNumberToI(Register result,
+ Register object) {
+ Label done;
+ ASSERT(!result.is(object));
+ ASSERT(jssp.Is(StackPointer()));
+
+ Ldr(fp_scratch, FieldMemOperand(object, HeapNumber::kValueOffset));
+
+ // Try to convert the double to an int64. If successful, the bottom 32 bits
+ // contain our truncated int32 result.
+ TryConvertDoubleToInt64(result, fp_scratch, &done);
+
+  // If we fell through then the inline version didn't succeed - call the stub.
+ Push(lr);
+ DoubleToIStub stub(object,
+ result,
+ HeapNumber::kValueOffset - kHeapObjectTag,
+ true, // is_truncating
+ true); // skip_fastpath
+ CallStub(&stub); // DoubleToIStub preserves any registers it needs to clobber
+ Pop(lr);
+
+ Bind(&done);
+}
+
+
+void MacroAssembler::Prologue(PrologueFrameMode frame_mode) {
+ if (frame_mode == BUILD_STUB_FRAME) {
+ ASSERT(StackPointer().Is(jssp));
+ UseScratchRegisterScope temps(this);
+ Register temp = temps.AcquireX();
+ __ Mov(temp, Smi::FromInt(StackFrame::STUB));
+ // Compiled stubs don't age, and so they don't need the predictable code
+ // ageing sequence.
+ __ Push(lr, fp, cp, temp);
+ __ Add(fp, jssp, StandardFrameConstants::kFixedFrameSizeFromFp);
+ } else {
+ if (isolate()->IsCodePreAgingActive()) {
+ Code* stub = Code::GetPreAgedCodeAgeStub(isolate());
+ __ EmitCodeAgeSequence(stub);
+ } else {
+ __ EmitFrameSetupForCodeAgePatching();
+ }
+ }
+}
+
+
+void MacroAssembler::EnterFrame(StackFrame::Type type) {
+ ASSERT(jssp.Is(StackPointer()));
+ UseScratchRegisterScope temps(this);
+ Register type_reg = temps.AcquireX();
+ Register code_reg = temps.AcquireX();
+
+ Push(lr, fp, cp);
+ Mov(type_reg, Smi::FromInt(type));
+ Mov(code_reg, Operand(CodeObject()));
+ Push(type_reg, code_reg);
+ // jssp[4] : lr
+ // jssp[3] : fp
+ // jssp[2] : cp
+ // jssp[1] : type
+ // jssp[0] : code object
+
+ // Adjust FP to point to saved FP.
+ Add(fp, jssp, StandardFrameConstants::kFixedFrameSizeFromFp + kPointerSize);
+}
+
+
+void MacroAssembler::LeaveFrame(StackFrame::Type type) {
+ ASSERT(jssp.Is(StackPointer()));
+ // Drop the execution stack down to the frame pointer and restore
+ // the caller frame pointer and return address.
+ Mov(jssp, fp);
+ AssertStackConsistency();
+ Pop(fp, lr);
+}
+
+
+void MacroAssembler::ExitFramePreserveFPRegs() {
+ PushCPURegList(kCallerSavedFP);
+}
+
+
+void MacroAssembler::ExitFrameRestoreFPRegs() {
+ // Read the registers from the stack without popping them. The stack pointer
+ // will be reset as part of the unwinding process.
+ CPURegList saved_fp_regs = kCallerSavedFP;
+ ASSERT(saved_fp_regs.Count() % 2 == 0);
+
+ int offset = ExitFrameConstants::kLastExitFrameField;
+ while (!saved_fp_regs.IsEmpty()) {
+ const CPURegister& dst0 = saved_fp_regs.PopHighestIndex();
+ const CPURegister& dst1 = saved_fp_regs.PopHighestIndex();
+ offset -= 2 * kDRegSize;
+ Ldp(dst1, dst0, MemOperand(fp, offset));
+ }
+}
+
+
+void MacroAssembler::EnterExitFrame(bool save_doubles,
+ const Register& scratch,
+ int extra_space) {
+ ASSERT(jssp.Is(StackPointer()));
+
+ // Set up the new stack frame.
+ Mov(scratch, Operand(CodeObject()));
+ Push(lr, fp);
+ Mov(fp, StackPointer());
+ Push(xzr, scratch);
+ // fp[8]: CallerPC (lr)
+ // fp -> fp[0]: CallerFP (old fp)
+ // fp[-8]: Space reserved for SPOffset.
+ // jssp -> fp[-16]: CodeObject()
+ STATIC_ASSERT((2 * kPointerSize) ==
+ ExitFrameConstants::kCallerSPDisplacement);
+ STATIC_ASSERT((1 * kPointerSize) == ExitFrameConstants::kCallerPCOffset);
+ STATIC_ASSERT((0 * kPointerSize) == ExitFrameConstants::kCallerFPOffset);
+ STATIC_ASSERT((-1 * kPointerSize) == ExitFrameConstants::kSPOffset);
+ STATIC_ASSERT((-2 * kPointerSize) == ExitFrameConstants::kCodeOffset);
+
+ // Save the frame pointer and context pointer in the top frame.
+ Mov(scratch, Operand(ExternalReference(Isolate::kCEntryFPAddress,
+ isolate())));
+ Str(fp, MemOperand(scratch));
+ Mov(scratch, Operand(ExternalReference(Isolate::kContextAddress,
+ isolate())));
+ Str(cp, MemOperand(scratch));
+
+ STATIC_ASSERT((-2 * kPointerSize) ==
+ ExitFrameConstants::kLastExitFrameField);
+ if (save_doubles) {
+ ExitFramePreserveFPRegs();
+ }
+
+ // Reserve space for the return address and for user requested memory.
+ // We do this before aligning to make sure that we end up correctly
+ // aligned with the minimum of wasted space.
+ Claim(extra_space + 1, kXRegSize);
+ // fp[8]: CallerPC (lr)
+ // fp -> fp[0]: CallerFP (old fp)
+ // fp[-8]: Space reserved for SPOffset.
+ // fp[-16]: CodeObject()
+ // fp[-16 - fp_size]: Saved doubles (if save_doubles is true).
+ // jssp[8]: Extra space reserved for caller (if extra_space != 0).
+ // jssp -> jssp[0]: Space reserved for the return address.
+
+ // Align and synchronize the system stack pointer with jssp.
+ AlignAndSetCSPForFrame();
+ ASSERT(csp.Is(StackPointer()));
+
+ // fp[8]: CallerPC (lr)
+ // fp -> fp[0]: CallerFP (old fp)
+ // fp[-8]: Space reserved for SPOffset.
+ // fp[-16]: CodeObject()
+ // fp[-16 - fp_size]: Saved doubles (if save_doubles is true).
+ // csp[8]: Memory reserved for the caller if extra_space != 0.
+ // Alignment padding, if necessary.
+ // csp -> csp[0]: Space reserved for the return address.
+
+ // ExitFrame::GetStateForFramePointer expects to find the return address at
+ // the memory address immediately below the pointer stored in SPOffset.
+ // It is not safe to derive much else from SPOffset, because the size of the
+ // padding can vary.
+ Add(scratch, csp, kXRegSize);
+ Str(scratch, MemOperand(fp, ExitFrameConstants::kSPOffset));
+}
+
+
+// Leave the current exit frame.
+void MacroAssembler::LeaveExitFrame(bool restore_doubles,
+ const Register& scratch,
+ bool restore_context) {
+ ASSERT(csp.Is(StackPointer()));
+
+ if (restore_doubles) {
+ ExitFrameRestoreFPRegs();
+ }
+
+ // Restore the context pointer from the top frame.
+ if (restore_context) {
+ Mov(scratch, Operand(ExternalReference(Isolate::kContextAddress,
+ isolate())));
+ Ldr(cp, MemOperand(scratch));
+ }
+
+ if (emit_debug_code()) {
+ // Also emit debug code to clear the cp in the top frame.
+ Mov(scratch, Operand(ExternalReference(Isolate::kContextAddress,
+ isolate())));
+ Str(xzr, MemOperand(scratch));
+ }
+ // Clear the frame pointer from the top frame.
+ Mov(scratch, Operand(ExternalReference(Isolate::kCEntryFPAddress,
+ isolate())));
+ Str(xzr, MemOperand(scratch));
+
+ // Pop the exit frame.
+ // fp[8]: CallerPC (lr)
+ // fp -> fp[0]: CallerFP (old fp)
+ // fp[...]: The rest of the frame.
+ Mov(jssp, fp);
+ SetStackPointer(jssp);
+ AssertStackConsistency();
+ Pop(fp, lr);
+}
+
+
+void MacroAssembler::SetCounter(StatsCounter* counter, int value,
+ Register scratch1, Register scratch2) {
+ if (FLAG_native_code_counters && counter->Enabled()) {
+ Mov(scratch1, value);
+ Mov(scratch2, ExternalReference(counter));
+ Str(scratch1, MemOperand(scratch2));
+ }
+}
+
+
+void MacroAssembler::IncrementCounter(StatsCounter* counter, int value,
+ Register scratch1, Register scratch2) {
+ ASSERT(value != 0);
+ if (FLAG_native_code_counters && counter->Enabled()) {
+ Mov(scratch2, ExternalReference(counter));
+ Ldr(scratch1, MemOperand(scratch2));
+ Add(scratch1, scratch1, value);
+ Str(scratch1, MemOperand(scratch2));
+ }
+}
+
+
+void MacroAssembler::DecrementCounter(StatsCounter* counter, int value,
+ Register scratch1, Register scratch2) {
+ IncrementCounter(counter, -value, scratch1, scratch2);
+}
+
+
+void MacroAssembler::LoadContext(Register dst, int context_chain_length) {
+ if (context_chain_length > 0) {
+ // Move up the chain of contexts to the context containing the slot.
+ Ldr(dst, MemOperand(cp, Context::SlotOffset(Context::PREVIOUS_INDEX)));
+ for (int i = 1; i < context_chain_length; i++) {
+ Ldr(dst, MemOperand(dst, Context::SlotOffset(Context::PREVIOUS_INDEX)));
+ }
+ } else {
+ // Slot is in the current function context. Move it into the
+ // destination register in case we store into it (the write barrier
+ // cannot be allowed to destroy the context in cp).
+ Mov(dst, cp);
+ }
+}
+
+
+#ifdef ENABLE_DEBUGGER_SUPPORT
+void MacroAssembler::DebugBreak() {
+ Mov(x0, 0);
+ Mov(x1, ExternalReference(Runtime::kDebugBreak, isolate()));
+ CEntryStub ces(1);
+ ASSERT(AllowThisStubCall(&ces));
+ Call(ces.GetCode(isolate()), RelocInfo::DEBUG_BREAK);
+}
+#endif
+
+
+void MacroAssembler::PushTryHandler(StackHandler::Kind kind,
+ int handler_index) {
+ ASSERT(jssp.Is(StackPointer()));
+ // Adjust this code if the asserts don't hold.
+ STATIC_ASSERT(StackHandlerConstants::kSize == 5 * kPointerSize);
+ STATIC_ASSERT(StackHandlerConstants::kNextOffset == 0 * kPointerSize);
+ STATIC_ASSERT(StackHandlerConstants::kCodeOffset == 1 * kPointerSize);
+ STATIC_ASSERT(StackHandlerConstants::kStateOffset == 2 * kPointerSize);
+ STATIC_ASSERT(StackHandlerConstants::kContextOffset == 3 * kPointerSize);
+ STATIC_ASSERT(StackHandlerConstants::kFPOffset == 4 * kPointerSize);
+
+ // For the JSEntry handler, we must preserve the live registers x0-x4.
+ // (See JSEntryStub::GenerateBody().)
+
+ unsigned state =
+ StackHandler::IndexField::encode(handler_index) |
+ StackHandler::KindField::encode(kind);
+
+ // Set up the code object and the state for pushing.
+ Mov(x10, Operand(CodeObject()));
+ Mov(x11, state);
+
+ // Push the frame pointer, context, state, and code object.
+ if (kind == StackHandler::JS_ENTRY) {
+ ASSERT(Smi::FromInt(0) == 0);
+ Push(xzr, xzr, x11, x10);
+ } else {
+ Push(fp, cp, x11, x10);
+ }
+
+ // Link the current handler as the next handler.
+ Mov(x11, ExternalReference(Isolate::kHandlerAddress, isolate()));
+ Ldr(x10, MemOperand(x11));
+ Push(x10);
+ // Set this new handler as the current one.
+ Str(jssp, MemOperand(x11));
+}
+
+
+void MacroAssembler::PopTryHandler() {
+ STATIC_ASSERT(StackHandlerConstants::kNextOffset == 0);
+ Pop(x10);
+ Mov(x11, ExternalReference(Isolate::kHandlerAddress, isolate()));
+ Drop(StackHandlerConstants::kSize - kXRegSize, kByteSizeInBytes);
+ Str(x10, MemOperand(x11));
+}
+
+
+void MacroAssembler::Allocate(int object_size,
+ Register result,
+ Register scratch1,
+ Register scratch2,
+ Label* gc_required,
+ AllocationFlags flags) {
+ ASSERT(object_size <= Page::kMaxRegularHeapObjectSize);
+ if (!FLAG_inline_new) {
+ if (emit_debug_code()) {
+ // Trash the registers to simulate an allocation failure.
+ // We apply salt to the original zap value to easily spot the values.
+ Mov(result, (kDebugZapValue & ~0xffL) | 0x11L);
+ Mov(scratch1, (kDebugZapValue & ~0xffL) | 0x21L);
+ Mov(scratch2, (kDebugZapValue & ~0xffL) | 0x21L);
+ }
+ B(gc_required);
+ return;
+ }
+
+ UseScratchRegisterScope temps(this);
+ Register scratch3 = temps.AcquireX();
+
+ ASSERT(!AreAliased(result, scratch1, scratch2, scratch3));
+ ASSERT(result.Is64Bits() && scratch1.Is64Bits() && scratch2.Is64Bits());
+
+ // Make object size into bytes.
+ if ((flags & SIZE_IN_WORDS) != 0) {
+ object_size *= kPointerSize;
+ }
+ ASSERT(0 == (object_size & kObjectAlignmentMask));
+
+ // Check relative positions of allocation top and limit addresses.
+ // The values must be adjacent in memory to allow the use of LDP.
+ ExternalReference heap_allocation_top =
+ AllocationUtils::GetAllocationTopReference(isolate(), flags);
+ ExternalReference heap_allocation_limit =
+ AllocationUtils::GetAllocationLimitReference(isolate(), flags);
+ intptr_t top = reinterpret_cast<intptr_t>(heap_allocation_top.address());
+ intptr_t limit = reinterpret_cast<intptr_t>(heap_allocation_limit.address());
+ ASSERT((limit - top) == kPointerSize);
+
+ // Set up allocation top address and object size registers.
+ Register top_address = scratch1;
+ Register allocation_limit = scratch2;
+ Mov(top_address, Operand(heap_allocation_top));
+
+ if ((flags & RESULT_CONTAINS_TOP) == 0) {
+ // Load allocation top into result and the allocation limit.
+ Ldp(result, allocation_limit, MemOperand(top_address));
+ } else {
+ if (emit_debug_code()) {
+ // Assert that result actually contains top on entry.
+ Ldr(scratch3, MemOperand(top_address));
+ Cmp(result, scratch3);
+ Check(eq, kUnexpectedAllocationTop);
+ }
+ // Load the allocation limit. 'result' already contains the allocation top.
+ Ldr(allocation_limit, MemOperand(top_address, limit - top));
+ }
+
+ // We can ignore DOUBLE_ALIGNMENT flags here because doubles and pointers have
+ // the same alignment on ARM64.
+ STATIC_ASSERT(kPointerAlignment == kDoubleAlignment);
+
+ // Calculate new top and bail out if new space is exhausted.
+ Adds(scratch3, result, object_size);
+ B(vs, gc_required);
+ Cmp(scratch3, allocation_limit);
+ B(hi, gc_required);
+ Str(scratch3, MemOperand(top_address));
+
+ // Tag the object if requested.
+ if ((flags & TAG_OBJECT) != 0) {
+ Orr(result, result, kHeapObjectTag);
+ }
+}
+
+
+void MacroAssembler::Allocate(Register object_size,
+ Register result,
+ Register scratch1,
+ Register scratch2,
+ Label* gc_required,
+ AllocationFlags flags) {
+ if (!FLAG_inline_new) {
+ if (emit_debug_code()) {
+ // Trash the registers to simulate an allocation failure.
+ // We apply salt to the original zap value to easily spot the values.
+ Mov(result, (kDebugZapValue & ~0xffL) | 0x11L);
+ Mov(scratch1, (kDebugZapValue & ~0xffL) | 0x21L);
+ Mov(scratch2, (kDebugZapValue & ~0xffL) | 0x21L);
+ }
+ B(gc_required);
+ return;
+ }
+
+ UseScratchRegisterScope temps(this);
+ Register scratch3 = temps.AcquireX();
+
+ ASSERT(!AreAliased(object_size, result, scratch1, scratch2, scratch3));
+ ASSERT(object_size.Is64Bits() && result.Is64Bits() &&
+ scratch1.Is64Bits() && scratch2.Is64Bits());
+
+ // Check relative positions of allocation top and limit addresses.
+ // The values must be adjacent in memory to allow the use of LDP.
+ ExternalReference heap_allocation_top =
+ AllocationUtils::GetAllocationTopReference(isolate(), flags);
+ ExternalReference heap_allocation_limit =
+ AllocationUtils::GetAllocationLimitReference(isolate(), flags);
+ intptr_t top = reinterpret_cast<intptr_t>(heap_allocation_top.address());
+ intptr_t limit = reinterpret_cast<intptr_t>(heap_allocation_limit.address());
+ ASSERT((limit - top) == kPointerSize);
+
+ // Set up allocation top address and object size registers.
+ Register top_address = scratch1;
+ Register allocation_limit = scratch2;
+ Mov(top_address, heap_allocation_top);
+
+ if ((flags & RESULT_CONTAINS_TOP) == 0) {
+ // Load allocation top into result and the allocation limit.
+ Ldp(result, allocation_limit, MemOperand(top_address));
+ } else {
+ if (emit_debug_code()) {
+ // Assert that result actually contains top on entry.
+ Ldr(scratch3, MemOperand(top_address));
+ Cmp(result, scratch3);
+ Check(eq, kUnexpectedAllocationTop);
+ }
+ // Load the allocation limit. 'result' already contains the allocation top.
+ Ldr(allocation_limit, MemOperand(top_address, limit - top));
+ }
+
+ // We can ignore DOUBLE_ALIGNMENT flags here because doubles and pointers have
+ // the same alignment on ARM64.
+ STATIC_ASSERT(kPointerAlignment == kDoubleAlignment);
+
+  // Calculate new top and bail out if new space is exhausted.
+ if ((flags & SIZE_IN_WORDS) != 0) {
+ Adds(scratch3, result, Operand(object_size, LSL, kPointerSizeLog2));
+ } else {
+ Adds(scratch3, result, object_size);
+ }
+
+ if (emit_debug_code()) {
+ Tst(scratch3, kObjectAlignmentMask);
+ Check(eq, kUnalignedAllocationInNewSpace);
+ }
+
+ B(vs, gc_required);
+ Cmp(scratch3, allocation_limit);
+ B(hi, gc_required);
+ Str(scratch3, MemOperand(top_address));
+
+ // Tag the object if requested.
+ if ((flags & TAG_OBJECT) != 0) {
+ Orr(result, result, kHeapObjectTag);
+ }
+}
+
+
+void MacroAssembler::UndoAllocationInNewSpace(Register object,
+ Register scratch) {
+ ExternalReference new_space_allocation_top =
+ ExternalReference::new_space_allocation_top_address(isolate());
+
+ // Make sure the object has no tag before resetting top.
+ Bic(object, object, kHeapObjectTagMask);
+#ifdef DEBUG
+ // Check that the object un-allocated is below the current top.
+ Mov(scratch, new_space_allocation_top);
+ Ldr(scratch, MemOperand(scratch));
+ Cmp(object, scratch);
+ Check(lt, kUndoAllocationOfNonAllocatedMemory);
+#endif
+ // Write the address of the object to un-allocate as the current top.
+ Mov(scratch, new_space_allocation_top);
+ Str(object, MemOperand(scratch));
+}
+
+
+void MacroAssembler::AllocateTwoByteString(Register result,
+ Register length,
+ Register scratch1,
+ Register scratch2,
+ Register scratch3,
+ Label* gc_required) {
+ ASSERT(!AreAliased(result, length, scratch1, scratch2, scratch3));
+ // Calculate the number of bytes needed for the characters in the string while
+ // observing object alignment.
+ STATIC_ASSERT((SeqTwoByteString::kHeaderSize & kObjectAlignmentMask) == 0);
+ Add(scratch1, length, length); // Length in bytes, not chars.
+ Add(scratch1, scratch1, kObjectAlignmentMask + SeqTwoByteString::kHeaderSize);
+ Bic(scratch1, scratch1, kObjectAlignmentMask);
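+  // scratch1 now holds SeqTwoByteString::kHeaderSize + 2 * length, rounded up
+  // to the next multiple of kObjectAlignment.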
+
+ // Allocate two-byte string in new space.
+ Allocate(scratch1,
+ result,
+ scratch2,
+ scratch3,
+ gc_required,
+ TAG_OBJECT);
+
+ // Set the map, length and hash field.
+ InitializeNewString(result,
+ length,
+ Heap::kStringMapRootIndex,
+ scratch1,
+ scratch2);
+}
+
+
+void MacroAssembler::AllocateAsciiString(Register result,
+ Register length,
+ Register scratch1,
+ Register scratch2,
+ Register scratch3,
+ Label* gc_required) {
+ ASSERT(!AreAliased(result, length, scratch1, scratch2, scratch3));
+ // Calculate the number of bytes needed for the characters in the string while
+ // observing object alignment.
+ STATIC_ASSERT((SeqOneByteString::kHeaderSize & kObjectAlignmentMask) == 0);
+ STATIC_ASSERT(kCharSize == 1);
+ Add(scratch1, length, kObjectAlignmentMask + SeqOneByteString::kHeaderSize);
+ Bic(scratch1, scratch1, kObjectAlignmentMask);
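+  // scratch1 now holds SeqOneByteString::kHeaderSize + length, rounded up to
+  // the next multiple of kObjectAlignment.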
+
+ // Allocate ASCII string in new space.
+ Allocate(scratch1,
+ result,
+ scratch2,
+ scratch3,
+ gc_required,
+ TAG_OBJECT);
+
+ // Set the map, length and hash field.
+ InitializeNewString(result,
+ length,
+ Heap::kAsciiStringMapRootIndex,
+ scratch1,
+ scratch2);
+}
+
+
+void MacroAssembler::AllocateTwoByteConsString(Register result,
+ Register length,
+ Register scratch1,
+ Register scratch2,
+ Label* gc_required) {
+ Allocate(ConsString::kSize, result, scratch1, scratch2, gc_required,
+ TAG_OBJECT);
+
+ InitializeNewString(result,
+ length,
+ Heap::kConsStringMapRootIndex,
+ scratch1,
+ scratch2);
+}
+
+
+void MacroAssembler::AllocateAsciiConsString(Register result,
+ Register length,
+ Register scratch1,
+ Register scratch2,
+ Label* gc_required) {
+ Label allocate_new_space, install_map;
+ AllocationFlags flags = TAG_OBJECT;
+
+ ExternalReference high_promotion_mode = ExternalReference::
+ new_space_high_promotion_mode_active_address(isolate());
+ Mov(scratch1, high_promotion_mode);
+ Ldr(scratch1, MemOperand(scratch1));
+ Cbz(scratch1, &allocate_new_space);
+
+ Allocate(ConsString::kSize,
+ result,
+ scratch1,
+ scratch2,
+ gc_required,
+ static_cast<AllocationFlags>(flags | PRETENURE_OLD_POINTER_SPACE));
+
+ B(&install_map);
+
+ Bind(&allocate_new_space);
+ Allocate(ConsString::kSize,
+ result,
+ scratch1,
+ scratch2,
+ gc_required,
+ flags);
+
+ Bind(&install_map);
+
+ InitializeNewString(result,
+ length,
+ Heap::kConsAsciiStringMapRootIndex,
+ scratch1,
+ scratch2);
+}
+
+
+void MacroAssembler::AllocateTwoByteSlicedString(Register result,
+ Register length,
+ Register scratch1,
+ Register scratch2,
+ Label* gc_required) {
+ ASSERT(!AreAliased(result, length, scratch1, scratch2));
+ Allocate(SlicedString::kSize, result, scratch1, scratch2, gc_required,
+ TAG_OBJECT);
+
+ InitializeNewString(result,
+ length,
+ Heap::kSlicedStringMapRootIndex,
+ scratch1,
+ scratch2);
+}
+
+
+void MacroAssembler::AllocateAsciiSlicedString(Register result,
+ Register length,
+ Register scratch1,
+ Register scratch2,
+ Label* gc_required) {
+ ASSERT(!AreAliased(result, length, scratch1, scratch2));
+ Allocate(SlicedString::kSize, result, scratch1, scratch2, gc_required,
+ TAG_OBJECT);
+
+ InitializeNewString(result,
+ length,
+ Heap::kSlicedAsciiStringMapRootIndex,
+ scratch1,
+ scratch2);
+}
+
+
+// Allocates a heap number or jumps to the need_gc label if the young space
+// is full and a scavenge is needed.
+void MacroAssembler::AllocateHeapNumber(Register result,
+ Label* gc_required,
+ Register scratch1,
+ Register scratch2,
+ Register heap_number_map) {
+ // Allocate an object in the heap for the heap number and tag it as a heap
+ // object.
+ Allocate(HeapNumber::kSize, result, scratch1, scratch2, gc_required,
+ TAG_OBJECT);
+
+ // Store heap number map in the allocated object.
+ if (heap_number_map.Is(NoReg)) {
+ heap_number_map = scratch1;
+ LoadRoot(heap_number_map, Heap::kHeapNumberMapRootIndex);
+ }
+ AssertRegisterIsRoot(heap_number_map, Heap::kHeapNumberMapRootIndex);
+ Str(heap_number_map, FieldMemOperand(result, HeapObject::kMapOffset));
+}
+
+
+void MacroAssembler::AllocateHeapNumberWithValue(Register result,
+ DoubleRegister value,
+ Label* gc_required,
+ Register scratch1,
+ Register scratch2,
+ Register heap_number_map) {
+ // TODO(all): Check if it would be more efficient to use STP to store both
+ // the map and the value.
+ AllocateHeapNumber(result, gc_required, scratch1, scratch2, heap_number_map);
+ Str(value, FieldMemOperand(result, HeapNumber::kValueOffset));
+}
+
+
+void MacroAssembler::JumpIfObjectType(Register object,
+ Register map,
+ Register type_reg,
+ InstanceType type,
+ Label* if_cond_pass,
+ Condition cond) {
+ CompareObjectType(object, map, type_reg, type);
+ B(cond, if_cond_pass);
+}
+
+
+void MacroAssembler::JumpIfNotObjectType(Register object,
+ Register map,
+ Register type_reg,
+ InstanceType type,
+ Label* if_not_object) {
+ JumpIfObjectType(object, map, type_reg, type, if_not_object, ne);
+}
+
+
+// Sets condition flags based on comparison, and returns type in type_reg.
+void MacroAssembler::CompareObjectType(Register object,
+ Register map,
+ Register type_reg,
+ InstanceType type) {
+ Ldr(map, FieldMemOperand(object, HeapObject::kMapOffset));
+ CompareInstanceType(map, type_reg, type);
+}
+
+
+// Sets condition flags based on comparison, and returns type in type_reg.
+void MacroAssembler::CompareInstanceType(Register map,
+ Register type_reg,
+ InstanceType type) {
+ Ldrb(type_reg, FieldMemOperand(map, Map::kInstanceTypeOffset));
+ Cmp(type_reg, type);
+}
+
+
+void MacroAssembler::CompareMap(Register obj,
+ Register scratch,
+ Handle<Map> map) {
+ Ldr(scratch, FieldMemOperand(obj, HeapObject::kMapOffset));
+ CompareMap(scratch, map);
+}
+
+
+void MacroAssembler::CompareMap(Register obj_map,
+ Handle<Map> map) {
+ Cmp(obj_map, Operand(map));
+}
+
+
+void MacroAssembler::CheckMap(Register obj,
+ Register scratch,
+ Handle<Map> map,
+ Label* fail,
+ SmiCheckType smi_check_type) {
+ if (smi_check_type == DO_SMI_CHECK) {
+ JumpIfSmi(obj, fail);
+ }
+
+ CompareMap(obj, scratch, map);
+ B(ne, fail);
+}
+
+
+void MacroAssembler::CheckMap(Register obj,
+ Register scratch,
+ Heap::RootListIndex index,
+ Label* fail,
+ SmiCheckType smi_check_type) {
+ if (smi_check_type == DO_SMI_CHECK) {
+ JumpIfSmi(obj, fail);
+ }
+ Ldr(scratch, FieldMemOperand(obj, HeapObject::kMapOffset));
+ JumpIfNotRoot(scratch, index, fail);
+}
+
+
+void MacroAssembler::CheckMap(Register obj_map,
+ Handle<Map> map,
+ Label* fail,
+ SmiCheckType smi_check_type) {
+ if (smi_check_type == DO_SMI_CHECK) {
+ JumpIfSmi(obj_map, fail);
+ }
+
+ CompareMap(obj_map, map);
+ B(ne, fail);
+}
+
+
+void MacroAssembler::DispatchMap(Register obj,
+ Register scratch,
+ Handle<Map> map,
+ Handle<Code> success,
+ SmiCheckType smi_check_type) {
+ Label fail;
+ if (smi_check_type == DO_SMI_CHECK) {
+ JumpIfSmi(obj, &fail);
+ }
+ Ldr(scratch, FieldMemOperand(obj, HeapObject::kMapOffset));
+ Cmp(scratch, Operand(map));
+ B(ne, &fail);
+ Jump(success, RelocInfo::CODE_TARGET);
+ Bind(&fail);
+}
+
+
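+// Load the object's map bit field, AND it with 'mask' and set the condition
+// flags so that the caller can branch on the result.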
+void MacroAssembler::TestMapBitfield(Register object, uint64_t mask) {
+ UseScratchRegisterScope temps(this);
+ Register temp = temps.AcquireX();
+ Ldr(temp, FieldMemOperand(object, HeapObject::kMapOffset));
+ Ldrb(temp, FieldMemOperand(temp, Map::kBitFieldOffset));
+ Tst(temp, mask);
+}
+
+
+void MacroAssembler::LoadElementsKindFromMap(Register result, Register map) {
+ // Load the map's "bit field 2".
+ __ Ldrb(result, FieldMemOperand(map, Map::kBitField2Offset));
+ // Retrieve elements_kind from bit field 2.
+ __ Ubfx(result, result, Map::kElementsKindShift, Map::kElementsKindBitCount);
+}
+
+
+void MacroAssembler::TryGetFunctionPrototype(Register function,
+ Register result,
+ Register scratch,
+ Label* miss,
+ BoundFunctionAction action) {
+ ASSERT(!AreAliased(function, result, scratch));
+
+ // Check that the receiver isn't a smi.
+ JumpIfSmi(function, miss);
+
+ // Check that the function really is a function. Load map into result reg.
+ JumpIfNotObjectType(function, result, scratch, JS_FUNCTION_TYPE, miss);
+
+ if (action == kMissOnBoundFunction) {
+ Register scratch_w = scratch.W();
+ Ldr(scratch,
+ FieldMemOperand(function, JSFunction::kSharedFunctionInfoOffset));
+ // On 64-bit platforms, compiler hints field is not a smi. See definition of
+ // kCompilerHintsOffset in src/objects.h.
+ Ldr(scratch_w,
+ FieldMemOperand(scratch, SharedFunctionInfo::kCompilerHintsOffset));
+ Tbnz(scratch, SharedFunctionInfo::kBoundFunction, miss);
+ }
+
+ // Make sure that the function has an instance prototype.
+ Label non_instance;
+ Ldrb(scratch, FieldMemOperand(result, Map::kBitFieldOffset));
+ Tbnz(scratch, Map::kHasNonInstancePrototype, &non_instance);
+
+ // Get the prototype or initial map from the function.
+ Ldr(result,
+ FieldMemOperand(function, JSFunction::kPrototypeOrInitialMapOffset));
+
+ // If the prototype or initial map is the hole, don't return it and simply
+ // miss the cache instead. This will allow us to allocate a prototype object
+ // on-demand in the runtime system.
+ JumpIfRoot(result, Heap::kTheHoleValueRootIndex, miss);
+
+ // If the function does not have an initial map, we're done.
+ Label done;
+ JumpIfNotObjectType(result, scratch, scratch, MAP_TYPE, &done);
+
+ // Get the prototype from the initial map.
+ Ldr(result, FieldMemOperand(result, Map::kPrototypeOffset));
+ B(&done);
+
+ // Non-instance prototype: fetch prototype from constructor field in initial
+ // map.
+ Bind(&non_instance);
+ Ldr(result, FieldMemOperand(result, Map::kConstructorOffset));
+
+ // All done.
+ Bind(&done);
+}
+
+
+void MacroAssembler::CompareRoot(const Register& obj,
+ Heap::RootListIndex index) {
+ UseScratchRegisterScope temps(this);
+ Register temp = temps.AcquireX();
+ ASSERT(!AreAliased(obj, temp));
+ LoadRoot(temp, index);
+ Cmp(obj, temp);
+}
+
+
+void MacroAssembler::JumpIfRoot(const Register& obj,
+ Heap::RootListIndex index,
+ Label* if_equal) {
+ CompareRoot(obj, index);
+ B(eq, if_equal);
+}
+
+
+void MacroAssembler::JumpIfNotRoot(const Register& obj,
+ Heap::RootListIndex index,
+ Label* if_not_equal) {
+ CompareRoot(obj, index);
+ B(ne, if_not_equal);
+}
+
+
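+// Compare lhs against rhs and branch to if_true when 'cond' holds, or to
+// if_false otherwise; a label equal to fall_through is reached by falling
+// through rather than by branching. For example (illustrative use only),
+//   CompareAndSplit(x0, 0, eq, &if_zero, &if_not_zero, &next);
+// branches to if_zero when x0 == 0 and to if_not_zero otherwise, unless the
+// corresponding label is &next, the code that immediately follows.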
+void MacroAssembler::CompareAndSplit(const Register& lhs,
+ const Operand& rhs,
+ Condition cond,
+ Label* if_true,
+ Label* if_false,
+ Label* fall_through) {
+ if ((if_true == if_false) && (if_false == fall_through)) {
+ // Fall through.
+ } else if (if_true == if_false) {
+ B(if_true);
+ } else if (if_false == fall_through) {
+ CompareAndBranch(lhs, rhs, cond, if_true);
+ } else if (if_true == fall_through) {
+ CompareAndBranch(lhs, rhs, InvertCondition(cond), if_false);
+ } else {
+ CompareAndBranch(lhs, rhs, cond, if_true);
+ B(if_false);
+ }
+}
+
+
+void MacroAssembler::TestAndSplit(const Register& reg,
+ uint64_t bit_pattern,
+ Label* if_all_clear,
+ Label* if_any_set,
+ Label* fall_through) {
+ if ((if_all_clear == if_any_set) && (if_any_set == fall_through)) {
+ // Fall through.
+ } else if (if_all_clear == if_any_set) {
+ B(if_all_clear);
+ } else if (if_all_clear == fall_through) {
+ TestAndBranchIfAnySet(reg, bit_pattern, if_any_set);
+ } else if (if_any_set == fall_through) {
+ TestAndBranchIfAllClear(reg, bit_pattern, if_all_clear);
+ } else {
+ TestAndBranchIfAnySet(reg, bit_pattern, if_any_set);
+ B(if_all_clear);
+ }
+}
+
+
+void MacroAssembler::CheckFastElements(Register map,
+ Register scratch,
+ Label* fail) {
+ STATIC_ASSERT(FAST_SMI_ELEMENTS == 0);
+ STATIC_ASSERT(FAST_HOLEY_SMI_ELEMENTS == 1);
+ STATIC_ASSERT(FAST_ELEMENTS == 2);
+ STATIC_ASSERT(FAST_HOLEY_ELEMENTS == 3);
+ Ldrb(scratch, FieldMemOperand(map, Map::kBitField2Offset));
+ Cmp(scratch, Map::kMaximumBitField2FastHoleyElementValue);
+ B(hi, fail);
+}
+
+
+void MacroAssembler::CheckFastObjectElements(Register map,
+ Register scratch,
+ Label* fail) {
+ STATIC_ASSERT(FAST_SMI_ELEMENTS == 0);
+ STATIC_ASSERT(FAST_HOLEY_SMI_ELEMENTS == 1);
+ STATIC_ASSERT(FAST_ELEMENTS == 2);
+ STATIC_ASSERT(FAST_HOLEY_ELEMENTS == 3);
+ Ldrb(scratch, FieldMemOperand(map, Map::kBitField2Offset));
+ Cmp(scratch, Operand(Map::kMaximumBitField2FastHoleySmiElementValue));
+  // If the first comparison gave 'ls' (i.e. a smi-only elements kind), force
+  // the flags to 'hi' so that we branch to fail; otherwise do the second
+  // comparison against the fast-holey limit.
+ Ccmp(scratch,
+ Operand(Map::kMaximumBitField2FastHoleyElementValue), CFlag, hi);
+ B(hi, fail);
+}
+
+
+// Note: The ARM version of this clobbers elements_reg, but this version does
+// not. Some uses of this in ARM64 assume that elements_reg will be preserved.
+void MacroAssembler::StoreNumberToDoubleElements(Register value_reg,
+ Register key_reg,
+ Register elements_reg,
+ Register scratch1,
+ FPRegister fpscratch1,
+ FPRegister fpscratch2,
+ Label* fail,
+ int elements_offset) {
+ ASSERT(!AreAliased(value_reg, key_reg, elements_reg, scratch1));
+ Label store_num;
+
+ // Speculatively convert the smi to a double - all smis can be exactly
+ // represented as a double.
+ SmiUntagToDouble(fpscratch1, value_reg, kSpeculativeUntag);
+
+ // If value_reg is a smi, we're done.
+ JumpIfSmi(value_reg, &store_num);
+
+ // Ensure that the object is a heap number.
+ CheckMap(value_reg, scratch1, isolate()->factory()->heap_number_map(),
+ fail, DONT_DO_SMI_CHECK);
+
+ Ldr(fpscratch1, FieldMemOperand(value_reg, HeapNumber::kValueOffset));
+ Fmov(fpscratch2, FixedDoubleArray::canonical_not_the_hole_nan_as_double());
+
+ // Check for NaN by comparing the number to itself: NaN comparison will
+ // report unordered, indicated by the overflow flag being set.
+ Fcmp(fpscratch1, fpscratch1);
+ Fcsel(fpscratch1, fpscratch2, fpscratch1, vs);
+
+ // Store the result.
+ Bind(&store_num);
+ Add(scratch1, elements_reg,
+ Operand::UntagSmiAndScale(key_reg, kDoubleSizeLog2));
+ Str(fpscratch1,
+ FieldMemOperand(scratch1,
+ FixedDoubleArray::kHeaderSize - elements_offset));
+}
+
+
+bool MacroAssembler::AllowThisStubCall(CodeStub* stub) {
+ return has_frame_ || !stub->SometimesSetsUpAFrame();
+}
+
+
+void MacroAssembler::IndexFromHash(Register hash, Register index) {
+ // If the hash field contains an array index pick it out. The assert checks
+ // that the constants for the maximum number of digits for an array index
+  // cached in the hash field and the number of bits reserved for it do not
+ // conflict.
+ ASSERT(TenToThe(String::kMaxCachedArrayIndexLength) <
+ (1 << String::kArrayIndexValueBits));
+  // We want the smi-tagged index in 'index'. kArrayIndexValueMask has zeros in
+ // the low kHashShift bits.
+ STATIC_ASSERT(kSmiTag == 0);
+ Ubfx(hash, hash, String::kHashShift, String::kArrayIndexValueBits);
+ SmiTag(index, hash);
+}
+
+
+void MacroAssembler::EmitSeqStringSetCharCheck(
+ Register string,
+ Register index,
+ SeqStringSetCharCheckIndexType index_type,
+ Register scratch,
+ uint32_t encoding_mask) {
+ ASSERT(!AreAliased(string, index, scratch));
+
+ if (index_type == kIndexIsSmi) {
+ AssertSmi(index);
+ }
+
+ // Check that string is an object.
+ AssertNotSmi(string, kNonObject);
+
+ // Check that string has an appropriate map.
+ Ldr(scratch, FieldMemOperand(string, HeapObject::kMapOffset));
+ Ldrb(scratch, FieldMemOperand(scratch, Map::kInstanceTypeOffset));
+
+ And(scratch, scratch, kStringRepresentationMask | kStringEncodingMask);
+ Cmp(scratch, encoding_mask);
+ Check(eq, kUnexpectedStringType);
+
+ Ldr(scratch, FieldMemOperand(string, String::kLengthOffset));
+ Cmp(index, index_type == kIndexIsSmi ? scratch : Operand::UntagSmi(scratch));
+ Check(lt, kIndexIsTooLarge);
+
+ ASSERT_EQ(0, Smi::FromInt(0));
+ Cmp(index, 0);
+ Check(ge, kIndexIsNegative);
+}
+
+
+void MacroAssembler::CheckAccessGlobalProxy(Register holder_reg,
+ Register scratch1,
+ Register scratch2,
+ Label* miss) {
+ ASSERT(!AreAliased(holder_reg, scratch1, scratch2));
+ Label same_contexts;
+
+ // Load current lexical context from the stack frame.
+ Ldr(scratch1, MemOperand(fp, StandardFrameConstants::kContextOffset));
+ // In debug mode, make sure the lexical context is set.
+#ifdef DEBUG
+ Cmp(scratch1, 0);
+ Check(ne, kWeShouldNotHaveAnEmptyLexicalContext);
+#endif
+
+ // Load the native context of the current context.
+ int offset =
+ Context::kHeaderSize + Context::GLOBAL_OBJECT_INDEX * kPointerSize;
+ Ldr(scratch1, FieldMemOperand(scratch1, offset));
+ Ldr(scratch1, FieldMemOperand(scratch1, GlobalObject::kNativeContextOffset));
+
+ // Check the context is a native context.
+ if (emit_debug_code()) {
+    // Read the first word and compare to the native_context_map.
+ Ldr(scratch2, FieldMemOperand(scratch1, HeapObject::kMapOffset));
+ CompareRoot(scratch2, Heap::kNativeContextMapRootIndex);
+ Check(eq, kExpectedNativeContext);
+ }
+
+ // Check if both contexts are the same.
+ Ldr(scratch2, FieldMemOperand(holder_reg,
+ JSGlobalProxy::kNativeContextOffset));
+ Cmp(scratch1, scratch2);
+ B(&same_contexts, eq);
+
+ // Check the context is a native context.
+ if (emit_debug_code()) {
+ // We're short on scratch registers here, so use holder_reg as a scratch.
+ Push(holder_reg);
+ Register scratch3 = holder_reg;
+
+ CompareRoot(scratch2, Heap::kNullValueRootIndex);
+ Check(ne, kExpectedNonNullContext);
+
+ Ldr(scratch3, FieldMemOperand(scratch2, HeapObject::kMapOffset));
+ CompareRoot(scratch3, Heap::kNativeContextMapRootIndex);
+ Check(eq, kExpectedNativeContext);
+ Pop(holder_reg);
+ }
+
+ // Check that the security token in the calling global object is
+ // compatible with the security token in the receiving global
+ // object.
+ int token_offset = Context::kHeaderSize +
+ Context::SECURITY_TOKEN_INDEX * kPointerSize;
+
+ Ldr(scratch1, FieldMemOperand(scratch1, token_offset));
+ Ldr(scratch2, FieldMemOperand(scratch2, token_offset));
+ Cmp(scratch1, scratch2);
+ B(miss, ne);
+
+ Bind(&same_contexts);
+}
+
+
+// Compute the hash code from the untagged key. This must be kept in sync with
+// ComputeIntegerHash in utils.h and KeyedLoadGenericElementStub in
+// code-stubs-hydrogen.cc.
+void MacroAssembler::GetNumberHash(Register key, Register scratch) {
+ ASSERT(!AreAliased(key, scratch));
+
+ // Xor original key with a seed.
+ LoadRoot(scratch, Heap::kHashSeedRootIndex);
+ Eor(key, key, Operand::UntagSmi(scratch));
+
+ // The algorithm uses 32-bit integer values.
+ key = key.W();
+ scratch = scratch.W();
+
+ // Compute the hash code from the untagged key. This must be kept in sync
+ // with ComputeIntegerHash in utils.h.
+ //
+  // hash = ~hash + (hash << 15);
+ Mvn(scratch, key);
+ Add(key, scratch, Operand(key, LSL, 15));
+ // hash = hash ^ (hash >> 12);
+ Eor(key, key, Operand(key, LSR, 12));
+ // hash = hash + (hash << 2);
+ Add(key, key, Operand(key, LSL, 2));
+ // hash = hash ^ (hash >> 4);
+ Eor(key, key, Operand(key, LSR, 4));
+ // hash = hash * 2057;
+ Mov(scratch, Operand(key, LSL, 11));
+ Add(key, key, Operand(key, LSL, 3));
+ Add(key, key, scratch);
+ // hash = hash ^ (hash >> 16);
+ Eor(key, key, Operand(key, LSR, 16));
+}
+
+
+void MacroAssembler::LoadFromNumberDictionary(Label* miss,
+ Register elements,
+ Register key,
+ Register result,
+ Register scratch0,
+ Register scratch1,
+ Register scratch2,
+ Register scratch3) {
+ ASSERT(!AreAliased(elements, key, scratch0, scratch1, scratch2, scratch3));
+
+ Label done;
+
+ SmiUntag(scratch0, key);
+ GetNumberHash(scratch0, scratch1);
+
+ // Compute the capacity mask.
+ Ldrsw(scratch1,
+ UntagSmiFieldMemOperand(elements,
+ SeededNumberDictionary::kCapacityOffset));
+ Sub(scratch1, scratch1, 1);
+
+ // Generate an unrolled loop that performs a few probes before giving up.
+ for (int i = 0; i < kNumberDictionaryProbes; i++) {
+ // Compute the masked index: (hash + i + i * i) & mask.
+ if (i > 0) {
+ Add(scratch2, scratch0, SeededNumberDictionary::GetProbeOffset(i));
+ } else {
+ Mov(scratch2, scratch0);
+ }
+ And(scratch2, scratch2, scratch1);
+
+ // Scale the index by multiplying by the element size.
+ ASSERT(SeededNumberDictionary::kEntrySize == 3);
+ Add(scratch2, scratch2, Operand(scratch2, LSL, 1));
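+    // scratch2 is now entry_index * 3: each entry holds a key, a value and
+    // the property details (see kValueOffset and kDetailsOffset below).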
+
+ // Check if the key is identical to the name.
+ Add(scratch2, elements, Operand(scratch2, LSL, kPointerSizeLog2));
+ Ldr(scratch3,
+ FieldMemOperand(scratch2,
+ SeededNumberDictionary::kElementsStartOffset));
+ Cmp(key, scratch3);
+ if (i != (kNumberDictionaryProbes - 1)) {
+ B(eq, &done);
+ } else {
+ B(ne, miss);
+ }
+ }
+
+ Bind(&done);
+ // Check that the value is a normal property.
+ const int kDetailsOffset =
+ SeededNumberDictionary::kElementsStartOffset + 2 * kPointerSize;
+ Ldrsw(scratch1, UntagSmiFieldMemOperand(scratch2, kDetailsOffset));
+ TestAndBranchIfAnySet(scratch1, PropertyDetails::TypeField::kMask, miss);
+
+ // Get the value at the masked, scaled index and return.
+ const int kValueOffset =
+ SeededNumberDictionary::kElementsStartOffset + kPointerSize;
+ Ldr(result, FieldMemOperand(scratch2, kValueOffset));
+}
+
+
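+// Record 'address' in the store buffer: the address is written at the current
+// store buffer top, the top is bumped, and StoreBufferOverflowStub is called
+// if the overflow bit of the new top is set. Depending on 'and_then', the
+// helper either falls through or returns when it is done.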
+void MacroAssembler::RememberedSetHelper(Register object, // For debug tests.
+ Register address,
+ Register scratch1,
+ SaveFPRegsMode fp_mode,
+ RememberedSetFinalAction and_then) {
+ ASSERT(!AreAliased(object, address, scratch1));
+ Label done, store_buffer_overflow;
+ if (emit_debug_code()) {
+ Label ok;
+ JumpIfNotInNewSpace(object, &ok);
+ Abort(kRememberedSetPointerInNewSpace);
+ bind(&ok);
+ }
+ UseScratchRegisterScope temps(this);
+ Register scratch2 = temps.AcquireX();
+
+ // Load store buffer top.
+ Mov(scratch2, ExternalReference::store_buffer_top(isolate()));
+ Ldr(scratch1, MemOperand(scratch2));
+ // Store pointer to buffer and increment buffer top.
+ Str(address, MemOperand(scratch1, kPointerSize, PostIndex));
+ // Write back new top of buffer.
+ Str(scratch1, MemOperand(scratch2));
+  // Check for the end of the buffer; the stub is called when it overflows.
+ ASSERT(StoreBuffer::kStoreBufferOverflowBit ==
+ (1 << (14 + kPointerSizeLog2)));
+ if (and_then == kFallThroughAtEnd) {
+ Tbz(scratch1, (14 + kPointerSizeLog2), &done);
+ } else {
+ ASSERT(and_then == kReturnAtEnd);
+ Tbnz(scratch1, (14 + kPointerSizeLog2), &store_buffer_overflow);
+ Ret();
+ }
+
+ Bind(&store_buffer_overflow);
+ Push(lr);
+ StoreBufferOverflowStub store_buffer_overflow_stub =
+ StoreBufferOverflowStub(fp_mode);
+ CallStub(&store_buffer_overflow_stub);
+ Pop(lr);
+
+ Bind(&done);
+ if (and_then == kReturnAtEnd) {
+ Ret();
+ }
+}
+
+
+void MacroAssembler::PopSafepointRegisters() {
+ const int num_unsaved = kNumSafepointRegisters - kNumSafepointSavedRegisters;
+ PopXRegList(kSafepointSavedRegisters);
+ Drop(num_unsaved);
+}
+
+
+void MacroAssembler::PushSafepointRegisters() {
+ // Safepoints expect a block of kNumSafepointRegisters values on the stack, so
+ // adjust the stack for unsaved registers.
+ const int num_unsaved = kNumSafepointRegisters - kNumSafepointSavedRegisters;
+ ASSERT(num_unsaved >= 0);
+ Claim(num_unsaved);
+ PushXRegList(kSafepointSavedRegisters);
+}
+
+
+void MacroAssembler::PushSafepointRegistersAndDoubles() {
+ PushSafepointRegisters();
+ PushCPURegList(CPURegList(CPURegister::kFPRegister, kDRegSizeInBits,
+ FPRegister::kAllocatableFPRegisters));
+}
+
+
+void MacroAssembler::PopSafepointRegistersAndDoubles() {
+ PopCPURegList(CPURegList(CPURegister::kFPRegister, kDRegSizeInBits,
+ FPRegister::kAllocatableFPRegisters));
+ PopSafepointRegisters();
+}
+
+
+int MacroAssembler::SafepointRegisterStackIndex(int reg_code) {
+ // Make sure the safepoint registers list is what we expect.
+ ASSERT(CPURegList::GetSafepointSavedRegisters().list() == 0x6ffcffff);
+
+ // Safepoint registers are stored contiguously on the stack, but not all the
+ // registers are saved. The following registers are excluded:
+ // - x16 and x17 (ip0 and ip1) because they shouldn't be preserved outside of
+ // the macro assembler.
+ // - x28 (jssp) because JS stack pointer doesn't need to be included in
+ // safepoint registers.
+ // - x31 (csp) because the system stack pointer doesn't need to be included
+ // in safepoint registers.
+ //
+ // This function implements the mapping of register code to index into the
+ // safepoint register slots.
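+  // For example, x15 maps to slot 15, x18 maps to slot 16 (because ip0 and
+  // ip1 are skipped), and fp (x29) maps to slot 26 (jssp is also skipped).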
+ if ((reg_code >= 0) && (reg_code <= 15)) {
+ return reg_code;
+ } else if ((reg_code >= 18) && (reg_code <= 27)) {
+ // Skip ip0 and ip1.
+ return reg_code - 2;
+ } else if ((reg_code == 29) || (reg_code == 30)) {
+ // Also skip jssp.
+ return reg_code - 3;
+ } else {
+ // This register has no safepoint register slot.
+ UNREACHABLE();
+ return -1;
+ }
+}
+
+
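+// The page flags live in the MemoryChunk header, which is found by clearing
+// the low bits (Page::kPageAlignmentMask) of the object's address.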
+void MacroAssembler::CheckPageFlagSet(const Register& object,
+ const Register& scratch,
+ int mask,
+ Label* if_any_set) {
+ And(scratch, object, ~Page::kPageAlignmentMask);
+ Ldr(scratch, MemOperand(scratch, MemoryChunk::kFlagsOffset));
+ TestAndBranchIfAnySet(scratch, mask, if_any_set);
+}
+
+
+void MacroAssembler::CheckPageFlagClear(const Register& object,
+ const Register& scratch,
+ int mask,
+ Label* if_all_clear) {
+ And(scratch, object, ~Page::kPageAlignmentMask);
+ Ldr(scratch, MemOperand(scratch, MemoryChunk::kFlagsOffset));
+ TestAndBranchIfAllClear(scratch, mask, if_all_clear);
+}
+
+
+void MacroAssembler::RecordWriteField(
+ Register object,
+ int offset,
+ Register value,
+ Register scratch,
+ LinkRegisterStatus lr_status,
+ SaveFPRegsMode save_fp,
+ RememberedSetAction remembered_set_action,
+ SmiCheck smi_check) {
+ // First, check if a write barrier is even needed. The tests below
+ // catch stores of Smis.
+ Label done;
+
+ // Skip the barrier if writing a smi.
+ if (smi_check == INLINE_SMI_CHECK) {
+ JumpIfSmi(value, &done);
+ }
+
+ // Although the object register is tagged, the offset is relative to the start
+ // of the object, so offset must be a multiple of kPointerSize.
+ ASSERT(IsAligned(offset, kPointerSize));
+
+ Add(scratch, object, offset - kHeapObjectTag);
+ if (emit_debug_code()) {
+ Label ok;
+ Tst(scratch, (1 << kPointerSizeLog2) - 1);
+ B(eq, &ok);
+ Abort(kUnalignedCellInWriteBarrier);
+ Bind(&ok);
+ }
+
+ RecordWrite(object,
+ scratch,
+ value,
+ lr_status,
+ save_fp,
+ remembered_set_action,
+ OMIT_SMI_CHECK);
+
+ Bind(&done);
+
+ // Clobber clobbered input registers when running with the debug-code flag
+ // turned on to provoke errors.
+ if (emit_debug_code()) {
+ Mov(value, Operand(BitCast<int64_t>(kZapValue + 4)));
+ Mov(scratch, Operand(BitCast<int64_t>(kZapValue + 8)));
+ }
+}
+
+
+// Will clobber: object, address, value.
+// If lr_status is kLRHasBeenSaved, lr will also be clobbered.
+//
+// The register 'object' contains a heap object pointer. The heap object tag is
+// shifted away.
+void MacroAssembler::RecordWrite(Register object,
+ Register address,
+ Register value,
+ LinkRegisterStatus lr_status,
+ SaveFPRegsMode fp_mode,
+ RememberedSetAction remembered_set_action,
+ SmiCheck smi_check) {
+ ASM_LOCATION("MacroAssembler::RecordWrite");
+ ASSERT(!AreAliased(object, value));
+
+ if (emit_debug_code()) {
+ UseScratchRegisterScope temps(this);
+ Register temp = temps.AcquireX();
+
+ Ldr(temp, MemOperand(address));
+ Cmp(temp, value);
+ Check(eq, kWrongAddressOrValuePassedToRecordWrite);
+ }
+
+ // Count number of write barriers in generated code.
+ isolate()->counters()->write_barriers_static()->Increment();
+ // TODO(mstarzinger): Dynamic counter missing.
+
+ // First, check if a write barrier is even needed. The tests below
+ // catch stores of smis and stores into the young generation.
+ Label done;
+
+ if (smi_check == INLINE_SMI_CHECK) {
+ ASSERT_EQ(0, kSmiTag);
+ JumpIfSmi(value, &done);
+ }
+
+ CheckPageFlagClear(value,
+ value, // Used as scratch.
+ MemoryChunk::kPointersToHereAreInterestingMask,
+ &done);
+ CheckPageFlagClear(object,
+ value, // Used as scratch.
+ MemoryChunk::kPointersFromHereAreInterestingMask,
+ &done);
+
+ // Record the actual write.
+ if (lr_status == kLRHasNotBeenSaved) {
+ Push(lr);
+ }
+ RecordWriteStub stub(object, value, address, remembered_set_action, fp_mode);
+ CallStub(&stub);
+ if (lr_status == kLRHasNotBeenSaved) {
+ Pop(lr);
+ }
+
+ Bind(&done);
+
+ // Clobber clobbered registers when running with the debug-code flag
+ // turned on to provoke errors.
+ if (emit_debug_code()) {
+ Mov(address, Operand(BitCast<int64_t>(kZapValue + 12)));
+ Mov(value, Operand(BitCast<int64_t>(kZapValue + 16)));
+ }
+}
+
+
+void MacroAssembler::AssertHasValidColor(const Register& reg) {
+ if (emit_debug_code()) {
+ // The bit sequence is backward. The first character in the string
+ // represents the least significant bit.
+ ASSERT(strcmp(Marking::kImpossibleBitPattern, "01") == 0);
+
+ Label color_is_valid;
+ Tbnz(reg, 0, &color_is_valid);
+ Tbz(reg, 1, &color_is_valid);
+ Abort(kUnexpectedColorFound);
+ Bind(&color_is_valid);
+ }
+}
+
+
+void MacroAssembler::GetMarkBits(Register addr_reg,
+ Register bitmap_reg,
+ Register shift_reg) {
+ ASSERT(!AreAliased(addr_reg, bitmap_reg, shift_reg));
+ ASSERT(addr_reg.Is64Bits() && bitmap_reg.Is64Bits() && shift_reg.Is64Bits());
+ // addr_reg is divided into fields:
+ // |63 page base 20|19 high 8|7 shift 3|2 0|
+ // 'high' gives the index of the cell holding color bits for the object.
+ // 'shift' gives the offset in the cell for this object's color.
+ const int kShiftBits = kPointerSizeLog2 + Bitmap::kBitsPerCellLog2;
+ UseScratchRegisterScope temps(this);
+ Register temp = temps.AcquireX();
+ Ubfx(temp, addr_reg, kShiftBits, kPageSizeBits - kShiftBits);
+ Bic(bitmap_reg, addr_reg, Page::kPageAlignmentMask);
+ Add(bitmap_reg, bitmap_reg, Operand(temp, LSL, Bitmap::kBytesPerCellLog2));
+ // bitmap_reg:
+ // |63 page base 20|19 zeros 15|14 high 3|2 0|
+ Ubfx(shift_reg, addr_reg, kPointerSizeLog2, Bitmap::kBitsPerCellLog2);
+}
+
+
+void MacroAssembler::HasColor(Register object,
+ Register bitmap_scratch,
+ Register shift_scratch,
+ Label* has_color,
+ int first_bit,
+ int second_bit) {
+ // See mark-compact.h for color definitions.
+ ASSERT(!AreAliased(object, bitmap_scratch, shift_scratch));
+
+ GetMarkBits(object, bitmap_scratch, shift_scratch);
+ Ldr(bitmap_scratch, MemOperand(bitmap_scratch, MemoryChunk::kHeaderSize));
+ // Shift the bitmap down to get the color of the object in bits [1:0].
+ Lsr(bitmap_scratch, bitmap_scratch, shift_scratch);
+
+ AssertHasValidColor(bitmap_scratch);
+
+ // These bit sequences are backwards. The first character in the string
+ // represents the least significant bit.
+ ASSERT(strcmp(Marking::kWhiteBitPattern, "00") == 0);
+ ASSERT(strcmp(Marking::kBlackBitPattern, "10") == 0);
+ ASSERT(strcmp(Marking::kGreyBitPattern, "11") == 0);
+
+ // Check for the color.
+ if (first_bit == 0) {
+ // Checking for white.
+ ASSERT(second_bit == 0);
+ // We only need to test the first bit.
+ Tbz(bitmap_scratch, 0, has_color);
+ } else {
+ Label other_color;
+ // Checking for grey or black.
+ Tbz(bitmap_scratch, 0, &other_color);
+ if (second_bit == 0) {
+ Tbz(bitmap_scratch, 1, has_color);
+ } else {
+ Tbnz(bitmap_scratch, 1, has_color);
+ }
+ Bind(&other_color);
+ }
+
+ // Fall through if it does not have the right color.
+}
+
+
+void MacroAssembler::CheckMapDeprecated(Handle<Map> map,
+ Register scratch,
+ Label* if_deprecated) {
+ if (map->CanBeDeprecated()) {
+ Mov(scratch, Operand(map));
+ Ldrsw(scratch, UntagSmiFieldMemOperand(scratch, Map::kBitField3Offset));
+ TestAndBranchIfAnySet(scratch, Map::Deprecated::kMask, if_deprecated);
+ }
+}
+
+
+void MacroAssembler::JumpIfBlack(Register object,
+ Register scratch0,
+ Register scratch1,
+ Label* on_black) {
+ ASSERT(strcmp(Marking::kBlackBitPattern, "10") == 0);
+ HasColor(object, scratch0, scratch1, on_black, 1, 0); // kBlackBitPattern.
+}
+
+
+void MacroAssembler::JumpIfDictionaryInPrototypeChain(
+ Register object,
+ Register scratch0,
+ Register scratch1,
+ Label* found) {
+ ASSERT(!AreAliased(object, scratch0, scratch1));
+ Factory* factory = isolate()->factory();
+ Register current = scratch0;
+ Label loop_again;
+
+  // 'current' walks up the prototype chain, starting at the object itself.
+ Mov(current, object);
+
+ // Loop based on the map going up the prototype chain.
+ Bind(&loop_again);
+ Ldr(current, FieldMemOperand(current, HeapObject::kMapOffset));
+ Ldrb(scratch1, FieldMemOperand(current, Map::kBitField2Offset));
+ Ubfx(scratch1, scratch1, Map::kElementsKindShift, Map::kElementsKindBitCount);
+ CompareAndBranch(scratch1, DICTIONARY_ELEMENTS, eq, found);
+ Ldr(current, FieldMemOperand(current, Map::kPrototypeOffset));
+ CompareAndBranch(current, Operand(factory->null_value()), ne, &loop_again);
+}
+
+
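+// Given the address of an LDR (literal) instruction, compute the address of
+// the literal it loads: the signed 19-bit offset in bits [23:5] is extracted
+// and scaled to a byte offset from the load instruction.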
+void MacroAssembler::GetRelocatedValueLocation(Register ldr_location,
+ Register result) {
+ ASSERT(!result.Is(ldr_location));
+ const uint32_t kLdrLitOffset_lsb = 5;
+ const uint32_t kLdrLitOffset_width = 19;
+ Ldr(result, MemOperand(ldr_location));
+ if (emit_debug_code()) {
+ And(result, result, LoadLiteralFMask);
+ Cmp(result, LoadLiteralFixed);
+ Check(eq, kTheInstructionToPatchShouldBeAnLdrLiteral);
+ // The instruction was clobbered. Reload it.
+ Ldr(result, MemOperand(ldr_location));
+ }
+ Sbfx(result, result, kLdrLitOffset_lsb, kLdrLitOffset_width);
+ Add(result, ldr_location, Operand(result, LSL, kWordSizeInBytesLog2));
+}
+
+
+void MacroAssembler::EnsureNotWhite(
+ Register value,
+ Register bitmap_scratch,
+ Register shift_scratch,
+ Register load_scratch,
+ Register length_scratch,
+ Label* value_is_white_and_not_data) {
+ ASSERT(!AreAliased(
+ value, bitmap_scratch, shift_scratch, load_scratch, length_scratch));
+
+ // These bit sequences are backwards. The first character in the string
+ // represents the least significant bit.
+ ASSERT(strcmp(Marking::kWhiteBitPattern, "00") == 0);
+ ASSERT(strcmp(Marking::kBlackBitPattern, "10") == 0);
+ ASSERT(strcmp(Marking::kGreyBitPattern, "11") == 0);
+
+ GetMarkBits(value, bitmap_scratch, shift_scratch);
+ Ldr(load_scratch, MemOperand(bitmap_scratch, MemoryChunk::kHeaderSize));
+ Lsr(load_scratch, load_scratch, shift_scratch);
+
+ AssertHasValidColor(load_scratch);
+
+ // If the value is black or grey we don't need to do anything.
+ // Since both black and grey have a 1 in the first position and white does
+ // not have a 1 there we only need to check one bit.
+ Label done;
+ Tbnz(load_scratch, 0, &done);
+
+ // Value is white. We check whether it is data that doesn't need scanning.
+ Register map = load_scratch; // Holds map while checking type.
+ Label is_data_object;
+
+ // Check for heap-number.
+ Ldr(map, FieldMemOperand(value, HeapObject::kMapOffset));
+ Mov(length_scratch, HeapNumber::kSize);
+ JumpIfRoot(map, Heap::kHeapNumberMapRootIndex, &is_data_object);
+
+ // Check for strings.
+ ASSERT(kIsIndirectStringTag == 1 && kIsIndirectStringMask == 1);
+ ASSERT(kNotStringTag == 0x80 && kIsNotStringMask == 0x80);
+ // If it's a string and it's not a cons string then it's an object containing
+ // no GC pointers.
+ Register instance_type = load_scratch;
+ Ldrb(instance_type, FieldMemOperand(map, Map::kInstanceTypeOffset));
+ TestAndBranchIfAnySet(instance_type,
+ kIsIndirectStringMask | kIsNotStringMask,
+ value_is_white_and_not_data);
+
+ // It's a non-indirect (non-cons and non-slice) string.
+ // If it's external, the length is just ExternalString::kSize.
+ // Otherwise it's String::kHeaderSize + string->length() * (1 or 2).
+ // External strings are the only ones with the kExternalStringTag bit
+ // set.
+ ASSERT_EQ(0, kSeqStringTag & kExternalStringTag);
+ ASSERT_EQ(0, kConsStringTag & kExternalStringTag);
+ Mov(length_scratch, ExternalString::kSize);
+ TestAndBranchIfAnySet(instance_type, kExternalStringTag, &is_data_object);
+
+  // Sequential string, either ASCII or UC16.
+  // Untag the length, then shift it left by 0 (ASCII, char-size of 1) or by
+  // 1 (UC16, char-size of 2) to get the number of character bytes.
+ ASSERT(kOneByteStringTag == 4 && kStringEncodingMask == 4);
+ Ldrsw(length_scratch, UntagSmiFieldMemOperand(value,
+ String::kLengthOffset));
+ Tst(instance_type, kStringEncodingMask);
+ Cset(load_scratch, eq);
+ Lsl(length_scratch, length_scratch, load_scratch);
+ Add(length_scratch,
+ length_scratch,
+ SeqString::kHeaderSize + kObjectAlignmentMask);
+ Bic(length_scratch, length_scratch, kObjectAlignmentMask);
+
+ Bind(&is_data_object);
+ // Value is a data object, and it is white. Mark it black. Since we know
+ // that the object is white we can make it black by flipping one bit.
+ Register mask = shift_scratch;
+ Mov(load_scratch, 1);
+ Lsl(mask, load_scratch, shift_scratch);
+
+ Ldr(load_scratch, MemOperand(bitmap_scratch, MemoryChunk::kHeaderSize));
+ Orr(load_scratch, load_scratch, mask);
+ Str(load_scratch, MemOperand(bitmap_scratch, MemoryChunk::kHeaderSize));
+
+ Bic(bitmap_scratch, bitmap_scratch, Page::kPageAlignmentMask);
+ Ldr(load_scratch, MemOperand(bitmap_scratch, MemoryChunk::kLiveBytesOffset));
+ Add(load_scratch, load_scratch, length_scratch);
+ Str(load_scratch, MemOperand(bitmap_scratch, MemoryChunk::kLiveBytesOffset));
+
+ Bind(&done);
+}
+
+
+void MacroAssembler::Assert(Condition cond, BailoutReason reason) {
+ if (emit_debug_code()) {
+ Check(cond, reason);
+ }
+}
+
+
+void MacroAssembler::AssertRegisterIsClear(Register reg, BailoutReason reason) {
+ if (emit_debug_code()) {
+ CheckRegisterIsClear(reg, reason);
+ }
+}
+
+
+void MacroAssembler::AssertRegisterIsRoot(Register reg,
+ Heap::RootListIndex index,
+ BailoutReason reason) {
+ if (emit_debug_code()) {
+ CompareRoot(reg, index);
+ Check(eq, reason);
+ }
+}
+
+
+void MacroAssembler::AssertFastElements(Register elements) {
+ if (emit_debug_code()) {
+ UseScratchRegisterScope temps(this);
+ Register temp = temps.AcquireX();
+ Label ok;
+ Ldr(temp, FieldMemOperand(elements, HeapObject::kMapOffset));
+ JumpIfRoot(temp, Heap::kFixedArrayMapRootIndex, &ok);
+ JumpIfRoot(temp, Heap::kFixedDoubleArrayMapRootIndex, &ok);
+ JumpIfRoot(temp, Heap::kFixedCOWArrayMapRootIndex, &ok);
+ Abort(kJSObjectWithFastElementsMapHasSlowElements);
+ Bind(&ok);
+ }
+}
+
+
+void MacroAssembler::AssertIsString(const Register& object) {
+ if (emit_debug_code()) {
+ UseScratchRegisterScope temps(this);
+ Register temp = temps.AcquireX();
+ STATIC_ASSERT(kSmiTag == 0);
+ Tst(object, kSmiTagMask);
+ Check(ne, kOperandIsNotAString);
+ Ldr(temp, FieldMemOperand(object, HeapObject::kMapOffset));
+ CompareInstanceType(temp, temp, FIRST_NONSTRING_TYPE);
+ Check(lo, kOperandIsNotAString);
+ }
+}
+
+
+void MacroAssembler::Check(Condition cond, BailoutReason reason) {
+ Label ok;
+ B(cond, &ok);
+ Abort(reason);
+ // Will not return here.
+ Bind(&ok);
+}
+
+
+void MacroAssembler::CheckRegisterIsClear(Register reg, BailoutReason reason) {
+ Label ok;
+ Cbz(reg, &ok);
+ Abort(reason);
+ // Will not return here.
+ Bind(&ok);
+}
+
+
+void MacroAssembler::Abort(BailoutReason reason) {
+#ifdef DEBUG
+ RecordComment("Abort message: ");
+ RecordComment(GetBailoutReason(reason));
+
+ if (FLAG_trap_on_abort) {
+ Brk(0);
+ return;
+ }
+#endif
+
+ // Abort is used in some contexts where csp is the stack pointer. In order to
+ // simplify the CallRuntime code, make sure that jssp is the stack pointer.
+ // There is no risk of register corruption here because Abort doesn't return.
+ Register old_stack_pointer = StackPointer();
+ SetStackPointer(jssp);
+ Mov(jssp, old_stack_pointer);
+
+ // We need some scratch registers for the MacroAssembler, so make sure we have
+ // some. This is safe here because Abort never returns.
+ RegList old_tmp_list = TmpList()->list();
+ TmpList()->Combine(ip0);
+ TmpList()->Combine(ip1);
+
+ if (use_real_aborts()) {
+ // Avoid infinite recursion; Push contains some assertions that use Abort.
+ NoUseRealAbortsScope no_real_aborts(this);
+
+ Mov(x0, Smi::FromInt(reason));
+ Push(x0);
+
+ if (!has_frame_) {
+ // We don't actually want to generate a pile of code for this, so just
+ // claim there is a stack frame, without generating one.
+ FrameScope scope(this, StackFrame::NONE);
+ CallRuntime(Runtime::kAbort, 1);
+ } else {
+ CallRuntime(Runtime::kAbort, 1);
+ }
+ } else {
+ // Load the string to pass to Printf.
+ Label msg_address;
+ Adr(x0, &msg_address);
+
+ // Call Printf directly to report the error.
+ CallPrintf();
+
+ // We need a way to stop execution on both the simulator and real hardware,
+ // and Unreachable() is the best option.
+ Unreachable();
+
+ // Emit the message string directly in the instruction stream.
+ {
+ BlockPoolsScope scope(this);
+ Bind(&msg_address);
+ EmitStringData(GetBailoutReason(reason));
+ }
+ }
+
+ SetStackPointer(old_stack_pointer);
+ TmpList()->set_list(old_tmp_list);
+}
+
+
+void MacroAssembler::LoadTransitionedArrayMapConditional(
+ ElementsKind expected_kind,
+ ElementsKind transitioned_kind,
+ Register map_in_out,
+ Register scratch1,
+ Register scratch2,
+ Label* no_map_match) {
+ // Load the global or builtins object from the current context.
+ Ldr(scratch1, GlobalObjectMemOperand());
+ Ldr(scratch1, FieldMemOperand(scratch1, GlobalObject::kNativeContextOffset));
+
+ // Check that the function's map is the same as the expected cached map.
+ Ldr(scratch1, ContextMemOperand(scratch1, Context::JS_ARRAY_MAPS_INDEX));
+ size_t offset = (expected_kind * kPointerSize) + FixedArrayBase::kHeaderSize;
+ Ldr(scratch2, FieldMemOperand(scratch1, offset));
+ Cmp(map_in_out, scratch2);
+ B(ne, no_map_match);
+
+ // Use the transitioned cached map.
+ offset = (transitioned_kind * kPointerSize) + FixedArrayBase::kHeaderSize;
+ Ldr(map_in_out, FieldMemOperand(scratch1, offset));
+}
+
+
+void MacroAssembler::LoadGlobalFunction(int index, Register function) {
+ // Load the global or builtins object from the current context.
+ Ldr(function, GlobalObjectMemOperand());
+ // Load the native context from the global or builtins object.
+ Ldr(function, FieldMemOperand(function,
+ GlobalObject::kNativeContextOffset));
+ // Load the function from the native context.
+ Ldr(function, ContextMemOperand(function, index));
+}
+
+
+void MacroAssembler::LoadGlobalFunctionInitialMap(Register function,
+ Register map,
+ Register scratch) {
+ // Load the initial map. The global functions all have initial maps.
+ Ldr(map, FieldMemOperand(function, JSFunction::kPrototypeOrInitialMapOffset));
+ if (emit_debug_code()) {
+ Label ok, fail;
+ CheckMap(map, scratch, Heap::kMetaMapRootIndex, &fail, DO_SMI_CHECK);
+ B(&ok);
+ Bind(&fail);
+ Abort(kGlobalFunctionsMustHaveInitialMap);
+ Bind(&ok);
+ }
+}
+
+
+// This is the main Printf implementation. All other Printf variants call
+// PrintfNoPreserve after setting up one or more PreserveRegisterScopes.
+void MacroAssembler::PrintfNoPreserve(const char * format,
+ const CPURegister& arg0,
+ const CPURegister& arg1,
+ const CPURegister& arg2,
+ const CPURegister& arg3) {
+ // We cannot handle a caller-saved stack pointer. It doesn't make much sense
+ // in most cases anyway, so this restriction shouldn't be too serious.
+ ASSERT(!kCallerSaved.IncludesAliasOf(__ StackPointer()));
+
+ // Make sure that the macro assembler doesn't try to use any of our arguments
+ // as scratch registers.
+ ASSERT(!TmpList()->IncludesAliasOf(arg0, arg1, arg2, arg3));
+ ASSERT(!FPTmpList()->IncludesAliasOf(arg0, arg1, arg2, arg3));
+
+ // We cannot print the stack pointer because it is typically used to preserve
+ // caller-saved registers (using other Printf variants which depend on this
+ // helper).
+ ASSERT(!AreAliased(arg0, StackPointer()));
+ ASSERT(!AreAliased(arg1, StackPointer()));
+ ASSERT(!AreAliased(arg2, StackPointer()));
+ ASSERT(!AreAliased(arg3, StackPointer()));
+
+ static const int kMaxArgCount = 4;
+ // Assume that we have the maximum number of arguments until we know
+ // otherwise.
+ int arg_count = kMaxArgCount;
+
+ // The provided arguments.
+ CPURegister args[kMaxArgCount] = {arg0, arg1, arg2, arg3};
+
+ // The PCS registers where the arguments need to end up.
+ CPURegister pcs[kMaxArgCount] = {NoCPUReg, NoCPUReg, NoCPUReg, NoCPUReg};
+
+ // Promote FP arguments to doubles, and integer arguments to X registers.
+ // Note that FP and integer arguments cannot be mixed, but we'll check
+ // AreSameSizeAndType once we've processed these promotions.
+ for (int i = 0; i < kMaxArgCount; i++) {
+ if (args[i].IsRegister()) {
+ // Note that we use x1 onwards, because x0 will hold the format string.
+ pcs[i] = Register::XRegFromCode(i + 1);
+ // For simplicity, we handle all integer arguments as X registers. An X
+ // register argument takes the same space as a W register argument in the
+ // PCS anyway. The only limitation is that we must explicitly clear the
+ // top word for W register arguments as the callee will expect it to be
+ // clear.
+ if (!args[i].Is64Bits()) {
+ const Register& as_x = args[i].X();
+ And(as_x, as_x, 0x00000000ffffffff);
+ args[i] = as_x;
+ }
+ } else if (args[i].IsFPRegister()) {
+ pcs[i] = FPRegister::DRegFromCode(i);
+ // C and C++ varargs functions (such as printf) implicitly promote float
+ // arguments to doubles.
+ if (!args[i].Is64Bits()) {
+ FPRegister s(args[i]);
+ const FPRegister& as_d = args[i].D();
+ Fcvt(as_d, s);
+ args[i] = as_d;
+ }
+ } else {
+ // This is the first empty (NoCPUReg) argument, so use it to set the
+ // argument count and bail out.
+ arg_count = i;
+ break;
+ }
+ }
+ ASSERT((arg_count >= 0) && (arg_count <= kMaxArgCount));
+ // Check that every remaining argument is NoCPUReg.
+ for (int i = arg_count; i < kMaxArgCount; i++) {
+ ASSERT(args[i].IsNone());
+ }
+ ASSERT((arg_count == 0) || AreSameSizeAndType(args[0], args[1],
+ args[2], args[3],
+ pcs[0], pcs[1],
+ pcs[2], pcs[3]));
+
+ // Move the arguments into the appropriate PCS registers.
+ //
+ // Arranging an arbitrary list of registers into x1-x4 (or d0-d3) is
+ // surprisingly complicated.
+ //
+ // * For even numbers of registers, we push the arguments and then pop them
+ // into their final registers. This maintains 16-byte stack alignment in
+ // case csp is the stack pointer, since we're only handling X or D
+ // registers at this point.
+ //
+ // * For odd numbers of registers, we push and pop all but one register in
+ // the same way, but the left-over register is moved directly, since we
+ // can always safely move one register without clobbering any source.
+ if (arg_count >= 4) {
+ Push(args[3], args[2], args[1], args[0]);
+ } else if (arg_count >= 2) {
+ Push(args[1], args[0]);
+ }
+
+ if ((arg_count % 2) != 0) {
+ // Move the left-over register directly.
+ const CPURegister& leftover_arg = args[arg_count - 1];
+ const CPURegister& leftover_pcs = pcs[arg_count - 1];
+ if (leftover_arg.IsRegister()) {
+ Mov(Register(leftover_pcs), Register(leftover_arg));
+ } else {
+ Fmov(FPRegister(leftover_pcs), FPRegister(leftover_arg));
+ }
+ }
+
+ if (arg_count >= 4) {
+ Pop(pcs[0], pcs[1], pcs[2], pcs[3]);
+ } else if (arg_count >= 2) {
+ Pop(pcs[0], pcs[1]);
+ }
+
+ // Load the format string into x0, as per the procedure-call standard.
+ //
+ // To make the code as portable as possible, the format string is encoded
+ // directly in the instruction stream. It might be cleaner to encode it in a
+ // literal pool, but since Printf is usually used for debugging, it is
+ // beneficial for it to be minimally dependent on other features.
+ Label format_address;
+ Adr(x0, &format_address);
+
+ // Emit the format string directly in the instruction stream.
+ { BlockPoolsScope scope(this);
+ Label after_data;
+ B(&after_data);
+ Bind(&format_address);
+ EmitStringData(format);
+ Unreachable();
+ Bind(&after_data);
+ }
+
+ // We don't pass any arguments on the stack, but we still need to align the C
+ // stack pointer to a 16-byte boundary for PCS compliance.
+ if (!csp.Is(StackPointer())) {
+ Bic(csp, StackPointer(), 0xf);
+ }
+
+ CallPrintf(pcs[0].type());
+}
+
+
+void MacroAssembler::CallPrintf(CPURegister::RegisterType type) {
+ // A call to printf needs special handling for the simulator, since the system
+ // printf function will use a different instruction set and the procedure-call
+ // standard will not be compatible.
+#ifdef USE_SIMULATOR
+ { InstructionAccurateScope scope(this, kPrintfLength / kInstructionSize);
+ hlt(kImmExceptionIsPrintf);
+ dc32(type);
+ }
+#else
+ Call(FUNCTION_ADDR(printf), RelocInfo::EXTERNAL_REFERENCE);
+#endif
+}
+
+
+void MacroAssembler::Printf(const char * format,
+ const CPURegister& arg0,
+ const CPURegister& arg1,
+ const CPURegister& arg2,
+ const CPURegister& arg3) {
+ // Printf is expected to preserve all registers, so make sure that none are
+ // available as scratch registers until we've preserved them.
+ RegList old_tmp_list = TmpList()->list();
+ RegList old_fp_tmp_list = FPTmpList()->list();
+ TmpList()->set_list(0);
+ FPTmpList()->set_list(0);
+
+ // Preserve all caller-saved registers as well as NZCV.
+ // If csp is the stack pointer, PushCPURegList asserts that the size of each
+ // list is a multiple of 16 bytes.
+ PushCPURegList(kCallerSaved);
+ PushCPURegList(kCallerSavedFP);
+
+ // We can use caller-saved registers as scratch values (except for argN).
+ CPURegList tmp_list = kCallerSaved;
+ CPURegList fp_tmp_list = kCallerSavedFP;
+ tmp_list.Remove(arg0, arg1, arg2, arg3);
+ fp_tmp_list.Remove(arg0, arg1, arg2, arg3);
+ TmpList()->set_list(tmp_list.list());
+ FPTmpList()->set_list(fp_tmp_list.list());
+
+ // Preserve NZCV.
+ { UseScratchRegisterScope temps(this);
+ Register tmp = temps.AcquireX();
+ Mrs(tmp, NZCV);
+ Push(tmp, xzr);
+ }
+
+ PrintfNoPreserve(format, arg0, arg1, arg2, arg3);
+
+ { UseScratchRegisterScope temps(this);
+ Register tmp = temps.AcquireX();
+ Pop(xzr, tmp);
+ Msr(NZCV, tmp);
+ }
+
+ PopCPURegList(kCallerSavedFP);
+ PopCPURegList(kCallerSaved);
+
+ TmpList()->set_list(old_tmp_list);
+ FPTmpList()->set_list(old_fp_tmp_list);
+}
+
+
+void MacroAssembler::EmitFrameSetupForCodeAgePatching() {
+ // TODO(jbramley): Other architectures use the internal memcpy to copy the
+ // sequence. If this is a performance bottleneck, we should consider caching
+ // the sequence and copying it in the same way.
+ InstructionAccurateScope scope(this, kCodeAgeSequenceSize / kInstructionSize);
+ ASSERT(jssp.Is(StackPointer()));
+ EmitFrameSetupForCodeAgePatching(this);
+}
+
+
+void MacroAssembler::EmitCodeAgeSequence(Code* stub) {
+ InstructionAccurateScope scope(this, kCodeAgeSequenceSize / kInstructionSize);
+ ASSERT(jssp.Is(StackPointer()));
+ EmitCodeAgeSequence(this, stub);
+}
+
+
+#undef __
+#define __ assm->
+
+
+void MacroAssembler::EmitFrameSetupForCodeAgePatching(Assembler * assm) {
+ Label start;
+ __ bind(&start);
+
+ // We can do this sequence using four instructions, but the code ageing
+ // sequence that patches it needs five, so we use the extra space to try to
+ // simplify some addressing modes and remove some dependencies (compared to
+ // using two stp instructions with write-back).
+ __ sub(jssp, jssp, 4 * kXRegSize);
+ __ sub(csp, csp, 4 * kXRegSize);
+ __ stp(x1, cp, MemOperand(jssp, 0 * kXRegSize));
+ __ stp(fp, lr, MemOperand(jssp, 2 * kXRegSize));
+ __ add(fp, jssp, StandardFrameConstants::kFixedFrameSizeFromFp);
+
+ __ AssertSizeOfCodeGeneratedSince(&start, kCodeAgeSequenceSize);
+}
+
+
+void MacroAssembler::EmitCodeAgeSequence(Assembler * assm,
+ Code * stub) {
+ Label start;
+ __ bind(&start);
+ // When the stub is called, the sequence is replaced with the young sequence
+ // (as in EmitFrameSetupForCodeAgePatching). After the code is replaced, the
+ // stub jumps to &start, stored in x0. The young sequence does not call the
+ // stub so there is no infinite loop here.
+ //
+ // A branch (br) is used rather than a call (blr) because this code replaces
+ // the frame setup code that would normally preserve lr.
+ __ LoadLiteral(ip0, kCodeAgeStubEntryOffset);
+ __ adr(x0, &start);
+ __ br(ip0);
+ // IsCodeAgeSequence in codegen-arm64.cc assumes that the code generated up
+ // until now (kCodeAgeStubEntryOffset) is the same for all code age sequences.
+ __ AssertSizeOfCodeGeneratedSince(&start, kCodeAgeStubEntryOffset);
+ if (stub) {
+ __ dc64(reinterpret_cast<uint64_t>(stub->instruction_start()));
+ __ AssertSizeOfCodeGeneratedSince(&start, kCodeAgeSequenceSize);
+ }
+}
+
+
+bool MacroAssembler::IsYoungSequence(byte* sequence) {
+ // Generate a young sequence to compare with.
+ const int length = kCodeAgeSequenceSize / kInstructionSize;
+ static bool initialized = false;
+ static byte young[kCodeAgeSequenceSize];
+ if (!initialized) {
+ PatchingAssembler patcher(young, length);
+ // The young sequence is the frame setup code for FUNCTION code types. It is
+ // generated by FullCodeGenerator::Generate.
+ MacroAssembler::EmitFrameSetupForCodeAgePatching(&patcher);
+ initialized = true;
+ }
+
+ bool is_young = (memcmp(sequence, young, kCodeAgeSequenceSize) == 0);
+ ASSERT(is_young || IsCodeAgeSequence(sequence));
+ return is_young;
+}
+
+
+#ifdef DEBUG
+bool MacroAssembler::IsCodeAgeSequence(byte* sequence) {
+ // The old sequence varies depending on the code age. However, the code up
+ // until kCodeAgeStubEntryOffset does not change, so we can check that part to
+ // get a reasonable level of verification.
+ const int length = kCodeAgeStubEntryOffset / kInstructionSize;
+ static bool initialized = false;
+ static byte old[kCodeAgeStubEntryOffset];
+ if (!initialized) {
+ PatchingAssembler patcher(old, length);
+ MacroAssembler::EmitCodeAgeSequence(&patcher, NULL);
+ initialized = true;
+ }
+ return memcmp(sequence, old, kCodeAgeStubEntryOffset) == 0;
+}
+#endif
+
+
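+// Compute dividend / divisor via the multiplier and shift provided by
+// MultiplierAndShift: roughly result = (dividend * multiplier) >> (32 + shift),
+// plus the usual sign corrections for truncating division. For instance, the
+// classic magic number for dividing by 3 is 0x55555556 with shift 0, and
+// (9 * 0x55555556) >> 32 == 3.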
+void MacroAssembler::TruncatingDiv(Register result,
+ Register dividend,
+ int32_t divisor) {
+ ASSERT(!AreAliased(result, dividend));
+ ASSERT(result.Is32Bits() && dividend.Is32Bits());
+ MultiplierAndShift ms(divisor);
+ Mov(result, ms.multiplier());
+ Smull(result.X(), dividend, result);
+ Asr(result.X(), result.X(), 32);
+ if (divisor > 0 && ms.multiplier() < 0) Add(result, result, dividend);
+ if (divisor < 0 && ms.multiplier() > 0) Sub(result, result, dividend);
+ if (ms.shift() > 0) Asr(result, result, ms.shift());
+ Add(result, result, Operand(dividend, LSR, 31));
+}
+
+
+#undef __
+
+
+UseScratchRegisterScope::~UseScratchRegisterScope() {
+ available_->set_list(old_available_);
+ availablefp_->set_list(old_availablefp_);
+}
+
+
+Register UseScratchRegisterScope::AcquireSameSizeAs(const Register& reg) {
+ int code = AcquireNextAvailable(available_).code();
+ return Register::Create(code, reg.SizeInBits());
+}
+
+
+FPRegister UseScratchRegisterScope::AcquireSameSizeAs(const FPRegister& reg) {
+ int code = AcquireNextAvailable(availablefp_).code();
+ return FPRegister::Create(code, reg.SizeInBits());
+}
+
+
+CPURegister UseScratchRegisterScope::AcquireNextAvailable(
+ CPURegList* available) {
+ CHECK(!available->IsEmpty());
+ CPURegister result = available->PopLowestIndex();
+ ASSERT(!AreAliased(result, xzr, csp));
+ return result;
+}
+
+
+CPURegister UseScratchRegisterScope::UnsafeAcquire(CPURegList* available,
+ const CPURegister& reg) {
+ ASSERT(available->IncludesAliasOf(reg));
+ available->Remove(reg);
+ return reg;
+}
+
+
+#define __ masm->
+
+
+void InlineSmiCheckInfo::Emit(MacroAssembler* masm, const Register& reg,
+ const Label* smi_check) {
+ Assembler::BlockPoolsScope scope(masm);
+ if (reg.IsValid()) {
+ ASSERT(smi_check->is_bound());
+ ASSERT(reg.Is64Bits());
+
+    // Encode the register (x0-x30) in the lowest 5 bits, then the offset to
+    // 'smi_check' in the other bits. The possible offset is limited by the
+    // fact that we use BitField to pack the data, and the underlying data
+    // type is a uint32_t.
+ uint32_t delta = __ InstructionsGeneratedSince(smi_check);
+ __ InlineData(RegisterBits::encode(reg.code()) | DeltaBits::encode(delta));
+ } else {
+ ASSERT(!smi_check->is_bound());
+
+ // An offset of 0 indicates that there is no patch site.
+ __ InlineData(0);
+ }
+}
+
+
+InlineSmiCheckInfo::InlineSmiCheckInfo(Address info)
+ : reg_(NoReg), smi_check_(NULL) {
+ InstructionSequence* inline_data = InstructionSequence::At(info);
+ ASSERT(inline_data->IsInlineData());
+ if (inline_data->IsInlineData()) {
+ uint64_t payload = inline_data->InlineData();
+ // We use BitField to decode the payload, and BitField can only handle
+ // 32-bit values.
+ ASSERT(is_uint32(payload));
+ if (payload != 0) {
+ int reg_code = RegisterBits::decode(payload);
+ reg_ = Register::XRegFromCode(reg_code);
+ uint64_t smi_check_delta = DeltaBits::decode(payload);
+ ASSERT(smi_check_delta != 0);
+ smi_check_ = inline_data->preceding(smi_check_delta);
+ }
+ }
+}
+
+
+#undef __
+
+
+} } // namespace v8::internal
+
+#endif // V8_TARGET_ARCH_ARM64
diff --git a/deps/v8/src/arm64/macro-assembler-arm64.h b/deps/v8/src/arm64/macro-assembler-arm64.h
new file mode 100644
index 0000000000..1777c38e35
--- /dev/null
+++ b/deps/v8/src/arm64/macro-assembler-arm64.h
@@ -0,0 +1,2310 @@
+// Copyright 2013 the V8 project authors. All rights reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following
+// disclaimer in the documentation and/or other materials provided
+// with the distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived
+// from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+#ifndef V8_ARM64_MACRO_ASSEMBLER_ARM64_H_
+#define V8_ARM64_MACRO_ASSEMBLER_ARM64_H_
+
+#include <vector>
+
+#include "v8globals.h"
+#include "globals.h"
+
+#include "arm64/assembler-arm64-inl.h"
+
+namespace v8 {
+namespace internal {
+
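+// Each entry in LS_MACRO_LIST describes a load/store macro:
+// V(macro name, register argument type, register parameter name, assembler op)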
+#define LS_MACRO_LIST(V) \
+ V(Ldrb, Register&, rt, LDRB_w) \
+ V(Strb, Register&, rt, STRB_w) \
+ V(Ldrsb, Register&, rt, rt.Is64Bits() ? LDRSB_x : LDRSB_w) \
+ V(Ldrh, Register&, rt, LDRH_w) \
+ V(Strh, Register&, rt, STRH_w) \
+ V(Ldrsh, Register&, rt, rt.Is64Bits() ? LDRSH_x : LDRSH_w) \
+ V(Ldr, CPURegister&, rt, LoadOpFor(rt)) \
+ V(Str, CPURegister&, rt, StoreOpFor(rt)) \
+ V(Ldrsw, Register&, rt, LDRSW_x)
+
+
+// ----------------------------------------------------------------------------
+// Static helper functions
+
+// Generate a MemOperand for loading a field from an object.
+inline MemOperand FieldMemOperand(Register object, int offset);
+inline MemOperand UntagSmiFieldMemOperand(Register object, int offset);
+
+// Generate a MemOperand for loading a SMI from memory.
+inline MemOperand UntagSmiMemOperand(Register object, int offset);
+
+
+// ----------------------------------------------------------------------------
+// MacroAssembler
+
+enum BranchType {
+ // Copies of architectural conditions.
+  // The associated conditions can be used in place of those; the code will
+  // take care of reinterpreting them with the correct type.
+ integer_eq = eq,
+ integer_ne = ne,
+ integer_hs = hs,
+ integer_lo = lo,
+ integer_mi = mi,
+ integer_pl = pl,
+ integer_vs = vs,
+ integer_vc = vc,
+ integer_hi = hi,
+ integer_ls = ls,
+ integer_ge = ge,
+ integer_lt = lt,
+ integer_gt = gt,
+ integer_le = le,
+ integer_al = al,
+ integer_nv = nv,
+
+ // These two are *different* from the architectural codes al and nv.
+ // 'always' is used to generate unconditional branches.
+  // 'never' is used to not generate a branch (generally as the inverse
+  // branch type of 'always').
+ always, never,
+ // cbz and cbnz
+ reg_zero, reg_not_zero,
+ // tbz and tbnz
+ reg_bit_clear, reg_bit_set,
+
+ // Aliases.
+ kBranchTypeFirstCondition = eq,
+ kBranchTypeLastCondition = nv,
+ kBranchTypeFirstUsingReg = reg_zero,
+ kBranchTypeFirstUsingBit = reg_bit_clear
+};
+
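+// Invert a branch type. Architectural conditions are inverted via
+// InvertCondition(); the remaining branch types come in adjacent pairs (see
+// the STATIC_ASSERT in MacroAssembler), so flipping the lowest bit selects
+// the opposite type.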
+inline BranchType InvertBranchType(BranchType type) {
+ if (kBranchTypeFirstCondition <= type && type <= kBranchTypeLastCondition) {
+ return static_cast<BranchType>(
+ InvertCondition(static_cast<Condition>(type)));
+ } else {
+ return static_cast<BranchType>(type ^ 1);
+ }
+}
+
+enum RememberedSetAction { EMIT_REMEMBERED_SET, OMIT_REMEMBERED_SET };
+enum SmiCheck { INLINE_SMI_CHECK, OMIT_SMI_CHECK };
+enum LinkRegisterStatus { kLRHasNotBeenSaved, kLRHasBeenSaved };
+enum TargetAddressStorageMode {
+ CAN_INLINE_TARGET_ADDRESS,
+ NEVER_INLINE_TARGET_ADDRESS
+};
+enum UntagMode { kNotSpeculativeUntag, kSpeculativeUntag };
+enum ArrayHasHoles { kArrayCantHaveHoles, kArrayCanHaveHoles };
+enum CopyHint { kCopyUnknown, kCopyShort, kCopyLong };
+enum DiscardMoveMode { kDontDiscardForSameWReg, kDiscardForSameWReg };
+enum SeqStringSetCharCheckIndexType { kIndexIsSmi, kIndexIsInteger32 };
+
+class MacroAssembler : public Assembler {
+ public:
+ MacroAssembler(Isolate* isolate, byte * buffer, unsigned buffer_size);
+
+ inline Handle<Object> CodeObject();
+
+ // Instruction set functions ------------------------------------------------
+ // Logical macros.
+ inline void And(const Register& rd,
+ const Register& rn,
+ const Operand& operand);
+ inline void Ands(const Register& rd,
+ const Register& rn,
+ const Operand& operand);
+ inline void Bic(const Register& rd,
+ const Register& rn,
+ const Operand& operand);
+ inline void Bics(const Register& rd,
+ const Register& rn,
+ const Operand& operand);
+ inline void Orr(const Register& rd,
+ const Register& rn,
+ const Operand& operand);
+ inline void Orn(const Register& rd,
+ const Register& rn,
+ const Operand& operand);
+ inline void Eor(const Register& rd,
+ const Register& rn,
+ const Operand& operand);
+ inline void Eon(const Register& rd,
+ const Register& rn,
+ const Operand& operand);
+ inline void Tst(const Register& rn, const Operand& operand);
+ void LogicalMacro(const Register& rd,
+ const Register& rn,
+ const Operand& operand,
+ LogicalOp op);
+
+ // Add and sub macros.
+ inline void Add(const Register& rd,
+ const Register& rn,
+ const Operand& operand);
+ inline void Adds(const Register& rd,
+ const Register& rn,
+ const Operand& operand);
+ inline void Sub(const Register& rd,
+ const Register& rn,
+ const Operand& operand);
+ inline void Subs(const Register& rd,
+ const Register& rn,
+ const Operand& operand);
+ inline void Cmn(const Register& rn, const Operand& operand);
+ inline void Cmp(const Register& rn, const Operand& operand);
+ inline void Neg(const Register& rd,
+ const Operand& operand);
+ inline void Negs(const Register& rd,
+ const Operand& operand);
+
+ void AddSubMacro(const Register& rd,
+ const Register& rn,
+ const Operand& operand,
+ FlagsUpdate S,
+ AddSubOp op);
+
+ // Add/sub with carry macros.
+ inline void Adc(const Register& rd,
+ const Register& rn,
+ const Operand& operand);
+ inline void Adcs(const Register& rd,
+ const Register& rn,
+ const Operand& operand);
+ inline void Sbc(const Register& rd,
+ const Register& rn,
+ const Operand& operand);
+ inline void Sbcs(const Register& rd,
+ const Register& rn,
+ const Operand& operand);
+ inline void Ngc(const Register& rd,
+ const Operand& operand);
+ inline void Ngcs(const Register& rd,
+ const Operand& operand);
+ void AddSubWithCarryMacro(const Register& rd,
+ const Register& rn,
+ const Operand& operand,
+ FlagsUpdate S,
+ AddSubWithCarryOp op);
+
+ // Move macros.
+ void Mov(const Register& rd,
+ const Operand& operand,
+ DiscardMoveMode discard_mode = kDontDiscardForSameWReg);
+ void Mov(const Register& rd, uint64_t imm);
+ inline void Mvn(const Register& rd, uint64_t imm);
+ void Mvn(const Register& rd, const Operand& operand);
+ static bool IsImmMovn(uint64_t imm, unsigned reg_size);
+ static bool IsImmMovz(uint64_t imm, unsigned reg_size);
+ static unsigned CountClearHalfWords(uint64_t imm, unsigned reg_size);
+
+ // Conditional macros.
+ inline void Ccmp(const Register& rn,
+ const Operand& operand,
+ StatusFlags nzcv,
+ Condition cond);
+ inline void Ccmn(const Register& rn,
+ const Operand& operand,
+ StatusFlags nzcv,
+ Condition cond);
+ void ConditionalCompareMacro(const Register& rn,
+ const Operand& operand,
+ StatusFlags nzcv,
+ Condition cond,
+ ConditionalCompareOp op);
+ void Csel(const Register& rd,
+ const Register& rn,
+ const Operand& operand,
+ Condition cond);
+
+ // Load/store macros.
+#define DECLARE_FUNCTION(FN, REGTYPE, REG, OP) \
+ inline void FN(const REGTYPE REG, const MemOperand& addr);
+ LS_MACRO_LIST(DECLARE_FUNCTION)
+#undef DECLARE_FUNCTION
+
+ void LoadStoreMacro(const CPURegister& rt,
+ const MemOperand& addr,
+ LoadStoreOp op);
+
+ // V8-specific load/store helpers.
+ void Load(const Register& rt, const MemOperand& addr, Representation r);
+ void Store(const Register& rt, const MemOperand& addr, Representation r);
+
+ // Remaining instructions are simple pass-through calls to the assembler.
+ inline void Adr(const Register& rd, Label* label);
+ inline void Asr(const Register& rd, const Register& rn, unsigned shift);
+ inline void Asr(const Register& rd, const Register& rn, const Register& rm);
+
+ // Branch type inversion relies on these relations.
+ STATIC_ASSERT((reg_zero == (reg_not_zero ^ 1)) &&
+ (reg_bit_clear == (reg_bit_set ^ 1)) &&
+ (always == (never ^ 1)));
+
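+  // Branch to 'label' according to 'type'. The 'reg' and 'bit' arguments are
+  // only used by the register-based branch types (reg_zero, reg_not_zero,
+  // reg_bit_clear and reg_bit_set); 'bit' applies to the bit-test types only.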
+ void B(Label* label, BranchType type, Register reg = NoReg, int bit = -1);
+
+ inline void B(Label* label);
+ inline void B(Condition cond, Label* label);
+ void B(Label* label, Condition cond);
+ inline void Bfi(const Register& rd,
+ const Register& rn,
+ unsigned lsb,
+ unsigned width);
+ inline void Bfxil(const Register& rd,
+ const Register& rn,
+ unsigned lsb,
+ unsigned width);
+ inline void Bind(Label* label);
+ inline void Bl(Label* label);
+ inline void Blr(const Register& xn);
+ inline void Br(const Register& xn);
+ inline void Brk(int code);
+ void Cbnz(const Register& rt, Label* label);
+ void Cbz(const Register& rt, Label* label);
+ inline void Cinc(const Register& rd, const Register& rn, Condition cond);
+ inline void Cinv(const Register& rd, const Register& rn, Condition cond);
+ inline void Cls(const Register& rd, const Register& rn);
+ inline void Clz(const Register& rd, const Register& rn);
+ inline void Cneg(const Register& rd, const Register& rn, Condition cond);
+ inline void CzeroX(const Register& rd, Condition cond);
+ inline void CmovX(const Register& rd, const Register& rn, Condition cond);
+ inline void Cset(const Register& rd, Condition cond);
+ inline void Csetm(const Register& rd, Condition cond);
+ inline void Csinc(const Register& rd,
+ const Register& rn,
+ const Register& rm,
+ Condition cond);
+ inline void Csinv(const Register& rd,
+ const Register& rn,
+ const Register& rm,
+ Condition cond);
+ inline void Csneg(const Register& rd,
+ const Register& rn,
+ const Register& rm,
+ Condition cond);
+ inline void Dmb(BarrierDomain domain, BarrierType type);
+ inline void Dsb(BarrierDomain domain, BarrierType type);
+ inline void Debug(const char* message, uint32_t code, Instr params = BREAK);
+ inline void Extr(const Register& rd,
+ const Register& rn,
+ const Register& rm,
+ unsigned lsb);
+ inline void Fabs(const FPRegister& fd, const FPRegister& fn);
+ inline void Fadd(const FPRegister& fd,
+ const FPRegister& fn,
+ const FPRegister& fm);
+ inline void Fccmp(const FPRegister& fn,
+ const FPRegister& fm,
+ StatusFlags nzcv,
+ Condition cond);
+ inline void Fcmp(const FPRegister& fn, const FPRegister& fm);
+ inline void Fcmp(const FPRegister& fn, double value);
+ inline void Fcsel(const FPRegister& fd,
+ const FPRegister& fn,
+ const FPRegister& fm,
+ Condition cond);
+ inline void Fcvt(const FPRegister& fd, const FPRegister& fn);
+ inline void Fcvtas(const Register& rd, const FPRegister& fn);
+ inline void Fcvtau(const Register& rd, const FPRegister& fn);
+ inline void Fcvtms(const Register& rd, const FPRegister& fn);
+ inline void Fcvtmu(const Register& rd, const FPRegister& fn);
+ inline void Fcvtns(const Register& rd, const FPRegister& fn);
+ inline void Fcvtnu(const Register& rd, const FPRegister& fn);
+ inline void Fcvtzs(const Register& rd, const FPRegister& fn);
+ inline void Fcvtzu(const Register& rd, const FPRegister& fn);
+ inline void Fdiv(const FPRegister& fd,
+ const FPRegister& fn,
+ const FPRegister& fm);
+ inline void Fmadd(const FPRegister& fd,
+ const FPRegister& fn,
+ const FPRegister& fm,
+ const FPRegister& fa);
+ inline void Fmax(const FPRegister& fd,
+ const FPRegister& fn,
+ const FPRegister& fm);
+ inline void Fmaxnm(const FPRegister& fd,
+ const FPRegister& fn,
+ const FPRegister& fm);
+ inline void Fmin(const FPRegister& fd,
+ const FPRegister& fn,
+ const FPRegister& fm);
+ inline void Fminnm(const FPRegister& fd,
+ const FPRegister& fn,
+ const FPRegister& fm);
+ inline void Fmov(FPRegister fd, FPRegister fn);
+ inline void Fmov(FPRegister fd, Register rn);
+ // Provide explicit double and float interfaces for FP immediate moves, rather
+ // than relying on implicit C++ casts. This allows signalling NaNs to be
+ // preserved when the immediate matches the format of fd. Most systems convert
+ // signalling NaNs to quiet NaNs when converting between float and double.
+ inline void Fmov(FPRegister fd, double imm);
+ inline void Fmov(FPRegister fd, float imm);
+ // Provide a template to allow other types to be converted automatically.
+ template<typename T>
+ void Fmov(FPRegister fd, T imm) {
+ ASSERT(allow_macro_instructions_);
+ Fmov(fd, static_cast<double>(imm));
+ }
+ inline void Fmov(Register rd, FPRegister fn);
+ inline void Fmsub(const FPRegister& fd,
+ const FPRegister& fn,
+ const FPRegister& fm,
+ const FPRegister& fa);
+ inline void Fmul(const FPRegister& fd,
+ const FPRegister& fn,
+ const FPRegister& fm);
+ inline void Fneg(const FPRegister& fd, const FPRegister& fn);
+ inline void Fnmadd(const FPRegister& fd,
+ const FPRegister& fn,
+ const FPRegister& fm,
+ const FPRegister& fa);
+ inline void Fnmsub(const FPRegister& fd,
+ const FPRegister& fn,
+ const FPRegister& fm,
+ const FPRegister& fa);
+ inline void Frinta(const FPRegister& fd, const FPRegister& fn);
+ inline void Frintn(const FPRegister& fd, const FPRegister& fn);
+ inline void Frintz(const FPRegister& fd, const FPRegister& fn);
+ inline void Fsqrt(const FPRegister& fd, const FPRegister& fn);
+ inline void Fsub(const FPRegister& fd,
+ const FPRegister& fn,
+ const FPRegister& fm);
+ inline void Hint(SystemHint code);
+ inline void Hlt(int code);
+ inline void Isb();
+ inline void Ldnp(const CPURegister& rt,
+ const CPURegister& rt2,
+ const MemOperand& src);
+ inline void Ldp(const CPURegister& rt,
+ const CPURegister& rt2,
+ const MemOperand& src);
+ inline void Ldpsw(const Register& rt,
+ const Register& rt2,
+ const MemOperand& src);
+ // Provide both double and float interfaces for FP immediate loads, rather
+ // than relying on implicit C++ casts. This allows signalling NaNs to be
+ // preserved when the immediate matches the format of fd. Most systems convert
+ // signalling NaNs to quiet NaNs when converting between float and double.
+ inline void Ldr(const FPRegister& ft, double imm);
+ inline void Ldr(const FPRegister& ft, float imm);
+ inline void Ldr(const Register& rt, uint64_t imm);
+ inline void Lsl(const Register& rd, const Register& rn, unsigned shift);
+ inline void Lsl(const Register& rd, const Register& rn, const Register& rm);
+ inline void Lsr(const Register& rd, const Register& rn, unsigned shift);
+ inline void Lsr(const Register& rd, const Register& rn, const Register& rm);
+ inline void Madd(const Register& rd,
+ const Register& rn,
+ const Register& rm,
+ const Register& ra);
+ inline void Mneg(const Register& rd, const Register& rn, const Register& rm);
+ inline void Mov(const Register& rd, const Register& rm);
+ inline void Movk(const Register& rd, uint64_t imm, int shift = -1);
+ inline void Mrs(const Register& rt, SystemRegister sysreg);
+ inline void Msr(SystemRegister sysreg, const Register& rt);
+ inline void Msub(const Register& rd,
+ const Register& rn,
+ const Register& rm,
+ const Register& ra);
+ inline void Mul(const Register& rd, const Register& rn, const Register& rm);
+ inline void Nop() { nop(); }
+ inline void Rbit(const Register& rd, const Register& rn);
+ inline void Ret(const Register& xn = lr);
+ inline void Rev(const Register& rd, const Register& rn);
+ inline void Rev16(const Register& rd, const Register& rn);
+ inline void Rev32(const Register& rd, const Register& rn);
+ inline void Ror(const Register& rd, const Register& rs, unsigned shift);
+ inline void Ror(const Register& rd, const Register& rn, const Register& rm);
+ inline void Sbfiz(const Register& rd,
+ const Register& rn,
+ unsigned lsb,
+ unsigned width);
+ inline void Sbfx(const Register& rd,
+ const Register& rn,
+ unsigned lsb,
+ unsigned width);
+ inline void Scvtf(const FPRegister& fd,
+ const Register& rn,
+ unsigned fbits = 0);
+ inline void Sdiv(const Register& rd, const Register& rn, const Register& rm);
+ inline void Smaddl(const Register& rd,
+ const Register& rn,
+ const Register& rm,
+ const Register& ra);
+ inline void Smsubl(const Register& rd,
+ const Register& rn,
+ const Register& rm,
+ const Register& ra);
+ inline void Smull(const Register& rd,
+ const Register& rn,
+ const Register& rm);
+ inline void Smulh(const Register& rd,
+ const Register& rn,
+ const Register& rm);
+ inline void Stnp(const CPURegister& rt,
+ const CPURegister& rt2,
+ const MemOperand& dst);
+ inline void Stp(const CPURegister& rt,
+ const CPURegister& rt2,
+ const MemOperand& dst);
+ inline void Sxtb(const Register& rd, const Register& rn);
+ inline void Sxth(const Register& rd, const Register& rn);
+ inline void Sxtw(const Register& rd, const Register& rn);
+ void Tbnz(const Register& rt, unsigned bit_pos, Label* label);
+ void Tbz(const Register& rt, unsigned bit_pos, Label* label);
+ inline void Ubfiz(const Register& rd,
+ const Register& rn,
+ unsigned lsb,
+ unsigned width);
+ inline void Ubfx(const Register& rd,
+ const Register& rn,
+ unsigned lsb,
+ unsigned width);
+ inline void Ucvtf(const FPRegister& fd,
+ const Register& rn,
+ unsigned fbits = 0);
+ inline void Udiv(const Register& rd, const Register& rn, const Register& rm);
+ inline void Umaddl(const Register& rd,
+ const Register& rn,
+ const Register& rm,
+ const Register& ra);
+ inline void Umsubl(const Register& rd,
+ const Register& rn,
+ const Register& rm,
+ const Register& ra);
+ inline void Uxtb(const Register& rd, const Register& rn);
+ inline void Uxth(const Register& rd, const Register& rn);
+ inline void Uxtw(const Register& rd, const Register& rn);
+
+ // Pseudo-instructions ------------------------------------------------------
+
+ // Compute rd = abs(rm).
+ // This function clobbers the condition flags.
+ //
+ // If rm is the minimum representable value, the result is not representable.
+ // Handlers for each case can be specified using the relevant labels.
+ void Abs(const Register& rd, const Register& rm,
+ Label * is_not_representable = NULL,
+ Label * is_representable = NULL);
+
+ // Push or pop up to 4 registers of the same width to or from the stack,
+ // using the current stack pointer as set by SetStackPointer.
+ //
+ // If an argument register is 'NoReg', all further arguments are also assumed
+ // to be 'NoReg', and are thus not pushed or popped.
+ //
+ // Arguments are ordered such that "Push(a, b);" is functionally equivalent
+ // to "Push(a); Push(b);".
+ //
+ // It is valid to push the same register more than once, and there is no
+ // restriction on the order in which registers are specified.
+ //
+ // It is not valid to pop into the same register more than once in one
+ // operation, not even into the zero register.
+ //
+ // If the current stack pointer (as set by SetStackPointer) is csp, then it
+ // must be aligned to 16 bytes on entry and the total size of the specified
+ // registers must also be a multiple of 16 bytes.
+ //
+ // Even if the current stack pointer is not the system stack pointer (csp),
+ // Push (and derived methods) will still modify the system stack pointer in
+ // order to comply with ABI rules about accessing memory below the system
+ // stack pointer.
+ //
+ // Other than the registers passed into Pop, the stack pointer and (possibly)
+ // the system stack pointer, these methods do not modify any other registers.
+ void Push(const CPURegister& src0, const CPURegister& src1 = NoReg,
+ const CPURegister& src2 = NoReg, const CPURegister& src3 = NoReg);
+ void Push(const CPURegister& src0, const CPURegister& src1,
+ const CPURegister& src2, const CPURegister& src3,
+ const CPURegister& src4, const CPURegister& src5 = NoReg,
+ const CPURegister& src6 = NoReg, const CPURegister& src7 = NoReg);
+ void Pop(const CPURegister& dst0, const CPURegister& dst1 = NoReg,
+ const CPURegister& dst2 = NoReg, const CPURegister& dst3 = NoReg);
+
+ // Alternative forms of Push and Pop, taking a RegList or CPURegList that
+ // specifies the registers that are to be pushed or popped. Higher-numbered
+ // registers are associated with higher memory addresses (as in the A32 push
+ // and pop instructions).
+ //
+ // (Push|Pop)SizeRegList allow you to specify the register size as a
+ // parameter. Only kXRegSizeInBits, kWRegSizeInBits, kDRegSizeInBits and
+ // kSRegSizeInBits are supported.
+ //
+ // Otherwise, (Push|Pop)(CPU|X|W|D|S)RegList is preferred.
+ void PushCPURegList(CPURegList registers);
+ void PopCPURegList(CPURegList registers);
+
+ inline void PushSizeRegList(RegList registers, unsigned reg_size,
+ CPURegister::RegisterType type = CPURegister::kRegister) {
+ PushCPURegList(CPURegList(type, reg_size, registers));
+ }
+ inline void PopSizeRegList(RegList registers, unsigned reg_size,
+ CPURegister::RegisterType type = CPURegister::kRegister) {
+ PopCPURegList(CPURegList(type, reg_size, registers));
+ }
+ inline void PushXRegList(RegList regs) {
+ PushSizeRegList(regs, kXRegSizeInBits);
+ }
+ inline void PopXRegList(RegList regs) {
+ PopSizeRegList(regs, kXRegSizeInBits);
+ }
+ inline void PushWRegList(RegList regs) {
+ PushSizeRegList(regs, kWRegSizeInBits);
+ }
+ inline void PopWRegList(RegList regs) {
+ PopSizeRegList(regs, kWRegSizeInBits);
+ }
+ inline void PushDRegList(RegList regs) {
+ PushSizeRegList(regs, kDRegSizeInBits, CPURegister::kFPRegister);
+ }
+ inline void PopDRegList(RegList regs) {
+ PopSizeRegList(regs, kDRegSizeInBits, CPURegister::kFPRegister);
+ }
+ inline void PushSRegList(RegList regs) {
+ PushSizeRegList(regs, kSRegSizeInBits, CPURegister::kFPRegister);
+ }
+ inline void PopSRegList(RegList regs) {
+ PopSizeRegList(regs, kSRegSizeInBits, CPURegister::kFPRegister);
+ }
+
+ // Push the specified register 'count' times.
+ void PushMultipleTimes(CPURegister src, Register count);
+ void PushMultipleTimes(CPURegister src, int count);
+
+ // This is a convenience method for pushing a single Handle<Object>.
+ inline void Push(Handle<Object> handle);
+ void Push(Smi* smi) { Push(Handle<Smi>(smi, isolate())); }
+
+ // Aliases of Push and Pop, required for V8 compatibility.
+ inline void push(Register src) {
+ Push(src);
+ }
+ inline void pop(Register dst) {
+ Pop(dst);
+ }
+
+ // Sometimes callers need to push or pop multiple registers in a way that is
+ // difficult to structure efficiently for fixed Push or Pop calls. This scope
+ // allows push requests to be queued up, then flushed at once. The
+ // MacroAssembler will try to generate the most efficient sequence required.
+ //
+ // Unlike the other Push and Pop macros, PushPopQueue can handle mixed sets of
+ // register sizes and types.
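+  //
+  // For example, given a MacroAssembler* masm:
+  //   PushPopQueue queue(masm);
+  //   queue.Queue(x0);
+  //   queue.Queue(d0);
+  //   queue.PushQueued();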
+ class PushPopQueue {
+ public:
+ explicit PushPopQueue(MacroAssembler* masm) : masm_(masm), size_(0) { }
+
+ ~PushPopQueue() {
+ ASSERT(queued_.empty());
+ }
+
+ void Queue(const CPURegister& rt) {
+ size_ += rt.SizeInBytes();
+ queued_.push_back(rt);
+ }
+
+ void PushQueued();
+ void PopQueued();
+
+ private:
+ MacroAssembler* masm_;
+ int size_;
+ std::vector<CPURegister> queued_;
+ };
+
+ // Poke 'src' onto the stack. The offset is in bytes.
+ //
+ // If the current stack pointer (according to StackPointer()) is csp, then
+ // csp must be aligned to 16 bytes.
+ void Poke(const CPURegister& src, const Operand& offset);
+
+ // Peek at a value on the stack, and put it in 'dst'. The offset is in bytes.
+ //
+ // If the current stack pointer (according to StackPointer()) is csp, then
+ // csp must be aligned to 16 bytes.
+ void Peek(const CPURegister& dst, const Operand& offset);
+
+ // Poke 'src1' and 'src2' onto the stack. The values written will be adjacent
+ // with 'src2' at a higher address than 'src1'. The offset is in bytes.
+ //
+ // If the current stack pointer (according to StackPointer()) is csp, then
+ // csp must be aligned to 16 bytes.
+ void PokePair(const CPURegister& src1, const CPURegister& src2, int offset);
+
+ // Peek at two values on the stack, and put them in 'dst1' and 'dst2'. The
+ // values peeked will be adjacent, with the value in 'dst2' being from a
+ // higher address than 'dst1'. The offset is in bytes.
+ //
+ // If the current stack pointer (according to StackPointer()) is csp, then
+ // csp must be aligned to 16 bytes.
+ void PeekPair(const CPURegister& dst1, const CPURegister& dst2, int offset);
+
+ // Claim or drop stack space without actually accessing memory.
+ //
+ // In debug mode, both of these will write invalid data into the claimed or
+ // dropped space.
+ //
+ // If the current stack pointer (according to StackPointer()) is csp, then it
+ // must be aligned to 16 bytes and the size claimed or dropped must be a
+ // multiple of 16 bytes.
+ //
+ // Note that unit_size must be specified in bytes. For variants which take a
+ // Register count, the unit size must be a power of two.
+ inline void Claim(uint64_t count, uint64_t unit_size = kXRegSize);
+ inline void Claim(const Register& count,
+ uint64_t unit_size = kXRegSize);
+ inline void Drop(uint64_t count, uint64_t unit_size = kXRegSize);
+ inline void Drop(const Register& count,
+ uint64_t unit_size = kXRegSize);
+
+ // Variants of Claim and Drop, where the 'count' parameter is a SMI held in a
+ // register.
+ inline void ClaimBySMI(const Register& count_smi,
+ uint64_t unit_size = kXRegSize);
+ inline void DropBySMI(const Register& count_smi,
+ uint64_t unit_size = kXRegSize);
+
+ // Compare a register with an operand, and branch to label depending on the
+ // condition. May corrupt the status flags.
+ inline void CompareAndBranch(const Register& lhs,
+ const Operand& rhs,
+ Condition cond,
+ Label* label);
+
+ // Test the bits of register defined by bit_pattern, and branch if ANY of
+ // those bits are set. May corrupt the status flags.
+ inline void TestAndBranchIfAnySet(const Register& reg,
+ const uint64_t bit_pattern,
+ Label* label);
+
+ // Test the bits of register defined by bit_pattern, and branch if ALL of
+  // those bits are clear (i.e. not set). May corrupt the status flags.
+ inline void TestAndBranchIfAllClear(const Register& reg,
+ const uint64_t bit_pattern,
+ Label* label);
+
+ // Insert one or more instructions into the instruction stream that encode
+ // some caller-defined data. The instructions used will be executable with no
+ // side effects.
+ inline void InlineData(uint64_t data);
+
+ // Insert an instrumentation enable marker into the instruction stream.
+ inline void EnableInstrumentation();
+
+ // Insert an instrumentation disable marker into the instruction stream.
+ inline void DisableInstrumentation();
+
+ // Insert an instrumentation event marker into the instruction stream. These
+ // will be picked up by the instrumentation system to annotate an instruction
+  // profile. The argument marker_name must be a printable two-character
+  // string; it will be encoded in the event marker.
+ inline void AnnotateInstrumentation(const char* marker_name);
+
+ // If emit_debug_code() is true, emit a run-time check to ensure that
+ // StackPointer() does not point below the system stack pointer.
+ //
+ // Whilst it is architecturally legal for StackPointer() to point below csp,
+ // it can be evidence of a potential bug because the ABI forbids accesses
+ // below csp.
+ //
+ // If emit_debug_code() is false, this emits no code.
+ //
+ // If StackPointer() is the system stack pointer, this emits no code.
+ void AssertStackConsistency();
+
+ // Preserve the callee-saved registers (as defined by AAPCS64).
+ //
+ // Higher-numbered registers are pushed before lower-numbered registers, and
+ // thus get higher addresses.
+ // Floating-point registers are pushed before general-purpose registers, and
+ // thus get higher addresses.
+ //
+ // Note that registers are not checked for invalid values. Use this method
+ // only if you know that the GC won't try to examine the values on the stack.
+ //
+ // This method must not be called unless the current stack pointer (as set by
+ // SetStackPointer) is the system stack pointer (csp), and is aligned to
+ // ActivationFrameAlignment().
+ void PushCalleeSavedRegisters();
+
+ // Restore the callee-saved registers (as defined by AAPCS64).
+ //
+ // Higher-numbered registers are popped after lower-numbered registers, and
+ // thus come from higher addresses.
+ // Floating-point registers are popped after general-purpose registers, and
+ // thus come from higher addresses.
+ //
+ // This method must not be called unless the current stack pointer (as set by
+ // SetStackPointer) is the system stack pointer (csp), and is aligned to
+ // ActivationFrameAlignment().
+ void PopCalleeSavedRegisters();
+
+ // Set the current stack pointer, but don't generate any code.
+ inline void SetStackPointer(const Register& stack_pointer) {
+ ASSERT(!TmpList()->IncludesAliasOf(stack_pointer));
+ sp_ = stack_pointer;
+ }
+
+ // Return the current stack pointer, as set by SetStackPointer.
+ inline const Register& StackPointer() const {
+ return sp_;
+ }
+
+ // Align csp for a frame, as per ActivationFrameAlignment, and make it the
+ // current stack pointer.
+ inline void AlignAndSetCSPForFrame() {
+ int sp_alignment = ActivationFrameAlignment();
+ // AAPCS64 mandates at least 16-byte alignment.
+ ASSERT(sp_alignment >= 16);
+ ASSERT(IsPowerOf2(sp_alignment));
+ Bic(csp, StackPointer(), sp_alignment - 1);
+ SetStackPointer(csp);
+ }
+
+  // Move the system stack pointer (csp) down far enough to cover the space
+  // that is about to be used below the current stack pointer (according to
+  // StackPointer()). This must be called _before_ accessing that memory.
+ //
+ // This is necessary when pushing or otherwise adding things to the stack, to
+ // satisfy the AAPCS64 constraint that the memory below the system stack
+ // pointer is not accessed.
+ //
+ // This method asserts that StackPointer() is not csp, since the call does
+ // not make sense in that context.
+ inline void BumpSystemStackPointer(const Operand& space);
+
+ // Helpers ------------------------------------------------------------------
+ // Root register.
+ inline void InitializeRootRegister();
+
+ // Load an object from the root table.
+ void LoadRoot(Register destination,
+ Heap::RootListIndex index);
+ // Store an object to the root table.
+ void StoreRoot(Register source,
+ Heap::RootListIndex index);
+
+ // Load both TrueValue and FalseValue roots.
+ void LoadTrueFalseRoots(Register true_root, Register false_root);
+
+ void LoadHeapObject(Register dst, Handle<HeapObject> object);
+
+ void LoadObject(Register result, Handle<Object> object) {
+ AllowDeferredHandleDereference heap_object_check;
+ if (object->IsHeapObject()) {
+ LoadHeapObject(result, Handle<HeapObject>::cast(object));
+ } else {
+ ASSERT(object->IsSmi());
+ Mov(result, Operand(object));
+ }
+ }
+
+ static int SafepointRegisterStackIndex(int reg_code);
+
+  // This is required for compatibility with architecture-independent code.
+ // Remove if not needed.
+ inline void Move(Register dst, Register src) { Mov(dst, src); }
+
+ void LoadInstanceDescriptors(Register map,
+ Register descriptors);
+ void EnumLengthUntagged(Register dst, Register map);
+ void EnumLengthSmi(Register dst, Register map);
+ void NumberOfOwnDescriptors(Register dst, Register map);
+
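+  // Decode a BitField-encoded field from a smi-tagged value held in 'reg':
+  // shift past the smi tag and the field's offset, then extract as many bits
+  // as the field's mask contains. The decoded value is left in 'reg'.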
+ template<typename Field>
+ void DecodeField(Register reg) {
+ static const uint64_t shift = Field::kShift + kSmiShift;
+ static const uint64_t setbits = CountSetBits(Field::kMask, 32);
+ Ubfx(reg, reg, shift, setbits);
+ }
+
+ // ---- SMI and Number Utilities ----
+
+ inline void SmiTag(Register dst, Register src);
+ inline void SmiTag(Register smi);
+ inline void SmiUntag(Register dst, Register src);
+ inline void SmiUntag(Register smi);
+ inline void SmiUntagToDouble(FPRegister dst,
+ Register src,
+ UntagMode mode = kNotSpeculativeUntag);
+ inline void SmiUntagToFloat(FPRegister dst,
+ Register src,
+ UntagMode mode = kNotSpeculativeUntag);
+
+  // Compute the absolute value of 'smi' and leave the result in the 'smi'
+  // register. If 'smi' is the most negative SMI, the absolute value cannot
+ // be represented as a SMI and a jump to 'slow' is done.
+ void SmiAbs(const Register& smi, Label* slow);
+
+ inline void JumpIfSmi(Register value,
+ Label* smi_label,
+ Label* not_smi_label = NULL);
+ inline void JumpIfNotSmi(Register value, Label* not_smi_label);
+ inline void JumpIfBothSmi(Register value1,
+ Register value2,
+ Label* both_smi_label,
+ Label* not_smi_label = NULL);
+ inline void JumpIfEitherSmi(Register value1,
+ Register value2,
+ Label* either_smi_label,
+ Label* not_smi_label = NULL);
+ inline void JumpIfEitherNotSmi(Register value1,
+ Register value2,
+ Label* not_smi_label);
+ inline void JumpIfBothNotSmi(Register value1,
+ Register value2,
+ Label* not_smi_label);
+
+ // Abort execution if argument is a smi, enabled via --debug-code.
+ void AssertNotSmi(Register object, BailoutReason reason = kOperandIsASmi);
+ void AssertSmi(Register object, BailoutReason reason = kOperandIsNotASmi);
+
+ // Abort execution if argument is not a name, enabled via --debug-code.
+ void AssertName(Register object);
+
+ // Abort execution if argument is not undefined or an AllocationSite, enabled
+ // via --debug-code.
+ void AssertUndefinedOrAllocationSite(Register object, Register scratch);
+
+ // Abort execution if argument is not a string, enabled via --debug-code.
+ void AssertString(Register object);
+
+ void JumpForHeapNumber(Register object,
+ Register heap_number_map,
+ Label* on_heap_number,
+ Label* on_not_heap_number = NULL);
+ void JumpIfHeapNumber(Register object,
+ Label* on_heap_number,
+ Register heap_number_map = NoReg);
+ void JumpIfNotHeapNumber(Register object,
+ Label* on_not_heap_number,
+ Register heap_number_map = NoReg);
+
+ // Sets the vs flag if the input is -0.0.
+ void TestForMinusZero(DoubleRegister input);
+
+ // Jump to label if the input double register contains -0.0.
+ void JumpIfMinusZero(DoubleRegister input, Label* on_negative_zero);
+
+  // Generate code to do a lookup in the number string cache. If the number in
+  // the 'object' register is found in the cache, the generated code falls
+  // through with the result in the 'result' register. The object and result
+  // registers can be the same. If the number is not found in the cache, the
+  // code jumps to the label not_found; only the content of the 'object'
+  // register is left unchanged.
+ void LookupNumberStringCache(Register object,
+ Register result,
+ Register scratch1,
+ Register scratch2,
+ Register scratch3,
+ Label* not_found);
+
+ // Saturate a signed 32-bit integer in input to an unsigned 8-bit integer in
+ // output.
+ void ClampInt32ToUint8(Register in_out);
+ void ClampInt32ToUint8(Register output, Register input);
+
+ // Saturate a double in input to an unsigned 8-bit integer in output.
+ void ClampDoubleToUint8(Register output,
+ DoubleRegister input,
+ DoubleRegister dbl_scratch);
+
+ // Try to convert a double to a signed 32-bit int.
+ // This succeeds if the result compares equal to the input, so inputs of -0.0
+ // are converted to 0 and handled as a success.
+ //
+ // On output the Z flag is set if the conversion was successful.
+ void TryConvertDoubleToInt32(Register as_int,
+ FPRegister value,
+ FPRegister scratch_d,
+ Label* on_successful_conversion = NULL,
+ Label* on_failed_conversion = NULL) {
+ ASSERT(as_int.Is32Bits());
+ TryConvertDoubleToInt(as_int, value, scratch_d, on_successful_conversion,
+ on_failed_conversion);
+ }
+
+ // Try to convert a double to a signed 64-bit int.
+ // This succeeds if the result compares equal to the input, so inputs of -0.0
+ // are converted to 0 and handled as a success.
+ //
+ // On output the Z flag is set if the conversion was successful.
+ void TryConvertDoubleToInt64(Register as_int,
+ FPRegister value,
+ FPRegister scratch_d,
+ Label* on_successful_conversion = NULL,
+ Label* on_failed_conversion = NULL) {
+ ASSERT(as_int.Is64Bits());
+ TryConvertDoubleToInt(as_int, value, scratch_d, on_successful_conversion,
+ on_failed_conversion);
+ }
+
+ // ---- Object Utilities ----
+
+ // Copy fields from 'src' to 'dst', where both are tagged objects.
+ // The 'temps' list is a list of X registers which can be used for scratch
+ // values. The temps list must include at least one register.
+ //
+ // Currently, CopyFields cannot make use of more than three registers from
+ // the 'temps' list.
+ //
+ // CopyFields expects to be able to take at least two registers from
+ // MacroAssembler::TmpList().
+ void CopyFields(Register dst, Register src, CPURegList temps, unsigned count);
+
+ // Starting at address in dst, initialize field_count 64-bit fields with
+ // 64-bit value in register filler. Register dst is corrupted.
+ void FillFields(Register dst,
+ Register field_count,
+ Register filler);
+
+ // Copies a number of bytes from src to dst. All passed registers are
+ // clobbered. On exit src and dst will point to the place just after where the
+ // last byte was read or written and length will be zero. Hint may be used to
+ // determine which is the most efficient algorithm to use for copying.
+ void CopyBytes(Register dst,
+ Register src,
+ Register length,
+ Register scratch,
+ CopyHint hint = kCopyUnknown);
+
+ // ---- String Utilities ----
+
+
+ // Jump to label if either object is not a sequential ASCII string.
+ // Optionally perform a smi check on the objects first.
+ void JumpIfEitherIsNotSequentialAsciiStrings(
+ Register first,
+ Register second,
+ Register scratch1,
+ Register scratch2,
+ Label* failure,
+ SmiCheckType smi_check = DO_SMI_CHECK);
+
+ // Check if instance type is sequential ASCII string and jump to label if
+ // it is not.
+ void JumpIfInstanceTypeIsNotSequentialAscii(Register type,
+ Register scratch,
+ Label* failure);
+
+ // Checks if both instance types are sequential ASCII strings and jumps to
+ // label if either is not.
+ void JumpIfEitherInstanceTypeIsNotSequentialAscii(
+ Register first_object_instance_type,
+ Register second_object_instance_type,
+ Register scratch1,
+ Register scratch2,
+ Label* failure);
+
+ // Checks if both instance types are sequential ASCII strings and jumps to
+ // label if either is not.
+ void JumpIfBothInstanceTypesAreNotSequentialAscii(
+ Register first_object_instance_type,
+ Register second_object_instance_type,
+ Register scratch1,
+ Register scratch2,
+ Label* failure);
+
+ void JumpIfNotUniqueName(Register type, Label* not_unique_name);
+
+ // ---- Calling / Jumping helpers ----
+
+  // This is required for compatibility with architecture-independent code.
+ inline void jmp(Label* L) { B(L); }
+
+  // Passes the thrown value to the handler at the top of the try handler
+  // chain. Register value must be x0.
+ void Throw(Register value,
+ Register scratch1,
+ Register scratch2,
+ Register scratch3,
+ Register scratch4);
+
+ // Propagates an uncatchable exception to the top of the current JS stack's
+ // handler chain. Register value must be x0.
+ void ThrowUncatchable(Register value,
+ Register scratch1,
+ Register scratch2,
+ Register scratch3,
+ Register scratch4);
+
+ // Throw a message string as an exception.
+ void Throw(BailoutReason reason);
+
+ // Throw a message string as an exception if a condition is not true.
+ void ThrowIf(Condition cc, BailoutReason reason);
+
+ // Throw a message string as an exception if the value is a smi.
+ void ThrowIfSmi(const Register& value, BailoutReason reason);
+
+ void CallStub(CodeStub* stub, TypeFeedbackId ast_id = TypeFeedbackId::None());
+ void TailCallStub(CodeStub* stub);
+
+ void CallRuntime(const Runtime::Function* f,
+ int num_arguments,
+ SaveFPRegsMode save_doubles = kDontSaveFPRegs);
+
+ void CallRuntime(Runtime::FunctionId id,
+ int num_arguments,
+ SaveFPRegsMode save_doubles = kDontSaveFPRegs) {
+ CallRuntime(Runtime::FunctionForId(id), num_arguments, save_doubles);
+ }
+
+ void CallRuntimeSaveDoubles(Runtime::FunctionId id) {
+ const Runtime::Function* function = Runtime::FunctionForId(id);
+ CallRuntime(function, function->nargs, kSaveFPRegs);
+ }
+
+ void TailCallRuntime(Runtime::FunctionId fid,
+ int num_arguments,
+ int result_size);
+
+ int ActivationFrameAlignment();
+
+ // Calls a C function.
+ // The called function is not allowed to trigger a
+ // garbage collection, since that might move the code and invalidate the
+ // return address (unless this is somehow accounted for by the called
+ // function).
+ void CallCFunction(ExternalReference function,
+ int num_reg_arguments);
+ void CallCFunction(ExternalReference function,
+ int num_reg_arguments,
+ int num_double_arguments);
+ void CallCFunction(Register function,
+ int num_reg_arguments,
+ int num_double_arguments);
+
+ // Calls an API function. Allocates HandleScope, extracts returned value
+ // from handle and propagates exceptions.
+ // 'stack_space' is the space to be unwound on exit (includes the call JS
+ // arguments space and the additional space allocated for the fast call).
+ // 'spill_offset' is the offset from the stack pointer where
+ // CallApiFunctionAndReturn can spill registers.
+ void CallApiFunctionAndReturn(Register function_address,
+ ExternalReference thunk_ref,
+ int stack_space,
+ int spill_offset,
+ MemOperand return_value_operand,
+ MemOperand* context_restore_operand);
+
+  // The number of registers that CallApiFunctionAndReturn will need to save
+  // on the stack. The space for these registers needs to be allocated in the
+  // ExitFrame before calling CallApiFunctionAndReturn.
+ static const int kCallApiFunctionSpillSpace = 4;
+
+ // Jump to a runtime routine.
+ void JumpToExternalReference(const ExternalReference& builtin);
+ // Tail call of a runtime routine (jump).
+ // Like JumpToExternalReference, but also takes care of passing the number
+ // of parameters.
+ void TailCallExternalReference(const ExternalReference& ext,
+ int num_arguments,
+ int result_size);
+ void CallExternalReference(const ExternalReference& ext,
+ int num_arguments);
+
+
+ // Invoke specified builtin JavaScript function. Adds an entry to
+ // the unresolved list if the name does not resolve.
+ void InvokeBuiltin(Builtins::JavaScript id,
+ InvokeFlag flag,
+ const CallWrapper& call_wrapper = NullCallWrapper());
+
+ // Store the code object for the given builtin in the target register and
+  // set up the function in the function register.
+ void GetBuiltinEntry(Register target,
+ Register function,
+ Builtins::JavaScript id);
+
+ // Store the function for the given builtin in the target register.
+ void GetBuiltinFunction(Register target, Builtins::JavaScript id);
+
+ void Jump(Register target);
+ void Jump(Address target, RelocInfo::Mode rmode);
+ void Jump(Handle<Code> code, RelocInfo::Mode rmode);
+ void Jump(intptr_t target, RelocInfo::Mode rmode);
+
+ void Call(Register target);
+ void Call(Label* target);
+ void Call(Address target, RelocInfo::Mode rmode);
+ void Call(Handle<Code> code,
+ RelocInfo::Mode rmode = RelocInfo::CODE_TARGET,
+ TypeFeedbackId ast_id = TypeFeedbackId::None());
+
+ // For every Call variant, there is a matching CallSize function that returns
+ // the size (in bytes) of the call sequence.
+ static int CallSize(Register target);
+ static int CallSize(Label* target);
+ static int CallSize(Address target, RelocInfo::Mode rmode);
+ static int CallSize(Handle<Code> code,
+ RelocInfo::Mode rmode = RelocInfo::CODE_TARGET,
+ TypeFeedbackId ast_id = TypeFeedbackId::None());
+
+ // Registers used through the invocation chain are hard-coded.
+ // We force passing the parameters to ensure the contracts are correctly
+ // honoured by the caller.
+ // 'function' must be x1.
+ // 'actual' must use an immediate or x0.
+ // 'expected' must use an immediate or x2.
+ // 'call_kind' must be x5.
+ void InvokePrologue(const ParameterCount& expected,
+ const ParameterCount& actual,
+ Handle<Code> code_constant,
+ Register code_reg,
+ Label* done,
+ InvokeFlag flag,
+ bool* definitely_mismatches,
+ const CallWrapper& call_wrapper);
+ void InvokeCode(Register code,
+ const ParameterCount& expected,
+ const ParameterCount& actual,
+ InvokeFlag flag,
+ const CallWrapper& call_wrapper);
+ // Invoke the JavaScript function in the given register.
+ // Changes the current context to the context in the function before invoking.
+ void InvokeFunction(Register function,
+ const ParameterCount& actual,
+ InvokeFlag flag,
+ const CallWrapper& call_wrapper);
+ void InvokeFunction(Register function,
+ const ParameterCount& expected,
+ const ParameterCount& actual,
+ InvokeFlag flag,
+ const CallWrapper& call_wrapper);
+ void InvokeFunction(Handle<JSFunction> function,
+ const ParameterCount& expected,
+ const ParameterCount& actual,
+ InvokeFlag flag,
+ const CallWrapper& call_wrapper);
+
+
+ // ---- Floating point helpers ----
+
+ // Perform a conversion from a double to a signed int64. If the input fits in
+  // the range of the 64-bit result, execution branches to done. Otherwise,
+ // execution falls through, and the sign of the result can be used to
+ // determine if overflow was towards positive or negative infinity.
+ //
+ // On successful conversion, the least significant 32 bits of the result are
+ // equivalent to the ECMA-262 operation "ToInt32".
+ //
+ // Only public for the test code in test-code-stubs-arm64.cc.
+ void TryConvertDoubleToInt64(Register result,
+ DoubleRegister input,
+ Label* done);
+
+ // Performs a truncating conversion of a floating point number as used by
+ // the JS bitwise operations. See ECMA-262 9.5: ToInt32.
+ // Exits with 'result' holding the answer.
+ void TruncateDoubleToI(Register result, DoubleRegister double_input);
+
+ // Performs a truncating conversion of a heap number as used by
+ // the JS bitwise operations. See ECMA-262 9.5: ToInt32. 'result' and 'input'
+ // must be different registers. Exits with 'result' holding the answer.
+ void TruncateHeapNumberToI(Register result, Register object);
+
+ // Converts the smi or heap number in object to an int32 using the rules
+  // for ToInt32 as described in ECMA-262 9.5: the value is truncated
+ // and brought into the range -2^31 .. +2^31 - 1. 'result' and 'input' must be
+ // different registers.
+ void TruncateNumberToI(Register object,
+ Register result,
+ Register heap_number_map,
+ Label* not_int32);
+
+ // ---- Code generation helpers ----
+
+ void set_generating_stub(bool value) { generating_stub_ = value; }
+ bool generating_stub() const { return generating_stub_; }
+#if DEBUG
+ void set_allow_macro_instructions(bool value) {
+ allow_macro_instructions_ = value;
+ }
+ bool allow_macro_instructions() const { return allow_macro_instructions_; }
+#endif
+ bool use_real_aborts() const { return use_real_aborts_; }
+ void set_has_frame(bool value) { has_frame_ = value; }
+ bool has_frame() const { return has_frame_; }
+ bool AllowThisStubCall(CodeStub* stub);
+
+ class NoUseRealAbortsScope {
+ public:
+ explicit NoUseRealAbortsScope(MacroAssembler* masm) :
+ saved_(masm->use_real_aborts_), masm_(masm) {
+ masm_->use_real_aborts_ = false;
+ }
+ ~NoUseRealAbortsScope() {
+ masm_->use_real_aborts_ = saved_;
+ }
+ private:
+ bool saved_;
+ MacroAssembler* masm_;
+ };
+
+#ifdef ENABLE_DEBUGGER_SUPPORT
+ // ---------------------------------------------------------------------------
+ // Debugger Support
+
+ void DebugBreak();
+#endif
+ // ---------------------------------------------------------------------------
+ // Exception handling
+
+ // Push a new try handler and link into try handler chain.
+ void PushTryHandler(StackHandler::Kind kind, int handler_index);
+
+ // Unlink the stack handler on top of the stack from the try handler chain.
+ // Must preserve the result register.
+ void PopTryHandler();
+
+
+ // ---------------------------------------------------------------------------
+ // Allocation support
+
+ // Allocate an object in new space or old pointer space. The object_size is
+ // specified either in bytes or in words if the allocation flag SIZE_IN_WORDS
+ // is passed. The allocated object is returned in result.
+ //
+ // If the new space is exhausted control continues at the gc_required label.
+ // In this case, the result and scratch registers may still be clobbered.
+  // If flags includes TAG_OBJECT, the result is tagged as a heap object.
+ void Allocate(Register object_size,
+ Register result,
+ Register scratch1,
+ Register scratch2,
+ Label* gc_required,
+ AllocationFlags flags);
+
+ void Allocate(int object_size,
+ Register result,
+ Register scratch1,
+ Register scratch2,
+ Label* gc_required,
+ AllocationFlags flags);
+
+ // Undo allocation in new space. The object passed and objects allocated after
+ // it will no longer be allocated. The caller must make sure that no pointers
+ // are left to the object(s) no longer allocated as they would be invalid when
+ // allocation is undone.
+ void UndoAllocationInNewSpace(Register object, Register scratch);
+
+ void AllocateTwoByteString(Register result,
+ Register length,
+ Register scratch1,
+ Register scratch2,
+ Register scratch3,
+ Label* gc_required);
+ void AllocateAsciiString(Register result,
+ Register length,
+ Register scratch1,
+ Register scratch2,
+ Register scratch3,
+ Label* gc_required);
+ void AllocateTwoByteConsString(Register result,
+ Register length,
+ Register scratch1,
+ Register scratch2,
+ Label* gc_required);
+ void AllocateAsciiConsString(Register result,
+ Register length,
+ Register scratch1,
+ Register scratch2,
+ Label* gc_required);
+ void AllocateTwoByteSlicedString(Register result,
+ Register length,
+ Register scratch1,
+ Register scratch2,
+ Label* gc_required);
+ void AllocateAsciiSlicedString(Register result,
+ Register length,
+ Register scratch1,
+ Register scratch2,
+ Label* gc_required);
+
+ // Allocates a heap number or jumps to the gc_required label if the young
+ // space is full and a scavenge is needed.
+ // All registers are clobbered.
+ // If no heap_number_map register is provided, the function will take care of
+ // loading it.
+ void AllocateHeapNumber(Register result,
+ Label* gc_required,
+ Register scratch1,
+ Register scratch2,
+ Register heap_number_map = NoReg);
+ void AllocateHeapNumberWithValue(Register result,
+ DoubleRegister value,
+ Label* gc_required,
+ Register scratch1,
+ Register scratch2,
+ Register heap_number_map = NoReg);
+
+ // ---------------------------------------------------------------------------
+ // Support functions.
+
+  // Try to get the function prototype of a function and put the value in the
+  // result register. Checks that the function really is a function and jumps
+ // to the miss label if the fast checks fail. The function register will be
+ // untouched; the other registers may be clobbered.
+ enum BoundFunctionAction {
+ kMissOnBoundFunction,
+ kDontMissOnBoundFunction
+ };
+
+ void TryGetFunctionPrototype(Register function,
+ Register result,
+ Register scratch,
+ Label* miss,
+ BoundFunctionAction action =
+ kDontMissOnBoundFunction);
+
+ // Compare object type for heap object. heap_object contains a non-Smi
+ // whose object type should be compared with the given type. This both
+ // sets the flags and leaves the object type in the type_reg register.
+ // It leaves the map in the map register (unless the type_reg and map register
+ // are the same register). It leaves the heap object in the heap_object
+ // register unless the heap_object register is the same register as one of the
+ // other registers.
+ void CompareObjectType(Register heap_object,
+ Register map,
+ Register type_reg,
+ InstanceType type);
+
+
+  // Compare object type for heap object, and branch if equal (or not).
+ // heap_object contains a non-Smi whose object type should be compared with
+ // the given type. This both sets the flags and leaves the object type in
+ // the type_reg register. It leaves the map in the map register (unless the
+ // type_reg and map register are the same register). It leaves the heap
+ // object in the heap_object register unless the heap_object register is the
+ // same register as one of the other registers.
+ void JumpIfObjectType(Register object,
+ Register map,
+ Register type_reg,
+ InstanceType type,
+ Label* if_cond_pass,
+ Condition cond = eq);
+
+ void JumpIfNotObjectType(Register object,
+ Register map,
+ Register type_reg,
+ InstanceType type,
+ Label* if_not_object);
+
+ // Compare instance type in a map. map contains a valid map object whose
+ // object type should be compared with the given type. This both
+ // sets the flags and leaves the object type in the type_reg register.
+ void CompareInstanceType(Register map,
+ Register type_reg,
+ InstanceType type);
+
+ // Compare an object's map with the specified map. Condition flags are set
+ // with result of map compare.
+ void CompareMap(Register obj,
+ Register scratch,
+ Handle<Map> map);
+
+ // As above, but the map of the object is already loaded into the register
+ // which is preserved by the code generated.
+ void CompareMap(Register obj_map,
+ Handle<Map> map);
+
+ // Check if the map of an object is equal to a specified map and branch to
+ // label if not. Skip the smi check if not required (object is known to be a
+ // heap object). If mode is ALLOW_ELEMENT_TRANSITION_MAPS, then also match
+ // against maps that are ElementsKind transition maps of the specified map.
+ void CheckMap(Register obj,
+ Register scratch,
+ Handle<Map> map,
+ Label* fail,
+ SmiCheckType smi_check_type);
+
+
+ void CheckMap(Register obj,
+ Register scratch,
+ Heap::RootListIndex index,
+ Label* fail,
+ SmiCheckType smi_check_type);
+
+ // As above, but the map of the object is already loaded into obj_map, and is
+ // preserved.
+ void CheckMap(Register obj_map,
+ Handle<Map> map,
+ Label* fail,
+ SmiCheckType smi_check_type);
+
+ // Check if the map of an object is equal to a specified map and branch to a
+ // specified target if equal. Skip the smi check if not required (object is
+  // known to be a heap object).
+ void DispatchMap(Register obj,
+ Register scratch,
+ Handle<Map> map,
+ Handle<Code> success,
+ SmiCheckType smi_check_type);
+
+ // Test the bitfield of the heap object map with mask and set the condition
+ // flags. The object register is preserved.
+ void TestMapBitfield(Register object, uint64_t mask);
+
+ // Load the elements kind field from a map, and return it in the result
+ // register.
+ void LoadElementsKindFromMap(Register result, Register map);
+
+ // Compare the object in a register to a value from the root list.
+ void CompareRoot(const Register& obj, Heap::RootListIndex index);
+
+ // Compare the object in a register to a value and jump if they are equal.
+ void JumpIfRoot(const Register& obj,
+ Heap::RootListIndex index,
+ Label* if_equal);
+
+ // Compare the object in a register to a value and jump if they are not equal.
+ void JumpIfNotRoot(const Register& obj,
+ Heap::RootListIndex index,
+ Label* if_not_equal);
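+
+  // Illustrative sketch, not part of the original source: root comparisons are
+  // typically used for checks against singletons such as undefined, e.g.:
+  //
+  //   __ JumpIfRoot(x0, Heap::kUndefinedValueRootIndex, &is_undefined);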
+
+ // Load and check the instance type of an object for being a unique name.
+ // Loads the type into the second argument register.
+ // The object and type arguments can be the same register; in that case it
+ // will be overwritten with the type.
+  // Falls through if the object is a unique name and jumps to fail otherwise.
+ inline void IsObjectNameType(Register object, Register type, Label* fail);
+
+ inline void IsObjectJSObjectType(Register heap_object,
+ Register map,
+ Register scratch,
+ Label* fail);
+
+ // Check the instance type in the given map to see if it corresponds to a
+ // JS object type. Jump to the fail label if this is not the case and fall
+  // through otherwise. However, if the fail label is NULL, no branch will be
+  // performed and the condition flags will be updated. You can then test the
+  // flags for the "le" condition to check for a valid JS object type.
+ inline void IsInstanceJSObjectType(Register map,
+ Register scratch,
+ Label* fail);
+
+ // Load and check the instance type of an object for being a string.
+ // Loads the type into the second argument register.
+ // The object and type arguments can be the same register; in that case it
+ // will be overwritten with the type.
+  // Jumps to not_string or string as appropriate. If the appropriate label is
+ // NULL, fall through.
+ inline void IsObjectJSStringType(Register object, Register type,
+ Label* not_string, Label* string = NULL);
+
+ // Compare the contents of a register with an operand, and branch to true,
+ // false or fall through, depending on condition.
+ void CompareAndSplit(const Register& lhs,
+ const Operand& rhs,
+ Condition cond,
+ Label* if_true,
+ Label* if_false,
+ Label* fall_through);
+
+ // Test the bits of register defined by bit_pattern, and branch to
+ // if_any_set, if_all_clear or fall_through accordingly.
+ void TestAndSplit(const Register& reg,
+ uint64_t bit_pattern,
+ Label* if_all_clear,
+ Label* if_any_set,
+ Label* fall_through);
+
+ // Check if a map for a JSObject indicates that the object has fast elements.
+ // Jump to the specified label if it does not.
+ void CheckFastElements(Register map, Register scratch, Label* fail);
+
+ // Check if a map for a JSObject indicates that the object can have both smi
+ // and HeapObject elements. Jump to the specified label if it does not.
+ void CheckFastObjectElements(Register map, Register scratch, Label* fail);
+
+ // Check to see if number can be stored as a double in FastDoubleElements.
+ // If it can, store it at the index specified by key_reg in the array,
+ // otherwise jump to fail.
+ void StoreNumberToDoubleElements(Register value_reg,
+ Register key_reg,
+ Register elements_reg,
+ Register scratch1,
+ FPRegister fpscratch1,
+ FPRegister fpscratch2,
+ Label* fail,
+ int elements_offset = 0);
+
+ // Picks out an array index from the hash field.
+ // Register use:
+ // hash - holds the index's hash. Clobbered.
+ // index - holds the overwritten index on exit.
+ void IndexFromHash(Register hash, Register index);
+
+ // ---------------------------------------------------------------------------
+ // Inline caching support.
+
+ void EmitSeqStringSetCharCheck(Register string,
+ Register index,
+ SeqStringSetCharCheckIndexType index_type,
+ Register scratch,
+ uint32_t encoding_mask);
+
+ // Generate code for checking access rights - used for security checks
+ // on access to global objects across environments. The holder register
+ // is left untouched, whereas both scratch registers are clobbered.
+ void CheckAccessGlobalProxy(Register holder_reg,
+ Register scratch1,
+ Register scratch2,
+ Label* miss);
+
+  // Hash the integer value in the 'key' register.
+ // It uses the same algorithm as ComputeIntegerHash in utils.h.
+ void GetNumberHash(Register key, Register scratch);
+
+ // Load value from the dictionary.
+ //
+ // elements - holds the slow-case elements of the receiver on entry.
+ // Unchanged unless 'result' is the same register.
+ //
+ // key - holds the smi key on entry.
+ // Unchanged unless 'result' is the same register.
+ //
+ // result - holds the result on exit if the load succeeded.
+ // Allowed to be the same as 'key' or 'result'.
+ // Unchanged on bailout so 'key' or 'result' can be used
+ // in further computation.
+ void LoadFromNumberDictionary(Label* miss,
+ Register elements,
+ Register key,
+ Register result,
+ Register scratch0,
+ Register scratch1,
+ Register scratch2,
+ Register scratch3);
+
+ // ---------------------------------------------------------------------------
+ // Frames.
+
+ // Activation support.
+ void EnterFrame(StackFrame::Type type);
+ void LeaveFrame(StackFrame::Type type);
+
+ // Returns map with validated enum cache in object register.
+ void CheckEnumCache(Register object,
+ Register null_value,
+ Register scratch0,
+ Register scratch1,
+ Register scratch2,
+ Register scratch3,
+ Label* call_runtime);
+
+ // AllocationMemento support. Arrays may have an associated
+ // AllocationMemento object that can be checked for in order to pretransition
+ // to another type.
+ // On entry, receiver should point to the array object.
+ // If allocation info is present, the Z flag is set (so that the eq
+ // condition will pass).
+ void TestJSArrayForAllocationMemento(Register receiver,
+ Register scratch1,
+ Register scratch2,
+ Label* no_memento_found);
+
+ void JumpIfJSArrayHasAllocationMemento(Register receiver,
+ Register scratch1,
+ Register scratch2,
+ Label* memento_found) {
+ Label no_memento_found;
+ TestJSArrayForAllocationMemento(receiver, scratch1, scratch2,
+ &no_memento_found);
+ B(eq, memento_found);
+ Bind(&no_memento_found);
+ }
+
+ // The stack pointer has to switch between csp and jssp when setting up and
+ // destroying the exit frame. Hence preserving/restoring the registers is
+ // slightly more complicated than simple push/pop operations.
+ void ExitFramePreserveFPRegs();
+ void ExitFrameRestoreFPRegs();
+
+ // Generates function and stub prologue code.
+ void Prologue(PrologueFrameMode frame_mode);
+
+ // Enter exit frame. Exit frames are used when calling C code from generated
+ // (JavaScript) code.
+ //
+ // The stack pointer must be jssp on entry, and will be set to csp by this
+ // function. The frame pointer is also configured, but the only other
+ // registers modified by this function are the provided scratch register, and
+ // jssp.
+ //
+ // The 'extra_space' argument can be used to allocate some space in the exit
+ // frame that will be ignored by the GC. This space will be reserved in the
+ // bottom of the frame immediately above the return address slot.
+ //
+ // Set up a stack frame and registers as follows:
+ // fp[8]: CallerPC (lr)
+ // fp -> fp[0]: CallerFP (old fp)
+ // fp[-8]: SPOffset (new csp)
+ // fp[-16]: CodeObject()
+ // fp[-16 - fp-size]: Saved doubles, if saved_doubles is true.
+ // csp[8]: Memory reserved for the caller if extra_space != 0.
+ // Alignment padding, if necessary.
+ // csp -> csp[0]: Space reserved for the return address.
+ //
+ // This function also stores the new frame information in the top frame, so
+ // that the new frame becomes the current frame.
+ void EnterExitFrame(bool save_doubles,
+ const Register& scratch,
+ int extra_space = 0);
+
+ // Leave the current exit frame, after a C function has returned to generated
+ // (JavaScript) code.
+ //
+ // This effectively unwinds the operation of EnterExitFrame:
+  //  * Preserved doubles are restored (if save_doubles is true).
+ // * The frame information is removed from the top frame.
+ // * The exit frame is dropped.
+ // * The stack pointer is reset to jssp.
+ //
+ // The stack pointer must be csp on entry.
+ void LeaveExitFrame(bool save_doubles,
+ const Register& scratch,
+ bool restore_context);
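+
+  // Illustrative sketch, not part of the original source: generated code that
+  // calls out to C typically brackets the call roughly as follows, using x10
+  // as the scratch register and saving no doubles:
+  //
+  //   __ EnterExitFrame(false, x10);
+  //   // ... set up arguments and call the C function ...
+  //   __ LeaveExitFrame(false, x10, true);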
+
+ void LoadContext(Register dst, int context_chain_length);
+
+ // Emit code for a truncating division by a constant. The dividend register is
+ // unchanged. Dividend and result must be different.
+ void TruncatingDiv(Register result, Register dividend, int32_t divisor);
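+
+  // Illustrative sketch, not part of the original source, assuming the 32-bit
+  // W views of the registers are used for the result and dividend:
+  //
+  //   __ TruncatingDiv(w0, w1, 3);  // w0 = w1 / 3, truncated towards zero.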
+
+ // ---------------------------------------------------------------------------
+ // StatsCounter support
+
+ void SetCounter(StatsCounter* counter, int value, Register scratch1,
+ Register scratch2);
+ void IncrementCounter(StatsCounter* counter, int value, Register scratch1,
+ Register scratch2);
+ void DecrementCounter(StatsCounter* counter, int value, Register scratch1,
+ Register scratch2);
+
+ // ---------------------------------------------------------------------------
+ // Garbage collector support (GC).
+
+ enum RememberedSetFinalAction {
+ kReturnAtEnd,
+ kFallThroughAtEnd
+ };
+
+ // Record in the remembered set the fact that we have a pointer to new space
+ // at the address pointed to by the addr register. Only works if addr is not
+ // in new space.
+ void RememberedSetHelper(Register object, // Used for debug code.
+ Register addr,
+ Register scratch1,
+ SaveFPRegsMode save_fp,
+ RememberedSetFinalAction and_then);
+
+ // Push and pop the registers that can hold pointers, as defined by the
+ // RegList constant kSafepointSavedRegisters.
+ void PushSafepointRegisters();
+ void PopSafepointRegisters();
+
+ void PushSafepointRegistersAndDoubles();
+ void PopSafepointRegistersAndDoubles();
+
+ // Store value in register src in the safepoint stack slot for register dst.
+ void StoreToSafepointRegisterSlot(Register src, Register dst) {
+ Poke(src, SafepointRegisterStackIndex(dst.code()) * kPointerSize);
+ }
+
+ // Load the value of the src register from its safepoint stack slot
+ // into register dst.
+ void LoadFromSafepointRegisterSlot(Register dst, Register src) {
+ Peek(src, SafepointRegisterStackIndex(dst.code()) * kPointerSize);
+ }
+
+ void CheckPageFlagSet(const Register& object,
+ const Register& scratch,
+ int mask,
+ Label* if_any_set);
+
+ void CheckPageFlagClear(const Register& object,
+ const Register& scratch,
+ int mask,
+ Label* if_all_clear);
+
+ void CheckMapDeprecated(Handle<Map> map,
+ Register scratch,
+ Label* if_deprecated);
+
+ // Check if object is in new space and jump accordingly.
+ // Register 'object' is preserved.
+ void JumpIfNotInNewSpace(Register object,
+ Label* branch) {
+ InNewSpace(object, ne, branch);
+ }
+
+ void JumpIfInNewSpace(Register object,
+ Label* branch) {
+ InNewSpace(object, eq, branch);
+ }
+
+ // Notify the garbage collector that we wrote a pointer into an object.
+ // |object| is the object being stored into, |value| is the object being
+ // stored. value and scratch registers are clobbered by the operation.
+ // The offset is the offset from the start of the object, not the offset from
+  // the tagged HeapObject pointer. For use with FieldMemOperand(reg, off).
+ void RecordWriteField(
+ Register object,
+ int offset,
+ Register value,
+ Register scratch,
+ LinkRegisterStatus lr_status,
+ SaveFPRegsMode save_fp,
+ RememberedSetAction remembered_set_action = EMIT_REMEMBERED_SET,
+ SmiCheck smi_check = INLINE_SMI_CHECK);
+
+ // As above, but the offset has the tag presubtracted. For use with
+ // MemOperand(reg, off).
+ inline void RecordWriteContextSlot(
+ Register context,
+ int offset,
+ Register value,
+ Register scratch,
+ LinkRegisterStatus lr_status,
+ SaveFPRegsMode save_fp,
+ RememberedSetAction remembered_set_action = EMIT_REMEMBERED_SET,
+ SmiCheck smi_check = INLINE_SMI_CHECK) {
+ RecordWriteField(context,
+ offset + kHeapObjectTag,
+ value,
+ scratch,
+ lr_status,
+ save_fp,
+ remembered_set_action,
+ smi_check);
+ }
+
+ // For a given |object| notify the garbage collector that the slot |address|
+ // has been written. |value| is the object being stored. The value and
+ // address registers are clobbered by the operation.
+ void RecordWrite(
+ Register object,
+ Register address,
+ Register value,
+ LinkRegisterStatus lr_status,
+ SaveFPRegsMode save_fp,
+ RememberedSetAction remembered_set_action = EMIT_REMEMBERED_SET,
+ SmiCheck smi_check = INLINE_SMI_CHECK);
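+
+  // Illustrative sketch, not part of the original source: after storing a
+  // tagged value into an object field, the write barrier is usually emitted
+  // along these lines (x0 is the object, x1 the stored value, x2 a scratch):
+  //
+  //   __ Str(x1, FieldMemOperand(x0, JSObject::kPropertiesOffset));
+  //   __ RecordWriteField(x0, JSObject::kPropertiesOffset, x1, x2,
+  //                       kLRHasNotBeenSaved, kDontSaveFPRegs);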
+
+ // Checks the color of an object. If the object is already grey or black
+ // then we just fall through, since it is already live. If it is white and
+ // we can determine that it doesn't need to be scanned, then we just mark it
+ // black and fall through. For the rest we jump to the label so the
+ // incremental marker can fix its assumptions.
+ void EnsureNotWhite(Register object,
+ Register scratch1,
+ Register scratch2,
+ Register scratch3,
+ Register scratch4,
+ Label* object_is_white_and_not_data);
+
+  // Detects conservatively whether an object is data-only, i.e. whether it
+  // does not need to be scanned by the garbage collector.
+ void JumpIfDataObject(Register value,
+ Register scratch,
+ Label* not_data_object);
+
+ // Helper for finding the mark bits for an address.
+ // Note that the behaviour slightly differs from other architectures.
+ // On exit:
+ // - addr_reg is unchanged.
+ // - The bitmap register points at the word with the mark bits.
+ // - The shift register contains the index of the first color bit for this
+ // object in the bitmap.
+ inline void GetMarkBits(Register addr_reg,
+ Register bitmap_reg,
+ Register shift_reg);
+
+ // Check if an object has a given incremental marking color.
+ void HasColor(Register object,
+ Register scratch0,
+ Register scratch1,
+ Label* has_color,
+ int first_bit,
+ int second_bit);
+
+ void JumpIfBlack(Register object,
+ Register scratch0,
+ Register scratch1,
+ Label* on_black);
+
+
+ // Get the location of a relocated constant (its address in the constant pool)
+ // from its load site.
+ void GetRelocatedValueLocation(Register ldr_location,
+ Register result);
+
+
+ // ---------------------------------------------------------------------------
+ // Debugging.
+
+ // Calls Abort(msg) if the condition cond is not satisfied.
+ // Use --debug_code to enable.
+ void Assert(Condition cond, BailoutReason reason);
+ void AssertRegisterIsClear(Register reg, BailoutReason reason);
+ void AssertRegisterIsRoot(
+ Register reg,
+ Heap::RootListIndex index,
+ BailoutReason reason = kRegisterDidNotMatchExpectedRoot);
+ void AssertFastElements(Register elements);
+
+ // Abort if the specified register contains the invalid color bit pattern.
+ // The pattern must be in bits [1:0] of 'reg' register.
+ //
+ // If emit_debug_code() is false, this emits no code.
+ void AssertHasValidColor(const Register& reg);
+
+ // Abort if 'object' register doesn't point to a string object.
+ //
+ // If emit_debug_code() is false, this emits no code.
+ void AssertIsString(const Register& object);
+
+ // Like Assert(), but always enabled.
+ void Check(Condition cond, BailoutReason reason);
+ void CheckRegisterIsClear(Register reg, BailoutReason reason);
+
+ // Print a message to stderr and abort execution.
+ void Abort(BailoutReason reason);
+
+ // Conditionally load the cached Array transitioned map of type
+ // transitioned_kind from the native context if the map in register
+ // map_in_out is the cached Array map in the native context of
+ // expected_kind.
+ void LoadTransitionedArrayMapConditional(
+ ElementsKind expected_kind,
+ ElementsKind transitioned_kind,
+ Register map_in_out,
+ Register scratch1,
+ Register scratch2,
+ Label* no_map_match);
+
+ void LoadGlobalFunction(int index, Register function);
+
+ // Load the initial map from the global function. The registers function and
+ // map can be the same, function is then overwritten.
+ void LoadGlobalFunctionInitialMap(Register function,
+ Register map,
+ Register scratch);
+
+ CPURegList* TmpList() { return &tmp_list_; }
+ CPURegList* FPTmpList() { return &fptmp_list_; }
+
+ // Like printf, but print at run-time from generated code.
+ //
+ // The caller must ensure that arguments for floating-point placeholders
+ // (such as %e, %f or %g) are FPRegisters, and that arguments for integer
+ // placeholders are Registers.
+ //
+ // A maximum of four arguments may be given to any single Printf call. The
+ // arguments must be of the same type, but they do not need to have the same
+ // size.
+ //
+ // The following registers cannot be printed:
+ // StackPointer(), csp.
+ //
+ // This function automatically preserves caller-saved registers so that
+ // calling code can use Printf at any point without having to worry about
+ // corruption. The preservation mechanism generates a lot of code. If this is
+ // a problem, preserve the important registers manually and then call
+ // PrintfNoPreserve. Callee-saved registers are not used by Printf, and are
+ // implicitly preserved.
+ //
+ // Unlike many MacroAssembler functions, x8 and x9 are guaranteed to be
+ // preserved, and can be printed. This allows Printf to be used during debug
+ // code.
+ //
+ // This function assumes (and asserts) that the current stack pointer is
+ // callee-saved, not caller-saved. This is most likely the case anyway, as a
+ // caller-saved stack pointer doesn't make a lot of sense.
+ void Printf(const char * format,
+ const CPURegister& arg0 = NoCPUReg,
+ const CPURegister& arg1 = NoCPUReg,
+ const CPURegister& arg2 = NoCPUReg,
+ const CPURegister& arg3 = NoCPUReg);
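+
+  // Illustrative sketch, not part of the original source: per the contract
+  // above, a floating-point placeholder takes an FPRegister argument, e.g.:
+  //
+  //   __ Printf("value of d0: %g\n", d0);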
+
+ // Like Printf, but don't preserve any caller-saved registers, not even 'lr'.
+ //
+ // The return code from the system printf call will be returned in x0.
+ void PrintfNoPreserve(const char * format,
+ const CPURegister& arg0 = NoCPUReg,
+ const CPURegister& arg1 = NoCPUReg,
+ const CPURegister& arg2 = NoCPUReg,
+ const CPURegister& arg3 = NoCPUReg);
+
+ // Code ageing support functions.
+
+ // Code ageing on ARM64 works similarly to on ARM. When V8 wants to mark a
+ // function as old, it replaces some of the function prologue (generated by
+ // FullCodeGenerator::Generate) with a call to a special stub (ultimately
+ // generated by GenerateMakeCodeYoungAgainCommon). The stub restores the
+ // function prologue to its initial young state (indicating that it has been
+ // recently run) and continues. A young function is therefore one which has a
+ // normal frame setup sequence, and an old function has a code age sequence
+ // which calls a code ageing stub.
+
+ // Set up a basic stack frame for young code (or code exempt from ageing) with
+  // type FUNCTION. It may be patched later for code ageing support. This is
+  // done by Code::PatchPlatformCodeAge and EmitCodeAgeSequence.
+ //
+ // This function takes an Assembler so it can be called from either a
+ // MacroAssembler or a PatchingAssembler context.
+ static void EmitFrameSetupForCodeAgePatching(Assembler* assm);
+
+ // Call EmitFrameSetupForCodeAgePatching from a MacroAssembler context.
+ void EmitFrameSetupForCodeAgePatching();
+
+ // Emit a code age sequence that calls the relevant code age stub. The code
+ // generated by this sequence is expected to replace the code generated by
+ // EmitFrameSetupForCodeAgePatching, and represents an old function.
+ //
+ // If stub is NULL, this function generates the code age sequence but omits
+ // the stub address that is normally embedded in the instruction stream. This
+ // can be used by debug code to verify code age sequences.
+ static void EmitCodeAgeSequence(Assembler* assm, Code* stub);
+
+ // Call EmitCodeAgeSequence from a MacroAssembler context.
+ void EmitCodeAgeSequence(Code* stub);
+
+  // Return true if the sequence is a young sequence generated by
+ // EmitFrameSetupForCodeAgePatching. Otherwise, this method asserts that the
+ // sequence is a code age sequence (emitted by EmitCodeAgeSequence).
+ static bool IsYoungSequence(byte* sequence);
+
+#ifdef DEBUG
+ // Return true if the sequence is a code age sequence generated by
+ // EmitCodeAgeSequence.
+ static bool IsCodeAgeSequence(byte* sequence);
+#endif
+
+ // Jumps to found label if a prototype map has dictionary elements.
+ void JumpIfDictionaryInPrototypeChain(Register object, Register scratch0,
+ Register scratch1, Label* found);
+
+ private:
+ // Helpers for CopyFields.
+ // These each implement CopyFields in a different way.
+ void CopyFieldsLoopPairsHelper(Register dst, Register src, unsigned count,
+ Register scratch1, Register scratch2,
+ Register scratch3, Register scratch4,
+ Register scratch5);
+ void CopyFieldsUnrolledPairsHelper(Register dst, Register src, unsigned count,
+ Register scratch1, Register scratch2,
+ Register scratch3, Register scratch4);
+ void CopyFieldsUnrolledHelper(Register dst, Register src, unsigned count,
+ Register scratch1, Register scratch2,
+ Register scratch3);
+
+ // The actual Push and Pop implementations. These don't generate any code
+ // other than that required for the push or pop. This allows
+ // (Push|Pop)CPURegList to bundle together run-time assertions for a large
+ // block of registers.
+ //
+ // Note that size is per register, and is specified in bytes.
+ void PushHelper(int count, int size,
+ const CPURegister& src0, const CPURegister& src1,
+ const CPURegister& src2, const CPURegister& src3);
+ void PopHelper(int count, int size,
+ const CPURegister& dst0, const CPURegister& dst1,
+ const CPURegister& dst2, const CPURegister& dst3);
+
+ // Perform necessary maintenance operations before a push or pop.
+ //
+ // Note that size is specified in bytes.
+ void PrepareForPush(Operand total_size);
+ void PrepareForPop(Operand total_size);
+
+ void PrepareForPush(int count, int size) { PrepareForPush(count * size); }
+ void PrepareForPop(int count, int size) { PrepareForPop(count * size); }
+
+ // Call Printf. On a native build, a simple call will be generated, but if the
+ // simulator is being used then a suitable pseudo-instruction is used. The
+ // arguments and stack (csp) must be prepared by the caller as for a normal
+ // AAPCS64 call to 'printf'.
+ //
+ // The 'type' argument specifies the type of the optional arguments.
+ void CallPrintf(CPURegister::RegisterType type = CPURegister::kNoRegister);
+
+ // Helper for throwing exceptions. Compute a handler address and jump to
+ // it. See the implementation for register usage.
+ void JumpToHandlerEntry(Register exception,
+ Register object,
+ Register state,
+ Register scratch1,
+ Register scratch2);
+
+ // Helper for implementing JumpIfNotInNewSpace and JumpIfInNewSpace.
+ void InNewSpace(Register object,
+ Condition cond, // eq for new space, ne otherwise.
+ Label* branch);
+
+ // Try to convert a double to an int so that integer fast-paths may be
+ // used. Not every valid integer value is guaranteed to be caught.
+  // It supports both 32-bit and 64-bit integers depending on whether 'as_int'
+ // is a W or X register.
+ //
+ // This does not distinguish between +0 and -0, so if this distinction is
+ // important it must be checked separately.
+ //
+ // On output the Z flag is set if the conversion was successful.
+ void TryConvertDoubleToInt(Register as_int,
+ FPRegister value,
+ FPRegister scratch_d,
+ Label* on_successful_conversion = NULL,
+ Label* on_failed_conversion = NULL);
+
+ bool generating_stub_;
+#if DEBUG
+ // Tell whether any of the macro instruction can be used. When false the
+ // MacroAssembler will assert if a method which can emit a variable number
+ // of instructions is called.
+ bool allow_macro_instructions_;
+#endif
+ bool has_frame_;
+
+ // The Abort method should call a V8 runtime function, but the CallRuntime
+ // mechanism depends on CEntryStub. If use_real_aborts is false, Abort will
+ // use a simpler abort mechanism that doesn't depend on CEntryStub.
+ //
+ // The purpose of this is to allow Aborts to be compiled whilst CEntryStub is
+ // being generated.
+ bool use_real_aborts_;
+
+ // This handle will be patched with the code object on installation.
+ Handle<Object> code_object_;
+
+ // The register to use as a stack pointer for stack operations.
+ Register sp_;
+
+ // Scratch registers available for use by the MacroAssembler.
+ CPURegList tmp_list_;
+ CPURegList fptmp_list_;
+
+ void InitializeNewString(Register string,
+ Register length,
+ Heap::RootListIndex map_index,
+ Register scratch1,
+ Register scratch2);
+
+ public:
+ // Far branches resolving.
+ //
+ // The various classes of branch instructions with immediate offsets have
+ // different ranges. While the Assembler will fail to assemble a branch
+ // exceeding its range, the MacroAssembler offers a mechanism to resolve
+ // branches to too distant targets, either by tweaking the generated code to
+ // use branch instructions with wider ranges or generating veneers.
+ //
+ // Currently branches to distant targets are resolved using unconditional
+  // branch instructions with a range of +-128MB. If that becomes too little
+ // (!), the mechanism can be extended to generate special veneers for really
+ // far targets.
+
+ // Helps resolve branching to labels potentially out of range.
+ // If the label is not bound, it registers the information necessary to later
+ // be able to emit a veneer for this branch if necessary.
+ // If the label is bound, it returns true if the label (or the previous link
+ // in the label chain) is out of range. In that case the caller is responsible
+ // for generating appropriate code.
+ // Otherwise it returns false.
+  // This function also checks whether veneers need to be emitted.
+ bool NeedExtraInstructionsOrRegisterBranch(Label *label,
+ ImmBranchType branch_type);
+};
+
+
+// Use this scope when you need a one-to-one mapping between methods and
+// instructions. This scope prevents the MacroAssembler from being called and
+// literal pools from being emitted. It also asserts the number of instructions
+// emitted is what you specified when creating the scope.
+class InstructionAccurateScope BASE_EMBEDDED {
+ public:
+ InstructionAccurateScope(MacroAssembler* masm, size_t count = 0)
+ : masm_(masm)
+#ifdef DEBUG
+ ,
+ size_(count * kInstructionSize)
+#endif
+ {
+ // Before blocking the const pool, see if it needs to be emitted.
+ masm_->CheckConstPool(false, true);
+ masm_->CheckVeneerPool(false, true);
+
+ masm_->StartBlockPools();
+#ifdef DEBUG
+ if (count != 0) {
+ masm_->bind(&start_);
+ }
+ previous_allow_macro_instructions_ = masm_->allow_macro_instructions();
+ masm_->set_allow_macro_instructions(false);
+#endif
+ }
+
+ ~InstructionAccurateScope() {
+ masm_->EndBlockPools();
+#ifdef DEBUG
+ if (start_.is_bound()) {
+ ASSERT(masm_->SizeOfCodeGeneratedSince(&start_) == size_);
+ }
+ masm_->set_allow_macro_instructions(previous_allow_macro_instructions_);
+#endif
+ }
+
+ private:
+ MacroAssembler* masm_;
+#ifdef DEBUG
+ size_t size_;
+ Label start_;
+ bool previous_allow_macro_instructions_;
+#endif
+};
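+
+// Illustrative sketch, not part of the original source: a caller that must
+// emit an exact instruction sequence (for example, a patchable site) can use
+// the scope with an expected instruction count, which is checked in debug
+// builds:
+//
+//   {
+//     InstructionAccurateScope scope(masm, 2);
+//     __ mov(x10, x11);
+//     __ add(x10, x10, x12);
+//   }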
+
+
+// This scope utility allows scratch registers to be managed safely. The
+// MacroAssembler's TmpList() (and FPTmpList()) is used as a pool of scratch
+// registers. These registers can be allocated on demand, and will be returned
+// at the end of the scope.
+//
+// When the scope ends, the MacroAssembler's lists will be restored to their
+// original state, even if the lists were modified by some other means.
+class UseScratchRegisterScope {
+ public:
+ explicit UseScratchRegisterScope(MacroAssembler* masm)
+ : available_(masm->TmpList()),
+ availablefp_(masm->FPTmpList()),
+ old_available_(available_->list()),
+ old_availablefp_(availablefp_->list()) {
+ ASSERT(available_->type() == CPURegister::kRegister);
+ ASSERT(availablefp_->type() == CPURegister::kFPRegister);
+ }
+
+ ~UseScratchRegisterScope();
+
+ // Take a register from the appropriate temps list. It will be returned
+ // automatically when the scope ends.
+ Register AcquireW() { return AcquireNextAvailable(available_).W(); }
+ Register AcquireX() { return AcquireNextAvailable(available_).X(); }
+ FPRegister AcquireS() { return AcquireNextAvailable(availablefp_).S(); }
+ FPRegister AcquireD() { return AcquireNextAvailable(availablefp_).D(); }
+
+ Register UnsafeAcquire(const Register& reg) {
+ return Register(UnsafeAcquire(available_, reg));
+ }
+
+ Register AcquireSameSizeAs(const Register& reg);
+ FPRegister AcquireSameSizeAs(const FPRegister& reg);
+
+ private:
+ static CPURegister AcquireNextAvailable(CPURegList* available);
+ static CPURegister UnsafeAcquire(CPURegList* available,
+ const CPURegister& reg);
+
+ // Available scratch registers.
+ CPURegList* available_; // kRegister
+ CPURegList* availablefp_; // kFPRegister
+
+ // The state of the available lists at the start of this scope.
+ RegList old_available_; // kRegister
+ RegList old_availablefp_; // kFPRegister
+};
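+
+// Illustrative sketch, not part of the original source: a typical user
+// acquires scratch registers for the duration of a short code sequence and
+// lets the scope hand them back automatically ('object' stands for whatever
+// register the caller already holds):
+//
+//   {
+//     UseScratchRegisterScope temps(masm);
+//     Register scratch = temps.AcquireX();
+//     __ Ldr(scratch, FieldMemOperand(object, HeapObject::kMapOffset));
+//   }  // 'scratch' is returned to the MacroAssembler's TmpList() here.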
+
+
+inline MemOperand ContextMemOperand(Register context, int index) {
+ return MemOperand(context, Context::SlotOffset(index));
+}
+
+inline MemOperand GlobalObjectMemOperand() {
+ return ContextMemOperand(cp, Context::GLOBAL_OBJECT_INDEX);
+}
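+
+// Illustrative sketch, not part of the original source: these helpers are
+// normally used as plain memory operands, for example to load the global
+// object from the current context register:
+//
+//   __ Ldr(x10, GlobalObjectMemOperand());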
+
+
+// Encode and decode information about patchable inline SMI checks.
+class InlineSmiCheckInfo {
+ public:
+ explicit InlineSmiCheckInfo(Address info);
+
+ bool HasSmiCheck() const {
+ return smi_check_ != NULL;
+ }
+
+ const Register& SmiRegister() const {
+ return reg_;
+ }
+
+ Instruction* SmiCheck() const {
+ return smi_check_;
+ }
+
+ // Use MacroAssembler::InlineData to emit information about patchable inline
+  // SMI checks. The caller may specify 'reg' as NoReg and an unbound
+  // 'smi_check' label to indicate that there is no inline SMI check. Note
+  // that 'reg' cannot be csp.
+  //
+  // The generated patch information can be read using the InlineSmiCheckInfo
+ // class.
+ static void Emit(MacroAssembler* masm, const Register& reg,
+ const Label* smi_check);
+
+ // Emit information to indicate that there is no inline SMI check.
+ static void EmitNotInlined(MacroAssembler* masm) {
+ Label unbound;
+ Emit(masm, NoReg, &unbound);
+ }
+
+ private:
+ Register reg_;
+ Instruction* smi_check_;
+
+ // Fields in the data encoded by InlineData.
+
+  // A width of 5 (Rd_width) for the SMI register precludes the use of csp,
+ // since kSPRegInternalCode is 63. However, csp should never hold a SMI or be
+ // used in a patchable check. The Emit() method checks this.
+ //
+ // Note that the total size of the fields is restricted by the underlying
+ // storage size handled by the BitField class, which is a uint32_t.
+ class RegisterBits : public BitField<unsigned, 0, 5> {};
+ class DeltaBits : public BitField<uint32_t, 5, 32-5> {};
+};
+
+} } // namespace v8::internal
+
+#ifdef GENERATED_CODE_COVERAGE
+#error "Unsupported option"
+#define CODE_COVERAGE_STRINGIFY(x) #x
+#define CODE_COVERAGE_TOSTRING(x) CODE_COVERAGE_STRINGIFY(x)
+#define __FILE_LINE__ __FILE__ ":" CODE_COVERAGE_TOSTRING(__LINE__)
+#define ACCESS_MASM(masm) masm->stop(__FILE_LINE__); masm->
+#else
+#define ACCESS_MASM(masm) masm->
+#endif
+
+#endif // V8_ARM64_MACRO_ASSEMBLER_ARM64_H_
diff --git a/deps/v8/src/arm64/regexp-macro-assembler-arm64.cc b/deps/v8/src/arm64/regexp-macro-assembler-arm64.cc
new file mode 100644
index 0000000000..536580ab55
--- /dev/null
+++ b/deps/v8/src/arm64/regexp-macro-assembler-arm64.cc
@@ -0,0 +1,1728 @@
+// Copyright 2013 the V8 project authors. All rights reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following
+// disclaimer in the documentation and/or other materials provided
+// with the distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived
+// from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+#include "v8.h"
+
+#if V8_TARGET_ARCH_ARM64
+
+#include "cpu-profiler.h"
+#include "unicode.h"
+#include "log.h"
+#include "code-stubs.h"
+#include "regexp-stack.h"
+#include "macro-assembler.h"
+#include "regexp-macro-assembler.h"
+#include "arm64/regexp-macro-assembler-arm64.h"
+
+namespace v8 {
+namespace internal {
+
+#ifndef V8_INTERPRETED_REGEXP
+/*
+ * This assembler uses the following register assignment convention:
+ * - w19     : Used to temporarily store a value before a call to C code.
+ * See CheckNotBackReferenceIgnoreCase.
+ * - x20 : Pointer to the current code object (Code*),
+ * it includes the heap object tag.
+ * - w21 : Current position in input, as negative offset from
+ * the end of the string. Please notice that this is
+ * the byte offset, not the character offset!
+ * - w22 : Currently loaded character. Must be loaded using
+ * LoadCurrentCharacter before using any of the dispatch methods.
+ * - x23 : Points to tip of backtrack stack.
+ * - w24 : Position of the first character minus one: non_position_value.
+ * Used to initialize capture registers.
+ * - x25 : Address at the end of the input string: input_end.
+ * Points to byte after last character in input.
+ * - x26 : Address at the start of the input string: input_start.
+ * - w27 : Where to start in the input string.
+ * - x28 : Output array pointer.
+ * - x29/fp : Frame pointer. Used to access arguments, local variables and
+ * RegExp registers.
+ * - x16/x17 : IP registers, used by assembler. Very volatile.
+ * - csp : Points to tip of C stack.
+ *
+ * - x0-x7 : Used as a cache to store 32 bit capture registers. These
+ * registers need to be retained every time a call to C code
+ * is done.
+ *
+ * The remaining registers are free for computations.
+ * Each call to a public method should retain this convention.
+ *
+ * The stack will have the following structure:
+ *
+ * Location Name Description
+ * (as referred to in
+ * the code)
+ *
+ * - fp[104] isolate Address of the current isolate.
+ * - fp[96] return_address Secondary link/return address
+ * used by an exit frame if this is a
+ * native call.
+ * ^^^ csp when called ^^^
+ * - fp[88] lr Return from the RegExp code.
+ * - fp[80] r29 Old frame pointer (CalleeSaved).
+ * - fp[0..72] r19-r28 Backup of CalleeSaved registers.
+ * - fp[-8] direct_call 1 => Direct call from JavaScript code.
+ * 0 => Call through the runtime system.
+ * - fp[-16] stack_base High end of the memory area to use as
+ * the backtracking stack.
+ * - fp[-24] output_size Output may fit multiple sets of matches.
+ * - fp[-32] input Handle containing the input string.
+ * - fp[-40] success_counter
+ * ^^^^^^^^^^^^^ From here and downwards we store 32 bit values ^^^^^^^^^^^^^
+ * - fp[-44] register N Capture registers initialized with
+ * - fp[-48] register N + 1 non_position_value.
+ * ... The first kNumCachedRegisters (N) registers
+ * ... are cached in x0 to x7.
+ * ... Only positions must be stored in the first
+ * - ... num_saved_registers_ registers.
+ * - ...
+ * - register N + num_registers - 1
+ * ^^^^^^^^^ csp ^^^^^^^^^
+ *
+ * The first num_saved_registers_ registers are initialized to point to
+ * "character -1" in the string (i.e., char_size() bytes before the first
+ * character of the string). The remaining registers start out as garbage.
+ *
+ * The data up to the return address must be placed there by the calling
+ * code and the remaining arguments are passed in registers, e.g. by calling the
+ * code entry as cast to a function with the signature:
+ * int (*match)(String* input,
+ * int start_offset,
+ * Address input_start,
+ * Address input_end,
+ * int* output,
+ * int output_size,
+ * Address stack_base,
+ * bool direct_call = false,
+ * Address secondary_return_address, // Only used by native call.
+ * Isolate* isolate)
+ * The call is performed by NativeRegExpMacroAssembler::Execute()
+ * (in regexp-macro-assembler.cc) via the CALL_GENERATED_REGEXP_CODE macro
+ * in arm64/simulator-arm64.h.
+ * When calling as a non-direct call (i.e., from C++ code), the return address
+ * area is overwritten with the LR register by the RegExp code. When doing a
+ * direct call from generated code, the return address is placed there by
+ * the calling code, as in a normal exit frame.
+ */
+
+#define __ ACCESS_MASM(masm_)
+
+RegExpMacroAssemblerARM64::RegExpMacroAssemblerARM64(
+ Mode mode,
+ int registers_to_save,
+ Zone* zone)
+ : NativeRegExpMacroAssembler(zone),
+ masm_(new MacroAssembler(zone->isolate(), NULL, kRegExpCodeSize)),
+ mode_(mode),
+ num_registers_(registers_to_save),
+ num_saved_registers_(registers_to_save),
+ entry_label_(),
+ start_label_(),
+ success_label_(),
+ backtrack_label_(),
+ exit_label_() {
+ __ SetStackPointer(csp);
+ ASSERT_EQ(0, registers_to_save % 2);
+ // We can cache at most 16 W registers in x0-x7.
+ STATIC_ASSERT(kNumCachedRegisters <= 16);
+ STATIC_ASSERT((kNumCachedRegisters % 2) == 0);
+ __ B(&entry_label_); // We'll write the entry code later.
+ __ Bind(&start_label_); // And then continue from here.
+}
+
+
+RegExpMacroAssemblerARM64::~RegExpMacroAssemblerARM64() {
+ delete masm_;
+ // Unuse labels in case we throw away the assembler without calling GetCode.
+ entry_label_.Unuse();
+ start_label_.Unuse();
+ success_label_.Unuse();
+ backtrack_label_.Unuse();
+ exit_label_.Unuse();
+ check_preempt_label_.Unuse();
+ stack_overflow_label_.Unuse();
+}
+
+int RegExpMacroAssemblerARM64::stack_limit_slack() {
+ return RegExpStack::kStackLimitSlack;
+}
+
+
+void RegExpMacroAssemblerARM64::AdvanceCurrentPosition(int by) {
+ if (by != 0) {
+ __ Add(current_input_offset(),
+ current_input_offset(), by * char_size());
+ }
+}
+
+
+void RegExpMacroAssemblerARM64::AdvanceRegister(int reg, int by) {
+ ASSERT((reg >= 0) && (reg < num_registers_));
+ if (by != 0) {
+ Register to_advance;
+ RegisterState register_state = GetRegisterState(reg);
+ switch (register_state) {
+ case STACKED:
+ __ Ldr(w10, register_location(reg));
+ __ Add(w10, w10, by);
+ __ Str(w10, register_location(reg));
+ break;
+ case CACHED_LSW:
+ to_advance = GetCachedRegister(reg);
+ __ Add(to_advance, to_advance, by);
+ break;
+ case CACHED_MSW:
+ to_advance = GetCachedRegister(reg);
+ __ Add(to_advance, to_advance,
+ static_cast<int64_t>(by) << kWRegSizeInBits);
+ break;
+ default:
+ UNREACHABLE();
+ break;
+ }
+ }
+}
+
+
+void RegExpMacroAssemblerARM64::Backtrack() {
+ CheckPreemption();
+ Pop(w10);
+ __ Add(x10, code_pointer(), Operand(w10, UXTW));
+ __ Br(x10);
+}
+
+
+void RegExpMacroAssemblerARM64::Bind(Label* label) {
+ __ Bind(label);
+}
+
+
+void RegExpMacroAssemblerARM64::CheckCharacter(uint32_t c, Label* on_equal) {
+ CompareAndBranchOrBacktrack(current_character(), c, eq, on_equal);
+}
+
+
+void RegExpMacroAssemblerARM64::CheckCharacterGT(uc16 limit,
+ Label* on_greater) {
+ CompareAndBranchOrBacktrack(current_character(), limit, hi, on_greater);
+}
+
+
+void RegExpMacroAssemblerARM64::CheckAtStart(Label* on_at_start) {
+ Label not_at_start;
+ // Did we start the match at the start of the input string?
+ CompareAndBranchOrBacktrack(start_offset(), 0, ne, &not_at_start);
+ // If we did, are we still at the start of the input string?
+ __ Add(x10, input_end(), Operand(current_input_offset(), SXTW));
+ __ Cmp(x10, input_start());
+ BranchOrBacktrack(eq, on_at_start);
+ __ Bind(&not_at_start);
+}
+
+
+void RegExpMacroAssemblerARM64::CheckNotAtStart(Label* on_not_at_start) {
+ // Did we start the match at the start of the input string?
+ CompareAndBranchOrBacktrack(start_offset(), 0, ne, on_not_at_start);
+ // If we did, are we still at the start of the input string?
+ __ Add(x10, input_end(), Operand(current_input_offset(), SXTW));
+ __ Cmp(x10, input_start());
+ BranchOrBacktrack(ne, on_not_at_start);
+}
+
+
+void RegExpMacroAssemblerARM64::CheckCharacterLT(uc16 limit, Label* on_less) {
+ CompareAndBranchOrBacktrack(current_character(), limit, lo, on_less);
+}
+
+
+void RegExpMacroAssemblerARM64::CheckCharacters(Vector<const uc16> str,
+ int cp_offset,
+ Label* on_failure,
+ bool check_end_of_string) {
+ // This method is only ever called from the cctests.
+
+ if (check_end_of_string) {
+    // Check that the last character of the required match is inside the string.
+ CheckPosition(cp_offset + str.length() - 1, on_failure);
+ }
+
+ Register characters_address = x11;
+
+ __ Add(characters_address,
+ input_end(),
+ Operand(current_input_offset(), SXTW));
+ if (cp_offset != 0) {
+ __ Add(characters_address, characters_address, cp_offset * char_size());
+ }
+
+ for (int i = 0; i < str.length(); i++) {
+ if (mode_ == ASCII) {
+ __ Ldrb(w10, MemOperand(characters_address, 1, PostIndex));
+ ASSERT(str[i] <= String::kMaxOneByteCharCode);
+ } else {
+ __ Ldrh(w10, MemOperand(characters_address, 2, PostIndex));
+ }
+ CompareAndBranchOrBacktrack(w10, str[i], ne, on_failure);
+ }
+}
+
+
+void RegExpMacroAssemblerARM64::CheckGreedyLoop(Label* on_equal) {
+ __ Ldr(w10, MemOperand(backtrack_stackpointer()));
+ __ Cmp(current_input_offset(), w10);
+ __ Cset(x11, eq);
+ __ Add(backtrack_stackpointer(),
+ backtrack_stackpointer(), Operand(x11, LSL, kWRegSizeLog2));
+ BranchOrBacktrack(eq, on_equal);
+}
+
+void RegExpMacroAssemblerARM64::CheckNotBackReferenceIgnoreCase(
+ int start_reg,
+ Label* on_no_match) {
+ Label fallthrough;
+
+ Register capture_start_offset = w10;
+ // Save the capture length in a callee-saved register so it will
+ // be preserved if we call a C helper.
+ Register capture_length = w19;
+ ASSERT(kCalleeSaved.IncludesAliasOf(capture_length));
+
+ // Find length of back-referenced capture.
+ ASSERT((start_reg % 2) == 0);
+ if (start_reg < kNumCachedRegisters) {
+ __ Mov(capture_start_offset.X(), GetCachedRegister(start_reg));
+ __ Lsr(x11, GetCachedRegister(start_reg), kWRegSizeInBits);
+ } else {
+ __ Ldp(w11, capture_start_offset, capture_location(start_reg, x10));
+ }
+ __ Sub(capture_length, w11, capture_start_offset); // Length to check.
+ // Succeed on empty capture (including no capture).
+ __ Cbz(capture_length, &fallthrough);
+
+ // Check that there are enough characters left in the input.
+ __ Cmn(capture_length, current_input_offset());
+ BranchOrBacktrack(gt, on_no_match);
+
+ if (mode_ == ASCII) {
+ Label success;
+ Label fail;
+ Label loop_check;
+
+ Register capture_start_address = x12;
+    Register capture_end_address = x13;
+ Register current_position_address = x14;
+
+ __ Add(capture_start_address,
+ input_end(),
+ Operand(capture_start_offset, SXTW));
+    __ Add(capture_end_address,
+ capture_start_address,
+ Operand(capture_length, SXTW));
+ __ Add(current_position_address,
+ input_end(),
+ Operand(current_input_offset(), SXTW));
+
+ Label loop;
+ __ Bind(&loop);
+ __ Ldrb(w10, MemOperand(capture_start_address, 1, PostIndex));
+ __ Ldrb(w11, MemOperand(current_position_address, 1, PostIndex));
+ __ Cmp(w10, w11);
+ __ B(eq, &loop_check);
+
+ // Mismatch, try case-insensitive match (converting letters to lower-case).
+ __ Orr(w10, w10, 0x20); // Convert capture character to lower-case.
+ __ Orr(w11, w11, 0x20); // Also convert input character.
+ __ Cmp(w11, w10);
+ __ B(ne, &fail);
+ __ Sub(w10, w10, 'a');
+ __ Cmp(w10, 'z' - 'a'); // Is w10 a lowercase letter?
+ __ B(ls, &loop_check); // In range 'a'-'z'.
+ // Latin-1: Check for values in range [224,254] but not 247.
+ __ Sub(w10, w10, 224 - 'a');
+ __ Cmp(w10, 254 - 224);
+ __ Ccmp(w10, 247 - 224, ZFlag, ls); // Check for 247.
+ __ B(eq, &fail); // Weren't Latin-1 letters.
+
+ __ Bind(&loop_check);
+    __ Cmp(capture_start_address, capture_end_address);
+ __ B(lt, &loop);
+ __ B(&success);
+
+ __ Bind(&fail);
+ BranchOrBacktrack(al, on_no_match);
+
+ __ Bind(&success);
+ // Compute new value of character position after the matched part.
+ __ Sub(current_input_offset().X(), current_position_address, input_end());
+ if (masm_->emit_debug_code()) {
+ __ Cmp(current_input_offset().X(), Operand(current_input_offset(), SXTW));
+ __ Ccmp(current_input_offset(), 0, NoFlag, eq);
+ // The current input offset should be <= 0, and fit in a W register.
+ __ Check(le, kOffsetOutOfRange);
+ }
+ } else {
+ ASSERT(mode_ == UC16);
+ int argument_count = 4;
+
+ // The cached registers need to be retained.
+ CPURegList cached_registers(CPURegister::kRegister, kXRegSizeInBits, 0, 7);
+ ASSERT((cached_registers.Count() * 2) == kNumCachedRegisters);
+ __ PushCPURegList(cached_registers);
+
+ // Put arguments into arguments registers.
+ // Parameters are
+ // x0: Address byte_offset1 - Address captured substring's start.
+ // x1: Address byte_offset2 - Address of current character position.
+ // w2: size_t byte_length - length of capture in bytes(!)
+ // x3: Isolate* isolate
+
+ // Address of start of capture.
+ __ Add(x0, input_end(), Operand(capture_start_offset, SXTW));
+ // Length of capture.
+ __ Mov(w2, capture_length);
+ // Address of current input position.
+ __ Add(x1, input_end(), Operand(current_input_offset(), SXTW));
+ // Isolate.
+ __ Mov(x3, ExternalReference::isolate_address(isolate()));
+
+ {
+ AllowExternalCallThatCantCauseGC scope(masm_);
+ ExternalReference function =
+ ExternalReference::re_case_insensitive_compare_uc16(isolate());
+ __ CallCFunction(function, argument_count);
+ }
+
+ // Check if function returned non-zero for success or zero for failure.
+ CompareAndBranchOrBacktrack(x0, 0, eq, on_no_match);
+ // On success, increment position by length of capture.
+ __ Add(current_input_offset(), current_input_offset(), capture_length);
+ // Reset the cached registers.
+ __ PopCPURegList(cached_registers);
+ }
+
+ __ Bind(&fallthrough);
+}
+
+void RegExpMacroAssemblerARM64::CheckNotBackReference(
+ int start_reg,
+ Label* on_no_match) {
+ Label fallthrough;
+
+ Register capture_start_address = x12;
+ Register capture_end_address = x13;
+ Register current_position_address = x14;
+ Register capture_length = w15;
+
+ // Find length of back-referenced capture.
+ ASSERT((start_reg % 2) == 0);
+ if (start_reg < kNumCachedRegisters) {
+ __ Mov(x10, GetCachedRegister(start_reg));
+ __ Lsr(x11, GetCachedRegister(start_reg), kWRegSizeInBits);
+ } else {
+ __ Ldp(w11, w10, capture_location(start_reg, x10));
+ }
+ __ Sub(capture_length, w11, w10); // Length to check.
+ // Succeed on empty capture (including no capture).
+ __ Cbz(capture_length, &fallthrough);
+
+ // Check that there are enough characters left in the input.
+ __ Cmn(capture_length, current_input_offset());
+ BranchOrBacktrack(gt, on_no_match);
+
+ // Compute pointers to match string and capture string
+ __ Add(capture_start_address, input_end(), Operand(w10, SXTW));
+ __ Add(capture_end_address,
+ capture_start_address,
+ Operand(capture_length, SXTW));
+ __ Add(current_position_address,
+ input_end(),
+ Operand(current_input_offset(), SXTW));
+
+ Label loop;
+ __ Bind(&loop);
+ if (mode_ == ASCII) {
+ __ Ldrb(w10, MemOperand(capture_start_address, 1, PostIndex));
+ __ Ldrb(w11, MemOperand(current_position_address, 1, PostIndex));
+ } else {
+ ASSERT(mode_ == UC16);
+ __ Ldrh(w10, MemOperand(capture_start_address, 2, PostIndex));
+ __ Ldrh(w11, MemOperand(current_position_address, 2, PostIndex));
+ }
+ __ Cmp(w10, w11);
+ BranchOrBacktrack(ne, on_no_match);
+ __ Cmp(capture_start_address, capture_end_address);
+ __ B(lt, &loop);
+
+ // Move current character position to position after match.
+ __ Sub(current_input_offset().X(), current_position_address, input_end());
+ if (masm_->emit_debug_code()) {
+ __ Cmp(current_input_offset().X(), Operand(current_input_offset(), SXTW));
+ __ Ccmp(current_input_offset(), 0, NoFlag, eq);
+ // The current input offset should be <= 0, and fit in a W register.
+ __ Check(le, kOffsetOutOfRange);
+ }
+ __ Bind(&fallthrough);
+}
+
+
+void RegExpMacroAssemblerARM64::CheckNotCharacter(unsigned c,
+ Label* on_not_equal) {
+ CompareAndBranchOrBacktrack(current_character(), c, ne, on_not_equal);
+}
+
+
+void RegExpMacroAssemblerARM64::CheckCharacterAfterAnd(uint32_t c,
+ uint32_t mask,
+ Label* on_equal) {
+ __ And(w10, current_character(), mask);
+ CompareAndBranchOrBacktrack(w10, c, eq, on_equal);
+}
+
+
+void RegExpMacroAssemblerARM64::CheckNotCharacterAfterAnd(unsigned c,
+ unsigned mask,
+ Label* on_not_equal) {
+ __ And(w10, current_character(), mask);
+ CompareAndBranchOrBacktrack(w10, c, ne, on_not_equal);
+}
+
+
+void RegExpMacroAssemblerARM64::CheckNotCharacterAfterMinusAnd(
+ uc16 c,
+ uc16 minus,
+ uc16 mask,
+ Label* on_not_equal) {
+ ASSERT(minus < String::kMaxUtf16CodeUnit);
+ __ Sub(w10, current_character(), minus);
+ __ And(w10, w10, mask);
+ CompareAndBranchOrBacktrack(w10, c, ne, on_not_equal);
+}
+
+
+void RegExpMacroAssemblerARM64::CheckCharacterInRange(
+ uc16 from,
+ uc16 to,
+ Label* on_in_range) {
+ __ Sub(w10, current_character(), from);
+ // Unsigned lower-or-same condition.
+ CompareAndBranchOrBacktrack(w10, to - from, ls, on_in_range);
+}
+
+
+void RegExpMacroAssemblerARM64::CheckCharacterNotInRange(
+ uc16 from,
+ uc16 to,
+ Label* on_not_in_range) {
+ __ Sub(w10, current_character(), from);
+ // Unsigned higher condition.
+ CompareAndBranchOrBacktrack(w10, to - from, hi, on_not_in_range);
+}
+
+
+void RegExpMacroAssemblerARM64::CheckBitInTable(
+ Handle<ByteArray> table,
+ Label* on_bit_set) {
+ __ Mov(x11, Operand(table));
+ if ((mode_ != ASCII) || (kTableMask != String::kMaxOneByteCharCode)) {
+ __ And(w10, current_character(), kTableMask);
+ __ Add(w10, w10, ByteArray::kHeaderSize - kHeapObjectTag);
+ } else {
+ __ Add(w10, current_character(), ByteArray::kHeaderSize - kHeapObjectTag);
+ }
+ __ Ldrb(w11, MemOperand(x11, w10, UXTW));
+ CompareAndBranchOrBacktrack(w11, 0, ne, on_bit_set);
+}
+
+
+bool RegExpMacroAssemblerARM64::CheckSpecialCharacterClass(uc16 type,
+ Label* on_no_match) {
+ // Range checks (c in min..max) are generally implemented by an unsigned
+ // (c - min) <= (max - min) check
+ switch (type) {
+ case 's':
+ // Match space-characters
+ if (mode_ == ASCII) {
+ // One byte space characters are '\t'..'\r', ' ' and \u00a0.
+ Label success;
+ // Check for ' ' or 0x00a0.
+ __ Cmp(current_character(), ' ');
+ __ Ccmp(current_character(), 0x00a0, ZFlag, ne);
+ __ B(eq, &success);
+ // Check range 0x09..0x0d.
+ __ Sub(w10, current_character(), '\t');
+ CompareAndBranchOrBacktrack(w10, '\r' - '\t', hi, on_no_match);
+ __ Bind(&success);
+ return true;
+ }
+ return false;
+ case 'S':
+ // The emitted code for generic character classes is good enough.
+ return false;
+ case 'd':
+ // Match ASCII digits ('0'..'9').
+ __ Sub(w10, current_character(), '0');
+ CompareAndBranchOrBacktrack(w10, '9' - '0', hi, on_no_match);
+ return true;
+ case 'D':
+ // Match ASCII non-digits.
+ __ Sub(w10, current_character(), '0');
+ CompareAndBranchOrBacktrack(w10, '9' - '0', ls, on_no_match);
+ return true;
+ case '.': {
+ // Match non-newlines (not 0x0a('\n'), 0x0d('\r'), 0x2028 and 0x2029)
+ // Here we emit the conditional branch only once at the end to make branch
+ // prediction more efficient, even though we could branch out of here
+ // as soon as a character matches.
+ __ Cmp(current_character(), 0x0a);
+ __ Ccmp(current_character(), 0x0d, ZFlag, ne);
+ if (mode_ == UC16) {
+ __ Sub(w10, current_character(), 0x2028);
+ // If the Z flag was set we clear the flags to force a branch.
+ __ Ccmp(w10, 0x2029 - 0x2028, NoFlag, ne);
+ // ls -> !((C==1) && (Z==0))
+ BranchOrBacktrack(ls, on_no_match);
+ } else {
+ BranchOrBacktrack(eq, on_no_match);
+ }
+ return true;
+ }
+ case 'n': {
+ // Match newlines (0x0a('\n'), 0x0d('\r'), 0x2028 and 0x2029)
+ // We have to check all 4 newline characters before emitting
+ // the conditional branch.
+ __ Cmp(current_character(), 0x0a);
+ __ Ccmp(current_character(), 0x0d, ZFlag, ne);
+ if (mode_ == UC16) {
+ __ Sub(w10, current_character(), 0x2028);
+ // If the Z flag was set we clear the flags to force a fall-through.
+ __ Ccmp(w10, 0x2029 - 0x2028, NoFlag, ne);
+ // hi -> (C==1) && (Z==0)
+ BranchOrBacktrack(hi, on_no_match);
+ } else {
+ BranchOrBacktrack(ne, on_no_match);
+ }
+ return true;
+ }
+ case 'w': {
+ if (mode_ != ASCII) {
+ // Table is 128 entries, so all ASCII characters can be tested.
+ CompareAndBranchOrBacktrack(current_character(), 'z', hi, on_no_match);
+ }
+ ExternalReference map = ExternalReference::re_word_character_map();
+ __ Mov(x10, map);
+ __ Ldrb(w10, MemOperand(x10, current_character(), UXTW));
+ CompareAndBranchOrBacktrack(w10, 0, eq, on_no_match);
+ return true;
+ }
+ case 'W': {
+ Label done;
+ if (mode_ != ASCII) {
+ // Table is 128 entries, so all ASCII characters can be tested.
+ __ Cmp(current_character(), 'z');
+ __ B(hi, &done);
+ }
+ ExternalReference map = ExternalReference::re_word_character_map();
+ __ Mov(x10, map);
+ __ Ldrb(w10, MemOperand(x10, current_character(), UXTW));
+ CompareAndBranchOrBacktrack(w10, 0, ne, on_no_match);
+ __ Bind(&done);
+ return true;
+ }
+ case '*':
+ // Match any character.
+ return true;
+ // No custom implementation (yet): s(UC16), S(UC16).
+ default:
+ return false;
+ }
+}
+
+
+void RegExpMacroAssemblerARM64::Fail() {
+ __ Mov(w0, FAILURE);
+ __ B(&exit_label_);
+}
+
+
+Handle<HeapObject> RegExpMacroAssemblerARM64::GetCode(Handle<String> source) {
+ Label return_w0;
+  // Finalize code - write the entry point code now that we know how many
+ // registers we need.
+
+ // Entry code:
+ __ Bind(&entry_label_);
+
+ // Arguments on entry:
+ // x0: String* input
+ // x1: int start_offset
+ // x2: byte* input_start
+ // x3: byte* input_end
+ // x4: int* output array
+ // x5: int output array size
+ // x6: Address stack_base
+ // x7: int direct_call
+
+ // The stack pointer should be csp on entry.
+ // csp[8]: address of the current isolate
+ // csp[0]: secondary link/return address used by native call
+
+ // Tell the system that we have a stack frame. Because the type is MANUAL, no
+ // code is generated.
+ FrameScope scope(masm_, StackFrame::MANUAL);
+
+  // Push registers on the stack; only push the argument registers that we need.
+ CPURegList argument_registers(x0, x5, x6, x7);
+
+ CPURegList registers_to_retain = kCalleeSaved;
+ ASSERT(kCalleeSaved.Count() == 11);
+ registers_to_retain.Combine(lr);
+
+ ASSERT(csp.Is(__ StackPointer()));
+ __ PushCPURegList(registers_to_retain);
+ __ PushCPURegList(argument_registers);
+
+ // Set frame pointer in place.
+ __ Add(frame_pointer(), csp, argument_registers.Count() * kPointerSize);
+
+ // Initialize callee-saved registers.
+ __ Mov(start_offset(), w1);
+ __ Mov(input_start(), x2);
+ __ Mov(input_end(), x3);
+ __ Mov(output_array(), x4);
+
+ // Set the number of registers we will need to allocate, that is:
+ // - success_counter (X register)
+ // - (num_registers_ - kNumCachedRegisters) (W registers)
+ int num_wreg_to_allocate = num_registers_ - kNumCachedRegisters;
+ // Do not allocate registers on the stack if they can all be cached.
+ if (num_wreg_to_allocate < 0) { num_wreg_to_allocate = 0; }
+ // Make room for the success_counter.
+ num_wreg_to_allocate += 2;
+
+ // Make sure the stack alignment will be respected.
+ int alignment = masm_->ActivationFrameAlignment();
+ ASSERT_EQ(alignment % 16, 0);
+ int align_mask = (alignment / kWRegSize) - 1;
+ num_wreg_to_allocate = (num_wreg_to_allocate + align_mask) & ~align_mask;
+
+ // Check if we have space on the stack.
+ Label stack_limit_hit;
+ Label stack_ok;
+
+ ExternalReference stack_limit =
+ ExternalReference::address_of_stack_limit(isolate());
+ __ Mov(x10, stack_limit);
+ __ Ldr(x10, MemOperand(x10));
+ __ Subs(x10, csp, x10);
+
+ // Handle it if the stack pointer is already below the stack limit.
+ __ B(ls, &stack_limit_hit);
+
+ // Check if there is room for the variable number of registers above
+ // the stack limit.
+ __ Cmp(x10, num_wreg_to_allocate * kWRegSize);
+ __ B(hs, &stack_ok);
+
+ // Exit with OutOfMemory exception. There is not enough space on the stack
+ // for our working registers.
+ __ Mov(w0, EXCEPTION);
+ __ B(&return_w0);
+
+ __ Bind(&stack_limit_hit);
+ CallCheckStackGuardState(x10);
+ // If returned value is non-zero, we exit with the returned value as result.
+ __ Cbnz(w0, &return_w0);
+
+ __ Bind(&stack_ok);
+
+ // Allocate space on stack.
+ __ Claim(num_wreg_to_allocate, kWRegSize);
+
+ // Initialize success_counter with 0.
+ __ Str(wzr, MemOperand(frame_pointer(), kSuccessCounter));
+
+ // Find negative length (offset of start relative to end).
+ __ Sub(x10, input_start(), input_end());
+ if (masm_->emit_debug_code()) {
+ // Check that the input string length is < 2^30.
+ __ Neg(x11, x10);
+ __ Cmp(x11, (1<<30) - 1);
+ __ Check(ls, kInputStringTooLong);
+ }
+ __ Mov(current_input_offset(), w10);
+
+ // The non-position value is used as a clearing value for the capture
+ // registers; it corresponds to the position of the first character
+ // minus one.
+ __ Sub(non_position_value(), current_input_offset(), char_size());
+ __ Sub(non_position_value(), non_position_value(),
+ Operand(start_offset(), LSL, (mode_ == UC16) ? 1 : 0));
+ // We can store this value twice in an X register for initializing
+ // on-stack registers later.
+ __ Orr(twice_non_position_value(),
+ non_position_value().X(),
+ Operand(non_position_value().X(), LSL, kWRegSizeInBits));
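+ // twice_non_position_value() now holds the 32-bit clearing value duplicated
+ // in both halves of the X register, so a single 64-bit store can clear two
+ // capture registers at once.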
+
+ // Initialize code pointer register.
+ __ Mov(code_pointer(), Operand(masm_->CodeObject()));
+
+ Label load_char_start_regexp, start_regexp;
+ // Load newline if index is at start, previous character otherwise.
+ __ Cbnz(start_offset(), &load_char_start_regexp);
+ __ Mov(current_character(), '\n');
+ __ B(&start_regexp);
+
+ // Global regexp restarts matching here.
+ __ Bind(&load_char_start_regexp);
+ // Load previous char as initial value of current character register.
+ LoadCurrentCharacterUnchecked(-1, 1);
+ __ Bind(&start_regexp);
+ // Initialize on-stack registers.
+ if (num_saved_registers_ > 0) {
+ ClearRegisters(0, num_saved_registers_ - 1);
+ }
+
+ // Initialize backtrack stack pointer.
+ __ Ldr(backtrack_stackpointer(), MemOperand(frame_pointer(), kStackBase));
+
+ // Execute
+ __ B(&start_label_);
+
+ if (backtrack_label_.is_linked()) {
+ __ Bind(&backtrack_label_);
+ Backtrack();
+ }
+
+ if (success_label_.is_linked()) {
+ Register first_capture_start = w15;
+
+ // Save captures when successful.
+ __ Bind(&success_label_);
+
+ if (num_saved_registers_ > 0) {
+ // V8 expects the output to be an int32_t array.
+ Register capture_start = w12;
+ Register capture_end = w13;
+ Register input_length = w14;
+
+ // Copy captures to output.
+
+ // Get string length.
+ __ Sub(x10, input_end(), input_start());
+ if (masm_->emit_debug_code()) {
+ // Check that the input string length is < 2^30.
+ __ Cmp(x10, (1<<30) - 1);
+ __ Check(ls, kInputStringTooLong);
+ }
+ // input_start is offset by start_offset on entry. We need to include it
+ // when computing the length of the whole string.
+ if (mode_ == UC16) {
+ __ Add(input_length, start_offset(), Operand(w10, LSR, 1));
+ } else {
+ __ Add(input_length, start_offset(), w10);
+ }
+
+ // Copy the results to the output array from the cached registers first.
+ for (int i = 0;
+ (i < num_saved_registers_) && (i < kNumCachedRegisters);
+ i += 2) {
+ __ Mov(capture_start.X(), GetCachedRegister(i));
+ __ Lsr(capture_end.X(), capture_start.X(), kWRegSizeInBits);
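+ // Each cached X register packs the even-indexed capture register in its low
+ // 32 bits and the odd-indexed one in its high 32 bits (see GetRegisterState).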
+ if ((i == 0) && global_with_zero_length_check()) {
+ // Keep capture start for the zero-length check later.
+ __ Mov(first_capture_start, capture_start);
+ }
+ // Offsets need to be relative to the start of the string.
+ if (mode_ == UC16) {
+ __ Add(capture_start, input_length, Operand(capture_start, ASR, 1));
+ __ Add(capture_end, input_length, Operand(capture_end, ASR, 1));
+ } else {
+ __ Add(capture_start, input_length, capture_start);
+ __ Add(capture_end, input_length, capture_end);
+ }
+ // The output pointer advances for a possible global match.
+ __ Stp(capture_start,
+ capture_end,
+ MemOperand(output_array(), kPointerSize, PostIndex));
+ }
+
+ // Only carry on if there are more than kNumCachedRegisters capture
+ // registers.
+ int num_registers_left_on_stack =
+ num_saved_registers_ - kNumCachedRegisters;
+ if (num_registers_left_on_stack > 0) {
+ Register base = x10;
+ // There is always an even number of capture registers. A pair of
+ // registers determines one match, with a start and an end offset.
+ ASSERT_EQ(0, num_registers_left_on_stack % 2);
+ __ Add(base, frame_pointer(), kFirstCaptureOnStack);
+
+ // We can unroll the loop here; we should not unroll for fewer than 2
+ // registers.
+ STATIC_ASSERT(kNumRegistersToUnroll > 2);
+ if (num_registers_left_on_stack <= kNumRegistersToUnroll) {
+ for (int i = 0; i < num_registers_left_on_stack / 2; i++) {
+ __ Ldp(capture_end,
+ capture_start,
+ MemOperand(base, -kPointerSize, PostIndex));
+ if ((i == 0) && global_with_zero_length_check()) {
+ // Keep capture start for the zero-length check later.
+ __ Mov(first_capture_start, capture_start);
+ }
+ // Offsets need to be relative to the start of the string.
+ if (mode_ == UC16) {
+ __ Add(capture_start,
+ input_length,
+ Operand(capture_start, ASR, 1));
+ __ Add(capture_end, input_length, Operand(capture_end, ASR, 1));
+ } else {
+ __ Add(capture_start, input_length, capture_start);
+ __ Add(capture_end, input_length, capture_end);
+ }
+ // The output pointer advances for a possible global match.
+ __ Stp(capture_start,
+ capture_end,
+ MemOperand(output_array(), kPointerSize, PostIndex));
+ }
+ } else {
+ Label loop, start;
+ __ Mov(x11, num_registers_left_on_stack);
+
+ __ Ldp(capture_end,
+ capture_start,
+ MemOperand(base, -kPointerSize, PostIndex));
+ if (global_with_zero_length_check()) {
+ __ Mov(first_capture_start, capture_start);
+ }
+ __ B(&start);
+
+ __ Bind(&loop);
+ __ Ldp(capture_end,
+ capture_start,
+ MemOperand(base, -kPointerSize, PostIndex));
+ __ Bind(&start);
+ if (mode_ == UC16) {
+ __ Add(capture_start, input_length, Operand(capture_start, ASR, 1));
+ __ Add(capture_end, input_length, Operand(capture_end, ASR, 1));
+ } else {
+ __ Add(capture_start, input_length, capture_start);
+ __ Add(capture_end, input_length, capture_end);
+ }
+ // The output pointer advances for a possible global match.
+ __ Stp(capture_start,
+ capture_end,
+ MemOperand(output_array(), kPointerSize, PostIndex));
+ __ Sub(x11, x11, 2);
+ __ Cbnz(x11, &loop);
+ }
+ }
+ }
+
+ if (global()) {
+ Register success_counter = w0;
+ Register output_size = x10;
+ // Restart matching if the regular expression is flagged as global.
+
+ // Increment success counter.
+ __ Ldr(success_counter, MemOperand(frame_pointer(), kSuccessCounter));
+ __ Add(success_counter, success_counter, 1);
+ __ Str(success_counter, MemOperand(frame_pointer(), kSuccessCounter));
+
+ // Capture results have been stored, so the number of remaining global
+ // output registers is reduced by the number of stored captures.
+ __ Ldr(output_size, MemOperand(frame_pointer(), kOutputSize));
+ __ Sub(output_size, output_size, num_saved_registers_);
+ // Check whether we have enough room for another set of capture results.
+ __ Cmp(output_size, num_saved_registers_);
+ __ B(lt, &return_w0);
+
+ // The output pointer is already set to the next field in the output
+ // array.
+ // Update output size on the frame before we restart matching.
+ __ Str(output_size, MemOperand(frame_pointer(), kOutputSize));
+
+ if (global_with_zero_length_check()) {
+ // Special case for zero-length matches.
+ __ Cmp(current_input_offset(), first_capture_start);
+ // Not a zero-length match, restart.
+ __ B(ne, &load_char_start_regexp);
+ // Offset from the end is zero if we already reached the end.
+ __ Cbz(current_input_offset(), &return_w0);
+ // Advance current position after a zero-length match.
+ __ Add(current_input_offset(),
+ current_input_offset(),
+ Operand((mode_ == UC16) ? 2 : 1));
+ }
+
+ __ B(&load_char_start_regexp);
+ } else {
+ __ Mov(w0, SUCCESS);
+ }
+ }
+
+ if (exit_label_.is_linked()) {
+ // Exit and return w0
+ __ Bind(&exit_label_);
+ if (global()) {
+ __ Ldr(w0, MemOperand(frame_pointer(), kSuccessCounter));
+ }
+ }
+
+ __ Bind(&return_w0);
+
+ // Set the stack pointer back to the first register to retain.
+ ASSERT(csp.Is(__ StackPointer()));
+ __ Mov(csp, fp);
+
+ // Restore registers.
+ __ PopCPURegList(registers_to_retain);
+
+ __ Ret();
+
+ Label exit_with_exception;
+ // Registers x0 to x7 are used to store the first captures; they need to be
+ // retained across calls to C++ code.
+ CPURegList cached_registers(CPURegister::kRegister, kXRegSizeInBits, 0, 7);
+ ASSERT((cached_registers.Count() * 2) == kNumCachedRegisters);
+
+ if (check_preempt_label_.is_linked()) {
+ __ Bind(&check_preempt_label_);
+ SaveLinkRegister();
+ // The cached registers need to be retained.
+ __ PushCPURegList(cached_registers);
+ CallCheckStackGuardState(x10);
+ // Returning from the regexp code restores the stack (csp <- fp)
+ // so we don't need to drop the link register from it before exiting.
+ __ Cbnz(w0, &return_w0);
+ // Reset the cached registers.
+ __ PopCPURegList(cached_registers);
+ RestoreLinkRegister();
+ __ Ret();
+ }
+
+ if (stack_overflow_label_.is_linked()) {
+ __ Bind(&stack_overflow_label_);
+ SaveLinkRegister();
+ // The cached registers need to be retained.
+ __ PushCPURegList(cached_registers);
+ // Call GrowStack(backtrack_stackpointer(), &stack_base, isolate).
+ __ Mov(x2, ExternalReference::isolate_address(isolate()));
+ __ Add(x1, frame_pointer(), kStackBase);
+ __ Mov(x0, backtrack_stackpointer());
+ ExternalReference grow_stack =
+ ExternalReference::re_grow_stack(isolate());
+ __ CallCFunction(grow_stack, 3);
+ // If NULL is returned, we have failed to grow the stack, and must exit
+ // with a stack-overflow exception.
+ // Returning from the regexp code restores the stack (csp <- fp)
+ // so we don't need to drop the link register from it before exiting.
+ __ Cbz(w0, &exit_with_exception);
+ // Otherwise use return value as new stack pointer.
+ __ Mov(backtrack_stackpointer(), x0);
+ // Reset the cached registers.
+ __ PopCPURegList(cached_registers);
+ RestoreLinkRegister();
+ __ Ret();
+ }
+
+ if (exit_with_exception.is_linked()) {
+ __ Bind(&exit_with_exception);
+ __ Mov(w0, EXCEPTION);
+ __ B(&return_w0);
+ }
+
+ CodeDesc code_desc;
+ masm_->GetCode(&code_desc);
+ Handle<Code> code = isolate()->factory()->NewCode(
+ code_desc, Code::ComputeFlags(Code::REGEXP), masm_->CodeObject());
+ PROFILE(masm_->isolate(), RegExpCodeCreateEvent(*code, *source));
+ return Handle<HeapObject>::cast(code);
+}
+
+
+void RegExpMacroAssemblerARM64::GoTo(Label* to) {
+ BranchOrBacktrack(al, to);
+}
+
+void RegExpMacroAssemblerARM64::IfRegisterGE(int reg, int comparand,
+ Label* if_ge) {
+ Register to_compare = GetRegister(reg, w10);
+ CompareAndBranchOrBacktrack(to_compare, comparand, ge, if_ge);
+}
+
+
+void RegExpMacroAssemblerARM64::IfRegisterLT(int reg, int comparand,
+ Label* if_lt) {
+ Register to_compare = GetRegister(reg, w10);
+ CompareAndBranchOrBacktrack(to_compare, comparand, lt, if_lt);
+}
+
+
+void RegExpMacroAssemblerARM64::IfRegisterEqPos(int reg, Label* if_eq) {
+ Register to_compare = GetRegister(reg, w10);
+ __ Cmp(to_compare, current_input_offset());
+ BranchOrBacktrack(eq, if_eq);
+}
+
+RegExpMacroAssembler::IrregexpImplementation
+ RegExpMacroAssemblerARM64::Implementation() {
+ return kARM64Implementation;
+}
+
+
+void RegExpMacroAssemblerARM64::LoadCurrentCharacter(int cp_offset,
+ Label* on_end_of_input,
+ bool check_bounds,
+ int characters) {
+ // TODO(pielan): Make sure long strings are caught before this, and not
+ // just asserted in debug mode.
+ ASSERT(cp_offset >= -1); // ^ and \b can look behind one character.
+ // Be sane! (And ensure that an int32_t can be used to index the string)
+ ASSERT(cp_offset < (1<<30));
+ if (check_bounds) {
+ CheckPosition(cp_offset + characters - 1, on_end_of_input);
+ }
+ LoadCurrentCharacterUnchecked(cp_offset, characters);
+}
+
+
+void RegExpMacroAssemblerARM64::PopCurrentPosition() {
+ Pop(current_input_offset());
+}
+
+
+void RegExpMacroAssemblerARM64::PopRegister(int register_index) {
+ Pop(w10);
+ StoreRegister(register_index, w10);
+}
+
+
+void RegExpMacroAssemblerARM64::PushBacktrack(Label* label) {
+ if (label->is_bound()) {
+ int target = label->pos();
+ __ Mov(w10, target + Code::kHeaderSize - kHeapObjectTag);
+ } else {
+ __ Adr(x10, label);
+ __ Sub(x10, x10, code_pointer());
+ if (masm_->emit_debug_code()) {
+ __ Cmp(x10, kWRegMask);
+ // The code offset has to fit in a W register.
+ __ Check(ls, kOffsetOutOfRange);
+ }
+ }
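+ // In both cases w10 holds an offset relative to the code object rather than
+ // an absolute address, so the backtrack entry stays valid even if the code
+ // object moves.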
+ Push(w10);
+ CheckStackLimit();
+}
+
+
+void RegExpMacroAssemblerARM64::PushCurrentPosition() {
+ Push(current_input_offset());
+}
+
+
+void RegExpMacroAssemblerARM64::PushRegister(int register_index,
+ StackCheckFlag check_stack_limit) {
+ Register to_push = GetRegister(register_index, w10);
+ Push(to_push);
+ if (check_stack_limit) CheckStackLimit();
+}
+
+
+void RegExpMacroAssemblerARM64::ReadCurrentPositionFromRegister(int reg) {
+ Register cached_register;
+ RegisterState register_state = GetRegisterState(reg);
+ switch (register_state) {
+ case STACKED:
+ __ Ldr(current_input_offset(), register_location(reg));
+ break;
+ case CACHED_LSW:
+ cached_register = GetCachedRegister(reg);
+ __ Mov(current_input_offset(), cached_register.W());
+ break;
+ case CACHED_MSW:
+ cached_register = GetCachedRegister(reg);
+ __ Lsr(current_input_offset().X(), cached_register, kWRegSizeInBits);
+ break;
+ default:
+ UNREACHABLE();
+ break;
+ }
+}
+
+
+void RegExpMacroAssemblerARM64::ReadStackPointerFromRegister(int reg) {
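+ // The backtrack stack pointer is stored in regexp registers as a 32-bit
+ // offset from the stack base (see WriteStackPointerToRegister); rebuild the
+ // absolute pointer here.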
+ Register read_from = GetRegister(reg, w10);
+ __ Ldr(x11, MemOperand(frame_pointer(), kStackBase));
+ __ Add(backtrack_stackpointer(), x11, Operand(read_from, SXTW));
+}
+
+
+void RegExpMacroAssemblerARM64::SetCurrentPositionFromEnd(int by) {
+ Label after_position;
+ __ Cmp(current_input_offset(), -by * char_size());
+ __ B(ge, &after_position);
+ __ Mov(current_input_offset(), -by * char_size());
+ // On RegExp code entry (where this operation is used), the character before
+ // the current position is expected to be already loaded.
+ // We have advanced the position, so it's safe to read backwards.
+ LoadCurrentCharacterUnchecked(-1, 1);
+ __ Bind(&after_position);
+}
+
+
+void RegExpMacroAssemblerARM64::SetRegister(int register_index, int to) {
+ ASSERT(register_index >= num_saved_registers_); // Reserved for positions!
+ Register set_to = wzr;
+ if (to != 0) {
+ set_to = w10;
+ __ Mov(set_to, to);
+ }
+ StoreRegister(register_index, set_to);
+}
+
+
+bool RegExpMacroAssemblerARM64::Succeed() {
+ __ B(&success_label_);
+ return global();
+}
+
+
+void RegExpMacroAssemblerARM64::WriteCurrentPositionToRegister(int reg,
+ int cp_offset) {
+ Register position = current_input_offset();
+ if (cp_offset != 0) {
+ position = w10;
+ __ Add(position, current_input_offset(), cp_offset * char_size());
+ }
+ StoreRegister(reg, position);
+}
+
+
+void RegExpMacroAssemblerARM64::ClearRegisters(int reg_from, int reg_to) {
+ ASSERT(reg_from <= reg_to);
+ int num_registers = reg_to - reg_from + 1;
+
+ // If the first capture register is cached in a hardware register but is not
+ // aligned on a 64-bit boundary (odd index), clear it individually first.
+ if ((reg_from < kNumCachedRegisters) && ((reg_from % 2) != 0)) {
+ StoreRegister(reg_from, non_position_value());
+ num_registers--;
+ reg_from++;
+ }
+
+ // Clear cached registers in pairs as far as possible.
+ while ((num_registers >= 2) && (reg_from < kNumCachedRegisters)) {
+ ASSERT(GetRegisterState(reg_from) == CACHED_LSW);
+ __ Mov(GetCachedRegister(reg_from), twice_non_position_value());
+ reg_from += 2;
+ num_registers -= 2;
+ }
+
+ if ((num_registers % 2) == 1) {
+ StoreRegister(reg_from, non_position_value());
+ num_registers--;
+ reg_from++;
+ }
+
+ if (num_registers > 0) {
+ // If there are some remaining registers, they are stored on the stack.
+ ASSERT(reg_from >= kNumCachedRegisters);
+
+ // Shift the register indexes down by kNumCachedRegisters to get the
+ // correct offsets of the on-stack registers.
+ reg_from -= kNumCachedRegisters;
+ reg_to -= kNumCachedRegisters;
+ // We should not unroll the loop for fewer than 2 registers.
+ STATIC_ASSERT(kNumRegistersToUnroll > 2);
+ // We position the base pointer at register (reg_from + 1).
+ int base_offset = kFirstRegisterOnStack -
+ kWRegSize - (kWRegSize * reg_from);
+ if (num_registers > kNumRegistersToUnroll) {
+ Register base = x10;
+ __ Add(base, frame_pointer(), base_offset);
+
+ Label loop;
+ __ Mov(x11, num_registers);
+ __ Bind(&loop);
+ __ Str(twice_non_position_value(),
+ MemOperand(base, -kPointerSize, PostIndex));
+ __ Sub(x11, x11, 2);
+ __ Cbnz(x11, &loop);
+ } else {
+ for (int i = reg_from; i <= reg_to; i += 2) {
+ __ Str(twice_non_position_value(),
+ MemOperand(frame_pointer(), base_offset));
+ base_offset -= kWRegSize * 2;
+ }
+ }
+ }
+}
+
+
+void RegExpMacroAssemblerARM64::WriteStackPointerToRegister(int reg) {
+ __ Ldr(x10, MemOperand(frame_pointer(), kStackBase));
+ __ Sub(x10, backtrack_stackpointer(), x10);
+ if (masm_->emit_debug_code()) {
+ __ Cmp(x10, Operand(w10, SXTW));
+ // The stack offset needs to fit in a W register.
+ __ Check(eq, kOffsetOutOfRange);
+ }
+ StoreRegister(reg, w10);
+}
+
+
+// Helper function for reading a value out of a stack frame.
+template <typename T>
+static T& frame_entry(Address re_frame, int frame_offset) {
+ return *reinterpret_cast<T*>(re_frame + frame_offset);
+}
+
+
+int RegExpMacroAssemblerARM64::CheckStackGuardState(Address* return_address,
+ Code* re_code,
+ Address re_frame,
+ int start_offset,
+ const byte** input_start,
+ const byte** input_end) {
+ Isolate* isolate = frame_entry<Isolate*>(re_frame, kIsolate);
+ if (isolate->stack_guard()->IsStackOverflow()) {
+ isolate->StackOverflow();
+ return EXCEPTION;
+ }
+
+ // If this is not a real stack overflow, the stack guard was used to
+ // interrupt execution for another purpose.
+
+ // If this is a direct call from JavaScript, retry the RegExp, forcing the
+ // call through the runtime system. Currently the direct call cannot
+ // handle a GC.
+ if (frame_entry<int>(re_frame, kDirectCall) == 1) {
+ return RETRY;
+ }
+
+ // Prepare for possible GC.
+ HandleScope handles(isolate);
+ Handle<Code> code_handle(re_code);
+
+ Handle<String> subject(frame_entry<String*>(re_frame, kInput));
+
+ // Current string.
+ bool is_ascii = subject->IsOneByteRepresentationUnderneath();
+
+ ASSERT(re_code->instruction_start() <= *return_address);
+ ASSERT(*return_address <=
+ re_code->instruction_start() + re_code->instruction_size());
+
+ MaybeObject* result = Execution::HandleStackGuardInterrupt(isolate);
+
+ if (*code_handle != re_code) { // Return address no longer valid
+ int delta = code_handle->address() - re_code->address();
+ // Overwrite the return address on the stack.
+ *return_address += delta;
+ }
+
+ if (result->IsException()) {
+ return EXCEPTION;
+ }
+
+ Handle<String> subject_tmp = subject;
+ int slice_offset = 0;
+
+ // Extract the underlying string and the slice offset.
+ if (StringShape(*subject_tmp).IsCons()) {
+ subject_tmp = Handle<String>(ConsString::cast(*subject_tmp)->first());
+ } else if (StringShape(*subject_tmp).IsSliced()) {
+ SlicedString* slice = SlicedString::cast(*subject_tmp);
+ subject_tmp = Handle<String>(slice->parent());
+ slice_offset = slice->offset();
+ }
+
+ // String might have changed.
+ if (subject_tmp->IsOneByteRepresentation() != is_ascii) {
+ // If we changed between an ASCII and a UC16 string, the specialized
+ // code cannot be used, and we need to restart regexp matching from
+ // scratch (including, potentially, compiling a new version of the code).
+ return RETRY;
+ }
+
+ // Otherwise, the content of the string might have moved. It must still
+ // be a sequential or external string with the same content.
+ // Update the start and end pointers in the stack frame to the current
+ // location (whether it has actually moved or not).
+ ASSERT(StringShape(*subject_tmp).IsSequential() ||
+ StringShape(*subject_tmp).IsExternal());
+
+ // The original start address of the characters to match.
+ const byte* start_address = *input_start;
+
+ // Find the current start address of the same character at the current string
+ // position.
+ const byte* new_address = StringCharacterPosition(*subject_tmp,
+ start_offset + slice_offset);
+
+ if (start_address != new_address) {
+ // If there is a difference, update the object pointer and start and end
+ // addresses in the RegExp stack frame to match the new value.
+ const byte* end_address = *input_end;
+ int byte_length = static_cast<int>(end_address - start_address);
+ frame_entry<const String*>(re_frame, kInput) = *subject;
+ *input_start = new_address;
+ *input_end = new_address + byte_length;
+ } else if (frame_entry<const String*>(re_frame, kInput) != *subject) {
+ // The subject string might have been a ConsString that underwent
+ // short-circuiting during GC. That will not change start_address, but it
+ // will change the pointer inside the subject handle.
+ frame_entry<const String*>(re_frame, kInput) = *subject;
+ }
+
+ return 0;
+}
+
+
+void RegExpMacroAssemblerARM64::CheckPosition(int cp_offset,
+ Label* on_outside_input) {
+ CompareAndBranchOrBacktrack(current_input_offset(),
+ -cp_offset * char_size(),
+ ge,
+ on_outside_input);
+}
+
+
+bool RegExpMacroAssemblerARM64::CanReadUnaligned() {
+ // TODO(pielan): See whether or not we should disable unaligned accesses.
+ return !slow_safe();
+}
+
+
+// Private methods:
+
+void RegExpMacroAssemblerARM64::CallCheckStackGuardState(Register scratch) {
+ // Allocate space on the stack to store the return address. The
+ // CheckStackGuardState C++ function will overwrite it if the code has
+ // moved. Allocate extra space for the 2 arguments passed by pointer.
+ // AAPCS64 requires the stack to be 16 byte aligned.
+ int alignment = masm_->ActivationFrameAlignment();
+ ASSERT_EQ(alignment % 16, 0);
+ int align_mask = (alignment / kXRegSize) - 1;
+ int xreg_to_claim = (3 + align_mask) & ~align_mask;
+
+ ASSERT(csp.Is(__ StackPointer()));
+ __ Claim(xreg_to_claim);
+
+ // CheckStackGuardState needs the end and start addresses of the input string.
+ __ Poke(input_end(), 2 * kPointerSize);
+ __ Add(x5, csp, 2 * kPointerSize);
+ __ Poke(input_start(), kPointerSize);
+ __ Add(x4, csp, kPointerSize);
+
+ __ Mov(w3, start_offset());
+ // RegExp code frame pointer.
+ __ Mov(x2, frame_pointer());
+ // Code* of self.
+ __ Mov(x1, Operand(masm_->CodeObject()));
+
+ // We need to pass a pointer to the return address as the first argument.
+ // The DirectCEntry stub will place the return address on the stack before
+ // calling, so the stack pointer will point to it.
+ __ Mov(x0, csp);
+
+ ExternalReference check_stack_guard_state =
+ ExternalReference::re_check_stack_guard_state(isolate());
+ __ Mov(scratch, check_stack_guard_state);
+ DirectCEntryStub stub;
+ stub.GenerateCall(masm_, scratch);
+
+ // The input string may have been moved in memory; we need to reload it.
+ __ Peek(input_start(), kPointerSize);
+ __ Peek(input_end(), 2 * kPointerSize);
+
+ ASSERT(csp.Is(__ StackPointer()));
+ __ Drop(xreg_to_claim);
+
+ // Reload the Code pointer.
+ __ Mov(code_pointer(), Operand(masm_->CodeObject()));
+}
+
+void RegExpMacroAssemblerARM64::BranchOrBacktrack(Condition condition,
+ Label* to) {
+ if (condition == al) { // Unconditional.
+ if (to == NULL) {
+ Backtrack();
+ return;
+ }
+ __ B(to);
+ return;
+ }
+ if (to == NULL) {
+ to = &backtrack_label_;
+ }
+ // TODO(ulan): do direct jump when jump distance is known and fits in imm19.
+ Condition inverted_condition = InvertCondition(condition);
+ Label no_branch;
+ __ B(inverted_condition, &no_branch);
+ __ B(to);
+ __ Bind(&no_branch);
+}
+
+void RegExpMacroAssemblerARM64::CompareAndBranchOrBacktrack(Register reg,
+ int immediate,
+ Condition condition,
+ Label* to) {
+ if ((immediate == 0) && ((condition == eq) || (condition == ne))) {
+ if (to == NULL) {
+ to = &backtrack_label_;
+ }
+ // TODO(ulan): do direct jump when jump distance is known and fits in imm19.
+ Label no_branch;
+ if (condition == eq) {
+ __ Cbnz(reg, &no_branch);
+ } else {
+ __ Cbz(reg, &no_branch);
+ }
+ __ B(to);
+ __ Bind(&no_branch);
+ } else {
+ __ Cmp(reg, immediate);
+ BranchOrBacktrack(condition, to);
+ }
+}
+
+
+void RegExpMacroAssemblerARM64::CheckPreemption() {
+ // Check for preemption.
+ ExternalReference stack_limit =
+ ExternalReference::address_of_stack_limit(isolate());
+ __ Mov(x10, stack_limit);
+ __ Ldr(x10, MemOperand(x10));
+ ASSERT(csp.Is(__ StackPointer()));
+ __ Cmp(csp, x10);
+ CallIf(&check_preempt_label_, ls);
+}
+
+
+void RegExpMacroAssemblerARM64::CheckStackLimit() {
+ ExternalReference stack_limit =
+ ExternalReference::address_of_regexp_stack_limit(isolate());
+ __ Mov(x10, stack_limit);
+ __ Ldr(x10, MemOperand(x10));
+ __ Cmp(backtrack_stackpointer(), x10);
+ CallIf(&stack_overflow_label_, ls);
+}
+
+
+void RegExpMacroAssemblerARM64::Push(Register source) {
+ ASSERT(source.Is32Bits());
+ ASSERT(!source.is(backtrack_stackpointer()));
+ __ Str(source,
+ MemOperand(backtrack_stackpointer(),
+ -static_cast<int>(kWRegSize),
+ PreIndex));
+}
+
+
+void RegExpMacroAssemblerARM64::Pop(Register target) {
+ ASSERT(target.Is32Bits());
+ ASSERT(!target.is(backtrack_stackpointer()));
+ __ Ldr(target,
+ MemOperand(backtrack_stackpointer(), kWRegSize, PostIndex));
+}
+
+
+Register RegExpMacroAssemblerARM64::GetCachedRegister(int register_index) {
+ ASSERT(register_index < kNumCachedRegisters);
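+ // Capture registers 2i and 2i+1 share hardware register xi: the even index
+ // in the low word and the odd index in the high word.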
+ return Register::Create(register_index / 2, kXRegSizeInBits);
+}
+
+
+Register RegExpMacroAssemblerARM64::GetRegister(int register_index,
+ Register maybe_result) {
+ ASSERT(maybe_result.Is32Bits());
+ ASSERT(register_index >= 0);
+ if (num_registers_ <= register_index) {
+ num_registers_ = register_index + 1;
+ }
+ Register result;
+ RegisterState register_state = GetRegisterState(register_index);
+ switch (register_state) {
+ case STACKED:
+ __ Ldr(maybe_result, register_location(register_index));
+ result = maybe_result;
+ break;
+ case CACHED_LSW:
+ result = GetCachedRegister(register_index).W();
+ break;
+ case CACHED_MSW:
+ __ Lsr(maybe_result.X(), GetCachedRegister(register_index),
+ kWRegSizeInBits);
+ result = maybe_result;
+ break;
+ default:
+ UNREACHABLE();
+ break;
+ }
+ ASSERT(result.Is32Bits());
+ return result;
+}
+
+
+void RegExpMacroAssemblerARM64::StoreRegister(int register_index,
+ Register source) {
+ ASSERT(source.Is32Bits());
+ ASSERT(register_index >= 0);
+ if (num_registers_ <= register_index) {
+ num_registers_ = register_index + 1;
+ }
+
+ Register cached_register;
+ RegisterState register_state = GetRegisterState(register_index);
+ switch (register_state) {
+ case STACKED:
+ __ Str(source, register_location(register_index));
+ break;
+ case CACHED_LSW:
+ cached_register = GetCachedRegister(register_index);
+ if (!source.Is(cached_register.W())) {
+ __ Bfi(cached_register, source.X(), 0, kWRegSizeInBits);
+ }
+ break;
+ case CACHED_MSW:
+ cached_register = GetCachedRegister(register_index);
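+ // Bfi inserts the 32-bit source into bits [32:63] of the cached register,
+ // leaving the low word (the even-indexed capture register) untouched.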
+ __ Bfi(cached_register, source.X(), kWRegSizeInBits, kWRegSizeInBits);
+ break;
+ default:
+ UNREACHABLE();
+ break;
+ }
+}
+
+
+void RegExpMacroAssemblerARM64::CallIf(Label* to, Condition condition) {
+ Label skip_call;
+ if (condition != al) __ B(&skip_call, InvertCondition(condition));
+ __ Bl(to);
+ __ Bind(&skip_call);
+}
+
+
+void RegExpMacroAssemblerARM64::RestoreLinkRegister() {
+ ASSERT(csp.Is(__ StackPointer()));
+ __ Pop(lr, xzr);
+ __ Add(lr, lr, Operand(masm_->CodeObject()));
+}
+
+
+void RegExpMacroAssemblerARM64::SaveLinkRegister() {
+ ASSERT(csp.Is(__ StackPointer()));
+ __ Sub(lr, lr, Operand(masm_->CodeObject()));
+ __ Push(xzr, lr);
+}
+
+
+MemOperand RegExpMacroAssemblerARM64::register_location(int register_index) {
+ ASSERT(register_index < (1<<30));
+ ASSERT(register_index >= kNumCachedRegisters);
+ if (num_registers_ <= register_index) {
+ num_registers_ = register_index + 1;
+ }
+ register_index -= kNumCachedRegisters;
+ int offset = kFirstRegisterOnStack - register_index * kWRegSize;
+ return MemOperand(frame_pointer(), offset);
+}
+
+MemOperand RegExpMacroAssemblerARM64::capture_location(int register_index,
+ Register scratch) {
+ ASSERT(register_index < (1<<30));
+ ASSERT(register_index < num_saved_registers_);
+ ASSERT(register_index >= kNumCachedRegisters);
+ ASSERT_EQ(register_index % 2, 0);
+ register_index -= kNumCachedRegisters;
+ int offset = kFirstCaptureOnStack - register_index * kWRegSize;
+ // capture_location is used with Stp instructions to load/store 2 registers.
+ // The immediate field in the encoding is limited to 7 bits (signed).
+ if (is_int7(offset)) {
+ return MemOperand(frame_pointer(), offset);
+ } else {
+ __ Add(scratch, frame_pointer(), offset);
+ return MemOperand(scratch);
+ }
+}
+
+void RegExpMacroAssemblerARM64::LoadCurrentCharacterUnchecked(int cp_offset,
+ int characters) {
+ Register offset = current_input_offset();
+
+ // The ldr, str, ldrh, strh instructions can do unaligned accesses if the CPU
+ // and the operating system running on the target allow it.
+ // If unaligned loads and stores are not supported, then this function must
+ // only be used to load a single character at a time.
+
+ // ARMv8 supports unaligned accesses, but V8 or the kernel can decide to
+ // disable them.
+ // TODO(pielan): See whether or not we should disable unaligned accesses.
+ if (!CanReadUnaligned()) {
+ ASSERT(characters == 1);
+ }
+
+ if (cp_offset != 0) {
+ if (masm_->emit_debug_code()) {
+ __ Mov(x10, cp_offset * char_size());
+ __ Add(x10, x10, Operand(current_input_offset(), SXTW));
+ __ Cmp(x10, Operand(w10, SXTW));
+ // The offset needs to fit in a W register.
+ __ Check(eq, kOffsetOutOfRange);
+ } else {
+ __ Add(w10, current_input_offset(), cp_offset * char_size());
+ }
+ offset = w10;
+ }
+
+ if (mode_ == ASCII) {
+ if (characters == 4) {
+ __ Ldr(current_character(), MemOperand(input_end(), offset, SXTW));
+ } else if (characters == 2) {
+ __ Ldrh(current_character(), MemOperand(input_end(), offset, SXTW));
+ } else {
+ ASSERT(characters == 1);
+ __ Ldrb(current_character(), MemOperand(input_end(), offset, SXTW));
+ }
+ } else {
+ ASSERT(mode_ == UC16);
+ if (characters == 2) {
+ __ Ldr(current_character(), MemOperand(input_end(), offset, SXTW));
+ } else {
+ ASSERT(characters == 1);
+ __ Ldrh(current_character(), MemOperand(input_end(), offset, SXTW));
+ }
+ }
+}
+
+#endif // V8_INTERPRETED_REGEXP
+
+}} // namespace v8::internal
+
+#endif // V8_TARGET_ARCH_ARM64
diff --git a/deps/v8/src/arm64/regexp-macro-assembler-arm64.h b/deps/v8/src/arm64/regexp-macro-assembler-arm64.h
new file mode 100644
index 0000000000..534fd5b01a
--- /dev/null
+++ b/deps/v8/src/arm64/regexp-macro-assembler-arm64.h
@@ -0,0 +1,315 @@
+// Copyright 2013 the V8 project authors. All rights reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following
+// disclaimer in the documentation and/or other materials provided
+// with the distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived
+// from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+#ifndef V8_ARM64_REGEXP_MACRO_ASSEMBLER_ARM64_H_
+#define V8_ARM64_REGEXP_MACRO_ASSEMBLER_ARM64_H_
+
+#include "arm64/assembler-arm64.h"
+#include "arm64/assembler-arm64-inl.h"
+#include "macro-assembler.h"
+
+namespace v8 {
+namespace internal {
+
+
+#ifndef V8_INTERPRETED_REGEXP
+class RegExpMacroAssemblerARM64: public NativeRegExpMacroAssembler {
+ public:
+ RegExpMacroAssemblerARM64(Mode mode, int registers_to_save, Zone* zone);
+ virtual ~RegExpMacroAssemblerARM64();
+ virtual int stack_limit_slack();
+ virtual void AdvanceCurrentPosition(int by);
+ virtual void AdvanceRegister(int reg, int by);
+ virtual void Backtrack();
+ virtual void Bind(Label* label);
+ virtual void CheckAtStart(Label* on_at_start);
+ virtual void CheckCharacter(unsigned c, Label* on_equal);
+ virtual void CheckCharacterAfterAnd(unsigned c,
+ unsigned mask,
+ Label* on_equal);
+ virtual void CheckCharacterGT(uc16 limit, Label* on_greater);
+ virtual void CheckCharacterLT(uc16 limit, Label* on_less);
+ virtual void CheckCharacters(Vector<const uc16> str,
+ int cp_offset,
+ Label* on_failure,
+ bool check_end_of_string);
+ // A "greedy loop" is a loop that is both greedy and with a simple
+ // body. It has a particularly simple implementation.
+ virtual void CheckGreedyLoop(Label* on_tos_equals_current_position);
+ virtual void CheckNotAtStart(Label* on_not_at_start);
+ virtual void CheckNotBackReference(int start_reg, Label* on_no_match);
+ virtual void CheckNotBackReferenceIgnoreCase(int start_reg,
+ Label* on_no_match);
+ virtual void CheckNotCharacter(unsigned c, Label* on_not_equal);
+ virtual void CheckNotCharacterAfterAnd(unsigned c,
+ unsigned mask,
+ Label* on_not_equal);
+ virtual void CheckNotCharacterAfterMinusAnd(uc16 c,
+ uc16 minus,
+ uc16 mask,
+ Label* on_not_equal);
+ virtual void CheckCharacterInRange(uc16 from,
+ uc16 to,
+ Label* on_in_range);
+ virtual void CheckCharacterNotInRange(uc16 from,
+ uc16 to,
+ Label* on_not_in_range);
+ virtual void CheckBitInTable(Handle<ByteArray> table, Label* on_bit_set);
+
+ // Checks whether the given offset from the current position is before
+ // the end of the string.
+ virtual void CheckPosition(int cp_offset, Label* on_outside_input);
+ virtual bool CheckSpecialCharacterClass(uc16 type,
+ Label* on_no_match);
+ virtual void Fail();
+ virtual Handle<HeapObject> GetCode(Handle<String> source);
+ virtual void GoTo(Label* label);
+ virtual void IfRegisterGE(int reg, int comparand, Label* if_ge);
+ virtual void IfRegisterLT(int reg, int comparand, Label* if_lt);
+ virtual void IfRegisterEqPos(int reg, Label* if_eq);
+ virtual IrregexpImplementation Implementation();
+ virtual void LoadCurrentCharacter(int cp_offset,
+ Label* on_end_of_input,
+ bool check_bounds = true,
+ int characters = 1);
+ virtual void PopCurrentPosition();
+ virtual void PopRegister(int register_index);
+ virtual void PushBacktrack(Label* label);
+ virtual void PushCurrentPosition();
+ virtual void PushRegister(int register_index,
+ StackCheckFlag check_stack_limit);
+ virtual void ReadCurrentPositionFromRegister(int reg);
+ virtual void ReadStackPointerFromRegister(int reg);
+ virtual void SetCurrentPositionFromEnd(int by);
+ virtual void SetRegister(int register_index, int to);
+ virtual bool Succeed();
+ virtual void WriteCurrentPositionToRegister(int reg, int cp_offset);
+ virtual void ClearRegisters(int reg_from, int reg_to);
+ virtual void WriteStackPointerToRegister(int reg);
+ virtual bool CanReadUnaligned();
+
+ // Called from RegExp if the stack-guard is triggered.
+ // If the code object is relocated, the return address is fixed before
+ // returning.
+ static int CheckStackGuardState(Address* return_address,
+ Code* re_code,
+ Address re_frame,
+ int start_offset,
+ const byte** input_start,
+ const byte** input_end);
+
+ private:
+ // Above the frame pointer: stored registers and stack-passed parameters.
+ // Callee-saved registers x19-x29, where x29 is the old frame pointer.
+ static const int kCalleeSavedRegisters = 0;
+ // Return address.
+ // It is placed above the 11 callee-saved registers.
+ static const int kReturnAddress = kCalleeSavedRegisters + 11 * kPointerSize;
+ static const int kSecondaryReturnAddress = kReturnAddress + kPointerSize;
+ // Stack parameter placed by caller.
+ static const int kIsolate = kSecondaryReturnAddress + kPointerSize;
+
+ // Below the frame pointer.
+ // Register parameters stored by setup code.
+ static const int kDirectCall = kCalleeSavedRegisters - kPointerSize;
+ static const int kStackBase = kDirectCall - kPointerSize;
+ static const int kOutputSize = kStackBase - kPointerSize;
+ static const int kInput = kOutputSize - kPointerSize;
+ // When adding local variables, remember to reserve space for them in the
+ // frame set up in GetCode.
+ static const int kSuccessCounter = kInput - kPointerSize;
+ // First position register address on the stack. Following positions are
+ // below it. A position is a 32 bit value.
+ static const int kFirstRegisterOnStack = kSuccessCounter - kWRegSize;
+ // A capture is a 64 bit value holding two positions.
+ static const int kFirstCaptureOnStack = kSuccessCounter - kXRegSize;
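+ // For reference, assuming 8-byte pointers and X registers and 4-byte W
+ // registers, the offsets from fp work out to: kDirectCall = -8,
+ // kStackBase = -16, kOutputSize = -24, kInput = -32, kSuccessCounter = -40,
+ // kFirstRegisterOnStack = -44 and kFirstCaptureOnStack = -48.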
+
+ // Initial size of code buffer.
+ static const size_t kRegExpCodeSize = 1024;
+
+ // When initializing registers to a non-position value, we can unroll the
+ // loop. This sets the limit on the number of registers to unroll.
+ static const int kNumRegistersToUnroll = 16;
+
+ // We are using x0 to x7 as a register cache. Each hardware register holds
+ // one capture, that is, two 32 bit registers. We can therefore cache at
+ // most 16 registers.
+ static const int kNumCachedRegisters = 16;
+
+ // Load a number of characters at the given offset from the
+ // current position, into the current-character register.
+ void LoadCurrentCharacterUnchecked(int cp_offset, int character_count);
+
+ // Check whether preemption has been requested.
+ void CheckPreemption();
+
+ // Check whether we are exceeding the stack limit on the backtrack stack.
+ void CheckStackLimit();
+
+ // Generate a call to CheckStackGuardState.
+ void CallCheckStackGuardState(Register scratch);
+
+ // Location of a 32 bit position register.
+ MemOperand register_location(int register_index);
+
+ // Location of a 64 bit capture, combining two position registers.
+ MemOperand capture_location(int register_index, Register scratch);
+
+ // Register holding the current input position as a negative offset from
+ // the end of the string.
+ Register current_input_offset() { return w21; }
+
+ // The register containing the current character after LoadCurrentCharacter.
+ Register current_character() { return w22; }
+
+ // Register holding address of the end of the input string.
+ Register input_end() { return x25; }
+
+ // Register holding address of the start of the input string.
+ Register input_start() { return x26; }
+
+ // Register holding the offset from the start of the string where we should
+ // start matching.
+ Register start_offset() { return w27; }
+
+ // Pointer to the output array's first element.
+ Register output_array() { return x28; }
+
+ // Register holding the frame address. Local variables, parameters and
+ // regexp registers are addressed relative to this.
+ Register frame_pointer() { return fp; }
+
+ // The register containing the backtrack stack top. Provides a meaningful
+ // name to the register.
+ Register backtrack_stackpointer() { return x23; }
+
+ // Register holding pointer to the current code object.
+ Register code_pointer() { return x20; }
+
+ // Register holding the value used for clearing capture registers.
+ Register non_position_value() { return w24; }
+ // The top 32 bits of this register hold a second copy of this value.
+ // This is used for clearing more than one register at a time.
+ Register twice_non_position_value() { return x24; }
+
+ // Byte size of chars in the string to match (decided by the Mode argument).
+ int char_size() { return static_cast<int>(mode_); }
+
+ // Equivalent to a conditional branch to the label, unless the label
+ // is NULL, in which case it is a conditional Backtrack.
+ void BranchOrBacktrack(Condition condition, Label* to);
+
+ // Compares reg against immediate before calling BranchOrBacktrack.
+ // It makes use of the Cbz and Cbnz instructions.
+ void CompareAndBranchOrBacktrack(Register reg,
+ int immediate,
+ Condition condition,
+ Label* to);
+
+ inline void CallIf(Label* to, Condition condition);
+
+ // Save and restore the link register on the stack in a way that
+ // is GC-safe.
+ inline void SaveLinkRegister();
+ inline void RestoreLinkRegister();
+
+ // Pushes the value of a register on the backtrack stack. Decrements the
+ // stack pointer by a word size and stores the register's value there.
+ inline void Push(Register source);
+
+ // Pops a value from the backtrack stack. Reads the word at the stack pointer
+ // and increments it by a word size.
+ inline void Pop(Register target);
+
+ // This state indicates where the register actually is.
+ enum RegisterState {
+ STACKED, // Resides in memory.
+ CACHED_LSW, // Least Significant Word of a 64 bit hardware register.
+ CACHED_MSW // Most Significant Word of a 64 bit hardware register.
+ };
+
+ RegisterState GetRegisterState(int register_index) {
+ ASSERT(register_index >= 0);
+ if (register_index >= kNumCachedRegisters) {
+ return STACKED;
+ } else {
+ if ((register_index % 2) == 0) {
+ return CACHED_LSW;
+ } else {
+ return CACHED_MSW;
+ }
+ }
+ }
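+ // For example, register 4 maps to the low word of x2 (CACHED_LSW), register
+ // 5 to the high word of x2 (CACHED_MSW), and registers 16 and above live on
+ // the stack (STACKED).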
+
+ // Store helper that takes the state of the register into account.
+ inline void StoreRegister(int register_index, Register source);
+
+ // Returns a hardware W register that holds the value of the capture
+ // register.
+ //
+ // This function will try to use an existing cache register (w0-w7) for the
+ // result. Otherwise, it will load the value into maybe_result.
+ //
+ // If the returned register is anything other than maybe_result, calling code
+ // must not write to it.
+ inline Register GetRegister(int register_index, Register maybe_result);
+
+ // Returns the hardware register (x0-x7) holding the value of the capture
+ // register.
+ // This assumes that the state of the register is not STACKED.
+ inline Register GetCachedRegister(int register_index);
+
+ Isolate* isolate() const { return masm_->isolate(); }
+
+ MacroAssembler* masm_;
+
+ // Which mode to generate code for (ASCII or UC16).
+ Mode mode_;
+
+ // One greater than maximal register index actually used.
+ int num_registers_;
+
+ // Number of registers to output at the end (the saved registers
+ // are always 0..num_saved_registers_-1)
+ int num_saved_registers_;
+
+ // Labels used internally.
+ Label entry_label_;
+ Label start_label_;
+ Label success_label_;
+ Label backtrack_label_;
+ Label exit_label_;
+ Label check_preempt_label_;
+ Label stack_overflow_label_;
+};
+
+#endif // V8_INTERPRETED_REGEXP
+
+
+}} // namespace v8::internal
+
+#endif // V8_ARM64_REGEXP_MACRO_ASSEMBLER_ARM64_H_
diff --git a/deps/v8/src/arm64/simulator-arm64.cc b/deps/v8/src/arm64/simulator-arm64.cc
new file mode 100644
index 0000000000..cd475b40e1
--- /dev/null
+++ b/deps/v8/src/arm64/simulator-arm64.cc
@@ -0,0 +1,3645 @@
+// Copyright 2013 the V8 project authors. All rights reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following
+// disclaimer in the documentation and/or other materials provided
+// with the distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived
+// from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+#include <stdlib.h>
+#include <cmath>
+#include <cstdarg>
+#include "v8.h"
+
+#if V8_TARGET_ARCH_ARM64
+
+#include "disasm.h"
+#include "assembler.h"
+#include "arm64/decoder-arm64-inl.h"
+#include "arm64/simulator-arm64.h"
+#include "macro-assembler.h"
+
+namespace v8 {
+namespace internal {
+
+#if defined(USE_SIMULATOR)
+
+
+// This macro provides a platform independent use of sscanf. The reason for
+// SScanF not being implemented in a platform independent way through
+// ::v8::internal::OS in the same way as SNPrintF is that the
+// Windows C Run-Time Library does not provide vsscanf.
+#define SScanF sscanf // NOLINT
+
+
+// Helpers for colors.
+// Depending on your terminal configuration, the colour names may not match the
+// observed colours.
+#define COLOUR(colour_code) "\033[" colour_code "m"
+#define BOLD(colour_code) "1;" colour_code
+#define NORMAL ""
+#define GREY "30"
+#define GREEN "32"
+#define ORANGE "33"
+#define BLUE "34"
+#define PURPLE "35"
+#define INDIGO "36"
+#define WHITE "37"
+typedef char const * const TEXT_COLOUR;
+TEXT_COLOUR clr_normal = FLAG_log_colour ? COLOUR(NORMAL) : "";
+TEXT_COLOUR clr_flag_name = FLAG_log_colour ? COLOUR(BOLD(GREY)) : "";
+TEXT_COLOUR clr_flag_value = FLAG_log_colour ? COLOUR(BOLD(WHITE)) : "";
+TEXT_COLOUR clr_reg_name = FLAG_log_colour ? COLOUR(BOLD(BLUE)) : "";
+TEXT_COLOUR clr_reg_value = FLAG_log_colour ? COLOUR(BOLD(INDIGO)) : "";
+TEXT_COLOUR clr_fpreg_name = FLAG_log_colour ? COLOUR(BOLD(ORANGE)) : "";
+TEXT_COLOUR clr_fpreg_value = FLAG_log_colour ? COLOUR(BOLD(PURPLE)) : "";
+TEXT_COLOUR clr_memory_value = FLAG_log_colour ? COLOUR(BOLD(GREEN)) : "";
+TEXT_COLOUR clr_memory_address = FLAG_log_colour ? COLOUR(GREEN) : "";
+TEXT_COLOUR clr_debug_number = FLAG_log_colour ? COLOUR(BOLD(ORANGE)) : "";
+TEXT_COLOUR clr_debug_message = FLAG_log_colour ? COLOUR(ORANGE) : "";
+TEXT_COLOUR clr_printf = FLAG_log_colour ? COLOUR(GREEN) : "";
+
+
+// This is basically the same as PrintF, with a guard for FLAG_trace_sim.
+void PRINTF_CHECKING TraceSim(const char* format, ...) {
+ if (FLAG_trace_sim) {
+ va_list arguments;
+ va_start(arguments, format);
+ OS::VPrint(format, arguments);
+ va_end(arguments);
+ }
+}
+
+
+const Instruction* Simulator::kEndOfSimAddress = NULL;
+
+
+void SimSystemRegister::SetBits(int msb, int lsb, uint32_t bits) {
+ int width = msb - lsb + 1;
+ ASSERT(is_uintn(bits, width) || is_intn(bits, width));
+
+ bits <<= lsb;
+ uint32_t mask = ((1 << width) - 1) << lsb;
+ ASSERT((mask & write_ignore_mask_) == 0);
+
+ value_ = (value_ & ~mask) | (bits & mask);
+}
+
+
+SimSystemRegister SimSystemRegister::DefaultValueFor(SystemRegister id) {
+ switch (id) {
+ case NZCV:
+ return SimSystemRegister(0x00000000, NZCVWriteIgnoreMask);
+ case FPCR:
+ return SimSystemRegister(0x00000000, FPCRWriteIgnoreMask);
+ default:
+ UNREACHABLE();
+ return SimSystemRegister();
+ }
+}
+
+
+void Simulator::Initialize(Isolate* isolate) {
+ if (isolate->simulator_initialized()) return;
+ isolate->set_simulator_initialized(true);
+ ExternalReference::set_redirector(isolate, &RedirectExternalReference);
+}
+
+
+// Get the active Simulator for the current thread.
+Simulator* Simulator::current(Isolate* isolate) {
+ Isolate::PerIsolateThreadData* isolate_data =
+ isolate->FindOrAllocatePerThreadDataForThisThread();
+ ASSERT(isolate_data != NULL);
+
+ Simulator* sim = isolate_data->simulator();
+ if (sim == NULL) {
+ if (FLAG_trace_sim || FLAG_log_instruction_stats || FLAG_debug_sim) {
+ sim = new Simulator(new Decoder<DispatchingDecoderVisitor>(), isolate);
+ } else {
+ sim = new Decoder<Simulator>();
+ sim->isolate_ = isolate;
+ }
+ isolate_data->set_simulator(sim);
+ }
+ return sim;
+}
+
+
+void Simulator::CallVoid(byte* entry, CallArgument* args) {
+ int index_x = 0;
+ int index_d = 0;
+
+ std::vector<int64_t> stack_args(0);
+ for (int i = 0; !args[i].IsEnd(); i++) {
+ CallArgument arg = args[i];
+ if (arg.IsX() && (index_x < 8)) {
+ set_xreg(index_x++, arg.bits());
+ } else if (arg.IsD() && (index_d < 8)) {
+ set_dreg_bits(index_d++, arg.bits());
+ } else {
+ ASSERT(arg.IsD() || arg.IsX());
+ stack_args.push_back(arg.bits());
+ }
+ }
+
+ // Process stack arguments, and make sure the stack is suitably aligned.
+ uintptr_t original_stack = sp();
+ uintptr_t entry_stack = original_stack -
+ stack_args.size() * sizeof(stack_args[0]);
+ if (OS::ActivationFrameAlignment() != 0) {
+ entry_stack &= -OS::ActivationFrameAlignment();
+ }
+ char * stack = reinterpret_cast<char*>(entry_stack);
+ std::vector<int64_t>::const_iterator it;
+ for (it = stack_args.begin(); it != stack_args.end(); it++) {
+ memcpy(stack, &(*it), sizeof(*it));
+ stack += sizeof(*it);
+ }
+
+ ASSERT(reinterpret_cast<uintptr_t>(stack) <= original_stack);
+ set_sp(entry_stack);
+
+ // Call the generated code.
+ set_pc(entry);
+ set_lr(kEndOfSimAddress);
+ CheckPCSComplianceAndRun();
+
+ set_sp(original_stack);
+}
+
+
+int64_t Simulator::CallInt64(byte* entry, CallArgument* args) {
+ CallVoid(entry, args);
+ return xreg(0);
+}
+
+
+double Simulator::CallDouble(byte* entry, CallArgument* args) {
+ CallVoid(entry, args);
+ return dreg(0);
+}
+
+
+int64_t Simulator::CallJS(byte* entry,
+ byte* function_entry,
+ JSFunction* func,
+ Object* recv,
+ int64_t argc,
+ Object*** argv) {
+ CallArgument args[] = {
+ CallArgument(function_entry),
+ CallArgument(func),
+ CallArgument(recv),
+ CallArgument(argc),
+ CallArgument(argv),
+ CallArgument::End()
+ };
+ return CallInt64(entry, args);
+}
+
+int64_t Simulator::CallRegExp(byte* entry,
+ String* input,
+ int64_t start_offset,
+ const byte* input_start,
+ const byte* input_end,
+ int* output,
+ int64_t output_size,
+ Address stack_base,
+ int64_t direct_call,
+ void* return_address,
+ Isolate* isolate) {
+ CallArgument args[] = {
+ CallArgument(input),
+ CallArgument(start_offset),
+ CallArgument(input_start),
+ CallArgument(input_end),
+ CallArgument(output),
+ CallArgument(output_size),
+ CallArgument(stack_base),
+ CallArgument(direct_call),
+ CallArgument(return_address),
+ CallArgument(isolate),
+ CallArgument::End()
+ };
+ return CallInt64(entry, args);
+}
+
+
+void Simulator::CheckPCSComplianceAndRun() {
+#ifdef DEBUG
+ CHECK_EQ(kNumberOfCalleeSavedRegisters, kCalleeSaved.Count());
+ CHECK_EQ(kNumberOfCalleeSavedFPRegisters, kCalleeSavedFP.Count());
+
+ int64_t saved_registers[kNumberOfCalleeSavedRegisters];
+ uint64_t saved_fpregisters[kNumberOfCalleeSavedFPRegisters];
+
+ CPURegList register_list = kCalleeSaved;
+ CPURegList fpregister_list = kCalleeSavedFP;
+
+ for (int i = 0; i < kNumberOfCalleeSavedRegisters; i++) {
+ // x31 is not in the callee-saved list, so there is no need to specify
+ // whether we want the stack pointer or the zero register.
+ saved_registers[i] = xreg(register_list.PopLowestIndex().code());
+ }
+ for (int i = 0; i < kNumberOfCalleeSavedFPRegisters; i++) {
+ saved_fpregisters[i] =
+ dreg_bits(fpregister_list.PopLowestIndex().code());
+ }
+ int64_t original_stack = sp();
+#endif
+ // Start the simulation!
+ Run();
+#ifdef DEBUG
+ CHECK_EQ(original_stack, sp());
+ // Check that callee-saved registers have been preserved.
+ register_list = kCalleeSaved;
+ fpregister_list = kCalleeSavedFP;
+ for (int i = 0; i < kNumberOfCalleeSavedRegisters; i++) {
+ CHECK_EQ(saved_registers[i], xreg(register_list.PopLowestIndex().code()));
+ }
+ for (int i = 0; i < kNumberOfCalleeSavedFPRegisters; i++) {
+ ASSERT(saved_fpregisters[i] ==
+ dreg_bits(fpregister_list.PopLowestIndex().code()));
+ }
+
+ // Corrupt the caller-saved registers, except the return registers.
+
+ // In theory x0 to x7 can be used for return values, but V8 only uses x0
+ // and x1 for now.
+ register_list = kCallerSaved;
+ register_list.Remove(x0);
+ register_list.Remove(x1);
+
+ // In theory d0 to d7 can be used for return values, but V8 only uses d0
+ // for now.
+ fpregister_list = kCallerSavedFP;
+ fpregister_list.Remove(d0);
+
+ CorruptRegisters(&register_list, kCallerSavedRegisterCorruptionValue);
+ CorruptRegisters(&fpregister_list, kCallerSavedFPRegisterCorruptionValue);
+#endif
+}
+
+
+#ifdef DEBUG
+// The least significant byte of the corruption value holds the corresponding
+// register's code.
+void Simulator::CorruptRegisters(CPURegList* list, uint64_t value) {
+ if (list->type() == CPURegister::kRegister) {
+ while (!list->IsEmpty()) {
+ unsigned code = list->PopLowestIndex().code();
+ set_xreg(code, value | code);
+ }
+ } else {
+ ASSERT(list->type() == CPURegister::kFPRegister);
+ while (!list->IsEmpty()) {
+ unsigned code = list->PopLowestIndex().code();
+ set_dreg_bits(code, value | code);
+ }
+ }
+}
+
+
+void Simulator::CorruptAllCallerSavedCPURegisters() {
+ // CorruptRegisters alters its parameter, so copy the lists first.
+ CPURegList register_list = kCallerSaved;
+ CPURegList fpregister_list = kCallerSavedFP;
+
+ CorruptRegisters(&register_list, kCallerSavedRegisterCorruptionValue);
+ CorruptRegisters(&fpregister_list, kCallerSavedFPRegisterCorruptionValue);
+}
+#endif
+
+
+// Extending the stack by 2 * 64 bits is required for stack alignment purposes.
+uintptr_t Simulator::PushAddress(uintptr_t address) {
+ ASSERT(sizeof(uintptr_t) < 2 * kXRegSize);
+ intptr_t new_sp = sp() - 2 * kXRegSize;
+ uintptr_t* alignment_slot =
+ reinterpret_cast<uintptr_t*>(new_sp + kXRegSize);
+ memcpy(alignment_slot, &kSlotsZapValue, kPointerSize);
+ uintptr_t* stack_slot = reinterpret_cast<uintptr_t*>(new_sp);
+ memcpy(stack_slot, &address, kPointerSize);
+ set_sp(new_sp);
+ return new_sp;
+}
+
+
+uintptr_t Simulator::PopAddress() {
+ intptr_t current_sp = sp();
+ uintptr_t* stack_slot = reinterpret_cast<uintptr_t*>(current_sp);
+ uintptr_t address = *stack_slot;
+ ASSERT(sizeof(uintptr_t) < 2 * kXRegSize);
+ set_sp(current_sp + 2 * kXRegSize);
+ return address;
+}
+
+
+// Returns the limit of the stack area to enable checking for stack overflows.
+uintptr_t Simulator::StackLimit() const {
+ // Leave a safety margin of 1024 bytes to prevent overrunning the stack when
+ // pushing values.
+ return reinterpret_cast<uintptr_t>(stack_limit_) + 1024;
+}
+
+
+Simulator::Simulator(Decoder<DispatchingDecoderVisitor>* decoder,
+ Isolate* isolate, FILE* stream)
+ : decoder_(decoder),
+ last_debugger_input_(NULL),
+ log_parameters_(NO_PARAM),
+ isolate_(isolate) {
+ // Set up the decoder.
+ decoder_->AppendVisitor(this);
+
+ Init(stream);
+
+ if (FLAG_trace_sim) {
+ decoder_->InsertVisitorBefore(print_disasm_, this);
+ log_parameters_ = LOG_ALL;
+ }
+
+ if (FLAG_log_instruction_stats) {
+ instrument_ = new Instrument(FLAG_log_instruction_file,
+ FLAG_log_instruction_period);
+ decoder_->AppendVisitor(instrument_);
+ }
+}
+
+
+Simulator::Simulator()
+ : decoder_(NULL),
+ last_debugger_input_(NULL),
+ log_parameters_(NO_PARAM),
+ isolate_(NULL) {
+ Init(NULL);
+ CHECK(!FLAG_trace_sim && !FLAG_log_instruction_stats);
+}
+
+
+void Simulator::Init(FILE* stream) {
+ ResetState();
+
+ // Allocate and set up the simulator stack.
+ stack_size_ = (FLAG_sim_stack_size * KB) + (2 * stack_protection_size_);
+ stack_ = new byte[stack_size_];
+ stack_limit_ = stack_ + stack_protection_size_;
+ byte* tos = stack_ + stack_size_ - stack_protection_size_;
+ // The stack pointer must be 16-byte aligned.
+ set_sp(reinterpret_cast<int64_t>(tos) & ~0xfUL);
+
+ stream_ = stream;
+ print_disasm_ = new PrintDisassembler(stream_);
+
+ // The debugger needs to disassemble code without the simulator executing an
+ // instruction, so we create a dedicated decoder.
+ disassembler_decoder_ = new Decoder<DispatchingDecoderVisitor>();
+ disassembler_decoder_->AppendVisitor(print_disasm_);
+}
+
+
+void Simulator::ResetState() {
+ // Reset the system registers.
+ nzcv_ = SimSystemRegister::DefaultValueFor(NZCV);
+ fpcr_ = SimSystemRegister::DefaultValueFor(FPCR);
+
+ // Reset registers to 0.
+ pc_ = NULL;
+ for (unsigned i = 0; i < kNumberOfRegisters; i++) {
+ set_xreg(i, 0xbadbeef);
+ }
+ for (unsigned i = 0; i < kNumberOfFPRegisters; i++) {
+ // Set FP registers to a value that is NaN in both 32-bit and 64-bit FP.
+ set_dreg_bits(i, 0x7ff000007f800001UL);
+ }
+ // Returning to address 0 exits the Simulator.
+ set_lr(kEndOfSimAddress);
+
+ // Reset debug helpers.
+ breakpoints_.clear();
+ break_on_next_= false;
+}
+
+
+Simulator::~Simulator() {
+ delete[] stack_;
+ if (FLAG_log_instruction_stats) {
+ delete instrument_;
+ }
+ delete disassembler_decoder_;
+ delete print_disasm_;
+ DeleteArray(last_debugger_input_);
+ delete decoder_;
+}
+
+
+void Simulator::Run() {
+ pc_modified_ = false;
+ while (pc_ != kEndOfSimAddress) {
+ ExecuteInstruction();
+ }
+}
+
+
+void Simulator::RunFrom(Instruction* start) {
+ set_pc(start);
+ Run();
+}
+
+
+// When the generated code calls an external reference we need to catch that in
+// the simulator. The external reference will be a function compiled for the
+// host architecture. We need to call that function instead of trying to
+// execute it with the simulator. We do that by redirecting the external
+// reference to a svc (Supervisor Call) instruction that is handled by
+// the simulator. We write the original destination of the jump just at a known
+// offset from the svc instruction so the simulator knows what to call.
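+// In practice, FromHltInstruction() maps the trapping HLT back to its
+// enclosing Redirection (via OFFSET_OF on the redirect_call_ member), and
+// DoRuntimeCall() then invokes the original host function.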
+class Redirection {
+ public:
+ Redirection(void* external_function, ExternalReference::Type type)
+ : external_function_(external_function),
+ type_(type),
+ next_(NULL) {
+ redirect_call_.SetInstructionBits(
+ HLT | Assembler::ImmException(kImmExceptionIsRedirectedCall));
+ Isolate* isolate = Isolate::Current();
+ next_ = isolate->simulator_redirection();
+    // TODO(all): Flush the simulator's i-cache here.
+ isolate->set_simulator_redirection(this);
+ }
+
+ void* address_of_redirect_call() {
+ return reinterpret_cast<void*>(&redirect_call_);
+ }
+
+ template <typename T>
+ T external_function() { return reinterpret_cast<T>(external_function_); }
+
+ ExternalReference::Type type() { return type_; }
+
+ static Redirection* Get(void* external_function,
+ ExternalReference::Type type) {
+ Isolate* isolate = Isolate::Current();
+ Redirection* current = isolate->simulator_redirection();
+ for (; current != NULL; current = current->next_) {
+ if (current->external_function_ == external_function) {
+ ASSERT_EQ(current->type(), type);
+ return current;
+ }
+ }
+ return new Redirection(external_function, type);
+ }
+
+ static Redirection* FromHltInstruction(Instruction* redirect_call) {
+ char* addr_of_hlt = reinterpret_cast<char*>(redirect_call);
+ char* addr_of_redirection =
+ addr_of_hlt - OFFSET_OF(Redirection, redirect_call_);
+ return reinterpret_cast<Redirection*>(addr_of_redirection);
+ }
+
+ static void* ReverseRedirection(int64_t reg) {
+ Redirection* redirection =
+ FromHltInstruction(reinterpret_cast<Instruction*>(reg));
+ return redirection->external_function<void*>();
+ }
+
+ private:
+ void* external_function_;
+ Instruction redirect_call_;
+ ExternalReference::Type type_;
+ Redirection* next_;
+};
+
+
+// Calls into the V8 runtime are based on this very simple interface.
+// Note: To be able to return two values from some calls, the code in
+// runtime.cc uses the ObjectPair structure.
+// The simulator assumes all runtime calls return two 64-bit values. If they
+// don't, register x1 is clobbered. This is fine because x1 is caller-saved.
+struct ObjectPair {
+ int64_t res0;
+ int64_t res1;
+};
+
+
+typedef ObjectPair (*SimulatorRuntimeCall)(int64_t arg0,
+ int64_t arg1,
+ int64_t arg2,
+ int64_t arg3,
+ int64_t arg4,
+ int64_t arg5,
+ int64_t arg6,
+ int64_t arg7);
+
+typedef int64_t (*SimulatorRuntimeCompareCall)(double arg1, double arg2);
+typedef double (*SimulatorRuntimeFPFPCall)(double arg1, double arg2);
+typedef double (*SimulatorRuntimeFPCall)(double arg1);
+typedef double (*SimulatorRuntimeFPIntCall)(double arg1, int32_t arg2);
+
+// This signature supports direct call in to API function native callback
+// (refer to InvocationCallback in v8.h).
+typedef void (*SimulatorRuntimeDirectApiCall)(int64_t arg0);
+typedef void (*SimulatorRuntimeProfilingApiCall)(int64_t arg0, void* arg1);
+
+// This signature supports direct call to accessor getter callback.
+typedef void (*SimulatorRuntimeDirectGetterCall)(int64_t arg0, int64_t arg1);
+typedef void (*SimulatorRuntimeProfilingGetterCall)(int64_t arg0, int64_t arg1,
+ void* arg2);
+
+void Simulator::DoRuntimeCall(Instruction* instr) {
+ Redirection* redirection = Redirection::FromHltInstruction(instr);
+
+ // The called C code might itself call simulated code, so any
+ // caller-saved registers (including lr) could still be clobbered by a
+ // redirected call.
+ Instruction* return_address = lr();
+
+ int64_t external = redirection->external_function<int64_t>();
+
+ TraceSim("Call to host function at %p\n",
+ redirection->external_function<void*>());
+
+ // SP must be 16-byte-aligned at the call interface.
+ bool stack_alignment_exception = ((sp() & 0xf) != 0);
+ if (stack_alignment_exception) {
+ TraceSim(" with unaligned stack 0x%016" PRIx64 ".\n", sp());
+ FATAL("ALIGNMENT EXCEPTION");
+ }
+
+ switch (redirection->type()) {
+ default:
+ TraceSim("Type: Unknown.\n");
+ UNREACHABLE();
+ break;
+
+ case ExternalReference::BUILTIN_CALL: {
+ // MaybeObject* f(v8::internal::Arguments).
+ TraceSim("Type: BUILTIN_CALL\n");
+ SimulatorRuntimeCall target =
+ reinterpret_cast<SimulatorRuntimeCall>(external);
+
+ // We don't know how many arguments are being passed, but we can
+ // pass 8 without touching the stack. They will be ignored by the
+ // host function if they aren't used.
+ TraceSim("Arguments: "
+ "0x%016" PRIx64 ", 0x%016" PRIx64 ", "
+ "0x%016" PRIx64 ", 0x%016" PRIx64 ", "
+ "0x%016" PRIx64 ", 0x%016" PRIx64 ", "
+ "0x%016" PRIx64 ", 0x%016" PRIx64,
+ xreg(0), xreg(1), xreg(2), xreg(3),
+ xreg(4), xreg(5), xreg(6), xreg(7));
+ ObjectPair result = target(xreg(0), xreg(1), xreg(2), xreg(3),
+ xreg(4), xreg(5), xreg(6), xreg(7));
+ TraceSim("Returned: {0x%" PRIx64 ", 0x%" PRIx64 "}\n",
+ result.res0, result.res1);
+#ifdef DEBUG
+ CorruptAllCallerSavedCPURegisters();
+#endif
+ set_xreg(0, result.res0);
+ set_xreg(1, result.res1);
+ break;
+ }
+
+ case ExternalReference::DIRECT_API_CALL: {
+ // void f(v8::FunctionCallbackInfo&)
+ TraceSim("Type: DIRECT_API_CALL\n");
+ SimulatorRuntimeDirectApiCall target =
+ reinterpret_cast<SimulatorRuntimeDirectApiCall>(external);
+ TraceSim("Arguments: 0x%016" PRIx64 "\n", xreg(0));
+ target(xreg(0));
+ TraceSim("No return value.");
+#ifdef DEBUG
+ CorruptAllCallerSavedCPURegisters();
+#endif
+ break;
+ }
+
+ case ExternalReference::BUILTIN_COMPARE_CALL: {
+ // int f(double, double)
+ TraceSim("Type: BUILTIN_COMPARE_CALL\n");
+ SimulatorRuntimeCompareCall target =
+ reinterpret_cast<SimulatorRuntimeCompareCall>(external);
+ TraceSim("Arguments: %f, %f\n", dreg(0), dreg(1));
+ int64_t result = target(dreg(0), dreg(1));
+ TraceSim("Returned: %" PRId64 "\n", result);
+#ifdef DEBUG
+ CorruptAllCallerSavedCPURegisters();
+#endif
+ set_xreg(0, result);
+ break;
+ }
+
+ case ExternalReference::BUILTIN_FP_CALL: {
+ // double f(double)
+ TraceSim("Type: BUILTIN_FP_CALL\n");
+ SimulatorRuntimeFPCall target =
+ reinterpret_cast<SimulatorRuntimeFPCall>(external);
+ TraceSim("Argument: %f\n", dreg(0));
+ double result = target(dreg(0));
+ TraceSim("Returned: %f\n", result);
+#ifdef DEBUG
+ CorruptAllCallerSavedCPURegisters();
+#endif
+ set_dreg(0, result);
+ break;
+ }
+
+ case ExternalReference::BUILTIN_FP_FP_CALL: {
+ // double f(double, double)
+ TraceSim("Type: BUILTIN_FP_FP_CALL\n");
+ SimulatorRuntimeFPFPCall target =
+ reinterpret_cast<SimulatorRuntimeFPFPCall>(external);
+ TraceSim("Arguments: %f, %f\n", dreg(0), dreg(1));
+ double result = target(dreg(0), dreg(1));
+ TraceSim("Returned: %f\n", result);
+#ifdef DEBUG
+ CorruptAllCallerSavedCPURegisters();
+#endif
+ set_dreg(0, result);
+ break;
+ }
+
+ case ExternalReference::BUILTIN_FP_INT_CALL: {
+ // double f(double, int)
+ TraceSim("Type: BUILTIN_FP_INT_CALL\n");
+ SimulatorRuntimeFPIntCall target =
+ reinterpret_cast<SimulatorRuntimeFPIntCall>(external);
+ TraceSim("Arguments: %f, %d\n", dreg(0), wreg(0));
+ double result = target(dreg(0), wreg(0));
+ TraceSim("Returned: %f\n", result);
+#ifdef DEBUG
+ CorruptAllCallerSavedCPURegisters();
+#endif
+ set_dreg(0, result);
+ break;
+ }
+
+ case ExternalReference::DIRECT_GETTER_CALL: {
+ // void f(Local<String> property, PropertyCallbackInfo& info)
+ TraceSim("Type: DIRECT_GETTER_CALL\n");
+ SimulatorRuntimeDirectGetterCall target =
+ reinterpret_cast<SimulatorRuntimeDirectGetterCall>(external);
+ TraceSim("Arguments: 0x%016" PRIx64 ", 0x%016" PRIx64 "\n",
+ xreg(0), xreg(1));
+ target(xreg(0), xreg(1));
+ TraceSim("No return value.");
+#ifdef DEBUG
+ CorruptAllCallerSavedCPURegisters();
+#endif
+ break;
+ }
+
+ case ExternalReference::PROFILING_API_CALL: {
+ // void f(v8::FunctionCallbackInfo&, v8::FunctionCallback)
+ TraceSim("Type: PROFILING_API_CALL\n");
+ SimulatorRuntimeProfilingApiCall target =
+ reinterpret_cast<SimulatorRuntimeProfilingApiCall>(external);
+ void* arg1 = Redirection::ReverseRedirection(xreg(1));
+ TraceSim("Arguments: 0x%016" PRIx64 ", %p\n", xreg(0), arg1);
+ target(xreg(0), arg1);
+ TraceSim("No return value.");
+#ifdef DEBUG
+ CorruptAllCallerSavedCPURegisters();
+#endif
+ break;
+ }
+
+ case ExternalReference::PROFILING_GETTER_CALL: {
+ // void f(Local<String> property, PropertyCallbackInfo& info,
+ // AccessorGetterCallback callback)
+ TraceSim("Type: PROFILING_GETTER_CALL\n");
+ SimulatorRuntimeProfilingGetterCall target =
+ reinterpret_cast<SimulatorRuntimeProfilingGetterCall>(
+ external);
+ void* arg2 = Redirection::ReverseRedirection(xreg(2));
+ TraceSim("Arguments: 0x%016" PRIx64 ", 0x%016" PRIx64 ", %p\n",
+ xreg(0), xreg(1), arg2);
+ target(xreg(0), xreg(1), arg2);
+ TraceSim("No return value.");
+#ifdef DEBUG
+ CorruptAllCallerSavedCPURegisters();
+#endif
+ break;
+ }
+ }
+
+ set_lr(return_address);
+ set_pc(return_address);
+}
+
+
+void* Simulator::RedirectExternalReference(void* external_function,
+ ExternalReference::Type type) {
+ Redirection* redirection = Redirection::Get(external_function, type);
+ return redirection->address_of_redirect_call();
+}
+
+
+const char* Simulator::xreg_names[] = {
+"x0", "x1", "x2", "x3", "x4", "x5", "x6", "x7",
+"x8", "x9", "x10", "x11", "x12", "x13", "x14", "x15",
+"ip0", "ip1", "x18", "x19", "x20", "x21", "x22", "x23",
+"x24", "x25", "x26", "cp", "jssp", "fp", "lr", "xzr", "csp"};
+
+const char* Simulator::wreg_names[] = {
+"w0", "w1", "w2", "w3", "w4", "w5", "w6", "w7",
+"w8", "w9", "w10", "w11", "w12", "w13", "w14", "w15",
+"w16", "w17", "w18", "w19", "w20", "w21", "w22", "w23",
+"w24", "w25", "w26", "wcp", "wjssp", "wfp", "wlr", "wzr", "wcsp"};
+
+const char* Simulator::sreg_names[] = {
+"s0", "s1", "s2", "s3", "s4", "s5", "s6", "s7",
+"s8", "s9", "s10", "s11", "s12", "s13", "s14", "s15",
+"s16", "s17", "s18", "s19", "s20", "s21", "s22", "s23",
+"s24", "s25", "s26", "s27", "s28", "s29", "s30", "s31"};
+
+const char* Simulator::dreg_names[] = {
+"d0", "d1", "d2", "d3", "d4", "d5", "d6", "d7",
+"d8", "d9", "d10", "d11", "d12", "d13", "d14", "d15",
+"d16", "d17", "d18", "d19", "d20", "d21", "d22", "d23",
+"d24", "d25", "d26", "d27", "d28", "d29", "d30", "d31"};
+
+const char* Simulator::vreg_names[] = {
+"v0", "v1", "v2", "v3", "v4", "v5", "v6", "v7",
+"v8", "v9", "v10", "v11", "v12", "v13", "v14", "v15",
+"v16", "v17", "v18", "v19", "v20", "v21", "v22", "v23",
+"v24", "v25", "v26", "v27", "v28", "v29", "v30", "v31"};
+
+
+const char* Simulator::WRegNameForCode(unsigned code, Reg31Mode mode) {
+ ASSERT(code < kNumberOfRegisters);
+ // If the code represents the stack pointer, index the name after zr.
+ if ((code == kZeroRegCode) && (mode == Reg31IsStackPointer)) {
+ code = kZeroRegCode + 1;
+ }
+ return wreg_names[code];
+}
+
+
+const char* Simulator::XRegNameForCode(unsigned code, Reg31Mode mode) {
+ ASSERT(code < kNumberOfRegisters);
+ // If the code represents the stack pointer, index the name after zr.
+ if ((code == kZeroRegCode) && (mode == Reg31IsStackPointer)) {
+ code = kZeroRegCode + 1;
+ }
+ return xreg_names[code];
+}
+
+
+const char* Simulator::SRegNameForCode(unsigned code) {
+ ASSERT(code < kNumberOfFPRegisters);
+ return sreg_names[code];
+}
+
+
+const char* Simulator::DRegNameForCode(unsigned code) {
+ ASSERT(code < kNumberOfFPRegisters);
+ return dreg_names[code];
+}
+
+
+const char* Simulator::VRegNameForCode(unsigned code) {
+ ASSERT(code < kNumberOfFPRegisters);
+ return vreg_names[code];
+}
+
+
+int Simulator::CodeFromName(const char* name) {
+ for (unsigned i = 0; i < kNumberOfRegisters; i++) {
+ if ((strcmp(xreg_names[i], name) == 0) ||
+ (strcmp(wreg_names[i], name) == 0)) {
+ return i;
+ }
+ }
+ for (unsigned i = 0; i < kNumberOfFPRegisters; i++) {
+ if ((strcmp(vreg_names[i], name) == 0) ||
+ (strcmp(dreg_names[i], name) == 0) ||
+ (strcmp(sreg_names[i], name) == 0)) {
+ return i;
+ }
+ }
+ if ((strcmp("csp", name) == 0) || (strcmp("wcsp", name) == 0)) {
+ return kSPRegInternalCode;
+ }
+ return -1;
+}
+
+
+// Helpers ---------------------------------------------------------------------
+int64_t Simulator::AddWithCarry(unsigned reg_size,
+ bool set_flags,
+ int64_t src1,
+ int64_t src2,
+ int64_t carry_in) {
+ ASSERT((carry_in == 0) || (carry_in == 1));
+ ASSERT((reg_size == kXRegSizeInBits) || (reg_size == kWRegSizeInBits));
+
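+  // Worked W-sized example: ADDS of 0x7fffffff and 0x00000001 produces
+  // 0x80000000 with N=1, Z=0, C=0 and V=1 (signed overflow, no unsigned
+  // carry-out).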
+ uint64_t u1, u2;
+ int64_t result;
+ int64_t signed_sum = src1 + src2 + carry_in;
+
+ bool N, Z, C, V;
+
+ if (reg_size == kWRegSizeInBits) {
+ u1 = static_cast<uint64_t>(src1) & kWRegMask;
+ u2 = static_cast<uint64_t>(src2) & kWRegMask;
+
+ result = signed_sum & kWRegMask;
+ // Compute the C flag by comparing the sum to the max unsigned integer.
+ C = ((kWMaxUInt - u1) < (u2 + carry_in)) ||
+ ((kWMaxUInt - u1 - carry_in) < u2);
+ // Overflow iff the sign bit is the same for the two inputs and different
+ // for the result.
+ int64_t s_src1 = src1 << (kXRegSizeInBits - kWRegSizeInBits);
+ int64_t s_src2 = src2 << (kXRegSizeInBits - kWRegSizeInBits);
+ int64_t s_result = result << (kXRegSizeInBits - kWRegSizeInBits);
+ V = ((s_src1 ^ s_src2) >= 0) && ((s_src1 ^ s_result) < 0);
+
+ } else {
+ u1 = static_cast<uint64_t>(src1);
+ u2 = static_cast<uint64_t>(src2);
+
+ result = signed_sum;
+ // Compute the C flag by comparing the sum to the max unsigned integer.
+ C = ((kXMaxUInt - u1) < (u2 + carry_in)) ||
+ ((kXMaxUInt - u1 - carry_in) < u2);
+ // Overflow iff the sign bit is the same for the two inputs and different
+ // for the result.
+ V = ((src1 ^ src2) >= 0) && ((src1 ^ result) < 0);
+ }
+
+ N = CalcNFlag(result, reg_size);
+ Z = CalcZFlag(result);
+
+ if (set_flags) {
+ nzcv().SetN(N);
+ nzcv().SetZ(Z);
+ nzcv().SetC(C);
+ nzcv().SetV(V);
+ }
+ return result;
+}
+
+
+int64_t Simulator::ShiftOperand(unsigned reg_size,
+ int64_t value,
+ Shift shift_type,
+ unsigned amount) {
+ if (amount == 0) {
+ return value;
+ }
+ int64_t mask = reg_size == kXRegSizeInBits ? kXRegMask : kWRegMask;
+ switch (shift_type) {
+ case LSL:
+ return (value << amount) & mask;
+ case LSR:
+ return static_cast<uint64_t>(value) >> amount;
+ case ASR: {
+ // Shift used to restore the sign.
+ unsigned s_shift = kXRegSizeInBits - reg_size;
+ // Value with its sign restored.
+ int64_t s_value = (value << s_shift) >> s_shift;
+ return (s_value >> amount) & mask;
+ }
+ case ROR: {
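+      // e.g. a W-sized ROR of 0x00000001 by 1 yields 0x80000000.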
+ if (reg_size == kWRegSizeInBits) {
+ value &= kWRegMask;
+ }
+ return (static_cast<uint64_t>(value) >> amount) |
+ ((value & ((1L << amount) - 1L)) << (reg_size - amount));
+ }
+ default:
+ UNIMPLEMENTED();
+ return 0;
+ }
+}
+
+
+int64_t Simulator::ExtendValue(unsigned reg_size,
+ int64_t value,
+ Extend extend_type,
+ unsigned left_shift) {
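+  // Extract the low byte, half-word or word (zero- or sign-extended as
+  // requested), then apply the optional left shift and truncate the result to
+  // the register size.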
+ switch (extend_type) {
+ case UXTB:
+ value &= kByteMask;
+ break;
+ case UXTH:
+ value &= kHalfWordMask;
+ break;
+ case UXTW:
+ value &= kWordMask;
+ break;
+ case SXTB:
+ value = (value << 56) >> 56;
+ break;
+ case SXTH:
+ value = (value << 48) >> 48;
+ break;
+ case SXTW:
+ value = (value << 32) >> 32;
+ break;
+ case UXTX:
+ case SXTX:
+ break;
+ default:
+ UNREACHABLE();
+ }
+ int64_t mask = (reg_size == kXRegSizeInBits) ? kXRegMask : kWRegMask;
+ return (value << left_shift) & mask;
+}
+
+
+template<> double Simulator::FPDefaultNaN<double>() const {
+ return kFP64DefaultNaN;
+}
+
+
+template<> float Simulator::FPDefaultNaN<float>() const {
+ return kFP32DefaultNaN;
+}
+
+
+void Simulator::FPCompare(double val0, double val1) {
+ AssertSupportedFPCR();
+
+ // TODO(jbramley): This assumes that the C++ implementation handles
+ // comparisons in the way that we expect (as per AssertSupportedFPCR()).
+ if ((std::isnan(val0) != 0) || (std::isnan(val1) != 0)) {
+ nzcv().SetRawValue(FPUnorderedFlag);
+ } else if (val0 < val1) {
+ nzcv().SetRawValue(FPLessThanFlag);
+ } else if (val0 > val1) {
+ nzcv().SetRawValue(FPGreaterThanFlag);
+ } else if (val0 == val1) {
+ nzcv().SetRawValue(FPEqualFlag);
+ } else {
+ UNREACHABLE();
+ }
+}
+
+
+void Simulator::SetBreakpoint(Instruction* location) {
+ for (unsigned i = 0; i < breakpoints_.size(); i++) {
+ if (breakpoints_.at(i).location == location) {
+ PrintF("Existing breakpoint at %p was %s\n",
+ reinterpret_cast<void*>(location),
+ breakpoints_.at(i).enabled ? "disabled" : "enabled");
+ breakpoints_.at(i).enabled = !breakpoints_.at(i).enabled;
+ return;
+ }
+ }
+ Breakpoint new_breakpoint = {location, true};
+ breakpoints_.push_back(new_breakpoint);
+ PrintF("Set a breakpoint at %p\n", reinterpret_cast<void*>(location));
+}
+
+
+void Simulator::ListBreakpoints() {
+ PrintF("Breakpoints:\n");
+ for (unsigned i = 0; i < breakpoints_.size(); i++) {
+ PrintF("%p : %s\n",
+ reinterpret_cast<void*>(breakpoints_.at(i).location),
+ breakpoints_.at(i).enabled ? "enabled" : "disabled");
+ }
+}
+
+
+void Simulator::CheckBreakpoints() {
+ bool hit_a_breakpoint = false;
+ for (unsigned i = 0; i < breakpoints_.size(); i++) {
+ if ((breakpoints_.at(i).location == pc_) &&
+ breakpoints_.at(i).enabled) {
+ hit_a_breakpoint = true;
+ // Disable this breakpoint.
+ breakpoints_.at(i).enabled = false;
+ }
+ }
+ if (hit_a_breakpoint) {
+ PrintF("Hit and disabled a breakpoint at %p.\n",
+ reinterpret_cast<void*>(pc_));
+ Debug();
+ }
+}
+
+
+void Simulator::CheckBreakNext() {
+  // If the current instruction is a branch-and-link to a register (BLR),
+  // insert a breakpoint just after it.
+ if (break_on_next_ && pc_->IsBranchAndLinkToRegister()) {
+ SetBreakpoint(pc_->following());
+ break_on_next_ = false;
+ }
+}
+
+
+void Simulator::PrintInstructionsAt(Instruction* start, uint64_t count) {
+ Instruction* end = start->InstructionAtOffset(count * kInstructionSize);
+ for (Instruction* pc = start; pc < end; pc = pc->following()) {
+ disassembler_decoder_->Decode(pc);
+ }
+}
+
+
+void Simulator::PrintSystemRegisters(bool print_all) {
+ static bool first_run = true;
+
+ static SimSystemRegister last_nzcv;
+ if (print_all || first_run || (last_nzcv.RawValue() != nzcv().RawValue())) {
+ fprintf(stream_, "# %sFLAGS: %sN:%d Z:%d C:%d V:%d%s\n",
+ clr_flag_name,
+ clr_flag_value,
+ nzcv().N(), nzcv().Z(), nzcv().C(), nzcv().V(),
+ clr_normal);
+ }
+ last_nzcv = nzcv();
+
+ static SimSystemRegister last_fpcr;
+ if (print_all || first_run || (last_fpcr.RawValue() != fpcr().RawValue())) {
+ static const char * rmode[] = {
+ "0b00 (Round to Nearest)",
+ "0b01 (Round towards Plus Infinity)",
+ "0b10 (Round towards Minus Infinity)",
+ "0b11 (Round towards Zero)"
+ };
+    ASSERT(fpcr().RMode() < (sizeof(rmode) / sizeof(rmode[0])));
+ fprintf(stream_, "# %sFPCR: %sAHP:%d DN:%d FZ:%d RMode:%s%s\n",
+ clr_flag_name,
+ clr_flag_value,
+ fpcr().AHP(), fpcr().DN(), fpcr().FZ(), rmode[fpcr().RMode()],
+ clr_normal);
+ }
+ last_fpcr = fpcr();
+
+ first_run = false;
+}
+
+
+void Simulator::PrintRegisters(bool print_all_regs) {
+ static bool first_run = true;
+ static int64_t last_regs[kNumberOfRegisters];
+
+ for (unsigned i = 0; i < kNumberOfRegisters; i++) {
+ if (print_all_regs || first_run ||
+ (last_regs[i] != xreg(i, Reg31IsStackPointer))) {
+ fprintf(stream_,
+ "# %s%4s:%s 0x%016" PRIx64 "%s\n",
+ clr_reg_name,
+ XRegNameForCode(i, Reg31IsStackPointer),
+ clr_reg_value,
+ xreg(i, Reg31IsStackPointer),
+ clr_normal);
+ }
+ // Cache the new register value so the next run can detect any changes.
+ last_regs[i] = xreg(i, Reg31IsStackPointer);
+ }
+ first_run = false;
+}
+
+
+void Simulator::PrintFPRegisters(bool print_all_regs) {
+ static bool first_run = true;
+ static uint64_t last_regs[kNumberOfFPRegisters];
+
+ // Print as many rows of registers as necessary, keeping each individual
+ // register in the same column each time (to make it easy to visually scan
+ // for changes).
+ for (unsigned i = 0; i < kNumberOfFPRegisters; i++) {
+ if (print_all_regs || first_run || (last_regs[i] != dreg_bits(i))) {
+ fprintf(stream_,
+ "# %s %4s:%s 0x%016" PRIx64 "%s (%s%s:%s %g%s %s:%s %g%s)\n",
+ clr_fpreg_name,
+ VRegNameForCode(i),
+ clr_fpreg_value,
+ dreg_bits(i),
+ clr_normal,
+ clr_fpreg_name,
+ DRegNameForCode(i),
+ clr_fpreg_value,
+ dreg(i),
+ clr_fpreg_name,
+ SRegNameForCode(i),
+ clr_fpreg_value,
+ sreg(i),
+ clr_normal);
+ }
+ // Cache the new register value so the next run can detect any changes.
+ last_regs[i] = dreg_bits(i);
+ }
+ first_run = false;
+}
+
+
+void Simulator::PrintProcessorState() {
+ PrintSystemRegisters();
+ PrintRegisters();
+ PrintFPRegisters();
+}
+
+
+void Simulator::PrintWrite(uint8_t* address,
+ uint64_t value,
+ unsigned num_bytes) {
+  // The output has the form "# value -> address". The format string is kept
+  // in a variable rather than written inline in the fprintf call because
+  // compilers tend to struggle with the parameterized width (%0*).
+ const char* format = "# %s0x%0*" PRIx64 "%s -> %s0x%016" PRIx64 "%s\n";
+ fprintf(stream_,
+ format,
+ clr_memory_value,
+          num_bytes * 2,  // The width in hexadecimal characters.
+ value,
+ clr_normal,
+ clr_memory_address,
+ address,
+ clr_normal);
+}
+
+
+// Visitors ---------------------------------------------------------------
+
+void Simulator::VisitUnimplemented(Instruction* instr) {
+ fprintf(stream_, "Unimplemented instruction at %p: 0x%08" PRIx32 "\n",
+ reinterpret_cast<void*>(instr), instr->InstructionBits());
+ UNIMPLEMENTED();
+}
+
+
+void Simulator::VisitUnallocated(Instruction* instr) {
+ fprintf(stream_, "Unallocated instruction at %p: 0x%08" PRIx32 "\n",
+ reinterpret_cast<void*>(instr), instr->InstructionBits());
+ UNIMPLEMENTED();
+}
+
+
+void Simulator::VisitPCRelAddressing(Instruction* instr) {
+ switch (instr->Mask(PCRelAddressingMask)) {
+ case ADR:
+ set_reg(instr->Rd(), instr->ImmPCOffsetTarget());
+ break;
+ case ADRP: // Not implemented in the assembler.
+ UNIMPLEMENTED();
+ break;
+ default:
+ UNREACHABLE();
+ break;
+ }
+}
+
+
+void Simulator::VisitUnconditionalBranch(Instruction* instr) {
+ switch (instr->Mask(UnconditionalBranchMask)) {
+ case BL:
+ set_lr(instr->following());
+ // Fall through.
+ case B:
+ set_pc(instr->ImmPCOffsetTarget());
+ break;
+ default:
+ UNREACHABLE();
+ }
+}
+
+
+void Simulator::VisitConditionalBranch(Instruction* instr) {
+ ASSERT(instr->Mask(ConditionalBranchMask) == B_cond);
+ if (ConditionPassed(static_cast<Condition>(instr->ConditionBranch()))) {
+ set_pc(instr->ImmPCOffsetTarget());
+ }
+}
+
+
+void Simulator::VisitUnconditionalBranchToRegister(Instruction* instr) {
+ Instruction* target = reg<Instruction*>(instr->Rn());
+ switch (instr->Mask(UnconditionalBranchToRegisterMask)) {
+ case BLR: {
+ set_lr(instr->following());
+ if (instr->Rn() == 31) {
+ // BLR XZR is used as a guard for the constant pool. We should never hit
+ // this, but if we do trap to allow debugging.
+ Debug();
+ }
+ // Fall through.
+ }
+ case BR:
+ case RET: set_pc(target); break;
+ default: UNIMPLEMENTED();
+ }
+}
+
+
+void Simulator::VisitTestBranch(Instruction* instr) {
+ unsigned bit_pos = (instr->ImmTestBranchBit5() << 5) |
+ instr->ImmTestBranchBit40();
+ bool take_branch = ((xreg(instr->Rt()) & (1UL << bit_pos)) == 0);
+ switch (instr->Mask(TestBranchMask)) {
+ case TBZ: break;
+ case TBNZ: take_branch = !take_branch; break;
+ default: UNIMPLEMENTED();
+ }
+ if (take_branch) {
+ set_pc(instr->ImmPCOffsetTarget());
+ }
+}
+
+
+void Simulator::VisitCompareBranch(Instruction* instr) {
+ unsigned rt = instr->Rt();
+ bool take_branch = false;
+ switch (instr->Mask(CompareBranchMask)) {
+ case CBZ_w: take_branch = (wreg(rt) == 0); break;
+ case CBZ_x: take_branch = (xreg(rt) == 0); break;
+ case CBNZ_w: take_branch = (wreg(rt) != 0); break;
+ case CBNZ_x: take_branch = (xreg(rt) != 0); break;
+ default: UNIMPLEMENTED();
+ }
+ if (take_branch) {
+ set_pc(instr->ImmPCOffsetTarget());
+ }
+}
+
+
+void Simulator::AddSubHelper(Instruction* instr, int64_t op2) {
+ unsigned reg_size = instr->SixtyFourBits() ? kXRegSizeInBits
+ : kWRegSizeInBits;
+ bool set_flags = instr->FlagsUpdate();
+ int64_t new_val = 0;
+ Instr operation = instr->Mask(AddSubOpMask);
+
+ switch (operation) {
+ case ADD:
+ case ADDS: {
+ new_val = AddWithCarry(reg_size,
+ set_flags,
+ reg(reg_size, instr->Rn(), instr->RnMode()),
+ op2);
+ break;
+ }
+ case SUB:
+ case SUBS: {
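+      // Subtraction is performed as Rn + ~op2 + 1 (two's complement).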
+ new_val = AddWithCarry(reg_size,
+ set_flags,
+ reg(reg_size, instr->Rn(), instr->RnMode()),
+ ~op2,
+ 1);
+ break;
+ }
+ default: UNREACHABLE();
+ }
+
+ set_reg(reg_size, instr->Rd(), new_val, instr->RdMode());
+}
+
+
+void Simulator::VisitAddSubShifted(Instruction* instr) {
+ unsigned reg_size = instr->SixtyFourBits() ? kXRegSizeInBits
+ : kWRegSizeInBits;
+ int64_t op2 = ShiftOperand(reg_size,
+ reg(reg_size, instr->Rm()),
+ static_cast<Shift>(instr->ShiftDP()),
+ instr->ImmDPShift());
+ AddSubHelper(instr, op2);
+}
+
+
+void Simulator::VisitAddSubImmediate(Instruction* instr) {
+ int64_t op2 = instr->ImmAddSub() << ((instr->ShiftAddSub() == 1) ? 12 : 0);
+ AddSubHelper(instr, op2);
+}
+
+
+void Simulator::VisitAddSubExtended(Instruction* instr) {
+ unsigned reg_size = instr->SixtyFourBits() ? kXRegSizeInBits
+ : kWRegSizeInBits;
+ int64_t op2 = ExtendValue(reg_size,
+ reg(reg_size, instr->Rm()),
+ static_cast<Extend>(instr->ExtendMode()),
+ instr->ImmExtendShift());
+ AddSubHelper(instr, op2);
+}
+
+
+void Simulator::VisitAddSubWithCarry(Instruction* instr) {
+ unsigned reg_size = instr->SixtyFourBits() ? kXRegSizeInBits
+ : kWRegSizeInBits;
+ int64_t op2 = reg(reg_size, instr->Rm());
+ int64_t new_val;
+
+  if ((instr->Mask(AddSubOpMask) == SUB) ||
+      (instr->Mask(AddSubOpMask) == SUBS)) {
+ op2 = ~op2;
+ }
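+  // With the existing C flag as carry-in, the inverted operand makes this
+  // compute Rn - Rm - 1 + C for SBC/SBCS and Rn + Rm + C for ADC/ADCS.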
+
+ new_val = AddWithCarry(reg_size,
+ instr->FlagsUpdate(),
+ reg(reg_size, instr->Rn()),
+ op2,
+ nzcv().C());
+
+ set_reg(reg_size, instr->Rd(), new_val);
+}
+
+
+void Simulator::VisitLogicalShifted(Instruction* instr) {
+ unsigned reg_size = instr->SixtyFourBits() ? kXRegSizeInBits
+ : kWRegSizeInBits;
+ Shift shift_type = static_cast<Shift>(instr->ShiftDP());
+ unsigned shift_amount = instr->ImmDPShift();
+ int64_t op2 = ShiftOperand(reg_size, reg(reg_size, instr->Rm()), shift_type,
+ shift_amount);
+ if (instr->Mask(NOT) == NOT) {
+ op2 = ~op2;
+ }
+ LogicalHelper(instr, op2);
+}
+
+
+void Simulator::VisitLogicalImmediate(Instruction* instr) {
+ LogicalHelper(instr, instr->ImmLogical());
+}
+
+
+void Simulator::LogicalHelper(Instruction* instr, int64_t op2) {
+ unsigned reg_size = instr->SixtyFourBits() ? kXRegSizeInBits
+ : kWRegSizeInBits;
+ int64_t op1 = reg(reg_size, instr->Rn());
+ int64_t result = 0;
+ bool update_flags = false;
+
+ // Switch on the logical operation, stripping out the NOT bit, as it has a
+ // different meaning for logical immediate instructions.
+ switch (instr->Mask(LogicalOpMask & ~NOT)) {
+ case ANDS: update_flags = true; // Fall through.
+ case AND: result = op1 & op2; break;
+ case ORR: result = op1 | op2; break;
+ case EOR: result = op1 ^ op2; break;
+ default:
+ UNIMPLEMENTED();
+ }
+
+ if (update_flags) {
+ nzcv().SetN(CalcNFlag(result, reg_size));
+ nzcv().SetZ(CalcZFlag(result));
+ nzcv().SetC(0);
+ nzcv().SetV(0);
+ }
+
+ set_reg(reg_size, instr->Rd(), result, instr->RdMode());
+}
+
+
+void Simulator::VisitConditionalCompareRegister(Instruction* instr) {
+ unsigned reg_size = instr->SixtyFourBits() ? kXRegSizeInBits
+ : kWRegSizeInBits;
+ ConditionalCompareHelper(instr, reg(reg_size, instr->Rm()));
+}
+
+
+void Simulator::VisitConditionalCompareImmediate(Instruction* instr) {
+ ConditionalCompareHelper(instr, instr->ImmCondCmp());
+}
+
+
+void Simulator::ConditionalCompareHelper(Instruction* instr, int64_t op2) {
+ unsigned reg_size = instr->SixtyFourBits() ? kXRegSizeInBits
+ : kWRegSizeInBits;
+ int64_t op1 = reg(reg_size, instr->Rn());
+
+ if (ConditionPassed(static_cast<Condition>(instr->Condition()))) {
+ // If the condition passes, set the status flags to the result of comparing
+ // the operands.
+ if (instr->Mask(ConditionalCompareMask) == CCMP) {
+ AddWithCarry(reg_size, true, op1, ~op2, 1);
+ } else {
+ ASSERT(instr->Mask(ConditionalCompareMask) == CCMN);
+ AddWithCarry(reg_size, true, op1, op2, 0);
+ }
+ } else {
+ // If the condition fails, set the status flags to the nzcv immediate.
+ nzcv().SetFlags(instr->Nzcv());
+ }
+}
+
+
+void Simulator::VisitLoadStoreUnsignedOffset(Instruction* instr) {
+ int offset = instr->ImmLSUnsigned() << instr->SizeLS();
+ LoadStoreHelper(instr, offset, Offset);
+}
+
+
+void Simulator::VisitLoadStoreUnscaledOffset(Instruction* instr) {
+ LoadStoreHelper(instr, instr->ImmLS(), Offset);
+}
+
+
+void Simulator::VisitLoadStorePreIndex(Instruction* instr) {
+ LoadStoreHelper(instr, instr->ImmLS(), PreIndex);
+}
+
+
+void Simulator::VisitLoadStorePostIndex(Instruction* instr) {
+ LoadStoreHelper(instr, instr->ImmLS(), PostIndex);
+}
+
+
+void Simulator::VisitLoadStoreRegisterOffset(Instruction* instr) {
+ Extend ext = static_cast<Extend>(instr->ExtendMode());
+ ASSERT((ext == UXTW) || (ext == UXTX) || (ext == SXTW) || (ext == SXTX));
+ unsigned shift_amount = instr->ImmShiftLS() * instr->SizeLS();
+
+ int64_t offset = ExtendValue(kXRegSizeInBits, xreg(instr->Rm()), ext,
+ shift_amount);
+ LoadStoreHelper(instr, offset, Offset);
+}
+
+
+void Simulator::LoadStoreHelper(Instruction* instr,
+ int64_t offset,
+ AddrMode addrmode) {
+ unsigned srcdst = instr->Rt();
+ unsigned addr_reg = instr->Rn();
+ uint8_t* address = LoadStoreAddress(addr_reg, offset, addrmode);
+ int num_bytes = 1 << instr->SizeLS();
+ uint8_t* stack = NULL;
+
+  // Handle the writeback for stores before the store. On a CPU the writeback
+  // and the store are atomic, but when running on the simulator it is possible
+  // to be interrupted between them. The simulator is not thread safe, and V8
+  // does not require it to be to run JavaScript, so the profiler may sample
+  // the "simulated" CPU in the middle of a load/store with writeback. The code
+  // below ensures that push operations are safe even when interrupted: the
+  // stack pointer is decremented before the element is added to the stack.
+ if (instr->IsStore()) {
+ LoadStoreWriteBack(addr_reg, offset, addrmode);
+
+ // For store the address post writeback is used to check access below the
+ // stack.
+ stack = reinterpret_cast<uint8_t*>(sp());
+ }
+
+ LoadStoreOp op = static_cast<LoadStoreOp>(instr->Mask(LoadStoreOpMask));
+ switch (op) {
+ case LDRB_w:
+ case LDRH_w:
+ case LDR_w:
+ case LDR_x: set_xreg(srcdst, MemoryRead(address, num_bytes)); break;
+ case STRB_w:
+ case STRH_w:
+ case STR_w:
+ case STR_x: MemoryWrite(address, xreg(srcdst), num_bytes); break;
+ case LDRSB_w: {
+ set_wreg(srcdst,
+ ExtendValue(kWRegSizeInBits, MemoryRead8(address), SXTB));
+ break;
+ }
+ case LDRSB_x: {
+ set_xreg(srcdst,
+ ExtendValue(kXRegSizeInBits, MemoryRead8(address), SXTB));
+ break;
+ }
+ case LDRSH_w: {
+ set_wreg(srcdst,
+ ExtendValue(kWRegSizeInBits, MemoryRead16(address), SXTH));
+ break;
+ }
+ case LDRSH_x: {
+ set_xreg(srcdst,
+ ExtendValue(kXRegSizeInBits, MemoryRead16(address), SXTH));
+ break;
+ }
+ case LDRSW_x: {
+ set_xreg(srcdst,
+ ExtendValue(kXRegSizeInBits, MemoryRead32(address), SXTW));
+ break;
+ }
+ case LDR_s: set_sreg(srcdst, MemoryReadFP32(address)); break;
+ case LDR_d: set_dreg(srcdst, MemoryReadFP64(address)); break;
+ case STR_s: MemoryWriteFP32(address, sreg(srcdst)); break;
+ case STR_d: MemoryWriteFP64(address, dreg(srcdst)); break;
+ default: UNIMPLEMENTED();
+ }
+
+ // Handle the writeback for loads after the load to ensure safe pop
+ // operation even when interrupted in the middle of it. The stack pointer
+ // is only updated after the load so pop(fp) will never break the invariant
+ // sp <= fp expected while walking the stack in the sampler.
+ if (instr->IsLoad()) {
+ // For loads the address pre writeback is used to check access below the
+ // stack.
+ stack = reinterpret_cast<uint8_t*>(sp());
+
+ LoadStoreWriteBack(addr_reg, offset, addrmode);
+ }
+
+ // Accesses below the stack pointer (but above the platform stack limit) are
+ // not allowed in the ABI.
+ CheckMemoryAccess(address, stack);
+}
+
+
+void Simulator::VisitLoadStorePairOffset(Instruction* instr) {
+ LoadStorePairHelper(instr, Offset);
+}
+
+
+void Simulator::VisitLoadStorePairPreIndex(Instruction* instr) {
+ LoadStorePairHelper(instr, PreIndex);
+}
+
+
+void Simulator::VisitLoadStorePairPostIndex(Instruction* instr) {
+ LoadStorePairHelper(instr, PostIndex);
+}
+
+
+void Simulator::VisitLoadStorePairNonTemporal(Instruction* instr) {
+ LoadStorePairHelper(instr, Offset);
+}
+
+
+void Simulator::LoadStorePairHelper(Instruction* instr,
+ AddrMode addrmode) {
+ unsigned rt = instr->Rt();
+ unsigned rt2 = instr->Rt2();
+ unsigned addr_reg = instr->Rn();
+ int offset = instr->ImmLSPair() << instr->SizeLSPair();
+ uint8_t* address = LoadStoreAddress(addr_reg, offset, addrmode);
+ uint8_t* stack = NULL;
+
+  // Handle the writeback for stores before the store. On a CPU the writeback
+  // and the store are atomic, but when running on the simulator it is possible
+  // to be interrupted between them. The simulator is not thread safe, and V8
+  // does not require it to be to run JavaScript, so the profiler may sample
+  // the "simulated" CPU in the middle of a load/store with writeback. The code
+  // below ensures that push operations are safe even when interrupted: the
+  // stack pointer is decremented before the element is added to the stack.
+ if (instr->IsStore()) {
+ LoadStoreWriteBack(addr_reg, offset, addrmode);
+
+ // For store the address post writeback is used to check access below the
+ // stack.
+ stack = reinterpret_cast<uint8_t*>(sp());
+ }
+
+ LoadStorePairOp op =
+ static_cast<LoadStorePairOp>(instr->Mask(LoadStorePairMask));
+
+ // 'rt' and 'rt2' can only be aliased for stores.
+ ASSERT(((op & LoadStorePairLBit) == 0) || (rt != rt2));
+
+ switch (op) {
+ case LDP_w: {
+ set_wreg(rt, MemoryRead32(address));
+ set_wreg(rt2, MemoryRead32(address + kWRegSize));
+ break;
+ }
+ case LDP_s: {
+ set_sreg(rt, MemoryReadFP32(address));
+ set_sreg(rt2, MemoryReadFP32(address + kSRegSize));
+ break;
+ }
+ case LDP_x: {
+ set_xreg(rt, MemoryRead64(address));
+ set_xreg(rt2, MemoryRead64(address + kXRegSize));
+ break;
+ }
+ case LDP_d: {
+ set_dreg(rt, MemoryReadFP64(address));
+ set_dreg(rt2, MemoryReadFP64(address + kDRegSize));
+ break;
+ }
+ case LDPSW_x: {
+ set_xreg(rt, ExtendValue(kXRegSizeInBits, MemoryRead32(address), SXTW));
+ set_xreg(rt2, ExtendValue(kXRegSizeInBits,
+ MemoryRead32(address + kWRegSize), SXTW));
+ break;
+ }
+ case STP_w: {
+ MemoryWrite32(address, wreg(rt));
+ MemoryWrite32(address + kWRegSize, wreg(rt2));
+ break;
+ }
+ case STP_s: {
+ MemoryWriteFP32(address, sreg(rt));
+ MemoryWriteFP32(address + kSRegSize, sreg(rt2));
+ break;
+ }
+ case STP_x: {
+ MemoryWrite64(address, xreg(rt));
+ MemoryWrite64(address + kXRegSize, xreg(rt2));
+ break;
+ }
+ case STP_d: {
+ MemoryWriteFP64(address, dreg(rt));
+ MemoryWriteFP64(address + kDRegSize, dreg(rt2));
+ break;
+ }
+ default: UNREACHABLE();
+ }
+
+ // Handle the writeback for loads after the load to ensure safe pop
+ // operation even when interrupted in the middle of it. The stack pointer
+ // is only updated after the load so pop(fp) will never break the invariant
+ // sp <= fp expected while walking the stack in the sampler.
+ if (instr->IsLoad()) {
+ // For loads the address pre writeback is used to check access below the
+ // stack.
+ stack = reinterpret_cast<uint8_t*>(sp());
+
+ LoadStoreWriteBack(addr_reg, offset, addrmode);
+ }
+
+ // Accesses below the stack pointer (but above the platform stack limit) are
+ // not allowed in the ABI.
+ CheckMemoryAccess(address, stack);
+}
+
+
+void Simulator::VisitLoadLiteral(Instruction* instr) {
+ uint8_t* address = instr->LiteralAddress();
+ unsigned rt = instr->Rt();
+
+ switch (instr->Mask(LoadLiteralMask)) {
+ case LDR_w_lit: set_wreg(rt, MemoryRead32(address)); break;
+ case LDR_x_lit: set_xreg(rt, MemoryRead64(address)); break;
+ case LDR_s_lit: set_sreg(rt, MemoryReadFP32(address)); break;
+ case LDR_d_lit: set_dreg(rt, MemoryReadFP64(address)); break;
+ default: UNREACHABLE();
+ }
+}
+
+
+uint8_t* Simulator::LoadStoreAddress(unsigned addr_reg,
+ int64_t offset,
+ AddrMode addrmode) {
+ const unsigned kSPRegCode = kSPRegInternalCode & kRegCodeMask;
+ int64_t address = xreg(addr_reg, Reg31IsStackPointer);
+ if ((addr_reg == kSPRegCode) && ((address % 16) != 0)) {
+ // When the base register is SP the stack pointer is required to be
+ // quadword aligned prior to the address calculation and write-backs.
+ // Misalignment will cause a stack alignment fault.
+ FATAL("ALIGNMENT EXCEPTION");
+ }
+
+ if ((addrmode == Offset) || (addrmode == PreIndex)) {
+ address += offset;
+ }
+
+ return reinterpret_cast<uint8_t*>(address);
+}
+
+
+void Simulator::LoadStoreWriteBack(unsigned addr_reg,
+ int64_t offset,
+ AddrMode addrmode) {
+ if ((addrmode == PreIndex) || (addrmode == PostIndex)) {
+ ASSERT(offset != 0);
+ uint64_t address = xreg(addr_reg, Reg31IsStackPointer);
+ set_reg(addr_reg, address + offset, Reg31IsStackPointer);
+ }
+}
+
+
+void Simulator::CheckMemoryAccess(uint8_t* address, uint8_t* stack) {
+ if ((address >= stack_limit_) && (address < stack)) {
+ fprintf(stream_, "ACCESS BELOW STACK POINTER:\n");
+ fprintf(stream_, " sp is here: 0x%16p\n", stack);
+ fprintf(stream_, " access was here: 0x%16p\n", address);
+ fprintf(stream_, " stack limit is here: 0x%16p\n", stack_limit_);
+ fprintf(stream_, "\n");
+ FATAL("ACCESS BELOW STACK POINTER");
+ }
+}
+
+
+uint64_t Simulator::MemoryRead(uint8_t* address, unsigned num_bytes) {
+ ASSERT(address != NULL);
+ ASSERT((num_bytes > 0) && (num_bytes <= sizeof(uint64_t)));
+ uint64_t read = 0;
+ memcpy(&read, address, num_bytes);
+ return read;
+}
+
+
+uint8_t Simulator::MemoryRead8(uint8_t* address) {
+ return MemoryRead(address, sizeof(uint8_t));
+}
+
+
+uint16_t Simulator::MemoryRead16(uint8_t* address) {
+ return MemoryRead(address, sizeof(uint16_t));
+}
+
+
+uint32_t Simulator::MemoryRead32(uint8_t* address) {
+ return MemoryRead(address, sizeof(uint32_t));
+}
+
+
+float Simulator::MemoryReadFP32(uint8_t* address) {
+ return rawbits_to_float(MemoryRead32(address));
+}
+
+
+uint64_t Simulator::MemoryRead64(uint8_t* address) {
+ return MemoryRead(address, sizeof(uint64_t));
+}
+
+
+double Simulator::MemoryReadFP64(uint8_t* address) {
+ return rawbits_to_double(MemoryRead64(address));
+}
+
+
+void Simulator::MemoryWrite(uint8_t* address,
+ uint64_t value,
+ unsigned num_bytes) {
+ ASSERT(address != NULL);
+ ASSERT((num_bytes > 0) && (num_bytes <= sizeof(uint64_t)));
+
+ LogWrite(address, value, num_bytes);
+ memcpy(address, &value, num_bytes);
+}
+
+
+void Simulator::MemoryWrite32(uint8_t* address, uint32_t value) {
+ MemoryWrite(address, value, sizeof(uint32_t));
+}
+
+
+void Simulator::MemoryWriteFP32(uint8_t* address, float value) {
+ MemoryWrite32(address, float_to_rawbits(value));
+}
+
+
+void Simulator::MemoryWrite64(uint8_t* address, uint64_t value) {
+ MemoryWrite(address, value, sizeof(uint64_t));
+}
+
+
+void Simulator::MemoryWriteFP64(uint8_t* address, double value) {
+ MemoryWrite64(address, double_to_rawbits(value));
+}
+
+
+void Simulator::VisitMoveWideImmediate(Instruction* instr) {
+ MoveWideImmediateOp mov_op =
+ static_cast<MoveWideImmediateOp>(instr->Mask(MoveWideImmediateMask));
+ int64_t new_xn_val = 0;
+
+ bool is_64_bits = instr->SixtyFourBits() == 1;
+ // Shift is limited for W operations.
+ ASSERT(is_64_bits || (instr->ShiftMoveWide() < 2));
+
+ // Get the shifted immediate.
+ int64_t shift = instr->ShiftMoveWide() * 16;
+ int64_t shifted_imm16 = instr->ImmMoveWide() << shift;
+
+ // Compute the new value.
+ switch (mov_op) {
+ case MOVN_w:
+ case MOVN_x: {
+ new_xn_val = ~shifted_imm16;
+ if (!is_64_bits) new_xn_val &= kWRegMask;
+ break;
+ }
+ case MOVK_w:
+ case MOVK_x: {
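+      // MOVK only rewrites the targeted 16-bit field; e.g. with shift 16 it
+      // replaces bits [31:16] and preserves every other bit of the register.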
+ unsigned reg_code = instr->Rd();
+ int64_t prev_xn_val = is_64_bits ? xreg(reg_code)
+ : wreg(reg_code);
+ new_xn_val = (prev_xn_val & ~(0xffffL << shift)) | shifted_imm16;
+ break;
+ }
+ case MOVZ_w:
+ case MOVZ_x: {
+ new_xn_val = shifted_imm16;
+ break;
+ }
+ default:
+ UNREACHABLE();
+ }
+
+ // Update the destination register.
+ set_xreg(instr->Rd(), new_xn_val);
+}
+
+
+void Simulator::VisitConditionalSelect(Instruction* instr) {
+ uint64_t new_val = xreg(instr->Rn());
+
+ if (ConditionFailed(static_cast<Condition>(instr->Condition()))) {
+ new_val = xreg(instr->Rm());
+ switch (instr->Mask(ConditionalSelectMask)) {
+ case CSEL_w:
+ case CSEL_x: break;
+ case CSINC_w:
+ case CSINC_x: new_val++; break;
+ case CSINV_w:
+ case CSINV_x: new_val = ~new_val; break;
+ case CSNEG_w:
+ case CSNEG_x: new_val = -new_val; break;
+ default: UNIMPLEMENTED();
+ }
+ }
+ unsigned reg_size = instr->SixtyFourBits() ? kXRegSizeInBits
+ : kWRegSizeInBits;
+ set_reg(reg_size, instr->Rd(), new_val);
+}
+
+
+void Simulator::VisitDataProcessing1Source(Instruction* instr) {
+ unsigned dst = instr->Rd();
+ unsigned src = instr->Rn();
+
+ switch (instr->Mask(DataProcessing1SourceMask)) {
+ case RBIT_w: set_wreg(dst, ReverseBits(wreg(src), kWRegSizeInBits)); break;
+ case RBIT_x: set_xreg(dst, ReverseBits(xreg(src), kXRegSizeInBits)); break;
+ case REV16_w: set_wreg(dst, ReverseBytes(wreg(src), Reverse16)); break;
+ case REV16_x: set_xreg(dst, ReverseBytes(xreg(src), Reverse16)); break;
+ case REV_w: set_wreg(dst, ReverseBytes(wreg(src), Reverse32)); break;
+ case REV32_x: set_xreg(dst, ReverseBytes(xreg(src), Reverse32)); break;
+ case REV_x: set_xreg(dst, ReverseBytes(xreg(src), Reverse64)); break;
+ case CLZ_w: set_wreg(dst, CountLeadingZeros(wreg(src), kWRegSizeInBits));
+ break;
+ case CLZ_x: set_xreg(dst, CountLeadingZeros(xreg(src), kXRegSizeInBits));
+ break;
+ case CLS_w: {
+ set_wreg(dst, CountLeadingSignBits(wreg(src), kWRegSizeInBits));
+ break;
+ }
+ case CLS_x: {
+ set_xreg(dst, CountLeadingSignBits(xreg(src), kXRegSizeInBits));
+ break;
+ }
+ default: UNIMPLEMENTED();
+ }
+}
+
+
+uint64_t Simulator::ReverseBits(uint64_t value, unsigned num_bits) {
+ ASSERT((num_bits == kWRegSizeInBits) || (num_bits == kXRegSizeInBits));
+ uint64_t result = 0;
+ for (unsigned i = 0; i < num_bits; i++) {
+ result = (result << 1) | (value & 1);
+ value >>= 1;
+ }
+ return result;
+}
+
+
+uint64_t Simulator::ReverseBytes(uint64_t value, ReverseByteMode mode) {
+  // Split the 64-bit value into an array of eight bytes, where bytes[0] is the
+  // least significant byte and bytes[7] is the most significant.
+ uint8_t bytes[8];
+ uint64_t mask = 0xff00000000000000UL;
+ for (int i = 7; i >= 0; i--) {
+ bytes[i] = (value & mask) >> (i * 8);
+ mask >>= 8;
+ }
+
+ // Permutation tables for REV instructions.
+ // permute_table[Reverse16] is used by REV16_x, REV16_w
+ // permute_table[Reverse32] is used by REV32_x, REV_w
+ // permute_table[Reverse64] is used by REV_x
+ ASSERT((Reverse16 == 0) && (Reverse32 == 1) && (Reverse64 == 2));
+ static const uint8_t permute_table[3][8] = { {6, 7, 4, 5, 2, 3, 0, 1},
+ {4, 5, 6, 7, 0, 1, 2, 3},
+ {0, 1, 2, 3, 4, 5, 6, 7} };
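+  // e.g. ReverseBytes(0x0123456789abcdef, Reverse16) == 0x23016745ab89efcd.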
+ uint64_t result = 0;
+ for (int i = 0; i < 8; i++) {
+ result <<= 8;
+ result |= bytes[permute_table[mode][i]];
+ }
+ return result;
+}
+
+
+void Simulator::VisitDataProcessing2Source(Instruction* instr) {
+ Shift shift_op = NO_SHIFT;
+ int64_t result = 0;
+ switch (instr->Mask(DataProcessing2SourceMask)) {
+ case SDIV_w: {
+ int32_t rn = wreg(instr->Rn());
+ int32_t rm = wreg(instr->Rm());
+ if ((rn == kWMinInt) && (rm == -1)) {
+ result = kWMinInt;
+ } else if (rm == 0) {
+ // Division by zero can be trapped, but not on A-class processors.
+ result = 0;
+ } else {
+ result = rn / rm;
+ }
+ break;
+ }
+ case SDIV_x: {
+ int64_t rn = xreg(instr->Rn());
+ int64_t rm = xreg(instr->Rm());
+ if ((rn == kXMinInt) && (rm == -1)) {
+ result = kXMinInt;
+ } else if (rm == 0) {
+ // Division by zero can be trapped, but not on A-class processors.
+ result = 0;
+ } else {
+ result = rn / rm;
+ }
+ break;
+ }
+ case UDIV_w: {
+ uint32_t rn = static_cast<uint32_t>(wreg(instr->Rn()));
+ uint32_t rm = static_cast<uint32_t>(wreg(instr->Rm()));
+ if (rm == 0) {
+ // Division by zero can be trapped, but not on A-class processors.
+ result = 0;
+ } else {
+ result = rn / rm;
+ }
+ break;
+ }
+ case UDIV_x: {
+ uint64_t rn = static_cast<uint64_t>(xreg(instr->Rn()));
+ uint64_t rm = static_cast<uint64_t>(xreg(instr->Rm()));
+ if (rm == 0) {
+ // Division by zero can be trapped, but not on A-class processors.
+ result = 0;
+ } else {
+ result = rn / rm;
+ }
+ break;
+ }
+ case LSLV_w:
+ case LSLV_x: shift_op = LSL; break;
+ case LSRV_w:
+ case LSRV_x: shift_op = LSR; break;
+ case ASRV_w:
+ case ASRV_x: shift_op = ASR; break;
+ case RORV_w:
+ case RORV_x: shift_op = ROR; break;
+ default: UNIMPLEMENTED();
+ }
+
+ unsigned reg_size = instr->SixtyFourBits() ? kXRegSizeInBits
+ : kWRegSizeInBits;
+ if (shift_op != NO_SHIFT) {
+ // Shift distance encoded in the least-significant five/six bits of the
+ // register.
+ int mask = (instr->SixtyFourBits() == 1) ? 0x3f : 0x1f;
+ unsigned shift = wreg(instr->Rm()) & mask;
+ result = ShiftOperand(reg_size, reg(reg_size, instr->Rn()), shift_op,
+ shift);
+ }
+ set_reg(reg_size, instr->Rd(), result);
+}
+
+
+// The algorithm used is described in section 8.2 of
+// Hacker's Delight, by Henry S. Warren, Jr.
+// It assumes that a right shift on a signed integer is an arithmetic shift.
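+// It returns the high 64 bits of the signed 128-bit product; for example,
+// MultiplyHighSigned(1LL << 62, 4) == 1 and MultiplyHighSigned(-1, -1) == 0.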
+static int64_t MultiplyHighSigned(int64_t u, int64_t v) {
+ uint64_t u0, v0, w0;
+ int64_t u1, v1, w1, w2, t;
+
+ u0 = u & 0xffffffffL;
+ u1 = u >> 32;
+ v0 = v & 0xffffffffL;
+ v1 = v >> 32;
+
+ w0 = u0 * v0;
+ t = u1 * v0 + (w0 >> 32);
+ w1 = t & 0xffffffffL;
+ w2 = t >> 32;
+ w1 = u0 * v1 + w1;
+
+ return u1 * v1 + w2 + (w1 >> 32);
+}
+
+
+void Simulator::VisitDataProcessing3Source(Instruction* instr) {
+ unsigned reg_size = instr->SixtyFourBits() ? kXRegSizeInBits
+ : kWRegSizeInBits;
+
+ int64_t result = 0;
+ // Extract and sign- or zero-extend 32-bit arguments for widening operations.
+ uint64_t rn_u32 = reg<uint32_t>(instr->Rn());
+ uint64_t rm_u32 = reg<uint32_t>(instr->Rm());
+ int64_t rn_s32 = reg<int32_t>(instr->Rn());
+ int64_t rm_s32 = reg<int32_t>(instr->Rm());
+ switch (instr->Mask(DataProcessing3SourceMask)) {
+ case MADD_w:
+ case MADD_x:
+ result = xreg(instr->Ra()) + (xreg(instr->Rn()) * xreg(instr->Rm()));
+ break;
+ case MSUB_w:
+ case MSUB_x:
+ result = xreg(instr->Ra()) - (xreg(instr->Rn()) * xreg(instr->Rm()));
+ break;
+ case SMADDL_x: result = xreg(instr->Ra()) + (rn_s32 * rm_s32); break;
+ case SMSUBL_x: result = xreg(instr->Ra()) - (rn_s32 * rm_s32); break;
+ case UMADDL_x: result = xreg(instr->Ra()) + (rn_u32 * rm_u32); break;
+ case UMSUBL_x: result = xreg(instr->Ra()) - (rn_u32 * rm_u32); break;
+ case SMULH_x:
+ ASSERT(instr->Ra() == kZeroRegCode);
+ result = MultiplyHighSigned(xreg(instr->Rn()), xreg(instr->Rm()));
+ break;
+ default: UNIMPLEMENTED();
+ }
+ set_reg(reg_size, instr->Rd(), result);
+}
+
+
+void Simulator::VisitBitfield(Instruction* instr) {
+ unsigned reg_size = instr->SixtyFourBits() ? kXRegSizeInBits
+ : kWRegSizeInBits;
+ int64_t reg_mask = instr->SixtyFourBits() ? kXRegMask : kWRegMask;
+ int64_t R = instr->ImmR();
+ int64_t S = instr->ImmS();
+ int64_t diff = S - R;
+ int64_t mask;
+ if (diff >= 0) {
+ mask = diff < reg_size - 1 ? (1L << (diff + 1)) - 1
+ : reg_mask;
+ } else {
+ mask = ((1L << (S + 1)) - 1);
+ mask = (static_cast<uint64_t>(mask) >> R) | (mask << (reg_size - R));
+ diff += reg_size;
+ }
+
+ // inzero indicates if the extracted bitfield is inserted into the
+ // destination register value or in zero.
+ // If extend is true, extend the sign of the extracted bitfield.
+ bool inzero = false;
+ bool extend = false;
+ switch (instr->Mask(BitfieldMask)) {
+ case BFM_x:
+ case BFM_w:
+ break;
+ case SBFM_x:
+ case SBFM_w:
+ inzero = true;
+ extend = true;
+ break;
+ case UBFM_x:
+ case UBFM_w:
+ inzero = true;
+ break;
+ default:
+ UNIMPLEMENTED();
+ }
+
+ int64_t dst = inzero ? 0 : reg(reg_size, instr->Rd());
+ int64_t src = reg(reg_size, instr->Rn());
+ // Rotate source bitfield into place.
+ int64_t result = (static_cast<uint64_t>(src) >> R) | (src << (reg_size - R));
+ // Determine the sign extension.
+ int64_t topbits = ((1L << (reg_size - diff - 1)) - 1) << (diff + 1);
+ int64_t signbits = extend && ((src >> S) & 1) ? topbits : 0;
+
+ // Merge sign extension, dest/zero and bitfield.
+ result = signbits | (result & mask) | (dst & ~mask);
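+  // e.g. for UBFM_w with R=8 and S=15 (the UBFX #8, #8 form) this leaves bits
+  // [15:8] of Rn in bits [7:0] of Rd and zeroes everything else.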
+
+ set_reg(reg_size, instr->Rd(), result);
+}
+
+
+void Simulator::VisitExtract(Instruction* instr) {
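+  // EXTR: treat Rn:Rm as one double-width value and extract reg_size
+  // contiguous bits starting at bit 'lsb' of Rm.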
+ unsigned lsb = instr->ImmS();
+ unsigned reg_size = (instr->SixtyFourBits() == 1) ? kXRegSizeInBits
+ : kWRegSizeInBits;
+ set_reg(reg_size,
+ instr->Rd(),
+ (static_cast<uint64_t>(reg(reg_size, instr->Rm())) >> lsb) |
+ (reg(reg_size, instr->Rn()) << (reg_size - lsb)));
+}
+
+
+void Simulator::VisitFPImmediate(Instruction* instr) {
+ AssertSupportedFPCR();
+
+ unsigned dest = instr->Rd();
+ switch (instr->Mask(FPImmediateMask)) {
+ case FMOV_s_imm: set_sreg(dest, instr->ImmFP32()); break;
+ case FMOV_d_imm: set_dreg(dest, instr->ImmFP64()); break;
+ default: UNREACHABLE();
+ }
+}
+
+
+void Simulator::VisitFPIntegerConvert(Instruction* instr) {
+ AssertSupportedFPCR();
+
+ unsigned dst = instr->Rd();
+ unsigned src = instr->Rn();
+
+ FPRounding round = fpcr().RMode();
+
+ switch (instr->Mask(FPIntegerConvertMask)) {
+ case FCVTAS_ws: set_wreg(dst, FPToInt32(sreg(src), FPTieAway)); break;
+ case FCVTAS_xs: set_xreg(dst, FPToInt64(sreg(src), FPTieAway)); break;
+ case FCVTAS_wd: set_wreg(dst, FPToInt32(dreg(src), FPTieAway)); break;
+ case FCVTAS_xd: set_xreg(dst, FPToInt64(dreg(src), FPTieAway)); break;
+ case FCVTAU_ws: set_wreg(dst, FPToUInt32(sreg(src), FPTieAway)); break;
+ case FCVTAU_xs: set_xreg(dst, FPToUInt64(sreg(src), FPTieAway)); break;
+ case FCVTAU_wd: set_wreg(dst, FPToUInt32(dreg(src), FPTieAway)); break;
+ case FCVTAU_xd: set_xreg(dst, FPToUInt64(dreg(src), FPTieAway)); break;
+ case FCVTMS_ws:
+ set_wreg(dst, FPToInt32(sreg(src), FPNegativeInfinity));
+ break;
+ case FCVTMS_xs:
+ set_xreg(dst, FPToInt64(sreg(src), FPNegativeInfinity));
+ break;
+ case FCVTMS_wd:
+ set_wreg(dst, FPToInt32(dreg(src), FPNegativeInfinity));
+ break;
+ case FCVTMS_xd:
+ set_xreg(dst, FPToInt64(dreg(src), FPNegativeInfinity));
+ break;
+ case FCVTMU_ws:
+ set_wreg(dst, FPToUInt32(sreg(src), FPNegativeInfinity));
+ break;
+ case FCVTMU_xs:
+ set_xreg(dst, FPToUInt64(sreg(src), FPNegativeInfinity));
+ break;
+ case FCVTMU_wd:
+ set_wreg(dst, FPToUInt32(dreg(src), FPNegativeInfinity));
+ break;
+ case FCVTMU_xd:
+ set_xreg(dst, FPToUInt64(dreg(src), FPNegativeInfinity));
+ break;
+ case FCVTNS_ws: set_wreg(dst, FPToInt32(sreg(src), FPTieEven)); break;
+ case FCVTNS_xs: set_xreg(dst, FPToInt64(sreg(src), FPTieEven)); break;
+ case FCVTNS_wd: set_wreg(dst, FPToInt32(dreg(src), FPTieEven)); break;
+ case FCVTNS_xd: set_xreg(dst, FPToInt64(dreg(src), FPTieEven)); break;
+ case FCVTNU_ws: set_wreg(dst, FPToUInt32(sreg(src), FPTieEven)); break;
+ case FCVTNU_xs: set_xreg(dst, FPToUInt64(sreg(src), FPTieEven)); break;
+ case FCVTNU_wd: set_wreg(dst, FPToUInt32(dreg(src), FPTieEven)); break;
+ case FCVTNU_xd: set_xreg(dst, FPToUInt64(dreg(src), FPTieEven)); break;
+ case FCVTZS_ws: set_wreg(dst, FPToInt32(sreg(src), FPZero)); break;
+ case FCVTZS_xs: set_xreg(dst, FPToInt64(sreg(src), FPZero)); break;
+ case FCVTZS_wd: set_wreg(dst, FPToInt32(dreg(src), FPZero)); break;
+ case FCVTZS_xd: set_xreg(dst, FPToInt64(dreg(src), FPZero)); break;
+ case FCVTZU_ws: set_wreg(dst, FPToUInt32(sreg(src), FPZero)); break;
+ case FCVTZU_xs: set_xreg(dst, FPToUInt64(sreg(src), FPZero)); break;
+ case FCVTZU_wd: set_wreg(dst, FPToUInt32(dreg(src), FPZero)); break;
+ case FCVTZU_xd: set_xreg(dst, FPToUInt64(dreg(src), FPZero)); break;
+ case FMOV_ws: set_wreg(dst, sreg_bits(src)); break;
+ case FMOV_xd: set_xreg(dst, dreg_bits(src)); break;
+ case FMOV_sw: set_sreg_bits(dst, wreg(src)); break;
+ case FMOV_dx: set_dreg_bits(dst, xreg(src)); break;
+
+ // A 32-bit input can be handled in the same way as a 64-bit input, since
+ // the sign- or zero-extension will not affect the conversion.
+ case SCVTF_dx: set_dreg(dst, FixedToDouble(xreg(src), 0, round)); break;
+ case SCVTF_dw: set_dreg(dst, FixedToDouble(wreg(src), 0, round)); break;
+ case UCVTF_dx: set_dreg(dst, UFixedToDouble(xreg(src), 0, round)); break;
+ case UCVTF_dw: {
+ set_dreg(dst, UFixedToDouble(reg<uint32_t>(src), 0, round));
+ break;
+ }
+ case SCVTF_sx: set_sreg(dst, FixedToFloat(xreg(src), 0, round)); break;
+ case SCVTF_sw: set_sreg(dst, FixedToFloat(wreg(src), 0, round)); break;
+ case UCVTF_sx: set_sreg(dst, UFixedToFloat(xreg(src), 0, round)); break;
+ case UCVTF_sw: {
+ set_sreg(dst, UFixedToFloat(reg<uint32_t>(src), 0, round));
+ break;
+ }
+
+ default: UNREACHABLE();
+ }
+}
+
+
+void Simulator::VisitFPFixedPointConvert(Instruction* instr) {
+ AssertSupportedFPCR();
+
+ unsigned dst = instr->Rd();
+ unsigned src = instr->Rn();
+ int fbits = 64 - instr->FPScale();
+
+ FPRounding round = fpcr().RMode();
+
+ switch (instr->Mask(FPFixedPointConvertMask)) {
+ // A 32-bit input can be handled in the same way as a 64-bit input, since
+ // the sign- or zero-extension will not affect the conversion.
+ case SCVTF_dx_fixed:
+ set_dreg(dst, FixedToDouble(xreg(src), fbits, round));
+ break;
+ case SCVTF_dw_fixed:
+ set_dreg(dst, FixedToDouble(wreg(src), fbits, round));
+ break;
+ case UCVTF_dx_fixed:
+ set_dreg(dst, UFixedToDouble(xreg(src), fbits, round));
+ break;
+ case UCVTF_dw_fixed: {
+ set_dreg(dst,
+ UFixedToDouble(reg<uint32_t>(src), fbits, round));
+ break;
+ }
+ case SCVTF_sx_fixed:
+ set_sreg(dst, FixedToFloat(xreg(src), fbits, round));
+ break;
+ case SCVTF_sw_fixed:
+ set_sreg(dst, FixedToFloat(wreg(src), fbits, round));
+ break;
+ case UCVTF_sx_fixed:
+ set_sreg(dst, UFixedToFloat(xreg(src), fbits, round));
+ break;
+ case UCVTF_sw_fixed: {
+ set_sreg(dst,
+ UFixedToFloat(reg<uint32_t>(src), fbits, round));
+ break;
+ }
+ default: UNREACHABLE();
+ }
+}
+
+
+int32_t Simulator::FPToInt32(double value, FPRounding rmode) {
+ value = FPRoundInt(value, rmode);
+ if (value >= kWMaxInt) {
+ return kWMaxInt;
+ } else if (value < kWMinInt) {
+ return kWMinInt;
+ }
+ return std::isnan(value) ? 0 : static_cast<int32_t>(value);
+}
+
+
+int64_t Simulator::FPToInt64(double value, FPRounding rmode) {
+ value = FPRoundInt(value, rmode);
+ if (value >= kXMaxInt) {
+ return kXMaxInt;
+ } else if (value < kXMinInt) {
+ return kXMinInt;
+ }
+ return std::isnan(value) ? 0 : static_cast<int64_t>(value);
+}
+
+
+uint32_t Simulator::FPToUInt32(double value, FPRounding rmode) {
+ value = FPRoundInt(value, rmode);
+ if (value >= kWMaxUInt) {
+ return kWMaxUInt;
+ } else if (value < 0.0) {
+ return 0;
+ }
+ return std::isnan(value) ? 0 : static_cast<uint32_t>(value);
+}
+
+
+uint64_t Simulator::FPToUInt64(double value, FPRounding rmode) {
+ value = FPRoundInt(value, rmode);
+ if (value >= kXMaxUInt) {
+ return kXMaxUInt;
+ } else if (value < 0.0) {
+ return 0;
+ }
+ return std::isnan(value) ? 0 : static_cast<uint64_t>(value);
+}
+
+
+void Simulator::VisitFPCompare(Instruction* instr) {
+ AssertSupportedFPCR();
+
+ unsigned reg_size = (instr->Mask(FP64) == FP64) ? kDRegSizeInBits
+ : kSRegSizeInBits;
+ double fn_val = fpreg(reg_size, instr->Rn());
+
+ switch (instr->Mask(FPCompareMask)) {
+ case FCMP_s:
+ case FCMP_d: FPCompare(fn_val, fpreg(reg_size, instr->Rm())); break;
+ case FCMP_s_zero:
+ case FCMP_d_zero: FPCompare(fn_val, 0.0); break;
+ default: UNIMPLEMENTED();
+ }
+}
+
+
+void Simulator::VisitFPConditionalCompare(Instruction* instr) {
+ AssertSupportedFPCR();
+
+ switch (instr->Mask(FPConditionalCompareMask)) {
+ case FCCMP_s:
+ case FCCMP_d: {
+ if (ConditionPassed(static_cast<Condition>(instr->Condition()))) {
+ // If the condition passes, set the status flags to the result of
+ // comparing the operands.
+ unsigned reg_size = (instr->Mask(FP64) == FP64) ? kDRegSizeInBits
+ : kSRegSizeInBits;
+ FPCompare(fpreg(reg_size, instr->Rn()), fpreg(reg_size, instr->Rm()));
+ } else {
+ // If the condition fails, set the status flags to the nzcv immediate.
+ nzcv().SetFlags(instr->Nzcv());
+ }
+ break;
+ }
+ default: UNIMPLEMENTED();
+ }
+}
+
+
+void Simulator::VisitFPConditionalSelect(Instruction* instr) {
+ AssertSupportedFPCR();
+
+ Instr selected;
+ if (ConditionPassed(static_cast<Condition>(instr->Condition()))) {
+ selected = instr->Rn();
+ } else {
+ selected = instr->Rm();
+ }
+
+ switch (instr->Mask(FPConditionalSelectMask)) {
+ case FCSEL_s: set_sreg(instr->Rd(), sreg(selected)); break;
+ case FCSEL_d: set_dreg(instr->Rd(), dreg(selected)); break;
+ default: UNIMPLEMENTED();
+ }
+}
+
+
+void Simulator::VisitFPDataProcessing1Source(Instruction* instr) {
+ AssertSupportedFPCR();
+
+ unsigned fd = instr->Rd();
+ unsigned fn = instr->Rn();
+
+ switch (instr->Mask(FPDataProcessing1SourceMask)) {
+ case FMOV_s: set_sreg(fd, sreg(fn)); break;
+ case FMOV_d: set_dreg(fd, dreg(fn)); break;
+ case FABS_s: set_sreg(fd, std::fabs(sreg(fn))); break;
+ case FABS_d: set_dreg(fd, std::fabs(dreg(fn))); break;
+ case FNEG_s: set_sreg(fd, -sreg(fn)); break;
+ case FNEG_d: set_dreg(fd, -dreg(fn)); break;
+ case FSQRT_s: set_sreg(fd, FPSqrt(sreg(fn))); break;
+ case FSQRT_d: set_dreg(fd, FPSqrt(dreg(fn))); break;
+ case FRINTA_s: set_sreg(fd, FPRoundInt(sreg(fn), FPTieAway)); break;
+ case FRINTA_d: set_dreg(fd, FPRoundInt(dreg(fn), FPTieAway)); break;
+ case FRINTN_s: set_sreg(fd, FPRoundInt(sreg(fn), FPTieEven)); break;
+ case FRINTN_d: set_dreg(fd, FPRoundInt(dreg(fn), FPTieEven)); break;
+ case FRINTZ_s: set_sreg(fd, FPRoundInt(sreg(fn), FPZero)); break;
+ case FRINTZ_d: set_dreg(fd, FPRoundInt(dreg(fn), FPZero)); break;
+ case FCVT_ds: set_dreg(fd, FPToDouble(sreg(fn))); break;
+ case FCVT_sd: set_sreg(fd, FPToFloat(dreg(fn), FPTieEven)); break;
+ default: UNIMPLEMENTED();
+ }
+}
+
+
+// Assemble the specified IEEE-754 components into the target type and apply
+// appropriate rounding.
+// sign: 0 = positive, 1 = negative
+// exponent: Unbiased IEEE-754 exponent.
+// mantissa: The mantissa of the input. The top bit (which is not encoded for
+// normal IEEE-754 values) must not be omitted. This bit has the
+// value 'pow(2, exponent)'.
+//
+// The input value is assumed to be a normalized value. That is, the input may
+// not be infinity or NaN. If the source value is subnormal, it must be
+// normalized before calling this function such that the highest set bit in the
+// mantissa has the value 'pow(2, exponent)'.
+//
+// Callers should use FPRoundToFloat or FPRoundToDouble directly, rather than
+// calling a templated FPRound.
+template <class T, int ebits, int mbits>
+static T FPRound(int64_t sign, int64_t exponent, uint64_t mantissa,
+ FPRounding round_mode) {
+ ASSERT((sign == 0) || (sign == 1));
+
+ // Only the FPTieEven rounding mode is implemented.
+ ASSERT(round_mode == FPTieEven);
+ USE(round_mode);
+
+ // Rounding can promote subnormals to normals, and normals to infinities. For
+ // example, a double with exponent 127 (FLT_MAX_EXP) would appear to be
+ // encodable as a float, but rounding based on the low-order mantissa bits
+ // could make it overflow. With ties-to-even rounding, this value would become
+ // an infinity.
+
+ // ---- Rounding Method ----
+ //
+ // The exponent is irrelevant in the rounding operation, so we treat the
+ // lowest-order bit that will fit into the result ('onebit') as having
+ // the value '1'. Similarly, the highest-order bit that won't fit into
+ // the result ('halfbit') has the value '0.5'. The 'point' sits between
+ // 'onebit' and 'halfbit':
+ //
+ // These bits fit into the result.
+ // |---------------------|
+ // mantissa = 0bxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx
+ // ||
+ // / |
+ // / halfbit
+ // onebit
+ //
+ // For subnormal outputs, the range of representable bits is smaller and
+ // the position of onebit and halfbit depends on the exponent of the
+ // input, but the method is otherwise similar.
+ //
+ // onebit(frac)
+ // |
+ // | halfbit(frac) halfbit(adjusted)
+ // | / /
+ // | | |
+ // 0b00.0 (exact) -> 0b00.0 (exact) -> 0b00
+ // 0b00.0... -> 0b00.0... -> 0b00
+ // 0b00.1 (exact) -> 0b00.0111..111 -> 0b00
+ // 0b00.1... -> 0b00.1... -> 0b01
+ // 0b01.0 (exact) -> 0b01.0 (exact) -> 0b01
+ // 0b01.0... -> 0b01.0... -> 0b01
+ // 0b01.1 (exact) -> 0b01.1 (exact) -> 0b10
+ // 0b01.1... -> 0b01.1... -> 0b10
+ // 0b10.0 (exact) -> 0b10.0 (exact) -> 0b10
+ // 0b10.0... -> 0b10.0... -> 0b10
+ // 0b10.1 (exact) -> 0b10.0111..111 -> 0b10
+ // 0b10.1... -> 0b10.1... -> 0b11
+ // 0b11.0 (exact) -> 0b11.0 (exact) -> 0b11
+ // ... / | / |
+ // / | / |
+ // / |
+ // adjusted = frac - (halfbit(mantissa) & ~onebit(frac)); / |
+ //
+ // mantissa = (mantissa >> shift) + halfbit(adjusted);
+
+ static const int mantissa_offset = 0;
+ static const int exponent_offset = mantissa_offset + mbits;
+ static const int sign_offset = exponent_offset + ebits;
+ STATIC_ASSERT(sign_offset == (sizeof(T) * kByteSize - 1));
+
+ // Bail out early for zero inputs.
+ if (mantissa == 0) {
+ return sign << sign_offset;
+ }
+
+ // If all bits in the exponent are set, the value is infinite or NaN.
+ // This is true for all binary IEEE-754 formats.
+ static const int infinite_exponent = (1 << ebits) - 1;
+ static const int max_normal_exponent = infinite_exponent - 1;
+
+ // Apply the exponent bias to encode it for the result. Doing this early makes
+ // it easy to detect values that will be infinite or subnormal.
+ exponent += max_normal_exponent >> 1;
+
+ if (exponent > max_normal_exponent) {
+ // Overflow: The input is too large for the result type to represent. The
+ // FPTieEven rounding mode handles overflows using infinities.
+ exponent = infinite_exponent;
+ mantissa = 0;
+ return (sign << sign_offset) |
+ (exponent << exponent_offset) |
+ (mantissa << mantissa_offset);
+ }
+
+ // Calculate the shift required to move the top mantissa bit to the proper
+ // place in the destination type.
+ const int highest_significant_bit = 63 - CountLeadingZeros(mantissa, 64);
+ int shift = highest_significant_bit - mbits;
+
+ if (exponent <= 0) {
+ // The output will be subnormal (before rounding).
+
+ // For subnormal outputs, the shift must be adjusted by the exponent. The +1
+ // is necessary because the exponent of a subnormal value (encoded as 0) is
+ // the same as the exponent of the smallest normal value (encoded as 1).
+ shift += -exponent + 1;
+
+ // Handle inputs that would produce a zero output.
+ //
+ // Shifts higher than highest_significant_bit+1 will always produce a zero
+ // result. A shift of exactly highest_significant_bit+1 might produce a
+ // non-zero result after rounding.
+ if (shift > (highest_significant_bit + 1)) {
+ // The result will always be +/-0.0.
+ return sign << sign_offset;
+ }
+
+ // Properly encode the exponent for a subnormal output.
+ exponent = 0;
+ } else {
+ // Clear the topmost mantissa bit, since this is not encoded in IEEE-754
+ // normal values.
+ mantissa &= ~(1UL << highest_significant_bit);
+ }
+
+ if (shift > 0) {
+ // We have to shift the mantissa to the right. Some precision is lost, so we
+ // need to apply rounding.
+ uint64_t onebit_mantissa = (mantissa >> (shift)) & 1;
+ uint64_t halfbit_mantissa = (mantissa >> (shift-1)) & 1;
+ uint64_t adjusted = mantissa - (halfbit_mantissa & ~onebit_mantissa);
+ T halfbit_adjusted = (adjusted >> (shift-1)) & 1;
+
+ T result = (sign << sign_offset) |
+ (exponent << exponent_offset) |
+ ((mantissa >> shift) << mantissa_offset);
+
+ // A very large mantissa can overflow during rounding. If this happens, the
+ // exponent should be incremented and the mantissa set to 1.0 (encoded as
+ // 0). Applying halfbit_adjusted after assembling the float has the nice
+ // side-effect that this case is handled for free.
+ //
+ // This also handles cases where a very large finite value overflows to
+ // infinity, or where a very large subnormal value overflows to become
+ // normal.
+ return result + halfbit_adjusted;
+ } else {
+ // We have to shift the mantissa to the left (or not at all). The input
+ // mantissa is exactly representable in the output mantissa, so apply no
+ // rounding correction.
+ return (sign << sign_offset) |
+ (exponent << exponent_offset) |
+ ((mantissa << -shift) << mantissa_offset);
+ }
+}
+
+
+// See FPRound for a description of this function.
+static inline double FPRoundToDouble(int64_t sign, int64_t exponent,
+ uint64_t mantissa, FPRounding round_mode) {
+ int64_t bits =
+ FPRound<int64_t, kDoubleExponentBits, kDoubleMantissaBits>(sign,
+ exponent,
+ mantissa,
+ round_mode);
+ return rawbits_to_double(bits);
+}
+
+
+// See FPRound for a description of this function.
+static inline float FPRoundToFloat(int64_t sign, int64_t exponent,
+ uint64_t mantissa, FPRounding round_mode) {
+ int32_t bits =
+ FPRound<int32_t, kFloatExponentBits, kFloatMantissaBits>(sign,
+ exponent,
+ mantissa,
+ round_mode);
+ return rawbits_to_float(bits);
+}
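+
+// For example, FPRoundToFloat(0, 0, 3, FPTieEven) encodes 1.5f: the top set
+// bit of the mantissa has the value 2^0 = 1 and the bit below it adds 0.5.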
+
+
+double Simulator::FixedToDouble(int64_t src, int fbits, FPRounding round) {
+ if (src >= 0) {
+ return UFixedToDouble(src, fbits, round);
+ } else {
+ // This works for all negative values, including INT64_MIN.
+ return -UFixedToDouble(-src, fbits, round);
+ }
+}
+
+
+double Simulator::UFixedToDouble(uint64_t src, int fbits, FPRounding round) {
+ // An input of 0 is a special case because the result is effectively
+ // subnormal: The exponent is encoded as 0 and there is no implicit 1 bit.
+ if (src == 0) {
+ return 0.0;
+ }
+
+ // Calculate the exponent. The highest significant bit will have the value
+ // 2^exponent.
+ const int highest_significant_bit = 63 - CountLeadingZeros(src, 64);
+ const int64_t exponent = highest_significant_bit - fbits;
+
+ return FPRoundToDouble(0, exponent, src, round);
+}
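+
+// In effect these helpers compute src * 2^-fbits; for example,
+// UFixedToDouble(16, 4, FPTieEven) == 1.0 and
+// FixedToDouble(-16, 4, FPTieEven) == -1.0.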
+
+
+float Simulator::FixedToFloat(int64_t src, int fbits, FPRounding round) {
+ if (src >= 0) {
+ return UFixedToFloat(src, fbits, round);
+ } else {
+ // This works for all negative values, including INT64_MIN.
+ return -UFixedToFloat(-src, fbits, round);
+ }
+}
+
+
+float Simulator::UFixedToFloat(uint64_t src, int fbits, FPRounding round) {
+ // An input of 0 is a special case because the result is effectively
+ // subnormal: The exponent is encoded as 0 and there is no implicit 1 bit.
+ if (src == 0) {
+ return 0.0f;
+ }
+
+ // Calculate the exponent. The highest significant bit will have the value
+ // 2^exponent.
+ const int highest_significant_bit = 63 - CountLeadingZeros(src, 64);
+ const int32_t exponent = highest_significant_bit - fbits;
+
+ return FPRoundToFloat(0, exponent, src, round);
+}
+
+
+double Simulator::FPRoundInt(double value, FPRounding round_mode) {
+ if ((value == 0.0) || (value == kFP64PositiveInfinity) ||
+ (value == kFP64NegativeInfinity)) {
+ return value;
+ } else if (std::isnan(value)) {
+ return FPProcessNaN(value);
+ }
+
+ double int_result = floor(value);
+ double error = value - int_result;
+ switch (round_mode) {
+ case FPTieAway: {
+ // If the error is greater than 0.5, or is equal to 0.5 and the integer
+ // result is positive, round up.
+ if ((error > 0.5) || ((error == 0.5) && (int_result >= 0.0))) {
+ int_result++;
+ }
+ break;
+ }
+ case FPTieEven: {
+ // If the error is greater than 0.5, or is equal to 0.5 and the integer
+ // result is odd, round up.
+ if ((error > 0.5) ||
+ ((error == 0.5) && (fmod(int_result, 2) != 0))) {
+ int_result++;
+ }
+ break;
+ }
+ case FPZero: {
+      // Round towards zero: floor(value) for positive values, ceil(value)
+      // otherwise.
+ if (value < 0) {
+ int_result = ceil(value);
+ }
+ break;
+ }
+ case FPNegativeInfinity: {
+ // We always use floor(value).
+ break;
+ }
+ default: UNIMPLEMENTED();
+ }
+ return int_result;
+}
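+
+// For example, FPRoundInt(2.5, FPTieEven) == 2.0 and
+// FPRoundInt(-2.5, FPTieEven) == -2.0, whereas FPTieAway gives 3.0 and -3.0.
+// FPRoundInt(-1.5, FPZero) == -1.0.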
+
+
+double Simulator::FPToDouble(float value) {
+ switch (std::fpclassify(value)) {
+ case FP_NAN: {
+ if (fpcr().DN()) return kFP64DefaultNaN;
+
+ // Convert NaNs as the processor would:
+ // - The sign is propagated.
+ // - The payload (mantissa) is transferred entirely, except that the top
+ // bit is forced to '1', making the result a quiet NaN. The unused
+ // (low-order) payload bits are set to 0.
+ uint32_t raw = float_to_rawbits(value);
+
+ uint64_t sign = raw >> 31;
+ uint64_t exponent = (1 << 11) - 1;
+ uint64_t payload = unsigned_bitextract_64(21, 0, raw);
+ payload <<= (52 - 23); // The unused low-order bits should be 0.
+ payload |= (1L << 51); // Force a quiet NaN.
+
+ return rawbits_to_double((sign << 63) | (exponent << 52) | payload);
+ }
+
+ case FP_ZERO:
+ case FP_NORMAL:
+ case FP_SUBNORMAL:
+ case FP_INFINITE: {
+ // All other inputs are preserved in a standard cast, because every value
+ // representable using an IEEE-754 float is also representable using an
+ // IEEE-754 double.
+ return static_cast<double>(value);
+ }
+ }
+
+ UNREACHABLE();
+ return static_cast<double>(value);
+}
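+
+// For example, with FPCR.DN clear, the float signalling NaN 0x7f800001 widens
+// to the double quiet NaN 0x7ff8000020000000: the payload bit moves from
+// position 0 to position 29 and bit 51 is set to quieten the result.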
+
+
+float Simulator::FPToFloat(double value, FPRounding round_mode) {
+ // Only the FPTieEven rounding mode is implemented.
+ ASSERT(round_mode == FPTieEven);
+ USE(round_mode);
+
+ switch (std::fpclassify(value)) {
+ case FP_NAN: {
+ if (fpcr().DN()) return kFP32DefaultNaN;
+
+ // Convert NaNs as the processor would:
+ // - The sign is propagated.
+ // - The payload (mantissa) is transferred as much as possible, except
+ // that the top bit is forced to '1', making the result a quiet NaN.
+ uint64_t raw = double_to_rawbits(value);
+
+ uint32_t sign = raw >> 63;
+ uint32_t exponent = (1 << 8) - 1;
+ uint32_t payload = unsigned_bitextract_64(50, 52 - 23, raw);
+ payload |= (1 << 22); // Force a quiet NaN.
+
+ return rawbits_to_float((sign << 31) | (exponent << 23) | payload);
+ }
+
+ case FP_ZERO:
+ case FP_INFINITE: {
+ // In a C++ cast, any value representable in the target type will be
+ // unchanged. This is always the case for +/-0.0 and infinities.
+ return static_cast<float>(value);
+ }
+
+ case FP_NORMAL:
+ case FP_SUBNORMAL: {
+ // Convert double-to-float as the processor would, assuming that FPCR.FZ
+ // (flush-to-zero) is not set.
+ uint64_t raw = double_to_rawbits(value);
+ // Extract the IEEE-754 double components.
+ uint32_t sign = raw >> 63;
+ // Extract the exponent and remove the IEEE-754 encoding bias.
+ int32_t exponent = unsigned_bitextract_64(62, 52, raw) - 1023;
+ // Extract the mantissa and add the implicit '1' bit.
+ uint64_t mantissa = unsigned_bitextract_64(51, 0, raw);
+ if (std::fpclassify(value) == FP_NORMAL) {
+ mantissa |= (1UL << 52);
+ }
+ return FPRoundToFloat(sign, exponent, mantissa, round_mode);
+ }
+ }
+
+ UNREACHABLE();
+ return value;
+}
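+
+// For example, with FPCR.DN clear, the double signalling NaN
+// 0x7ff0000000000001 narrows to 0x7fc00000: the low payload bits are
+// discarded, so only the forced quiet bit survives in the float result.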
+
+
+void Simulator::VisitFPDataProcessing2Source(Instruction* instr) {
+ AssertSupportedFPCR();
+
+ unsigned fd = instr->Rd();
+ unsigned fn = instr->Rn();
+ unsigned fm = instr->Rm();
+
+ // Fmaxnm and Fminnm have special NaN handling.
+ switch (instr->Mask(FPDataProcessing2SourceMask)) {
+ case FMAXNM_s: set_sreg(fd, FPMaxNM(sreg(fn), sreg(fm))); return;
+ case FMAXNM_d: set_dreg(fd, FPMaxNM(dreg(fn), dreg(fm))); return;
+ case FMINNM_s: set_sreg(fd, FPMinNM(sreg(fn), sreg(fm))); return;
+ case FMINNM_d: set_dreg(fd, FPMinNM(dreg(fn), dreg(fm))); return;
+ default:
+ break; // Fall through.
+ }
+
+ if (FPProcessNaNs(instr)) return;
+
+ switch (instr->Mask(FPDataProcessing2SourceMask)) {
+ case FADD_s: set_sreg(fd, FPAdd(sreg(fn), sreg(fm))); break;
+ case FADD_d: set_dreg(fd, FPAdd(dreg(fn), dreg(fm))); break;
+ case FSUB_s: set_sreg(fd, FPSub(sreg(fn), sreg(fm))); break;
+ case FSUB_d: set_dreg(fd, FPSub(dreg(fn), dreg(fm))); break;
+ case FMUL_s: set_sreg(fd, FPMul(sreg(fn), sreg(fm))); break;
+ case FMUL_d: set_dreg(fd, FPMul(dreg(fn), dreg(fm))); break;
+ case FDIV_s: set_sreg(fd, FPDiv(sreg(fn), sreg(fm))); break;
+ case FDIV_d: set_dreg(fd, FPDiv(dreg(fn), dreg(fm))); break;
+ case FMAX_s: set_sreg(fd, FPMax(sreg(fn), sreg(fm))); break;
+ case FMAX_d: set_dreg(fd, FPMax(dreg(fn), dreg(fm))); break;
+ case FMIN_s: set_sreg(fd, FPMin(sreg(fn), sreg(fm))); break;
+ case FMIN_d: set_dreg(fd, FPMin(dreg(fn), dreg(fm))); break;
+ case FMAXNM_s:
+ case FMAXNM_d:
+ case FMINNM_s:
+ case FMINNM_d:
+ // These were handled before the standard FPProcessNaNs() stage.
+ UNREACHABLE();
+ default: UNIMPLEMENTED();
+ }
+}
+
+
+void Simulator::VisitFPDataProcessing3Source(Instruction* instr) {
+ AssertSupportedFPCR();
+
+ unsigned fd = instr->Rd();
+ unsigned fn = instr->Rn();
+ unsigned fm = instr->Rm();
+ unsigned fa = instr->Ra();
+
+ switch (instr->Mask(FPDataProcessing3SourceMask)) {
+ // fd = fa +/- (fn * fm)
+ case FMADD_s: set_sreg(fd, FPMulAdd(sreg(fa), sreg(fn), sreg(fm))); break;
+ case FMSUB_s: set_sreg(fd, FPMulAdd(sreg(fa), -sreg(fn), sreg(fm))); break;
+ case FMADD_d: set_dreg(fd, FPMulAdd(dreg(fa), dreg(fn), dreg(fm))); break;
+ case FMSUB_d: set_dreg(fd, FPMulAdd(dreg(fa), -dreg(fn), dreg(fm))); break;
+ // Negated variants of the above.
+ case FNMADD_s:
+ set_sreg(fd, FPMulAdd(-sreg(fa), -sreg(fn), sreg(fm)));
+ break;
+ case FNMSUB_s:
+ set_sreg(fd, FPMulAdd(-sreg(fa), sreg(fn), sreg(fm)));
+ break;
+ case FNMADD_d:
+ set_dreg(fd, FPMulAdd(-dreg(fa), -dreg(fn), dreg(fm)));
+ break;
+ case FNMSUB_d:
+ set_dreg(fd, FPMulAdd(-dreg(fa), dreg(fn), dreg(fm)));
+ break;
+ default: UNIMPLEMENTED();
+ }
+}
+
+
+template <typename T>
+T Simulator::FPAdd(T op1, T op2) {
+ // NaNs should be handled elsewhere.
+ ASSERT(!std::isnan(op1) && !std::isnan(op2));
+
+ if (std::isinf(op1) && std::isinf(op2) && (op1 != op2)) {
+ // inf + -inf returns the default NaN.
+ return FPDefaultNaN<T>();
+ } else {
+ // Other cases should be handled by standard arithmetic.
+ return op1 + op2;
+ }
+}
+
+
+template <typename T>
+T Simulator::FPDiv(T op1, T op2) {
+ // NaNs should be handled elsewhere.
+ ASSERT(!std::isnan(op1) && !std::isnan(op2));
+
+ if ((std::isinf(op1) && std::isinf(op2)) || ((op1 == 0.0) && (op2 == 0.0))) {
+ // inf / inf and 0.0 / 0.0 return the default NaN.
+ return FPDefaultNaN<T>();
+ } else {
+ // Other cases should be handled by standard arithmetic.
+ return op1 / op2;
+ }
+}
+
+
+template <typename T>
+T Simulator::FPMax(T a, T b) {
+ // NaNs should be handled elsewhere.
+ ASSERT(!std::isnan(a) && !std::isnan(b));
+
+ if ((a == 0.0) && (b == 0.0) &&
+ (copysign(1.0, a) != copysign(1.0, b))) {
+ // a and b are zero, and the sign differs: return +0.0.
+ return 0.0;
+ } else {
+ return (a > b) ? a : b;
+ }
+}
+
+
+template <typename T>
+T Simulator::FPMaxNM(T a, T b) {
+ if (IsQuietNaN(a) && !IsQuietNaN(b)) {
+ a = kFP64NegativeInfinity;
+ } else if (!IsQuietNaN(a) && IsQuietNaN(b)) {
+ b = kFP64NegativeInfinity;
+ }
+
+ T result = FPProcessNaNs(a, b);
+ return std::isnan(result) ? result : FPMax(a, b);
+}
+
+
+template <typename T>
+T Simulator::FPMin(T a, T b) {
+  // NaNs should be handled elsewhere.
+  ASSERT(!std::isnan(a) && !std::isnan(b));
+
+ if ((a == 0.0) && (b == 0.0) &&
+ (copysign(1.0, a) != copysign(1.0, b))) {
+ // a and b are zero, and the sign differs: return -0.0.
+ return -0.0;
+ } else {
+ return (a < b) ? a : b;
+ }
+}
+
+
+template <typename T>
+T Simulator::FPMinNM(T a, T b) {
+ if (IsQuietNaN(a) && !IsQuietNaN(b)) {
+ a = kFP64PositiveInfinity;
+ } else if (!IsQuietNaN(a) && IsQuietNaN(b)) {
+ b = kFP64PositiveInfinity;
+ }
+
+ T result = FPProcessNaNs(a, b);
+ return std::isnan(result) ? result : FPMin(a, b);
+}
+
+
+template <typename T>
+T Simulator::FPMul(T op1, T op2) {
+ // NaNs should be handled elsewhere.
+ ASSERT(!std::isnan(op1) && !std::isnan(op2));
+
+ if ((std::isinf(op1) && (op2 == 0.0)) || (std::isinf(op2) && (op1 == 0.0))) {
+ // inf * 0.0 returns the default NaN.
+ return FPDefaultNaN<T>();
+ } else {
+ // Other cases should be handled by standard arithmetic.
+ return op1 * op2;
+ }
+}
+
+
+template<typename T>
+T Simulator::FPMulAdd(T a, T op1, T op2) {
+ T result = FPProcessNaNs3(a, op1, op2);
+
+ T sign_a = copysign(1.0, a);
+ T sign_prod = copysign(1.0, op1) * copysign(1.0, op2);
+ bool isinf_prod = std::isinf(op1) || std::isinf(op2);
+ bool operation_generates_nan =
+ (std::isinf(op1) && (op2 == 0.0)) || // inf * 0.0
+ (std::isinf(op2) && (op1 == 0.0)) || // 0.0 * inf
+ (std::isinf(a) && isinf_prod && (sign_a != sign_prod)); // inf - inf
+
+ if (std::isnan(result)) {
+ // Generated NaNs override quiet NaNs propagated from a.
+ if (operation_generates_nan && IsQuietNaN(a)) {
+ return FPDefaultNaN<T>();
+ } else {
+ return result;
+ }
+ }
+
+ // If the operation would produce a NaN, return the default NaN.
+ if (operation_generates_nan) {
+ return FPDefaultNaN<T>();
+ }
+
+ // Work around broken fma implementations for exact zero results: The sign of
+ // exact 0.0 results is positive unless both a and op1 * op2 are negative.
+ if (((op1 == 0.0) || (op2 == 0.0)) && (a == 0.0)) {
+ return ((sign_a < 0) && (sign_prod < 0)) ? -0.0 : 0.0;
+ }
+
+ result = FusedMultiplyAdd(op1, op2, a);
+ ASSERT(!std::isnan(result));
+
+ // Work around broken fma implementations for rounded zero results: If a is
+ // 0.0, the sign of the result is the sign of op1 * op2 before rounding.
+ if ((a == 0.0) && (result == 0.0)) {
+ return copysign(0.0, sign_prod);
+ }
+
+ return result;
+}
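+
+// For example, the exact-zero handling above means that
+// FPMulAdd(-0.0, +0.0, +0.0) returns +0.0 while FPMulAdd(-0.0, -0.0, +0.0)
+// returns -0.0, consistent with IEEE-754 addition of signed zeros.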
+
+
+template <typename T>
+T Simulator::FPSqrt(T op) {
+ if (std::isnan(op)) {
+ return FPProcessNaN(op);
+ } else if (op < 0.0) {
+ return FPDefaultNaN<T>();
+ } else {
+ return std::sqrt(op);
+ }
+}
+
+
+template <typename T>
+T Simulator::FPSub(T op1, T op2) {
+ // NaNs should be handled elsewhere.
+ ASSERT(!std::isnan(op1) && !std::isnan(op2));
+
+ if (std::isinf(op1) && std::isinf(op2) && (op1 == op2)) {
+ // inf - inf returns the default NaN.
+ return FPDefaultNaN<T>();
+ } else {
+ // Other cases should be handled by standard arithmetic.
+ return op1 - op2;
+ }
+}
+
+
+template <typename T>
+T Simulator::FPProcessNaN(T op) {
+ ASSERT(std::isnan(op));
+ return fpcr().DN() ? FPDefaultNaN<T>() : ToQuietNaN(op);
+}
+
+
+template <typename T>
+T Simulator::FPProcessNaNs(T op1, T op2) {
+ if (IsSignallingNaN(op1)) {
+ return FPProcessNaN(op1);
+ } else if (IsSignallingNaN(op2)) {
+ return FPProcessNaN(op2);
+ } else if (std::isnan(op1)) {
+ ASSERT(IsQuietNaN(op1));
+ return FPProcessNaN(op1);
+ } else if (std::isnan(op2)) {
+ ASSERT(IsQuietNaN(op2));
+ return FPProcessNaN(op2);
+ } else {
+ return 0.0;
+ }
+}
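+
+// Note that signalling NaNs take priority over quiet NaNs regardless of
+// operand order, so FPProcessNaNs(<quiet NaN>, <signalling NaN>) processes
+// the signalling operand.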
+
+
+template <typename T>
+T Simulator::FPProcessNaNs3(T op1, T op2, T op3) {
+ if (IsSignallingNaN(op1)) {
+ return FPProcessNaN(op1);
+ } else if (IsSignallingNaN(op2)) {
+ return FPProcessNaN(op2);
+ } else if (IsSignallingNaN(op3)) {
+ return FPProcessNaN(op3);
+ } else if (std::isnan(op1)) {
+ ASSERT(IsQuietNaN(op1));
+ return FPProcessNaN(op1);
+ } else if (std::isnan(op2)) {
+ ASSERT(IsQuietNaN(op2));
+ return FPProcessNaN(op2);
+ } else if (std::isnan(op3)) {
+ ASSERT(IsQuietNaN(op3));
+ return FPProcessNaN(op3);
+ } else {
+ return 0.0;
+ }
+}
+
+
+bool Simulator::FPProcessNaNs(Instruction* instr) {
+ unsigned fd = instr->Rd();
+ unsigned fn = instr->Rn();
+ unsigned fm = instr->Rm();
+ bool done = false;
+
+ if (instr->Mask(FP64) == FP64) {
+ double result = FPProcessNaNs(dreg(fn), dreg(fm));
+ if (std::isnan(result)) {
+ set_dreg(fd, result);
+ done = true;
+ }
+ } else {
+ float result = FPProcessNaNs(sreg(fn), sreg(fm));
+ if (std::isnan(result)) {
+ set_sreg(fd, result);
+ done = true;
+ }
+ }
+
+ return done;
+}
+
+
+void Simulator::VisitSystem(Instruction* instr) {
+ // Some system instructions hijack their Op and Cp fields to represent a
+ // range of immediates instead of indicating a different instruction. This
+ // makes the decoding tricky.
+ if (instr->Mask(SystemSysRegFMask) == SystemSysRegFixed) {
+ switch (instr->Mask(SystemSysRegMask)) {
+ case MRS: {
+ switch (instr->ImmSystemRegister()) {
+ case NZCV: set_xreg(instr->Rt(), nzcv().RawValue()); break;
+ case FPCR: set_xreg(instr->Rt(), fpcr().RawValue()); break;
+ default: UNIMPLEMENTED();
+ }
+ break;
+ }
+ case MSR: {
+ switch (instr->ImmSystemRegister()) {
+ case NZCV: nzcv().SetRawValue(xreg(instr->Rt())); break;
+ case FPCR: fpcr().SetRawValue(xreg(instr->Rt())); break;
+ default: UNIMPLEMENTED();
+ }
+ break;
+ }
+ }
+ } else if (instr->Mask(SystemHintFMask) == SystemHintFixed) {
+ ASSERT(instr->Mask(SystemHintMask) == HINT);
+ switch (instr->ImmHint()) {
+ case NOP: break;
+ default: UNIMPLEMENTED();
+ }
+ } else if (instr->Mask(MemBarrierFMask) == MemBarrierFixed) {
+ __sync_synchronize();
+ } else {
+ UNIMPLEMENTED();
+ }
+}
+
+
+bool Simulator::GetValue(const char* desc, int64_t* value) {
+ int regnum = CodeFromName(desc);
+ if (regnum >= 0) {
+ unsigned code = regnum;
+ if (code == kZeroRegCode) {
+ // Catch the zero register and return 0.
+ *value = 0;
+ return true;
+ } else if (code == kSPRegInternalCode) {
+ // Translate the stack pointer code to 31, for Reg31IsStackPointer.
+ code = 31;
+ }
+ if (desc[0] == 'w') {
+ *value = wreg(code, Reg31IsStackPointer);
+ } else {
+ *value = xreg(code, Reg31IsStackPointer);
+ }
+ return true;
+ } else if (strncmp(desc, "0x", 2) == 0) {
+ return SScanF(desc + 2, "%" SCNx64,
+ reinterpret_cast<uint64_t*>(value)) == 1;
+ } else {
+ return SScanF(desc, "%" SCNu64,
+ reinterpret_cast<uint64_t*>(value)) == 1;
+ }
+}
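+
+// GetValue accepts register names ("x0", "w13", "csp"), hexadecimal literals
+// ("0x1234") and decimal literals ("42").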
+
+
+bool Simulator::PrintValue(const char* desc) {
+ if (strcmp(desc, "csp") == 0) {
+ ASSERT(CodeFromName(desc) == static_cast<int>(kSPRegInternalCode));
+ PrintF("%s csp:%s 0x%016" PRIx64 "%s\n",
+ clr_reg_name, clr_reg_value, xreg(31, Reg31IsStackPointer), clr_normal);
+ return true;
+ } else if (strcmp(desc, "wcsp") == 0) {
+ ASSERT(CodeFromName(desc) == static_cast<int>(kSPRegInternalCode));
+ PrintF("%s wcsp:%s 0x%08" PRIx32 "%s\n",
+ clr_reg_name, clr_reg_value, wreg(31, Reg31IsStackPointer), clr_normal);
+ return true;
+ }
+
+ int i = CodeFromName(desc);
+ STATIC_ASSERT(kNumberOfRegisters == kNumberOfFPRegisters);
+ if (i < 0 || static_cast<unsigned>(i) >= kNumberOfFPRegisters) return false;
+
+ if (desc[0] == 'v') {
+ PrintF("%s %s:%s 0x%016" PRIx64 "%s (%s%s:%s %g%s %s:%s %g%s)\n",
+ clr_fpreg_name, VRegNameForCode(i),
+ clr_fpreg_value, double_to_rawbits(dreg(i)),
+ clr_normal,
+ clr_fpreg_name, DRegNameForCode(i),
+ clr_fpreg_value, dreg(i),
+ clr_fpreg_name, SRegNameForCode(i),
+ clr_fpreg_value, sreg(i),
+ clr_normal);
+ return true;
+ } else if (desc[0] == 'd') {
+ PrintF("%s %s:%s %g%s\n",
+ clr_fpreg_name, DRegNameForCode(i),
+ clr_fpreg_value, dreg(i),
+ clr_normal);
+ return true;
+ } else if (desc[0] == 's') {
+ PrintF("%s %s:%s %g%s\n",
+ clr_fpreg_name, SRegNameForCode(i),
+ clr_fpreg_value, sreg(i),
+ clr_normal);
+ return true;
+ } else if (desc[0] == 'w') {
+ PrintF("%s %s:%s 0x%08" PRIx32 "%s\n",
+ clr_reg_name, WRegNameForCode(i), clr_reg_value, wreg(i), clr_normal);
+ return true;
+ } else {
+    // X registers and their aliases start with a variety of characters, so
+    // anything not matched above is treated as an X register.
+ PrintF("%s %s:%s 0x%016" PRIx64 "%s\n",
+ clr_reg_name, XRegNameForCode(i), clr_reg_value, xreg(i), clr_normal);
+ return true;
+ }
+}
+
+
+void Simulator::Debug() {
+#define COMMAND_SIZE 63
+#define ARG_SIZE 255
+
+#define STR(a) #a
+#define XSTR(a) STR(a)
+
+ char cmd[COMMAND_SIZE + 1];
+ char arg1[ARG_SIZE + 1];
+ char arg2[ARG_SIZE + 1];
+ char* argv[3] = { cmd, arg1, arg2 };
+
+  // Make sure each buffer is NUL-terminated if input reaches its size limit.
+ cmd[COMMAND_SIZE] = 0;
+ arg1[ARG_SIZE] = 0;
+ arg2[ARG_SIZE] = 0;
+
+ bool done = false;
+ bool cleared_log_disasm_bit = false;
+
+ while (!done) {
+ // Disassemble the next instruction to execute before doing anything else.
+ PrintInstructionsAt(pc_, 1);
+ // Read the command line.
+ char* line = ReadLine("sim> ");
+ if (line == NULL) {
+ break;
+ } else {
+ // Repeat last command by default.
+ char* last_input = last_debugger_input();
+ if (strcmp(line, "\n") == 0 && (last_input != NULL)) {
+ DeleteArray(line);
+ line = last_input;
+ } else {
+        // Remember the latest command that was run.
+ set_last_debugger_input(line);
+ }
+
+ // Use sscanf to parse the individual parts of the command line. At the
+ // moment no command expects more than two parameters.
+ int argc = SScanF(line,
+ "%" XSTR(COMMAND_SIZE) "s "
+ "%" XSTR(ARG_SIZE) "s "
+ "%" XSTR(ARG_SIZE) "s",
+ cmd, arg1, arg2);
+
+ // stepi / si ------------------------------------------------------------
+ if ((strcmp(cmd, "si") == 0) || (strcmp(cmd, "stepi") == 0)) {
+        // We are about to execute instructions, after which pc_ should be
+        // incremented by default. If pc_modified_ was set when we reached
+        // this debug instruction, it has not been cleared yet because the
+        // instruction has not completed, so clear it manually.
+ pc_modified_ = false;
+
+ if (argc == 1) {
+ ExecuteInstruction();
+ } else {
+ int64_t number_of_instructions_to_execute = 1;
+ GetValue(arg1, &number_of_instructions_to_execute);
+
+ set_log_parameters(log_parameters() | LOG_DISASM);
+ while (number_of_instructions_to_execute-- > 0) {
+ ExecuteInstruction();
+ }
+ set_log_parameters(log_parameters() & ~LOG_DISASM);
+ PrintF("\n");
+ }
+
+        // If it was necessary, the pc has already been updated or incremented
+        // while executing the instruction, so it should not be updated here
+        // again. pc_modified_ will be cleared when this instruction completes.
+ pc_modified_ = true;
+
+ // next / n --------------------------------------------------------------
+ } else if ((strcmp(cmd, "next") == 0) || (strcmp(cmd, "n") == 0)) {
+ // Tell the simulator to break after the next executed BL.
+ break_on_next_ = true;
+ // Continue.
+ done = true;
+
+ // continue / cont / c ---------------------------------------------------
+ } else if ((strcmp(cmd, "continue") == 0) ||
+ (strcmp(cmd, "cont") == 0) ||
+ (strcmp(cmd, "c") == 0)) {
+ // Leave the debugger shell.
+ done = true;
+
+ // disassemble / disasm / di ---------------------------------------------
+ } else if (strcmp(cmd, "disassemble") == 0 ||
+ strcmp(cmd, "disasm") == 0 ||
+ strcmp(cmd, "di") == 0) {
+ int64_t n_of_instrs_to_disasm = 10; // default value.
+ int64_t address = reinterpret_cast<int64_t>(pc_); // default value.
+ if (argc >= 2) { // disasm <n of instrs>
+ GetValue(arg1, &n_of_instrs_to_disasm);
+ }
+ if (argc >= 3) { // disasm <n of instrs> <address>
+ GetValue(arg2, &address);
+ }
+
+ // Disassemble.
+ PrintInstructionsAt(reinterpret_cast<Instruction*>(address),
+ n_of_instrs_to_disasm);
+ PrintF("\n");
+
+ // print / p -------------------------------------------------------------
+ } else if ((strcmp(cmd, "print") == 0) || (strcmp(cmd, "p") == 0)) {
+ if (argc == 2) {
+ if (strcmp(arg1, "all") == 0) {
+ PrintRegisters(true);
+ PrintFPRegisters(true);
+ } else {
+ if (!PrintValue(arg1)) {
+ PrintF("%s unrecognized\n", arg1);
+ }
+ }
+ } else {
+ PrintF(
+ "print <register>\n"
+ " Print the content of a register. (alias 'p')\n"
+ " 'print all' will print all registers.\n"
+ " Use 'printobject' to get more details about the value.\n");
+ }
+
+ // printobject / po ------------------------------------------------------
+ } else if ((strcmp(cmd, "printobject") == 0) ||
+ (strcmp(cmd, "po") == 0)) {
+ if (argc == 2) {
+ int64_t value;
+ if (GetValue(arg1, &value)) {
+ Object* obj = reinterpret_cast<Object*>(value);
+ PrintF("%s: \n", arg1);
+#ifdef DEBUG
+ obj->PrintLn();
+#else
+ obj->ShortPrint();
+ PrintF("\n");
+#endif
+ } else {
+ PrintF("%s unrecognized\n", arg1);
+ }
+ } else {
+ PrintF("printobject <value>\n"
+ "printobject <register>\n"
+ " Print details about the value. (alias 'po')\n");
+ }
+
+ // stack / mem ----------------------------------------------------------
+ } else if (strcmp(cmd, "stack") == 0 || strcmp(cmd, "mem") == 0) {
+ int64_t* cur = NULL;
+ int64_t* end = NULL;
+ int next_arg = 1;
+
+ if (strcmp(cmd, "stack") == 0) {
+ cur = reinterpret_cast<int64_t*>(jssp());
+
+ } else { // "mem"
+ int64_t value;
+ if (!GetValue(arg1, &value)) {
+ PrintF("%s unrecognized\n", arg1);
+ continue;
+ }
+ cur = reinterpret_cast<int64_t*>(value);
+ next_arg++;
+ }
+
+ int64_t words = 0;
+ if (argc == next_arg) {
+ words = 10;
+ } else if (argc == next_arg + 1) {
+ if (!GetValue(argv[next_arg], &words)) {
+ PrintF("%s unrecognized\n", argv[next_arg]);
+          PrintF("Printing 10 double words by default\n");
+ words = 10;
+ }
+ } else {
+ UNREACHABLE();
+ }
+ end = cur + words;
+
+ while (cur < end) {
+ PrintF(" 0x%016" PRIx64 ": 0x%016" PRIx64 " %10" PRId64,
+ reinterpret_cast<uint64_t>(cur), *cur, *cur);
+ HeapObject* obj = reinterpret_cast<HeapObject*>(*cur);
+ int64_t value = *cur;
+ Heap* current_heap = v8::internal::Isolate::Current()->heap();
+ if (((value & 1) == 0) || current_heap->Contains(obj)) {
+ PrintF(" (");
+ if ((value & kSmiTagMask) == 0) {
+ STATIC_ASSERT(kSmiValueSize == 32);
+ int32_t untagged = (value >> kSmiShift) & 0xffffffff;
+ PrintF("smi %" PRId32, untagged);
+ } else {
+ obj->ShortPrint();
+ }
+ PrintF(")");
+ }
+ PrintF("\n");
+ cur++;
+ }
+
+ // trace / t -------------------------------------------------------------
+ } else if (strcmp(cmd, "trace") == 0 || strcmp(cmd, "t") == 0) {
+ if ((log_parameters() & (LOG_DISASM | LOG_REGS)) !=
+ (LOG_DISASM | LOG_REGS)) {
+ PrintF("Enabling disassembly and registers tracing\n");
+ set_log_parameters(log_parameters() | LOG_DISASM | LOG_REGS);
+ } else {
+ PrintF("Disabling disassembly and registers tracing\n");
+ set_log_parameters(log_parameters() & ~(LOG_DISASM | LOG_REGS));
+ }
+
+ // break / b -------------------------------------------------------------
+ } else if (strcmp(cmd, "break") == 0 || strcmp(cmd, "b") == 0) {
+ if (argc == 2) {
+ int64_t value;
+ if (GetValue(arg1, &value)) {
+ SetBreakpoint(reinterpret_cast<Instruction*>(value));
+ } else {
+ PrintF("%s unrecognized\n", arg1);
+ }
+ } else {
+ ListBreakpoints();
+ PrintF("Use `break <address>` to set or disable a breakpoint\n");
+ }
+
+ // gdb -------------------------------------------------------------------
+ } else if (strcmp(cmd, "gdb") == 0) {
+ PrintF("Relinquishing control to gdb.\n");
+ OS::DebugBreak();
+ PrintF("Regaining control from gdb.\n");
+
+ // sysregs ---------------------------------------------------------------
+ } else if (strcmp(cmd, "sysregs") == 0) {
+ PrintSystemRegisters();
+
+ // help / h --------------------------------------------------------------
+ } else if (strcmp(cmd, "help") == 0 || strcmp(cmd, "h") == 0) {
+ PrintF(
+ "stepi / si\n"
+ " stepi <n>\n"
+ " Step <n> instructions.\n"
+ "next / n\n"
+ " Continue execution until a BL instruction is reached.\n"
+ " At this point a breakpoint is set just after this BL.\n"
+ " Then execution is resumed. It will probably later hit the\n"
+ " breakpoint just set.\n"
+ "continue / cont / c\n"
+ " Continue execution from here.\n"
+ "disassemble / disasm / di\n"
+ " disassemble <n> <address>\n"
+ " Disassemble <n> instructions from current <address>.\n"
+          "    By default <n> is 10 and <address> is the current pc.\n"
+ "print / p\n"
+ " print <register>\n"
+ " Print the content of a register.\n"
+ " 'print all' will print all registers.\n"
+ " Use 'printobject' to get more details about the value.\n"
+ "printobject / po\n"
+ " printobject <value>\n"
+ " printobject <register>\n"
+ " Print details about the value.\n"
+          "stack\n"
+          "    stack [<words>]\n"
+          "    Dump stack content (10 words by default).\n"
+          "mem\n"
+          "    mem <address> [<words>]\n"
+          "    Dump memory content (10 words by default).\n"
+ "trace / t\n"
+ " Toggle disassembly and register tracing\n"
+ "break / b\n"
+ " break : list all breakpoints\n"
+ " break <address> : set / enable / disable a breakpoint.\n"
+ "gdb\n"
+ " Enter gdb.\n"
+ "sysregs\n"
+ " Print all system registers (including NZCV).\n");
+ } else {
+ PrintF("Unknown command: %s\n", cmd);
+ PrintF("Use 'help' for more information.\n");
+ }
+ }
+ if (cleared_log_disasm_bit == true) {
+ set_log_parameters(log_parameters_ | LOG_DISASM);
+ }
+ }
+}
+
+
+void Simulator::VisitException(Instruction* instr) {
+ switch (instr->Mask(ExceptionMask)) {
+ case HLT: {
+ if (instr->ImmException() == kImmExceptionIsDebug) {
+ // Read the arguments encoded inline in the instruction stream.
+ uint32_t code;
+ uint32_t parameters;
+
+ memcpy(&code,
+ pc_->InstructionAtOffset(kDebugCodeOffset),
+ sizeof(code));
+ memcpy(&parameters,
+ pc_->InstructionAtOffset(kDebugParamsOffset),
+ sizeof(parameters));
+ char const *message =
+ reinterpret_cast<char const*>(
+ pc_->InstructionAtOffset(kDebugMessageOffset));
+
+ // Always print something when we hit a debug point that breaks.
+ // We are going to break, so printing something is not an issue in
+ // terms of speed.
+ if (FLAG_trace_sim_messages || FLAG_trace_sim || (parameters & BREAK)) {
+ if (message != NULL) {
+ PrintF("%sDebugger hit %d: %s%s%s\n",
+ clr_debug_number,
+ code,
+ clr_debug_message,
+ message,
+ clr_normal);
+ } else {
+ PrintF("%sDebugger hit %d.%s\n",
+ clr_debug_number,
+ code,
+ clr_normal);
+ }
+ }
+
+ // Other options.
+ switch (parameters & kDebuggerTracingDirectivesMask) {
+ case TRACE_ENABLE:
+ set_log_parameters(log_parameters() | parameters);
+ if (parameters & LOG_SYS_REGS) { PrintSystemRegisters(); }
+ if (parameters & LOG_REGS) { PrintRegisters(); }
+ if (parameters & LOG_FP_REGS) { PrintFPRegisters(); }
+ break;
+ case TRACE_DISABLE:
+ set_log_parameters(log_parameters() & ~parameters);
+ break;
+ case TRACE_OVERRIDE:
+ set_log_parameters(parameters);
+ break;
+ default:
+ // We don't support a one-shot LOG_DISASM.
+ ASSERT((parameters & LOG_DISASM) == 0);
+ // Don't print information that is already being traced.
+ parameters &= ~log_parameters();
+ // Print the requested information.
+ if (parameters & LOG_SYS_REGS) PrintSystemRegisters(true);
+ if (parameters & LOG_REGS) PrintRegisters(true);
+ if (parameters & LOG_FP_REGS) PrintFPRegisters(true);
+ }
+
+ // The stop parameters are inlined in the code. Skip them:
+ // - Skip to the end of the message string.
+ size_t size = kDebugMessageOffset + strlen(message) + 1;
+ pc_ = pc_->InstructionAtOffset(RoundUp(size, kInstructionSize));
+ // - Verify that the unreachable marker is present.
+ ASSERT(pc_->Mask(ExceptionMask) == HLT);
+ ASSERT(pc_->ImmException() == kImmExceptionIsUnreachable);
+ // - Skip past the unreachable marker.
+ set_pc(pc_->following());
+
+ // Check if the debugger should break.
+ if (parameters & BREAK) Debug();
+
+ } else if (instr->ImmException() == kImmExceptionIsRedirectedCall) {
+ DoRuntimeCall(instr);
+ } else if (instr->ImmException() == kImmExceptionIsPrintf) {
+ // Read the argument encoded inline in the instruction stream.
+ uint32_t type;
+ memcpy(&type,
+ pc_->InstructionAtOffset(kPrintfTypeOffset),
+ sizeof(type));
+
+ const char* format = reg<const char*>(0);
+
+ // Pass all of the relevant PCS registers onto printf. It doesn't
+ // matter if we pass too many as the extra ones won't be read.
+ int result;
+ fputs(clr_printf, stream_);
+ if (type == CPURegister::kRegister) {
+ result = fprintf(stream_, format,
+ xreg(1), xreg(2), xreg(3), xreg(4),
+ xreg(5), xreg(6), xreg(7));
+ } else if (type == CPURegister::kFPRegister) {
+ result = fprintf(stream_, format,
+ dreg(0), dreg(1), dreg(2), dreg(3),
+ dreg(4), dreg(5), dreg(6), dreg(7));
+ } else {
+ ASSERT(type == CPURegister::kNoRegister);
+ result = fprintf(stream_, "%s", format);
+ }
+ fputs(clr_normal, stream_);
+
+#ifdef DEBUG
+ CorruptAllCallerSavedCPURegisters();
+#endif
+
+ set_xreg(0, result);
+
+ // The printf parameters are inlined in the code, so skip them.
+ set_pc(pc_->InstructionAtOffset(kPrintfLength));
+
+ // Set LR as if we'd just called a native printf function.
+ set_lr(pc());
+
+ } else if (instr->ImmException() == kImmExceptionIsUnreachable) {
+ fprintf(stream_, "Hit UNREACHABLE marker at PC=%p.\n",
+ reinterpret_cast<void*>(pc_));
+ abort();
+
+ } else {
+ OS::DebugBreak();
+ }
+ break;
+ }
+
+ default:
+ UNIMPLEMENTED();
+ }
+}
+
+#endif // USE_SIMULATOR
+
+} } // namespace v8::internal
+
+#endif // V8_TARGET_ARCH_ARM64
diff --git a/deps/v8/src/arm64/simulator-arm64.h b/deps/v8/src/arm64/simulator-arm64.h
new file mode 100644
index 0000000000..6a7353b461
--- /dev/null
+++ b/deps/v8/src/arm64/simulator-arm64.h
@@ -0,0 +1,908 @@
+// Copyright 2013 the V8 project authors. All rights reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following
+// disclaimer in the documentation and/or other materials provided
+// with the distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived
+// from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+#ifndef V8_ARM64_SIMULATOR_ARM64_H_
+#define V8_ARM64_SIMULATOR_ARM64_H_
+
+#include <stdarg.h>
+#include <vector>
+
+#include "v8.h"
+
+#include "globals.h"
+#include "utils.h"
+#include "allocation.h"
+#include "assembler.h"
+#include "arm64/assembler-arm64.h"
+#include "arm64/decoder-arm64.h"
+#include "arm64/disasm-arm64.h"
+#include "arm64/instrument-arm64.h"
+
+#define REGISTER_CODE_LIST(R) \
+R(0) R(1) R(2) R(3) R(4) R(5) R(6) R(7) \
+R(8) R(9) R(10) R(11) R(12) R(13) R(14) R(15) \
+R(16) R(17) R(18) R(19) R(20) R(21) R(22) R(23) \
+R(24) R(25) R(26) R(27) R(28) R(29) R(30) R(31)
+
+namespace v8 {
+namespace internal {
+
+#if !defined(USE_SIMULATOR)
+
+// Running without a simulator on a native ARM64 platform.
+// When running without a simulator we call the entry directly.
+#define CALL_GENERATED_CODE(entry, p0, p1, p2, p3, p4) \
+ (entry(p0, p1, p2, p3, p4))
+
+typedef int (*arm64_regexp_matcher)(String* input,
+ int64_t start_offset,
+ const byte* input_start,
+ const byte* input_end,
+ int* output,
+ int64_t output_size,
+ Address stack_base,
+ int64_t direct_call,
+ void* return_address,
+ Isolate* isolate);
+
+// Call the generated regexp code directly. The code at the entry address
+// should act as a function matching the type arm64_regexp_matcher.
+// The ninth argument is a dummy that reserves the space used for
+// the return address added by the ExitFrame in native calls.
+#define CALL_GENERATED_REGEXP_CODE(entry, p0, p1, p2, p3, p4, p5, p6, p7, p8) \
+ (FUNCTION_CAST<arm64_regexp_matcher>(entry)( \
+ p0, p1, p2, p3, p4, p5, p6, p7, NULL, p8))
+
+#define TRY_CATCH_FROM_ADDRESS(try_catch_address) \
+ reinterpret_cast<TryCatch*>(try_catch_address)
+
+// Running without a simulator there is nothing to do.
+class SimulatorStack : public v8::internal::AllStatic {
+ public:
+ static uintptr_t JsLimitFromCLimit(v8::internal::Isolate* isolate,
+ uintptr_t c_limit) {
+ USE(isolate);
+ return c_limit;
+ }
+
+ static uintptr_t RegisterCTryCatch(uintptr_t try_catch_address) {
+ return try_catch_address;
+ }
+
+ static void UnregisterCTryCatch() { }
+};
+
+#else // !defined(USE_SIMULATOR)
+
+enum ReverseByteMode {
+ Reverse16 = 0,
+ Reverse32 = 1,
+ Reverse64 = 2
+};
+
+
+// The proper way to initialize a simulated system register (such as NZCV) is as
+// follows:
+// SimSystemRegister nzcv = SimSystemRegister::DefaultValueFor(NZCV);
+class SimSystemRegister {
+ public:
+ // The default constructor represents a register which has no writable bits.
+ // It is not possible to set its value to anything other than 0.
+ SimSystemRegister() : value_(0), write_ignore_mask_(0xffffffff) { }
+
+ uint32_t RawValue() const {
+ return value_;
+ }
+
+ void SetRawValue(uint32_t new_value) {
+ value_ = (value_ & write_ignore_mask_) | (new_value & ~write_ignore_mask_);
+ }
+
+ uint32_t Bits(int msb, int lsb) const {
+ return unsigned_bitextract_32(msb, lsb, value_);
+ }
+
+ int32_t SignedBits(int msb, int lsb) const {
+ return signed_bitextract_32(msb, lsb, value_);
+ }
+
+ void SetBits(int msb, int lsb, uint32_t bits);
+
+ // Default system register values.
+ static SimSystemRegister DefaultValueFor(SystemRegister id);
+
+#define DEFINE_GETTER(Name, HighBit, LowBit, Func, Type) \
+ Type Name() const { return static_cast<Type>(Func(HighBit, LowBit)); } \
+ void Set##Name(Type bits) { \
+ SetBits(HighBit, LowBit, static_cast<Type>(bits)); \
+ }
+#define DEFINE_WRITE_IGNORE_MASK(Name, Mask) \
+ static const uint32_t Name##WriteIgnoreMask = ~static_cast<uint32_t>(Mask);
+ SYSTEM_REGISTER_FIELDS_LIST(DEFINE_GETTER, DEFINE_WRITE_IGNORE_MASK)
+#undef DEFINE_WRITE_IGNORE_MASK
+#undef DEFINE_GETTER
+
+ protected:
+ // Most system registers only implement a few of the bits in the word. Other
+ // bits are "read-as-zero, write-ignored". The write_ignore_mask argument
+ // describes the bits which are not modifiable.
+ SimSystemRegister(uint32_t value, uint32_t write_ignore_mask)
+ : value_(value), write_ignore_mask_(write_ignore_mask) { }
+
+ uint32_t value_;
+ uint32_t write_ignore_mask_;
+};
+
+
+// Represent a register (r0-r31, v0-v31).
+template<int kSizeInBytes>
+class SimRegisterBase {
+ public:
+ template<typename T>
+ void Set(T new_value, unsigned size = sizeof(T)) {
+ ASSERT(size <= kSizeInBytes);
+ ASSERT(size <= sizeof(new_value));
+    // All AArch64 registers are zero-extending: writing a W register clears
+    // the top bits of the corresponding X register.
+ memset(value_, 0, kSizeInBytes);
+ memcpy(value_, &new_value, size);
+ }
+
+ // Copy 'size' bytes of the register to the result, and zero-extend to fill
+ // the result.
+ template<typename T>
+ T Get(unsigned size = sizeof(T)) const {
+ ASSERT(size <= kSizeInBytes);
+ T result;
+ memset(&result, 0, sizeof(result));
+ memcpy(&result, value_, size);
+ return result;
+ }
+
+ protected:
+ uint8_t value_[kSizeInBytes];
+};
+typedef SimRegisterBase<kXRegSize> SimRegister; // r0-r31
+typedef SimRegisterBase<kDRegSize> SimFPRegister; // v0-v31
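+
+// Writes zero-extend, so (assuming a little-endian host) storing a W value
+// and reading the register back as an X value clears the high half:
+//   SimRegister r;
+//   r.Set<uint32_t>(0xffffffff);
+//   uint64_t x = r.Get<uint64_t>();  // x == 0x00000000ffffffff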
+
+
+class Simulator : public DecoderVisitor {
+ public:
+ explicit Simulator(Decoder<DispatchingDecoderVisitor>* decoder,
+ Isolate* isolate = NULL,
+ FILE* stream = stderr);
+ Simulator();
+ ~Simulator();
+
+ // System functions.
+
+ static void Initialize(Isolate* isolate);
+
+ static Simulator* current(v8::internal::Isolate* isolate);
+
+ class CallArgument;
+
+  // Call an arbitrary function taking an arbitrary number of arguments. The
+  // 'args' array must hold CallArgument values and be terminated by
+  // CallArgument::End().
+ void CallVoid(byte* entry, CallArgument* args);
+
+ // Like CallVoid, but expect a return value.
+ int64_t CallInt64(byte* entry, CallArgument* args);
+ double CallDouble(byte* entry, CallArgument* args);
+
+ // V8 calls into generated JS code with 5 parameters and into
+ // generated RegExp code with 10 parameters. These are convenience functions,
+ // which set up the simulator state and grab the result on return.
+ int64_t CallJS(byte* entry,
+ byte* function_entry,
+ JSFunction* func,
+                 Object* recv,
+ int64_t argc,
+ Object*** argv);
+ int64_t CallRegExp(byte* entry,
+ String* input,
+ int64_t start_offset,
+ const byte* input_start,
+ const byte* input_end,
+ int* output,
+ int64_t output_size,
+ Address stack_base,
+ int64_t direct_call,
+ void* return_address,
+ Isolate* isolate);
+
+ // A wrapper class that stores an argument for one of the above Call
+ // functions.
+ //
+ // Only arguments up to 64 bits in size are supported.
+ class CallArgument {
+ public:
+ template<typename T>
+ explicit CallArgument(T argument) {
+ ASSERT(sizeof(argument) <= sizeof(bits_));
+ memcpy(&bits_, &argument, sizeof(argument));
+ type_ = X_ARG;
+ }
+
+ explicit CallArgument(double argument) {
+ ASSERT(sizeof(argument) == sizeof(bits_));
+ memcpy(&bits_, &argument, sizeof(argument));
+ type_ = D_ARG;
+ }
+
+ explicit CallArgument(float argument) {
+ // TODO(all): CallArgument(float) is untested, remove this check once
+ // tested.
+ UNIMPLEMENTED();
+ // Make the D register a NaN to try to trap errors if the callee expects a
+ // double. If it expects a float, the callee should ignore the top word.
+ ASSERT(sizeof(kFP64SignallingNaN) == sizeof(bits_));
+ memcpy(&bits_, &kFP64SignallingNaN, sizeof(kFP64SignallingNaN));
+ // Write the float payload to the S register.
+ ASSERT(sizeof(argument) <= sizeof(bits_));
+ memcpy(&bits_, &argument, sizeof(argument));
+ type_ = D_ARG;
+ }
+
+ // This indicates the end of the arguments list, so that CallArgument
+ // objects can be passed into varargs functions.
+ static CallArgument End() { return CallArgument(); }
+
+ int64_t bits() const { return bits_; }
+ bool IsEnd() const { return type_ == NO_ARG; }
+ bool IsX() const { return type_ == X_ARG; }
+ bool IsD() const { return type_ == D_ARG; }
+
+ private:
+ enum CallArgumentType { X_ARG, D_ARG, NO_ARG };
+
+ // All arguments are aligned to at least 64 bits and we don't support
+ // passing bigger arguments, so the payload size can be fixed at 64 bits.
+ int64_t bits_;
+ CallArgumentType type_;
+
+ CallArgument() { type_ = NO_ARG; }
+ };
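+
+  // Illustrative use of the Call helpers (the names 'entry' and 'x' are
+  // placeholders): build an End()-terminated argument array and pass it in.
+  //   CallArgument args[] = { CallArgument(x), CallArgument(1.0),
+  //                           CallArgument::End() };
+  //   simulator->CallVoid(entry, args);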
+
+
+ // Start the debugging command line.
+ void Debug();
+
+ bool GetValue(const char* desc, int64_t* value);
+
+ bool PrintValue(const char* desc);
+
+ // Push an address onto the JS stack.
+ uintptr_t PushAddress(uintptr_t address);
+
+ // Pop an address from the JS stack.
+ uintptr_t PopAddress();
+
+ // Accessor to the internal simulator stack area.
+ uintptr_t StackLimit() const;
+
+ void ResetState();
+
+ // Runtime call support.
+ static void* RedirectExternalReference(void* external_function,
+ ExternalReference::Type type);
+ void DoRuntimeCall(Instruction* instr);
+
+ // Run the simulator.
+ static const Instruction* kEndOfSimAddress;
+ void DecodeInstruction();
+ void Run();
+ void RunFrom(Instruction* start);
+
+ // Simulation helpers.
+ template <typename T>
+ void set_pc(T new_pc) {
+ ASSERT(sizeof(T) == sizeof(pc_));
+ memcpy(&pc_, &new_pc, sizeof(T));
+ pc_modified_ = true;
+ }
+ Instruction* pc() { return pc_; }
+
+ void increment_pc() {
+ if (!pc_modified_) {
+ pc_ = pc_->following();
+ }
+
+ pc_modified_ = false;
+ }
+
+ virtual void Decode(Instruction* instr) {
+ decoder_->Decode(instr);
+ }
+
+ void ExecuteInstruction() {
+ ASSERT(IsAligned(reinterpret_cast<uintptr_t>(pc_), kInstructionSize));
+ CheckBreakNext();
+ Decode(pc_);
+ LogProcessorState();
+ increment_pc();
+ CheckBreakpoints();
+ }
+
+ // Declare all Visitor functions.
+ #define DECLARE(A) void Visit##A(Instruction* instr);
+ VISITOR_LIST(DECLARE)
+ #undef DECLARE
+
+ // Register accessors.
+
+ // Return 'size' bits of the value of an integer register, as the specified
+ // type. The value is zero-extended to fill the result.
+ //
+ // The only supported values of 'size' are kXRegSizeInBits and
+ // kWRegSizeInBits.
+ template<typename T>
+ T reg(unsigned size, unsigned code,
+ Reg31Mode r31mode = Reg31IsZeroRegister) const {
+ unsigned size_in_bytes = size / 8;
+ ASSERT(size_in_bytes <= sizeof(T));
+ ASSERT((size == kXRegSizeInBits) || (size == kWRegSizeInBits));
+ ASSERT(code < kNumberOfRegisters);
+
+ if ((code == 31) && (r31mode == Reg31IsZeroRegister)) {
+ T result;
+ memset(&result, 0, sizeof(result));
+ return result;
+ }
+ return registers_[code].Get<T>(size_in_bytes);
+ }
+
+ // Like reg(), but infer the access size from the template type.
+ template<typename T>
+ T reg(unsigned code, Reg31Mode r31mode = Reg31IsZeroRegister) const {
+ return reg<T>(sizeof(T) * 8, code, r31mode);
+ }
+
+ // Common specialized accessors for the reg() template.
+ int32_t wreg(unsigned code,
+ Reg31Mode r31mode = Reg31IsZeroRegister) const {
+ return reg<int32_t>(code, r31mode);
+ }
+
+ int64_t xreg(unsigned code,
+ Reg31Mode r31mode = Reg31IsZeroRegister) const {
+ return reg<int64_t>(code, r31mode);
+ }
+
+ int64_t reg(unsigned size, unsigned code,
+ Reg31Mode r31mode = Reg31IsZeroRegister) const {
+ return reg<int64_t>(size, code, r31mode);
+ }
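+
+  // Note that, by default, register 31 reads as the zero register: xreg(31)
+  // returns 0, whereas xreg(31, Reg31IsStackPointer) returns the stack
+  // pointer.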
+
+ // Write 'size' bits of 'value' into an integer register. The value is
+ // zero-extended. This behaviour matches AArch64 register writes.
+ //
+ // The only supported values of 'size' are kXRegSizeInBits and
+ // kWRegSizeInBits.
+ template<typename T>
+ void set_reg(unsigned size, unsigned code, T value,
+ Reg31Mode r31mode = Reg31IsZeroRegister) {
+ unsigned size_in_bytes = size / 8;
+ ASSERT(size_in_bytes <= sizeof(T));
+ ASSERT((size == kXRegSizeInBits) || (size == kWRegSizeInBits));
+ ASSERT(code < kNumberOfRegisters);
+
+ if ((code == 31) && (r31mode == Reg31IsZeroRegister)) {
+ return;
+ }
+ return registers_[code].Set(value, size_in_bytes);
+ }
+
+ // Like set_reg(), but infer the access size from the template type.
+ template<typename T>
+ void set_reg(unsigned code, T value,
+ Reg31Mode r31mode = Reg31IsZeroRegister) {
+ set_reg(sizeof(value) * 8, code, value, r31mode);
+ }
+
+ // Common specialized accessors for the set_reg() template.
+ void set_wreg(unsigned code, int32_t value,
+ Reg31Mode r31mode = Reg31IsZeroRegister) {
+ set_reg(kWRegSizeInBits, code, value, r31mode);
+ }
+
+ void set_xreg(unsigned code, int64_t value,
+ Reg31Mode r31mode = Reg31IsZeroRegister) {
+ set_reg(kXRegSizeInBits, code, value, r31mode);
+ }
+
+ // Commonly-used special cases.
+ template<typename T>
+ void set_lr(T value) {
+ ASSERT(sizeof(T) == kPointerSize);
+ set_reg(kLinkRegCode, value);
+ }
+
+ template<typename T>
+ void set_sp(T value) {
+ ASSERT(sizeof(T) == kPointerSize);
+ set_reg(31, value, Reg31IsStackPointer);
+ }
+
+ int64_t sp() { return xreg(31, Reg31IsStackPointer); }
+ int64_t jssp() { return xreg(kJSSPCode, Reg31IsStackPointer); }
+ int64_t fp() {
+ return xreg(kFramePointerRegCode, Reg31IsStackPointer);
+ }
+ Instruction* lr() { return reg<Instruction*>(kLinkRegCode); }
+
+ Address get_sp() { return reg<Address>(31, Reg31IsStackPointer); }
+
+ // Return 'size' bits of the value of a floating-point register, as the
+ // specified type. The value is zero-extended to fill the result.
+ //
+ // The only supported values of 'size' are kDRegSizeInBits and
+ // kSRegSizeInBits.
+ template<typename T>
+ T fpreg(unsigned size, unsigned code) const {
+ unsigned size_in_bytes = size / 8;
+ ASSERT(size_in_bytes <= sizeof(T));
+ ASSERT((size == kDRegSizeInBits) || (size == kSRegSizeInBits));
+ ASSERT(code < kNumberOfFPRegisters);
+ return fpregisters_[code].Get<T>(size_in_bytes);
+ }
+
+ // Like fpreg(), but infer the access size from the template type.
+ template<typename T>
+ T fpreg(unsigned code) const {
+ return fpreg<T>(sizeof(T) * 8, code);
+ }
+
+ // Common specialized accessors for the fpreg() template.
+ float sreg(unsigned code) const {
+ return fpreg<float>(code);
+ }
+
+ uint32_t sreg_bits(unsigned code) const {
+ return fpreg<uint32_t>(code);
+ }
+
+ double dreg(unsigned code) const {
+ return fpreg<double>(code);
+ }
+
+ uint64_t dreg_bits(unsigned code) const {
+ return fpreg<uint64_t>(code);
+ }
+
+ double fpreg(unsigned size, unsigned code) const {
+ switch (size) {
+ case kSRegSizeInBits: return sreg(code);
+ case kDRegSizeInBits: return dreg(code);
+ default:
+ UNREACHABLE();
+ return 0.0;
+ }
+ }
+
+ // Write 'value' into a floating-point register. The value is zero-extended.
+ // This behaviour matches AArch64 register writes.
+ template<typename T>
+ void set_fpreg(unsigned code, T value) {
+ ASSERT((sizeof(value) == kDRegSize) || (sizeof(value) == kSRegSize));
+ ASSERT(code < kNumberOfFPRegisters);
+ fpregisters_[code].Set(value, sizeof(value));
+ }
+
+ // Common specialized accessors for the set_fpreg() template.
+ void set_sreg(unsigned code, float value) {
+ set_fpreg(code, value);
+ }
+
+ void set_sreg_bits(unsigned code, uint32_t value) {
+ set_fpreg(code, value);
+ }
+
+ void set_dreg(unsigned code, double value) {
+ set_fpreg(code, value);
+ }
+
+ void set_dreg_bits(unsigned code, uint64_t value) {
+ set_fpreg(code, value);
+ }
+
+ SimSystemRegister& nzcv() { return nzcv_; }
+ SimSystemRegister& fpcr() { return fpcr_; }
+
+ // Debug helpers
+
+ // Simulator breakpoints.
+ struct Breakpoint {
+ Instruction* location;
+ bool enabled;
+ };
+ std::vector<Breakpoint> breakpoints_;
+ void SetBreakpoint(Instruction* breakpoint);
+ void ListBreakpoints();
+ void CheckBreakpoints();
+
+ // Helpers for the 'next' command.
+ // When this is set, the Simulator will insert a breakpoint after the next BL
+ // instruction it meets.
+ bool break_on_next_;
+ // Check if the Simulator should insert a break after the current instruction
+ // for the 'next' command.
+ void CheckBreakNext();
+
+ // Disassemble instruction at the given address.
+ void PrintInstructionsAt(Instruction* pc, uint64_t count);
+
+ void PrintSystemRegisters(bool print_all = false);
+ void PrintRegisters(bool print_all_regs = false);
+ void PrintFPRegisters(bool print_all_regs = false);
+ void PrintProcessorState();
+ void PrintWrite(uint8_t* address, uint64_t value, unsigned num_bytes);
+ void LogSystemRegisters() {
+ if (log_parameters_ & LOG_SYS_REGS) PrintSystemRegisters();
+ }
+ void LogRegisters() {
+ if (log_parameters_ & LOG_REGS) PrintRegisters();
+ }
+ void LogFPRegisters() {
+ if (log_parameters_ & LOG_FP_REGS) PrintFPRegisters();
+ }
+ void LogProcessorState() {
+ LogSystemRegisters();
+ LogRegisters();
+ LogFPRegisters();
+ }
+ void LogWrite(uint8_t* address, uint64_t value, unsigned num_bytes) {
+ if (log_parameters_ & LOG_WRITE) PrintWrite(address, value, num_bytes);
+ }
+
+ int log_parameters() { return log_parameters_; }
+ void set_log_parameters(int new_parameters) {
+ log_parameters_ = new_parameters;
+ if (!decoder_) {
+ if (new_parameters & LOG_DISASM) {
+ PrintF("Run --debug-sim to dynamically turn on disassembler\n");
+ }
+ return;
+ }
+ if (new_parameters & LOG_DISASM) {
+ decoder_->InsertVisitorBefore(print_disasm_, this);
+ } else {
+ decoder_->RemoveVisitor(print_disasm_);
+ }
+ }
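+ // For example, set_log_parameters(log_parameters() | LOG_DISASM) turns on
+ // instruction tracing when a decoder is available; otherwise a hint to use
+ // --debug-sim is printed.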
+
+ static inline const char* WRegNameForCode(unsigned code,
+ Reg31Mode mode = Reg31IsZeroRegister);
+ static inline const char* XRegNameForCode(unsigned code,
+ Reg31Mode mode = Reg31IsZeroRegister);
+ static inline const char* SRegNameForCode(unsigned code);
+ static inline const char* DRegNameForCode(unsigned code);
+ static inline const char* VRegNameForCode(unsigned code);
+ static inline int CodeFromName(const char* name);
+
+ protected:
+ // Simulation helpers ------------------------------------
+ bool ConditionPassed(Condition cond) {
+ SimSystemRegister& flags = nzcv();
+ switch (cond) {
+ case eq:
+ return flags.Z();
+ case ne:
+ return !flags.Z();
+ case hs:
+ return flags.C();
+ case lo:
+ return !flags.C();
+ case mi:
+ return flags.N();
+ case pl:
+ return !flags.N();
+ case vs:
+ return flags.V();
+ case vc:
+ return !flags.V();
+ case hi:
+ return flags.C() && !flags.Z();
+ case ls:
+ return !(flags.C() && !flags.Z());
+ case ge:
+ return flags.N() == flags.V();
+ case lt:
+ return flags.N() != flags.V();
+ case gt:
+ return !flags.Z() && (flags.N() == flags.V());
+ case le:
+ return !(!flags.Z() && (flags.N() == flags.V()));
+ case nv: // Fall through.
+ case al:
+ return true;
+ default:
+ UNREACHABLE();
+ return false;
+ }
+ }
+
+ bool ConditionFailed(Condition cond) {
+ return !ConditionPassed(cond);
+ }
+
+ void AddSubHelper(Instruction* instr, int64_t op2);
+ int64_t AddWithCarry(unsigned reg_size,
+ bool set_flags,
+ int64_t src1,
+ int64_t src2,
+ int64_t carry_in = 0);
+ void LogicalHelper(Instruction* instr, int64_t op2);
+ void ConditionalCompareHelper(Instruction* instr, int64_t op2);
+ void LoadStoreHelper(Instruction* instr,
+ int64_t offset,
+ AddrMode addrmode);
+ void LoadStorePairHelper(Instruction* instr, AddrMode addrmode);
+ uint8_t* LoadStoreAddress(unsigned addr_reg,
+ int64_t offset,
+ AddrMode addrmode);
+ void LoadStoreWriteBack(unsigned addr_reg,
+ int64_t offset,
+ AddrMode addrmode);
+ void CheckMemoryAccess(uint8_t* address, uint8_t* stack);
+
+ uint64_t MemoryRead(uint8_t* address, unsigned num_bytes);
+ uint8_t MemoryRead8(uint8_t* address);
+ uint16_t MemoryRead16(uint8_t* address);
+ uint32_t MemoryRead32(uint8_t* address);
+ float MemoryReadFP32(uint8_t* address);
+ uint64_t MemoryRead64(uint8_t* address);
+ double MemoryReadFP64(uint8_t* address);
+
+ void MemoryWrite(uint8_t* address, uint64_t value, unsigned num_bytes);
+ void MemoryWrite32(uint8_t* address, uint32_t value);
+ void MemoryWriteFP32(uint8_t* address, float value);
+ void MemoryWrite64(uint8_t* address, uint64_t value);
+ void MemoryWriteFP64(uint8_t* address, double value);
+
+ int64_t ShiftOperand(unsigned reg_size,
+ int64_t value,
+ Shift shift_type,
+ unsigned amount);
+ int64_t Rotate(unsigned reg_width,
+ int64_t value,
+ Shift shift_type,
+ unsigned amount);
+ int64_t ExtendValue(unsigned reg_width,
+ int64_t value,
+ Extend extend_type,
+ unsigned left_shift = 0);
+
+ uint64_t ReverseBits(uint64_t value, unsigned num_bits);
+ uint64_t ReverseBytes(uint64_t value, ReverseByteMode mode);
+
+ template <typename T>
+ T FPDefaultNaN() const;
+
+ void FPCompare(double val0, double val1);
+ double FPRoundInt(double value, FPRounding round_mode);
+ double FPToDouble(float value);
+ float FPToFloat(double value, FPRounding round_mode);
+ double FixedToDouble(int64_t src, int fbits, FPRounding round_mode);
+ double UFixedToDouble(uint64_t src, int fbits, FPRounding round_mode);
+ float FixedToFloat(int64_t src, int fbits, FPRounding round_mode);
+ float UFixedToFloat(uint64_t src, int fbits, FPRounding round_mode);
+ int32_t FPToInt32(double value, FPRounding rmode);
+ int64_t FPToInt64(double value, FPRounding rmode);
+ uint32_t FPToUInt32(double value, FPRounding rmode);
+ uint64_t FPToUInt64(double value, FPRounding rmode);
+
+ template <typename T>
+ T FPAdd(T op1, T op2);
+
+ template <typename T>
+ T FPDiv(T op1, T op2);
+
+ template <typename T>
+ T FPMax(T a, T b);
+
+ template <typename T>
+ T FPMaxNM(T a, T b);
+
+ template <typename T>
+ T FPMin(T a, T b);
+
+ template <typename T>
+ T FPMinNM(T a, T b);
+
+ template <typename T>
+ T FPMul(T op1, T op2);
+
+ template <typename T>
+ T FPMulAdd(T a, T op1, T op2);
+
+ template <typename T>
+ T FPSqrt(T op);
+
+ template <typename T>
+ T FPSub(T op1, T op2);
+
+ // Standard NaN processing.
+ template <typename T>
+ T FPProcessNaN(T op);
+
+ bool FPProcessNaNs(Instruction* instr);
+
+ template <typename T>
+ T FPProcessNaNs(T op1, T op2);
+
+ template <typename T>
+ T FPProcessNaNs3(T op1, T op2, T op3);
+
+ void CheckStackAlignment();
+
+ inline void CheckPCSComplianceAndRun();
+
+#ifdef DEBUG
+ // Corruption values should have their least significant byte cleared to
+ // allow the code of the register being corrupted to be inserted.
+ static const uint64_t kCallerSavedRegisterCorruptionValue =
+ 0xca11edc0de000000UL;
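+ // For example, x13 would presumably be corrupted with 0xca11edc0de00000dUL.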
+ // This value is a NaN in both 32-bit and 64-bit FP.
+ static const uint64_t kCallerSavedFPRegisterCorruptionValue =
+ 0x7ff000007f801000UL;
+ // This value is a mix of a 32-bit/64-bit NaN and a "verbose" immediate.
+ static const uint64_t kDefaultCPURegisterCorruptionValue =
+ 0x7ffbad007f8bad00UL;
+
+ void CorruptRegisters(CPURegList* list,
+ uint64_t value = kDefaultCPURegisterCorruptionValue);
+ void CorruptAllCallerSavedCPURegisters();
+#endif
+
+ // Processor state ---------------------------------------
+
+ // Output stream.
+ FILE* stream_;
+ PrintDisassembler* print_disasm_;
+
+ // Instrumentation.
+ Instrument* instrument_;
+
+ // General purpose registers. Register 31 is the stack pointer.
+ SimRegister registers_[kNumberOfRegisters];
+
+ // Floating point registers
+ SimFPRegister fpregisters_[kNumberOfFPRegisters];
+
+ // Processor state
+ // bits[31, 28]: Condition flags N, Z, C, and V.
+ // (Negative, Zero, Carry, Overflow)
+ SimSystemRegister nzcv_;
+
+ // Floating-Point Control Register
+ SimSystemRegister fpcr_;
+
+ // Only a subset of FPCR features are supported by the simulator. This helper
+ // checks that the FPCR settings are supported.
+ //
+ // This is checked when floating-point instructions are executed, not when
+ // FPCR is set. This allows generated code to modify FPCR for external
+ // functions, or to save and restore it when entering and leaving generated
+ // code.
+ void AssertSupportedFPCR() {
+ ASSERT(fpcr().FZ() == 0); // No flush-to-zero support.
+ ASSERT(fpcr().RMode() == FPTieEven); // Ties-to-even rounding only.
+
+ // The simulator does not support half-precision operations so fpcr().AHP()
+ // is irrelevant, and is not checked here.
+ }
+
+ static int CalcNFlag(uint64_t result, unsigned reg_size) {
+ return (result >> (reg_size - 1)) & 1;
+ }
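+ // For example, CalcNFlag(0x80000000, 32) is 1, whereas
+ // CalcNFlag(0x7fffffff, 32) is 0.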
+
+ static int CalcZFlag(uint64_t result) {
+ return result == 0;
+ }
+
+ static const uint32_t kConditionFlagsMask = 0xf0000000;
+
+ // Stack
+ byte* stack_;
+ static const intptr_t stack_protection_size_ = KB;
+ intptr_t stack_size_;
+ byte* stack_limit_;
+
+ Decoder<DispatchingDecoderVisitor>* decoder_;
+ Decoder<DispatchingDecoderVisitor>* disassembler_decoder_;
+
+ // Indicates if the pc has been modified by the instruction and should not be
+ // automatically incremented.
+ bool pc_modified_;
+ Instruction* pc_;
+
+ static const char* xreg_names[];
+ static const char* wreg_names[];
+ static const char* sreg_names[];
+ static const char* dreg_names[];
+ static const char* vreg_names[];
+
+ // Debugger input.
+ void set_last_debugger_input(char* input) {
+ DeleteArray(last_debugger_input_);
+ last_debugger_input_ = input;
+ }
+ char* last_debugger_input() { return last_debugger_input_; }
+ char* last_debugger_input_;
+
+ private:
+ void Init(FILE* stream);
+
+ int log_parameters_;
+ Isolate* isolate_;
+};
+
+
+// When running with the simulator, transition into simulated execution at
+// this point.
+#define CALL_GENERATED_CODE(entry, p0, p1, p2, p3, p4) \
+ reinterpret_cast<Object*>(Simulator::current(Isolate::Current())->CallJS( \
+ FUNCTION_ADDR(entry), \
+ p0, p1, p2, p3, p4))
+
+#define CALL_GENERATED_REGEXP_CODE(entry, p0, p1, p2, p3, p4, p5, p6, p7, p8) \
+ Simulator::current(Isolate::Current())->CallRegExp( \
+ entry, \
+ p0, p1, p2, p3, p4, p5, p6, p7, NULL, p8)
+
+#define TRY_CATCH_FROM_ADDRESS(try_catch_address) \
+ try_catch_address == NULL ? \
+ NULL : *(reinterpret_cast<TryCatch**>(try_catch_address))
+
+
+// The simulator has its own stack. Thus it has a different stack limit from
+// the C-based native code.
+// See also 'class SimulatorStack' in arm/simulator-arm.h.
+class SimulatorStack : public v8::internal::AllStatic {
+ public:
+ static uintptr_t JsLimitFromCLimit(v8::internal::Isolate* isolate,
+ uintptr_t c_limit) {
+ return Simulator::current(isolate)->StackLimit();
+ }
+
+ static uintptr_t RegisterCTryCatch(uintptr_t try_catch_address) {
+ Simulator* sim = Simulator::current(Isolate::Current());
+ return sim->PushAddress(try_catch_address);
+ }
+
+ static void UnregisterCTryCatch() {
+ Simulator::current(Isolate::Current())->PopAddress();
+ }
+};
+
+#endif // !defined(USE_SIMULATOR)
+
+} } // namespace v8::internal
+
+#endif // V8_ARM64_SIMULATOR_ARM64_H_
diff --git a/deps/v8/src/arm64/stub-cache-arm64.cc b/deps/v8/src/arm64/stub-cache-arm64.cc
new file mode 100644
index 0000000000..1b2e959936
--- /dev/null
+++ b/deps/v8/src/arm64/stub-cache-arm64.cc
@@ -0,0 +1,1496 @@
+// Copyright 2013 the V8 project authors. All rights reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following
+// disclaimer in the documentation and/or other materials provided
+// with the distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived
+// from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+#include "v8.h"
+
+#if V8_TARGET_ARCH_ARM64
+
+#include "ic-inl.h"
+#include "codegen.h"
+#include "stub-cache.h"
+
+namespace v8 {
+namespace internal {
+
+
+#define __ ACCESS_MASM(masm)
+
+
+void StubCompiler::GenerateDictionaryNegativeLookup(MacroAssembler* masm,
+ Label* miss_label,
+ Register receiver,
+ Handle<Name> name,
+ Register scratch0,
+ Register scratch1) {
+ ASSERT(!AreAliased(receiver, scratch0, scratch1));
+ ASSERT(name->IsUniqueName());
+ Counters* counters = masm->isolate()->counters();
+ __ IncrementCounter(counters->negative_lookups(), 1, scratch0, scratch1);
+ __ IncrementCounter(counters->negative_lookups_miss(), 1, scratch0, scratch1);
+
+ Label done;
+
+ const int kInterceptorOrAccessCheckNeededMask =
+ (1 << Map::kHasNamedInterceptor) | (1 << Map::kIsAccessCheckNeeded);
+
+ // Bail out if the receiver has a named interceptor or requires access checks.
+ Register map = scratch1;
+ __ Ldr(map, FieldMemOperand(receiver, HeapObject::kMapOffset));
+ __ Ldrb(scratch0, FieldMemOperand(map, Map::kBitFieldOffset));
+ __ Tst(scratch0, kInterceptorOrAccessCheckNeededMask);
+ __ B(ne, miss_label);
+
+ // Check that receiver is a JSObject.
+ __ Ldrb(scratch0, FieldMemOperand(map, Map::kInstanceTypeOffset));
+ __ Cmp(scratch0, FIRST_SPEC_OBJECT_TYPE);
+ __ B(lt, miss_label);
+
+ // Load properties array.
+ Register properties = scratch0;
+ __ Ldr(properties, FieldMemOperand(receiver, JSObject::kPropertiesOffset));
+ // Check that the properties array is a dictionary.
+ __ Ldr(map, FieldMemOperand(properties, HeapObject::kMapOffset));
+ __ JumpIfNotRoot(map, Heap::kHashTableMapRootIndex, miss_label);
+
+ NameDictionaryLookupStub::GenerateNegativeLookup(masm,
+ miss_label,
+ &done,
+ receiver,
+ properties,
+ name,
+ scratch1);
+ __ Bind(&done);
+ __ DecrementCounter(counters->negative_lookups_miss(), 1, scratch0, scratch1);
+}
+
+
+// Probe primary or secondary table.
+// If the entry is found in the cache, the generated code jumps to the first
+// instruction of the stub in the cache.
+// If there is a miss, the code falls through.
+//
+// 'receiver', 'name' and 'offset' registers are preserved on miss.
+static void ProbeTable(Isolate* isolate,
+ MacroAssembler* masm,
+ Code::Flags flags,
+ StubCache::Table table,
+ Register receiver,
+ Register name,
+ Register offset,
+ Register scratch,
+ Register scratch2,
+ Register scratch3) {
+ // Some code below relies on the fact that the Entry struct contains
+ // 3 pointers (name, code, map).
+ STATIC_ASSERT(sizeof(StubCache::Entry) == (3 * kPointerSize));
+
+ ExternalReference key_offset(isolate->stub_cache()->key_reference(table));
+ ExternalReference value_offset(isolate->stub_cache()->value_reference(table));
+ ExternalReference map_offset(isolate->stub_cache()->map_reference(table));
+
+ uintptr_t key_off_addr = reinterpret_cast<uintptr_t>(key_offset.address());
+ uintptr_t value_off_addr =
+ reinterpret_cast<uintptr_t>(value_offset.address());
+ uintptr_t map_off_addr = reinterpret_cast<uintptr_t>(map_offset.address());
+
+ Label miss;
+
+ ASSERT(!AreAliased(name, offset, scratch, scratch2, scratch3));
+
+ // Multiply by 3 because there are 3 fields per entry.
+ __ Add(scratch3, offset, Operand(offset, LSL, 1));
+
+ // Calculate the base address of the entry.
+ __ Mov(scratch, key_offset);
+ __ Add(scratch, scratch, Operand(scratch3, LSL, kPointerSizeLog2));
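+ // That is, scratch = key_offset + (offset * 3) * kPointerSize.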
+
+ // Check that the key in the entry matches the name.
+ __ Ldr(scratch2, MemOperand(scratch));
+ __ Cmp(name, scratch2);
+ __ B(ne, &miss);
+
+ // Check the map matches.
+ __ Ldr(scratch2, MemOperand(scratch, map_off_addr - key_off_addr));
+ __ Ldr(scratch3, FieldMemOperand(receiver, HeapObject::kMapOffset));
+ __ Cmp(scratch2, scratch3);
+ __ B(ne, &miss);
+
+ // Get the code entry from the cache.
+ __ Ldr(scratch, MemOperand(scratch, value_off_addr - key_off_addr));
+
+ // Check that the flags match what we're looking for.
+ __ Ldr(scratch2.W(), FieldMemOperand(scratch, Code::kFlagsOffset));
+ __ Bic(scratch2.W(), scratch2.W(), Code::kFlagsNotUsedInLookup);
+ __ Cmp(scratch2.W(), flags);
+ __ B(ne, &miss);
+
+#ifdef DEBUG
+ if (FLAG_test_secondary_stub_cache && table == StubCache::kPrimary) {
+ __ B(&miss);
+ } else if (FLAG_test_primary_stub_cache && table == StubCache::kSecondary) {
+ __ B(&miss);
+ }
+#endif
+
+ // Jump to the first instruction in the code stub.
+ __ Add(scratch, scratch, Code::kHeaderSize - kHeapObjectTag);
+ __ Br(scratch);
+
+ // Miss: fall through.
+ __ Bind(&miss);
+}
+
+
+void StubCache::GenerateProbe(MacroAssembler* masm,
+ Code::Flags flags,
+ Register receiver,
+ Register name,
+ Register scratch,
+ Register extra,
+ Register extra2,
+ Register extra3) {
+ Isolate* isolate = masm->isolate();
+ Label miss;
+
+ // Make sure the flags do not name a specific type.
+ ASSERT(Code::ExtractTypeFromFlags(flags) == 0);
+
+ // Make sure that there are no register conflicts.
+ ASSERT(!AreAliased(receiver, name, scratch, extra, extra2, extra3));
+
+ // Make sure the extra, extra2 and extra3 registers are valid.
+ ASSERT(!extra.is(no_reg));
+ ASSERT(!extra2.is(no_reg));
+ ASSERT(!extra3.is(no_reg));
+
+ Counters* counters = masm->isolate()->counters();
+ __ IncrementCounter(counters->megamorphic_stub_cache_probes(), 1,
+ extra2, extra3);
+
+ // Check that the receiver isn't a smi.
+ __ JumpIfSmi(receiver, &miss);
+
+ // Compute the hash for primary table.
+ __ Ldr(scratch, FieldMemOperand(name, Name::kHashFieldOffset));
+ __ Ldr(extra, FieldMemOperand(receiver, HeapObject::kMapOffset));
+ __ Add(scratch, scratch, extra);
+ __ Eor(scratch, scratch, flags);
+ // We shift out the last two bits because they are not part of the hash.
+ __ Ubfx(scratch, scratch, kHeapObjectTagSize,
+ CountTrailingZeros(kPrimaryTableSize, 64));
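+ // scratch now holds ((hash + map) ^ flags) >> kHeapObjectTagSize, reduced
+ // modulo kPrimaryTableSize.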
+
+ // Probe the primary table.
+ ProbeTable(isolate, masm, flags, kPrimary, receiver, name,
+ scratch, extra, extra2, extra3);
+
+ // Primary miss: Compute hash for secondary table.
+ __ Sub(scratch, scratch, Operand(name, LSR, kHeapObjectTagSize));
+ __ Add(scratch, scratch, flags >> kHeapObjectTagSize);
+ __ And(scratch, scratch, kSecondaryTableSize - 1);
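+ // That is, the secondary hash is (primary_hash - (name >> kHeapObjectTagSize)
+ // + (flags >> kHeapObjectTagSize)) modulo kSecondaryTableSize.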
+
+ // Probe the secondary table.
+ ProbeTable(isolate, masm, flags, kSecondary, receiver, name,
+ scratch, extra, extra2, extra3);
+
+ // Cache miss: Fall-through and let caller handle the miss by
+ // entering the runtime system.
+ __ Bind(&miss);
+ __ IncrementCounter(counters->megamorphic_stub_cache_misses(), 1,
+ extra2, extra3);
+}
+
+
+void StubCompiler::GenerateLoadGlobalFunctionPrototype(MacroAssembler* masm,
+ int index,
+ Register prototype) {
+ // Load the global or builtins object from the current context.
+ __ Ldr(prototype, GlobalObjectMemOperand());
+ // Load the native context from the global or builtins object.
+ __ Ldr(prototype,
+ FieldMemOperand(prototype, GlobalObject::kNativeContextOffset));
+ // Load the function from the native context.
+ __ Ldr(prototype, ContextMemOperand(prototype, index));
+ // Load the initial map. The global functions all have initial maps.
+ __ Ldr(prototype,
+ FieldMemOperand(prototype, JSFunction::kPrototypeOrInitialMapOffset));
+ // Load the prototype from the initial map.
+ __ Ldr(prototype, FieldMemOperand(prototype, Map::kPrototypeOffset));
+}
+
+
+void StubCompiler::GenerateDirectLoadGlobalFunctionPrototype(
+ MacroAssembler* masm,
+ int index,
+ Register prototype,
+ Label* miss) {
+ Isolate* isolate = masm->isolate();
+ // Get the global function with the given index.
+ Handle<JSFunction> function(
+ JSFunction::cast(isolate->native_context()->get(index)));
+
+ // Check we're still in the same context.
+ Register scratch = prototype;
+ __ Ldr(scratch, GlobalObjectMemOperand());
+ __ Ldr(scratch, FieldMemOperand(scratch, GlobalObject::kNativeContextOffset));
+ __ Ldr(scratch, ContextMemOperand(scratch, index));
+ __ Cmp(scratch, Operand(function));
+ __ B(ne, miss);
+
+ // Load its initial map. The global functions all have initial maps.
+ __ Mov(prototype, Operand(Handle<Map>(function->initial_map())));
+ // Load the prototype from the initial map.
+ __ Ldr(prototype, FieldMemOperand(prototype, Map::kPrototypeOffset));
+}
+
+
+void StubCompiler::GenerateFastPropertyLoad(MacroAssembler* masm,
+ Register dst,
+ Register src,
+ bool inobject,
+ int index,
+ Representation representation) {
+ ASSERT(!representation.IsDouble());
+ USE(representation);
+ if (inobject) {
+ int offset = index * kPointerSize;
+ __ Ldr(dst, FieldMemOperand(src, offset));
+ } else {
+ // Calculate the offset into the properties array.
+ int offset = index * kPointerSize + FixedArray::kHeaderSize;
+ __ Ldr(dst, FieldMemOperand(src, JSObject::kPropertiesOffset));
+ __ Ldr(dst, FieldMemOperand(dst, offset));
+ }
+}
+
+
+void StubCompiler::GenerateLoadArrayLength(MacroAssembler* masm,
+ Register receiver,
+ Register scratch,
+ Label* miss_label) {
+ ASSERT(!AreAliased(receiver, scratch));
+
+ // Check that the receiver isn't a smi.
+ __ JumpIfSmi(receiver, miss_label);
+
+ // Check that the object is a JS array.
+ __ JumpIfNotObjectType(receiver, scratch, scratch, JS_ARRAY_TYPE,
+ miss_label);
+
+ // Load length directly from the JS array.
+ __ Ldr(x0, FieldMemOperand(receiver, JSArray::kLengthOffset));
+ __ Ret();
+}
+
+
+void StubCompiler::GenerateLoadFunctionPrototype(MacroAssembler* masm,
+ Register receiver,
+ Register scratch1,
+ Register scratch2,
+ Label* miss_label) {
+ __ TryGetFunctionPrototype(receiver, scratch1, scratch2, miss_label);
+ // TryGetFunctionPrototype can't put the result directly in x0 because the
+ // 3 input registers can't alias and we call this function from
+ // LoadIC::GenerateFunctionPrototype, where receiver is x0. So we explicitly
+ // move the result into x0.
+ __ Mov(x0, scratch1);
+ __ Ret();
+}
+
+
+// Generate code to check that a global property cell is empty. Create
+// the property cell at compilation time if no cell exists for the
+// property.
+void StubCompiler::GenerateCheckPropertyCell(MacroAssembler* masm,
+ Handle<JSGlobalObject> global,
+ Handle<Name> name,
+ Register scratch,
+ Label* miss) {
+ Handle<Cell> cell = JSGlobalObject::EnsurePropertyCell(global, name);
+ ASSERT(cell->value()->IsTheHole());
+ __ Mov(scratch, Operand(cell));
+ __ Ldr(scratch, FieldMemOperand(scratch, Cell::kValueOffset));
+ __ JumpIfNotRoot(scratch, Heap::kTheHoleValueRootIndex, miss);
+}
+
+
+void StoreStubCompiler::GenerateNegativeHolderLookup(
+ MacroAssembler* masm,
+ Handle<JSObject> holder,
+ Register holder_reg,
+ Handle<Name> name,
+ Label* miss) {
+ if (holder->IsJSGlobalObject()) {
+ GenerateCheckPropertyCell(
+ masm, Handle<JSGlobalObject>::cast(holder), name, scratch1(), miss);
+ } else if (!holder->HasFastProperties() && !holder->IsJSGlobalProxy()) {
+ GenerateDictionaryNegativeLookup(
+ masm, miss, holder_reg, name, scratch1(), scratch2());
+ }
+}
+
+
+// Generate StoreTransition code; the value is passed in the x0 register.
+// When leaving generated code after success, the receiver_reg and storage_reg
+// may be clobbered. Upon branch to miss_label, the receiver and name registers
+// have their original values.
+void StoreStubCompiler::GenerateStoreTransition(MacroAssembler* masm,
+ Handle<JSObject> object,
+ LookupResult* lookup,
+ Handle<Map> transition,
+ Handle<Name> name,
+ Register receiver_reg,
+ Register storage_reg,
+ Register value_reg,
+ Register scratch1,
+ Register scratch2,
+ Register scratch3,
+ Label* miss_label,
+ Label* slow) {
+ Label exit;
+
+ ASSERT(!AreAliased(receiver_reg, storage_reg, value_reg,
+ scratch1, scratch2, scratch3));
+
+ // We don't need scratch3.
+ scratch3 = NoReg;
+
+ int descriptor = transition->LastAdded();
+ DescriptorArray* descriptors = transition->instance_descriptors();
+ PropertyDetails details = descriptors->GetDetails(descriptor);
+ Representation representation = details.representation();
+ ASSERT(!representation.IsNone());
+
+ if (details.type() == CONSTANT) {
+ Handle<Object> constant(descriptors->GetValue(descriptor), masm->isolate());
+ __ LoadObject(scratch1, constant);
+ __ Cmp(value_reg, scratch1);
+ __ B(ne, miss_label);
+ } else if (representation.IsSmi()) {
+ __ JumpIfNotSmi(value_reg, miss_label);
+ } else if (representation.IsHeapObject()) {
+ __ JumpIfSmi(value_reg, miss_label);
+ } else if (representation.IsDouble()) {
+ UseScratchRegisterScope temps(masm);
+ DoubleRegister temp_double = temps.AcquireD();
+ __ SmiUntagToDouble(temp_double, value_reg, kSpeculativeUntag);
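+ // The untag is speculative: if value_reg turns out not to be a smi, the
+ // heap-number path below reloads temp_double from value_reg instead.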
+
+ Label do_store, heap_number;
+ __ AllocateHeapNumber(storage_reg, slow, scratch1, scratch2);
+
+ __ JumpIfSmi(value_reg, &do_store);
+
+ __ CheckMap(value_reg, scratch1, Heap::kHeapNumberMapRootIndex,
+ miss_label, DONT_DO_SMI_CHECK);
+ __ Ldr(temp_double, FieldMemOperand(value_reg, HeapNumber::kValueOffset));
+
+ __ Bind(&do_store);
+ __ Str(temp_double, FieldMemOperand(storage_reg, HeapNumber::kValueOffset));
+ }
+
+ // Stub never generated for non-global objects that require access checks.
+ ASSERT(object->IsJSGlobalProxy() || !object->IsAccessCheckNeeded());
+
+ // Perform map transition for the receiver if necessary.
+ if ((details.type() == FIELD) &&
+ (object->map()->unused_property_fields() == 0)) {
+ // The properties must be extended before we can store the value.
+ // We jump to a runtime call that extends the properties array.
+ __ Mov(scratch1, Operand(transition));
+ __ Push(receiver_reg, scratch1, value_reg);
+ __ TailCallExternalReference(
+ ExternalReference(IC_Utility(IC::kSharedStoreIC_ExtendStorage),
+ masm->isolate()),
+ 3,
+ 1);
+ return;
+ }
+
+ // Update the map of the object.
+ __ Mov(scratch1, Operand(transition));
+ __ Str(scratch1, FieldMemOperand(receiver_reg, HeapObject::kMapOffset));
+
+ // Update the write barrier for the map field.
+ __ RecordWriteField(receiver_reg,
+ HeapObject::kMapOffset,
+ scratch1,
+ scratch2,
+ kLRHasNotBeenSaved,
+ kDontSaveFPRegs,
+ OMIT_REMEMBERED_SET,
+ OMIT_SMI_CHECK);
+
+ if (details.type() == CONSTANT) {
+ ASSERT(value_reg.is(x0));
+ __ Ret();
+ return;
+ }
+
+ int index = transition->instance_descriptors()->GetFieldIndex(
+ transition->LastAdded());
+
+ // Adjust for the number of properties stored in the object. Even in the
+ // face of a transition we can use the old map here because the size of the
+ // object and the number of in-object properties are not going to change.
+ index -= object->map()->inobject_properties();
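+ // A negative index now denotes an in-object property, while a non-negative
+ // index is a slot in the out-of-object properties array.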
+
+ // TODO(verwaest): Share this code as a code stub.
+ SmiCheck smi_check = representation.IsTagged()
+ ? INLINE_SMI_CHECK : OMIT_SMI_CHECK;
+ Register prop_reg = representation.IsDouble() ? storage_reg : value_reg;
+ if (index < 0) {
+ // Set the property straight into the object.
+ int offset = object->map()->instance_size() + (index * kPointerSize);
+ __ Str(prop_reg, FieldMemOperand(receiver_reg, offset));
+
+ if (!representation.IsSmi()) {
+ // Update the write barrier for the array address.
+ if (!representation.IsDouble()) {
+ __ Mov(storage_reg, value_reg);
+ }
+ __ RecordWriteField(receiver_reg,
+ offset,
+ storage_reg,
+ scratch1,
+ kLRHasNotBeenSaved,
+ kDontSaveFPRegs,
+ EMIT_REMEMBERED_SET,
+ smi_check);
+ }
+ } else {
+ // Write to the properties array.
+ int offset = index * kPointerSize + FixedArray::kHeaderSize;
+ // Get the properties array
+ __ Ldr(scratch1,
+ FieldMemOperand(receiver_reg, JSObject::kPropertiesOffset));
+ __ Str(prop_reg, FieldMemOperand(scratch1, offset));
+
+ if (!representation.IsSmi()) {
+ // Update the write barrier for the array address.
+ if (!representation.IsDouble()) {
+ __ Mov(storage_reg, value_reg);
+ }
+ __ RecordWriteField(scratch1,
+ offset,
+ storage_reg,
+ receiver_reg,
+ kLRHasNotBeenSaved,
+ kDontSaveFPRegs,
+ EMIT_REMEMBERED_SET,
+ smi_check);
+ }
+ }
+
+ __ Bind(&exit);
+ // Return the value (register x0).
+ ASSERT(value_reg.is(x0));
+ __ Ret();
+}
+
+
+// Generate StoreField code; the value is passed in the x0 register.
+// When leaving generated code after success, the receiver_reg and name_reg may
+// be clobbered. Upon branch to miss_label, the receiver and name registers have
+// their original values.
+void StoreStubCompiler::GenerateStoreField(MacroAssembler* masm,
+ Handle<JSObject> object,
+ LookupResult* lookup,
+ Register receiver_reg,
+ Register name_reg,
+ Register value_reg,
+ Register scratch1,
+ Register scratch2,
+ Label* miss_label) {
+ // x0 : value
+ Label exit;
+
+ // Stub never generated for non-global objects that require access
+ // checks.
+ ASSERT(object->IsJSGlobalProxy() || !object->IsAccessCheckNeeded());
+
+ int index = lookup->GetFieldIndex().field_index();
+
+ // Adjust for the number of properties stored in the object. Even in the
+ // face of a transition we can use the old map here because the size of the
+ // object and the number of in-object properties are not going to change.
+ index -= object->map()->inobject_properties();
+
+ Representation representation = lookup->representation();
+ ASSERT(!representation.IsNone());
+ if (representation.IsSmi()) {
+ __ JumpIfNotSmi(value_reg, miss_label);
+ } else if (representation.IsHeapObject()) {
+ __ JumpIfSmi(value_reg, miss_label);
+ } else if (representation.IsDouble()) {
+ UseScratchRegisterScope temps(masm);
+ DoubleRegister temp_double = temps.AcquireD();
+
+ __ SmiUntagToDouble(temp_double, value_reg, kSpeculativeUntag);
+
+ // Load the double storage.
+ if (index < 0) {
+ int offset = (index * kPointerSize) + object->map()->instance_size();
+ __ Ldr(scratch1, FieldMemOperand(receiver_reg, offset));
+ } else {
+ int offset = (index * kPointerSize) + FixedArray::kHeaderSize;
+ __ Ldr(scratch1,
+ FieldMemOperand(receiver_reg, JSObject::kPropertiesOffset));
+ __ Ldr(scratch1, FieldMemOperand(scratch1, offset));
+ }
+
+ // Store the value into the storage.
+ Label do_store, heap_number;
+
+ __ JumpIfSmi(value_reg, &do_store);
+
+ __ CheckMap(value_reg, scratch2, Heap::kHeapNumberMapRootIndex,
+ miss_label, DONT_DO_SMI_CHECK);
+ __ Ldr(temp_double, FieldMemOperand(value_reg, HeapNumber::kValueOffset));
+
+ __ Bind(&do_store);
+ __ Str(temp_double, FieldMemOperand(scratch1, HeapNumber::kValueOffset));
+
+ // Return the value (register x0).
+ ASSERT(value_reg.is(x0));
+ __ Ret();
+ return;
+ }
+
+ // TODO(verwaest): Share this code as a code stub.
+ SmiCheck smi_check = representation.IsTagged()
+ ? INLINE_SMI_CHECK : OMIT_SMI_CHECK;
+ if (index < 0) {
+ // Set the property straight into the object.
+ int offset = object->map()->instance_size() + (index * kPointerSize);
+ __ Str(value_reg, FieldMemOperand(receiver_reg, offset));
+
+ if (!representation.IsSmi()) {
+ // Skip updating write barrier if storing a smi.
+ __ JumpIfSmi(value_reg, &exit);
+
+ // Update the write barrier for the array address.
+ // Pass the now unused name_reg as a scratch register.
+ __ Mov(name_reg, value_reg);
+ __ RecordWriteField(receiver_reg,
+ offset,
+ name_reg,
+ scratch1,
+ kLRHasNotBeenSaved,
+ kDontSaveFPRegs,
+ EMIT_REMEMBERED_SET,
+ smi_check);
+ }
+ } else {
+ // Write to the properties array.
+ int offset = index * kPointerSize + FixedArray::kHeaderSize;
+ // Get the properties array
+ __ Ldr(scratch1,
+ FieldMemOperand(receiver_reg, JSObject::kPropertiesOffset));
+ __ Str(value_reg, FieldMemOperand(scratch1, offset));
+
+ if (!representation.IsSmi()) {
+ // Skip updating write barrier if storing a smi.
+ __ JumpIfSmi(value_reg, &exit);
+
+ // Update the write barrier for the array address.
+ // Ok to clobber receiver_reg and name_reg, since we return.
+ __ Mov(name_reg, value_reg);
+ __ RecordWriteField(scratch1,
+ offset,
+ name_reg,
+ receiver_reg,
+ kLRHasNotBeenSaved,
+ kDontSaveFPRegs,
+ EMIT_REMEMBERED_SET,
+ smi_check);
+ }
+ }
+
+ __ Bind(&exit);
+ // Return the value (register x0).
+ ASSERT(value_reg.is(x0));
+ __ Ret();
+}
+
+
+void StoreStubCompiler::GenerateRestoreName(MacroAssembler* masm,
+ Label* label,
+ Handle<Name> name) {
+ if (!label->is_unused()) {
+ __ Bind(label);
+ __ Mov(this->name(), Operand(name));
+ }
+}
+
+
+static void PushInterceptorArguments(MacroAssembler* masm,
+ Register receiver,
+ Register holder,
+ Register name,
+ Handle<JSObject> holder_obj) {
+ STATIC_ASSERT(StubCache::kInterceptorArgsNameIndex == 0);
+ STATIC_ASSERT(StubCache::kInterceptorArgsInfoIndex == 1);
+ STATIC_ASSERT(StubCache::kInterceptorArgsThisIndex == 2);
+ STATIC_ASSERT(StubCache::kInterceptorArgsHolderIndex == 3);
+ STATIC_ASSERT(StubCache::kInterceptorArgsLength == 4);
+
+ __ Push(name);
+ Handle<InterceptorInfo> interceptor(holder_obj->GetNamedInterceptor());
+ ASSERT(!masm->isolate()->heap()->InNewSpace(*interceptor));
+ Register scratch = name;
+ __ Mov(scratch, Operand(interceptor));
+ __ Push(scratch, receiver, holder);
+}
+
+
+static void CompileCallLoadPropertyWithInterceptor(
+ MacroAssembler* masm,
+ Register receiver,
+ Register holder,
+ Register name,
+ Handle<JSObject> holder_obj,
+ IC::UtilityId id) {
+ PushInterceptorArguments(masm, receiver, holder, name, holder_obj);
+
+ __ CallExternalReference(
+ ExternalReference(IC_Utility(id), masm->isolate()),
+ StubCache::kInterceptorArgsLength);
+}
+
+
+// Generate a call to an API function.
+void StubCompiler::GenerateFastApiCall(MacroAssembler* masm,
+ const CallOptimization& optimization,
+ Handle<Map> receiver_map,
+ Register receiver,
+ Register scratch,
+ bool is_store,
+ int argc,
+ Register* values) {
+ ASSERT(!AreAliased(receiver, scratch));
+
+ MacroAssembler::PushPopQueue queue(masm);
+ queue.Queue(receiver);
+ // Write the arguments to the stack frame.
+ for (int i = 0; i < argc; i++) {
+ Register arg = values[argc-1-i];
+ ASSERT(!AreAliased(receiver, scratch, arg));
+ queue.Queue(arg);
+ }
+ queue.PushQueued();
+
+ ASSERT(optimization.is_simple_api_call());
+
+ // ABI for CallApiFunctionStub.
+ Register callee = x0;
+ Register call_data = x4;
+ Register holder = x2;
+ Register api_function_address = x1;
+
+ // Put holder in place.
+ CallOptimization::HolderLookup holder_lookup;
+ Handle<JSObject> api_holder =
+ optimization.LookupHolderOfExpectedType(receiver_map, &holder_lookup);
+ switch (holder_lookup) {
+ case CallOptimization::kHolderIsReceiver:
+ __ Mov(holder, receiver);
+ break;
+ case CallOptimization::kHolderFound:
+ __ LoadObject(holder, api_holder);
+ break;
+ case CallOptimization::kHolderNotFound:
+ UNREACHABLE();
+ break;
+ }
+
+ Isolate* isolate = masm->isolate();
+ Handle<JSFunction> function = optimization.constant_function();
+ Handle<CallHandlerInfo> api_call_info = optimization.api_call_info();
+ Handle<Object> call_data_obj(api_call_info->data(), isolate);
+
+ // Put callee in place.
+ __ LoadObject(callee, function);
+
+ bool call_data_undefined = false;
+ // Put call_data in place.
+ if (isolate->heap()->InNewSpace(*call_data_obj)) {
+ __ LoadObject(call_data, api_call_info);
+ __ Ldr(call_data, FieldMemOperand(call_data, CallHandlerInfo::kDataOffset));
+ } else if (call_data_obj->IsUndefined()) {
+ call_data_undefined = true;
+ __ LoadRoot(call_data, Heap::kUndefinedValueRootIndex);
+ } else {
+ __ LoadObject(call_data, call_data_obj);
+ }
+
+ // Put api_function_address in place.
+ Address function_address = v8::ToCData<Address>(api_call_info->callback());
+ ApiFunction fun(function_address);
+ ExternalReference ref = ExternalReference(&fun,
+ ExternalReference::DIRECT_API_CALL,
+ masm->isolate());
+ __ Mov(api_function_address, ref);
+
+ // Jump to stub.
+ CallApiFunctionStub stub(is_store, call_data_undefined, argc);
+ __ TailCallStub(&stub);
+}
+
+
+void StubCompiler::GenerateTailCall(MacroAssembler* masm, Handle<Code> code) {
+ __ Jump(code, RelocInfo::CODE_TARGET);
+}
+
+
+#undef __
+#define __ ACCESS_MASM(masm())
+
+
+Register StubCompiler::CheckPrototypes(Handle<HeapType> type,
+ Register object_reg,
+ Handle<JSObject> holder,
+ Register holder_reg,
+ Register scratch1,
+ Register scratch2,
+ Handle<Name> name,
+ Label* miss,
+ PrototypeCheckType check) {
+ Handle<Map> receiver_map(IC::TypeToMap(*type, isolate()));
+
+ // object_reg and holder_reg registers can alias.
+ ASSERT(!AreAliased(object_reg, scratch1, scratch2));
+ ASSERT(!AreAliased(holder_reg, scratch1, scratch2));
+
+ // Keep track of the current object in register reg.
+ Register reg = object_reg;
+ int depth = 0;
+
+ Handle<JSObject> current = Handle<JSObject>::null();
+ if (type->IsConstant()) {
+ current = Handle<JSObject>::cast(type->AsConstant());
+ }
+ Handle<JSObject> prototype = Handle<JSObject>::null();
+ Handle<Map> current_map = receiver_map;
+ Handle<Map> holder_map(holder->map());
+ // Traverse the prototype chain and check the maps in the prototype chain for
+ // fast and global objects or do negative lookup for normal objects.
+ while (!current_map.is_identical_to(holder_map)) {
+ ++depth;
+
+ // Only global objects and objects that do not require access
+ // checks are allowed in stubs.
+ ASSERT(current_map->IsJSGlobalProxyMap() ||
+ !current_map->is_access_check_needed());
+
+ prototype = handle(JSObject::cast(current_map->prototype()));
+ if (current_map->is_dictionary_map() &&
+ !current_map->IsJSGlobalObjectMap() &&
+ !current_map->IsJSGlobalProxyMap()) {
+ if (!name->IsUniqueName()) {
+ ASSERT(name->IsString());
+ name = factory()->InternalizeString(Handle<String>::cast(name));
+ }
+ ASSERT(current.is_null() ||
+ (current->property_dictionary()->FindEntry(*name) ==
+ NameDictionary::kNotFound));
+
+ GenerateDictionaryNegativeLookup(masm(), miss, reg, name,
+ scratch1, scratch2);
+
+ __ Ldr(scratch1, FieldMemOperand(reg, HeapObject::kMapOffset));
+ reg = holder_reg; // From now on the object will be in holder_reg.
+ __ Ldr(reg, FieldMemOperand(scratch1, Map::kPrototypeOffset));
+ } else {
+ bool need_map = (depth != 1 || check == CHECK_ALL_MAPS) ||
+ heap()->InNewSpace(*prototype);
+ Register map_reg = NoReg;
+ if (need_map) {
+ map_reg = scratch1;
+ __ Ldr(map_reg, FieldMemOperand(reg, HeapObject::kMapOffset));
+ }
+
+ if (depth != 1 || check == CHECK_ALL_MAPS) {
+ __ CheckMap(map_reg, current_map, miss, DONT_DO_SMI_CHECK);
+ }
+
+ // Check access rights to the global object. This has to happen after
+ // the map check so that we know that the object is actually a global
+ // object.
+ if (current_map->IsJSGlobalProxyMap()) {
+ UseScratchRegisterScope temps(masm());
+ __ CheckAccessGlobalProxy(reg, scratch2, temps.AcquireX(), miss);
+ } else if (current_map->IsJSGlobalObjectMap()) {
+ GenerateCheckPropertyCell(
+ masm(), Handle<JSGlobalObject>::cast(current), name,
+ scratch2, miss);
+ }
+
+ reg = holder_reg; // From now on the object will be in holder_reg.
+
+ if (heap()->InNewSpace(*prototype)) {
+ // The prototype is in new space; we cannot store a reference to it
+ // in the code. Load it from the map.
+ __ Ldr(reg, FieldMemOperand(map_reg, Map::kPrototypeOffset));
+ } else {
+ // The prototype is in old space; load it directly.
+ __ Mov(reg, Operand(prototype));
+ }
+ }
+
+ // Go to the next object in the prototype chain.
+ current = prototype;
+ current_map = handle(current->map());
+ }
+
+ // Log the check depth.
+ LOG(isolate(), IntEvent("check-maps-depth", depth + 1));
+
+ // Check the holder map.
+ if (depth != 0 || check == CHECK_ALL_MAPS) {
+ // Check the holder map.
+ __ CheckMap(reg, scratch1, current_map, miss, DONT_DO_SMI_CHECK);
+ }
+
+ // Perform security check for access to the global object.
+ ASSERT(current_map->IsJSGlobalProxyMap() ||
+ !current_map->is_access_check_needed());
+ if (current_map->IsJSGlobalProxyMap()) {
+ __ CheckAccessGlobalProxy(reg, scratch1, scratch2, miss);
+ }
+
+ // Return the register containing the holder.
+ return reg;
+}
+
+
+void LoadStubCompiler::HandlerFrontendFooter(Handle<Name> name, Label* miss) {
+ if (!miss->is_unused()) {
+ Label success;
+ __ B(&success);
+
+ __ Bind(miss);
+ TailCallBuiltin(masm(), MissBuiltin(kind()));
+
+ __ Bind(&success);
+ }
+}
+
+
+void StoreStubCompiler::HandlerFrontendFooter(Handle<Name> name, Label* miss) {
+ if (!miss->is_unused()) {
+ Label success;
+ __ B(&success);
+
+ GenerateRestoreName(masm(), miss, name);
+ TailCallBuiltin(masm(), MissBuiltin(kind()));
+
+ __ Bind(&success);
+ }
+}
+
+
+Register LoadStubCompiler::CallbackHandlerFrontend(Handle<HeapType> type,
+ Register object_reg,
+ Handle<JSObject> holder,
+ Handle<Name> name,
+ Handle<Object> callback) {
+ Label miss;
+
+ Register reg = HandlerFrontendHeader(type, object_reg, holder, name, &miss);
+ // HandlerFrontendHeader can return its result into scratch1() so do not
+ // use it.
+ Register scratch2 = this->scratch2();
+ Register scratch3 = this->scratch3();
+ Register dictionary = this->scratch4();
+ ASSERT(!AreAliased(reg, scratch2, scratch3, dictionary));
+
+ if (!holder->HasFastProperties() && !holder->IsJSGlobalObject()) {
+ // Load the properties dictionary.
+ __ Ldr(dictionary, FieldMemOperand(reg, JSObject::kPropertiesOffset));
+
+ // Probe the dictionary.
+ Label probe_done;
+ NameDictionaryLookupStub::GeneratePositiveLookup(masm(),
+ &miss,
+ &probe_done,
+ dictionary,
+ this->name(),
+ scratch2,
+ scratch3);
+ __ Bind(&probe_done);
+
+ // If probing finds an entry in the dictionary, scratch3 contains the
+ // pointer into the dictionary. Check that the value is the callback.
+ Register pointer = scratch3;
+ const int kElementsStartOffset = NameDictionary::kHeaderSize +
+ NameDictionary::kElementsStartIndex * kPointerSize;
+ const int kValueOffset = kElementsStartOffset + kPointerSize;
+ __ Ldr(scratch2, FieldMemOperand(pointer, kValueOffset));
+ __ Cmp(scratch2, Operand(callback));
+ __ B(ne, &miss);
+ }
+
+ HandlerFrontendFooter(name, &miss);
+ return reg;
+}
+
+
+void LoadStubCompiler::GenerateLoadField(Register reg,
+ Handle<JSObject> holder,
+ PropertyIndex field,
+ Representation representation) {
+ __ Mov(receiver(), reg);
+ if (kind() == Code::LOAD_IC) {
+ LoadFieldStub stub(field.is_inobject(holder),
+ field.translate(holder),
+ representation);
+ GenerateTailCall(masm(), stub.GetCode(isolate()));
+ } else {
+ KeyedLoadFieldStub stub(field.is_inobject(holder),
+ field.translate(holder),
+ representation);
+ GenerateTailCall(masm(), stub.GetCode(isolate()));
+ }
+}
+
+
+void LoadStubCompiler::GenerateLoadConstant(Handle<Object> value) {
+ // Return the constant value.
+ __ LoadObject(x0, value);
+ __ Ret();
+}
+
+
+void LoadStubCompiler::GenerateLoadCallback(
+ Register reg,
+ Handle<ExecutableAccessorInfo> callback) {
+ ASSERT(!AreAliased(scratch2(), scratch3(), scratch4(), reg));
+
+ // Build the ExecutableAccessorInfo::args_ list on the stack and push the
+ // property name below the exit frame, both to make the GC aware of them and
+ // because we store pointers to them (see args_addr below).
+ STATIC_ASSERT(PropertyCallbackArguments::kHolderIndex == 0);
+ STATIC_ASSERT(PropertyCallbackArguments::kIsolateIndex == 1);
+ STATIC_ASSERT(PropertyCallbackArguments::kReturnValueDefaultValueIndex == 2);
+ STATIC_ASSERT(PropertyCallbackArguments::kReturnValueOffset == 3);
+ STATIC_ASSERT(PropertyCallbackArguments::kDataIndex == 4);
+ STATIC_ASSERT(PropertyCallbackArguments::kThisIndex == 5);
+ STATIC_ASSERT(PropertyCallbackArguments::kArgsLength == 6);
+
+ __ Push(receiver());
+
+ if (heap()->InNewSpace(callback->data())) {
+ __ Mov(scratch3(), Operand(callback));
+ __ Ldr(scratch3(), FieldMemOperand(scratch3(),
+ ExecutableAccessorInfo::kDataOffset));
+ } else {
+ __ Mov(scratch3(), Operand(Handle<Object>(callback->data(), isolate())));
+ }
+ __ LoadRoot(scratch4(), Heap::kUndefinedValueRootIndex);
+ __ Mov(scratch2(), Operand(ExternalReference::isolate_address(isolate())));
+ __ Push(scratch3(), scratch4(), scratch4(), scratch2(), reg, name());
+
+ Register args_addr = scratch2();
+ __ Add(args_addr, __ StackPointer(), kPointerSize);
+
+ // Stack at this point:
+ // sp[40] callback data
+ // sp[32] undefined
+ // sp[24] undefined
+ // sp[16] isolate
+ // args_addr -> sp[8] reg
+ // sp[0] name
+
+ // ABI for CallApiGetter.
+ Register getter_address_reg = x2;
+
+ // Set up the call.
+ Address getter_address = v8::ToCData<Address>(callback->getter());
+ ApiFunction fun(getter_address);
+ ExternalReference::Type type = ExternalReference::DIRECT_GETTER_CALL;
+ ExternalReference ref = ExternalReference(&fun, type, isolate());
+ __ Mov(getter_address_reg, ref);
+
+ CallApiGetterStub stub;
+ __ TailCallStub(&stub);
+}
+
+
+void LoadStubCompiler::GenerateLoadInterceptor(
+ Register holder_reg,
+ Handle<Object> object,
+ Handle<JSObject> interceptor_holder,
+ LookupResult* lookup,
+ Handle<Name> name) {
+ ASSERT(!AreAliased(receiver(), this->name(),
+ scratch1(), scratch2(), scratch3()));
+ ASSERT(interceptor_holder->HasNamedInterceptor());
+ ASSERT(!interceptor_holder->GetNamedInterceptor()->getter()->IsUndefined());
+
+ // So far the most popular follow-ups for interceptor loads are FIELD
+ // and CALLBACKS, so inline only them; other cases may be added later.
+ bool compile_followup_inline = false;
+ if (lookup->IsFound() && lookup->IsCacheable()) {
+ if (lookup->IsField()) {
+ compile_followup_inline = true;
+ } else if (lookup->type() == CALLBACKS &&
+ lookup->GetCallbackObject()->IsExecutableAccessorInfo()) {
+ ExecutableAccessorInfo* callback =
+ ExecutableAccessorInfo::cast(lookup->GetCallbackObject());
+ compile_followup_inline = callback->getter() != NULL &&
+ callback->IsCompatibleReceiver(*object);
+ }
+ }
+
+ if (compile_followup_inline) {
+ // Compile the interceptor call, followed by inline code to load the
+ // property from further up the prototype chain if the call fails.
+ // Check that the maps haven't changed.
+ ASSERT(holder_reg.is(receiver()) || holder_reg.is(scratch1()));
+
+ // Preserve the receiver register explicitly whenever it is different from
+ // the holder and it is needed should the interceptor return without any
+ // result. The CALLBACKS case needs the receiver to be passed into C++
+ // code; the FIELD case might cause a miss during the prototype check.
+ bool must_perform_prototype_check = *interceptor_holder != lookup->holder();
+ bool must_preserve_receiver_reg = !receiver().Is(holder_reg) &&
+ (lookup->type() == CALLBACKS || must_perform_prototype_check);
+
+ // Save necessary data before invoking an interceptor.
+ // Requires a frame to make GC aware of pushed pointers.
+ {
+ FrameScope frame_scope(masm(), StackFrame::INTERNAL);
+ if (must_preserve_receiver_reg) {
+ __ Push(receiver(), holder_reg, this->name());
+ } else {
+ __ Push(holder_reg, this->name());
+ }
+ // Invoke an interceptor. Note: map checks from the receiver to the
+ // interceptor's holder have been compiled before (see the caller of
+ // this method).
+ CompileCallLoadPropertyWithInterceptor(
+ masm(), receiver(), holder_reg, this->name(), interceptor_holder,
+ IC::kLoadPropertyWithInterceptorOnly);
+
+ // Check if the interceptor provided a value for the property. If so,
+ // return immediately.
+ Label interceptor_failed;
+ __ JumpIfRoot(x0,
+ Heap::kNoInterceptorResultSentinelRootIndex,
+ &interceptor_failed);
+ frame_scope.GenerateLeaveFrame();
+ __ Ret();
+
+ __ Bind(&interceptor_failed);
+ if (must_preserve_receiver_reg) {
+ __ Pop(this->name(), holder_reg, receiver());
+ } else {
+ __ Pop(this->name(), holder_reg);
+ }
+ // Leave the internal frame.
+ }
+ GenerateLoadPostInterceptor(holder_reg, interceptor_holder, name, lookup);
+ } else { // !compile_followup_inline
+ // Call the runtime system to load the interceptor.
+ // Check that the maps haven't changed.
+ PushInterceptorArguments(
+ masm(), receiver(), holder_reg, this->name(), interceptor_holder);
+
+ ExternalReference ref =
+ ExternalReference(IC_Utility(IC::kLoadPropertyWithInterceptorForLoad),
+ isolate());
+ __ TailCallExternalReference(ref, StubCache::kInterceptorArgsLength, 1);
+ }
+}
+
+
+void StubCompiler::GenerateBooleanCheck(Register object, Label* miss) {
+ UseScratchRegisterScope temps(masm());
+ // Check that the object is a boolean.
+ Register true_root = temps.AcquireX();
+ Register false_root = temps.AcquireX();
+ ASSERT(!AreAliased(object, true_root, false_root));
+ __ LoadTrueFalseRoots(true_root, false_root);
+ __ Cmp(object, true_root);
+ __ Ccmp(object, false_root, ZFlag, ne);
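+ // Ccmp re-compares against false_root only if the first comparison failed
+ // (ne); otherwise it sets the Z flag directly, so the branch below is taken
+ // only when the object is neither true nor false.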
+ __ B(ne, miss);
+}
+
+
+Handle<Code> StoreStubCompiler::CompileStoreCallback(
+ Handle<JSObject> object,
+ Handle<JSObject> holder,
+ Handle<Name> name,
+ Handle<ExecutableAccessorInfo> callback) {
+ ASM_LOCATION("StoreStubCompiler::CompileStoreCallback");
+ Register holder_reg = HandlerFrontend(
+ IC::CurrentTypeOf(object, isolate()), receiver(), holder, name);
+
+ // Stub never generated for non-global objects that require access checks.
+ ASSERT(holder->IsJSGlobalProxy() || !holder->IsAccessCheckNeeded());
+
+ // receiver() and holder_reg can alias.
+ ASSERT(!AreAliased(receiver(), scratch1(), scratch2(), value()));
+ ASSERT(!AreAliased(holder_reg, scratch1(), scratch2(), value()));
+ __ Mov(scratch1(), Operand(callback));
+ __ Mov(scratch2(), Operand(name));
+ __ Push(receiver(), holder_reg, scratch1(), scratch2(), value());
+
+ // Do tail-call to the runtime system.
+ ExternalReference store_callback_property =
+ ExternalReference(IC_Utility(IC::kStoreCallbackProperty), isolate());
+ __ TailCallExternalReference(store_callback_property, 5, 1);
+
+ // Return the generated code.
+ return GetCode(kind(), Code::FAST, name);
+}
+
+
+#undef __
+#define __ ACCESS_MASM(masm)
+
+
+void StoreStubCompiler::GenerateStoreViaSetter(
+ MacroAssembler* masm,
+ Handle<HeapType> type,
+ Register receiver,
+ Handle<JSFunction> setter) {
+ // ----------- S t a t e -------------
+ // -- lr : return address
+ // -----------------------------------
+ Label miss;
+
+ {
+ FrameScope scope(masm, StackFrame::INTERNAL);
+
+ // Save value register, so we can restore it later.
+ __ Push(value());
+
+ if (!setter.is_null()) {
+ // Call the JavaScript setter with receiver and value on the stack.
+ if (IC::TypeToMap(*type, masm->isolate())->IsJSGlobalObjectMap()) {
+ // Swap in the global receiver.
+ __ Ldr(receiver,
+ FieldMemOperand(
+ receiver, JSGlobalObject::kGlobalReceiverOffset));
+ }
+ __ Push(receiver, value());
+ ParameterCount actual(1);
+ ParameterCount expected(setter);
+ __ InvokeFunction(setter, expected, actual,
+ CALL_FUNCTION, NullCallWrapper());
+ } else {
+ // If we generate a global code snippet for deoptimization only, remember
+ // the place to continue after deoptimization.
+ masm->isolate()->heap()->SetSetterStubDeoptPCOffset(masm->pc_offset());
+ }
+
+ // We have to return the passed value, not the return value of the setter.
+ __ Pop(x0);
+
+ // Restore context register.
+ __ Ldr(cp, MemOperand(fp, StandardFrameConstants::kContextOffset));
+ }
+ __ Ret();
+}
+
+
+#undef __
+#define __ ACCESS_MASM(masm())
+
+
+Handle<Code> StoreStubCompiler::CompileStoreInterceptor(
+ Handle<JSObject> object,
+ Handle<Name> name) {
+ Label miss;
+
+ ASM_LOCATION("StoreStubCompiler::CompileStoreInterceptor");
+
+ __ Push(receiver(), this->name(), value());
+
+ // Do tail-call to the runtime system.
+ ExternalReference store_ic_property =
+ ExternalReference(IC_Utility(IC::kStoreInterceptorProperty), isolate());
+ __ TailCallExternalReference(store_ic_property, 3, 1);
+
+ // Return the generated code.
+ return GetCode(kind(), Code::FAST, name);
+}
+
+
+Handle<Code> LoadStubCompiler::CompileLoadNonexistent(Handle<HeapType> type,
+ Handle<JSObject> last,
+ Handle<Name> name) {
+ NonexistentHandlerFrontend(type, last, name);
+
+ // Return undefined if maps of the full prototype chain are still the
+ // same and no global property with this name contains a value.
+ __ LoadRoot(x0, Heap::kUndefinedValueRootIndex);
+ __ Ret();
+
+ // Return the generated code.
+ return GetCode(kind(), Code::FAST, name);
+}
+
+
+// TODO(all): The so-called scratch registers are significant in some cases. For
+// example, KeyedStoreStubCompiler::registers()[3] (x3) is actually used for
+// KeyedStoreCompiler::transition_map(). We should verify which registers are
+// actually scratch registers, and which are important. For now, we use the same
+// assignments as ARM to remain on the safe side.
+
+Register* LoadStubCompiler::registers() {
+ // receiver, name, scratch1, scratch2, scratch3, scratch4.
+ static Register registers[] = { x0, x2, x3, x1, x4, x5 };
+ return registers;
+}
+
+
+Register* KeyedLoadStubCompiler::registers() {
+ // receiver, name/key, scratch1, scratch2, scratch3, scratch4.
+ static Register registers[] = { x1, x0, x2, x3, x4, x5 };
+ return registers;
+}
+
+
+Register StoreStubCompiler::value() {
+ return x0;
+}
+
+
+Register* StoreStubCompiler::registers() {
+ // receiver, value, scratch1, scratch2, scratch3.
+ static Register registers[] = { x1, x2, x3, x4, x5 };
+ return registers;
+}
+
+
+Register* KeyedStoreStubCompiler::registers() {
+ // receiver, name, scratch1, scratch2, scratch3.
+ static Register registers[] = { x2, x1, x3, x4, x5 };
+ return registers;
+}
+
+
+#undef __
+#define __ ACCESS_MASM(masm)
+
+void LoadStubCompiler::GenerateLoadViaGetter(MacroAssembler* masm,
+ Handle<HeapType> type,
+ Register receiver,
+ Handle<JSFunction> getter) {
+ {
+ FrameScope scope(masm, StackFrame::INTERNAL);
+
+ if (!getter.is_null()) {
+ // Call the JavaScript getter with the receiver on the stack.
+ if (IC::TypeToMap(*type, masm->isolate())->IsJSGlobalObjectMap()) {
+ // Swap in the global receiver.
+ __ Ldr(receiver,
+ FieldMemOperand(
+ receiver, JSGlobalObject::kGlobalReceiverOffset));
+ }
+ __ Push(receiver);
+ ParameterCount actual(0);
+ ParameterCount expected(getter);
+ __ InvokeFunction(getter, expected, actual,
+ CALL_FUNCTION, NullCallWrapper());
+ } else {
+ // If we generate a global code snippet for deoptimization only, remember
+ // the place to continue after deoptimization.
+ masm->isolate()->heap()->SetGetterStubDeoptPCOffset(masm->pc_offset());
+ }
+
+ // Restore context register.
+ __ Ldr(cp, MemOperand(fp, StandardFrameConstants::kContextOffset));
+ }
+ __ Ret();
+}
+
+
+#undef __
+#define __ ACCESS_MASM(masm())
+
+
+Handle<Code> LoadStubCompiler::CompileLoadGlobal(
+ Handle<HeapType> type,
+ Handle<GlobalObject> global,
+ Handle<PropertyCell> cell,
+ Handle<Name> name,
+ bool is_dont_delete) {
+ Label miss;
+ HandlerFrontendHeader(type, receiver(), global, name, &miss);
+
+ // Get the value from the cell.
+ __ Mov(x3, Operand(cell));
+ __ Ldr(x4, FieldMemOperand(x3, Cell::kValueOffset));
+
+ // Check for deleted property if property can actually be deleted.
+ if (!is_dont_delete) {
+ __ JumpIfRoot(x4, Heap::kTheHoleValueRootIndex, &miss);
+ }
+
+ Counters* counters = isolate()->counters();
+ __ IncrementCounter(counters->named_load_global_stub(), 1, x1, x3);
+ __ Mov(x0, x4);
+ __ Ret();
+
+ HandlerFrontendFooter(name, &miss);
+
+ // Return the generated code.
+ return GetCode(kind(), Code::NORMAL, name);
+}
+
+
+Handle<Code> BaseLoadStoreStubCompiler::CompilePolymorphicIC(
+ TypeHandleList* types,
+ CodeHandleList* handlers,
+ Handle<Name> name,
+ Code::StubType type,
+ IcCheckType check) {
+ Label miss;
+
+ if (check == PROPERTY &&
+ (kind() == Code::KEYED_LOAD_IC || kind() == Code::KEYED_STORE_IC)) {
+ __ CompareAndBranch(this->name(), Operand(name), ne, &miss);
+ }
+
+ Label number_case;
+ Label* smi_target = IncludesNumberType(types) ? &number_case : &miss;
+ __ JumpIfSmi(receiver(), smi_target);
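+ // A smi receiver is dispatched to the Number handler when one of the types
+ // is Number; otherwise it goes straight to the miss path.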
+
+ Register map_reg = scratch1();
+ __ Ldr(map_reg, FieldMemOperand(receiver(), HeapObject::kMapOffset));
+ int receiver_count = types->length();
+ int number_of_handled_maps = 0;
+ for (int current = 0; current < receiver_count; ++current) {
+ Handle<HeapType> type = types->at(current);
+ Handle<Map> map = IC::TypeToMap(*type, isolate());
+ if (!map->is_deprecated()) {
+ number_of_handled_maps++;
+ Label try_next;
+ __ Cmp(map_reg, Operand(map));
+ __ B(ne, &try_next);
+ if (type->Is(HeapType::Number())) {
+ ASSERT(!number_case.is_unused());
+ __ Bind(&number_case);
+ }
+ __ Jump(handlers->at(current), RelocInfo::CODE_TARGET);
+ __ Bind(&try_next);
+ }
+ }
+ ASSERT(number_of_handled_maps != 0);
+
+ __ Bind(&miss);
+ TailCallBuiltin(masm(), MissBuiltin(kind()));
+
+ // Return the generated code.
+ InlineCacheState state =
+ (number_of_handled_maps > 1) ? POLYMORPHIC : MONOMORPHIC;
+ return GetICCode(kind(), type, name, state);
+}
+
+
+void StoreStubCompiler::GenerateStoreArrayLength() {
+ // Prepare tail call to StoreIC_ArrayLength.
+ __ Push(receiver(), value());
+
+ ExternalReference ref =
+ ExternalReference(IC_Utility(IC::kStoreIC_ArrayLength),
+ masm()->isolate());
+ __ TailCallExternalReference(ref, 2, 1);
+}
+
+
+Handle<Code> KeyedStoreStubCompiler::CompileStorePolymorphic(
+ MapHandleList* receiver_maps,
+ CodeHandleList* handler_stubs,
+ MapHandleList* transitioned_maps) {
+ Label miss;
+
+ ASM_LOCATION("KeyedStoreStubCompiler::CompileStorePolymorphic");
+
+ __ JumpIfSmi(receiver(), &miss);
+
+ int receiver_count = receiver_maps->length();
+ __ Ldr(scratch1(), FieldMemOperand(receiver(), HeapObject::kMapOffset));
+ for (int i = 0; i < receiver_count; i++) {
+ __ Cmp(scratch1(), Operand(receiver_maps->at(i)));
+
+ Label skip;
+ __ B(&skip, ne);
+ if (!transitioned_maps->at(i).is_null()) {
+ // This argument is used by the handler stub. For example, see
+ // ElementsTransitionGenerator::GenerateMapChangeElementsTransition.
+ __ Mov(transition_map(), Operand(transitioned_maps->at(i)));
+ }
+ __ Jump(handler_stubs->at(i), RelocInfo::CODE_TARGET);
+ __ Bind(&skip);
+ }
+
+ __ Bind(&miss);
+ TailCallBuiltin(masm(), MissBuiltin(kind()));
+
+ return GetICCode(
+ kind(), Code::NORMAL, factory()->empty_string(), POLYMORPHIC);
+}
+
+
+#undef __
+#define __ ACCESS_MASM(masm)
+
+void KeyedLoadStubCompiler::GenerateLoadDictionaryElement(
+ MacroAssembler* masm) {
+ // ---------- S t a t e --------------
+ // -- lr : return address
+ // -- x0 : key
+ // -- x1 : receiver
+ // -----------------------------------
+ Label slow, miss;
+
+ Register result = x0;
+ Register key = x0;
+ Register receiver = x1;
+
+ __ JumpIfNotSmi(key, &miss);
+ __ Ldr(x4, FieldMemOperand(receiver, JSObject::kElementsOffset));
+ __ LoadFromNumberDictionary(&slow, x4, key, result, x2, x3, x5, x6);
+ __ Ret();
+
+ __ Bind(&slow);
+ __ IncrementCounter(
+ masm->isolate()->counters()->keyed_load_external_array_slow(), 1, x2, x3);
+ TailCallBuiltin(masm, Builtins::kKeyedLoadIC_Slow);
+
+ // Miss case, call the runtime.
+ __ Bind(&miss);
+ TailCallBuiltin(masm, Builtins::kKeyedLoadIC_Miss);
+}
+
+
+} } // namespace v8::internal
+
+#endif // V8_TARGET_ARCH_ARM64
diff --git a/deps/v8/src/arm64/utils-arm64.cc b/deps/v8/src/arm64/utils-arm64.cc
new file mode 100644
index 0000000000..e2589f42e8
--- /dev/null
+++ b/deps/v8/src/arm64/utils-arm64.cc
@@ -0,0 +1,112 @@
+// Copyright 2013 the V8 project authors. All rights reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following
+// disclaimer in the documentation and/or other materials provided
+// with the distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived
+// from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+#if V8_TARGET_ARCH_ARM64
+
+#include "arm64/utils-arm64.h"
+
+
+namespace v8 {
+namespace internal {
+
+#define __ assm->
+
+
+int CountLeadingZeros(uint64_t value, int width) {
+ // TODO(jbramley): Optimize this for ARM64 hosts.
+ ASSERT((width == 32) || (width == 64));
+ int count = 0;
+ uint64_t bit_test = 1ULL << (width - 1);
+ while ((count < width) && ((bit_test & value) == 0)) {
+ count++;
+ bit_test >>= 1;
+ }
+ return count;
+}
+
+
+int CountLeadingSignBits(int64_t value, int width) {
+ // TODO(jbramley): Optimize this for ARM64 hosts.
+ ASSERT((width == 32) || (width == 64));
+ if (value >= 0) {
+ return CountLeadingZeros(value, width) - 1;
+ } else {
+ return CountLeadingZeros(~value, width) - 1;
+ }
+}
+
+
+int CountTrailingZeros(uint64_t value, int width) {
+ // TODO(jbramley): Optimize this for ARM64 hosts.
+ ASSERT((width == 32) || (width == 64));
+ int count = 0;
+ while ((count < width) && (((value >> count) & 1) == 0)) {
+ count++;
+ }
+ return count;
+}
+
+
+int CountSetBits(uint64_t value, int width) {
+ // TODO(jbramley): Would it be useful to allow other widths? The
+ // implementation already supports them.
+ ASSERT((width == 32) || (width == 64));
+
+ // Mask out unused bits to ensure that they are not counted.
+ value &= (0xffffffffffffffffUL >> (64-width));
+
+ // Add up the set bits.
+ // The algorithm works by adding pairs of bit fields together iteratively,
+ // where the size of each bit field doubles each time.
+ // An example for an 8-bit value:
+ // Bits: h g f e d c b a
+ // \ | \ | \ | \ |
+ // value = h+g f+e d+c b+a
+ // \ | \ |
+ // value = h+g+f+e d+c+b+a
+ // \ |
+ // value = h+g+f+e+d+c+b+a
+ value = ((value >> 1) & 0x5555555555555555) + (value & 0x5555555555555555);
+ value = ((value >> 2) & 0x3333333333333333) + (value & 0x3333333333333333);
+ value = ((value >> 4) & 0x0f0f0f0f0f0f0f0f) + (value & 0x0f0f0f0f0f0f0f0f);
+ value = ((value >> 8) & 0x00ff00ff00ff00ff) + (value & 0x00ff00ff00ff00ff);
+ value = ((value >> 16) & 0x0000ffff0000ffff) + (value & 0x0000ffff0000ffff);
+ value = ((value >> 32) & 0x00000000ffffffff) + (value & 0x00000000ffffffff);
+
+ return value;
+}
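The pairwise-add scheme documented in CountSetBits above is the standard branch-free population count. As a quick, self-contained check (a sketch for illustration, not part of the V8 tree), the same six folding steps can be compared against a naive bit-by-bit loop:

  #include <cassert>
  #include <cstddef>
  #include <cstdint>

  // Branch-free population count, mirroring the folding steps above.
  static int PopCount64(uint64_t v) {
    v = ((v >> 1) & 0x5555555555555555ULL) + (v & 0x5555555555555555ULL);
    v = ((v >> 2) & 0x3333333333333333ULL) + (v & 0x3333333333333333ULL);
    v = ((v >> 4) & 0x0f0f0f0f0f0f0f0fULL) + (v & 0x0f0f0f0f0f0f0f0fULL);
    v = ((v >> 8) & 0x00ff00ff00ff00ffULL) + (v & 0x00ff00ff00ff00ffULL);
    v = ((v >> 16) & 0x0000ffff0000ffffULL) + (v & 0x0000ffff0000ffffULL);
    v = ((v >> 32) & 0x00000000ffffffffULL) + (v & 0x00000000ffffffffULL);
    return static_cast<int>(v);
  }

  int main() {
    const uint64_t samples[] = { 0, 1, 0xf0f0, 0xffffffffffffffffULL };
    for (size_t i = 0; i < sizeof(samples) / sizeof(samples[0]); ++i) {
      int naive = 0;
      for (uint64_t t = samples[i]; t != 0; t >>= 1) {
        naive += static_cast<int>(t & 1);  // Count one bit at a time.
      }
      assert(PopCount64(samples[i]) == naive);
    }
    return 0;
  }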
+
+
+int MaskToBit(uint64_t mask) {
+ ASSERT(CountSetBits(mask, 64) == 1);
+ return CountTrailingZeros(mask, 64);
+}
+
+
+} } // namespace v8::internal
+
+#endif // V8_TARGET_ARCH_ARM64
diff --git a/deps/v8/src/arm64/utils-arm64.h b/deps/v8/src/arm64/utils-arm64.h
new file mode 100644
index 0000000000..a1fa12cfa7
--- /dev/null
+++ b/deps/v8/src/arm64/utils-arm64.h
@@ -0,0 +1,135 @@
+// Copyright 2013 the V8 project authors. All rights reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following
+// disclaimer in the documentation and/or other materials provided
+// with the distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived
+// from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+#ifndef V8_ARM64_UTILS_ARM64_H_
+#define V8_ARM64_UTILS_ARM64_H_
+
+#include <cmath>
+#include "v8.h"
+#include "arm64/constants-arm64.h"
+
+#define REGISTER_CODE_LIST(R) \
+R(0) R(1) R(2) R(3) R(4) R(5) R(6) R(7) \
+R(8) R(9) R(10) R(11) R(12) R(13) R(14) R(15) \
+R(16) R(17) R(18) R(19) R(20) R(21) R(22) R(23) \
+R(24) R(25) R(26) R(27) R(28) R(29) R(30) R(31)
+
+namespace v8 {
+namespace internal {
+
+// These are global assumptions in v8.
+STATIC_ASSERT((static_cast<int32_t>(-1) >> 1) == -1);
+STATIC_ASSERT((static_cast<uint32_t>(-1) >> 1) == 0x7FFFFFFF);
+
+// Floating point representation.
+static inline uint32_t float_to_rawbits(float value) {
+ uint32_t bits = 0;
+ memcpy(&bits, &value, 4);
+ return bits;
+}
+
+
+static inline uint64_t double_to_rawbits(double value) {
+ uint64_t bits = 0;
+ memcpy(&bits, &value, 8);
+ return bits;
+}
+
+
+static inline float rawbits_to_float(uint32_t bits) {
+ float value = 0.0;
+ memcpy(&value, &bits, 4);
+ return value;
+}
+
+
+static inline double rawbits_to_double(uint64_t bits) {
+ double value = 0.0;
+ memcpy(&value, &bits, 8);
+ return value;
+}
+
+
+// Bit counting.
+int CountLeadingZeros(uint64_t value, int width);
+int CountLeadingSignBits(int64_t value, int width);
+int CountTrailingZeros(uint64_t value, int width);
+int CountSetBits(uint64_t value, int width);
+int MaskToBit(uint64_t mask);
+
+
+// NaN tests.
+inline bool IsSignallingNaN(double num) {
+ uint64_t raw = double_to_rawbits(num);
+ if (std::isnan(num) && ((raw & kDQuietNanMask) == 0)) {
+ return true;
+ }
+ return false;
+}
+
+
+inline bool IsSignallingNaN(float num) {
+ uint32_t raw = float_to_rawbits(num);
+ if (std::isnan(num) && ((raw & kSQuietNanMask) == 0)) {
+ return true;
+ }
+ return false;
+}
+
+
+template <typename T>
+inline bool IsQuietNaN(T num) {
+ return std::isnan(num) && !IsSignallingNaN(num);
+}
+
+
+// Convert the NaN in 'num' to a quiet NaN.
+inline double ToQuietNaN(double num) {
+ ASSERT(std::isnan(num));
+ return rawbits_to_double(double_to_rawbits(num) | kDQuietNanMask);
+}
+
+
+inline float ToQuietNaN(float num) {
+ ASSERT(std::isnan(num));
+ return rawbits_to_float(float_to_rawbits(num) | kSQuietNanMask);
+}
+
+
+// Fused multiply-add.
+inline double FusedMultiplyAdd(double op1, double op2, double a) {
+ return fma(op1, op2, a);
+}
+
+
+inline float FusedMultiplyAdd(float op1, float op2, float a) {
+ return fmaf(op1, op2, a);
+}
+
+} } // namespace v8::internal
+
+#endif // V8_ARM64_UTILS_ARM64_H_
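A note on the NaN helpers above: on the usual IEEE-754 implementations (ARM and x86 included) the quiet/signalling distinction is carried in the most significant fraction bit, bit 51 of a binary64, which is what kDQuietNanMask from constants-arm64.h is expected to select. A standalone sketch with a locally defined mask (an assumption for illustration, not the V8 constant):

  #include <cassert>
  #include <cmath>
  #include <cstdint>
  #include <cstring>
  #include <limits>

  // Illustrative only: treat bit 51 as the quiet bit of a binary64 NaN.
  static const uint64_t kQuietBit = 1ULL << 51;

  static uint64_t DoubleBits(double d) {
    uint64_t bits;
    std::memcpy(&bits, &d, sizeof(bits));  // Same trick as double_to_rawbits.
    return bits;
  }

  int main() {
    double qnan = std::numeric_limits<double>::quiet_NaN();
    // A quiet NaN carries the quiet bit; ORing the bit in never un-NaNs it.
    assert(std::isnan(qnan));
    assert((DoubleBits(qnan) & kQuietBit) != 0);
    return 0;
  }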
diff --git a/deps/v8/src/array-iterator.js b/deps/v8/src/array-iterator.js
index a8c5e001c4..3af659dbcc 100644
--- a/deps/v8/src/array-iterator.js
+++ b/deps/v8/src/array-iterator.js
@@ -36,9 +36,9 @@ var ARRAY_ITERATOR_KIND_VALUES = 2;
var ARRAY_ITERATOR_KIND_ENTRIES = 3;
// The spec draft also has "sparse" but it is never used.
-var iteratorObjectSymbol = NEW_PRIVATE("iterator_object");
-var arrayIteratorNextIndexSymbol = NEW_PRIVATE("iterator_next");
-var arrayIterationKindSymbol = NEW_PRIVATE("iterator_kind");
+var arrayIteratorObjectSymbol = GLOBAL_PRIVATE("ArrayIterator#object");
+var arrayIteratorNextIndexSymbol = GLOBAL_PRIVATE("ArrayIterator#next");
+var arrayIterationKindSymbol = GLOBAL_PRIVATE("ArrayIterator#kind");
function ArrayIterator() {}
@@ -46,7 +46,7 @@ function ArrayIterator() {}
function CreateArrayIterator(array, kind) {
var object = ToObject(array);
var iterator = new ArrayIterator;
- SET_PRIVATE(iterator, iteratorObjectSymbol, object);
+ SET_PRIVATE(iterator, arrayIteratorObjectSymbol, object);
SET_PRIVATE(iterator, arrayIteratorNextIndexSymbol, 0);
SET_PRIVATE(iterator, arrayIterationKindSymbol, kind);
return iterator;
@@ -60,7 +60,7 @@ function CreateIteratorResultObject(value, done) {
// 15.4.5.2.2 ArrayIterator.prototype.next( )
function ArrayIteratorNext() {
var iterator = ToObject(this);
- var array = GET_PRIVATE(iterator, iteratorObjectSymbol);
+ var array = GET_PRIVATE(iterator, arrayIteratorObjectSymbol);
if (!array) {
throw MakeTypeError('incompatible_method_receiver',
['Array Iterator.prototype.next']);
diff --git a/deps/v8/src/array.js b/deps/v8/src/array.js
index 372b7ece63..e48230e2bd 100644
--- a/deps/v8/src/array.js
+++ b/deps/v8/src/array.js
@@ -1115,8 +1115,8 @@ function ArraySort(comparefn) {
max_prototype_element = CopyFromPrototype(this, length);
}
- var num_non_undefined = %IsObserved(this) ?
- -1 : %RemoveArrayHoles(this, length);
+ // %RemoveArrayHoles returns -1 if fast removal is not supported.
+ var num_non_undefined = %RemoveArrayHoles(this, length);
if (num_non_undefined == -1) {
// The array is observed, or there were indexed accessors in the array.
@@ -1153,7 +1153,7 @@ function ArrayFilter(f, receiver) {
}
if (IS_NULL_OR_UNDEFINED(receiver)) {
receiver = %GetDefaultReceiver(f) || receiver;
- } else if (!IS_SPEC_OBJECT(receiver) && %IsClassicModeFunction(f)) {
+ } else if (!IS_SPEC_OBJECT(receiver) && %IsSloppyModeFunction(f)) {
receiver = ToObject(receiver);
}
@@ -1201,7 +1201,7 @@ function ArrayForEach(f, receiver) {
}
if (IS_NULL_OR_UNDEFINED(receiver)) {
receiver = %GetDefaultReceiver(f) || receiver;
- } else if (!IS_SPEC_OBJECT(receiver) && %IsClassicModeFunction(f)) {
+ } else if (!IS_SPEC_OBJECT(receiver) && %IsSloppyModeFunction(f)) {
receiver = ToObject(receiver);
}
@@ -1242,7 +1242,7 @@ function ArraySome(f, receiver) {
}
if (IS_NULL_OR_UNDEFINED(receiver)) {
receiver = %GetDefaultReceiver(f) || receiver;
- } else if (!IS_SPEC_OBJECT(receiver) && %IsClassicModeFunction(f)) {
+ } else if (!IS_SPEC_OBJECT(receiver) && %IsSloppyModeFunction(f)) {
receiver = ToObject(receiver);
}
@@ -1282,7 +1282,7 @@ function ArrayEvery(f, receiver) {
}
if (IS_NULL_OR_UNDEFINED(receiver)) {
receiver = %GetDefaultReceiver(f) || receiver;
- } else if (!IS_SPEC_OBJECT(receiver) && %IsClassicModeFunction(f)) {
+ } else if (!IS_SPEC_OBJECT(receiver) && %IsSloppyModeFunction(f)) {
receiver = ToObject(receiver);
}
@@ -1321,7 +1321,7 @@ function ArrayMap(f, receiver) {
}
if (IS_NULL_OR_UNDEFINED(receiver)) {
receiver = %GetDefaultReceiver(f) || receiver;
- } else if (!IS_SPEC_OBJECT(receiver) && %IsClassicModeFunction(f)) {
+ } else if (!IS_SPEC_OBJECT(receiver) && %IsSloppyModeFunction(f)) {
receiver = ToObject(receiver);
}
diff --git a/deps/v8/src/assembler.cc b/deps/v8/src/assembler.cc
index 436d035c3e..772b6d6963 100644
--- a/deps/v8/src/assembler.cc
+++ b/deps/v8/src/assembler.cc
@@ -59,6 +59,8 @@
#include "ia32/assembler-ia32-inl.h"
#elif V8_TARGET_ARCH_X64
#include "x64/assembler-x64-inl.h"
+#elif V8_TARGET_ARCH_ARM64
+#include "arm64/assembler-arm64-inl.h"
#elif V8_TARGET_ARCH_ARM
#include "arm/assembler-arm-inl.h"
#elif V8_TARGET_ARCH_MIPS
@@ -73,6 +75,8 @@
#include "ia32/regexp-macro-assembler-ia32.h"
#elif V8_TARGET_ARCH_X64
#include "x64/regexp-macro-assembler-x64.h"
+#elif V8_TARGET_ARCH_ARM64
+#include "arm64/regexp-macro-assembler-arm64.h"
#elif V8_TARGET_ARCH_ARM
#include "arm/regexp-macro-assembler-arm.h"
#elif V8_TARGET_ARCH_MIPS
@@ -122,7 +126,6 @@ AssemblerBase::AssemblerBase(Isolate* isolate, void* buffer, int buffer_size)
if (FLAG_mask_constants_with_cookie && isolate != NULL) {
jit_cookie_ = isolate->random_number_generator()->NextInt();
}
-
if (buffer == NULL) {
// Do our own buffer management.
if (buffer_size <= kMinimalBufferSize) {
@@ -283,9 +286,12 @@ int Label::pos() const {
// 00 [4 bit middle_tag] 11 followed by
// 00 [6 bit pc delta]
//
-// 1101: constant pool. Used on ARM only for now.
-// The format is: 11 1101 11
-// signed int (size of the constant pool).
+// 1101: constant or veneer pool. Used only on ARM and ARM64 for now.
+// The format is: [2-bit sub-type] 1101 11
+// signed int (size of the pool).
+// The 2-bit sub-types are:
+// 00: constant pool
+// 01: veneer pool
// 1110: long_data_record
// The format is: [2-bit data_type_tag] 1110 11
// signed intptr_t, lowest byte written first
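Spelling out the new pool record layout: reading the comment left to right as most- to least-significant bits, a constant-pool record starts with the byte 00 1101 11 (0x37) and a veneer-pool record with 01 1101 11 (0x77), each followed by the signed pool size. A tiny sketch of that composition (bit positions inferred from the layout comment above, not taken from the kPoolExtraTag/kConstPoolTag constants below):

  #include <cassert>
  #include <cstdint>

  // [2-bit sub-type][4-bit extra tag = 1101][2-bit tag = 11]
  static uint8_t PoolTagByte(int sub_type) {  // 0: constant pool, 1: veneer pool
    return static_cast<uint8_t>((sub_type << 6) | (0xd << 2) | 0x3);
  }

  int main() {
    assert(PoolTagByte(0) == 0x37);  // constant pool record
    assert(PoolTagByte(1) == 0x77);  // veneer pool record
    return 0;
  }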
@@ -342,8 +348,9 @@ const int kNonstatementPositionTag = 1;
const int kStatementPositionTag = 2;
const int kCommentTag = 3;
-const int kConstPoolExtraTag = kPCJumpExtraTag - 2;
-const int kConstPoolTag = 3;
+const int kPoolExtraTag = kPCJumpExtraTag - 2;
+const int kConstPoolTag = 0;
+const int kVeneerPoolTag = 1;
uint32_t RelocInfoWriter::WriteVariableLengthPCJump(uint32_t pc_delta) {
@@ -403,8 +410,8 @@ void RelocInfoWriter::WriteExtraTaggedIntData(int data_delta, int top_tag) {
}
-void RelocInfoWriter::WriteExtraTaggedConstPoolData(int data) {
- WriteExtraTag(kConstPoolExtraTag, kConstPoolTag);
+void RelocInfoWriter::WriteExtraTaggedPoolData(int data, int pool_type) {
+ WriteExtraTag(kPoolExtraTag, pool_type);
for (int i = 0; i < kIntSize; i++) {
*--pos_ = static_cast<byte>(data);
// Signed right shift is arithmetic shift. Tested in test-utils.cc.
@@ -476,9 +483,11 @@ void RelocInfoWriter::Write(const RelocInfo* rinfo) {
WriteExtraTaggedPC(pc_delta, kPCJumpExtraTag);
WriteExtraTaggedData(rinfo->data(), kCommentTag);
ASSERT(begin_pos - pos_ >= RelocInfo::kMinRelocCommentSize);
- } else if (RelocInfo::IsConstPool(rmode)) {
+ } else if (RelocInfo::IsConstPool(rmode) || RelocInfo::IsVeneerPool(rmode)) {
WriteExtraTaggedPC(pc_delta, kPCJumpExtraTag);
- WriteExtraTaggedConstPoolData(static_cast<int>(rinfo->data()));
+ WriteExtraTaggedPoolData(static_cast<int>(rinfo->data()),
+ RelocInfo::IsConstPool(rmode) ? kConstPoolTag
+ : kVeneerPoolTag);
} else {
ASSERT(rmode > RelocInfo::LAST_COMPACT_ENUM);
int saved_mode = rmode - RelocInfo::LAST_COMPACT_ENUM;
@@ -529,7 +538,7 @@ void RelocIterator::AdvanceReadId() {
}
-void RelocIterator::AdvanceReadConstPoolData() {
+void RelocIterator::AdvanceReadPoolData() {
int x = 0;
for (int i = 0; i < kIntSize; i++) {
x |= static_cast<int>(*--pos_) << i * kBitsPerByte;
@@ -671,10 +680,13 @@ void RelocIterator::next() {
}
Advance(kIntptrSize);
}
- } else if ((extra_tag == kConstPoolExtraTag) &&
- (GetTopTag() == kConstPoolTag)) {
- if (SetMode(RelocInfo::CONST_POOL)) {
- AdvanceReadConstPoolData();
+ } else if (extra_tag == kPoolExtraTag) {
+ int pool_type = GetTopTag();
+ ASSERT(pool_type == kConstPoolTag || pool_type == kVeneerPoolTag);
+ RelocInfo::Mode rmode = (pool_type == kConstPoolTag) ?
+ RelocInfo::CONST_POOL : RelocInfo::VENEER_POOL;
+ if (SetMode(rmode)) {
+ AdvanceReadPoolData();
return;
}
Advance(kIntSize);
@@ -793,6 +805,8 @@ const char* RelocInfo::RelocModeName(RelocInfo::Mode rmode) {
return "internal reference";
case RelocInfo::CONST_POOL:
return "constant pool";
+ case RelocInfo::VENEER_POOL:
+ return "veneer pool";
case RelocInfo::DEBUG_BREAK_SLOT:
#ifndef ENABLE_DEBUGGER_SUPPORT
UNREACHABLE();
@@ -880,6 +894,7 @@ void RelocInfo::Verify() {
case EXTERNAL_REFERENCE:
case INTERNAL_REFERENCE:
case CONST_POOL:
+ case VENEER_POOL:
case DEBUG_BREAK_SLOT:
case NONE32:
case NONE64:
@@ -1026,14 +1041,6 @@ ExternalReference ExternalReference::
ExternalReference ExternalReference::
- incremental_evacuation_record_write_function(Isolate* isolate) {
- return ExternalReference(Redirect(
- isolate,
- FUNCTION_ADDR(IncrementalMarking::RecordWriteForEvacuationFromCode)));
-}
-
-
-ExternalReference ExternalReference::
store_buffer_overflow_function(Isolate* isolate) {
return ExternalReference(Redirect(
isolate,
@@ -1052,6 +1059,12 @@ ExternalReference ExternalReference::perform_gc_function(Isolate* isolate) {
}
+ExternalReference ExternalReference::out_of_memory_function(Isolate* isolate) {
+ return
+ ExternalReference(Redirect(isolate, FUNCTION_ADDR(Runtime::OutOfMemory)));
+}
+
+
ExternalReference ExternalReference::delete_handle_scope_extensions(
Isolate* isolate) {
return ExternalReference(Redirect(
@@ -1336,6 +1349,8 @@ ExternalReference ExternalReference::re_check_stack_guard_state(
function = FUNCTION_ADDR(RegExpMacroAssemblerX64::CheckStackGuardState);
#elif V8_TARGET_ARCH_IA32
function = FUNCTION_ADDR(RegExpMacroAssemblerIA32::CheckStackGuardState);
+#elif V8_TARGET_ARCH_ARM64
+ function = FUNCTION_ADDR(RegExpMacroAssemblerARM64::CheckStackGuardState);
#elif V8_TARGET_ARCH_ARM
function = FUNCTION_ADDR(RegExpMacroAssemblerARM::CheckStackGuardState);
#elif V8_TARGET_ARCH_MIPS
@@ -1596,4 +1611,38 @@ bool PositionsRecorder::WriteRecordedPositions() {
return written;
}
+
+MultiplierAndShift::MultiplierAndShift(int32_t d) {
+ ASSERT(d <= -2 || 2 <= d);
+ const uint32_t two31 = 0x80000000;
+ uint32_t ad = Abs(d);
+ uint32_t t = two31 + (uint32_t(d) >> 31);
+ uint32_t anc = t - 1 - t % ad; // Absolute value of nc.
+ int32_t p = 31; // Init. p.
+ uint32_t q1 = two31 / anc; // Init. q1 = 2**p/|nc|.
+ uint32_t r1 = two31 - q1 * anc; // Init. r1 = rem(2**p, |nc|).
+ uint32_t q2 = two31 / ad; // Init. q2 = 2**p/|d|.
+ uint32_t r2 = two31 - q2 * ad; // Init. r2 = rem(2**p, |d|).
+ uint32_t delta;
+ do {
+ p++;
+ q1 *= 2; // Update q1 = 2**p/|nc|.
+ r1 *= 2; // Update r1 = rem(2**p, |nc|).
+ if (r1 >= anc) { // Must be an unsigned comparison here.
+ q1++;
+ r1 = r1 - anc;
+ }
+ q2 *= 2; // Update q2 = 2**p/|d|.
+ r2 *= 2; // Update r2 = rem(2**p, |d|).
+ if (r2 >= ad) { // Must be an unsigned comparison here.
+ q2++;
+ r2 = r2 - ad;
+ }
+ delta = ad - r2;
+ } while (q1 < delta || (q1 == delta && r1 == 0));
+ int32_t mul = static_cast<int32_t>(q2 + 1);
+ multiplier_ = (d < 0) ? -mul : mul;
+ shift_ = p - 32;
+}
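The constructor above only derives the two constants; the code generators that consume them (not part of this hunk) follow the usual Hacker's Delight recipe: take the high 32 bits of multiplier() * dividend, apply a correction when the multiplier's sign differs from the divisor's, shift right by shift(), and add the quotient's sign bit. A hedged, self-contained sketch of that consumer:

  #include <cassert>
  #include <cstdint>

  // Signed division by a constant d via multiply/shift. 'multiplier' and
  // 'shift' are assumed to come from MultiplierAndShift(d).
  static int32_t DivByConstant(int32_t n, int32_t d,
                               int32_t multiplier, int32_t shift) {
    int64_t product = static_cast<int64_t>(multiplier) * n;
    int32_t q = static_cast<int32_t>(product >> 32);  // High 32 bits.
    if (d > 0 && multiplier < 0) q += n;              // Multiplier wrapped.
    if (d < 0 && multiplier > 0) q -= n;
    q = q >> shift;                                   // Arithmetic shift.
    q += static_cast<int32_t>(static_cast<uint32_t>(q) >> 31);  // Round to 0.
    return q;
  }

  int main() {
    // 0x66666667 with shift 1 are the well-known constants for d = 5.
    assert(DivByConstant(7, 5, 0x66666667, 1) == 1);
    assert(DivByConstant(-7, 5, 0x66666667, 1) == -1);
    assert(DivByConstant(25, 5, 0x66666667, 1) == 5);
    return 0;
  }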
+
} } // namespace v8::internal
diff --git a/deps/v8/src/assembler.h b/deps/v8/src/assembler.h
index ce7d9f5b7d..0349b06582 100644
--- a/deps/v8/src/assembler.h
+++ b/deps/v8/src/assembler.h
@@ -82,6 +82,10 @@ class AssemblerBase: public Malloced {
int pc_offset() const { return static_cast<int>(pc_ - buffer_); }
+ // This function is called when code generation is aborted, so that
+ // the assembler can clean up internal data structures.
+ virtual void AbortedCodeGeneration() { }
+
static const int kMinimalBufferSize = 4*KB;
protected:
@@ -210,6 +214,12 @@ class Label BASE_EMBEDDED {
friend class Assembler;
friend class Displacement;
friend class RegExpMacroAssemblerIrregexp;
+
+#if V8_TARGET_ARCH_ARM64
+ // On ARM64, the Assembler keeps track of pointers to Labels to resolve
+ // branches to distant targets. Copying labels would confuse the Assembler.
+ DISALLOW_COPY_AND_ASSIGN(Label); // NOLINT
+#endif
};
@@ -276,9 +286,10 @@ class RelocInfo BASE_EMBEDDED {
EXTERNAL_REFERENCE, // The address of an external C++ function.
INTERNAL_REFERENCE, // An address inside the same function.
- // Marks a constant pool. Only used on ARM.
- // It uses a custom noncompact encoding.
+ // Marks constant and veneer pools. Only used on ARM and ARM64.
+ // They use a custom noncompact encoding.
CONST_POOL,
+ VENEER_POOL,
// add more as needed
// Pseudo-types
@@ -288,7 +299,7 @@ class RelocInfo BASE_EMBEDDED {
CODE_AGE_SEQUENCE, // Not stored in RelocInfo array, used explictly by
// code aging.
FIRST_REAL_RELOC_MODE = CODE_TARGET,
- LAST_REAL_RELOC_MODE = CONST_POOL,
+ LAST_REAL_RELOC_MODE = VENEER_POOL,
FIRST_PSEUDO_RELOC_MODE = CODE_AGE_SEQUENCE,
LAST_PSEUDO_RELOC_MODE = CODE_AGE_SEQUENCE,
LAST_CODE_ENUM = DEBUG_BREAK,
@@ -342,6 +353,9 @@ class RelocInfo BASE_EMBEDDED {
static inline bool IsConstPool(Mode mode) {
return mode == CONST_POOL;
}
+ static inline bool IsVeneerPool(Mode mode) {
+ return mode == VENEER_POOL;
+ }
static inline bool IsPosition(Mode mode) {
return mode == POSITION || mode == STATEMENT_POSITION;
}
@@ -365,6 +379,15 @@ class RelocInfo BASE_EMBEDDED {
}
static inline int ModeMask(Mode mode) { return 1 << mode; }
+ // Returns true if the first RelocInfo has the same mode and raw data as the
+ // second one.
+ static inline bool IsEqual(RelocInfo first, RelocInfo second) {
+ return first.rmode() == second.rmode() &&
+ (first.rmode() == RelocInfo::NONE64 ?
+ first.raw_data64() == second.raw_data64() :
+ first.data() == second.data());
+ }
+
// Accessors
byte* pc() const { return pc_; }
void set_pc(byte* pc) { pc_ = pc; }
@@ -375,6 +398,7 @@ class RelocInfo BASE_EMBEDDED {
return BitCast<uint64_t>(data64_);
}
Code* host() const { return host_; }
+ void set_host(Code* host) { host_ = host; }
// Apply a relocation by delta bytes
INLINE(void apply(intptr_t delta));
@@ -384,6 +408,10 @@ class RelocInfo BASE_EMBEDDED {
// instructions).
bool IsCodedSpecially();
+ // If true, the pointer this relocation info refers to is an entry in the
+ // constant pool, otherwise the pointer is embedded in the instruction stream.
+ bool IsInConstantPool();
+
// Read/modify the code target in the branch/call instruction
// this relocation applies to;
// can only be called if IsCodeTarget(rmode_) || IsRuntimeEntry(rmode_)
@@ -406,6 +434,10 @@ class RelocInfo BASE_EMBEDDED {
INLINE(Code* code_age_stub());
INLINE(void set_code_age_stub(Code* stub));
+ // Returns the address of the constant pool entry where the target address
+ // is held. This should only be called if IsInConstantPool returns true.
+ INLINE(Address constant_pool_entry_address());
+
// Read the address of the word containing the target_address in an
// instruction stream. What this means exactly is architecture-independent.
// The only architecture-independent user of this function is the serializer.
@@ -413,6 +445,7 @@ class RelocInfo BASE_EMBEDDED {
// output before the next target. Architecture-independent code shouldn't
// dereference the pointer it gets back from this.
INLINE(Address target_address_address());
+
// This indicates how much space a target takes up when deserializing a code
// stream. For most architectures this is just the size of a pointer. For
// an instruction like movw/movt where the target bits are mixed into the
@@ -537,7 +570,7 @@ class RelocInfoWriter BASE_EMBEDDED {
inline void WriteTaggedPC(uint32_t pc_delta, int tag);
inline void WriteExtraTaggedPC(uint32_t pc_delta, int extra_tag);
inline void WriteExtraTaggedIntData(int data_delta, int top_tag);
- inline void WriteExtraTaggedConstPoolData(int data);
+ inline void WriteExtraTaggedPoolData(int data, int pool_type);
inline void WriteExtraTaggedData(intptr_t data_delta, int top_tag);
inline void WriteTaggedData(intptr_t data_delta, int tag);
inline void WriteExtraTag(int extra_tag, int top_tag);
@@ -588,7 +621,7 @@ class RelocIterator: public Malloced {
void ReadTaggedPC();
void AdvanceReadPC();
void AdvanceReadId();
- void AdvanceReadConstPoolData();
+ void AdvanceReadPoolData();
void AdvanceReadPosition();
void AdvanceReadData();
void AdvanceReadVariableLengthPCJump();
@@ -711,12 +744,11 @@ class ExternalReference BASE_EMBEDDED {
static ExternalReference incremental_marking_record_write_function(
Isolate* isolate);
- static ExternalReference incremental_evacuation_record_write_function(
- Isolate* isolate);
static ExternalReference store_buffer_overflow_function(
Isolate* isolate);
static ExternalReference flush_icache_function(Isolate* isolate);
static ExternalReference perform_gc_function(Isolate* isolate);
+ static ExternalReference out_of_memory_function(Isolate* isolate);
static ExternalReference delete_handle_scope_extensions(Isolate* isolate);
static ExternalReference get_date_field_function(Isolate* isolate);
@@ -1002,32 +1034,6 @@ class PreservePositionScope BASE_EMBEDDED {
// -----------------------------------------------------------------------------
// Utility functions
-inline bool is_intn(int x, int n) {
- return -(1 << (n-1)) <= x && x < (1 << (n-1));
-}
-
-inline bool is_int8(int x) { return is_intn(x, 8); }
-inline bool is_int16(int x) { return is_intn(x, 16); }
-inline bool is_int18(int x) { return is_intn(x, 18); }
-inline bool is_int24(int x) { return is_intn(x, 24); }
-
-inline bool is_uintn(int x, int n) {
- return (x & -(1 << n)) == 0;
-}
-
-inline bool is_uint2(int x) { return is_uintn(x, 2); }
-inline bool is_uint3(int x) { return is_uintn(x, 3); }
-inline bool is_uint4(int x) { return is_uintn(x, 4); }
-inline bool is_uint5(int x) { return is_uintn(x, 5); }
-inline bool is_uint6(int x) { return is_uintn(x, 6); }
-inline bool is_uint8(int x) { return is_uintn(x, 8); }
-inline bool is_uint10(int x) { return is_uintn(x, 10); }
-inline bool is_uint12(int x) { return is_uintn(x, 12); }
-inline bool is_uint16(int x) { return is_uintn(x, 16); }
-inline bool is_uint24(int x) { return is_uintn(x, 24); }
-inline bool is_uint26(int x) { return is_uintn(x, 26); }
-inline bool is_uint28(int x) { return is_uintn(x, 28); }
-
inline int NumberOfBitsSet(uint32_t x) {
unsigned int num_bits_set;
for (num_bits_set = 0; x; x >>= 1) {
@@ -1065,6 +1071,21 @@ class NullCallWrapper : public CallWrapper {
virtual void AfterCall() const { }
};
+
+// The multiplier and shift for signed division via multiplication; see Warren's
+// "Hacker's Delight", chapter 10.
+class MultiplierAndShift {
+ public:
+ explicit MultiplierAndShift(int32_t d);
+ int32_t multiplier() const { return multiplier_; }
+ int32_t shift() const { return shift_; }
+
+ private:
+ int32_t multiplier_;
+ int32_t shift_;
+};
+
+
} } // namespace v8::internal
#endif // V8_ASSEMBLER_H_
diff --git a/deps/v8/src/assert-scope.cc b/deps/v8/src/assert-scope.cc
new file mode 100644
index 0000000000..960567cfa3
--- /dev/null
+++ b/deps/v8/src/assert-scope.cc
@@ -0,0 +1,21 @@
+// Copyright 2014 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+
+#include "assert-scope.h"
+#include "v8.h"
+
+namespace v8 {
+namespace internal {
+
+uint32_t PerIsolateAssertBase::GetData(Isolate* isolate) {
+ return isolate->per_isolate_assert_data();
+}
+
+
+void PerIsolateAssertBase::SetData(Isolate* isolate, uint32_t data) {
+ isolate->set_per_isolate_assert_data(data);
+}
+
+} } // namespace v8::internal
diff --git a/deps/v8/src/assert-scope.h b/deps/v8/src/assert-scope.h
index 269b280d02..428e6d007e 100644
--- a/deps/v8/src/assert-scope.h
+++ b/deps/v8/src/assert-scope.h
@@ -30,6 +30,7 @@
#include "allocation.h"
#include "platform.h"
+#include "utils.h"
namespace v8 {
namespace internal {
@@ -46,7 +47,13 @@ enum PerThreadAssertType {
};
-#ifdef DEBUG
+enum PerIsolateAssertType {
+ JAVASCRIPT_EXECUTION_ASSERT,
+ JAVASCRIPT_EXECUTION_THROWS,
+ ALLOCATION_FAILURE_ASSERT
+};
+
+
class PerThreadAssertData {
public:
PerThreadAssertData() : nesting_level_(0) {
@@ -72,12 +79,9 @@ class PerThreadAssertData {
DISALLOW_COPY_AND_ASSIGN(PerThreadAssertData);
};
-#endif // DEBUG
class PerThreadAssertScopeBase {
-#ifdef DEBUG
-
protected:
PerThreadAssertScopeBase() {
data_ = GetAssertData();
@@ -110,18 +114,12 @@ class PerThreadAssertScopeBase {
static void SetThreadLocalData(PerThreadAssertData* data) {
Thread::SetThreadLocal(thread_local_key, data);
}
-#endif // DEBUG
};
-
template <PerThreadAssertType type, bool allow>
class PerThreadAssertScope : public PerThreadAssertScopeBase {
public:
-#ifndef DEBUG
- PerThreadAssertScope() { }
- static void SetIsAllowed(bool is_allowed) { }
-#else
PerThreadAssertScope() {
old_state_ = data_->get(type);
data_->set(type, allow);
@@ -136,49 +134,140 @@ class PerThreadAssertScope : public PerThreadAssertScopeBase {
private:
bool old_state_;
+
+ DISALLOW_COPY_AND_ASSIGN(PerThreadAssertScope);
+};
+
+
+class PerIsolateAssertBase {
+ protected:
+ static uint32_t GetData(Isolate* isolate);
+ static void SetData(Isolate* isolate, uint32_t data);
+};
+
+
+template <PerIsolateAssertType type, bool allow>
+class PerIsolateAssertScope : public PerIsolateAssertBase {
+ public:
+ explicit PerIsolateAssertScope(Isolate* isolate) : isolate_(isolate) {
+ STATIC_ASSERT(type < 32);
+ old_data_ = GetData(isolate_);
+ SetData(isolate_, DataBit::update(old_data_, allow));
+ }
+
+ ~PerIsolateAssertScope() {
+ SetData(isolate_, old_data_);
+ }
+
+ static bool IsAllowed(Isolate* isolate) {
+ return DataBit::decode(GetData(isolate));
+ }
+
+ private:
+ typedef BitField<bool, type, 1> DataBit;
+
+ uint32_t old_data_;
+ Isolate* isolate_;
+
+ DISALLOW_COPY_AND_ASSIGN(PerIsolateAssertScope);
+};
+
+
+template <PerThreadAssertType type, bool allow>
+#ifdef DEBUG
+class PerThreadAssertScopeDebugOnly : public
+ PerThreadAssertScope<type, allow> {
+#else
+class PerThreadAssertScopeDebugOnly {
+ public:
+ PerThreadAssertScopeDebugOnly() { }
#endif
};
+
+template <PerIsolateAssertType type, bool allow>
+#ifdef DEBUG
+class PerIsolateAssertScopeDebugOnly : public
+ PerIsolateAssertScope<type, allow> {
+ public:
+ explicit PerIsolateAssertScopeDebugOnly(Isolate* isolate)
+ : PerIsolateAssertScope<type, allow>(isolate) { }
+#else
+class PerIsolateAssertScopeDebugOnly {
+ public:
+ explicit PerIsolateAssertScopeDebugOnly(Isolate* isolate) { }
+#endif
+};
+
+// Per-thread assert scopes.
+
// Scope to document where we do not expect handles to be created.
-typedef PerThreadAssertScope<HANDLE_ALLOCATION_ASSERT, false>
+typedef PerThreadAssertScopeDebugOnly<HANDLE_ALLOCATION_ASSERT, false>
DisallowHandleAllocation;
// Scope to introduce an exception to DisallowHandleAllocation.
-typedef PerThreadAssertScope<HANDLE_ALLOCATION_ASSERT, true>
+typedef PerThreadAssertScopeDebugOnly<HANDLE_ALLOCATION_ASSERT, true>
AllowHandleAllocation;
// Scope to document where we do not expect any allocation and GC.
-typedef PerThreadAssertScope<HEAP_ALLOCATION_ASSERT, false>
+typedef PerThreadAssertScopeDebugOnly<HEAP_ALLOCATION_ASSERT, false>
DisallowHeapAllocation;
// Scope to introduce an exception to DisallowHeapAllocation.
-typedef PerThreadAssertScope<HEAP_ALLOCATION_ASSERT, true>
+typedef PerThreadAssertScopeDebugOnly<HEAP_ALLOCATION_ASSERT, true>
AllowHeapAllocation;
// Scope to document where we do not expect any handle dereferences.
-typedef PerThreadAssertScope<HANDLE_DEREFERENCE_ASSERT, false>
+typedef PerThreadAssertScopeDebugOnly<HANDLE_DEREFERENCE_ASSERT, false>
DisallowHandleDereference;
// Scope to introduce an exception to DisallowHandleDereference.
-typedef PerThreadAssertScope<HANDLE_DEREFERENCE_ASSERT, true>
+typedef PerThreadAssertScopeDebugOnly<HANDLE_DEREFERENCE_ASSERT, true>
AllowHandleDereference;
// Scope to document where we do not expect deferred handles to be dereferenced.
-typedef PerThreadAssertScope<DEFERRED_HANDLE_DEREFERENCE_ASSERT, false>
+typedef PerThreadAssertScopeDebugOnly<DEFERRED_HANDLE_DEREFERENCE_ASSERT, false>
DisallowDeferredHandleDereference;
// Scope to introduce an exception to DisallowDeferredHandleDereference.
-typedef PerThreadAssertScope<DEFERRED_HANDLE_DEREFERENCE_ASSERT, true>
+typedef PerThreadAssertScopeDebugOnly<DEFERRED_HANDLE_DEREFERENCE_ASSERT, true>
AllowDeferredHandleDereference;
// Scope to document where we do not expect deferred handles to be dereferenced.
-typedef PerThreadAssertScope<CODE_DEPENDENCY_CHANGE_ASSERT, false>
+typedef PerThreadAssertScopeDebugOnly<CODE_DEPENDENCY_CHANGE_ASSERT, false>
DisallowCodeDependencyChange;
// Scope to introduce an exception to DisallowDeferredHandleDereference.
-typedef PerThreadAssertScope<CODE_DEPENDENCY_CHANGE_ASSERT, true>
+typedef PerThreadAssertScopeDebugOnly<CODE_DEPENDENCY_CHANGE_ASSERT, true>
AllowCodeDependencyChange;
+
+// Per-isolate assert scopes.
+
+// Scope to document where we do not expect javascript execution.
+typedef PerIsolateAssertScope<JAVASCRIPT_EXECUTION_ASSERT, false>
+ DisallowJavascriptExecution;
+
+// Scope to introduce an exception to DisallowJavascriptExecution.
+typedef PerIsolateAssertScope<JAVASCRIPT_EXECUTION_ASSERT, true>
+ AllowJavascriptExecution;
+
+// Scope in which javascript execution leads to an exception being thrown.
+typedef PerIsolateAssertScope<JAVASCRIPT_EXECUTION_THROWS, false>
+ ThrowOnJavascriptExecution;
+
+// Scope to introduce an exception to ThrowOnJavascriptExecution.
+typedef PerIsolateAssertScope<JAVASCRIPT_EXECUTION_THROWS, true>
+ NoThrowOnJavascriptExecution;
+
+// Scope to document where we do not expect an allocation failure.
+typedef PerIsolateAssertScopeDebugOnly<ALLOCATION_FAILURE_ASSERT, false>
+ DisallowAllocationFailure;
+
+// Scope to introduce an exception to DisallowAllocationFailure.
+typedef PerIsolateAssertScopeDebugOnly<ALLOCATION_FAILURE_ASSERT, true>
+ AllowAllocationFailure;
+
} } // namespace v8::internal
#endif // V8_ASSERT_SCOPE_H_
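A brief usage sketch for the new per-isolate scopes (a hypothetical call site, not code from this patch): constructing a scope flips one BitField-encoded bit of the isolate's assert data for the scope's lifetime, and IsAllowed() decodes that same bit, so Disallow/Allow pairs nest naturally.

  // Assumes the usual v8::internal headers and an initialized Isolate*.
  void IllustrateAssertScopes(v8::internal::Isolate* isolate) {
    using v8::internal::DisallowJavascriptExecution;
    using v8::internal::AllowJavascriptExecution;

    DisallowJavascriptExecution no_js(isolate);
    // Here DisallowJavascriptExecution::IsAllowed(isolate) reports false.
    {
      AllowJavascriptExecution yes_js(isolate);  // Temporary exception.
      // Inside this inner scope the bit is set again, so IsAllowed() is true.
    }
    // Each destructor restores the previous per-isolate data word on exit.
  }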
diff --git a/deps/v8/src/ast.cc b/deps/v8/src/ast.cc
index 1a9919b5aa..f6cf18915b 100644
--- a/deps/v8/src/ast.cc
+++ b/deps/v8/src/ast.cc
@@ -180,8 +180,8 @@ int FunctionLiteral::end_position() const {
}
-LanguageMode FunctionLiteral::language_mode() const {
- return scope()->language_mode();
+StrictMode FunctionLiteral::strict_mode() const {
+ return scope()->strict_mode();
}
@@ -357,8 +357,7 @@ void ArrayLiteral::BuildConstantElements(Isolate* isolate) {
// Allocate a fixed array to hold all the object literals.
Handle<JSArray> array =
isolate->factory()->NewJSArray(0, FAST_HOLEY_SMI_ELEMENTS);
- isolate->factory()->SetElementsCapacityAndLength(
- array, values()->length(), values()->length());
+ JSArray::Expand(array, values()->length());
// Fill in the literals.
bool is_simple = true;
@@ -379,9 +378,9 @@ void ArrayLiteral::BuildConstantElements(Isolate* isolate) {
} else if (boilerplate_value->IsUninitialized()) {
is_simple = false;
JSObject::SetOwnElement(
- array, i, handle(Smi::FromInt(0), isolate), kNonStrictMode);
+ array, i, handle(Smi::FromInt(0), isolate), SLOPPY);
} else {
- JSObject::SetOwnElement(array, i, boilerplate_value, kNonStrictMode);
+ JSObject::SetOwnElement(array, i, boilerplate_value, SLOPPY);
}
}
@@ -593,6 +592,17 @@ void Expression::RecordToBooleanTypeFeedback(TypeFeedbackOracle* oracle) {
}
+int Call::ComputeFeedbackSlotCount(Isolate* isolate) {
+ CallType call_type = GetCallType(isolate);
+ if (call_type == LOOKUP_SLOT_CALL || call_type == OTHER_CALL) {
+ // Call only uses a slot in some cases.
+ return 1;
+ }
+
+ return 0;
+}
+
+
Call::CallType Call::GetCallType(Isolate* isolate) const {
VariableProxy* proxy = expression()->AsVariableProxy();
if (proxy != NULL) {
@@ -632,11 +642,14 @@ bool Call::ComputeGlobalTarget(Handle<GlobalObject> global,
void CallNew::RecordTypeFeedback(TypeFeedbackOracle* oracle) {
+ int allocation_site_feedback_slot = FLAG_pretenuring_call_new
+ ? AllocationSiteFeedbackSlot()
+ : CallNewFeedbackSlot();
allocation_site_ =
- oracle->GetCallNewAllocationSite(CallNewFeedbackId());
- is_monomorphic_ = oracle->CallNewIsMonomorphic(CallNewFeedbackId());
+ oracle->GetCallNewAllocationSite(allocation_site_feedback_slot);
+ is_monomorphic_ = oracle->CallNewIsMonomorphic(CallNewFeedbackSlot());
if (is_monomorphic_) {
- target_ = oracle->GetCallNewTarget(CallNewFeedbackId());
+ target_ = oracle->GetCallNewTarget(CallNewFeedbackSlot());
if (!allocation_site_.is_null()) {
elements_kind_ = allocation_site_->GetElementsKind();
}
@@ -1039,6 +1052,11 @@ CaseClause::CaseClause(Zone* zone,
void AstConstructionVisitor::Visit##NodeType(NodeType* node) { \
increase_node_count(); \
}
+#define REGULAR_NODE_WITH_FEEDBACK_SLOTS(NodeType) \
+ void AstConstructionVisitor::Visit##NodeType(NodeType* node) { \
+ increase_node_count(); \
+ add_slot_node(node); \
+ }
#define DONT_OPTIMIZE_NODE(NodeType) \
void AstConstructionVisitor::Visit##NodeType(NodeType* node) { \
increase_node_count(); \
@@ -1051,6 +1069,12 @@ CaseClause::CaseClause(Zone* zone,
increase_node_count(); \
add_flag(kDontSelfOptimize); \
}
+#define DONT_SELFOPTIMIZE_NODE_WITH_FEEDBACK_SLOTS(NodeType) \
+ void AstConstructionVisitor::Visit##NodeType(NodeType* node) { \
+ increase_node_count(); \
+ add_slot_node(node); \
+ add_flag(kDontSelfOptimize); \
+ }
#define DONT_CACHE_NODE(NodeType) \
void AstConstructionVisitor::Visit##NodeType(NodeType* node) { \
increase_node_count(); \
@@ -1085,8 +1109,8 @@ REGULAR_NODE(CountOperation)
REGULAR_NODE(BinaryOperation)
REGULAR_NODE(CompareOperation)
REGULAR_NODE(ThisFunction)
-REGULAR_NODE(Call)
-REGULAR_NODE(CallNew)
+REGULAR_NODE_WITH_FEEDBACK_SLOTS(Call)
+REGULAR_NODE_WITH_FEEDBACK_SLOTS(CallNew)
// In theory, for VariableProxy we'd have to add:
// if (node->var()->IsLookupSlot()) add_flag(kDontInline);
// But node->var() is usually not bound yet at VariableProxy creation time, and
@@ -1111,11 +1135,12 @@ DONT_OPTIMIZE_NODE(NativeFunctionLiteral)
DONT_SELFOPTIMIZE_NODE(DoWhileStatement)
DONT_SELFOPTIMIZE_NODE(WhileStatement)
DONT_SELFOPTIMIZE_NODE(ForStatement)
-DONT_SELFOPTIMIZE_NODE(ForInStatement)
+DONT_SELFOPTIMIZE_NODE_WITH_FEEDBACK_SLOTS(ForInStatement)
DONT_SELFOPTIMIZE_NODE(ForOfStatement)
DONT_CACHE_NODE(ModuleLiteral)
+
void AstConstructionVisitor::VisitCallRuntime(CallRuntime* node) {
increase_node_count();
if (node->is_jsruntime()) {
diff --git a/deps/v8/src/ast.h b/deps/v8/src/ast.h
index 2b33820f9e..c6ee71ed83 100644
--- a/deps/v8/src/ast.h
+++ b/deps/v8/src/ast.h
@@ -32,6 +32,7 @@
#include "assembler.h"
#include "factory.h"
+#include "feedback-slots.h"
#include "isolate.h"
#include "jsregexp.h"
#include "list-inl.h"
@@ -181,7 +182,7 @@ class AstProperties V8_FINAL BASE_EMBEDDED {
public:
class Flags : public EnumSet<AstPropertiesFlag, int> {};
- AstProperties() : node_count_(0) { }
+ AstProperties() : node_count_(0) {}
Flags* flags() { return &flags_; }
int node_count() { return node_count_; }
@@ -914,7 +915,8 @@ class ForEachStatement : public IterationStatement {
};
-class ForInStatement V8_FINAL : public ForEachStatement {
+class ForInStatement V8_FINAL : public ForEachStatement,
+ public FeedbackSlotInterface {
public:
DECLARE_NODE_TYPE(ForInStatement)
@@ -922,7 +924,16 @@ class ForInStatement V8_FINAL : public ForEachStatement {
return subject();
}
- TypeFeedbackId ForInFeedbackId() const { return reuse(PrepareId()); }
+ // Type feedback information.
+ virtual ComputablePhase GetComputablePhase() { return DURING_PARSE; }
+ virtual int ComputeFeedbackSlotCount(Isolate* isolate) { return 1; }
+ virtual void SetFirstFeedbackSlot(int slot) { for_in_feedback_slot_ = slot; }
+
+ int ForInFeedbackSlot() {
+ ASSERT(for_in_feedback_slot_ != kInvalidFeedbackSlot);
+ return for_in_feedback_slot_;
+ }
+
enum ForInType { FAST_FOR_IN, SLOW_FOR_IN };
ForInType for_in_type() const { return for_in_type_; }
void set_for_in_type(ForInType type) { for_in_type_ = type; }
@@ -936,11 +947,13 @@ class ForInStatement V8_FINAL : public ForEachStatement {
ForInStatement(Zone* zone, ZoneStringList* labels, int pos)
: ForEachStatement(zone, labels, pos),
for_in_type_(SLOW_FOR_IN),
+ for_in_feedback_slot_(kInvalidFeedbackSlot),
body_id_(GetNextId(zone)),
prepare_id_(GetNextId(zone)) {
}
ForInType for_in_type_;
+ int for_in_feedback_slot_;
const BailoutId body_id_;
const BailoutId prepare_id_;
};
@@ -1733,7 +1746,7 @@ class Property V8_FINAL : public Expression {
};
-class Call V8_FINAL : public Expression {
+class Call V8_FINAL : public Expression, public FeedbackSlotInterface {
public:
DECLARE_NODE_TYPE(Call)
@@ -1741,7 +1754,16 @@ class Call V8_FINAL : public Expression {
ZoneList<Expression*>* arguments() const { return arguments_; }
// Type feedback information.
- TypeFeedbackId CallFeedbackId() const { return reuse(id()); }
+ virtual ComputablePhase GetComputablePhase() { return AFTER_SCOPING; }
+ virtual int ComputeFeedbackSlotCount(Isolate* isolate);
+ virtual void SetFirstFeedbackSlot(int slot) {
+ call_feedback_slot_ = slot;
+ }
+
+ bool HasCallFeedbackSlot() const {
+ return call_feedback_slot_ != kInvalidFeedbackSlot;
+ }
+ int CallFeedbackSlot() const { return call_feedback_slot_; }
virtual SmallMapList* GetReceiverTypes() V8_OVERRIDE {
if (expression()->IsProperty()) {
@@ -1790,6 +1812,7 @@ class Call V8_FINAL : public Expression {
: Expression(zone, pos),
expression_(expression),
arguments_(arguments),
+ call_feedback_slot_(kInvalidFeedbackSlot),
return_id_(GetNextId(zone)) {
if (expression->IsProperty()) {
expression->AsProperty()->mark_for_call();
@@ -1802,12 +1825,13 @@ class Call V8_FINAL : public Expression {
Handle<JSFunction> target_;
Handle<Cell> cell_;
+ int call_feedback_slot_;
const BailoutId return_id_;
};
-class CallNew V8_FINAL : public Expression {
+class CallNew V8_FINAL : public Expression, public FeedbackSlotInterface {
public:
DECLARE_NODE_TYPE(CallNew)
@@ -1815,7 +1839,24 @@ class CallNew V8_FINAL : public Expression {
ZoneList<Expression*>* arguments() const { return arguments_; }
// Type feedback information.
- TypeFeedbackId CallNewFeedbackId() const { return reuse(id()); }
+ virtual ComputablePhase GetComputablePhase() { return DURING_PARSE; }
+ virtual int ComputeFeedbackSlotCount(Isolate* isolate) {
+ return FLAG_pretenuring_call_new ? 2 : 1;
+ }
+ virtual void SetFirstFeedbackSlot(int slot) {
+ callnew_feedback_slot_ = slot;
+ }
+
+ int CallNewFeedbackSlot() {
+ ASSERT(callnew_feedback_slot_ != kInvalidFeedbackSlot);
+ return callnew_feedback_slot_;
+ }
+ int AllocationSiteFeedbackSlot() {
+ ASSERT(callnew_feedback_slot_ != kInvalidFeedbackSlot);
+ ASSERT(FLAG_pretenuring_call_new);
+ return callnew_feedback_slot_ + 1;
+ }
+
void RecordTypeFeedback(TypeFeedbackOracle* oracle);
virtual bool IsMonomorphic() V8_OVERRIDE { return is_monomorphic_; }
Handle<JSFunction> target() const { return target_; }
@@ -1824,6 +1865,8 @@ class CallNew V8_FINAL : public Expression {
return allocation_site_;
}
+ static int feedback_slots() { return 1; }
+
BailoutId ReturnId() const { return return_id_; }
protected:
@@ -1836,6 +1879,7 @@ class CallNew V8_FINAL : public Expression {
arguments_(arguments),
is_monomorphic_(false),
elements_kind_(GetInitialFastElementsKind()),
+ callnew_feedback_slot_(kInvalidFeedbackSlot),
return_id_(GetNextId(zone)) { }
private:
@@ -1846,6 +1890,7 @@ class CallNew V8_FINAL : public Expression {
Handle<JSFunction> target_;
ElementsKind elements_kind_;
Handle<AllocationSite> allocation_site_;
+ int callnew_feedback_slot_;
const BailoutId return_id_;
};
@@ -2276,8 +2321,7 @@ class FunctionLiteral V8_FINAL : public Expression {
int SourceSize() const { return end_position() - start_position(); }
bool is_expression() const { return IsExpression::decode(bitfield_); }
bool is_anonymous() const { return IsAnonymous::decode(bitfield_); }
- bool is_classic_mode() const { return language_mode() == CLASSIC_MODE; }
- LanguageMode language_mode() const;
+ StrictMode strict_mode() const;
int materialized_literal_count() { return materialized_literal_count_; }
int expected_property_count() { return expected_property_count_; }
@@ -2332,7 +2376,15 @@ class FunctionLiteral V8_FINAL : public Expression {
void set_ast_properties(AstProperties* ast_properties) {
ast_properties_ = *ast_properties;
}
-
+ void set_slot_processor(DeferredFeedbackSlotProcessor* slot_processor) {
+ slot_processor_ = *slot_processor;
+ }
+ void ProcessFeedbackSlots(Isolate* isolate) {
+ slot_processor_.ProcessFeedbackSlots(isolate);
+ }
+ int slot_count() {
+ return slot_processor_.slot_count();
+ }
bool dont_optimize() { return dont_optimize_reason_ != kNoReason; }
BailoutReason dont_optimize_reason() { return dont_optimize_reason_; }
void set_dont_optimize_reason(BailoutReason reason) {
@@ -2382,6 +2434,7 @@ class FunctionLiteral V8_FINAL : public Expression {
ZoneList<Statement*>* body_;
Handle<String> inferred_name_;
AstProperties ast_properties_;
+ DeferredFeedbackSlotProcessor slot_processor_;
BailoutReason dont_optimize_reason_;
int materialized_literal_count_;
@@ -2856,10 +2909,13 @@ private: \
class AstConstructionVisitor BASE_EMBEDDED {
public:
- AstConstructionVisitor() : dont_optimize_reason_(kNoReason) { }
+ explicit AstConstructionVisitor(Zone* zone)
+ : dont_optimize_reason_(kNoReason),
+ zone_(zone) { }
AstProperties* ast_properties() { return &properties_; }
BailoutReason dont_optimize_reason() { return dont_optimize_reason_; }
+ DeferredFeedbackSlotProcessor* slot_processor() { return &slot_processor_; }
private:
template<class> friend class AstNodeFactory;
@@ -2876,13 +2932,21 @@ class AstConstructionVisitor BASE_EMBEDDED {
dont_optimize_reason_ = reason;
}
+ void add_slot_node(FeedbackSlotInterface* slot_node) {
+ slot_processor_.add_slot_node(zone_, slot_node);
+ }
+
AstProperties properties_;
+ DeferredFeedbackSlotProcessor slot_processor_;
BailoutReason dont_optimize_reason_;
+ Zone* zone_;
};
class AstNullVisitor BASE_EMBEDDED {
public:
+ explicit AstNullVisitor(Zone* zone) {}
+
// Node visitors.
#define DEF_VISIT(type) \
void Visit##type(type* node) {}
@@ -2898,7 +2962,9 @@ class AstNullVisitor BASE_EMBEDDED {
template<class Visitor>
class AstNodeFactory V8_FINAL BASE_EMBEDDED {
public:
- explicit AstNodeFactory(Zone* zone) : zone_(zone) { }
+ explicit AstNodeFactory(Zone* zone)
+ : zone_(zone),
+ visitor_(zone) { }
Visitor* visitor() { return &visitor_; }
diff --git a/deps/v8/src/atomicops.h b/deps/v8/src/atomicops.h
index 789721edfc..08be2a7d37 100644
--- a/deps/v8/src/atomicops.h
+++ b/deps/v8/src/atomicops.h
@@ -51,6 +51,15 @@
#include "../include/v8.h"
#include "globals.h"
+#if defined(_WIN32) && defined(V8_HOST_ARCH_64_BIT)
+// windows.h #defines this (only on x64). This causes problems because the
+// public API also uses MemoryBarrier at the public name for this fence. So, on
+// X64, undef it, and call its documented
+// (http://msdn.microsoft.com/en-us/library/windows/desktop/ms684208.aspx)
+// implementation directly.
+#undef MemoryBarrier
+#endif
+
namespace v8 {
namespace internal {
@@ -58,9 +67,7 @@ typedef int32_t Atomic32;
#ifdef V8_HOST_ARCH_64_BIT
// We need to be able to go between Atomic64 and AtomicWord implicitly. This
// means Atomic64 and AtomicWord should be the same type on 64-bit.
-#if defined(__ILP32__) || defined(__APPLE__)
-// MacOS is an exception to the implicit conversion rule above,
-// because it uses long for intptr_t.
+#if defined(__ILP32__)
typedef int64_t Atomic64;
#else
typedef intptr_t Atomic64;
@@ -69,11 +76,7 @@ typedef intptr_t Atomic64;
// Use AtomicWord for a machine-sized pointer. It will use the Atomic32 or
// Atomic64 routines below, depending on your architecture.
-#if defined(__OpenBSD__) && defined(__i386__)
-typedef Atomic32 AtomicWord;
-#else
typedef intptr_t AtomicWord;
-#endif
// Atomically execute:
// result = *ptr;
@@ -155,16 +158,24 @@ Atomic64 Release_Load(volatile const Atomic64* ptr);
#include "atomicops_internals_tsan.h"
#elif defined(_MSC_VER) && (V8_HOST_ARCH_IA32 || V8_HOST_ARCH_X64)
#include "atomicops_internals_x86_msvc.h"
-#elif defined(__APPLE__) && (V8_HOST_ARCH_IA32 || V8_HOST_ARCH_X64)
-#include "atomicops_internals_x86_macosx.h"
-#elif defined(__GNUC__) && (V8_HOST_ARCH_IA32 || V8_HOST_ARCH_X64)
-#include "atomicops_internals_x86_gcc.h"
+#elif defined(__APPLE__)
+#include "atomicops_internals_mac.h"
+#elif defined(__GNUC__) && V8_HOST_ARCH_ARM64
+#include "atomicops_internals_arm64_gcc.h"
#elif defined(__GNUC__) && V8_HOST_ARCH_ARM
#include "atomicops_internals_arm_gcc.h"
+#elif defined(__GNUC__) && (V8_HOST_ARCH_IA32 || V8_HOST_ARCH_X64)
+#include "atomicops_internals_x86_gcc.h"
#elif defined(__GNUC__) && V8_HOST_ARCH_MIPS
#include "atomicops_internals_mips_gcc.h"
#else
#error "Atomic operations are not supported on your platform"
#endif
+// On some platforms we need additional declarations to make
+// AtomicWord compatible with our other Atomic* types.
+#if defined(__APPLE__) || defined(__OpenBSD__)
+#include "atomicops_internals_atomicword_compat.h"
+#endif
+
#endif // V8_ATOMICOPS_H_
diff --git a/deps/v8/src/atomicops_internals_arm64_gcc.h b/deps/v8/src/atomicops_internals_arm64_gcc.h
new file mode 100644
index 0000000000..e6cac19932
--- /dev/null
+++ b/deps/v8/src/atomicops_internals_arm64_gcc.h
@@ -0,0 +1,372 @@
+// Copyright 2012 the V8 project authors. All rights reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following
+// disclaimer in the documentation and/or other materials provided
+// with the distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived
+// from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+// This file is an internal atomic implementation, use atomicops.h instead.
+
+#ifndef V8_ATOMICOPS_INTERNALS_ARM64_GCC_H_
+#define V8_ATOMICOPS_INTERNALS_ARM64_GCC_H_
+
+namespace v8 {
+namespace internal {
+
+inline void MemoryBarrier() {
+ __asm__ __volatile__ ( // NOLINT
+ "dmb ish \n\t" // Data memory barrier.
+ ::: "memory"
+ ); // NOLINT
+}
+
+
+inline Atomic32 NoBarrier_CompareAndSwap(volatile Atomic32* ptr,
+ Atomic32 old_value,
+ Atomic32 new_value) {
+ Atomic32 prev;
+ int32_t temp;
+
+ __asm__ __volatile__ ( // NOLINT
+ "0: \n\t"
+ "ldxr %w[prev], %[ptr] \n\t" // Load the previous value.
+ "cmp %w[prev], %w[old_value] \n\t"
+ "bne 1f \n\t"
+ "stxr %w[temp], %w[new_value], %[ptr] \n\t" // Try to store the new value.
+ "cbnz %w[temp], 0b \n\t" // Retry if it did not work.
+ "1: \n\t"
+ "clrex \n\t" // In case we didn't swap.
+ : [prev]"=&r" (prev),
+ [temp]"=&r" (temp),
+ [ptr]"+Q" (*ptr)
+ : [old_value]"r" (old_value),
+ [new_value]"r" (new_value)
+ : "memory", "cc"
+ ); // NOLINT
+
+ return prev;
+}
+
+inline Atomic32 NoBarrier_AtomicExchange(volatile Atomic32* ptr,
+ Atomic32 new_value) {
+ Atomic32 result;
+ int32_t temp;
+
+ __asm__ __volatile__ ( // NOLINT
+ "0: \n\t"
+ "ldxr %w[result], %[ptr] \n\t" // Load the previous value.
+ "stxr %w[temp], %w[new_value], %[ptr] \n\t" // Try to store the new value.
+ "cbnz %w[temp], 0b \n\t" // Retry if it did not work.
+ : [result]"=&r" (result),
+ [temp]"=&r" (temp),
+ [ptr]"+Q" (*ptr)
+ : [new_value]"r" (new_value)
+ : "memory"
+ ); // NOLINT
+
+ return result;
+}
+
+inline Atomic32 NoBarrier_AtomicIncrement(volatile Atomic32* ptr,
+ Atomic32 increment) {
+ Atomic32 result;
+ int32_t temp;
+
+ __asm__ __volatile__ ( // NOLINT
+ "0: \n\t"
+ "ldxr %w[result], %[ptr] \n\t" // Load the previous value.
+ "add %w[result], %w[result], %w[increment]\n\t"
+ "stxr %w[temp], %w[result], %[ptr] \n\t" // Try to store the result.
+ "cbnz %w[temp], 0b \n\t" // Retry on failure.
+ : [result]"=&r" (result),
+ [temp]"=&r" (temp),
+ [ptr]"+Q" (*ptr)
+ : [increment]"r" (increment)
+ : "memory"
+ ); // NOLINT
+
+ return result;
+}
+
+inline Atomic32 Barrier_AtomicIncrement(volatile Atomic32* ptr,
+ Atomic32 increment) {
+ MemoryBarrier();
+ Atomic32 result = NoBarrier_AtomicIncrement(ptr, increment);
+ MemoryBarrier();
+
+ return result;
+}
+
+inline Atomic32 Acquire_CompareAndSwap(volatile Atomic32* ptr,
+ Atomic32 old_value,
+ Atomic32 new_value) {
+ Atomic32 prev;
+ int32_t temp;
+
+ __asm__ __volatile__ ( // NOLINT
+ "0: \n\t"
+ "ldxr %w[prev], %[ptr] \n\t" // Load the previous value.
+ "cmp %w[prev], %w[old_value] \n\t"
+ "bne 1f \n\t"
+ "stxr %w[temp], %w[new_value], %[ptr] \n\t" // Try to store the new value.
+ "cbnz %w[temp], 0b \n\t" // Retry if it did not work.
+ "dmb ish \n\t" // Data memory barrier.
+ "1: \n\t"
+ // If the compare failed the 'dmb' is unnecessary, but we still need a
+ // 'clrex'.
+ "clrex \n\t"
+ : [prev]"=&r" (prev),
+ [temp]"=&r" (temp),
+ [ptr]"+Q" (*ptr)
+ : [old_value]"r" (old_value),
+ [new_value]"r" (new_value)
+ : "memory", "cc"
+ ); // NOLINT
+
+ return prev;
+}
+
+inline Atomic32 Release_CompareAndSwap(volatile Atomic32* ptr,
+ Atomic32 old_value,
+ Atomic32 new_value) {
+ Atomic32 prev;
+ int32_t temp;
+
+ MemoryBarrier();
+
+ __asm__ __volatile__ ( // NOLINT
+ "0: \n\t"
+ "ldxr %w[prev], %[ptr] \n\t" // Load the previous value.
+ "cmp %w[prev], %w[old_value] \n\t"
+ "bne 1f \n\t"
+ "stxr %w[temp], %w[new_value], %[ptr] \n\t" // Try to store the new value.
+ "cbnz %w[temp], 0b \n\t" // Retry if it did not work.
+ "1: \n\t"
+ // If the compare failed we still need a 'clrex'.
+ "clrex \n\t"
+ : [prev]"=&r" (prev),
+ [temp]"=&r" (temp),
+ [ptr]"+Q" (*ptr)
+ : [old_value]"r" (old_value),
+ [new_value]"r" (new_value)
+ : "memory", "cc"
+ ); // NOLINT
+
+ return prev;
+}
+
+inline void NoBarrier_Store(volatile Atomic32* ptr, Atomic32 value) {
+ *ptr = value;
+}
+
+inline void Acquire_Store(volatile Atomic32* ptr, Atomic32 value) {
+ *ptr = value;
+ MemoryBarrier();
+}
+
+inline void Release_Store(volatile Atomic32* ptr, Atomic32 value) {
+ MemoryBarrier();
+ *ptr = value;
+}
+
+inline Atomic32 NoBarrier_Load(volatile const Atomic32* ptr) {
+ return *ptr;
+}
+
+inline Atomic32 Acquire_Load(volatile const Atomic32* ptr) {
+ Atomic32 value = *ptr;
+ MemoryBarrier();
+ return value;
+}
+
+inline Atomic32 Release_Load(volatile const Atomic32* ptr) {
+ MemoryBarrier();
+ return *ptr;
+}
+
+// 64-bit versions of the operations.
+// See the 32-bit versions for comments.
+
+inline Atomic64 NoBarrier_CompareAndSwap(volatile Atomic64* ptr,
+ Atomic64 old_value,
+ Atomic64 new_value) {
+ Atomic64 prev;
+ int32_t temp;
+
+ __asm__ __volatile__ ( // NOLINT
+ "0: \n\t"
+ "ldxr %[prev], %[ptr] \n\t"
+ "cmp %[prev], %[old_value] \n\t"
+ "bne 1f \n\t"
+ "stxr %w[temp], %[new_value], %[ptr] \n\t"
+ "cbnz %w[temp], 0b \n\t"
+ "1: \n\t"
+ "clrex \n\t"
+ : [prev]"=&r" (prev),
+ [temp]"=&r" (temp),
+ [ptr]"+Q" (*ptr)
+ : [old_value]"r" (old_value),
+ [new_value]"r" (new_value)
+ : "memory", "cc"
+ ); // NOLINT
+
+ return prev;
+}
+
+inline Atomic64 NoBarrier_AtomicExchange(volatile Atomic64* ptr,
+ Atomic64 new_value) {
+ Atomic64 result;
+ int32_t temp;
+
+ __asm__ __volatile__ ( // NOLINT
+ "0: \n\t"
+ "ldxr %[result], %[ptr] \n\t"
+ "stxr %w[temp], %[new_value], %[ptr] \n\t"
+ "cbnz %w[temp], 0b \n\t"
+ : [result]"=&r" (result),
+ [temp]"=&r" (temp),
+ [ptr]"+Q" (*ptr)
+ : [new_value]"r" (new_value)
+ : "memory"
+ ); // NOLINT
+
+ return result;
+}
+
+inline Atomic64 NoBarrier_AtomicIncrement(volatile Atomic64* ptr,
+ Atomic64 increment) {
+ Atomic64 result;
+ int32_t temp;
+
+ __asm__ __volatile__ ( // NOLINT
+ "0: \n\t"
+ "ldxr %[result], %[ptr] \n\t"
+ "add %[result], %[result], %[increment] \n\t"
+ "stxr %w[temp], %[result], %[ptr] \n\t"
+ "cbnz %w[temp], 0b \n\t"
+ : [result]"=&r" (result),
+ [temp]"=&r" (temp),
+ [ptr]"+Q" (*ptr)
+ : [increment]"r" (increment)
+ : "memory"
+ ); // NOLINT
+
+ return result;
+}
+
+inline Atomic64 Barrier_AtomicIncrement(volatile Atomic64* ptr,
+ Atomic64 increment) {
+ MemoryBarrier();
+ Atomic64 result = NoBarrier_AtomicIncrement(ptr, increment);
+ MemoryBarrier();
+
+ return result;
+}
+
+inline Atomic64 Acquire_CompareAndSwap(volatile Atomic64* ptr,
+ Atomic64 old_value,
+ Atomic64 new_value) {
+ Atomic64 prev;
+ int32_t temp;
+
+ __asm__ __volatile__ ( // NOLINT
+ "0: \n\t"
+ "ldxr %[prev], %[ptr] \n\t"
+ "cmp %[prev], %[old_value] \n\t"
+ "bne 1f \n\t"
+ "stxr %w[temp], %[new_value], %[ptr] \n\t"
+ "cbnz %w[temp], 0b \n\t"
+ "dmb ish \n\t"
+ "1: \n\t"
+ "clrex \n\t"
+ : [prev]"=&r" (prev),
+ [temp]"=&r" (temp),
+ [ptr]"+Q" (*ptr)
+ : [old_value]"r" (old_value),
+ [new_value]"r" (new_value)
+ : "memory", "cc"
+ ); // NOLINT
+
+ return prev;
+}
+
+inline Atomic64 Release_CompareAndSwap(volatile Atomic64* ptr,
+ Atomic64 old_value,
+ Atomic64 new_value) {
+ Atomic64 prev;
+ int32_t temp;
+
+ MemoryBarrier();
+
+ __asm__ __volatile__ ( // NOLINT
+ "0: \n\t"
+ "ldxr %[prev], %[ptr] \n\t"
+ "cmp %[prev], %[old_value] \n\t"
+ "bne 1f \n\t"
+ "stxr %w[temp], %[new_value], %[ptr] \n\t"
+ "cbnz %w[temp], 0b \n\t"
+ "1: \n\t"
+ "clrex \n\t"
+ : [prev]"=&r" (prev),
+ [temp]"=&r" (temp),
+ [ptr]"+Q" (*ptr)
+ : [old_value]"r" (old_value),
+ [new_value]"r" (new_value)
+ : "memory", "cc"
+ ); // NOLINT
+
+ return prev;
+}
+
+inline void NoBarrier_Store(volatile Atomic64* ptr, Atomic64 value) {
+ *ptr = value;
+}
+
+inline void Acquire_Store(volatile Atomic64* ptr, Atomic64 value) {
+ *ptr = value;
+ MemoryBarrier();
+}
+
+inline void Release_Store(volatile Atomic64* ptr, Atomic64 value) {
+ MemoryBarrier();
+ *ptr = value;
+}
+
+inline Atomic64 NoBarrier_Load(volatile const Atomic64* ptr) {
+ return *ptr;
+}
+
+inline Atomic64 Acquire_Load(volatile const Atomic64* ptr) {
+ Atomic64 value = *ptr;
+ MemoryBarrier();
+ return value;
+}
+
+inline Atomic64 Release_Load(volatile const Atomic64* ptr) {
+ MemoryBarrier();
+ return *ptr;
+}
+
+} } // namespace v8::internal
+
+#endif // V8_ATOMICOPS_INTERNALS_ARM_GCC_H_
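
For comparison only (not part of this patch): the LDXR/ADD/STXR retry loops above hand-code relaxed ("no barrier") read-modify-write operations. Below is a minimal sketch of the same semantics using the GCC/Clang __atomic builtins, which lower to an equivalent exclusive-load/store loop on ARMv8; RelaxedIncrementSketch is an illustrative name and the Atomic32 typedef is assumed to mirror atomicops.h.

#include <stdint.h>

typedef int32_t Atomic32;  // assumed to mirror atomicops.h

// Returns the incremented value, matching NoBarrier_AtomicIncrement above.
// __ATOMIC_RELAXED requests no ordering, i.e. "no barrier" semantics.
static inline Atomic32 RelaxedIncrementSketch(volatile Atomic32* ptr,
                                              Atomic32 increment) {
  return __atomic_add_fetch(ptr, increment, __ATOMIC_RELAXED);
}
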
diff --git a/deps/v8/src/atomicops_internals_arm_gcc.h b/deps/v8/src/atomicops_internals_arm_gcc.h
index 6c30256d93..918920d02a 100644
--- a/deps/v8/src/atomicops_internals_arm_gcc.h
+++ b/deps/v8/src/atomicops_internals_arm_gcc.h
@@ -32,46 +32,197 @@
#ifndef V8_ATOMICOPS_INTERNALS_ARM_GCC_H_
#define V8_ATOMICOPS_INTERNALS_ARM_GCC_H_
+#if defined(__QNXNTO__)
+#include <sys/cpuinline.h>
+#endif
+
namespace v8 {
namespace internal {
-// 0xffff0fc0 is the hard coded address of a function provided by
-// the kernel which implements an atomic compare-exchange. On older
-// ARM architecture revisions (pre-v6) this may be implemented using
-// a syscall. This address is stable, and in active use (hard coded)
-// by at least glibc-2.7 and the Android C library.
-typedef Atomic32 (*LinuxKernelCmpxchgFunc)(Atomic32 old_value,
- Atomic32 new_value,
- volatile Atomic32* ptr);
-LinuxKernelCmpxchgFunc pLinuxKernelCmpxchg __attribute__((weak)) =
- (LinuxKernelCmpxchgFunc) 0xffff0fc0;
+// Memory barriers on ARM are funky, but the kernel is here to help:
+//
+// * ARMv5 didn't support SMP, so there is no memory barrier instruction
+//   at all on this architecture, or when targeting its machine code.
+//
+// * Some ARMv6 CPUs support SMP. A full memory barrier can be produced by
+// writing a random value to a very specific coprocessor register.
+//
+// * On ARMv7, the "dmb" instruction is used to perform a full memory
+// barrier (though writing to the co-processor will still work).
+// However, on single core devices (e.g. Nexus One, or Nexus S),
+// this instruction will take up to 200 ns, which is huge, even though
+//   it's completely unneeded on these devices.
+//
+// * There is no easy way to determine at runtime if the device is
+// single or multi-core. However, the kernel provides a useful helper
+// function at a fixed memory address (0xffff0fa0), which will always
+// perform a memory barrier in the most efficient way. I.e. on single
+// core devices, this is an empty function that exits immediately.
+// On multi-core devices, it implements a full memory barrier.
+//
+// * This source could be compiled to ARMv5 machine code that runs on a
+// multi-core ARMv6 or ARMv7 device. In this case, memory barriers
+// are needed for correct execution. Always call the kernel helper, even
+// when targeting ARMv5TE.
+//
-typedef void (*LinuxKernelMemoryBarrierFunc)(void);
-LinuxKernelMemoryBarrierFunc pLinuxKernelMemoryBarrier __attribute__((weak)) =
- (LinuxKernelMemoryBarrierFunc) 0xffff0fa0;
+inline void MemoryBarrier() {
+#if defined(__linux__) || defined(__ANDROID__)
+ // Note: This is a function call, which is also an implicit compiler barrier.
+ typedef void (*KernelMemoryBarrierFunc)();
+ ((KernelMemoryBarrierFunc)0xffff0fa0)();
+#elif defined(__QNXNTO__)
+ __cpu_membarrier();
+#else
+#error MemoryBarrier() is not implemented on this platform.
+#endif
+}
+// An ARM toolchain would only define one of these depending on which
+// variant of the target architecture is being used. This tests against
+// any known ARMv6 or ARMv7 variant, where it is possible to directly
+// use ldrex/strex instructions to implement fast atomic operations.
+#if defined(__ARM_ARCH_7__) || defined(__ARM_ARCH_7A__) || \
+ defined(__ARM_ARCH_7R__) || defined(__ARM_ARCH_7M__) || \
+ defined(__ARM_ARCH_6__) || defined(__ARM_ARCH_6J__) || \
+ defined(__ARM_ARCH_6K__) || defined(__ARM_ARCH_6Z__) || \
+ defined(__ARM_ARCH_6KZ__) || defined(__ARM_ARCH_6T2__)
inline Atomic32 NoBarrier_CompareAndSwap(volatile Atomic32* ptr,
Atomic32 old_value,
Atomic32 new_value) {
- Atomic32 prev_value = *ptr;
+ Atomic32 prev_value;
+ int reloop;
do {
- if (!pLinuxKernelCmpxchg(old_value, new_value,
- const_cast<Atomic32*>(ptr))) {
- return old_value;
- }
- prev_value = *ptr;
- } while (prev_value == old_value);
+ // The following is equivalent to:
+ //
+ // prev_value = LDREX(ptr)
+ // reloop = 0
+ // if (prev_value != old_value)
+ // reloop = STREX(ptr, new_value)
+ __asm__ __volatile__(" ldrex %0, [%3]\n"
+ " mov %1, #0\n"
+ " cmp %0, %4\n"
+#ifdef __thumb2__
+ " it eq\n"
+#endif
+ " strexeq %1, %5, [%3]\n"
+ : "=&r"(prev_value), "=&r"(reloop), "+m"(*ptr)
+ : "r"(ptr), "r"(old_value), "r"(new_value)
+ : "cc", "memory");
+ } while (reloop != 0);
return prev_value;
}
+inline Atomic32 Acquire_CompareAndSwap(volatile Atomic32* ptr,
+ Atomic32 old_value,
+ Atomic32 new_value) {
+ Atomic32 result = NoBarrier_CompareAndSwap(ptr, old_value, new_value);
+ MemoryBarrier();
+ return result;
+}
+
+inline Atomic32 Release_CompareAndSwap(volatile Atomic32* ptr,
+ Atomic32 old_value,
+ Atomic32 new_value) {
+ MemoryBarrier();
+ return NoBarrier_CompareAndSwap(ptr, old_value, new_value);
+}
+
+inline Atomic32 NoBarrier_AtomicIncrement(volatile Atomic32* ptr,
+ Atomic32 increment) {
+ Atomic32 value;
+ int reloop;
+ do {
+ // Equivalent to:
+ //
+ // value = LDREX(ptr)
+ // value += increment
+ // reloop = STREX(ptr, value)
+ //
+ __asm__ __volatile__(" ldrex %0, [%3]\n"
+ " add %0, %0, %4\n"
+ " strex %1, %0, [%3]\n"
+ : "=&r"(value), "=&r"(reloop), "+m"(*ptr)
+ : "r"(ptr), "r"(increment)
+ : "cc", "memory");
+ } while (reloop);
+ return value;
+}
+
+inline Atomic32 Barrier_AtomicIncrement(volatile Atomic32* ptr,
+ Atomic32 increment) {
+ // TODO(digit): Investigate if it's possible to implement this with
+ // a single MemoryBarrier() operation between the LDREX and STREX.
+ // See http://crbug.com/246514
+ MemoryBarrier();
+ Atomic32 result = NoBarrier_AtomicIncrement(ptr, increment);
+ MemoryBarrier();
+ return result;
+}
+
+inline Atomic32 NoBarrier_AtomicExchange(volatile Atomic32* ptr,
+ Atomic32 new_value) {
+ Atomic32 old_value;
+ int reloop;
+ do {
+ // old_value = LDREX(ptr)
+ // reloop = STREX(ptr, new_value)
+ __asm__ __volatile__(" ldrex %0, [%3]\n"
+ " strex %1, %4, [%3]\n"
+ : "=&r"(old_value), "=&r"(reloop), "+m"(*ptr)
+ : "r"(ptr), "r"(new_value)
+ : "cc", "memory");
+ } while (reloop != 0);
+ return old_value;
+}
+
+// This tests against any known ARMv5 variant.
+#elif defined(__ARM_ARCH_5__) || defined(__ARM_ARCH_5T__) || \
+ defined(__ARM_ARCH_5TE__) || defined(__ARM_ARCH_5TEJ__)
+
+// The kernel also provides a helper function to perform an atomic
+// compare-and-swap operation at the hard-wired address 0xffff0fc0.
+// On ARMv5, this is implemented by a special code path that the kernel
+// detects and treats specially when thread pre-emption happens.
+// On ARMv6 and higher, it uses LDREX/STREX instructions instead.
+//
+// Note that this always performs a full memory barrier, so there is no
+// need to add calls to MemoryBarrier() before or after it. It also
+// returns 0 on success and non-zero on failure.
+//
+// Available and reliable since Linux 2.6.24. Both Android and ChromeOS
+// use newer kernel revisions, so this should not be a concern.
+namespace {
+
+inline int LinuxKernelCmpxchg(Atomic32 old_value,
+ Atomic32 new_value,
+ volatile Atomic32* ptr) {
+ typedef int (*KernelCmpxchgFunc)(Atomic32, Atomic32, volatile Atomic32*);
+ return ((KernelCmpxchgFunc)0xffff0fc0)(old_value, new_value, ptr);
+}
+
+} // namespace
+
+inline Atomic32 NoBarrier_CompareAndSwap(volatile Atomic32* ptr,
+ Atomic32 old_value,
+ Atomic32 new_value) {
+ Atomic32 prev_value;
+ for (;;) {
+ prev_value = *ptr;
+ if (prev_value != old_value)
+ return prev_value;
+ if (!LinuxKernelCmpxchg(old_value, new_value, ptr))
+ return old_value;
+ }
+}
+
inline Atomic32 NoBarrier_AtomicExchange(volatile Atomic32* ptr,
Atomic32 new_value) {
Atomic32 old_value;
do {
old_value = *ptr;
- } while (pLinuxKernelCmpxchg(old_value, new_value,
- const_cast<Atomic32*>(ptr)));
+ } while (LinuxKernelCmpxchg(old_value, new_value, ptr));
return old_value;
}
@@ -86,8 +237,7 @@ inline Atomic32 Barrier_AtomicIncrement(volatile Atomic32* ptr,
// Atomic exchange the old value with an incremented one.
Atomic32 old_value = *ptr;
Atomic32 new_value = old_value + increment;
- if (pLinuxKernelCmpxchg(old_value, new_value,
- const_cast<Atomic32*>(ptr)) == 0) {
+ if (!LinuxKernelCmpxchg(old_value, new_value, ptr)) {
// The exchange took place as expected.
return new_value;
}
@@ -98,23 +248,46 @@ inline Atomic32 Barrier_AtomicIncrement(volatile Atomic32* ptr,
inline Atomic32 Acquire_CompareAndSwap(volatile Atomic32* ptr,
Atomic32 old_value,
Atomic32 new_value) {
- return NoBarrier_CompareAndSwap(ptr, old_value, new_value);
+ Atomic32 prev_value;
+ for (;;) {
+ prev_value = *ptr;
+ if (prev_value != old_value) {
+ // Always ensure acquire semantics.
+ MemoryBarrier();
+ return prev_value;
+ }
+ if (!LinuxKernelCmpxchg(old_value, new_value, ptr))
+ return old_value;
+ }
}
inline Atomic32 Release_CompareAndSwap(volatile Atomic32* ptr,
Atomic32 old_value,
Atomic32 new_value) {
- return NoBarrier_CompareAndSwap(ptr, old_value, new_value);
+ // This could be implemented as:
+ // MemoryBarrier();
+ // return NoBarrier_CompareAndSwap();
+ //
+  // But that would use 3 barriers per successful CAS. To improve performance,
+  // use Acquire_CompareAndSwap(). Its implementation guarantees that:
+  // - A successful swap uses only 2 barriers (in the kernel helper).
+ // - An early return due to (prev_value != old_value) performs
+ // a memory barrier with no store, which is equivalent to the
+ // generic implementation above.
+ return Acquire_CompareAndSwap(ptr, old_value, new_value);
}
+#else
+# error "Your CPU's ARM architecture is not supported yet"
+#endif
+
+// NOTE: Atomicity of the following load and store operations is only
+// guaranteed in case of 32-bit alignment of |ptr| values.
+
inline void NoBarrier_Store(volatile Atomic32* ptr, Atomic32 value) {
*ptr = value;
}
-inline void MemoryBarrier() {
- pLinuxKernelMemoryBarrier();
-}
-
inline void Acquire_Store(volatile Atomic32* ptr, Atomic32 value) {
*ptr = value;
MemoryBarrier();
@@ -125,9 +298,7 @@ inline void Release_Store(volatile Atomic32* ptr, Atomic32 value) {
*ptr = value;
}
-inline Atomic32 NoBarrier_Load(volatile const Atomic32* ptr) {
- return *ptr;
-}
+inline Atomic32 NoBarrier_Load(volatile const Atomic32* ptr) { return *ptr; }
inline Atomic32 Acquire_Load(volatile const Atomic32* ptr) {
Atomic32 value = *ptr;
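
The primitives in this header compose in the usual acquire/release pattern. Below is a hedged usage sketch, not part of this patch, built only on the operations defined above; SpinLockSketch is an illustrative name, not a V8 type.

#include "atomicops.h"  // for Atomic32, Acquire_CompareAndSwap, Release_Store

namespace v8 {
namespace internal {

class SpinLockSketch {
 public:
  SpinLockSketch() : state_(0) {}
  void Lock() {
    // Acquire_CompareAndSwap returns the previous value; 0 means the lock was
    // taken, and the acquire barrier orders the critical section after it.
    while (Acquire_CompareAndSwap(&state_, 0, 1) != 0) {
      // Spin. A real lock would yield or back off here.
    }
  }
  void Unlock() {
    // Release_Store publishes the critical section's writes before the lock
    // is observed as free again.
    Release_Store(&state_, 0);
  }
 private:
  volatile Atomic32 state_;
};

}  // namespace internal
}  // namespace v8
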
diff --git a/deps/v8/src/atomicops_internals_atomicword_compat.h b/deps/v8/src/atomicops_internals_atomicword_compat.h
new file mode 100644
index 0000000000..5934f70689
--- /dev/null
+++ b/deps/v8/src/atomicops_internals_atomicword_compat.h
@@ -0,0 +1,122 @@
+// Copyright 2014 the V8 project authors. All rights reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following
+// disclaimer in the documentation and/or other materials provided
+// with the distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived
+// from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+// This file is an internal atomic implementation, use atomicops.h instead.
+
+#ifndef V8_ATOMICOPS_INTERNALS_ATOMICWORD_COMPAT_H_
+#define V8_ATOMICOPS_INTERNALS_ATOMICWORD_COMPAT_H_
+
+// AtomicWord is a synonym for intptr_t, and Atomic32 is a synonym for int32,
+// which in turn means int. On some LP32 platforms, intptr_t is an int, but
+// on others, it's a long. When AtomicWord and Atomic32 are based on different
+// fundamental types, their pointers are incompatible.
+//
+// This file defines function overloads to allow both AtomicWord and Atomic32
+// data to be used with this interface.
+//
+// On LP64 platforms, AtomicWord and Atomic64 are both always long,
+// so this problem doesn't occur.
+
+#if !defined(V8_HOST_ARCH_64_BIT)
+
+namespace v8 {
+namespace internal {
+
+inline AtomicWord NoBarrier_CompareAndSwap(volatile AtomicWord* ptr,
+ AtomicWord old_value,
+ AtomicWord new_value) {
+ return NoBarrier_CompareAndSwap(
+ reinterpret_cast<volatile Atomic32*>(ptr), old_value, new_value);
+}
+
+inline AtomicWord NoBarrier_AtomicExchange(volatile AtomicWord* ptr,
+ AtomicWord new_value) {
+ return NoBarrier_AtomicExchange(
+ reinterpret_cast<volatile Atomic32*>(ptr), new_value);
+}
+
+inline AtomicWord NoBarrier_AtomicIncrement(volatile AtomicWord* ptr,
+ AtomicWord increment) {
+ return NoBarrier_AtomicIncrement(
+ reinterpret_cast<volatile Atomic32*>(ptr), increment);
+}
+
+inline AtomicWord Barrier_AtomicIncrement(volatile AtomicWord* ptr,
+ AtomicWord increment) {
+ return Barrier_AtomicIncrement(
+ reinterpret_cast<volatile Atomic32*>(ptr), increment);
+}
+
+inline AtomicWord Acquire_CompareAndSwap(volatile AtomicWord* ptr,
+ AtomicWord old_value,
+ AtomicWord new_value) {
+ return v8::internal::Acquire_CompareAndSwap(
+ reinterpret_cast<volatile Atomic32*>(ptr), old_value, new_value);
+}
+
+inline AtomicWord Release_CompareAndSwap(volatile AtomicWord* ptr,
+ AtomicWord old_value,
+ AtomicWord new_value) {
+ return v8::internal::Release_CompareAndSwap(
+ reinterpret_cast<volatile Atomic32*>(ptr), old_value, new_value);
+}
+
+inline void NoBarrier_Store(volatile AtomicWord *ptr, AtomicWord value) {
+ NoBarrier_Store(
+ reinterpret_cast<volatile Atomic32*>(ptr), value);
+}
+
+inline void Acquire_Store(volatile AtomicWord* ptr, AtomicWord value) {
+ return v8::internal::Acquire_Store(
+ reinterpret_cast<volatile Atomic32*>(ptr), value);
+}
+
+inline void Release_Store(volatile AtomicWord* ptr, AtomicWord value) {
+ return v8::internal::Release_Store(
+ reinterpret_cast<volatile Atomic32*>(ptr), value);
+}
+
+inline AtomicWord NoBarrier_Load(volatile const AtomicWord *ptr) {
+ return NoBarrier_Load(
+ reinterpret_cast<volatile const Atomic32*>(ptr));
+}
+
+inline AtomicWord Acquire_Load(volatile const AtomicWord* ptr) {
+ return v8::internal::Acquire_Load(
+ reinterpret_cast<volatile const Atomic32*>(ptr));
+}
+
+inline AtomicWord Release_Load(volatile const AtomicWord* ptr) {
+ return v8::internal::Release_Load(
+ reinterpret_cast<volatile const Atomic32*>(ptr));
+}
+
+} } // namespace v8::internal
+
+#endif // !defined(V8_HOST_ARCH_64_BIT)
+
+#endif // V8_ATOMICOPS_INTERNALS_ATOMICWORD_COMPAT_H_
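
These overloads are only sound because, on a 32-bit host, AtomicWord (intptr_t) and Atomic32 share size and representation even when they are distinct C++ types (int vs. long). A minimal sketch of that assumption as a C++98-style compile-time check, not part of this patch; the typedefs inside the sketch namespace are illustrative.

#include <stdint.h>

namespace atomicword_compat_sketch {

typedef int32_t Atomic32;     // assumed to mirror atomicops.h
typedef intptr_t AtomicWord;  // assumed to mirror atomicops.h

#if UINTPTR_MAX == UINT32_MAX  // 32-bit host, the case the overloads target.
// Fails to compile (negative array size) on any 32-bit target where the
// reinterpret_casts above would not be representation-preserving.
typedef char AtomicWordSizeCheck[
    sizeof(AtomicWord) == sizeof(Atomic32) ? 1 : -1];
#endif

}  // namespace atomicword_compat_sketch
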
diff --git a/deps/v8/src/atomicops_internals_x86_macosx.h b/deps/v8/src/atomicops_internals_mac.h
index bfb02b3851..4bd0c09bdf 100644
--- a/deps/v8/src/atomicops_internals_x86_macosx.h
+++ b/deps/v8/src/atomicops_internals_mac.h
@@ -27,8 +27,8 @@
// This file is an internal atomic implementation, use atomicops.h instead.
-#ifndef V8_ATOMICOPS_INTERNALS_X86_MACOSX_H_
-#define V8_ATOMICOPS_INTERNALS_X86_MACOSX_H_
+#ifndef V8_ATOMICOPS_INTERNALS_MAC_H_
+#define V8_ATOMICOPS_INTERNALS_MAC_H_
#include <libkern/OSAtomic.h>
@@ -65,7 +65,7 @@ inline Atomic32 NoBarrier_AtomicIncrement(volatile Atomic32* ptr,
}
inline Atomic32 Barrier_AtomicIncrement(volatile Atomic32* ptr,
- Atomic32 increment) {
+ Atomic32 increment) {
return OSAtomicAdd32Barrier(increment, const_cast<Atomic32*>(ptr));
}
@@ -132,7 +132,7 @@ inline Atomic64 NoBarrier_CompareAndSwap(volatile Atomic64* ptr,
Atomic64 prev_value;
do {
if (OSAtomicCompareAndSwap64(old_value, new_value,
- const_cast<Atomic64*>(ptr))) {
+ reinterpret_cast<volatile int64_t*>(ptr))) {
return old_value;
}
prev_value = *ptr;
@@ -146,18 +146,19 @@ inline Atomic64 NoBarrier_AtomicExchange(volatile Atomic64* ptr,
do {
old_value = *ptr;
} while (!OSAtomicCompareAndSwap64(old_value, new_value,
- const_cast<Atomic64*>(ptr)));
+ reinterpret_cast<volatile int64_t*>(ptr)));
return old_value;
}
inline Atomic64 NoBarrier_AtomicIncrement(volatile Atomic64* ptr,
Atomic64 increment) {
- return OSAtomicAdd64(increment, const_cast<Atomic64*>(ptr));
+ return OSAtomicAdd64(increment, reinterpret_cast<volatile int64_t*>(ptr));
}
inline Atomic64 Barrier_AtomicIncrement(volatile Atomic64* ptr,
Atomic64 increment) {
- return OSAtomicAdd64Barrier(increment, const_cast<Atomic64*>(ptr));
+ return OSAtomicAdd64Barrier(increment,
+ reinterpret_cast<volatile int64_t*>(ptr));
}
inline Atomic64 Acquire_CompareAndSwap(volatile Atomic64* ptr,
@@ -165,8 +166,8 @@ inline Atomic64 Acquire_CompareAndSwap(volatile Atomic64* ptr,
Atomic64 new_value) {
Atomic64 prev_value;
do {
- if (OSAtomicCompareAndSwap64Barrier(old_value, new_value,
- const_cast<Atomic64*>(ptr))) {
+ if (OSAtomicCompareAndSwap64Barrier(
+ old_value, new_value, reinterpret_cast<volatile int64_t*>(ptr))) {
return old_value;
}
prev_value = *ptr;
@@ -213,89 +214,6 @@ inline Atomic64 Release_Load(volatile const Atomic64* ptr) {
#endif // defined(__LP64__)
-// MacOS uses long for intptr_t, AtomicWord and Atomic32 are always different
-// on the Mac, even when they are the same size. We need to explicitly cast
-// from AtomicWord to Atomic32/64 to implement the AtomicWord interface.
-#ifdef __LP64__
-#define AtomicWordCastType Atomic64
-#else
-#define AtomicWordCastType Atomic32
-#endif
-
-inline AtomicWord NoBarrier_CompareAndSwap(volatile AtomicWord* ptr,
- AtomicWord old_value,
- AtomicWord new_value) {
- return NoBarrier_CompareAndSwap(
- reinterpret_cast<volatile AtomicWordCastType*>(ptr),
- old_value, new_value);
-}
-
-inline AtomicWord NoBarrier_AtomicExchange(volatile AtomicWord* ptr,
- AtomicWord new_value) {
- return NoBarrier_AtomicExchange(
- reinterpret_cast<volatile AtomicWordCastType*>(ptr), new_value);
-}
-
-inline AtomicWord NoBarrier_AtomicIncrement(volatile AtomicWord* ptr,
- AtomicWord increment) {
- return NoBarrier_AtomicIncrement(
- reinterpret_cast<volatile AtomicWordCastType*>(ptr), increment);
-}
-
-inline AtomicWord Barrier_AtomicIncrement(volatile AtomicWord* ptr,
- AtomicWord increment) {
- return Barrier_AtomicIncrement(
- reinterpret_cast<volatile AtomicWordCastType*>(ptr), increment);
-}
-
-inline AtomicWord Acquire_CompareAndSwap(volatile AtomicWord* ptr,
- AtomicWord old_value,
- AtomicWord new_value) {
- return v8::internal::Acquire_CompareAndSwap(
- reinterpret_cast<volatile AtomicWordCastType*>(ptr),
- old_value, new_value);
-}
-
-inline AtomicWord Release_CompareAndSwap(volatile AtomicWord* ptr,
- AtomicWord old_value,
- AtomicWord new_value) {
- return v8::internal::Release_CompareAndSwap(
- reinterpret_cast<volatile AtomicWordCastType*>(ptr),
- old_value, new_value);
-}
-
-inline void NoBarrier_Store(volatile AtomicWord* ptr, AtomicWord value) {
- NoBarrier_Store(
- reinterpret_cast<volatile AtomicWordCastType*>(ptr), value);
-}
-
-inline void Acquire_Store(volatile AtomicWord* ptr, AtomicWord value) {
- return v8::internal::Acquire_Store(
- reinterpret_cast<volatile AtomicWordCastType*>(ptr), value);
-}
-
-inline void Release_Store(volatile AtomicWord* ptr, AtomicWord value) {
- return v8::internal::Release_Store(
- reinterpret_cast<volatile AtomicWordCastType*>(ptr), value);
-}
-
-inline AtomicWord NoBarrier_Load(volatile const AtomicWord* ptr) {
- return NoBarrier_Load(
- reinterpret_cast<volatile const AtomicWordCastType*>(ptr));
-}
-
-inline AtomicWord Acquire_Load(volatile const AtomicWord* ptr) {
- return v8::internal::Acquire_Load(
- reinterpret_cast<volatile const AtomicWordCastType*>(ptr));
-}
-
-inline AtomicWord Release_Load(volatile const AtomicWord* ptr) {
- return v8::internal::Release_Load(
- reinterpret_cast<volatile const AtomicWordCastType*>(ptr));
-}
-
-#undef AtomicWordCastType
-
} } // namespace v8::internal
-#endif // V8_ATOMICOPS_INTERNALS_X86_MACOSX_H_
+#endif // V8_ATOMICOPS_INTERNALS_MAC_H_
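
The reinterpret_casts introduced above are needed because the 64-bit OSAtomic calls are declared against volatile int64_t*, while Atomic64 is a separate typedef. A small sketch of the call shape, not part of this patch, assuming <libkern/OSAtomic.h> on an LP64 build where Atomic64 is int64_t; IncrementSketch is an illustrative name.

#include <libkern/OSAtomic.h>
#include <stdint.h>

typedef int64_t Atomic64;  // assumed to mirror the LP64 definition in atomicops.h

// Same shape as NoBarrier_AtomicIncrement above: OSAtomicAdd64 takes
// (int64_t amount, volatile int64_t* value) and returns the new value.
static inline Atomic64 IncrementSketch(volatile Atomic64* ptr, Atomic64 by) {
  return OSAtomicAdd64(by, reinterpret_cast<volatile int64_t*>(ptr));
}
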
diff --git a/deps/v8/src/atomicops_internals_tsan.h b/deps/v8/src/atomicops_internals_tsan.h
index b5162bad9f..1819798a5d 100644
--- a/deps/v8/src/atomicops_internals_tsan.h
+++ b/deps/v8/src/atomicops_internals_tsan.h
@@ -53,10 +53,7 @@ extern struct AtomicOps_x86CPUFeatureStruct
#define ATOMICOPS_COMPILER_BARRIER() __asm__ __volatile__("" : : : "memory")
-#ifdef __cplusplus
extern "C" {
-#endif
-
typedef char __tsan_atomic8;
typedef short __tsan_atomic16; // NOLINT
typedef int __tsan_atomic32;
@@ -80,152 +77,149 @@ typedef enum {
__tsan_memory_order_seq_cst,
} __tsan_memory_order;
-__tsan_atomic8 __tsan_atomic8_load(const volatile __tsan_atomic8 *a,
+__tsan_atomic8 __tsan_atomic8_load(const volatile __tsan_atomic8* a,
__tsan_memory_order mo);
-__tsan_atomic16 __tsan_atomic16_load(const volatile __tsan_atomic16 *a,
+__tsan_atomic16 __tsan_atomic16_load(const volatile __tsan_atomic16* a,
__tsan_memory_order mo);
-__tsan_atomic32 __tsan_atomic32_load(const volatile __tsan_atomic32 *a,
+__tsan_atomic32 __tsan_atomic32_load(const volatile __tsan_atomic32* a,
__tsan_memory_order mo);
-__tsan_atomic64 __tsan_atomic64_load(const volatile __tsan_atomic64 *a,
+__tsan_atomic64 __tsan_atomic64_load(const volatile __tsan_atomic64* a,
__tsan_memory_order mo);
-__tsan_atomic128 __tsan_atomic128_load(const volatile __tsan_atomic128 *a,
+__tsan_atomic128 __tsan_atomic128_load(const volatile __tsan_atomic128* a,
__tsan_memory_order mo);
-void __tsan_atomic8_store(volatile __tsan_atomic8 *a, __tsan_atomic8 v,
+void __tsan_atomic8_store(volatile __tsan_atomic8* a, __tsan_atomic8 v,
__tsan_memory_order mo);
-void __tsan_atomic16_store(volatile __tsan_atomic16 *a, __tsan_atomic16 v,
+void __tsan_atomic16_store(volatile __tsan_atomic16* a, __tsan_atomic16 v,
__tsan_memory_order mo);
-void __tsan_atomic32_store(volatile __tsan_atomic32 *a, __tsan_atomic32 v,
+void __tsan_atomic32_store(volatile __tsan_atomic32* a, __tsan_atomic32 v,
__tsan_memory_order mo);
-void __tsan_atomic64_store(volatile __tsan_atomic64 *a, __tsan_atomic64 v,
+void __tsan_atomic64_store(volatile __tsan_atomic64* a, __tsan_atomic64 v,
__tsan_memory_order mo);
-void __tsan_atomic128_store(volatile __tsan_atomic128 *a, __tsan_atomic128 v,
+void __tsan_atomic128_store(volatile __tsan_atomic128* a, __tsan_atomic128 v,
__tsan_memory_order mo);
-__tsan_atomic8 __tsan_atomic8_exchange(volatile __tsan_atomic8 *a,
+__tsan_atomic8 __tsan_atomic8_exchange(volatile __tsan_atomic8* a,
__tsan_atomic8 v, __tsan_memory_order mo);
-__tsan_atomic16 __tsan_atomic16_exchange(volatile __tsan_atomic16 *a,
+__tsan_atomic16 __tsan_atomic16_exchange(volatile __tsan_atomic16* a,
__tsan_atomic16 v, __tsan_memory_order mo);
-__tsan_atomic32 __tsan_atomic32_exchange(volatile __tsan_atomic32 *a,
+__tsan_atomic32 __tsan_atomic32_exchange(volatile __tsan_atomic32* a,
__tsan_atomic32 v, __tsan_memory_order mo);
-__tsan_atomic64 __tsan_atomic64_exchange(volatile __tsan_atomic64 *a,
+__tsan_atomic64 __tsan_atomic64_exchange(volatile __tsan_atomic64* a,
__tsan_atomic64 v, __tsan_memory_order mo);
-__tsan_atomic128 __tsan_atomic128_exchange(volatile __tsan_atomic128 *a,
+__tsan_atomic128 __tsan_atomic128_exchange(volatile __tsan_atomic128* a,
__tsan_atomic128 v, __tsan_memory_order mo);
-__tsan_atomic8 __tsan_atomic8_fetch_add(volatile __tsan_atomic8 *a,
+__tsan_atomic8 __tsan_atomic8_fetch_add(volatile __tsan_atomic8* a,
__tsan_atomic8 v, __tsan_memory_order mo);
-__tsan_atomic16 __tsan_atomic16_fetch_add(volatile __tsan_atomic16 *a,
+__tsan_atomic16 __tsan_atomic16_fetch_add(volatile __tsan_atomic16* a,
__tsan_atomic16 v, __tsan_memory_order mo);
-__tsan_atomic32 __tsan_atomic32_fetch_add(volatile __tsan_atomic32 *a,
+__tsan_atomic32 __tsan_atomic32_fetch_add(volatile __tsan_atomic32* a,
__tsan_atomic32 v, __tsan_memory_order mo);
-__tsan_atomic64 __tsan_atomic64_fetch_add(volatile __tsan_atomic64 *a,
+__tsan_atomic64 __tsan_atomic64_fetch_add(volatile __tsan_atomic64* a,
__tsan_atomic64 v, __tsan_memory_order mo);
-__tsan_atomic128 __tsan_atomic128_fetch_add(volatile __tsan_atomic128 *a,
+__tsan_atomic128 __tsan_atomic128_fetch_add(volatile __tsan_atomic128* a,
__tsan_atomic128 v, __tsan_memory_order mo);
-__tsan_atomic8 __tsan_atomic8_fetch_and(volatile __tsan_atomic8 *a,
+__tsan_atomic8 __tsan_atomic8_fetch_and(volatile __tsan_atomic8* a,
__tsan_atomic8 v, __tsan_memory_order mo);
-__tsan_atomic16 __tsan_atomic16_fetch_and(volatile __tsan_atomic16 *a,
+__tsan_atomic16 __tsan_atomic16_fetch_and(volatile __tsan_atomic16* a,
__tsan_atomic16 v, __tsan_memory_order mo);
-__tsan_atomic32 __tsan_atomic32_fetch_and(volatile __tsan_atomic32 *a,
+__tsan_atomic32 __tsan_atomic32_fetch_and(volatile __tsan_atomic32* a,
__tsan_atomic32 v, __tsan_memory_order mo);
-__tsan_atomic64 __tsan_atomic64_fetch_and(volatile __tsan_atomic64 *a,
+__tsan_atomic64 __tsan_atomic64_fetch_and(volatile __tsan_atomic64* a,
__tsan_atomic64 v, __tsan_memory_order mo);
-__tsan_atomic128 __tsan_atomic128_fetch_and(volatile __tsan_atomic128 *a,
+__tsan_atomic128 __tsan_atomic128_fetch_and(volatile __tsan_atomic128* a,
__tsan_atomic128 v, __tsan_memory_order mo);
-__tsan_atomic8 __tsan_atomic8_fetch_or(volatile __tsan_atomic8 *a,
+__tsan_atomic8 __tsan_atomic8_fetch_or(volatile __tsan_atomic8* a,
__tsan_atomic8 v, __tsan_memory_order mo);
-__tsan_atomic16 __tsan_atomic16_fetch_or(volatile __tsan_atomic16 *a,
+__tsan_atomic16 __tsan_atomic16_fetch_or(volatile __tsan_atomic16* a,
__tsan_atomic16 v, __tsan_memory_order mo);
-__tsan_atomic32 __tsan_atomic32_fetch_or(volatile __tsan_atomic32 *a,
+__tsan_atomic32 __tsan_atomic32_fetch_or(volatile __tsan_atomic32* a,
__tsan_atomic32 v, __tsan_memory_order mo);
-__tsan_atomic64 __tsan_atomic64_fetch_or(volatile __tsan_atomic64 *a,
+__tsan_atomic64 __tsan_atomic64_fetch_or(volatile __tsan_atomic64* a,
__tsan_atomic64 v, __tsan_memory_order mo);
-__tsan_atomic128 __tsan_atomic128_fetch_or(volatile __tsan_atomic128 *a,
+__tsan_atomic128 __tsan_atomic128_fetch_or(volatile __tsan_atomic128* a,
__tsan_atomic128 v, __tsan_memory_order mo);
-__tsan_atomic8 __tsan_atomic8_fetch_xor(volatile __tsan_atomic8 *a,
+__tsan_atomic8 __tsan_atomic8_fetch_xor(volatile __tsan_atomic8* a,
__tsan_atomic8 v, __tsan_memory_order mo);
-__tsan_atomic16 __tsan_atomic16_fetch_xor(volatile __tsan_atomic16 *a,
+__tsan_atomic16 __tsan_atomic16_fetch_xor(volatile __tsan_atomic16* a,
__tsan_atomic16 v, __tsan_memory_order mo);
-__tsan_atomic32 __tsan_atomic32_fetch_xor(volatile __tsan_atomic32 *a,
+__tsan_atomic32 __tsan_atomic32_fetch_xor(volatile __tsan_atomic32* a,
__tsan_atomic32 v, __tsan_memory_order mo);
-__tsan_atomic64 __tsan_atomic64_fetch_xor(volatile __tsan_atomic64 *a,
+__tsan_atomic64 __tsan_atomic64_fetch_xor(volatile __tsan_atomic64* a,
__tsan_atomic64 v, __tsan_memory_order mo);
-__tsan_atomic128 __tsan_atomic128_fetch_xor(volatile __tsan_atomic128 *a,
+__tsan_atomic128 __tsan_atomic128_fetch_xor(volatile __tsan_atomic128* a,
__tsan_atomic128 v, __tsan_memory_order mo);
-__tsan_atomic8 __tsan_atomic8_fetch_nand(volatile __tsan_atomic8 *a,
+__tsan_atomic8 __tsan_atomic8_fetch_nand(volatile __tsan_atomic8* a,
__tsan_atomic8 v, __tsan_memory_order mo);
-__tsan_atomic16 __tsan_atomic16_fetch_nand(volatile __tsan_atomic16 *a,
+__tsan_atomic16 __tsan_atomic16_fetch_nand(volatile __tsan_atomic16* a,
__tsan_atomic16 v, __tsan_memory_order mo);
-__tsan_atomic32 __tsan_atomic32_fetch_nand(volatile __tsan_atomic32 *a,
+__tsan_atomic32 __tsan_atomic32_fetch_nand(volatile __tsan_atomic32* a,
__tsan_atomic32 v, __tsan_memory_order mo);
-__tsan_atomic64 __tsan_atomic64_fetch_nand(volatile __tsan_atomic64 *a,
- __tsan_atomic64 v, __tsan_memory_order mo);
-__tsan_atomic128 __tsan_atomic128_fetch_nand(volatile __tsan_atomic128 *a,
+__tsan_atomic64 __tsan_atomic64_fetch_nand(volatile __tsan_atomic64* a,
__tsan_atomic64 v, __tsan_memory_order mo);
+__tsan_atomic128 __tsan_atomic128_fetch_nand(volatile __tsan_atomic128* a,
+ __tsan_atomic128 v, __tsan_memory_order mo);
-int __tsan_atomic8_compare_exchange_weak(volatile __tsan_atomic8 *a,
- __tsan_atomic8 *c, __tsan_atomic8 v, __tsan_memory_order mo,
+int __tsan_atomic8_compare_exchange_weak(volatile __tsan_atomic8* a,
+ __tsan_atomic8* c, __tsan_atomic8 v, __tsan_memory_order mo,
__tsan_memory_order fail_mo);
-int __tsan_atomic16_compare_exchange_weak(volatile __tsan_atomic16 *a,
- __tsan_atomic16 *c, __tsan_atomic16 v, __tsan_memory_order mo,
+int __tsan_atomic16_compare_exchange_weak(volatile __tsan_atomic16* a,
+ __tsan_atomic16* c, __tsan_atomic16 v, __tsan_memory_order mo,
__tsan_memory_order fail_mo);
-int __tsan_atomic32_compare_exchange_weak(volatile __tsan_atomic32 *a,
- __tsan_atomic32 *c, __tsan_atomic32 v, __tsan_memory_order mo,
+int __tsan_atomic32_compare_exchange_weak(volatile __tsan_atomic32* a,
+ __tsan_atomic32* c, __tsan_atomic32 v, __tsan_memory_order mo,
__tsan_memory_order fail_mo);
-int __tsan_atomic64_compare_exchange_weak(volatile __tsan_atomic64 *a,
- __tsan_atomic64 *c, __tsan_atomic64 v, __tsan_memory_order mo,
+int __tsan_atomic64_compare_exchange_weak(volatile __tsan_atomic64* a,
+ __tsan_atomic64* c, __tsan_atomic64 v, __tsan_memory_order mo,
__tsan_memory_order fail_mo);
-int __tsan_atomic128_compare_exchange_weak(volatile __tsan_atomic128 *a,
- __tsan_atomic128 *c, __tsan_atomic128 v, __tsan_memory_order mo,
+int __tsan_atomic128_compare_exchange_weak(volatile __tsan_atomic128* a,
+ __tsan_atomic128* c, __tsan_atomic128 v, __tsan_memory_order mo,
__tsan_memory_order fail_mo);
-int __tsan_atomic8_compare_exchange_strong(volatile __tsan_atomic8 *a,
- __tsan_atomic8 *c, __tsan_atomic8 v, __tsan_memory_order mo,
+int __tsan_atomic8_compare_exchange_strong(volatile __tsan_atomic8* a,
+ __tsan_atomic8* c, __tsan_atomic8 v, __tsan_memory_order mo,
__tsan_memory_order fail_mo);
-int __tsan_atomic16_compare_exchange_strong(volatile __tsan_atomic16 *a,
- __tsan_atomic16 *c, __tsan_atomic16 v, __tsan_memory_order mo,
+int __tsan_atomic16_compare_exchange_strong(volatile __tsan_atomic16* a,
+ __tsan_atomic16* c, __tsan_atomic16 v, __tsan_memory_order mo,
__tsan_memory_order fail_mo);
-int __tsan_atomic32_compare_exchange_strong(volatile __tsan_atomic32 *a,
- __tsan_atomic32 *c, __tsan_atomic32 v, __tsan_memory_order mo,
+int __tsan_atomic32_compare_exchange_strong(volatile __tsan_atomic32* a,
+ __tsan_atomic32* c, __tsan_atomic32 v, __tsan_memory_order mo,
__tsan_memory_order fail_mo);
-int __tsan_atomic64_compare_exchange_strong(volatile __tsan_atomic64 *a,
- __tsan_atomic64 *c, __tsan_atomic64 v, __tsan_memory_order mo,
+int __tsan_atomic64_compare_exchange_strong(volatile __tsan_atomic64* a,
+ __tsan_atomic64* c, __tsan_atomic64 v, __tsan_memory_order mo,
__tsan_memory_order fail_mo);
-int __tsan_atomic128_compare_exchange_strong(volatile __tsan_atomic128 *a,
- __tsan_atomic128 *c, __tsan_atomic128 v, __tsan_memory_order mo,
+int __tsan_atomic128_compare_exchange_strong(volatile __tsan_atomic128* a,
+ __tsan_atomic128* c, __tsan_atomic128 v, __tsan_memory_order mo,
__tsan_memory_order fail_mo);
__tsan_atomic8 __tsan_atomic8_compare_exchange_val(
- volatile __tsan_atomic8 *a, __tsan_atomic8 c, __tsan_atomic8 v,
+ volatile __tsan_atomic8* a, __tsan_atomic8 c, __tsan_atomic8 v,
__tsan_memory_order mo, __tsan_memory_order fail_mo);
__tsan_atomic16 __tsan_atomic16_compare_exchange_val(
- volatile __tsan_atomic16 *a, __tsan_atomic16 c, __tsan_atomic16 v,
+ volatile __tsan_atomic16* a, __tsan_atomic16 c, __tsan_atomic16 v,
__tsan_memory_order mo, __tsan_memory_order fail_mo);
__tsan_atomic32 __tsan_atomic32_compare_exchange_val(
- volatile __tsan_atomic32 *a, __tsan_atomic32 c, __tsan_atomic32 v,
+ volatile __tsan_atomic32* a, __tsan_atomic32 c, __tsan_atomic32 v,
__tsan_memory_order mo, __tsan_memory_order fail_mo);
__tsan_atomic64 __tsan_atomic64_compare_exchange_val(
- volatile __tsan_atomic64 *a, __tsan_atomic64 c, __tsan_atomic64 v,
+ volatile __tsan_atomic64* a, __tsan_atomic64 c, __tsan_atomic64 v,
__tsan_memory_order mo, __tsan_memory_order fail_mo);
__tsan_atomic128 __tsan_atomic128_compare_exchange_val(
- volatile __tsan_atomic128 *a, __tsan_atomic128 c, __tsan_atomic128 v,
+ volatile __tsan_atomic128* a, __tsan_atomic128 c, __tsan_atomic128 v,
__tsan_memory_order mo, __tsan_memory_order fail_mo);
void __tsan_atomic_thread_fence(__tsan_memory_order mo);
void __tsan_atomic_signal_fence(__tsan_memory_order mo);
-
-#ifdef __cplusplus
} // extern "C"
-#endif
#endif // #ifndef TSAN_INTERFACE_ATOMIC_H
-inline Atomic32 NoBarrier_CompareAndSwap(volatile Atomic32 *ptr,
+inline Atomic32 NoBarrier_CompareAndSwap(volatile Atomic32* ptr,
Atomic32 old_value,
Atomic32 new_value) {
Atomic32 cmp = old_value;
@@ -234,37 +228,37 @@ inline Atomic32 NoBarrier_CompareAndSwap(volatile Atomic32 *ptr,
return cmp;
}
-inline Atomic32 NoBarrier_AtomicExchange(volatile Atomic32 *ptr,
+inline Atomic32 NoBarrier_AtomicExchange(volatile Atomic32* ptr,
Atomic32 new_value) {
return __tsan_atomic32_exchange(ptr, new_value,
__tsan_memory_order_relaxed);
}
-inline Atomic32 Acquire_AtomicExchange(volatile Atomic32 *ptr,
+inline Atomic32 Acquire_AtomicExchange(volatile Atomic32* ptr,
Atomic32 new_value) {
return __tsan_atomic32_exchange(ptr, new_value,
__tsan_memory_order_acquire);
}
-inline Atomic32 Release_AtomicExchange(volatile Atomic32 *ptr,
+inline Atomic32 Release_AtomicExchange(volatile Atomic32* ptr,
Atomic32 new_value) {
return __tsan_atomic32_exchange(ptr, new_value,
__tsan_memory_order_release);
}
-inline Atomic32 NoBarrier_AtomicIncrement(volatile Atomic32 *ptr,
+inline Atomic32 NoBarrier_AtomicIncrement(volatile Atomic32* ptr,
Atomic32 increment) {
return increment + __tsan_atomic32_fetch_add(ptr, increment,
__tsan_memory_order_relaxed);
}
-inline Atomic32 Barrier_AtomicIncrement(volatile Atomic32 *ptr,
+inline Atomic32 Barrier_AtomicIncrement(volatile Atomic32* ptr,
Atomic32 increment) {
return increment + __tsan_atomic32_fetch_add(ptr, increment,
__tsan_memory_order_acq_rel);
}
-inline Atomic32 Acquire_CompareAndSwap(volatile Atomic32 *ptr,
+inline Atomic32 Acquire_CompareAndSwap(volatile Atomic32* ptr,
Atomic32 old_value,
Atomic32 new_value) {
Atomic32 cmp = old_value;
@@ -273,7 +267,7 @@ inline Atomic32 Acquire_CompareAndSwap(volatile Atomic32 *ptr,
return cmp;
}
-inline Atomic32 Release_CompareAndSwap(volatile Atomic32 *ptr,
+inline Atomic32 Release_CompareAndSwap(volatile Atomic32* ptr,
Atomic32 old_value,
Atomic32 new_value) {
Atomic32 cmp = old_value;
@@ -282,33 +276,33 @@ inline Atomic32 Release_CompareAndSwap(volatile Atomic32 *ptr,
return cmp;
}
-inline void NoBarrier_Store(volatile Atomic32 *ptr, Atomic32 value) {
+inline void NoBarrier_Store(volatile Atomic32* ptr, Atomic32 value) {
__tsan_atomic32_store(ptr, value, __tsan_memory_order_relaxed);
}
-inline void Acquire_Store(volatile Atomic32 *ptr, Atomic32 value) {
+inline void Acquire_Store(volatile Atomic32* ptr, Atomic32 value) {
__tsan_atomic32_store(ptr, value, __tsan_memory_order_relaxed);
__tsan_atomic_thread_fence(__tsan_memory_order_seq_cst);
}
-inline void Release_Store(volatile Atomic32 *ptr, Atomic32 value) {
+inline void Release_Store(volatile Atomic32* ptr, Atomic32 value) {
__tsan_atomic32_store(ptr, value, __tsan_memory_order_release);
}
-inline Atomic32 NoBarrier_Load(volatile const Atomic32 *ptr) {
+inline Atomic32 NoBarrier_Load(volatile const Atomic32* ptr) {
return __tsan_atomic32_load(ptr, __tsan_memory_order_relaxed);
}
-inline Atomic32 Acquire_Load(volatile const Atomic32 *ptr) {
+inline Atomic32 Acquire_Load(volatile const Atomic32* ptr) {
return __tsan_atomic32_load(ptr, __tsan_memory_order_acquire);
}
-inline Atomic32 Release_Load(volatile const Atomic32 *ptr) {
+inline Atomic32 Release_Load(volatile const Atomic32* ptr) {
__tsan_atomic_thread_fence(__tsan_memory_order_seq_cst);
return __tsan_atomic32_load(ptr, __tsan_memory_order_relaxed);
}
-inline Atomic64 NoBarrier_CompareAndSwap(volatile Atomic64 *ptr,
+inline Atomic64 NoBarrier_CompareAndSwap(volatile Atomic64* ptr,
Atomic64 old_value,
Atomic64 new_value) {
Atomic64 cmp = old_value;
@@ -317,60 +311,60 @@ inline Atomic64 NoBarrier_CompareAndSwap(volatile Atomic64 *ptr,
return cmp;
}
-inline Atomic64 NoBarrier_AtomicExchange(volatile Atomic64 *ptr,
+inline Atomic64 NoBarrier_AtomicExchange(volatile Atomic64* ptr,
Atomic64 new_value) {
return __tsan_atomic64_exchange(ptr, new_value, __tsan_memory_order_relaxed);
}
-inline Atomic64 Acquire_AtomicExchange(volatile Atomic64 *ptr,
+inline Atomic64 Acquire_AtomicExchange(volatile Atomic64* ptr,
Atomic64 new_value) {
return __tsan_atomic64_exchange(ptr, new_value, __tsan_memory_order_acquire);
}
-inline Atomic64 Release_AtomicExchange(volatile Atomic64 *ptr,
+inline Atomic64 Release_AtomicExchange(volatile Atomic64* ptr,
Atomic64 new_value) {
return __tsan_atomic64_exchange(ptr, new_value, __tsan_memory_order_release);
}
-inline Atomic64 NoBarrier_AtomicIncrement(volatile Atomic64 *ptr,
+inline Atomic64 NoBarrier_AtomicIncrement(volatile Atomic64* ptr,
Atomic64 increment) {
return increment + __tsan_atomic64_fetch_add(ptr, increment,
__tsan_memory_order_relaxed);
}
-inline Atomic64 Barrier_AtomicIncrement(volatile Atomic64 *ptr,
+inline Atomic64 Barrier_AtomicIncrement(volatile Atomic64* ptr,
Atomic64 increment) {
return increment + __tsan_atomic64_fetch_add(ptr, increment,
__tsan_memory_order_acq_rel);
}
-inline void NoBarrier_Store(volatile Atomic64 *ptr, Atomic64 value) {
+inline void NoBarrier_Store(volatile Atomic64* ptr, Atomic64 value) {
__tsan_atomic64_store(ptr, value, __tsan_memory_order_relaxed);
}
-inline void Acquire_Store(volatile Atomic64 *ptr, Atomic64 value) {
+inline void Acquire_Store(volatile Atomic64* ptr, Atomic64 value) {
__tsan_atomic64_store(ptr, value, __tsan_memory_order_relaxed);
__tsan_atomic_thread_fence(__tsan_memory_order_seq_cst);
}
-inline void Release_Store(volatile Atomic64 *ptr, Atomic64 value) {
+inline void Release_Store(volatile Atomic64* ptr, Atomic64 value) {
__tsan_atomic64_store(ptr, value, __tsan_memory_order_release);
}
-inline Atomic64 NoBarrier_Load(volatile const Atomic64 *ptr) {
+inline Atomic64 NoBarrier_Load(volatile const Atomic64* ptr) {
return __tsan_atomic64_load(ptr, __tsan_memory_order_relaxed);
}
-inline Atomic64 Acquire_Load(volatile const Atomic64 *ptr) {
+inline Atomic64 Acquire_Load(volatile const Atomic64* ptr) {
return __tsan_atomic64_load(ptr, __tsan_memory_order_acquire);
}
-inline Atomic64 Release_Load(volatile const Atomic64 *ptr) {
+inline Atomic64 Release_Load(volatile const Atomic64* ptr) {
__tsan_atomic_thread_fence(__tsan_memory_order_seq_cst);
return __tsan_atomic64_load(ptr, __tsan_memory_order_relaxed);
}
-inline Atomic64 Acquire_CompareAndSwap(volatile Atomic64 *ptr,
+inline Atomic64 Acquire_CompareAndSwap(volatile Atomic64* ptr,
Atomic64 old_value,
Atomic64 new_value) {
Atomic64 cmp = old_value;
@@ -379,7 +373,7 @@ inline Atomic64 Acquire_CompareAndSwap(volatile Atomic64 *ptr,
return cmp;
}
-inline Atomic64 Release_CompareAndSwap(volatile Atomic64 *ptr,
+inline Atomic64 Release_CompareAndSwap(volatile Atomic64* ptr,
Atomic64 old_value,
Atomic64 new_value) {
Atomic64 cmp = old_value;
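
The hunks above only reformat declarations and wrapper signatures; the compare-and-swap bodies themselves fall outside the shown context. A plausible sketch of such a body, using only the __tsan_* declarations listed above (the _Sketch suffix marks it as illustrative rather than the elided code):

inline Atomic32 NoBarrier_CompareAndSwap_Sketch(volatile Atomic32* ptr,
                                                Atomic32 old_value,
                                                Atomic32 new_value) {
  Atomic32 cmp = old_value;
  // On success *ptr becomes new_value; on failure cmp is updated to the value
  // actually observed, so returning cmp matches the CompareAndSwap contract
  // of returning the previous value.
  __tsan_atomic32_compare_exchange_strong(ptr, &cmp, new_value,
                                          __tsan_memory_order_relaxed,
                                          __tsan_memory_order_relaxed);
  return cmp;
}
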
diff --git a/deps/v8/src/atomicops_internals_x86_msvc.h b/deps/v8/src/atomicops_internals_x86_msvc.h
index fcf6a65107..ad9cf9d80b 100644
--- a/deps/v8/src/atomicops_internals_x86_msvc.h
+++ b/deps/v8/src/atomicops_internals_x86_msvc.h
@@ -33,6 +33,15 @@
#include "checks.h"
#include "win32-headers.h"
+#if defined(V8_HOST_ARCH_64_BIT)
+// windows.h #defines this (only on x64). This causes problems because the
+// public API also uses MemoryBarrier as the public name for this fence. So, on
+// x64, undef it, and call its documented
+// (http://msdn.microsoft.com/en-us/library/windows/desktop/ms684208.aspx)
+// implementation directly.
+#undef MemoryBarrier
+#endif
+
namespace v8 {
namespace internal {
@@ -70,8 +79,13 @@ inline Atomic32 NoBarrier_AtomicIncrement(volatile Atomic32* ptr,
#error "We require at least vs2005 for MemoryBarrier"
#endif
inline void MemoryBarrier() {
+#if defined(V8_HOST_ARCH_64_BIT)
+ // See #undef and note at the top of this file.
+ __faststorefence();
+#else
// We use MemoryBarrier from WinNT.h
::MemoryBarrier();
+#endif
}
inline Atomic32 Acquire_CompareAndSwap(volatile Atomic32* ptr,
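
The #undef above is needed because, on x64, the Windows SDK headers define MemoryBarrier as a macro (expanding to the __faststorefence intrinsic), which would otherwise rewrite the declaration of v8::internal::MemoryBarrier(). A minimal sketch of the fence the x64 branch calls, not part of this patch, assuming MSVC and <intrin.h>; FullFenceSketch is an illustrative name.

#if defined(_M_X64)
#include <intrin.h>

static inline void FullFenceSketch() {
  // __faststorefence is the documented x64 implementation of the Windows
  // MemoryBarrier fence: all prior loads and stores become globally visible
  // before any later memory access.
  __faststorefence();
}
#endif
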
diff --git a/deps/v8/src/bootstrapper.cc b/deps/v8/src/bootstrapper.cc
index ef802ba987..c4d7adfbbd 100644
--- a/deps/v8/src/bootstrapper.cc
+++ b/deps/v8/src/bootstrapper.cc
@@ -88,6 +88,8 @@ Handle<String> Bootstrapper::NativesSourceLookup(int index) {
source.length());
Handle<String> source_code =
isolate_->factory()->NewExternalStringFromAscii(resource);
+ // We do not expect this to throw an exception. Change this if it does.
+ CHECK_NOT_EMPTY_HANDLE(isolate_, source_code);
heap->natives_source_cache()->set(index, *source_code);
}
Handle<Object> cached_source(heap->natives_source_cache()->get(index),
@@ -152,7 +154,7 @@ char* Bootstrapper::AllocateAutoDeletedArray(int bytes) {
void Bootstrapper::TearDown() {
if (delete_these_non_arrays_on_tear_down_ != NULL) {
int len = delete_these_non_arrays_on_tear_down_->length();
- ASSERT(len < 20); // Don't use this mechanism for unbounded allocations.
+ ASSERT(len < 24); // Don't use this mechanism for unbounded allocations.
for (int i = 0; i < len; i++) {
delete delete_these_non_arrays_on_tear_down_->at(i);
delete_these_non_arrays_on_tear_down_->at(i) = NULL;
@@ -231,6 +233,7 @@ class Genesis BASE_EMBEDDED {
// Installs the contents of the native .js files on the global objects.
// Used for creating a context from scratch.
void InstallNativeFunctions();
+ void InstallExperimentalBuiltinFunctionIds();
void InstallExperimentalNativeFunctions();
Handle<JSFunction> InstallInternalArray(Handle<JSBuiltinsObject> builtins,
const char* name,
@@ -299,7 +302,7 @@ class Genesis BASE_EMBEDDED {
PrototypePropertyMode prototypeMode);
void MakeFunctionInstancePrototypeWritable();
- Handle<Map> CreateStrictModeFunctionMap(
+ Handle<Map> CreateStrictFunctionMap(
PrototypePropertyMode prototype_mode,
Handle<JSFunction> empty_function);
@@ -327,8 +330,8 @@ class Genesis BASE_EMBEDDED {
// prototype for the processing of JS builtins. Later the function maps are
// replaced in order to make prototype writable. These are the final, writable
// prototype, maps.
- Handle<Map> function_map_writable_prototype_;
- Handle<Map> strict_mode_function_map_writable_prototype_;
+ Handle<Map> sloppy_function_map_writable_prototype_;
+ Handle<Map> strict_function_map_writable_prototype_;
Handle<JSFunction> throw_type_error_function;
BootstrapperActive active_;
@@ -473,18 +476,19 @@ Handle<JSFunction> Genesis::CreateEmptyFunction(Isolate* isolate) {
// can not be used as constructors.
Handle<Map> function_without_prototype_map =
CreateFunctionMap(DONT_ADD_PROTOTYPE);
- native_context()->set_function_without_prototype_map(
+ native_context()->set_sloppy_function_without_prototype_map(
*function_without_prototype_map);
// Allocate the function map. This map is temporary, used only for processing
// of builtins.
// Later the map is replaced with writable prototype map, allocated below.
Handle<Map> function_map = CreateFunctionMap(ADD_READONLY_PROTOTYPE);
- native_context()->set_function_map(*function_map);
+ native_context()->set_sloppy_function_map(*function_map);
// The final map for functions. Writeable prototype.
// This map is installed in MakeFunctionInstancePrototypeWritable.
- function_map_writable_prototype_ = CreateFunctionMap(ADD_WRITEABLE_PROTOTYPE);
+ sloppy_function_map_writable_prototype_ =
+ CreateFunctionMap(ADD_WRITEABLE_PROTOTYPE);
Factory* factory = isolate->factory();
@@ -518,7 +522,7 @@ Handle<JSFunction> Genesis::CreateEmptyFunction(Isolate* isolate) {
Handle<String> empty_string =
factory->InternalizeOneByteString(STATIC_ASCII_VECTOR("Empty"));
Handle<JSFunction> empty_function =
- factory->NewFunctionWithoutPrototype(empty_string, CLASSIC_MODE);
+ factory->NewFunctionWithoutPrototype(empty_string, SLOPPY);
// --- E m p t y ---
Handle<Code> code =
@@ -536,10 +540,10 @@ Handle<JSFunction> Genesis::CreateEmptyFunction(Isolate* isolate) {
empty_function->shared()->DontAdaptArguments();
// Set prototypes for the function maps.
- native_context()->function_map()->set_prototype(*empty_function);
- native_context()->function_without_prototype_map()->
+ native_context()->sloppy_function_map()->set_prototype(*empty_function);
+ native_context()->sloppy_function_without_prototype_map()->
set_prototype(*empty_function);
- function_map_writable_prototype_->set_prototype(*empty_function);
+ sloppy_function_map_writable_prototype_->set_prototype(*empty_function);
// Allocate the function map first and then patch the prototype later
Handle<Map> empty_function_map = CreateFunctionMap(DONT_ADD_PROTOTYPE);
@@ -603,11 +607,10 @@ Handle<JSFunction> Genesis::GetThrowTypeErrorFunction() {
Handle<String> name = factory()->InternalizeOneByteString(
STATIC_ASCII_VECTOR("ThrowTypeError"));
throw_type_error_function =
- factory()->NewFunctionWithoutPrototype(name, CLASSIC_MODE);
+ factory()->NewFunctionWithoutPrototype(name, SLOPPY);
Handle<Code> code(isolate()->builtins()->builtin(
Builtins::kStrictModePoisonPill));
- throw_type_error_function->set_map(
- native_context()->function_map());
+ throw_type_error_function->set_map(native_context()->sloppy_function_map());
throw_type_error_function->set_code(*code);
throw_type_error_function->shared()->set_code(*code);
throw_type_error_function->shared()->DontAdaptArguments();
@@ -618,7 +621,7 @@ Handle<JSFunction> Genesis::GetThrowTypeErrorFunction() {
}
-Handle<Map> Genesis::CreateStrictModeFunctionMap(
+Handle<Map> Genesis::CreateStrictFunctionMap(
PrototypePropertyMode prototype_mode,
Handle<JSFunction> empty_function) {
Handle<Map> map = factory()->NewMap(JS_FUNCTION_TYPE, JSFunction::kSize);
@@ -631,28 +634,27 @@ Handle<Map> Genesis::CreateStrictModeFunctionMap(
void Genesis::CreateStrictModeFunctionMaps(Handle<JSFunction> empty) {
// Allocate map for the prototype-less strict mode instances.
- Handle<Map> strict_mode_function_without_prototype_map =
- CreateStrictModeFunctionMap(DONT_ADD_PROTOTYPE, empty);
- native_context()->set_strict_mode_function_without_prototype_map(
- *strict_mode_function_without_prototype_map);
+ Handle<Map> strict_function_without_prototype_map =
+ CreateStrictFunctionMap(DONT_ADD_PROTOTYPE, empty);
+ native_context()->set_strict_function_without_prototype_map(
+ *strict_function_without_prototype_map);
// Allocate map for the strict mode functions. This map is temporary, used
// only for processing of builtins.
// Later the map is replaced with writable prototype map, allocated below.
- Handle<Map> strict_mode_function_map =
- CreateStrictModeFunctionMap(ADD_READONLY_PROTOTYPE, empty);
- native_context()->set_strict_mode_function_map(
- *strict_mode_function_map);
+ Handle<Map> strict_function_map =
+ CreateStrictFunctionMap(ADD_READONLY_PROTOTYPE, empty);
+ native_context()->set_strict_function_map(*strict_function_map);
// The final map for the strict mode functions. Writeable prototype.
// This map is installed in MakeFunctionInstancePrototypeWritable.
- strict_mode_function_map_writable_prototype_ =
- CreateStrictModeFunctionMap(ADD_WRITEABLE_PROTOTYPE, empty);
+ strict_function_map_writable_prototype_ =
+ CreateStrictFunctionMap(ADD_WRITEABLE_PROTOTYPE, empty);
// Complete the callbacks.
- PoisonArgumentsAndCaller(strict_mode_function_without_prototype_map);
- PoisonArgumentsAndCaller(strict_mode_function_map);
- PoisonArgumentsAndCaller(strict_mode_function_map_writable_prototype_);
+ PoisonArgumentsAndCaller(strict_function_without_prototype_map);
+ PoisonArgumentsAndCaller(strict_function_map);
+ PoisonArgumentsAndCaller(strict_function_map_writable_prototype_);
}
@@ -1097,7 +1099,7 @@ void Genesis::InitializeGlobal(Handle<GlobalObject> inner_global,
#define INSTALL_TYPED_ARRAY(Type, type, TYPE, ctype, size) \
{ \
Handle<JSFunction> fun = InstallTypedArray(#Type "Array", \
- EXTERNAL_##TYPE##_ELEMENTS); \
+ TYPE##_ELEMENTS); \
native_context()->set_##type##_array_fun(*fun); \
}
TYPED_ARRAYS(INSTALL_TYPED_ARRAY)
@@ -1112,6 +1114,18 @@ void Genesis::InitializeGlobal(Handle<GlobalObject> inner_global,
native_context()->set_data_view_fun(*data_view_fun);
}
+ { // -- W e a k M a p
+ InstallFunction(global, "WeakMap", JS_WEAK_MAP_TYPE, JSWeakMap::kSize,
+ isolate->initial_object_prototype(),
+ Builtins::kIllegal, true, true);
+ }
+
+ { // -- W e a k S e t
+ InstallFunction(global, "WeakSet", JS_WEAK_SET_TYPE, JSWeakSet::kSize,
+ isolate->initial_object_prototype(),
+ Builtins::kIllegal, true, true);
+ }
+
{ // --- arguments_boilerplate_
// Make sure we can recognize argument objects at runtime.
// This is done by introducing an anonymous function with
@@ -1136,7 +1150,7 @@ void Genesis::InitializeGlobal(Handle<GlobalObject> inner_global,
function->shared()->set_expected_nof_properties(2);
Handle<JSObject> result = factory->NewJSObject(function);
- native_context()->set_arguments_boilerplate(*result);
+ native_context()->set_sloppy_arguments_boilerplate(*result);
// Note: length must be added as the first property and
// callee must be added as the second property.
CHECK_NOT_EMPTY_HANDLE(isolate,
@@ -1172,22 +1186,23 @@ void Genesis::InitializeGlobal(Handle<GlobalObject> inner_global,
{ // --- aliased_arguments_boilerplate_
// Set up a well-formed parameter map to make assertions happy.
Handle<FixedArray> elements = factory->NewFixedArray(2);
- elements->set_map(heap->non_strict_arguments_elements_map());
+ elements->set_map(heap->sloppy_arguments_elements_map());
Handle<FixedArray> array;
array = factory->NewFixedArray(0);
elements->set(0, *array);
array = factory->NewFixedArray(0);
elements->set(1, *array);
- Handle<Map> old_map(native_context()->arguments_boilerplate()->map());
+ Handle<Map> old_map(
+ native_context()->sloppy_arguments_boilerplate()->map());
Handle<Map> new_map = factory->CopyMap(old_map);
new_map->set_pre_allocated_property_fields(2);
Handle<JSObject> result = factory->NewJSObjectFromMap(new_map);
// Set elements kind after allocating the object because
// NewJSObjectFromMap assumes a fast elements map.
- new_map->set_elements_kind(NON_STRICT_ARGUMENTS_ELEMENTS);
+ new_map->set_elements_kind(SLOPPY_ARGUMENTS_ELEMENTS);
result->set_elements(*elements);
- ASSERT(result->HasNonStrictArgumentsElements());
+ ASSERT(result->HasSloppyArgumentsElements());
native_context()->set_aliased_arguments_boilerplate(*result);
}
@@ -1210,7 +1225,7 @@ void Genesis::InitializeGlobal(Handle<GlobalObject> inner_global,
// Create the map. Allocate one in-object field for length.
Handle<Map> map = factory->NewMap(JS_OBJECT_TYPE,
- Heap::kArgumentsObjectSizeStrict);
+ Heap::kStrictArgumentsObjectSize);
// Create the descriptor array for the arguments object.
Handle<DescriptorArray> descriptors = factory->NewDescriptorArray(0, 3);
DescriptorArray::WhitenessWitness witness(*descriptors);
@@ -1239,13 +1254,13 @@ void Genesis::InitializeGlobal(Handle<GlobalObject> inner_global,
map->set_pre_allocated_property_fields(1);
map->set_inobject_properties(1);
- // Copy constructor from the non-strict arguments boilerplate.
+ // Copy constructor from the sloppy arguments boilerplate.
map->set_constructor(
- native_context()->arguments_boilerplate()->map()->constructor());
+ native_context()->sloppy_arguments_boilerplate()->map()->constructor());
// Allocate the arguments boilerplate object.
Handle<JSObject> result = factory->NewJSObjectFromMap(map);
- native_context()->set_strict_mode_arguments_boilerplate(*result);
+ native_context()->set_strict_arguments_boilerplate(*result);
// Add length property only for strict mode boilerplate.
CHECK_NOT_EMPTY_HANDLE(isolate,
@@ -1309,9 +1324,6 @@ void Genesis::InitializeGlobal(Handle<GlobalObject> inner_global,
delegate->shared()->DontAdaptArguments();
}
- // Initialize the out of memory slot.
- native_context()->set_out_of_memory(heap->false_value());
-
// Initialize the embedder data slot.
Handle<FixedArray> embedder_data = factory->NewFixedArray(3);
native_context()->set_embedder_data(*embedder_data);
@@ -1349,23 +1361,13 @@ void Genesis::InitializeExperimentalGlobal() {
}
if (FLAG_harmony_collections) {
- { // -- S e t
- InstallFunction(global, "Set", JS_SET_TYPE, JSSet::kSize,
- isolate()->initial_object_prototype(),
- Builtins::kIllegal, true, true);
- }
{ // -- M a p
InstallFunction(global, "Map", JS_MAP_TYPE, JSMap::kSize,
isolate()->initial_object_prototype(),
Builtins::kIllegal, true, true);
}
- { // -- W e a k M a p
- InstallFunction(global, "WeakMap", JS_WEAK_MAP_TYPE, JSWeakMap::kSize,
- isolate()->initial_object_prototype(),
- Builtins::kIllegal, true, true);
- }
- { // -- W e a k S e t
- InstallFunction(global, "WeakSet", JS_WEAK_SET_TYPE, JSWeakSet::kSize,
+ { // -- S e t
+ InstallFunction(global, "Set", JS_SET_TYPE, JSSet::kSize,
isolate()->initial_object_prototype(),
Builtins::kIllegal, true, true);
}
@@ -1388,18 +1390,19 @@ void Genesis::InitializeExperimentalGlobal() {
// Create maps for generator functions and their prototypes. Store those
// maps in the native context.
- Handle<Map> function_map(native_context()->function_map());
+ Handle<Map> function_map(native_context()->sloppy_function_map());
Handle<Map> generator_function_map = factory()->CopyMap(function_map);
generator_function_map->set_prototype(*generator_function_prototype);
- native_context()->set_generator_function_map(*generator_function_map);
+ native_context()->set_sloppy_generator_function_map(
+ *generator_function_map);
Handle<Map> strict_mode_function_map(
- native_context()->strict_mode_function_map());
+ native_context()->strict_function_map());
Handle<Map> strict_mode_generator_function_map = factory()->CopyMap(
strict_mode_function_map);
strict_mode_generator_function_map->set_prototype(
*generator_function_prototype);
- native_context()->set_strict_mode_generator_function_map(
+ native_context()->set_strict_generator_function_map(
*strict_mode_generator_function_map);
Handle<Map> object_map(native_context()->object_function()->initial_map());
@@ -1461,6 +1464,7 @@ bool Genesis::CompileExperimentalBuiltin(Isolate* isolate, int index) {
Handle<String> source_code =
factory->NewStringFromAscii(
ExperimentalNatives::GetRawScriptSource(index));
+ RETURN_IF_EMPTY_HANDLE_VALUE(isolate, source_code, false);
return CompileNative(isolate, name, source_code);
}
@@ -1510,6 +1514,7 @@ bool Genesis::CompileScriptCached(Isolate* isolate,
if (cache == NULL || !cache->Lookup(name, &function_info)) {
ASSERT(source->IsOneByteRepresentation());
Handle<String> script_name = factory->NewStringFromUtf8(name);
+ ASSERT(!script_name.is_null());
function_info = Compiler::CompileScript(
source,
script_name,
@@ -1519,7 +1524,7 @@ bool Genesis::CompileScriptCached(Isolate* isolate,
top_context,
extension,
NULL,
- Handle<String>::null(),
+ NO_CACHED_DATA,
use_runtime_context ? NATIVES_CODE : NOT_NATIVES_CODE);
if (function_info.is_null()) return false;
if (cache != NULL) cache->Add(name, function_info);
@@ -1562,6 +1567,7 @@ bool Genesis::CompileScriptCached(Isolate* isolate,
void Genesis::InstallNativeFunctions() {
HandleScope scope(isolate());
INSTALL_NATIVE(JSFunction, "CreateDate", create_date_fun);
+
INSTALL_NATIVE(JSFunction, "ToNumber", to_number_fun);
INSTALL_NATIVE(JSFunction, "ToString", to_string_fun);
INSTALL_NATIVE(JSFunction, "ToDetailString", to_detail_string_fun);
@@ -1569,6 +1575,7 @@ void Genesis::InstallNativeFunctions() {
INSTALL_NATIVE(JSFunction, "ToInteger", to_integer_fun);
INSTALL_NATIVE(JSFunction, "ToUint32", to_uint32_fun);
INSTALL_NATIVE(JSFunction, "ToInt32", to_int32_fun);
+
INSTALL_NATIVE(JSFunction, "GlobalEval", global_eval_fun);
INSTALL_NATIVE(JSFunction, "Instantiate", instantiate_fun);
INSTALL_NATIVE(JSFunction, "ConfigureTemplateInstance",
@@ -1577,25 +1584,34 @@ void Genesis::InstallNativeFunctions() {
INSTALL_NATIVE(JSObject, "functionCache", function_cache);
INSTALL_NATIVE(JSFunction, "ToCompletePropertyDescriptor",
to_complete_property_descriptor);
+
+ INSTALL_NATIVE(JSFunction, "IsPromise", is_promise);
+ INSTALL_NATIVE(JSFunction, "PromiseCreate", promise_create);
+ INSTALL_NATIVE(JSFunction, "PromiseResolve", promise_resolve);
+ INSTALL_NATIVE(JSFunction, "PromiseReject", promise_reject);
+ INSTALL_NATIVE(JSFunction, "PromiseChain", promise_chain);
+ INSTALL_NATIVE(JSFunction, "PromiseCatch", promise_catch);
+
+ INSTALL_NATIVE(JSFunction, "NotifyChange", observers_notify_change);
+ INSTALL_NATIVE(JSFunction, "EnqueueSpliceRecord", observers_enqueue_splice);
+ INSTALL_NATIVE(JSFunction, "BeginPerformSplice",
+ observers_begin_perform_splice);
+ INSTALL_NATIVE(JSFunction, "EndPerformSplice",
+ observers_end_perform_splice);
}
void Genesis::InstallExperimentalNativeFunctions() {
INSTALL_NATIVE(JSFunction, "RunMicrotasks", run_microtasks);
+ INSTALL_NATIVE(JSFunction, "EnqueueExternalMicrotask",
+ enqueue_external_microtask);
+
if (FLAG_harmony_proxies) {
INSTALL_NATIVE(JSFunction, "DerivedHasTrap", derived_has_trap);
INSTALL_NATIVE(JSFunction, "DerivedGetTrap", derived_get_trap);
INSTALL_NATIVE(JSFunction, "DerivedSetTrap", derived_set_trap);
INSTALL_NATIVE(JSFunction, "ProxyEnumerate", proxy_enumerate);
}
- if (FLAG_harmony_observation) {
- INSTALL_NATIVE(JSFunction, "NotifyChange", observers_notify_change);
- INSTALL_NATIVE(JSFunction, "EnqueueSpliceRecord", observers_enqueue_splice);
- INSTALL_NATIVE(JSFunction, "BeginPerformSplice",
- observers_begin_perform_splice);
- INSTALL_NATIVE(JSFunction, "EndPerformSplice",
- observers_end_perform_splice);
- }
}
#undef INSTALL_NATIVE
@@ -1751,9 +1767,6 @@ bool Genesis::InstallNatives() {
STATIC_ASCII_VECTOR("column_offset")));
Handle<Foreign> script_column_offset(
factory()->NewForeign(&Accessors::ScriptColumnOffset));
- Handle<String> data_string(factory()->InternalizeOneByteString(
- STATIC_ASCII_VECTOR("data")));
- Handle<Foreign> script_data(factory()->NewForeign(&Accessors::ScriptData));
Handle<String> type_string(factory()->InternalizeOneByteString(
STATIC_ASCII_VECTOR("type")));
Handle<Foreign> script_type(factory()->NewForeign(&Accessors::ScriptType));
@@ -1818,11 +1831,6 @@ bool Genesis::InstallNatives() {
}
{
- CallbacksDescriptor d(*data_string, *script_data, attribs);
- script_map->AppendDescriptor(&d, witness);
- }
-
- {
CallbacksDescriptor d(*type_string, *script_type, attribs);
script_map->AppendDescriptor(&d, witness);
}
@@ -2047,8 +2055,6 @@ bool Genesis::InstallExperimentalNatives() {
INSTALL_EXPERIMENTAL_NATIVE(i, symbols, "symbol.js")
INSTALL_EXPERIMENTAL_NATIVE(i, proxies, "proxy.js")
INSTALL_EXPERIMENTAL_NATIVE(i, collections, "collection.js")
- INSTALL_EXPERIMENTAL_NATIVE(i, observation, "object-observe.js")
- INSTALL_EXPERIMENTAL_NATIVE(i, promises, "promise.js")
INSTALL_EXPERIMENTAL_NATIVE(i, generators, "generator.js")
INSTALL_EXPERIMENTAL_NATIVE(i, iteration, "array-iterator.js")
INSTALL_EXPERIMENTAL_NATIVE(i, strings, "harmony-string.js")
@@ -2057,7 +2063,7 @@ bool Genesis::InstallExperimentalNatives() {
}
InstallExperimentalNativeFunctions();
-
+ InstallExperimentalBuiltinFunctionIds();
return true;
}
@@ -2076,8 +2082,10 @@ static Handle<JSObject> ResolveBuiltinIdHolder(
ASSERT_EQ(".prototype", period_pos);
Vector<const char> property(holder_expr,
static_cast<int>(period_pos - holder_expr));
+ Handle<String> property_string = factory->InternalizeUtf8String(property);
+ ASSERT(!property_string.is_null());
Handle<JSFunction> function = Handle<JSFunction>::cast(
- GetProperty(isolate, global, factory->InternalizeUtf8String(property)));
+ GetProperty(isolate, global, property_string));
return Handle<JSObject>(JSObject::cast(function->prototype()));
}
@@ -2107,6 +2115,15 @@ void Genesis::InstallBuiltinFunctionIds() {
}
+void Genesis::InstallExperimentalBuiltinFunctionIds() {
+ HandleScope scope(isolate());
+ if (FLAG_harmony_maths) {
+ Handle<JSObject> holder = ResolveBuiltinIdHolder(native_context(), "Math");
+ InstallBuiltinFunctionId(holder, "clz32", kMathClz32);
+ }
+}
+
+
// Do not forget to update macros.py with named constant
// of cache id.
#define JSFUNCTION_RESULT_CACHE_LIST(F) \
@@ -2336,6 +2353,8 @@ bool Genesis::InstallExtension(Isolate* isolate,
}
Handle<String> source_code =
isolate->factory()->NewExternalStringFromAscii(extension->source());
+ // We do not expect this to throw an exception. Change this if it does.
+ CHECK_NOT_EMPTY_HANDLE(isolate, source_code);
bool result = CompileScriptCached(isolate,
CStrVector(extension->name()),
source_code,
@@ -2546,13 +2565,14 @@ void Genesis::MakeFunctionInstancePrototypeWritable() {
// The maps with writable prototype are created in CreateEmptyFunction
// and CreateStrictModeFunctionMaps respectively. Initially the maps are
// created with read-only prototype for JS builtins processing.
- ASSERT(!function_map_writable_prototype_.is_null());
- ASSERT(!strict_mode_function_map_writable_prototype_.is_null());
+ ASSERT(!sloppy_function_map_writable_prototype_.is_null());
+ ASSERT(!strict_function_map_writable_prototype_.is_null());
// Replace function instance maps to make prototype writable.
- native_context()->set_function_map(*function_map_writable_prototype_);
- native_context()->set_strict_mode_function_map(
- *strict_mode_function_map_writable_prototype_);
+ native_context()->set_sloppy_function_map(
+ *sloppy_function_map_writable_prototype_);
+ native_context()->set_strict_function_map(
+ *strict_function_map_writable_prototype_);
}
@@ -2566,7 +2586,9 @@ class NoTrackDoubleFieldsForSerializerScope {
}
}
~NoTrackDoubleFieldsForSerializerScope() {
- FLAG_track_double_fields = flag_;
+ if (Serializer::enabled()) {
+ FLAG_track_double_fields = flag_;
+ }
}
private:
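
Note: the hunk above narrows the scope guard so that the saved value of FLAG_track_double_fields is restored only when the serializer is enabled. As a rough standalone sketch of the save-and-restore idiom the class relies on — assuming the constructor saves the flag and clears it only under the serializer, which is what the patched destructor implies; the globals below are stand-ins, not V8 internals:

    #include <cassert>

    bool FLAG_track_double_fields = true;  // stand-in for the real V8 flag
    bool serializer_enabled = false;       // stand-in for Serializer::enabled()

    // RAII guard: clear the flag while serializing, restore it on scope exit.
    class NoTrackDoubleFieldsForSerializerScopeSketch {
     public:
      NoTrackDoubleFieldsForSerializerScopeSketch()
          : flag_(FLAG_track_double_fields) {
        if (serializer_enabled) FLAG_track_double_fields = false;
      }
      ~NoTrackDoubleFieldsForSerializerScopeSketch() {
        // Mirrors the patched destructor: only touch the flag if it was changed.
        if (serializer_enabled) FLAG_track_double_fields = flag_;
      }
     private:
      bool flag_;
    };

    int main() {
      {
        NoTrackDoubleFieldsForSerializerScopeSketch scope;
        // With serializer_enabled == false the flag is left untouched.
        assert(FLAG_track_double_fields);
      }
      assert(FLAG_track_double_fields);
      return 0;
    }
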
diff --git a/deps/v8/src/bootstrapper.h b/deps/v8/src/bootstrapper.h
index 14dd1bd997..e683a45f04 100644
--- a/deps/v8/src/bootstrapper.h
+++ b/deps/v8/src/bootstrapper.h
@@ -73,6 +73,7 @@ class SourceCodeCache BASE_EMBEDDED {
cache_->CopyTo(0, *new_array, 0, cache_->length());
cache_ = *new_array;
Handle<String> str = factory->NewStringFromAscii(name, TENURED);
+ ASSERT(!str.is_null());
cache_->set(length, *str);
cache_->set(length + 1, *shared);
Script::cast(shared->script())->set_type(Smi::FromInt(type_));
diff --git a/deps/v8/src/builtins.cc b/deps/v8/src/builtins.cc
index e68890fcb2..689e845ba8 100644
--- a/deps/v8/src/builtins.cc
+++ b/deps/v8/src/builtins.cc
@@ -268,13 +268,12 @@ static FixedArrayBase* LeftTrimFixedArray(Heap* heap,
// Maintain marking consistency for HeapObjectIterator and
// IncrementalMarking.
int size_delta = to_trim * entry_size;
- if (heap->marking()->TransferMark(elms->address(),
- elms->address() + size_delta)) {
- MemoryChunk::IncrementLiveBytesFromMutator(elms->address(), -size_delta);
- }
+ Address new_start = elms->address() + size_delta;
+ heap->marking()->TransferMark(elms->address(), new_start);
+ heap->AdjustLiveBytes(new_start, -size_delta, Heap::FROM_MUTATOR);
- FixedArrayBase* new_elms = FixedArrayBase::cast(HeapObject::FromAddress(
- elms->address() + size_delta));
+ FixedArrayBase* new_elms =
+ FixedArrayBase::cast(HeapObject::FromAddress(new_start));
HeapProfiler* profiler = heap->isolate()->heap_profiler();
if (profiler->is_tracking_object_moves()) {
profiler->ObjectMoveEvent(elms->address(),
@@ -301,33 +300,35 @@ static bool ArrayPrototypeHasNoElements(Heap* heap,
}
+// Returns empty handle if not applicable.
MUST_USE_RESULT
-static inline MaybeObject* EnsureJSArrayWithWritableFastElements(
- Heap* heap, Object* receiver, Arguments* args, int first_added_arg) {
- if (!receiver->IsJSArray()) return NULL;
- JSArray* array = JSArray::cast(receiver);
- if (array->map()->is_observed()) return NULL;
- if (!array->map()->is_extensible()) return NULL;
- HeapObject* elms = array->elements();
+static inline Handle<FixedArrayBase> EnsureJSArrayWithWritableFastElements(
+ Isolate* isolate,
+ Handle<Object> receiver,
+ Arguments* args,
+ int first_added_arg) {
+ if (!receiver->IsJSArray()) return Handle<FixedArrayBase>::null();
+ Handle<JSArray> array = Handle<JSArray>::cast(receiver);
+ if (array->map()->is_observed()) return Handle<FixedArrayBase>::null();
+ if (!array->map()->is_extensible()) return Handle<FixedArrayBase>::null();
+ Handle<FixedArrayBase> elms(array->elements());
+ Heap* heap = isolate->heap();
Map* map = elms->map();
if (map == heap->fixed_array_map()) {
if (args == NULL || array->HasFastObjectElements()) return elms;
} else if (map == heap->fixed_cow_array_map()) {
- MaybeObject* maybe_writable_result = array->EnsureWritableFastElements();
- if (args == NULL || array->HasFastObjectElements() ||
- !maybe_writable_result->To(&elms)) {
- return maybe_writable_result;
- }
+ elms = JSObject::EnsureWritableFastElements(array);
+ if (args == NULL || array->HasFastObjectElements()) return elms;
} else if (map == heap->fixed_double_array_map()) {
if (args == NULL) return elms;
} else {
- return NULL;
+ return Handle<FixedArrayBase>::null();
}
// Need to ensure that the arguments passed in args can be contained in
// the array.
int args_length = args->length();
- if (first_added_arg >= args_length) return array->elements();
+ if (first_added_arg >= args_length) return handle(array->elements());
ElementsKind origin_kind = array->map()->elements_kind();
ASSERT(!IsFastObjectElementsKind(origin_kind));
@@ -346,14 +347,14 @@ static inline MaybeObject* EnsureJSArrayWithWritableFastElements(
}
}
if (target_kind != origin_kind) {
- MaybeObject* maybe_failure = array->TransitionElementsKind(target_kind);
- if (maybe_failure->IsFailure()) return maybe_failure;
- return array->elements();
+ JSObject::TransitionElementsKind(array, target_kind);
+ return handle(array->elements());
}
return elms;
}
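
Note: the handlified helper above signals "not applicable, fall back to the JS builtin" by returning an empty Handle instead of the old NULL MaybeObject*. A minimal standalone sketch of that sentinel pattern, as shown below, uses simplified types — the Handle here is an illustrative stand-in, not the real V8 template:

    #include <cassert>
    #include <cstddef>

    // Simplified stand-in for v8::internal::Handle<T>: empty means "no result".
    template <typename T>
    class Handle {
     public:
      Handle() : location_(NULL) {}
      explicit Handle(T* location) : location_(location) {}
      static Handle<T> null() { return Handle<T>(); }
      bool is_null() const { return location_ == NULL; }
     private:
      T* location_;
    };

    struct FixedArrayBase { int length; };

    // Mirrors the control flow of EnsureJSArrayWithWritableFastElements:
    // return an empty handle whenever the fast path does not apply.
    static Handle<FixedArrayBase> EnsureWritableFastElements(
        bool is_js_array, bool is_observed, FixedArrayBase* elms) {
      if (!is_js_array) return Handle<FixedArrayBase>::null();
      if (is_observed) return Handle<FixedArrayBase>::null();
      return Handle<FixedArrayBase>(elms);
    }

    int main() {
      FixedArrayBase elms = { 3 };
      assert(EnsureWritableFastElements(false, false, &elms).is_null());
      assert(!EnsureWritableFastElements(true, false, &elms).is_null());
      return 0;
    }
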
+// TODO(ishell): Handlify when all Array* builtins are handlified.
static inline bool IsJSArrayFastElementMovingAllowed(Heap* heap,
JSArray* receiver) {
if (!FLAG_clever_optimizations) return false;
@@ -393,23 +394,19 @@ MUST_USE_RESULT static MaybeObject* CallJsBuiltin(
BUILTIN(ArrayPush) {
- Heap* heap = isolate->heap();
- Object* receiver = *args.receiver();
- FixedArrayBase* elms_obj;
- MaybeObject* maybe_elms_obj =
- EnsureJSArrayWithWritableFastElements(heap, receiver, &args, 1);
- if (maybe_elms_obj == NULL) {
- return CallJsBuiltin(isolate, "ArrayPush", args);
- }
- if (!maybe_elms_obj->To(&elms_obj)) return maybe_elms_obj;
+ HandleScope scope(isolate);
+ Handle<Object> receiver = args.receiver();
+ Handle<FixedArrayBase> elms_obj =
+ EnsureJSArrayWithWritableFastElements(isolate, receiver, &args, 1);
+ if (elms_obj.is_null()) return CallJsBuiltin(isolate, "ArrayPush", args);
- JSArray* array = JSArray::cast(receiver);
+ Handle<JSArray> array = Handle<JSArray>::cast(receiver);
ASSERT(!array->map()->is_observed());
ElementsKind kind = array->GetElementsKind();
if (IsFastSmiOrObjectElementsKind(kind)) {
- FixedArray* elms = FixedArray::cast(elms_obj);
+ Handle<FixedArray> elms = Handle<FixedArray>::cast(elms_obj);
int len = Smi::cast(array->length())->value();
int to_add = args.length() - 1;
@@ -425,16 +422,13 @@ BUILTIN(ArrayPush) {
if (new_length > elms->length()) {
// New backing storage is needed.
int capacity = new_length + (new_length >> 1) + 16;
- FixedArray* new_elms;
- MaybeObject* maybe_obj = heap->AllocateUninitializedFixedArray(capacity);
- if (!maybe_obj->To(&new_elms)) return maybe_obj;
+ Handle<FixedArray> new_elms =
+ isolate->factory()->NewUninitializedFixedArray(capacity);
ElementsAccessor* accessor = array->GetElementsAccessor();
- MaybeObject* maybe_failure = accessor->CopyElements(
- NULL, 0, kind, new_elms, 0,
- ElementsAccessor::kCopyToEndAndInitializeToHole, elms_obj);
- ASSERT(!maybe_failure->IsFailure());
- USE(maybe_failure);
+ accessor->CopyElements(
+ Handle<JSObject>::null(), 0, kind, new_elms, 0,
+ ElementsAccessor::kCopyToEndAndInitializeToHole, elms_obj);
elms = new_elms;
}
@@ -446,8 +440,8 @@ BUILTIN(ArrayPush) {
elms->set(index + len, args[index + 1], mode);
}
- if (elms != array->elements()) {
- array->set_elements(elms);
+ if (*elms != array->elements()) {
+ array->set_elements(*elms);
}
// Set the length.
@@ -467,25 +461,22 @@ BUILTIN(ArrayPush) {
int new_length = len + to_add;
- FixedDoubleArray* new_elms;
+ Handle<FixedDoubleArray> new_elms;
if (new_length > elms_len) {
// New backing storage is needed.
int capacity = new_length + (new_length >> 1) + 16;
- MaybeObject* maybe_obj =
- heap->AllocateUninitializedFixedDoubleArray(capacity);
- if (!maybe_obj->To(&new_elms)) return maybe_obj;
+ new_elms = isolate->factory()->NewFixedDoubleArray(capacity);
ElementsAccessor* accessor = array->GetElementsAccessor();
- MaybeObject* maybe_failure = accessor->CopyElements(
- NULL, 0, kind, new_elms, 0,
- ElementsAccessor::kCopyToEndAndInitializeToHole, elms_obj);
- ASSERT(!maybe_failure->IsFailure());
- USE(maybe_failure);
+ accessor->CopyElements(
+ Handle<JSObject>::null(), 0, kind, new_elms, 0,
+ ElementsAccessor::kCopyToEndAndInitializeToHole, elms_obj);
+
} else {
// to_add is > 0 and new_length <= elms_len, so elms_obj cannot be the
// empty_fixed_array.
- new_elms = FixedDoubleArray::cast(elms_obj);
+ new_elms = Handle<FixedDoubleArray>::cast(elms_obj);
}
// Add the provided values.
@@ -496,8 +487,8 @@ BUILTIN(ArrayPush) {
new_elms->set(index + len, arg->Number());
}
- if (new_elms != array->elements()) {
- array->set_elements(new_elms);
+ if (*new_elms != array->elements()) {
+ array->set_elements(*new_elms);
}
// Set the length.
@@ -507,51 +498,62 @@ BUILTIN(ArrayPush) {
}
+// TODO(ishell): Temporary wrapper until handlified.
+static bool ElementsAccessorHasElementWrapper(
+ ElementsAccessor* accessor,
+ Handle<Object> receiver,
+ Handle<JSObject> holder,
+ uint32_t key,
+ Handle<FixedArrayBase> backing_store = Handle<FixedArrayBase>::null()) {
+ return accessor->HasElement(*receiver, *holder, key,
+ backing_store.is_null() ? NULL : *backing_store);
+}
+
+
BUILTIN(ArrayPop) {
- Heap* heap = isolate->heap();
- Object* receiver = *args.receiver();
- FixedArrayBase* elms_obj;
- MaybeObject* maybe_elms =
- EnsureJSArrayWithWritableFastElements(heap, receiver, NULL, 0);
- if (maybe_elms == NULL) return CallJsBuiltin(isolate, "ArrayPop", args);
- if (!maybe_elms->To(&elms_obj)) return maybe_elms;
-
- JSArray* array = JSArray::cast(receiver);
+ HandleScope scope(isolate);
+ Handle<Object> receiver = args.receiver();
+ Handle<FixedArrayBase> elms_obj =
+ EnsureJSArrayWithWritableFastElements(isolate, receiver, NULL, 0);
+ if (elms_obj.is_null()) return CallJsBuiltin(isolate, "ArrayPop", args);
+
+ Handle<JSArray> array = Handle<JSArray>::cast(receiver);
ASSERT(!array->map()->is_observed());
int len = Smi::cast(array->length())->value();
- if (len == 0) return heap->undefined_value();
+ if (len == 0) return isolate->heap()->undefined_value();
ElementsAccessor* accessor = array->GetElementsAccessor();
int new_length = len - 1;
- MaybeObject* maybe_result;
- if (accessor->HasElement(array, array, new_length, elms_obj)) {
- maybe_result = accessor->Get(array, array, new_length, elms_obj);
+ Handle<Object> element;
+ if (ElementsAccessorHasElementWrapper(
+ accessor, array, array, new_length, elms_obj)) {
+ element = accessor->Get(
+ array, array, new_length, elms_obj);
} else {
- maybe_result = array->GetPrototype()->GetElement(isolate, len - 1);
+ Handle<Object> proto(array->GetPrototype(), isolate);
+ element = Object::GetElement(isolate, proto, len - 1);
}
- if (maybe_result->IsFailure()) return maybe_result;
- MaybeObject* maybe_failure =
- accessor->SetLength(array, Smi::FromInt(new_length));
- if (maybe_failure->IsFailure()) return maybe_failure;
- return maybe_result;
+ RETURN_IF_EMPTY_HANDLE(isolate, element);
+ RETURN_IF_EMPTY_HANDLE(isolate,
+ accessor->SetLength(
+ array, handle(Smi::FromInt(new_length), isolate)));
+ return *element;
}
BUILTIN(ArrayShift) {
+ HandleScope scope(isolate);
Heap* heap = isolate->heap();
- Object* receiver = *args.receiver();
- FixedArrayBase* elms_obj;
- MaybeObject* maybe_elms_obj =
- EnsureJSArrayWithWritableFastElements(heap, receiver, NULL, 0);
- if (maybe_elms_obj == NULL)
- return CallJsBuiltin(isolate, "ArrayShift", args);
- if (!maybe_elms_obj->To(&elms_obj)) return maybe_elms_obj;
-
- if (!IsJSArrayFastElementMovingAllowed(heap, JSArray::cast(receiver))) {
+ Handle<Object> receiver = args.receiver();
+ Handle<FixedArrayBase> elms_obj =
+ EnsureJSArrayWithWritableFastElements(isolate, receiver, NULL, 0);
+ if (elms_obj.is_null() ||
+ !IsJSArrayFastElementMovingAllowed(heap,
+ *Handle<JSArray>::cast(receiver))) {
return CallJsBuiltin(isolate, "ArrayShift", args);
}
- JSArray* array = JSArray::cast(receiver);
+ Handle<JSArray> array = Handle<JSArray>::cast(receiver);
ASSERT(!array->map()->is_observed());
int len = Smi::cast(array->length())->value();
@@ -559,25 +561,24 @@ BUILTIN(ArrayShift) {
// Get first element
ElementsAccessor* accessor = array->GetElementsAccessor();
- Object* first;
- MaybeObject* maybe_first = accessor->Get(receiver, array, 0, elms_obj);
- if (!maybe_first->To(&first)) return maybe_first;
+ Handle<Object> first = accessor->Get(receiver, array, 0, elms_obj);
+ RETURN_IF_EMPTY_HANDLE(isolate, first);
if (first->IsTheHole()) {
- first = heap->undefined_value();
+ first = isolate->factory()->undefined_value();
}
- if (!heap->lo_space()->Contains(elms_obj)) {
- array->set_elements(LeftTrimFixedArray(heap, elms_obj, 1));
+ if (!heap->CanMoveObjectStart(*elms_obj)) {
+ array->set_elements(LeftTrimFixedArray(heap, *elms_obj, 1));
} else {
// Shift the elements.
if (elms_obj->IsFixedArray()) {
- FixedArray* elms = FixedArray::cast(elms_obj);
+ Handle<FixedArray> elms = Handle<FixedArray>::cast(elms_obj);
DisallowHeapAllocation no_gc;
- heap->MoveElements(elms, 0, 1, len - 1);
+ heap->MoveElements(*elms, 0, 1, len - 1);
elms->set(len - 1, heap->the_hole_value());
} else {
- FixedDoubleArray* elms = FixedDoubleArray::cast(elms_obj);
- MoveDoubleElements(elms, 0, elms, 1, len - 1);
+ Handle<FixedDoubleArray> elms = Handle<FixedDoubleArray>::cast(elms_obj);
+ MoveDoubleElements(*elms, 0, *elms, 1, len - 1);
elms->set_the_hole(len - 1);
}
}
@@ -585,29 +586,27 @@ BUILTIN(ArrayShift) {
// Set the length.
array->set_length(Smi::FromInt(len - 1));
- return first;
+ return *first;
}
BUILTIN(ArrayUnshift) {
+ HandleScope scope(isolate);
Heap* heap = isolate->heap();
- Object* receiver = *args.receiver();
- FixedArrayBase* elms_obj;
- MaybeObject* maybe_elms_obj =
- EnsureJSArrayWithWritableFastElements(heap, receiver, NULL, 0);
- if (maybe_elms_obj == NULL)
- return CallJsBuiltin(isolate, "ArrayUnshift", args);
- if (!maybe_elms_obj->To(&elms_obj)) return maybe_elms_obj;
-
- if (!IsJSArrayFastElementMovingAllowed(heap, JSArray::cast(receiver))) {
+ Handle<Object> receiver = args.receiver();
+ Handle<FixedArrayBase> elms_obj =
+ EnsureJSArrayWithWritableFastElements(isolate, receiver, NULL, 0);
+ if (elms_obj.is_null() ||
+ !IsJSArrayFastElementMovingAllowed(heap,
+ *Handle<JSArray>::cast(receiver))) {
return CallJsBuiltin(isolate, "ArrayUnshift", args);
}
- JSArray* array = JSArray::cast(receiver);
+ Handle<JSArray> array = Handle<JSArray>::cast(receiver);
ASSERT(!array->map()->is_observed());
if (!array->HasFastSmiOrObjectElements()) {
return CallJsBuiltin(isolate, "ArrayUnshift", args);
}
- FixedArray* elms = FixedArray::cast(elms_obj);
+ Handle<FixedArray> elms = Handle<FixedArray>::cast(elms_obj);
int len = Smi::cast(array->length())->value();
int to_add = args.length() - 1;
@@ -616,31 +615,26 @@ BUILTIN(ArrayUnshift) {
// we should never hit this case.
ASSERT(to_add <= (Smi::kMaxValue - len));
- MaybeObject* maybe_object =
- array->EnsureCanContainElements(&args, 1, to_add,
- DONT_ALLOW_DOUBLE_ELEMENTS);
- if (maybe_object->IsFailure()) return maybe_object;
+ JSObject::EnsureCanContainElements(array, &args, 1, to_add,
+ DONT_ALLOW_DOUBLE_ELEMENTS);
if (new_length > elms->length()) {
// New backing storage is needed.
int capacity = new_length + (new_length >> 1) + 16;
- FixedArray* new_elms;
- MaybeObject* maybe_elms = heap->AllocateUninitializedFixedArray(capacity);
- if (!maybe_elms->To(&new_elms)) return maybe_elms;
+ Handle<FixedArray> new_elms =
+ isolate->factory()->NewUninitializedFixedArray(capacity);
ElementsKind kind = array->GetElementsKind();
ElementsAccessor* accessor = array->GetElementsAccessor();
- MaybeObject* maybe_failure = accessor->CopyElements(
- NULL, 0, kind, new_elms, to_add,
- ElementsAccessor::kCopyToEndAndInitializeToHole, elms);
- ASSERT(!maybe_failure->IsFailure());
- USE(maybe_failure);
+ accessor->CopyElements(
+ Handle<JSObject>::null(), 0, kind, new_elms, to_add,
+ ElementsAccessor::kCopyToEndAndInitializeToHole, elms);
elms = new_elms;
- array->set_elements(elms);
+ array->set_elements(*elms);
} else {
DisallowHeapAllocation no_gc;
- heap->MoveElements(elms, to_add, 0, len);
+ heap->MoveElements(*elms, to_add, 0, len);
}
// Add the provided values.
@@ -657,18 +651,19 @@ BUILTIN(ArrayUnshift) {
BUILTIN(ArraySlice) {
+ HandleScope scope(isolate);
Heap* heap = isolate->heap();
- Object* receiver = *args.receiver();
- FixedArrayBase* elms;
+ Handle<Object> receiver = args.receiver();
+ Handle<FixedArrayBase> elms;
int len = -1;
if (receiver->IsJSArray()) {
- JSArray* array = JSArray::cast(receiver);
- if (!IsJSArrayFastElementMovingAllowed(heap, array)) {
+ Handle<JSArray> array = Handle<JSArray>::cast(receiver);
+ if (!IsJSArrayFastElementMovingAllowed(heap, *array)) {
return CallJsBuiltin(isolate, "ArraySlice", args);
}
if (array->HasFastElements()) {
- elms = array->elements();
+ elms = handle(array->elements());
} else {
return CallJsBuiltin(isolate, "ArraySlice", args);
}
@@ -677,33 +672,34 @@ BUILTIN(ArraySlice) {
} else {
// Array.slice(arguments, ...) is quite a common idiom (notably more
// than 50% of invocations in Web apps). Treat it in C++ as well.
- Map* arguments_map =
- isolate->context()->native_context()->arguments_boilerplate()->map();
+ Handle<Map> arguments_map(isolate->context()->native_context()->
+ sloppy_arguments_boilerplate()->map());
bool is_arguments_object_with_fast_elements =
receiver->IsJSObject() &&
- JSObject::cast(receiver)->map() == arguments_map;
+ Handle<JSObject>::cast(receiver)->map() == *arguments_map;
if (!is_arguments_object_with_fast_elements) {
return CallJsBuiltin(isolate, "ArraySlice", args);
}
- JSObject* object = JSObject::cast(receiver);
+ Handle<JSObject> object = Handle<JSObject>::cast(receiver);
if (object->HasFastElements()) {
- elms = object->elements();
+ elms = handle(object->elements());
} else {
return CallJsBuiltin(isolate, "ArraySlice", args);
}
- Object* len_obj = object->InObjectPropertyAt(Heap::kArgumentsLengthIndex);
+ Handle<Object> len_obj(
+ object->InObjectPropertyAt(Heap::kArgumentsLengthIndex), isolate);
if (!len_obj->IsSmi()) {
return CallJsBuiltin(isolate, "ArraySlice", args);
}
- len = Smi::cast(len_obj)->value();
+ len = Handle<Smi>::cast(len_obj)->value();
if (len > elms->length()) {
return CallJsBuiltin(isolate, "ArraySlice", args);
}
}
- JSObject* object = JSObject::cast(receiver);
+ Handle<JSObject> object = Handle<JSObject>::cast(receiver);
ASSERT(len >= 0);
int n_arguments = args.length() - 1;
@@ -714,11 +710,11 @@ BUILTIN(ArraySlice) {
int relative_start = 0;
int relative_end = len;
if (n_arguments > 0) {
- Object* arg1 = args[1];
+ Handle<Object> arg1 = args.at<Object>(1);
if (arg1->IsSmi()) {
- relative_start = Smi::cast(arg1)->value();
+ relative_start = Handle<Smi>::cast(arg1)->value();
} else if (arg1->IsHeapNumber()) {
- double start = HeapNumber::cast(arg1)->value();
+ double start = Handle<HeapNumber>::cast(arg1)->value();
if (start < kMinInt || start > kMaxInt) {
return CallJsBuiltin(isolate, "ArraySlice", args);
}
@@ -727,11 +723,11 @@ BUILTIN(ArraySlice) {
return CallJsBuiltin(isolate, "ArraySlice", args);
}
if (n_arguments > 1) {
- Object* arg2 = args[2];
+ Handle<Object> arg2 = args.at<Object>(2);
if (arg2->IsSmi()) {
- relative_end = Smi::cast(arg2)->value();
+ relative_end = Handle<Smi>::cast(arg2)->value();
} else if (arg2->IsHeapNumber()) {
- double end = HeapNumber::cast(arg2)->value();
+ double end = Handle<HeapNumber>::cast(arg2)->value();
if (end < kMinInt || end > kMaxInt) {
return CallJsBuiltin(isolate, "ArraySlice", args);
}
@@ -758,7 +754,8 @@ BUILTIN(ArraySlice) {
bool packed = true;
ElementsAccessor* accessor = ElementsAccessor::ForKind(kind);
for (int i = k; i < final; i++) {
- if (!accessor->HasElement(object, object, i, elms)) {
+ if (!ElementsAccessorHasElementWrapper(
+ accessor, object, object, i, elms)) {
packed = false;
break;
}
@@ -770,40 +767,31 @@ BUILTIN(ArraySlice) {
}
}
- JSArray* result_array;
- MaybeObject* maybe_array = heap->AllocateJSArrayAndStorage(kind,
- result_len,
- result_len);
+ Handle<JSArray> result_array =
+ isolate->factory()->NewJSArray(kind, result_len, result_len);
DisallowHeapAllocation no_gc;
- if (result_len == 0) return maybe_array;
- if (!maybe_array->To(&result_array)) return maybe_array;
+ if (result_len == 0) return *result_array;
ElementsAccessor* accessor = object->GetElementsAccessor();
- MaybeObject* maybe_failure = accessor->CopyElements(
- NULL, k, kind, result_array->elements(), 0, result_len, elms);
- ASSERT(!maybe_failure->IsFailure());
- USE(maybe_failure);
-
- return result_array;
+ accessor->CopyElements(Handle<JSObject>::null(), k, kind,
+ handle(result_array->elements()), 0, result_len, elms);
+ return *result_array;
}
BUILTIN(ArraySplice) {
+ HandleScope scope(isolate);
Heap* heap = isolate->heap();
- Object* receiver = *args.receiver();
- FixedArrayBase* elms_obj;
- MaybeObject* maybe_elms =
- EnsureJSArrayWithWritableFastElements(heap, receiver, &args, 3);
- if (maybe_elms == NULL) {
- return CallJsBuiltin(isolate, "ArraySplice", args);
- }
- if (!maybe_elms->To(&elms_obj)) return maybe_elms;
-
- if (!IsJSArrayFastElementMovingAllowed(heap, JSArray::cast(receiver))) {
+ Handle<Object> receiver = args.receiver();
+ Handle<FixedArrayBase> elms_obj =
+ EnsureJSArrayWithWritableFastElements(isolate, receiver, &args, 3);
+ if (elms_obj.is_null() ||
+ !IsJSArrayFastElementMovingAllowed(heap,
+ *Handle<JSArray>::cast(receiver))) {
return CallJsBuiltin(isolate, "ArraySplice", args);
}
- JSArray* array = JSArray::cast(receiver);
+ Handle<JSArray> array = Handle<JSArray>::cast(receiver);
ASSERT(!array->map()->is_observed());
int len = Smi::cast(array->length())->value();
@@ -812,11 +800,11 @@ BUILTIN(ArraySplice) {
int relative_start = 0;
if (n_arguments > 0) {
- Object* arg1 = args[1];
+ Handle<Object> arg1 = args.at<Object>(1);
if (arg1->IsSmi()) {
- relative_start = Smi::cast(arg1)->value();
+ relative_start = Handle<Smi>::cast(arg1)->value();
} else if (arg1->IsHeapNumber()) {
- double start = HeapNumber::cast(arg1)->value();
+ double start = Handle<HeapNumber>::cast(arg1)->value();
if (start < kMinInt || start > kMaxInt) {
return CallJsBuiltin(isolate, "ArraySplice", args);
}
@@ -861,72 +849,83 @@ BUILTIN(ArraySplice) {
}
if (new_length == 0) {
- MaybeObject* maybe_array = heap->AllocateJSArrayWithElements(
+ Handle<JSArray> result = isolate->factory()->NewJSArrayWithElements(
elms_obj, elements_kind, actual_delete_count);
- if (maybe_array->IsFailure()) return maybe_array;
array->set_elements(heap->empty_fixed_array());
array->set_length(Smi::FromInt(0));
- return maybe_array;
+ return *result;
}
- JSArray* result_array = NULL;
- MaybeObject* maybe_array =
- heap->AllocateJSArrayAndStorage(elements_kind,
- actual_delete_count,
- actual_delete_count);
- if (!maybe_array->To(&result_array)) return maybe_array;
+ Handle<JSArray> result_array =
+ isolate->factory()->NewJSArray(elements_kind,
+ actual_delete_count,
+ actual_delete_count);
if (actual_delete_count > 0) {
DisallowHeapAllocation no_gc;
ElementsAccessor* accessor = array->GetElementsAccessor();
- MaybeObject* maybe_failure = accessor->CopyElements(
- NULL, actual_start, elements_kind, result_array->elements(),
- 0, actual_delete_count, elms_obj);
- // Cannot fail since the origin and target array are of the same elements
- // kind.
- ASSERT(!maybe_failure->IsFailure());
- USE(maybe_failure);
+ accessor->CopyElements(
+ Handle<JSObject>::null(), actual_start, elements_kind,
+ handle(result_array->elements()), 0, actual_delete_count, elms_obj);
}
bool elms_changed = false;
if (item_count < actual_delete_count) {
// Shrink the array.
- const bool trim_array = !heap->lo_space()->Contains(elms_obj) &&
+ const bool trim_array = !heap->lo_space()->Contains(*elms_obj) &&
((actual_start + item_count) <
(len - actual_delete_count - actual_start));
if (trim_array) {
const int delta = actual_delete_count - item_count;
if (elms_obj->IsFixedDoubleArray()) {
- FixedDoubleArray* elms = FixedDoubleArray::cast(elms_obj);
- MoveDoubleElements(elms, delta, elms, 0, actual_start);
+ Handle<FixedDoubleArray> elms =
+ Handle<FixedDoubleArray>::cast(elms_obj);
+ MoveDoubleElements(*elms, delta, *elms, 0, actual_start);
} else {
- FixedArray* elms = FixedArray::cast(elms_obj);
+ Handle<FixedArray> elms = Handle<FixedArray>::cast(elms_obj);
DisallowHeapAllocation no_gc;
- heap->MoveElements(elms, delta, 0, actual_start);
+ heap->MoveElements(*elms, delta, 0, actual_start);
}
- elms_obj = LeftTrimFixedArray(heap, elms_obj, delta);
-
+ if (heap->CanMoveObjectStart(*elms_obj)) {
+ // On the fast path we move the start of the object in memory.
+ elms_obj = handle(LeftTrimFixedArray(heap, *elms_obj, delta));
+ } else {
+ // This is the slow path. We are going to move the elements to the left
+ // by copying them. For trimmed values we store the hole.
+ if (elms_obj->IsFixedDoubleArray()) {
+ Handle<FixedDoubleArray> elms =
+ Handle<FixedDoubleArray>::cast(elms_obj);
+ MoveDoubleElements(*elms, 0, *elms, delta, len - delta);
+ FillWithHoles(*elms, len - delta, len);
+ } else {
+ Handle<FixedArray> elms = Handle<FixedArray>::cast(elms_obj);
+ DisallowHeapAllocation no_gc;
+ heap->MoveElements(*elms, 0, delta, len - delta);
+ FillWithHoles(heap, *elms, len - delta, len);
+ }
+ }
elms_changed = true;
} else {
if (elms_obj->IsFixedDoubleArray()) {
- FixedDoubleArray* elms = FixedDoubleArray::cast(elms_obj);
- MoveDoubleElements(elms, actual_start + item_count,
- elms, actual_start + actual_delete_count,
+ Handle<FixedDoubleArray> elms =
+ Handle<FixedDoubleArray>::cast(elms_obj);
+ MoveDoubleElements(*elms, actual_start + item_count,
+ *elms, actual_start + actual_delete_count,
(len - actual_delete_count - actual_start));
- FillWithHoles(elms, new_length, len);
+ FillWithHoles(*elms, new_length, len);
} else {
- FixedArray* elms = FixedArray::cast(elms_obj);
+ Handle<FixedArray> elms = Handle<FixedArray>::cast(elms_obj);
DisallowHeapAllocation no_gc;
- heap->MoveElements(elms, actual_start + item_count,
+ heap->MoveElements(*elms, actual_start + item_count,
actual_start + actual_delete_count,
(len - actual_delete_count - actual_start));
- FillWithHoles(heap, elms, new_length, len);
+ FillWithHoles(heap, *elms, new_length, len);
}
}
} else if (item_count > actual_delete_count) {
- FixedArray* elms = FixedArray::cast(elms_obj);
+ Handle<FixedArray> elms = Handle<FixedArray>::cast(elms_obj);
// Currently fixed arrays cannot grow too big, so
// we should never hit this case.
ASSERT((item_count - actual_delete_count) <= (Smi::kMaxValue - len));
@@ -935,9 +934,8 @@ BUILTIN(ArraySplice) {
if (new_length > elms->length()) {
// New backing storage is needed.
int capacity = new_length + (new_length >> 1) + 16;
- FixedArray* new_elms;
- MaybeObject* maybe_obj = heap->AllocateUninitializedFixedArray(capacity);
- if (!maybe_obj->To(&new_elms)) return maybe_obj;
+ Handle<FixedArray> new_elms =
+ isolate->factory()->NewUninitializedFixedArray(capacity);
DisallowHeapAllocation no_gc;
@@ -945,30 +943,26 @@ BUILTIN(ArraySplice) {
ElementsAccessor* accessor = array->GetElementsAccessor();
if (actual_start > 0) {
// Copy the part before actual_start as is.
- MaybeObject* maybe_failure = accessor->CopyElements(
- NULL, 0, kind, new_elms, 0, actual_start, elms);
- ASSERT(!maybe_failure->IsFailure());
- USE(maybe_failure);
+ accessor->CopyElements(
+ Handle<JSObject>::null(), 0, kind, new_elms, 0, actual_start, elms);
}
- MaybeObject* maybe_failure = accessor->CopyElements(
- NULL, actual_start + actual_delete_count, kind, new_elms,
- actual_start + item_count,
+ accessor->CopyElements(
+ Handle<JSObject>::null(), actual_start + actual_delete_count, kind,
+ new_elms, actual_start + item_count,
ElementsAccessor::kCopyToEndAndInitializeToHole, elms);
- ASSERT(!maybe_failure->IsFailure());
- USE(maybe_failure);
elms_obj = new_elms;
elms_changed = true;
} else {
DisallowHeapAllocation no_gc;
- heap->MoveElements(elms, actual_start + item_count,
+ heap->MoveElements(*elms, actual_start + item_count,
actual_start + actual_delete_count,
(len - actual_delete_count - actual_start));
}
}
if (IsFastDoubleElementsKind(elements_kind)) {
- FixedDoubleArray* elms = FixedDoubleArray::cast(elms_obj);
+ Handle<FixedDoubleArray> elms = Handle<FixedDoubleArray>::cast(elms_obj);
for (int k = actual_start; k < actual_start + item_count; k++) {
Object* arg = args[3 + k - actual_start];
if (arg->IsSmi()) {
@@ -978,7 +972,7 @@ BUILTIN(ArraySplice) {
}
}
} else {
- FixedArray* elms = FixedArray::cast(elms_obj);
+ Handle<FixedArray> elms = Handle<FixedArray>::cast(elms_obj);
DisallowHeapAllocation no_gc;
WriteBarrierMode mode = elms->GetWriteBarrierMode(no_gc);
for (int k = actual_start; k < actual_start + item_count; k++) {
@@ -987,21 +981,22 @@ BUILTIN(ArraySplice) {
}
if (elms_changed) {
- array->set_elements(elms_obj);
+ array->set_elements(*elms_obj);
}
// Set the length.
array->set_length(Smi::FromInt(new_length));
- return result_array;
+ return *result_array;
}
BUILTIN(ArrayConcat) {
+ HandleScope scope(isolate);
Heap* heap = isolate->heap();
- Context* native_context = isolate->context()->native_context();
- JSObject* array_proto =
- JSObject::cast(native_context->array_function()->prototype());
- if (!ArrayPrototypeHasNoElements(heap, native_context, array_proto)) {
+ Handle<Context> native_context(isolate->context()->native_context());
+ Handle<JSObject> array_proto(
+ JSObject::cast(native_context->array_function()->prototype()));
+ if (!ArrayPrototypeHasNoElements(heap, *native_context, *array_proto)) {
return CallJsBuiltin(isolate, "ArrayConcat", args);
}
@@ -1013,13 +1008,13 @@ BUILTIN(ArrayConcat) {
bool has_double = false;
bool is_holey = false;
for (int i = 0; i < n_arguments; i++) {
- Object* arg = args[i];
+ Handle<Object> arg = args.at<Object>(i);
if (!arg->IsJSArray() ||
- !JSArray::cast(arg)->HasFastElements() ||
- JSArray::cast(arg)->GetPrototype() != array_proto) {
+ !Handle<JSArray>::cast(arg)->HasFastElements() ||
+ Handle<JSArray>::cast(arg)->GetPrototype() != *array_proto) {
return CallJsBuiltin(isolate, "ArrayConcat", args);
}
- int len = Smi::cast(JSArray::cast(arg)->length())->value();
+ int len = Smi::cast(Handle<JSArray>::cast(arg)->length())->value();
// We shouldn't overflow when adding another len.
const int kHalfOfMaxInt = 1 << (kBitsPerInt - 2);
@@ -1032,7 +1027,7 @@ BUILTIN(ArrayConcat) {
return CallJsBuiltin(isolate, "ArrayConcat", args);
}
- ElementsKind arg_kind = JSArray::cast(arg)->map()->elements_kind();
+ ElementsKind arg_kind = Handle<JSArray>::cast(arg)->map()->elements_kind();
has_double = has_double || IsFastDoubleElementsKind(arg_kind);
is_holey = is_holey || IsFastHoleyElementsKind(arg_kind);
if (IsMoreGeneralElementsKindTransition(elements_kind, arg_kind)) {
@@ -1048,34 +1043,29 @@ BUILTIN(ArrayConcat) {
ArrayStorageAllocationMode mode =
has_double && IsFastObjectElementsKind(elements_kind)
? INITIALIZE_ARRAY_ELEMENTS_WITH_HOLE : DONT_INITIALIZE_ARRAY_ELEMENTS;
- JSArray* result_array;
- // Allocate result.
- MaybeObject* maybe_array =
- heap->AllocateJSArrayAndStorage(elements_kind,
- result_len,
- result_len,
- mode);
- if (!maybe_array->To(&result_array)) return maybe_array;
- if (result_len == 0) return result_array;
+ Handle<JSArray> result_array =
+ isolate->factory()->NewJSArray(elements_kind,
+ result_len,
+ result_len,
+ mode);
+ if (result_len == 0) return *result_array;
int j = 0;
- FixedArrayBase* storage = result_array->elements();
+ Handle<FixedArrayBase> storage(result_array->elements());
ElementsAccessor* accessor = ElementsAccessor::ForKind(elements_kind);
for (int i = 0; i < n_arguments; i++) {
- JSArray* array = JSArray::cast(args[i]);
+ Handle<JSArray> array = args.at<JSArray>(i);
int len = Smi::cast(array->length())->value();
ElementsKind from_kind = array->GetElementsKind();
if (len > 0) {
- MaybeObject* maybe_failure =
- accessor->CopyElements(array, 0, from_kind, storage, j, len);
- if (maybe_failure->IsFailure()) return maybe_failure;
+ accessor->CopyElements(array, 0, from_kind, storage, j, len);
j += len;
}
}
ASSERT(j == result_len);
- return result_array;
+ return *result_array;
}
@@ -1174,7 +1164,7 @@ MUST_USE_RESULT static MaybeObject* HandleApiCallHelper(
}
SharedFunctionInfo* shared = function->shared();
- if (shared->is_classic_mode() && !shared->native()) {
+ if (shared->strict_mode() == SLOPPY && !shared->native()) {
Object* recv = args[0];
ASSERT(!recv->IsNull());
if (recv->IsUndefined()) {
@@ -1320,9 +1310,7 @@ static void Generate_LoadIC_Normal(MacroAssembler* masm) {
static void Generate_LoadIC_Getter_ForDeopt(MacroAssembler* masm) {
- LoadStubCompiler::GenerateLoadViaGetter(
- masm, Handle<HeapType>::null(),
- LoadStubCompiler::registers()[0], Handle<JSFunction>());
+ LoadStubCompiler::GenerateLoadViaGetterForDeopt(masm);
}
@@ -1366,8 +1354,8 @@ static void Generate_KeyedLoadIC_IndexedInterceptor(MacroAssembler* masm) {
}
-static void Generate_KeyedLoadIC_NonStrictArguments(MacroAssembler* masm) {
- KeyedLoadIC::GenerateNonStrictArguments(masm);
+static void Generate_KeyedLoadIC_SloppyArguments(MacroAssembler* masm) {
+ KeyedLoadIC::GenerateSloppyArguments(masm);
}
@@ -1387,18 +1375,17 @@ static void Generate_StoreIC_Normal(MacroAssembler* masm) {
static void Generate_StoreIC_Setter_ForDeopt(MacroAssembler* masm) {
- StoreStubCompiler::GenerateStoreViaSetter(
- masm, Handle<HeapType>::null(), Handle<JSFunction>());
+ StoreStubCompiler::GenerateStoreViaSetterForDeopt(masm);
}
static void Generate_KeyedStoreIC_Generic(MacroAssembler* masm) {
- KeyedStoreIC::GenerateGeneric(masm, kNonStrictMode);
+ KeyedStoreIC::GenerateGeneric(masm, SLOPPY);
}
static void Generate_KeyedStoreIC_Generic_Strict(MacroAssembler* masm) {
- KeyedStoreIC::GenerateGeneric(masm, kStrictMode);
+ KeyedStoreIC::GenerateGeneric(masm, STRICT);
}
@@ -1432,8 +1419,8 @@ static void Generate_KeyedStoreIC_PreMonomorphic_Strict(MacroAssembler* masm) {
}
-static void Generate_KeyedStoreIC_NonStrictArguments(MacroAssembler* masm) {
- KeyedStoreIC::GenerateNonStrictArguments(masm);
+static void Generate_KeyedStoreIC_SloppyArguments(MacroAssembler* masm) {
+ KeyedStoreIC::GenerateSloppyArguments(masm);
}
@@ -1599,9 +1586,7 @@ void Builtins::InitBuiltinFunctionTable() {
functions->c_code = NULL; \
functions->s_name = #aname; \
functions->name = k##aname; \
- functions->flags = Code::ComputeFlags( \
- Code::HANDLER, MONOMORPHIC, kNoExtraICState, \
- Code::NORMAL, Code::kind); \
+ functions->flags = Code::ComputeHandlerFlags(Code::kind); \
functions->extra_args = NO_EXTRA_ARGUMENTS; \
++functions;
@@ -1627,7 +1612,9 @@ void Builtins::SetUp(Isolate* isolate, bool create_heap_objects) {
// For now we generate builtin adaptor code into a stack-allocated
// buffer, before copying it into individual code objects. Be careful
// with alignment, some platforms don't like unaligned code.
- union { int force_alignment; byte buffer[8*KB]; } u;
+ // TODO(jbramley): I had to increase the size of this buffer from 8KB because
+ // we can generate a lot of debug code on ARM64.
+ union { int force_alignment; byte buffer[16*KB]; } u;
// Traverse the list of builtins and generate an adaptor in a
// separate code object for each one.
@@ -1650,7 +1637,7 @@ void Builtins::SetUp(Isolate* isolate, bool create_heap_objects) {
{
// During startup it's OK to always allocate and defer GC to later.
// This simplifies things because we don't need to retry.
- AlwaysAllocateScope __scope__;
+ AlwaysAllocateScope __scope__(isolate);
{ MaybeObject* maybe_code =
heap->CreateCode(desc, flags, masm.CodeObject());
if (!maybe_code->ToObject(&code)) {
@@ -1712,12 +1699,12 @@ const char* Builtins::Lookup(byte* pc) {
void Builtins::Generate_InterruptCheck(MacroAssembler* masm) {
- masm->TailCallRuntime(Runtime::kInterrupt, 0, 1);
+ masm->TailCallRuntime(Runtime::kHiddenInterrupt, 0, 1);
}
void Builtins::Generate_StackCheck(MacroAssembler* masm) {
- masm->TailCallRuntime(Runtime::kStackGuard, 0, 1);
+ masm->TailCallRuntime(Runtime::kHiddenStackGuard, 0, 1);
}
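
Note: the ArrayPush, ArrayUnshift and ArraySplice hunks above all allocate new backing storage with the same policy, capacity = new_length + (new_length >> 1) + 16, i.e. roughly 1.5x growth plus a small fixed slack. A minimal standalone sketch of that arithmetic (the helper name is illustrative, not a V8 API):

    #include <cstdio>
    #include <initializer_list>

    // Growth policy used by the handlified array builtins above:
    // grow to ~1.5x the requested length, plus 16 elements of slack.
    static int NewBackingStoreCapacity(int new_length) {
      return new_length + (new_length >> 1) + 16;
    }

    int main() {
      // new_length = 4    -> 4 + 2 + 16      = 22
      // new_length = 100  -> 100 + 50 + 16   = 166
      // new_length = 1000 -> 1000 + 500 + 16 = 1516
      for (int n : {4, 100, 1000}) {
        std::printf("new_length=%d capacity=%d\n", n, NewBackingStoreCapacity(n));
      }
      return 0;
    }
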
diff --git a/deps/v8/src/builtins.h b/deps/v8/src/builtins.h
index d977a4817c..88cfd53f48 100644
--- a/deps/v8/src/builtins.h
+++ b/deps/v8/src/builtins.h
@@ -137,7 +137,7 @@ enum BuiltinExtraArguments {
kNoExtraICState) \
V(KeyedLoadIC_IndexedInterceptor, KEYED_LOAD_IC, MONOMORPHIC, \
kNoExtraICState) \
- V(KeyedLoadIC_NonStrictArguments, KEYED_LOAD_IC, MONOMORPHIC, \
+ V(KeyedLoadIC_SloppyArguments, KEYED_LOAD_IC, MONOMORPHIC, \
kNoExtraICState) \
\
V(StoreIC_Setter_ForDeopt, STORE_IC, MONOMORPHIC, \
@@ -156,8 +156,8 @@ enum BuiltinExtraArguments {
StoreIC::kStrictModeState) \
V(KeyedStoreIC_Generic_Strict, KEYED_STORE_IC, GENERIC, \
StoreIC::kStrictModeState) \
- V(KeyedStoreIC_NonStrictArguments, KEYED_STORE_IC, MONOMORPHIC, \
- kNoExtraICState) \
+ V(KeyedStoreIC_SloppyArguments, KEYED_STORE_IC, MONOMORPHIC, \
+ kNoExtraICState) \
\
/* Uses KeyedLoadIC_Initialize; must be after in list. */ \
V(FunctionCall, BUILTIN, UNINITIALIZED, \
diff --git a/deps/v8/src/char-predicates.h b/deps/v8/src/char-predicates.h
index 767ad6513a..f52feda6c1 100644
--- a/deps/v8/src/char-predicates.h
+++ b/deps/v8/src/char-predicates.h
@@ -66,6 +66,27 @@ struct IdentifierPart {
}
};
+
+// WhiteSpace according to ECMA-262 5.1, 7.2.
+struct WhiteSpace {
+ static inline bool Is(uc32 c) {
+ return c == 0x0009 || // <TAB>
+ c == 0x000B || // <VT>
+ c == 0x000C || // <FF>
+ c == 0xFEFF || // <BOM>
+ // \u0020 and \u00A0 are included in unibrow::WhiteSpace.
+ unibrow::WhiteSpace::Is(c);
+ }
+};
+
+
+// WhiteSpace and LineTerminator according to ECMA-262 5.1, 7.2 and 7.3.
+struct WhiteSpaceOrLineTerminator {
+ static inline bool Is(uc32 c) {
+ return WhiteSpace::Is(c) || unibrow::LineTerminator::Is(c);
+ }
+};
+
} } // namespace v8::internal
#endif // V8_CHAR_PREDICATES_H_
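
Note: the new WhiteSpace and WhiteSpaceOrLineTerminator predicates above follow ECMA-262 5.1 sections 7.2 and 7.3. A rough standalone approximation, covering only the code points spelled out in the hunk plus SP/NBSP (which the comment says come from unibrow::WhiteSpace) and the four ECMA-262 line terminators; the real structs defer the full Unicode set to unibrow:

    #include <cassert>
    #include <cstdint>

    typedef int32_t uc32;

    // Approximation of the WhiteSpace predicate added above.
    static bool IsWhiteSpace(uc32 c) {
      return c == 0x0009 ||  // <TAB>
             c == 0x000B ||  // <VT>
             c == 0x000C ||  // <FF>
             c == 0x0020 ||  // <SP>   (via unibrow in the real code)
             c == 0x00A0 ||  // <NBSP> (via unibrow in the real code)
             c == 0xFEFF;    // <BOM>
    }

    // LineTerminator per ECMA-262 5.1, 7.3: LF, CR, LS, PS.
    static bool IsLineTerminator(uc32 c) {
      return c == 0x000A || c == 0x000D || c == 0x2028 || c == 0x2029;
    }

    static bool IsWhiteSpaceOrLineTerminator(uc32 c) {
      return IsWhiteSpace(c) || IsLineTerminator(c);
    }

    int main() {
      assert(IsWhiteSpace('\t'));
      assert(!IsWhiteSpace('a'));
      assert(IsWhiteSpaceOrLineTerminator(0x2028));  // <LS>
      return 0;
    }
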
diff --git a/deps/v8/src/checks.cc b/deps/v8/src/checks.cc
index 62e04ff205..3a2de28a2e 100644
--- a/deps/v8/src/checks.cc
+++ b/deps/v8/src/checks.cc
@@ -38,30 +38,34 @@
#include "platform.h"
#include "v8.h"
+namespace v8 {
+namespace internal {
+
+intptr_t HeapObjectTagMask() { return kHeapObjectTagMask; }
// Attempts to dump a backtrace (if supported).
-static V8_INLINE void DumpBacktrace() {
+void DumpBacktrace() {
#if V8_LIBC_GLIBC || V8_OS_BSD
void* trace[100];
int size = backtrace(trace, ARRAY_SIZE(trace));
char** symbols = backtrace_symbols(trace, size);
- i::OS::PrintError("\n==== C stack trace ===============================\n\n");
+ OS::PrintError("\n==== C stack trace ===============================\n\n");
if (size == 0) {
- i::OS::PrintError("(empty)\n");
+ OS::PrintError("(empty)\n");
} else if (symbols == NULL) {
- i::OS::PrintError("(no symbols)\n");
+ OS::PrintError("(no symbols)\n");
} else {
for (int i = 1; i < size; ++i) {
- i::OS::PrintError("%2d: ", i);
+ OS::PrintError("%2d: ", i);
char mangled[201];
if (sscanf(symbols[i], "%*[^(]%*[(]%200[^)+]", mangled) == 1) { // NOLINT
int status;
size_t length;
char* demangled = abi::__cxa_demangle(mangled, NULL, &length, &status);
- i::OS::PrintError("%s\n", demangled != NULL ? demangled : mangled);
+ OS::PrintError("%s\n", demangled != NULL ? demangled : mangled);
free(demangled);
} else {
- i::OS::PrintError("??\n");
+ OS::PrintError("??\n");
}
}
}
@@ -73,22 +77,24 @@ static V8_INLINE void DumpBacktrace() {
bt_init_accessor(&acc, BT_SELF);
bt_load_memmap(&acc, &memmap);
bt_sprn_memmap(&memmap, out, sizeof(out));
- i::OS::PrintError(out);
+ OS::PrintError(out);
bt_addr_t trace[100];
int size = bt_get_backtrace(&acc, trace, ARRAY_SIZE(trace));
- i::OS::PrintError("\n==== C stack trace ===============================\n\n");
+ OS::PrintError("\n==== C stack trace ===============================\n\n");
if (size == 0) {
- i::OS::PrintError("(empty)\n");
+ OS::PrintError("(empty)\n");
} else {
bt_sprnf_addrs(&memmap, trace, size, const_cast<char*>("%a\n"),
out, sizeof(out), NULL);
- i::OS::PrintError(out);
+ OS::PrintError(out);
}
bt_unload_memmap(&memmap);
bt_release_accessor(&acc);
#endif // V8_LIBC_GLIBC || V8_OS_BSD
}
+} } // namespace v8::internal
+
// Contains protection against recursive calls (faults while handling faults).
extern "C" void V8_Fatal(const char* file, int line, const char* format, ...) {
@@ -102,7 +108,7 @@ extern "C" void V8_Fatal(const char* file, int line, const char* format, ...) {
i::OS::VPrintError(format, arguments);
va_end(arguments);
i::OS::PrintError("\n#\n");
- DumpBacktrace();
+ v8::internal::DumpBacktrace();
fflush(stderr);
i::OS::Abort();
}
@@ -136,10 +142,3 @@ void CheckNonEqualsHelper(const char* file,
unexpected_source, value_source, *value_str);
}
}
-
-
-namespace v8 { namespace internal {
-
- intptr_t HeapObjectTagMask() { return kHeapObjectTagMask; }
-
-} } // namespace v8::internal
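
Note: DumpBacktrace now lives in v8::internal and is declared in checks.h so it can be called ad hoc while debugging. A standalone sketch of the glibc/BSD technique it uses (mirroring the V8_LIBC_GLIBC || V8_OS_BSD branch above; the abi::__cxa_demangle step and OS::PrintError wrapper are omitted for brevity, so this prints mangled names to stderr):

    #include <execinfo.h>
    #include <cstdio>
    #include <cstdlib>

    // Capture up to 100 return addresses and print their symbol names.
    static void DumpBacktraceSketch() {
      void* trace[100];
      int size = backtrace(trace, 100);
      char** symbols = backtrace_symbols(trace, size);
      std::fprintf(stderr, "\n==== C stack trace ====\n\n");
      if (size == 0) {
        std::fprintf(stderr, "(empty)\n");
      } else if (symbols == NULL) {
        std::fprintf(stderr, "(no symbols)\n");
      } else {
        // Skip frame 0 (this function itself), as the real code does.
        for (int i = 1; i < size; ++i) {
          std::fprintf(stderr, "%2d: %s\n", i, symbols[i]);
        }
      }
      std::free(symbols);
    }

    int main() {
      DumpBacktraceSketch();
      return 0;
    }
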
diff --git a/deps/v8/src/checks.h b/deps/v8/src/checks.h
index f7b145fc8a..e53475a0a4 100644
--- a/deps/v8/src/checks.h
+++ b/deps/v8/src/checks.h
@@ -34,6 +34,7 @@
extern "C" void V8_Fatal(const char* file, int line, const char* format, ...);
+
// The FATAL, UNREACHABLE and UNIMPLEMENTED macros are useful during
// development, but they should not be relied on in the final product.
#ifdef DEBUG
@@ -51,6 +52,23 @@ extern "C" void V8_Fatal(const char* file, int line, const char* format, ...);
#define UNREACHABLE() ((void) 0)
#endif
+// Simulator specific helpers.
+#if defined(USE_SIMULATOR) && defined(V8_TARGET_ARCH_ARM64)
+ // TODO(all): If possible automatically prepend an indicator like
+ // UNIMPLEMENTED or LOCATION.
+ #define ASM_UNIMPLEMENTED(message) \
+ __ Debug(message, __LINE__, NO_PARAM)
+ #define ASM_UNIMPLEMENTED_BREAK(message) \
+ __ Debug(message, __LINE__, \
+ FLAG_ignore_asm_unimplemented_break ? NO_PARAM : BREAK)
+ #define ASM_LOCATION(message) \
+ __ Debug("LOCATION: " message, __LINE__, NO_PARAM)
+#else
+ #define ASM_UNIMPLEMENTED(message)
+ #define ASM_UNIMPLEMENTED_BREAK(message)
+ #define ASM_LOCATION(message)
+#endif
+
// The CHECK macro checks that the given condition is true; if not, it
// prints a message to stderr and aborts.
@@ -288,8 +306,12 @@ extern bool FLAG_enable_slow_asserts;
#define SLOW_ASSERT(condition) ((void) 0)
const bool FLAG_enable_slow_asserts = false;
#endif
-} // namespace internal
-} // namespace v8
+
+// Exposed for making debugging easier (to see where your function is being
+// called, just add a call to DumpBacktrace).
+void DumpBacktrace();
+
+} } // namespace v8::internal
// The ASSERT macro is equivalent to CHECK except that it only
diff --git a/deps/v8/src/circular-queue.h b/deps/v8/src/circular-queue.h
index 94bc89e7df..71ef38322f 100644
--- a/deps/v8/src/circular-queue.h
+++ b/deps/v8/src/circular-queue.h
@@ -28,6 +28,7 @@
#ifndef V8_CIRCULAR_QUEUE_H_
#define V8_CIRCULAR_QUEUE_H_
+#include "atomicops.h"
#include "v8globals.h"
namespace v8 {
diff --git a/deps/v8/src/code-stubs-hydrogen.cc b/deps/v8/src/code-stubs-hydrogen.cc
index 455d087684..040c260133 100644
--- a/deps/v8/src/code-stubs-hydrogen.cc
+++ b/deps/v8/src/code-stubs-hydrogen.cc
@@ -81,6 +81,11 @@ class CodeStubGraphBuilderBase : public HGraphBuilder {
HContext* context() { return context_; }
Isolate* isolate() { return info_.isolate(); }
+ HLoadNamedField* BuildLoadNamedField(HValue* object,
+ Representation representation,
+ int offset,
+ bool is_inobject);
+
enum ArgumentClass {
NONE,
SINGLE,
@@ -93,9 +98,20 @@ class CodeStubGraphBuilderBase : public HGraphBuilder {
HValue* BuildInternalArrayConstructor(ElementsKind kind,
ArgumentClass argument_class);
- void BuildInstallOptimizedCode(HValue* js_function, HValue* native_context,
- HValue* code_object);
+ // BuildCheckAndInstallOptimizedCode emits code to install the optimized
+ // function found in the optimized code map at map_index in js_function, if
+ // the function at map_index matches the given native_context. Builder is
+ // left in the "Then()" state after the install.
+ void BuildCheckAndInstallOptimizedCode(HValue* js_function,
+ HValue* native_context,
+ IfBuilder* builder,
+ HValue* optimized_map,
+ HValue* map_index);
void BuildInstallCode(HValue* js_function, HValue* shared_info);
+
+ HInstruction* LoadFromOptimizedCodeMap(HValue* optimized_map,
+ HValue* iterator,
+ int field_offset);
void BuildInstallFromOptimizedCodeMap(HValue* js_function,
HValue* shared_info,
HValue* native_context);
@@ -247,8 +263,7 @@ Handle<Code> HydrogenCodeStub::GenerateLightweightMissCode(Isolate* isolate) {
GetCodeKind(),
GetICState(),
GetExtraICState(),
- GetStubType(),
- GetStubFlags());
+ GetStubType());
Handle<Code> new_object = factory->NewCode(
desc, flags, masm.CodeObject(), NeedsImmovableCode());
return new_object;
@@ -530,15 +545,11 @@ HValue* CodeStubGraphBuilder<CreateAllocationSiteStub>::BuildCodeStub() {
Add<HStoreNamedField>(site_list, HObjectAccess::ForAllocationSiteList(),
object);
- // We use a hammer (SkipWriteBarrier()) to indicate that we know the input
- // cell is really a Cell, and so no write barrier is needed.
- // TODO(mvstanton): Add a debug_code check to verify the input cell is really
- // a cell. (perhaps with a new instruction, HAssert).
- HInstruction* cell = GetParameter(0);
- HObjectAccess access = HObjectAccess::ForCellValue();
- store = Add<HStoreNamedField>(cell, access, object);
- store->SkipWriteBarrier();
- return cell;
+ HInstruction* feedback_vector = GetParameter(0);
+ HInstruction* slot = GetParameter(1);
+ Add<HStoreKeyed>(feedback_vector, slot, object, FAST_ELEMENTS,
+ INITIALIZING_STORE);
+ return feedback_vector;
}
@@ -552,7 +563,7 @@ HValue* CodeStubGraphBuilder<KeyedLoadFastElementStub>::BuildCodeStub() {
HInstruction* load = BuildUncheckedMonomorphicElementAccess(
GetParameter(0), GetParameter(1), NULL,
casted_stub()->is_js_array(), casted_stub()->elements_kind(),
- false, NEVER_RETURN_HOLE, STANDARD_STORE);
+ LOAD, NEVER_RETURN_HOLE, STANDARD_STORE);
return load;
}
@@ -562,14 +573,32 @@ Handle<Code> KeyedLoadFastElementStub::GenerateCode(Isolate* isolate) {
}
+HLoadNamedField* CodeStubGraphBuilderBase::BuildLoadNamedField(
+ HValue* object,
+ Representation representation,
+ int offset,
+ bool is_inobject) {
+ HObjectAccess access = is_inobject
+ ? HObjectAccess::ForObservableJSObjectOffset(offset, representation)
+ : HObjectAccess::ForBackingStoreOffset(offset, representation);
+ if (representation.IsDouble()) {
+ // Load the heap number.
+ object = Add<HLoadNamedField>(
+ object, static_cast<HValue*>(NULL),
+ access.WithRepresentation(Representation::Tagged()));
+ // Load the double value from it.
+ access = HObjectAccess::ForHeapNumberValue();
+ }
+ return Add<HLoadNamedField>(object, static_cast<HValue*>(NULL), access);
+}
+
+
template<>
HValue* CodeStubGraphBuilder<LoadFieldStub>::BuildCodeStub() {
- Representation rep = casted_stub()->representation();
- int offset = casted_stub()->offset();
- HObjectAccess access = casted_stub()->is_inobject() ?
- HObjectAccess::ForObservableJSObjectOffset(offset, rep) :
- HObjectAccess::ForBackingStoreOffset(offset, rep);
- return AddLoadNamedField(GetParameter(0), access);
+ return BuildLoadNamedField(GetParameter(0),
+ casted_stub()->representation(),
+ casted_stub()->offset(),
+ casted_stub()->is_inobject());
}
@@ -579,17 +608,15 @@ Handle<Code> LoadFieldStub::GenerateCode(Isolate* isolate) {
template<>
-HValue* CodeStubGraphBuilder<KeyedLoadFieldStub>::BuildCodeStub() {
- Representation rep = casted_stub()->representation();
- int offset = casted_stub()->offset();
- HObjectAccess access = casted_stub()->is_inobject() ?
- HObjectAccess::ForObservableJSObjectOffset(offset, rep) :
- HObjectAccess::ForBackingStoreOffset(offset, rep);
- return AddLoadNamedField(GetParameter(0), access);
+HValue* CodeStubGraphBuilder<StringLengthStub>::BuildCodeStub() {
+ HValue* string = BuildLoadNamedField(
+ GetParameter(0), Representation::Tagged(), JSValue::kValueOffset, true);
+ return BuildLoadNamedField(
+ string, Representation::Tagged(), String::kLengthOffset, true);
}
-Handle<Code> KeyedLoadFieldStub::GenerateCode(Isolate* isolate) {
+Handle<Code> StringLengthStub::GenerateCode(Isolate* isolate) {
return DoGenerateCode(isolate, this);
}
@@ -599,7 +626,7 @@ HValue* CodeStubGraphBuilder<KeyedStoreFastElementStub>::BuildCodeStub() {
BuildUncheckedMonomorphicElementAccess(
GetParameter(0), GetParameter(1), GetParameter(2),
casted_stub()->is_js_array(), casted_stub()->elements_kind(),
- true, NEVER_RETURN_HOLE, casted_stub()->store_mode());
+ STORE, NEVER_RETURN_HOLE, casted_stub()->store_mode());
return GetParameter(2);
}
@@ -914,7 +941,7 @@ HValue* CodeStubGraphBuilder<BinaryOpICStub>::BuildCodeInitializedStub() {
// If we encounter a generic argument, the number conversion is
// observable, thus we cannot afford to bail out after the fact.
if (!state.HasSideEffects()) {
- if (result_type->Is(Type::Smi())) {
+ if (result_type->Is(Type::SignedSmall())) {
if (state.op() == Token::SHR) {
// TODO(olivf) Replace this by a SmiTagU Instruction.
// 0x40000000: this number would convert to negative when interpreting
@@ -1033,13 +1060,16 @@ HValue* CodeStubGraphBuilder<StoreGlobalStub>::BuildCodeInitializedStub() {
Handle<PropertyCell> placeholder_cell =
isolate()->factory()->NewPropertyCell(placeholer_value);
- HParameter* receiver = GetParameter(0);
HParameter* value = GetParameter(2);
- // Check that the map of the global has not changed: use a placeholder map
- // that will be replaced later with the global object's map.
- Handle<Map> placeholder_map = isolate()->factory()->meta_map();
- Add<HCheckMaps>(receiver, placeholder_map, top_info());
+ if (stub->check_global()) {
+ // Check that the map of the global has not changed: use a placeholder map
+ // that will be replaced later with the global object's map.
+ Handle<Map> placeholder_map = isolate()->factory()->meta_map();
+ HValue* global = Add<HConstant>(
+ StoreGlobalStub::global_placeholder(isolate()));
+ Add<HCheckMaps>(global, placeholder_map, top_info());
+ }
HValue* cell = Add<HConstant>(placeholder_cell);
HObjectAccess access(HObjectAccess::ForCellPayload(isolate()));
@@ -1096,7 +1126,7 @@ HValue* CodeStubGraphBuilder<ElementsTransitionAndStoreStub>::BuildCodeStub() {
BuildUncheckedMonomorphicElementAccess(object, key, value,
casted_stub()->is_jsarray(),
casted_stub()->to_kind(),
- true, ALLOW_RETURN_HOLE,
+ STORE, ALLOW_RETURN_HOLE,
casted_stub()->store_mode());
}
@@ -1109,10 +1139,27 @@ Handle<Code> ElementsTransitionAndStoreStub::GenerateCode(Isolate* isolate) {
}
-void CodeStubGraphBuilderBase::BuildInstallOptimizedCode(
+void CodeStubGraphBuilderBase::BuildCheckAndInstallOptimizedCode(
HValue* js_function,
HValue* native_context,
- HValue* code_object) {
+ IfBuilder* builder,
+ HValue* optimized_map,
+ HValue* map_index) {
+ HValue* osr_ast_id_none = Add<HConstant>(BailoutId::None().ToInt());
+ HValue* context_slot = LoadFromOptimizedCodeMap(
+ optimized_map, map_index, SharedFunctionInfo::kContextOffset);
+ HValue* osr_ast_slot = LoadFromOptimizedCodeMap(
+ optimized_map, map_index, SharedFunctionInfo::kOsrAstIdOffset);
+ builder->If<HCompareObjectEqAndBranch>(native_context,
+ context_slot);
+ builder->AndIf<HCompareObjectEqAndBranch>(osr_ast_slot, osr_ast_id_none);
+ builder->Then();
+ HValue* code_object = LoadFromOptimizedCodeMap(optimized_map,
+ map_index, SharedFunctionInfo::kCachedCodeOffset);
+  // ... and load the literals from the same entry.
+ HValue* literals = LoadFromOptimizedCodeMap(optimized_map,
+ map_index, SharedFunctionInfo::kLiteralsOffset);
+
Counters* counters = isolate()->counters();
AddIncrementCounter(counters->fast_new_closure_install_optimized());
@@ -1120,6 +1167,8 @@ void CodeStubGraphBuilderBase::BuildInstallOptimizedCode(
// map and either unmangle them on marking or do nothing as the whole map is
// discarded on major GC anyway.
Add<HStoreCodeEntry>(js_function, code_object);
+ Add<HStoreNamedField>(js_function, HObjectAccess::ForLiteralsPointer(),
+ literals);
// Now link a function into a list of optimized functions.
HValue* optimized_functions_list = Add<HLoadNamedField>(
@@ -1133,6 +1182,8 @@ void CodeStubGraphBuilderBase::BuildInstallOptimizedCode(
Add<HStoreNamedField>(native_context,
HObjectAccess::ForContextSlot(Context::OPTIMIZED_FUNCTIONS_LIST),
js_function);
+
+ // The builder continues in the "then" after this function.
}
@@ -1147,6 +1198,24 @@ void CodeStubGraphBuilderBase::BuildInstallCode(HValue* js_function,
}
+HInstruction* CodeStubGraphBuilderBase::LoadFromOptimizedCodeMap(
+ HValue* optimized_map,
+ HValue* iterator,
+ int field_offset) {
+  // By making sure to express these loads in the form [<hvalue> + constant],
+  // the keyed load can be hoisted.
+ ASSERT(field_offset >= 0 && field_offset < SharedFunctionInfo::kEntryLength);
+ HValue* field_slot = iterator;
+ if (field_offset > 0) {
+ HValue* field_offset_value = Add<HConstant>(field_offset);
+ field_slot = AddUncasted<HAdd>(iterator, field_offset_value);
+ }
+ HInstruction* field_entry = Add<HLoadKeyed>(optimized_map, field_slot,
+ static_cast<HValue*>(NULL), FAST_ELEMENTS);
+ return field_entry;
+}
+
+
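
LoadFromOptimizedCodeMap deliberately expresses each load as [iterator + constant offset] so the keyed load stays in a loop-hoistable form. A minimal sketch of the same addressing over a plain flat array; the entry layout and constants are assumptions for the example, not the real SharedFunctionInfo layout:

#include <cassert>
#include <vector>

namespace example {

// Assumed entry layout for the example only.
constexpr int kEntryLength = 4;      // context, code, literals, osr ast id
constexpr int kCachedCodeOffset = 1;

// Read one field of an entry by indexing [entry_start + constant offset];
// keeping the offset a compile-time constant is what lets the invariant part
// of the address computation be hoisted out of the surrounding loop.
int LoadField(const std::vector<int>& optimized_map,
              int entry_start,
              int field_offset) {
  assert(field_offset >= 0 && field_offset < kEntryLength);
  return optimized_map[entry_start + field_offset];
}

int LoadCachedCode(const std::vector<int>& optimized_map, int entry_start) {
  return LoadField(optimized_map, entry_start, kCachedCodeOffset);
}

}  // namespace example
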
void CodeStubGraphBuilderBase::BuildInstallFromOptimizedCodeMap(
HValue* js_function,
HValue* shared_info,
@@ -1168,28 +1237,19 @@ void CodeStubGraphBuilderBase::BuildInstallFromOptimizedCodeMap(
// optimized_map points to fixed array of 3-element entries
// (native context, optimized code, literals).
// Map must never be empty, so check the first elements.
- Label install_optimized;
- HValue* first_context_slot = Add<HLoadNamedField>(
- optimized_map, static_cast<HValue*>(NULL),
- HObjectAccess::ForFirstContextSlot());
- HValue* first_osr_ast_slot = Add<HLoadNamedField>(
- optimized_map, static_cast<HValue*>(NULL),
- HObjectAccess::ForFirstOsrAstIdSlot());
- HValue* osr_ast_id_none = Add<HConstant>(BailoutId::None().ToInt());
+ HValue* first_entry_index =
+ Add<HConstant>(SharedFunctionInfo::kEntriesStart);
IfBuilder already_in(this);
- already_in.If<HCompareObjectEqAndBranch>(native_context,
- first_context_slot);
- already_in.AndIf<HCompareObjectEqAndBranch>(first_osr_ast_slot,
- osr_ast_id_none);
- already_in.Then();
- {
- HValue* code_object = Add<HLoadNamedField>(
- optimized_map, static_cast<HValue*>(NULL),
- HObjectAccess::ForFirstCodeSlot());
- BuildInstallOptimizedCode(js_function, native_context, code_object);
- }
+ BuildCheckAndInstallOptimizedCode(js_function, native_context, &already_in,
+ optimized_map, first_entry_index);
already_in.Else();
{
+    // Iterate through the rest of the map backwards. Do not double-check the
+    // first entry. After the loop, if no matching optimized code was found,
+    // install the unoptimized code.
+    //   for (i = map.length() - SharedFunctionInfo::kEntryLength;
+    //        i > SharedFunctionInfo::kEntriesStart;
+    //        i -= SharedFunctionInfo::kEntryLength) { .. }
HValue* shared_function_entry_length =
Add<HConstant>(SharedFunctionInfo::kEntryLength);
LoopBuilder loop_builder(this,
@@ -1199,63 +1259,34 @@ void CodeStubGraphBuilderBase::BuildInstallFromOptimizedCodeMap(
HValue* array_length = Add<HLoadNamedField>(
optimized_map, static_cast<HValue*>(NULL),
HObjectAccess::ForFixedArrayLength());
- HValue* slot_iterator = loop_builder.BeginBody(array_length,
- graph()->GetConstant0(),
- Token::GT);
+ HValue* start_pos = AddUncasted<HSub>(array_length,
+ shared_function_entry_length);
+ HValue* slot_iterator = loop_builder.BeginBody(start_pos,
+ first_entry_index,
+ Token::GT);
{
- // Iterate through the rest of map backwards.
- // Do not double check first entry.
- HValue* second_entry_index =
- Add<HConstant>(SharedFunctionInfo::kSecondEntryIndex);
- IfBuilder restore_check(this);
- restore_check.If<HCompareNumericAndBranch>(
- slot_iterator, second_entry_index, Token::EQ);
- restore_check.Then();
- {
- // Store the unoptimized code
- BuildInstallCode(js_function, shared_info);
- loop_builder.Break();
- }
- restore_check.Else();
- {
- STATIC_ASSERT(SharedFunctionInfo::kContextOffset == 0);
- STATIC_ASSERT(SharedFunctionInfo::kEntryLength -
- SharedFunctionInfo::kOsrAstIdOffset == 1);
- HValue* native_context_slot = AddUncasted<HSub>(
- slot_iterator, shared_function_entry_length);
- HValue* osr_ast_id_slot = AddUncasted<HSub>(
- slot_iterator, graph()->GetConstant1());
- HInstruction* native_context_entry = Add<HLoadKeyed>(optimized_map,
- native_context_slot, static_cast<HValue*>(NULL), FAST_ELEMENTS);
- HInstruction* osr_ast_id_entry = Add<HLoadKeyed>(optimized_map,
- osr_ast_id_slot, static_cast<HValue*>(NULL), FAST_ELEMENTS);
- IfBuilder done_check(this);
- done_check.If<HCompareObjectEqAndBranch>(native_context,
- native_context_entry);
- done_check.AndIf<HCompareObjectEqAndBranch>(osr_ast_id_entry,
- osr_ast_id_none);
- done_check.Then();
- {
- // Hit: fetch the optimized code.
- HValue* code_slot = AddUncasted<HAdd>(
- native_context_slot, graph()->GetConstant1());
- HValue* code_object = Add<HLoadKeyed>(optimized_map,
- code_slot, static_cast<HValue*>(NULL), FAST_ELEMENTS);
- BuildInstallOptimizedCode(js_function, native_context, code_object);
-
- // Fall out of the loop
- loop_builder.Break();
- }
- done_check.Else();
- done_check.End();
- }
- restore_check.End();
+ IfBuilder done_check(this);
+ BuildCheckAndInstallOptimizedCode(js_function, native_context,
+ &done_check,
+ optimized_map,
+ slot_iterator);
+ // Fall out of the loop
+ loop_builder.Break();
}
loop_builder.EndBody();
+
+  // If slot_iterator equals the first entry index, we failed to find and
+  // install optimized code.
+ IfBuilder no_optimized_code_check(this);
+ no_optimized_code_check.If<HCompareNumericAndBranch>(
+ slot_iterator, first_entry_index, Token::EQ);
+ no_optimized_code_check.Then();
+ {
+ // Store the unoptimized code
+ BuildInstallCode(js_function, shared_info);
+ }
}
- already_in.End();
}
- is_optimized.End();
}
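
The rewritten search walks the optimized code map backwards in strides of SharedFunctionInfo::kEntryLength, from the last entry down to (but not including) the first one, which was already checked before the loop; if nothing matches, the caller installs the unoptimized code. A plain C++ sketch of that scan, with assumed slot offsets and integers standing in for the heap objects:

#include <vector>

namespace example {

// Assumed layout: entries of kEntryLength consecutive slots, starting at
// index kEntriesStart; integers stand in for the heap objects.
constexpr int kEntriesStart = 1;
constexpr int kEntryLength = 4;
constexpr int kContextOffset = 0;
constexpr int kOsrAstIdOffset = 3;

// Scan backwards, skipping the first entry (checked before the loop in the
// stub above). Returns the entry's start index, or -1 so the caller can fall
// back to installing the unoptimized code.
int FindOptimizedEntry(const std::vector<int>& map,
                       int native_context,
                       int osr_ast_id_none) {
  for (int i = static_cast<int>(map.size()) - kEntryLength;
       i > kEntriesStart;
       i -= kEntryLength) {
    if (map[i + kContextOffset] == native_context &&
        map[i + kOsrAstIdOffset] == osr_ast_id_none) {
      return i;
    }
  }
  return -1;
}

}  // namespace example
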
@@ -1274,7 +1305,7 @@ HValue* CodeStubGraphBuilder<FastNewClosureStub>::BuildCodeStub() {
HInstruction* js_function = Add<HAllocate>(size, HType::JSObject(),
NOT_TENURED, JS_FUNCTION_TYPE);
- int map_index = Context::FunctionMapIndex(casted_stub()->language_mode(),
+ int map_index = Context::FunctionMapIndex(casted_stub()->strict_mode(),
casted_stub()->is_generator());
// Compute the function map in the current native context and set that
diff --git a/deps/v8/src/code-stubs.cc b/deps/v8/src/code-stubs.cc
index d86bc70dcf..06203629ae 100644
--- a/deps/v8/src/code-stubs.cc
+++ b/deps/v8/src/code-stubs.cc
@@ -86,9 +86,11 @@ Code::Kind CodeStub::GetCodeKind() const {
}
-Handle<Code> CodeStub::GetCodeCopyFromTemplate(Isolate* isolate) {
+Handle<Code> CodeStub::GetCodeCopy(Isolate* isolate,
+ const Code::FindAndReplacePattern& pattern) {
Handle<Code> ic = GetCode(isolate);
ic = isolate->factory()->CopyCode(ic);
+ ic->FindAndReplace(pattern);
RecordCodeGeneration(*ic, isolate);
return ic;
}
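
GetCodeCopy copies the template code object and then applies the caller's FindAndReplacePattern, swapping each placeholder object embedded in the copy for its real counterpart (a map, property cell, or allocation site). A hedged sketch of the copy-then-substitute idea; the real Code::FindAndReplace walks relocation info, whereas this stand-in just walks a vector of object slots:

#include <unordered_map>
#include <vector>

namespace example {

using Object = const void*;

// Copy the template's embedded object slots, then swap every registered
// placeholder for its real counterpart.
std::vector<Object> CopyAndReplace(
    const std::vector<Object>& template_slots,
    const std::unordered_map<Object, Object>& pattern) {
  std::vector<Object> copy = template_slots;      // CopyCode
  for (Object& slot : copy) {
    auto it = pattern.find(slot);
    if (it != pattern.end()) slot = it->second;   // FindAndReplace
  }
  return copy;
}

}  // namespace example
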
@@ -119,8 +121,7 @@ Handle<Code> PlatformCodeStub::GenerateCode(Isolate* isolate) {
GetCodeKind(),
GetICState(),
GetExtraICState(),
- GetStubType(),
- GetStubFlags());
+ GetStubType());
Handle<Code> new_object = factory->NewCode(
desc, flags, masm.CodeObject(), NeedsImmovableCode());
return new_object;
@@ -562,7 +563,7 @@ void KeyedStoreElementStub::Generate(MacroAssembler* masm) {
case DICTIONARY_ELEMENTS:
KeyedStoreStubCompiler::GenerateStoreDictionaryElement(masm);
break;
- case NON_STRICT_ARGUMENTS_ELEMENTS:
+ case SLOPPY_ARGUMENTS_ELEMENTS:
UNREACHABLE();
break;
}
@@ -573,8 +574,8 @@ void ArgumentsAccessStub::PrintName(StringStream* stream) {
stream->Add("ArgumentsAccessStub_");
switch (type_) {
case READ_ELEMENT: stream->Add("ReadElement"); break;
- case NEW_NON_STRICT_FAST: stream->Add("NewNonStrictFast"); break;
- case NEW_NON_STRICT_SLOW: stream->Add("NewNonStrictSlow"); break;
+ case NEW_SLOPPY_FAST: stream->Add("NewSloppyFast"); break;
+ case NEW_SLOPPY_SLOW: stream->Add("NewSloppySlow"); break;
case NEW_STRICT: stream->Add("NewStrict"); break;
}
}
@@ -737,7 +738,7 @@ void NumberToStringStub::InstallDescriptors(Isolate* isolate) {
void FastNewClosureStub::InstallDescriptors(Isolate* isolate) {
- FastNewClosureStub stub(STRICT_MODE, false);
+ FastNewClosureStub stub(STRICT, false);
InstallDescriptor(isolate, &stub);
}
@@ -749,6 +750,14 @@ void FastNewContextStub::InstallDescriptors(Isolate* isolate) {
// static
+void FastCloneShallowArrayStub::InstallDescriptors(Isolate* isolate) {
+ FastCloneShallowArrayStub stub(FastCloneShallowArrayStub::CLONE_ELEMENTS,
+ DONT_TRACK_ALLOCATION_SITE, 0);
+ InstallDescriptor(isolate, &stub);
+}
+
+
+// static
void BinaryOpICStub::InstallDescriptors(Isolate* isolate) {
BinaryOpICStub stub(Token::ADD, NO_OVERWRITE);
InstallDescriptor(isolate, &stub);
diff --git a/deps/v8/src/code-stubs.h b/deps/v8/src/code-stubs.h
index 8d283d9e39..5a88942330 100644
--- a/deps/v8/src/code-stubs.h
+++ b/deps/v8/src/code-stubs.h
@@ -51,9 +51,7 @@ namespace internal {
V(CompareIC) \
V(CompareNilIC) \
V(MathPow) \
- V(StringLength) \
V(FunctionPrototype) \
- V(StoreArrayLength) \
V(RecordWrite) \
V(StoreBufferOverflow) \
V(RegExpExec) \
@@ -98,9 +96,11 @@ namespace internal {
V(CallApiGetter) \
/* IC Handler stubs */ \
V(LoadField) \
- V(KeyedLoadField)
+ V(KeyedLoadField) \
+ V(StringLength) \
+ V(KeyedStringLength)
-// List of code stubs only used on ARM platforms.
+// List of code stubs only used on 32-bit ARM platforms.
#if V8_TARGET_ARCH_ARM
#define CODE_STUB_LIST_ARM(V) \
V(GetProperty) \
@@ -111,6 +111,19 @@ namespace internal {
#define CODE_STUB_LIST_ARM(V)
#endif
+// List of code stubs only used on 64-bit ARM platforms.
+#if V8_TARGET_ARCH_ARM64
+#define CODE_STUB_LIST_ARM64(V) \
+ V(GetProperty) \
+ V(SetProperty) \
+ V(InvokeBuiltin) \
+ V(DirectCEntry) \
+ V(StoreRegistersState) \
+ V(RestoreRegistersState)
+#else
+#define CODE_STUB_LIST_ARM64(V)
+#endif
+
// List of code stubs only used on MIPS platforms.
#if V8_TARGET_ARCH_MIPS
#define CODE_STUB_LIST_MIPS(V) \
@@ -126,6 +139,7 @@ namespace internal {
#define CODE_STUB_LIST(V) \
CODE_STUB_LIST_ALL_PLATFORMS(V) \
CODE_STUB_LIST_ARM(V) \
+ CODE_STUB_LIST_ARM64(V) \
CODE_STUB_LIST_MIPS(V)
 // Stub is the base class of all stubs.
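
The CODE_STUB_LIST macros above are a conventional X-macro: each platform contributes V(Name) entries, the per-platform lists are concatenated into CODE_STUB_LIST(V), and a single expansion then generates the stub enum and similar tables. A self-contained illustration of the pattern, with made-up names:

// Made-up stub names; only the expansion mechanics mirror CODE_STUB_LIST.
#define EXAMPLE_LIST_COMMON(V) V(LoadField) V(StringLength)
#if defined(EXAMPLE_TARGET_ARM64)
#define EXAMPLE_LIST_ARM64(V) V(DirectCEntry)
#else
#define EXAMPLE_LIST_ARM64(V)
#endif
#define EXAMPLE_LIST(V) EXAMPLE_LIST_COMMON(V) EXAMPLE_LIST_ARM64(V)

// One expansion of the combined list generates the enum.
enum class ExampleStub {
#define DEF_ENUM(name) name,
  EXAMPLE_LIST(DEF_ENUM)
#undef DEF_ENUM
  NumStubs
};
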
@@ -144,7 +158,9 @@ class CodeStub BASE_EMBEDDED {
Handle<Code> GetCode(Isolate* isolate);
// Retrieve the code for the stub, make and return a copy of the code.
- Handle<Code> GetCodeCopyFromTemplate(Isolate* isolate);
+ Handle<Code> GetCodeCopy(
+ Isolate* isolate, const Code::FindAndReplacePattern& pattern);
+
static Major MajorKeyFromKey(uint32_t key) {
return static_cast<Major>(MajorKeyBits::decode(key));
}
@@ -188,9 +204,6 @@ class CodeStub BASE_EMBEDDED {
virtual Code::StubType GetStubType() {
return Code::NORMAL;
}
- virtual int GetStubFlags() {
- return -1;
- }
virtual void PrintName(StringStream* stream);
@@ -442,6 +455,8 @@ class RuntimeCallHelper {
#include "ia32/code-stubs-ia32.h"
#elif V8_TARGET_ARCH_X64
#include "x64/code-stubs-x64.h"
+#elif V8_TARGET_ARCH_ARM64
+#include "arm64/code-stubs-arm64.h"
#elif V8_TARGET_ARCH_ARM
#include "arm/code-stubs-arm.h"
#elif V8_TARGET_ARCH_MIPS
@@ -487,6 +502,13 @@ class ToNumberStub: public HydrogenCodeStub {
Isolate* isolate,
CodeStubInterfaceDescriptor* descriptor);
+ static void InstallDescriptors(Isolate* isolate) {
+ ToNumberStub stub;
+ stub.InitializeInterfaceDescriptor(
+ isolate,
+ isolate->code_stub_interface_descriptor(CodeStub::ToNumber));
+ }
+
private:
Major MajorKey() { return ToNumber; }
int NotMissMinorKey() { return 0; }
@@ -516,8 +538,8 @@ class NumberToStringStub V8_FINAL : public HydrogenCodeStub {
class FastNewClosureStub : public HydrogenCodeStub {
public:
- explicit FastNewClosureStub(LanguageMode language_mode, bool is_generator)
- : language_mode_(language_mode),
+ explicit FastNewClosureStub(StrictMode strict_mode, bool is_generator)
+ : strict_mode_(strict_mode),
is_generator_(is_generator) { }
virtual Handle<Code> GenerateCode(Isolate* isolate);
@@ -528,7 +550,7 @@ class FastNewClosureStub : public HydrogenCodeStub {
static void InstallDescriptors(Isolate* isolate);
- LanguageMode language_mode() const { return language_mode_; }
+ StrictMode strict_mode() const { return strict_mode_; }
bool is_generator() const { return is_generator_; }
private:
@@ -537,11 +559,11 @@ class FastNewClosureStub : public HydrogenCodeStub {
Major MajorKey() { return FastNewClosure; }
int NotMissMinorKey() {
- return StrictModeBits::encode(language_mode_ != CLASSIC_MODE) |
+ return StrictModeBits::encode(strict_mode_ == STRICT) |
IsGeneratorBits::encode(is_generator_);
}
- LanguageMode language_mode_;
+ StrictMode strict_mode_;
bool is_generator_;
};
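
NotMissMinorKey packs the strict-mode and generator flags into one small integer through V8's BitField helpers. A minimal stand-in for that encoding in plain C++; the shift positions here are illustrative, not the ones V8 actually uses:

#include <cstdint>

namespace example {

// Minimal stand-in for V8's BitField<T, shift, size>.
template <typename T, int kShift, int kSize>
struct BitField {
  static constexpr std::uint32_t kMask = ((1u << kSize) - 1u) << kShift;
  static std::uint32_t encode(T value) {
    return (static_cast<std::uint32_t>(value) << kShift) & kMask;
  }
  static T decode(std::uint32_t key) {
    return static_cast<T>((key & kMask) >> kShift);
  }
};

using StrictModeBits = BitField<bool, 0, 1>;
using IsGeneratorBits = BitField<bool, 1, 1>;

// Packs both flags into one key, as NotMissMinorKey does above.
std::uint32_t MinorKey(bool is_strict, bool is_generator) {
  return StrictModeBits::encode(is_strict) |
         IsGeneratorBits::encode(is_generator);
}

}  // namespace example
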
@@ -625,6 +647,8 @@ class FastCloneShallowArrayStub : public HydrogenCodeStub {
Isolate* isolate,
CodeStubInterfaceDescriptor* descriptor);
+ static void InstallDescriptors(Isolate* isolate);
+
private:
Mode mode_;
AllocationSiteMode allocation_site_mode_;
@@ -651,8 +675,7 @@ class FastCloneShallowObjectStub : public HydrogenCodeStub {
// Maximum number of properties in copied object.
static const int kMaximumClonedProperties = 6;
- explicit FastCloneShallowObjectStub(int length)
- : length_(length) {
+ explicit FastCloneShallowObjectStub(int length) : length_(length) {
ASSERT_GE(length_, 0);
ASSERT_LE(length_, kMaximumClonedProperties);
}
@@ -826,20 +849,9 @@ class FunctionPrototypeStub: public ICStub {
};
-class StringLengthStub: public ICStub {
- public:
- explicit StringLengthStub(Code::Kind kind) : ICStub(kind) { }
- virtual void Generate(MacroAssembler* masm);
-
- private:
- STATIC_ASSERT(KindBits::kSize == 4);
- virtual CodeStub::Major MajorKey() { return StringLength; }
-};
-
-
class StoreICStub: public ICStub {
public:
- StoreICStub(Code::Kind kind, StrictModeFlag strict_mode)
+ StoreICStub(Code::Kind kind, StrictMode strict_mode)
: ICStub(kind), strict_mode_(strict_mode) { }
protected:
@@ -854,18 +866,7 @@ class StoreICStub: public ICStub {
return KindBits::encode(kind()) | StrictModeBits::encode(strict_mode_);
}
- StrictModeFlag strict_mode_;
-};
-
-
-class StoreArrayLengthStub: public StoreICStub {
- public:
- explicit StoreArrayLengthStub(Code::Kind kind, StrictModeFlag strict_mode)
- : StoreICStub(kind, strict_mode) { }
- virtual void Generate(MacroAssembler* masm);
-
- private:
- virtual CodeStub::Major MajorKey() { return StoreArrayLength; }
+ StrictMode strict_mode_;
};
@@ -883,7 +884,7 @@ class HICStub: public HydrogenCodeStub {
class HandlerStub: public HICStub {
public:
virtual Code::Kind GetCodeKind() const { return Code::HANDLER; }
- virtual int GetStubFlags() { return kind(); }
+ virtual ExtraICState GetExtraICState() { return kind(); }
protected:
HandlerStub() : HICStub() { }
@@ -937,11 +938,10 @@ class LoadFieldStub: public HandlerStub {
bool inobject,
int index,
Representation representation) {
- bool unboxed_double = FLAG_track_double_fields && representation.IsDouble();
bit_field_ = KindBits::encode(kind)
| InobjectBits::encode(inobject)
| IndexBits::encode(index)
- | UnboxedDoubleBits::encode(unboxed_double);
+ | UnboxedDoubleBits::encode(representation.IsDouble());
}
private:
@@ -953,22 +953,69 @@ class LoadFieldStub: public HandlerStub {
};
+class StringLengthStub: public HandlerStub {
+ public:
+ explicit StringLengthStub() : HandlerStub() {
+ Initialize(Code::LOAD_IC);
+ }
+ virtual Handle<Code> GenerateCode(Isolate* isolate);
+ virtual void InitializeInterfaceDescriptor(
+ Isolate* isolate,
+ CodeStubInterfaceDescriptor* descriptor);
+
+ protected:
+ virtual Code::Kind kind() const {
+ return KindBits::decode(bit_field_);
+ }
+
+ void Initialize(Code::Kind kind) {
+ bit_field_ = KindBits::encode(kind);
+ }
+
+ private:
+ virtual CodeStub::Major MajorKey() { return StringLength; }
+};
+
+
+class KeyedStringLengthStub: public StringLengthStub {
+ public:
+ explicit KeyedStringLengthStub() : StringLengthStub() {
+ Initialize(Code::KEYED_LOAD_IC);
+ }
+ virtual void InitializeInterfaceDescriptor(
+ Isolate* isolate,
+ CodeStubInterfaceDescriptor* descriptor);
+
+ private:
+ virtual CodeStub::Major MajorKey() { return KeyedStringLength; }
+};
+
+
class StoreGlobalStub : public HandlerStub {
public:
- explicit StoreGlobalStub(bool is_constant) {
- bit_field_ = IsConstantBits::encode(is_constant);
+ explicit StoreGlobalStub(bool is_constant, bool check_global) {
+ bit_field_ = IsConstantBits::encode(is_constant) |
+ CheckGlobalBits::encode(check_global);
+ }
+
+ static Handle<HeapObject> global_placeholder(Isolate* isolate) {
+ return isolate->factory()->uninitialized_value();
}
Handle<Code> GetCodeCopyFromTemplate(Isolate* isolate,
- Map* receiver_map,
- PropertyCell* cell) {
- Handle<Code> code = CodeStub::GetCodeCopyFromTemplate(isolate);
- // Replace the placeholder cell and global object map with the actual global
- // cell and receiver map.
- Map* cell_map = isolate->heap()->global_property_cell_map();
- code->ReplaceNthObject(1, cell_map, cell);
- code->ReplaceNthObject(1, isolate->heap()->meta_map(), receiver_map);
- return code;
+ Handle<GlobalObject> global,
+ Handle<PropertyCell> cell) {
+ if (check_global()) {
+ Code::FindAndReplacePattern pattern;
+ pattern.Add(Handle<Map>(global_placeholder(isolate)->map()), global);
+ pattern.Add(isolate->factory()->meta_map(), Handle<Map>(global->map()));
+ pattern.Add(isolate->factory()->global_property_cell_map(), cell);
+ return CodeStub::GetCodeCopy(isolate, pattern);
+ } else {
+ Code::FindAndReplacePattern pattern;
+ pattern.Add(isolate->factory()->global_property_cell_map(), cell);
+ return CodeStub::GetCodeCopy(isolate, pattern);
+ }
}
virtual Code::Kind kind() const { return Code::STORE_IC; }
@@ -979,11 +1026,12 @@ class StoreGlobalStub : public HandlerStub {
Isolate* isolate,
CodeStubInterfaceDescriptor* descriptor);
- virtual ExtraICState GetExtraICState() { return bit_field_; }
-
- bool is_constant() {
+ bool is_constant() const {
return IsConstantBits::decode(bit_field_);
}
+ bool check_global() const {
+ return CheckGlobalBits::decode(bit_field_);
+ }
void set_is_constant(bool value) {
bit_field_ = IsConstantBits::update(bit_field_, value);
}
@@ -996,13 +1044,11 @@ class StoreGlobalStub : public HandlerStub {
}
private:
- virtual int NotMissMinorKey() { return GetExtraICState(); }
Major MajorKey() { return StoreGlobal; }
class IsConstantBits: public BitField<bool, 0, 1> {};
class RepresentationBits: public BitField<Representation::Kind, 1, 8> {};
-
- int bit_field_;
+ class CheckGlobalBits: public BitField<bool, 9, 1> {};
DISALLOW_COPY_AND_ASSIGN(StoreGlobalStub);
};
@@ -1010,13 +1056,14 @@ class StoreGlobalStub : public HandlerStub {
class CallApiFunctionStub : public PlatformCodeStub {
public:
- CallApiFunctionStub(bool restore_context,
+ CallApiFunctionStub(bool is_store,
bool call_data_undefined,
int argc) {
bit_field_ =
- RestoreContextBits::encode(restore_context) |
+ IsStoreBits::encode(is_store) |
CallDataUndefinedBits::encode(call_data_undefined) |
ArgumentBits::encode(argc);
+ ASSERT(!is_store || argc == 1);
}
private:
@@ -1024,7 +1071,7 @@ class CallApiFunctionStub : public PlatformCodeStub {
virtual Major MajorKey() V8_OVERRIDE { return CallApiFunction; }
virtual int MinorKey() V8_OVERRIDE { return bit_field_; }
- class RestoreContextBits: public BitField<bool, 0, 1> {};
+ class IsStoreBits: public BitField<bool, 0, 1> {};
class CallDataUndefinedBits: public BitField<bool, 1, 1> {};
class ArgumentBits: public BitField<int, 2, Code::kArgumentsBits> {};
@@ -1058,8 +1105,6 @@ class KeyedLoadFieldStub: public LoadFieldStub {
Isolate* isolate,
CodeStubInterfaceDescriptor* descriptor);
- virtual Handle<Code> GenerateCode(Isolate* isolate);
-
private:
virtual CodeStub::Major MajorKey() { return KeyedLoadField; }
};
@@ -1155,10 +1200,9 @@ class BinaryOpICWithAllocationSiteStub V8_FINAL : public PlatformCodeStub {
Handle<Code> GetCodeCopyFromTemplate(Isolate* isolate,
Handle<AllocationSite> allocation_site) {
- Handle<Code> code = CodeStub::GetCodeCopyFromTemplate(isolate);
- // Replace the placeholder oddball with the actual allocation site.
- code->ReplaceNthObject(1, isolate->heap()->oddball_map(), *allocation_site);
- return code;
+ Code::FindAndReplacePattern pattern;
+ pattern.Add(isolate->factory()->oddball_map(), allocation_site);
+ return CodeStub::GetCodeCopy(isolate, pattern);
}
virtual Code::Kind GetCodeKind() const V8_OVERRIDE {
@@ -1368,7 +1412,7 @@ class CompareNilICStub : public HydrogenCodeStub {
Isolate* isolate,
CodeStubInterfaceDescriptor* descriptor);
- static void InitializeForIsolate(Isolate* isolate) {
+ static void InstallDescriptors(Isolate* isolate) {
CompareNilICStub compare_stub(kNullValue, UNINITIALIZED);
compare_stub.InitializeInterfaceDescriptor(
isolate,
@@ -1466,7 +1510,6 @@ class CEntryStub : public PlatformCodeStub {
void GenerateCore(MacroAssembler* masm,
Label* throw_normal_exception,
Label* throw_termination_exception,
- Label* throw_out_of_memory_exception,
bool do_gc,
bool always_allocate_scope);
@@ -1520,8 +1563,8 @@ class ArgumentsAccessStub: public PlatformCodeStub {
public:
enum Type {
READ_ELEMENT,
- NEW_NON_STRICT_FAST,
- NEW_NON_STRICT_SLOW,
+ NEW_SLOPPY_FAST,
+ NEW_SLOPPY_SLOW,
NEW_STRICT
};
@@ -1536,8 +1579,8 @@ class ArgumentsAccessStub: public PlatformCodeStub {
void Generate(MacroAssembler* masm);
void GenerateReadElement(MacroAssembler* masm);
void GenerateNewStrict(MacroAssembler* masm);
- void GenerateNewNonStrictFast(MacroAssembler* masm);
- void GenerateNewNonStrictSlow(MacroAssembler* masm);
+ void GenerateNewSloppyFast(MacroAssembler* masm);
+ void GenerateNewSloppySlow(MacroAssembler* masm);
virtual void PrintName(StringStream* stream);
};
@@ -1866,23 +1909,21 @@ class DoubleToIStub : public PlatformCodeStub {
int offset,
bool is_truncating,
bool skip_fastpath = false) : bit_field_(0) {
- bit_field_ = SourceRegisterBits::encode(source.code_) |
- DestinationRegisterBits::encode(destination.code_) |
+ bit_field_ = SourceRegisterBits::encode(source.code()) |
+ DestinationRegisterBits::encode(destination.code()) |
OffsetBits::encode(offset) |
IsTruncatingBits::encode(is_truncating) |
SkipFastPathBits::encode(skip_fastpath) |
SSEBits::encode(CpuFeatures::IsSafeForSnapshot(SSE2) ?
- CpuFeatures::IsSafeForSnapshot(SSE3) ? 2 : 1 : 0);
+ CpuFeatures::IsSafeForSnapshot(SSE3) ? 2 : 1 : 0);
}
Register source() {
- Register result = { SourceRegisterBits::decode(bit_field_) };
- return result;
+ return Register::from_code(SourceRegisterBits::decode(bit_field_));
}
Register destination() {
- Register result = { DestinationRegisterBits::decode(bit_field_) };
- return result;
+ return Register::from_code(DestinationRegisterBits::decode(bit_field_));
}
bool is_truncating() {
@@ -2334,7 +2375,7 @@ class ToBooleanStub: public HydrogenCodeStub {
virtual bool SometimesSetsUpAFrame() { return false; }
- static void InitializeForIsolate(Isolate* isolate) {
+ static void InstallDescriptors(Isolate* isolate) {
ToBooleanStub stub;
stub.InitializeInterfaceDescriptor(
isolate,
diff --git a/deps/v8/src/codegen.cc b/deps/v8/src/codegen.cc
index 13ce2218df..ea0ead3104 100644
--- a/deps/v8/src/codegen.cc
+++ b/deps/v8/src/codegen.cc
@@ -165,6 +165,8 @@ void CodeGenerator::PrintCode(Handle<Code> code, CompilationInfo* info) {
function->debug_name()->ToCString().get(), tracing_scope.file());
}
PrintF(tracing_scope.file(), "--- Optimized code ---\n");
+ PrintF(tracing_scope.file(),
+ "optimization_id = %d\n", info->optimization_id());
} else {
PrintF(tracing_scope.file(), "--- Code ---\n");
}
@@ -220,11 +222,11 @@ void ArgumentsAccessStub::Generate(MacroAssembler* masm) {
case READ_ELEMENT:
GenerateReadElement(masm);
break;
- case NEW_NON_STRICT_FAST:
- GenerateNewNonStrictFast(masm);
+ case NEW_SLOPPY_FAST:
+ GenerateNewSloppyFast(masm);
break;
- case NEW_NON_STRICT_SLOW:
- GenerateNewNonStrictSlow(masm);
+ case NEW_SLOPPY_SLOW:
+ GenerateNewSloppySlow(masm);
break;
case NEW_STRICT:
GenerateNewStrict(masm);
diff --git a/deps/v8/src/codegen.h b/deps/v8/src/codegen.h
index 8bd4302662..6b5f9513ea 100644
--- a/deps/v8/src/codegen.h
+++ b/deps/v8/src/codegen.h
@@ -72,6 +72,8 @@ enum TypeofState { INSIDE_TYPEOF, NOT_INSIDE_TYPEOF };
#include "ia32/codegen-ia32.h"
#elif V8_TARGET_ARCH_X64
#include "x64/codegen-x64.h"
+#elif V8_TARGET_ARCH_ARM64
+#include "arm64/codegen-arm64.h"
#elif V8_TARGET_ARCH_ARM
#include "arm/codegen-arm.h"
#elif V8_TARGET_ARCH_MIPS
diff --git a/deps/v8/src/collection.js b/deps/v8/src/collection.js
index 1f7aef4f0d..9054187a12 100644
--- a/deps/v8/src/collection.js
+++ b/deps/v8/src/collection.js
@@ -33,8 +33,6 @@
var $Set = global.Set;
var $Map = global.Map;
-var $WeakMap = global.WeakMap;
-var $WeakSet = global.WeakSet;
// Global sentinel to be used instead of undefined keys, which are not
// supported internally but required for Harmony sets and maps.
@@ -230,174 +228,3 @@ function SetUpMap() {
}
SetUpMap();
-
-
-// -------------------------------------------------------------------
-// Harmony WeakMap
-
-function WeakMapConstructor() {
- if (%_IsConstructCall()) {
- %WeakCollectionInitialize(this);
- } else {
- throw MakeTypeError('constructor_not_function', ['WeakMap']);
- }
-}
-
-
-function WeakMapGet(key) {
- if (!IS_WEAKMAP(this)) {
- throw MakeTypeError('incompatible_method_receiver',
- ['WeakMap.prototype.get', this]);
- }
- if (!(IS_SPEC_OBJECT(key) || IS_SYMBOL(key))) {
- throw %MakeTypeError('invalid_weakmap_key', [this, key]);
- }
- return %WeakCollectionGet(this, key);
-}
-
-
-function WeakMapSet(key, value) {
- if (!IS_WEAKMAP(this)) {
- throw MakeTypeError('incompatible_method_receiver',
- ['WeakMap.prototype.set', this]);
- }
- if (!(IS_SPEC_OBJECT(key) || IS_SYMBOL(key))) {
- throw %MakeTypeError('invalid_weakmap_key', [this, key]);
- }
- return %WeakCollectionSet(this, key, value);
-}
-
-
-function WeakMapHas(key) {
- if (!IS_WEAKMAP(this)) {
- throw MakeTypeError('incompatible_method_receiver',
- ['WeakMap.prototype.has', this]);
- }
- if (!(IS_SPEC_OBJECT(key) || IS_SYMBOL(key))) {
- throw %MakeTypeError('invalid_weakmap_key', [this, key]);
- }
- return %WeakCollectionHas(this, key);
-}
-
-
-function WeakMapDelete(key) {
- if (!IS_WEAKMAP(this)) {
- throw MakeTypeError('incompatible_method_receiver',
- ['WeakMap.prototype.delete', this]);
- }
- if (!(IS_SPEC_OBJECT(key) || IS_SYMBOL(key))) {
- throw %MakeTypeError('invalid_weakmap_key', [this, key]);
- }
- return %WeakCollectionDelete(this, key);
-}
-
-
-function WeakMapClear() {
- if (!IS_WEAKMAP(this)) {
- throw MakeTypeError('incompatible_method_receiver',
- ['WeakMap.prototype.clear', this]);
- }
- // Replace the internal table with a new empty table.
- %WeakCollectionInitialize(this);
-}
-
-
-// -------------------------------------------------------------------
-
-function SetUpWeakMap() {
- %CheckIsBootstrapping();
-
- %SetCode($WeakMap, WeakMapConstructor);
- %FunctionSetPrototype($WeakMap, new $Object());
- %SetProperty($WeakMap.prototype, "constructor", $WeakMap, DONT_ENUM);
-
- // Set up the non-enumerable functions on the WeakMap prototype object.
- InstallFunctions($WeakMap.prototype, DONT_ENUM, $Array(
- "get", WeakMapGet,
- "set", WeakMapSet,
- "has", WeakMapHas,
- "delete", WeakMapDelete,
- "clear", WeakMapClear
- ));
-}
-
-SetUpWeakMap();
-
-
-// -------------------------------------------------------------------
-// Harmony WeakSet
-
-function WeakSetConstructor() {
- if (%_IsConstructCall()) {
- %WeakCollectionInitialize(this);
- } else {
- throw MakeTypeError('constructor_not_function', ['WeakSet']);
- }
-}
-
-
-function WeakSetAdd(value) {
- if (!IS_WEAKSET(this)) {
- throw MakeTypeError('incompatible_method_receiver',
- ['WeakSet.prototype.add', this]);
- }
- if (!(IS_SPEC_OBJECT(value) || IS_SYMBOL(value))) {
- throw %MakeTypeError('invalid_weakset_value', [this, value]);
- }
- return %WeakCollectionSet(this, value, true);
-}
-
-
-function WeakSetHas(value) {
- if (!IS_WEAKSET(this)) {
- throw MakeTypeError('incompatible_method_receiver',
- ['WeakSet.prototype.has', this]);
- }
- if (!(IS_SPEC_OBJECT(value) || IS_SYMBOL(value))) {
- throw %MakeTypeError('invalid_weakset_value', [this, value]);
- }
- return %WeakCollectionHas(this, value);
-}
-
-
-function WeakSetDelete(value) {
- if (!IS_WEAKSET(this)) {
- throw MakeTypeError('incompatible_method_receiver',
- ['WeakSet.prototype.delete', this]);
- }
- if (!(IS_SPEC_OBJECT(value) || IS_SYMBOL(value))) {
- throw %MakeTypeError('invalid_weakset_value', [this, value]);
- }
- return %WeakCollectionDelete(this, value);
-}
-
-
-function WeakSetClear() {
- if (!IS_WEAKSET(this)) {
- throw MakeTypeError('incompatible_method_receiver',
- ['WeakSet.prototype.clear', this]);
- }
- // Replace the internal table with a new empty table.
- %WeakCollectionInitialize(this);
-}
-
-
-// -------------------------------------------------------------------
-
-function SetUpWeakSet() {
- %CheckIsBootstrapping();
-
- %SetCode($WeakSet, WeakSetConstructor);
- %FunctionSetPrototype($WeakSet, new $Object());
- %SetProperty($WeakSet.prototype, "constructor", $WeakSet, DONT_ENUM);
-
- // Set up the non-enumerable functions on the WeakSet prototype object.
- InstallFunctions($WeakSet.prototype, DONT_ENUM, $Array(
- "add", WeakSetAdd,
- "has", WeakSetHas,
- "delete", WeakSetDelete,
- "clear", WeakSetClear
- ));
-}
-
-SetUpWeakSet();
diff --git a/deps/v8/src/compilation-cache.cc b/deps/v8/src/compilation-cache.cc
index a69ef4c765..54d4565e2d 100644
--- a/deps/v8/src/compilation-cache.cc
+++ b/deps/v8/src/compilation-cache.cc
@@ -269,7 +269,7 @@ void CompilationCacheScript::Put(Handle<String> source,
Handle<SharedFunctionInfo> CompilationCacheEval::Lookup(
Handle<String> source,
Handle<Context> context,
- LanguageMode language_mode,
+ StrictMode strict_mode,
int scope_position) {
// Make sure not to leak the table into the surrounding handle
// scope. Otherwise, we risk keeping old tables around even after
@@ -280,7 +280,7 @@ Handle<SharedFunctionInfo> CompilationCacheEval::Lookup(
for (generation = 0; generation < generations(); generation++) {
Handle<CompilationCacheTable> table = GetTable(generation);
result = table->LookupEval(
- *source, *context, language_mode, scope_position);
+ *source, *context, strict_mode, scope_position);
if (result->IsSharedFunctionInfo()) {
break;
}
@@ -421,7 +421,7 @@ Handle<SharedFunctionInfo> CompilationCache::LookupScript(
Handle<SharedFunctionInfo> CompilationCache::LookupEval(
Handle<String> source,
Handle<Context> context,
- LanguageMode language_mode,
+ StrictMode strict_mode,
int scope_position) {
if (!IsEnabled()) {
return Handle<SharedFunctionInfo>::null();
@@ -430,11 +430,11 @@ Handle<SharedFunctionInfo> CompilationCache::LookupEval(
Handle<SharedFunctionInfo> result;
if (context->IsNativeContext()) {
result = eval_global_.Lookup(
- source, context, language_mode, scope_position);
+ source, context, strict_mode, scope_position);
} else {
ASSERT(scope_position != RelocInfo::kNoPosition);
result = eval_contextual_.Lookup(
- source, context, language_mode, scope_position);
+ source, context, strict_mode, scope_position);
}
return result;
}
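
After this change the eval cache is keyed on the source string, the calling context, the strict mode, and the scope position, so the same source compiled under a different mode or position cannot share an entry. A sketch of that key shape with standard containers; the real cache is a generational hash table over heap objects, so this is only an illustration:

#include <map>
#include <string>
#include <tuple>

namespace example {

enum class StrictMode { SLOPPY, STRICT };
using SharedInfo = const void*;

// Source, calling context (an id here), strict mode, and scope position
// together identify a cache entry.
using EvalKey = std::tuple<std::string, int, StrictMode, int>;
using EvalCache = std::map<EvalKey, SharedInfo>;

SharedInfo LookupEval(const EvalCache& cache,
                      const std::string& source, int context_id,
                      StrictMode strict_mode, int scope_position) {
  auto it = cache.find(std::make_tuple(source, context_id,
                                       strict_mode, scope_position));
  return it == cache.end() ? nullptr : it->second;
}

}  // namespace example
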
diff --git a/deps/v8/src/compilation-cache.h b/deps/v8/src/compilation-cache.h
index ead52b5fa4..b31de3111f 100644
--- a/deps/v8/src/compilation-cache.h
+++ b/deps/v8/src/compilation-cache.h
@@ -136,10 +136,9 @@ class CompilationCacheScript : public CompilationSubCache {
// entries:
// 1. The source string.
// 2. The shared function info of the calling function.
-// 3. Whether the source should be compiled as strict code or as non-strict
-// code.
+// 3. Whether the source should be compiled as strict code or as sloppy code.
// Note: Currently there are clients of CompileEval that always compile
-// non-strict code even if the calling function is a strict mode function.
+// sloppy code even if the calling function is a strict mode function.
// More specifically these are the CompileString, DebugEvaluate and
// DebugEvaluateGlobal runtime functions.
// 4. The start position of the calling scope.
@@ -150,7 +149,7 @@ class CompilationCacheEval: public CompilationSubCache {
Handle<SharedFunctionInfo> Lookup(Handle<String> source,
Handle<Context> context,
- LanguageMode language_mode,
+ StrictMode strict_mode,
int scope_position);
void Put(Handle<String> source,
@@ -222,7 +221,7 @@ class CompilationCache {
// contain a script for the given source string.
Handle<SharedFunctionInfo> LookupEval(Handle<String> source,
Handle<Context> context,
- LanguageMode language_mode,
+ StrictMode strict_mode,
int scope_position);
// Returns the regexp data associated with the given regexp if it
diff --git a/deps/v8/src/compiler.cc b/deps/v8/src/compiler.cc
index b9e13c1661..4b539897b8 100644
--- a/deps/v8/src/compiler.cc
+++ b/deps/v8/src/compiler.cc
@@ -56,37 +56,40 @@ namespace internal {
CompilationInfo::CompilationInfo(Handle<Script> script,
Zone* zone)
- : flags_(LanguageModeField::encode(CLASSIC_MODE)),
+ : flags_(StrictModeField::encode(SLOPPY)),
script_(script),
osr_ast_id_(BailoutId::None()),
parameter_count_(0),
- this_has_uses_(true) {
+ this_has_uses_(true),
+ optimization_id_(-1) {
Initialize(script->GetIsolate(), BASE, zone);
}
CompilationInfo::CompilationInfo(Handle<SharedFunctionInfo> shared_info,
Zone* zone)
- : flags_(LanguageModeField::encode(CLASSIC_MODE) | IsLazy::encode(true)),
+ : flags_(StrictModeField::encode(SLOPPY) | IsLazy::encode(true)),
shared_info_(shared_info),
script_(Handle<Script>(Script::cast(shared_info->script()))),
osr_ast_id_(BailoutId::None()),
parameter_count_(0),
- this_has_uses_(true) {
+ this_has_uses_(true),
+ optimization_id_(-1) {
Initialize(script_->GetIsolate(), BASE, zone);
}
CompilationInfo::CompilationInfo(Handle<JSFunction> closure,
Zone* zone)
- : flags_(LanguageModeField::encode(CLASSIC_MODE) | IsLazy::encode(true)),
+ : flags_(StrictModeField::encode(SLOPPY) | IsLazy::encode(true)),
closure_(closure),
shared_info_(Handle<SharedFunctionInfo>(closure->shared())),
script_(Handle<Script>(Script::cast(shared_info_->script()))),
context_(closure->context()),
osr_ast_id_(BailoutId::None()),
parameter_count_(0),
- this_has_uses_(true) {
+ this_has_uses_(true),
+ optimization_id_(-1) {
Initialize(script_->GetIsolate(), BASE, zone);
}
@@ -94,11 +97,11 @@ CompilationInfo::CompilationInfo(Handle<JSFunction> closure,
CompilationInfo::CompilationInfo(HydrogenCodeStub* stub,
Isolate* isolate,
Zone* zone)
- : flags_(LanguageModeField::encode(CLASSIC_MODE) |
- IsLazy::encode(true)),
+ : flags_(StrictModeField::encode(SLOPPY) | IsLazy::encode(true)),
osr_ast_id_(BailoutId::None()),
parameter_count_(0),
- this_has_uses_(true) {
+ this_has_uses_(true),
+ optimization_id_(-1) {
Initialize(isolate, STUB, zone);
code_stub_ = stub;
}
@@ -112,7 +115,8 @@ void CompilationInfo::Initialize(Isolate* isolate,
scope_ = NULL;
global_scope_ = NULL;
extension_ = NULL;
- pre_parse_data_ = NULL;
+ cached_data_ = NULL;
+ cached_data_mode_ = NO_CACHED_DATA;
zone_ = zone;
deferred_handles_ = NULL;
code_stub_ = NULL;
@@ -133,8 +137,8 @@ void CompilationInfo::Initialize(Isolate* isolate,
MarkAsNative();
}
if (!shared_info_.is_null()) {
- ASSERT(language_mode() == CLASSIC_MODE);
- SetLanguageMode(shared_info_->language_mode());
+ ASSERT(strict_mode() == SLOPPY);
+ SetStrictMode(shared_info_->strict_mode());
}
set_bailout_reason(kUnknown);
}
@@ -211,8 +215,7 @@ Code::Flags CompilationInfo::flags() const {
return Code::ComputeFlags(code_stub()->GetCodeKind(),
code_stub()->GetICState(),
code_stub()->GetExtraICState(),
- code_stub()->GetStubType(),
- code_stub()->GetStubFlags());
+ code_stub()->GetStubType());
} else {
return Code::ComputeFlags(Code::OPTIMIZED_FUNCTION);
}
@@ -225,7 +228,7 @@ void CompilationInfo::DisableOptimization() {
FLAG_optimize_closures &&
closure_.is_null() &&
!scope_->HasTrivialOuterContext() &&
- !scope_->outer_scope_calls_non_strict_eval() &&
+ !scope_->outer_scope_calls_sloppy_eval() &&
!scope_->inside_with();
SetMode(is_optimizable_closure ? BASE : NONOPT);
}
@@ -243,6 +246,13 @@ bool CompilationInfo::ShouldSelfOptimize() {
}
+void CompilationInfo::PrepareForCompilation(Scope* scope) {
+ ASSERT(scope_ == NULL);
+ scope_ = scope;
+ function()->ProcessFeedbackSlots(isolate_);
+}
+
+
class HOptimizedGraphBuilderWithPositions: public HOptimizedGraphBuilder {
public:
explicit HOptimizedGraphBuilderWithPositions(CompilationInfo* info)
@@ -363,7 +373,7 @@ OptimizedCompileJob::Status OptimizedCompileJob::CreateGraph() {
// Note that we use the same AST that we will use for generating the
// optimized code.
unoptimized.SetFunction(info()->function());
- unoptimized.SetScope(info()->scope());
+ unoptimized.PrepareForCompilation(info()->scope());
unoptimized.SetContext(info()->context());
if (should_recompile) unoptimized.EnableDeoptimizationSupport();
bool succeeded = FullCodeGenerator::MakeCode(&unoptimized);
@@ -398,7 +408,7 @@ OptimizedCompileJob::Status OptimizedCompileJob::CreateGraph() {
// Type-check the function.
AstTyper::Run(info());
- graph_builder_ = FLAG_emit_opt_code_positions
+ graph_builder_ = FLAG_hydrogen_track_positions
? new(info()->zone()) HOptimizedGraphBuilderWithPositions(info())
: new(info()->zone()) HOptimizedGraphBuilder(info());
@@ -571,7 +581,7 @@ static void UpdateSharedFunctionInfo(CompilationInfo* info) {
shared->set_dont_optimize_reason(lit->dont_optimize_reason());
shared->set_dont_inline(lit->flags()->Contains(kDontInline));
shared->set_ast_node_count(lit->ast_node_count());
- shared->set_language_mode(lit->language_mode());
+ shared->set_strict_mode(lit->strict_mode());
}
@@ -596,7 +606,7 @@ static void SetFunctionInfo(Handle<SharedFunctionInfo> function_info,
function_info->set_allows_lazy_compilation(lit->AllowsLazyCompilation());
function_info->set_allows_lazy_compilation_without_context(
lit->AllowsLazyCompilationWithoutContext());
- function_info->set_language_mode(lit->language_mode());
+ function_info->set_strict_mode(lit->strict_mode());
function_info->set_uses_arguments(lit->scope()->arguments() != NULL);
function_info->set_has_duplicate_parameters(lit->has_duplicate_parameters());
function_info->set_ast_node_count(lit->ast_node_count());
@@ -627,8 +637,7 @@ static Handle<Code> GetUnoptimizedCodeCommon(CompilationInfo* info) {
VMState<COMPILER> state(info->isolate());
PostponeInterruptsScope postpone(info->isolate());
if (!Parser::Parse(info)) return Handle<Code>::null();
- LanguageMode language_mode = info->function()->language_mode();
- info->SetLanguageMode(language_mode);
+ info->SetStrictMode(info->function()->strict_mode());
if (!CompileUnoptimizedCode(info)) return Handle<Code>::null();
Compiler::RecordFunctionCompilation(
@@ -736,8 +745,7 @@ void Compiler::CompileForLiveEdit(Handle<Script> script) {
info.MarkAsGlobal();
if (!Parser::Parse(&info)) return;
- LanguageMode language_mode = info.function()->language_mode();
- info.SetLanguageMode(language_mode);
+ info.SetStrictMode(info.function()->strict_mode());
LiveEditFunctionTracker tracker(info.isolate(), info.function());
if (!CompileUnoptimizedCode(&info)) return;
@@ -775,10 +783,20 @@ static Handle<SharedFunctionInfo> CompileToplevel(CompilationInfo* info) {
ASSERT(info->is_eval() || info->is_global());
bool parse_allow_lazy =
- (info->pre_parse_data() != NULL ||
+ (info->cached_data_mode() == CONSUME_CACHED_DATA ||
String::cast(script->source())->length() > FLAG_min_preparse_length) &&
!DebuggerWantsEagerCompilation(info);
+ if (!parse_allow_lazy && info->cached_data_mode() != NO_CACHED_DATA) {
+ // We are going to parse eagerly, but we either 1) have cached data produced
+ // by lazy parsing or 2) are asked to generate cached data. We cannot use
+ // the existing data, since it won't contain all the symbols we need for
+ // eager parsing. In addition, it doesn't make sense to produce the data
+ // when parsing eagerly. That data would contain all symbols, but no
+ // functions, so it cannot be used to aid lazy parsing later.
+ info->SetCachedData(NULL, NO_CACHED_DATA);
+ }
+
Handle<SharedFunctionInfo> result;
{ VMState<COMPILER> state(info->isolate());
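
The new cached-data handling boils down to one rule: lazy-parse data is only usable when parsing lazily, so when the compiler decides to parse eagerly it neither consumes nor produces it. A small sketch of that decision, using the CachedDataMode values introduced in compiler.h later in this diff:

namespace example {

enum CachedDataMode {
  NO_CACHED_DATA,
  CONSUME_CACHED_DATA,
  PRODUCE_CACHED_DATA
};

// When parsing eagerly, neither consuming nor producing lazy-parse data makes
// sense, so the mode collapses to NO_CACHED_DATA (mirroring the branch above).
CachedDataMode EffectiveCachedDataMode(bool parse_allow_lazy,
                                       CachedDataMode requested) {
  if (!parse_allow_lazy && requested != NO_CACHED_DATA) return NO_CACHED_DATA;
  return requested;
}

}  // namespace example
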
@@ -846,7 +864,7 @@ static Handle<SharedFunctionInfo> CompileToplevel(CompilationInfo* info) {
Handle<JSFunction> Compiler::GetFunctionFromEval(Handle<String> source,
Handle<Context> context,
- LanguageMode language_mode,
+ StrictMode strict_mode,
ParseRestriction restriction,
int scope_position) {
Isolate* isolate = source->GetIsolate();
@@ -856,14 +874,14 @@ Handle<JSFunction> Compiler::GetFunctionFromEval(Handle<String> source,
CompilationCache* compilation_cache = isolate->compilation_cache();
Handle<SharedFunctionInfo> shared_info = compilation_cache->LookupEval(
- source, context, language_mode, scope_position);
+ source, context, strict_mode, scope_position);
if (shared_info.is_null()) {
Handle<Script> script = isolate->factory()->NewScript(source);
CompilationInfoWithZone info(script);
info.MarkAsEval();
if (context->IsNativeContext()) info.MarkAsGlobal();
- info.SetLanguageMode(language_mode);
+ info.SetStrictMode(strict_mode);
info.SetParseRestriction(restriction);
info.SetContext(context);
@@ -880,14 +898,8 @@ Handle<JSFunction> Compiler::GetFunctionFromEval(Handle<String> source,
// to handle eval-code in the optimizing compiler.
shared_info->DisableOptimization(kEval);
- // If caller is strict mode, the result must be in strict mode or
- // extended mode as well, but not the other way around. Consider:
- // eval("'use strict'; ...");
- ASSERT(language_mode != STRICT_MODE || !shared_info->is_classic_mode());
- // If caller is in extended mode, the result must also be in
- // extended mode.
- ASSERT(language_mode != EXTENDED_MODE ||
- shared_info->is_extended_mode());
+ // If caller is strict mode, the result must be in strict mode as well.
+ ASSERT(strict_mode == SLOPPY || shared_info->strict_mode() == STRICT);
if (!shared_info->dont_cache()) {
compilation_cache->PutEval(
source, context, shared_info, scope_position);
@@ -902,16 +914,25 @@ Handle<JSFunction> Compiler::GetFunctionFromEval(Handle<String> source,
}
-Handle<SharedFunctionInfo> Compiler::CompileScript(Handle<String> source,
- Handle<Object> script_name,
- int line_offset,
- int column_offset,
- bool is_shared_cross_origin,
- Handle<Context> context,
- v8::Extension* extension,
- ScriptDataImpl* pre_data,
- Handle<Object> script_data,
- NativesFlag natives) {
+Handle<SharedFunctionInfo> Compiler::CompileScript(
+ Handle<String> source,
+ Handle<Object> script_name,
+ int line_offset,
+ int column_offset,
+ bool is_shared_cross_origin,
+ Handle<Context> context,
+ v8::Extension* extension,
+ ScriptDataImpl** cached_data,
+ CachedDataMode cached_data_mode,
+ NativesFlag natives) {
+ if (cached_data_mode == NO_CACHED_DATA) {
+ cached_data = NULL;
+ } else if (cached_data_mode == PRODUCE_CACHED_DATA) {
+ ASSERT(cached_data && !*cached_data);
+ } else {
+ ASSERT(cached_data_mode == CONSUME_CACHED_DATA);
+ ASSERT(cached_data && *cached_data);
+ }
Isolate* isolate = source->GetIsolate();
int source_length = source->length();
isolate->counters()->total_load_size()->Increment(source_length);
@@ -952,18 +973,13 @@ Handle<SharedFunctionInfo> Compiler::CompileScript(Handle<String> source,
}
script->set_is_shared_cross_origin(is_shared_cross_origin);
- script->set_data(script_data.is_null() ? isolate->heap()->undefined_value()
- : *script_data);
-
// Compile the function and add it to the cache.
CompilationInfoWithZone info(script);
info.MarkAsGlobal();
info.SetExtension(extension);
- info.SetPreParseData(pre_data);
+ info.SetCachedData(cached_data, cached_data_mode);
info.SetContext(context);
- if (FLAG_use_strict) {
- info.SetLanguageMode(FLAG_harmony_scoping ? EXTENDED_MODE : STRICT_MODE);
- }
+ if (FLAG_use_strict) info.SetStrictMode(STRICT);
result = CompileToplevel(&info);
if (extension == NULL && !result.is_null() && !result->dont_cache()) {
compilation_cache->PutScript(source, context, result);
@@ -982,8 +998,8 @@ Handle<SharedFunctionInfo> Compiler::BuildFunctionInfo(FunctionLiteral* literal,
// Precondition: code has been parsed and scopes have been analyzed.
CompilationInfoWithZone info(script);
info.SetFunction(literal);
- info.SetScope(literal->scope());
- info.SetLanguageMode(literal->scope()->language_mode());
+ info.PrepareForCompilation(literal->scope());
+ info.SetStrictMode(literal->scope()->strict_mode());
Isolate* isolate = info.isolate();
Factory* factory = isolate->factory();
@@ -1078,8 +1094,7 @@ static void InsertCodeIntoOptimizedCodeMap(CompilationInfo* info) {
static bool CompileOptimizedPrologue(CompilationInfo* info) {
if (!Parser::Parse(info)) return false;
- LanguageMode language_mode = info->function()->language_mode();
- info->SetLanguageMode(language_mode);
+ info->SetStrictMode(info->function()->strict_mode());
if (!Rewriter::Rewrite(info)) return false;
if (!Scope::Analyze(info)) return false;
@@ -1178,7 +1193,7 @@ Handle<Code> Compiler::GetOptimizedCode(Handle<JSFunction> function,
if (FLAG_trace_opt) {
PrintF("[failed to optimize ");
function->PrintName();
- PrintF("]\n");
+ PrintF(": %s]\n", GetBailoutReason(info->bailout_reason()));
}
if (isolate->has_pending_exception()) isolate->clear_pending_exception();
diff --git a/deps/v8/src/compiler.h b/deps/v8/src/compiler.h
index 3bf4db5780..3802016883 100644
--- a/deps/v8/src/compiler.h
+++ b/deps/v8/src/compiler.h
@@ -45,6 +45,12 @@ enum ParseRestriction {
ONLY_SINGLE_FUNCTION_LITERAL // Only a single FunctionLiteral expression.
};
+enum CachedDataMode {
+ NO_CACHED_DATA,
+ CONSUME_CACHED_DATA,
+ PRODUCE_CACHED_DATA
+};
+
struct OffsetRange {
OffsetRange(int from, int to) : from(from), to(to) {}
int from;
@@ -66,11 +72,7 @@ class CompilationInfo {
bool is_lazy() const { return IsLazy::decode(flags_); }
bool is_eval() const { return IsEval::decode(flags_); }
bool is_global() const { return IsGlobal::decode(flags_); }
- bool is_classic_mode() const { return language_mode() == CLASSIC_MODE; }
- bool is_extended_mode() const { return language_mode() == EXTENDED_MODE; }
- LanguageMode language_mode() const {
- return LanguageModeField::decode(flags_);
- }
+ StrictMode strict_mode() const { return StrictModeField::decode(flags_); }
bool is_in_loop() const { return IsInLoop::decode(flags_); }
FunctionLiteral* function() const { return function_; }
Scope* scope() const { return scope_; }
@@ -81,7 +83,10 @@ class CompilationInfo {
Handle<Script> script() const { return script_; }
HydrogenCodeStub* code_stub() const {return code_stub_; }
v8::Extension* extension() const { return extension_; }
- ScriptDataImpl* pre_parse_data() const { return pre_parse_data_; }
+ ScriptDataImpl** cached_data() const { return cached_data_; }
+ CachedDataMode cached_data_mode() const {
+ return cached_data_mode_;
+ }
Handle<Context> context() const { return context_; }
BailoutId osr_ast_id() const { return osr_ast_id_; }
Handle<Code> unoptimized_code() const { return unoptimized_code_; }
@@ -109,11 +114,9 @@ class CompilationInfo {
bool this_has_uses() {
return this_has_uses_;
}
- void SetLanguageMode(LanguageMode language_mode) {
- ASSERT(this->language_mode() == CLASSIC_MODE ||
- this->language_mode() == language_mode ||
- language_mode == EXTENDED_MODE);
- flags_ = LanguageModeField::update(flags_, language_mode);
+ void SetStrictMode(StrictMode strict_mode) {
+ ASSERT(this->strict_mode() == SLOPPY || this->strict_mode() == strict_mode);
+ flags_ = StrictModeField::update(flags_, strict_mode);
}
void MarkAsInLoop() {
ASSERT(is_lazy());
@@ -175,10 +178,8 @@ class CompilationInfo {
ASSERT(function_ == NULL);
function_ = literal;
}
- void SetScope(Scope* scope) {
- ASSERT(scope_ == NULL);
- scope_ = scope;
- }
+ // When the scope is applied, we may have deferred work to do on the function.
+ void PrepareForCompilation(Scope* scope);
void SetGlobalScope(Scope* global_scope) {
ASSERT(global_scope_ == NULL);
global_scope_ = global_scope;
@@ -188,9 +189,15 @@ class CompilationInfo {
ASSERT(!is_lazy());
extension_ = extension;
}
- void SetPreParseData(ScriptDataImpl* pre_parse_data) {
- ASSERT(!is_lazy());
- pre_parse_data_ = pre_parse_data;
+ void SetCachedData(ScriptDataImpl** cached_data,
+ CachedDataMode cached_data_mode) {
+ cached_data_mode_ = cached_data_mode;
+ if (cached_data_mode == NO_CACHED_DATA) {
+ cached_data_ = NULL;
+ } else {
+ ASSERT(!is_lazy());
+ cached_data_ = cached_data;
+ }
}
void SetContext(Handle<Context> context) {
context_ = context;
@@ -229,6 +236,7 @@ class CompilationInfo {
SetMode(OPTIMIZE);
osr_ast_id_ = osr_ast_id;
unoptimized_code_ = unoptimized;
+ optimization_id_ = isolate()->NextOptimizationId();
}
void DisableOptimization();
@@ -317,6 +325,8 @@ class CompilationInfo {
return osr_ast_id_ == osr_ast_id && function.is_identical_to(closure_);
}
+ int optimization_id() const { return optimization_id_; }
+
protected:
CompilationInfo(Handle<Script> script,
Zone* zone);
@@ -359,26 +369,26 @@ class CompilationInfo {
// Flags that can be set for lazy compilation.
class IsInLoop: public BitField<bool, 3, 1> {};
// Strict mode - used in eager compilation.
- class LanguageModeField: public BitField<LanguageMode, 4, 2> {};
+ class StrictModeField: public BitField<StrictMode, 4, 1> {};
// Is this a function from our natives.
- class IsNative: public BitField<bool, 6, 1> {};
+ class IsNative: public BitField<bool, 5, 1> {};
// Is this code being compiled with support for deoptimization..
- class SupportsDeoptimization: public BitField<bool, 7, 1> {};
+ class SupportsDeoptimization: public BitField<bool, 6, 1> {};
// If compiling for debugging produce just full code matching the
// initial mode setting.
- class IsCompilingForDebugging: public BitField<bool, 8, 1> {};
+ class IsCompilingForDebugging: public BitField<bool, 7, 1> {};
// If the compiled code contains calls that require building a frame
- class IsCalling: public BitField<bool, 9, 1> {};
+ class IsCalling: public BitField<bool, 8, 1> {};
// If the compiled code contains calls that require building a frame
- class IsDeferredCalling: public BitField<bool, 10, 1> {};
+ class IsDeferredCalling: public BitField<bool, 9, 1> {};
// If the compiled code contains calls that require building a frame
- class IsNonDeferredCalling: public BitField<bool, 11, 1> {};
+ class IsNonDeferredCalling: public BitField<bool, 10, 1> {};
// If the compiled code saves double caller registers that it clobbers.
- class SavesCallerDoubles: public BitField<bool, 12, 1> {};
+ class SavesCallerDoubles: public BitField<bool, 11, 1> {};
// If the set of valid statements is restricted.
- class ParseRestricitonField: public BitField<ParseRestriction, 13, 1> {};
+ class ParseRestricitonField: public BitField<ParseRestriction, 12, 1> {};
// If the function requires a frame (for unspecified reasons)
- class RequiresFrame: public BitField<bool, 14, 1> {};
+ class RequiresFrame: public BitField<bool, 13, 1> {};
unsigned flags_;
@@ -402,7 +412,8 @@ class CompilationInfo {
// Fields possibly needed for eager compilation, NULL by default.
v8::Extension* extension_;
- ScriptDataImpl* pre_parse_data_;
+ ScriptDataImpl** cached_data_;
+ CachedDataMode cached_data_mode_;
// The context of the caller for eval code, and the global context for a
// global script. Will be a null handle otherwise.
@@ -452,6 +463,8 @@ class CompilationInfo {
Handle<Foreign> object_wrapper_;
+ int optimization_id_;
+
DISALLOW_COPY_AND_ASSIGN(CompilationInfo);
};
@@ -615,21 +628,22 @@ class Compiler : public AllStatic {
// Compile a String source within a context for eval.
static Handle<JSFunction> GetFunctionFromEval(Handle<String> source,
Handle<Context> context,
- LanguageMode language_mode,
+ StrictMode strict_mode,
ParseRestriction restriction,
int scope_position);
// Compile a String source within a context.
- static Handle<SharedFunctionInfo> CompileScript(Handle<String> source,
- Handle<Object> script_name,
- int line_offset,
- int column_offset,
- bool is_shared_cross_origin,
- Handle<Context> context,
- v8::Extension* extension,
- ScriptDataImpl* pre_data,
- Handle<Object> script_data,
- NativesFlag is_natives_code);
+ static Handle<SharedFunctionInfo> CompileScript(
+ Handle<String> source,
+ Handle<Object> script_name,
+ int line_offset,
+ int column_offset,
+ bool is_shared_cross_origin,
+ Handle<Context> context,
+ v8::Extension* extension,
+ ScriptDataImpl** cached_data,
+ CachedDataMode cached_data_mode,
+ NativesFlag is_natives_code);
// Create a shared function info object (the code may be lazily compiled).
static Handle<SharedFunctionInfo> BuildFunctionInfo(FunctionLiteral* node,
diff --git a/deps/v8/src/contexts.cc b/deps/v8/src/contexts.cc
index 710d30aa8e..33d47e9c4b 100644
--- a/deps/v8/src/contexts.cc
+++ b/deps/v8/src/contexts.cc
@@ -131,9 +131,9 @@ Handle<Object> Context::Lookup(Handle<String> name,
// to only do a local lookup for context extension objects.
if ((flags & FOLLOW_PROTOTYPE_CHAIN) == 0 ||
object->IsJSContextExtensionObject()) {
- *attributes = object->GetLocalPropertyAttribute(*name);
+ *attributes = JSReceiver::GetLocalPropertyAttribute(object, name);
} else {
- *attributes = object->GetPropertyAttribute(*name);
+ *attributes = JSReceiver::GetPropertyAttribute(object, name);
}
if (isolate->has_pending_exception()) return Handle<Object>();
@@ -185,12 +185,12 @@ Handle<Object> Context::Lookup(Handle<String> name,
*binding_flags = (init_flag == kNeedsInitialization)
? MUTABLE_CHECK_INITIALIZED : MUTABLE_IS_INITIALIZED;
break;
- case CONST:
+ case CONST_LEGACY:
*attributes = READ_ONLY;
*binding_flags = (init_flag == kNeedsInitialization)
? IMMUTABLE_CHECK_INITIALIZED : IMMUTABLE_IS_INITIALIZED;
break;
- case CONST_HARMONY:
+ case CONST:
*attributes = READ_ONLY;
*binding_flags = (init_flag == kNeedsInitialization)
? IMMUTABLE_CHECK_INITIALIZED_HARMONY :
@@ -222,8 +222,8 @@ Handle<Object> Context::Lookup(Handle<String> name,
}
*index = function_index;
*attributes = READ_ONLY;
- ASSERT(mode == CONST || mode == CONST_HARMONY);
- *binding_flags = (mode == CONST)
+ ASSERT(mode == CONST_LEGACY || mode == CONST);
+ *binding_flags = (mode == CONST_LEGACY)
? IMMUTABLE_IS_INITIALIZED : IMMUTABLE_IS_INITIALIZED_HARMONY;
return context;
}
@@ -368,7 +368,7 @@ Handle<Object> Context::ErrorMessageForCodeGenerationFromStrings() {
Handle<Object> result(error_message_for_code_gen_from_strings(),
GetIsolate());
if (!result->IsUndefined()) return result;
- return GetIsolate()->factory()->NewStringFromAscii(i::CStrVector(
+ return GetIsolate()->factory()->NewStringFromOneByte(STATIC_ASCII_VECTOR(
"Code generation from strings disallowed for this context"));
}
diff --git a/deps/v8/src/contexts.h b/deps/v8/src/contexts.h
index bd6c6a2bbc..6ba9b3ed7d 100644
--- a/deps/v8/src/contexts.h
+++ b/deps/v8/src/contexts.h
@@ -135,17 +135,19 @@ enum BindingFlags {
V(FLOAT64_ARRAY_FUN_INDEX, JSFunction, float64_array_fun) \
V(UINT8_CLAMPED_ARRAY_FUN_INDEX, JSFunction, uint8_clamped_array_fun) \
V(DATA_VIEW_FUN_INDEX, JSFunction, data_view_fun) \
- V(FUNCTION_MAP_INDEX, Map, function_map) \
- V(STRICT_MODE_FUNCTION_MAP_INDEX, Map, strict_mode_function_map) \
- V(FUNCTION_WITHOUT_PROTOTYPE_MAP_INDEX, Map, function_without_prototype_map) \
- V(STRICT_MODE_FUNCTION_WITHOUT_PROTOTYPE_MAP_INDEX, Map, \
- strict_mode_function_without_prototype_map) \
+ V(SLOPPY_FUNCTION_MAP_INDEX, Map, sloppy_function_map) \
+ V(STRICT_FUNCTION_MAP_INDEX, Map, strict_function_map) \
+ V(SLOPPY_FUNCTION_WITHOUT_PROTOTYPE_MAP_INDEX, Map, \
+ sloppy_function_without_prototype_map) \
+ V(STRICT_FUNCTION_WITHOUT_PROTOTYPE_MAP_INDEX, Map, \
+ strict_function_without_prototype_map) \
V(REGEXP_RESULT_MAP_INDEX, Map, regexp_result_map)\
- V(ARGUMENTS_BOILERPLATE_INDEX, JSObject, arguments_boilerplate) \
+ V(SLOPPY_ARGUMENTS_BOILERPLATE_INDEX, JSObject, \
+ sloppy_arguments_boilerplate) \
V(ALIASED_ARGUMENTS_BOILERPLATE_INDEX, JSObject, \
aliased_arguments_boilerplate) \
- V(STRICT_MODE_ARGUMENTS_BOILERPLATE_INDEX, JSObject, \
- strict_mode_arguments_boilerplate) \
+ V(STRICT_ARGUMENTS_BOILERPLATE_INDEX, JSObject, \
+ strict_arguments_boilerplate) \
V(MESSAGE_LISTENERS_INDEX, JSObject, message_listeners) \
V(MAKE_MESSAGE_FUN_INDEX, JSFunction, make_message_fun) \
V(GET_STACK_TRACE_LINE_INDEX, JSFunction, get_stack_trace_line_fun) \
@@ -160,13 +162,19 @@ enum BindingFlags {
V(SCRIPT_FUNCTION_INDEX, JSFunction, script_function) \
V(OPAQUE_REFERENCE_FUNCTION_INDEX, JSFunction, opaque_reference_function) \
V(CONTEXT_EXTENSION_FUNCTION_INDEX, JSFunction, context_extension_function) \
- V(OUT_OF_MEMORY_INDEX, Object, out_of_memory) \
V(MAP_CACHE_INDEX, Object, map_cache) \
V(EMBEDDER_DATA_INDEX, FixedArray, embedder_data) \
V(ALLOW_CODE_GEN_FROM_STRINGS_INDEX, Object, allow_code_gen_from_strings) \
V(ERROR_MESSAGE_FOR_CODE_GEN_FROM_STRINGS_INDEX, Object, \
error_message_for_code_gen_from_strings) \
V(RUN_MICROTASKS_INDEX, JSFunction, run_microtasks) \
+ V(ENQUEUE_EXTERNAL_MICROTASK_INDEX, JSFunction, enqueue_external_microtask) \
+ V(IS_PROMISE_INDEX, JSFunction, is_promise) \
+ V(PROMISE_CREATE_INDEX, JSFunction, promise_create) \
+ V(PROMISE_RESOLVE_INDEX, JSFunction, promise_resolve) \
+ V(PROMISE_REJECT_INDEX, JSFunction, promise_reject) \
+ V(PROMISE_CHAIN_INDEX, JSFunction, promise_chain) \
+ V(PROMISE_CATCH_INDEX, JSFunction, promise_catch) \
V(TO_COMPLETE_PROPERTY_DESCRIPTOR_INDEX, JSFunction, \
to_complete_property_descriptor) \
V(DERIVED_HAS_TRAP_INDEX, JSFunction, derived_has_trap) \
@@ -179,9 +187,8 @@ enum BindingFlags {
observers_begin_perform_splice) \
V(OBSERVERS_END_SPLICE_INDEX, JSFunction, \
observers_end_perform_splice) \
- V(GENERATOR_FUNCTION_MAP_INDEX, Map, generator_function_map) \
- V(STRICT_MODE_GENERATOR_FUNCTION_MAP_INDEX, Map, \
- strict_mode_generator_function_map) \
+ V(SLOPPY_GENERATOR_FUNCTION_MAP_INDEX, Map, sloppy_generator_function_map) \
+ V(STRICT_GENERATOR_FUNCTION_MAP_INDEX, Map, strict_generator_function_map) \
V(GENERATOR_OBJECT_PROTOTYPE_MAP_INDEX, Map, \
generator_object_prototype_map) \
V(GENERATOR_RESULT_MAP_INDEX, Map, generator_result_map)
@@ -225,8 +232,11 @@ enum BindingFlags {
// In addition, function contexts may have statically allocated context slots
// to store local variables/functions that are accessed from inner functions
// (via static context addresses) or through 'eval' (dynamic context lookups).
-// Finally, the native context contains additional slots for fast access to
-// native properties.
+// The native context contains additional slots for fast access to native
+// properties.
+//
+// Finally, with Harmony scoping, the JSFunction representing a top level
+// script will have the GlobalContext rather than a FunctionContext.
class Context: public FixedArray {
public:
@@ -255,14 +265,14 @@ class Context: public FixedArray {
// These slots are only in native contexts.
GLOBAL_PROXY_INDEX = MIN_CONTEXT_SLOTS,
SECURITY_TOKEN_INDEX,
- ARGUMENTS_BOILERPLATE_INDEX,
+ SLOPPY_ARGUMENTS_BOILERPLATE_INDEX,
ALIASED_ARGUMENTS_BOILERPLATE_INDEX,
- STRICT_MODE_ARGUMENTS_BOILERPLATE_INDEX,
+ STRICT_ARGUMENTS_BOILERPLATE_INDEX,
REGEXP_RESULT_MAP_INDEX,
- FUNCTION_MAP_INDEX,
- STRICT_MODE_FUNCTION_MAP_INDEX,
- FUNCTION_WITHOUT_PROTOTYPE_MAP_INDEX,
- STRICT_MODE_FUNCTION_WITHOUT_PROTOTYPE_MAP_INDEX,
+ SLOPPY_FUNCTION_MAP_INDEX,
+ STRICT_FUNCTION_MAP_INDEX,
+ SLOPPY_FUNCTION_WITHOUT_PROTOTYPE_MAP_INDEX,
+ STRICT_FUNCTION_WITHOUT_PROTOTYPE_MAP_INDEX,
INITIAL_OBJECT_PROTOTYPE_INDEX,
INITIAL_ARRAY_PROTOTYPE_INDEX,
BOOLEAN_FUNCTION_INDEX,
@@ -318,6 +328,13 @@ class Context: public FixedArray {
ALLOW_CODE_GEN_FROM_STRINGS_INDEX,
ERROR_MESSAGE_FOR_CODE_GEN_FROM_STRINGS_INDEX,
RUN_MICROTASKS_INDEX,
+ ENQUEUE_EXTERNAL_MICROTASK_INDEX,
+ IS_PROMISE_INDEX,
+ PROMISE_CREATE_INDEX,
+ PROMISE_RESOLVE_INDEX,
+ PROMISE_REJECT_INDEX,
+ PROMISE_CHAIN_INDEX,
+ PROMISE_CATCH_INDEX,
TO_COMPLETE_PROPERTY_DESCRIPTOR_INDEX,
DERIVED_HAS_TRAP_INDEX,
DERIVED_GET_TRAP_INDEX,
@@ -327,8 +344,8 @@ class Context: public FixedArray {
OBSERVERS_ENQUEUE_SPLICE_INDEX,
OBSERVERS_BEGIN_SPLICE_INDEX,
OBSERVERS_END_SPLICE_INDEX,
- GENERATOR_FUNCTION_MAP_INDEX,
- STRICT_MODE_GENERATOR_FUNCTION_MAP_INDEX,
+ SLOPPY_GENERATOR_FUNCTION_MAP_INDEX,
+ STRICT_GENERATOR_FUNCTION_MAP_INDEX,
GENERATOR_OBJECT_PROTOTYPE_MAP_INDEX,
GENERATOR_RESULT_MAP_INDEX,
@@ -422,12 +439,6 @@ class Context: public FixedArray {
return map == map->GetHeap()->global_context_map();
}
- // Tells whether the native context is marked with out of memory.
- inline bool has_out_of_memory();
-
- // Mark the native context with out of memory.
- inline void mark_out_of_memory();
-
// A native context holds a list of all functions with optimized code.
void AddOptimizedFunction(JSFunction* function);
void RemoveOptimizedFunction(JSFunction* function);
@@ -488,14 +499,14 @@ class Context: public FixedArray {
return kHeaderSize + index * kPointerSize - kHeapObjectTag;
}
- static int FunctionMapIndex(LanguageMode language_mode, bool is_generator) {
+ static int FunctionMapIndex(StrictMode strict_mode, bool is_generator) {
return is_generator
- ? (language_mode == CLASSIC_MODE
- ? GENERATOR_FUNCTION_MAP_INDEX
- : STRICT_MODE_GENERATOR_FUNCTION_MAP_INDEX)
- : (language_mode == CLASSIC_MODE
- ? FUNCTION_MAP_INDEX
- : STRICT_MODE_FUNCTION_MAP_INDEX);
+ ? (strict_mode == SLOPPY
+ ? SLOPPY_GENERATOR_FUNCTION_MAP_INDEX
+ : STRICT_GENERATOR_FUNCTION_MAP_INDEX)
+ : (strict_mode == SLOPPY
+ ? SLOPPY_FUNCTION_MAP_INDEX
+ : STRICT_FUNCTION_MAP_INDEX);
}
static const int kSize = kHeaderSize + NATIVE_CONTEXT_SLOTS * kPointerSize;
diff --git a/deps/v8/src/conversions-inl.h b/deps/v8/src/conversions-inl.h
index 3cb7ef2992..e503eb5027 100644
--- a/deps/v8/src/conversions-inl.h
+++ b/deps/v8/src/conversions-inl.h
@@ -128,7 +128,7 @@ inline bool AdvanceToNonspace(UnicodeCache* unicode_cache,
Iterator* current,
EndMark end) {
while (*current != end) {
- if (!unicode_cache->IsWhiteSpace(**current)) return true;
+ if (!unicode_cache->IsWhiteSpaceOrLineTerminator(**current)) return true;
++*current;
}
return false;
diff --git a/deps/v8/src/counters.cc b/deps/v8/src/counters.cc
index e0a6a60a0a..e7fab1c3dd 100644
--- a/deps/v8/src/counters.cc
+++ b/deps/v8/src/counters.cc
@@ -62,9 +62,7 @@ void HistogramTimer::Start() {
if (Enabled()) {
timer_.Start();
}
- if (FLAG_log_internal_timer_events) {
- LOG(isolate(), TimerEvent(Logger::START, name()));
- }
+ isolate()->event_logger()(name(), Logger::START);
}
@@ -75,9 +73,7 @@ void HistogramTimer::Stop() {
AddSample(static_cast<int>(timer_.Elapsed().InMilliseconds()));
timer_.Stop();
}
- if (FLAG_log_internal_timer_events) {
- LOG(isolate(), TimerEvent(Logger::END, name()));
- }
+ isolate()->event_logger()(name(), Logger::END);
}
} } // namespace v8::internal
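
The counters.cc hunks above drop the FLAG_log_internal_timer_events check and route histogram timer start/stop through a per-isolate event-logger callback. Below is a minimal sketch of an embedder-side callback with the shape that call expects; the registration entry point is not part of this hunk, so Isolate::SetEventLogger is an assumption to verify against the updated v8.h.

    #include <cstdio>
    #include "v8.h"

    // Callback matching the invocation isolate()->event_logger()(name(), Logger::START/END).
    static void TraceTimerEvent(const char* name, int event) {
      // `event` carries Logger::START or Logger::END for a HistogramTimer.
      std::fprintf(stderr, "timer event: %s (%d)\n", name, event);
    }

    void InstallTimerEventLogger(v8::Isolate* isolate) {
      // Assumed registration entry point; not shown in the hunk above.
      isolate->SetEventLogger(TraceTimerEvent);
    }
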
diff --git a/deps/v8/src/d8-debug.cc b/deps/v8/src/d8-debug.cc
index 2c909fa762..7eb2016bd3 100644
--- a/deps/v8/src/d8-debug.cc
+++ b/deps/v8/src/d8-debug.cc
@@ -217,6 +217,8 @@ void RemoteDebugger::Run() {
delete event;
}
+ delete conn_;
+ conn_ = NULL;
// Wait for the receiver thread to end.
receiver.Join();
}
diff --git a/deps/v8/src/d8-debug.h b/deps/v8/src/d8-debug.h
index f753177263..2d4f5e1503 100644
--- a/deps/v8/src/d8-debug.h
+++ b/deps/v8/src/d8-debug.h
@@ -31,6 +31,7 @@
#include "d8.h"
#include "debug.h"
+#include "platform/socket.h"
namespace v8 {
diff --git a/deps/v8/src/d8.cc b/deps/v8/src/d8.cc
index 76ff4f9431..7ac0c6546a 100644
--- a/deps/v8/src/d8.cc
+++ b/deps/v8/src/d8.cc
@@ -119,6 +119,8 @@ class PerIsolateData {
Persistent<Context>* realms_;
Persistent<Value> realm_shared_;
+ int RealmIndexOrThrow(const v8::FunctionCallbackInfo<v8::Value>& args,
+ int arg_offset);
int RealmFind(Handle<Context> context);
};
@@ -203,7 +205,10 @@ bool Shell::ExecuteString(Isolate* isolate,
// When debugging make exceptions appear to be uncaught.
try_catch.SetVerbose(true);
}
- Handle<Script> script = Script::New(source, name);
+ ScriptOrigin origin(name);
+ ScriptCompiler::Source script_source(source, origin);
+ Handle<UnboundScript> script =
+ ScriptCompiler::CompileUnbound(isolate, &script_source);
if (script.IsEmpty()) {
// Print errors that happened during compilation.
if (report_exceptions && !FLAG_debugger)
@@ -214,7 +219,7 @@ bool Shell::ExecuteString(Isolate* isolate,
Local<Context> realm =
Local<Context>::New(isolate, data->realms_[data->realm_current_]);
realm->Enter();
- Handle<Value> result = script->Run();
+ Handle<Value> result = script->BindToCurrentContext()->Run();
realm->Exit();
data->realm_current_ = data->realm_switch_;
if (result.IsEmpty()) {
@@ -288,6 +293,24 @@ int PerIsolateData::RealmFind(Handle<Context> context) {
}
+int PerIsolateData::RealmIndexOrThrow(
+ const v8::FunctionCallbackInfo<v8::Value>& args,
+ int arg_offset) {
+ if (args.Length() < arg_offset || !args[arg_offset]->IsNumber()) {
+ Throw(args.GetIsolate(), "Invalid argument");
+ return -1;
+ }
+ int index = args[arg_offset]->Int32Value();
+ if (index < 0 ||
+ index >= realm_count_ ||
+ realms_[index].IsEmpty()) {
+ Throw(args.GetIsolate(), "Invalid realm index");
+ return -1;
+ }
+ return index;
+}
+
+
#ifndef V8_SHARED
// performance.now() returns a time stamp as double, measured in milliseconds.
void Shell::PerformanceNow(const v8::FunctionCallbackInfo<v8::Value>& args) {
@@ -325,15 +348,8 @@ void Shell::RealmOwner(const v8::FunctionCallbackInfo<v8::Value>& args) {
// (Note that properties of global objects cannot be read/written cross-realm.)
void Shell::RealmGlobal(const v8::FunctionCallbackInfo<v8::Value>& args) {
PerIsolateData* data = PerIsolateData::Get(args.GetIsolate());
- if (args.Length() < 1 || !args[0]->IsNumber()) {
- Throw(args.GetIsolate(), "Invalid argument");
- return;
- }
- int index = args[0]->Uint32Value();
- if (index >= data->realm_count_ || data->realms_[index].IsEmpty()) {
- Throw(args.GetIsolate(), "Invalid realm index");
- return;
- }
+ int index = data->RealmIndexOrThrow(args, 0);
+ if (index == -1) return;
args.GetReturnValue().Set(
Local<Context>::New(args.GetIsolate(), data->realms_[index])->Global());
}
@@ -361,13 +377,9 @@ void Shell::RealmCreate(const v8::FunctionCallbackInfo<v8::Value>& args) {
void Shell::RealmDispose(const v8::FunctionCallbackInfo<v8::Value>& args) {
Isolate* isolate = args.GetIsolate();
PerIsolateData* data = PerIsolateData::Get(isolate);
- if (args.Length() < 1 || !args[0]->IsNumber()) {
- Throw(args.GetIsolate(), "Invalid argument");
- return;
- }
- int index = args[0]->Uint32Value();
- if (index >= data->realm_count_ || data->realms_[index].IsEmpty() ||
- index == 0 ||
+ int index = data->RealmIndexOrThrow(args, 0);
+ if (index == -1) return;
+ if (index == 0 ||
index == data->realm_current_ || index == data->realm_switch_) {
Throw(args.GetIsolate(), "Invalid realm index");
return;
@@ -380,15 +392,8 @@ void Shell::RealmDispose(const v8::FunctionCallbackInfo<v8::Value>& args) {
void Shell::RealmSwitch(const v8::FunctionCallbackInfo<v8::Value>& args) {
Isolate* isolate = args.GetIsolate();
PerIsolateData* data = PerIsolateData::Get(isolate);
- if (args.Length() < 1 || !args[0]->IsNumber()) {
- Throw(args.GetIsolate(), "Invalid argument");
- return;
- }
- int index = args[0]->Uint32Value();
- if (index >= data->realm_count_ || data->realms_[index].IsEmpty()) {
- Throw(args.GetIsolate(), "Invalid realm index");
- return;
- }
+ int index = data->RealmIndexOrThrow(args, 0);
+ if (index == -1) return;
data->realm_switch_ = index;
}
@@ -397,20 +402,19 @@ void Shell::RealmSwitch(const v8::FunctionCallbackInfo<v8::Value>& args) {
void Shell::RealmEval(const v8::FunctionCallbackInfo<v8::Value>& args) {
Isolate* isolate = args.GetIsolate();
PerIsolateData* data = PerIsolateData::Get(isolate);
- if (args.Length() < 2 || !args[0]->IsNumber() || !args[1]->IsString()) {
+ int index = data->RealmIndexOrThrow(args, 0);
+ if (index == -1) return;
+ if (args.Length() < 2 || !args[1]->IsString()) {
Throw(args.GetIsolate(), "Invalid argument");
return;
}
- int index = args[0]->Uint32Value();
- if (index >= data->realm_count_ || data->realms_[index].IsEmpty()) {
- Throw(args.GetIsolate(), "Invalid realm index");
- return;
- }
- Handle<Script> script = Script::New(args[1]->ToString());
+ ScriptCompiler::Source script_source(args[1]->ToString());
+ Handle<UnboundScript> script = ScriptCompiler::CompileUnbound(
+ isolate, &script_source);
if (script.IsEmpty()) return;
Local<Context> realm = Local<Context>::New(isolate, data->realms_[index]);
realm->Enter();
- Handle<Value> result = script->Run();
+ Handle<Value> result = script->BindToCurrentContext()->Run();
realm->Exit();
args.GetReturnValue().Set(result);
}
@@ -807,7 +811,8 @@ void Shell::InstallUtilityScript(Isolate* isolate) {
Handle<String> name =
String::NewFromUtf8(isolate, shell_source_name.start(),
String::kNormalString, shell_source_name.length());
- Handle<Script> script = Script::Compile(source, name);
+ ScriptOrigin origin(name);
+ Handle<Script> script = Script::Compile(source, &origin);
script->Run();
// Mark the d8 shell script as native to avoid it showing up as normal source
// in the debugger.
@@ -1435,6 +1440,9 @@ bool Shell::SetOptions(int argc, char* argv[]) {
} else if (strcmp(argv[i], "--throws") == 0) {
options.expected_to_throw = true;
argv[i] = NULL;
+ } else if (strncmp(argv[i], "--icu-data-file=", 16) == 0) {
+ options.icu_data_file = argv[i] + 16;
+ argv[i] = NULL;
}
#ifdef V8_SHARED
else if (strcmp(argv[i], "--dump-counters") == 0) {
@@ -1669,7 +1677,7 @@ class MockArrayBufferAllocator : public v8::ArrayBuffer::Allocator {
int Shell::Main(int argc, char* argv[]) {
if (!SetOptions(argc, argv)) return 1;
- v8::V8::InitializeICU();
+ v8::V8::InitializeICU(options.icu_data_file);
#ifndef V8_SHARED
i::FLAG_trace_hydrogen_file = "hydrogen.cfg";
i::FLAG_redirect_code_traces_to = "code.asm";
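
The d8.cc hunks above replace the deprecated Script::New / script->Run() pattern with the ScriptCompiler API: compile once to a context-independent UnboundScript, then bind it to whichever context is currently entered. A condensed sketch using only the calls that appear in the hunks (ScriptOrigin, ScriptCompiler::Source, CompileUnbound, BindToCurrentContext):

    // Compile-then-bind flow as used by Shell::ExecuteString and Shell::RealmEval.
    v8::Handle<v8::Value> CompileAndRun(v8::Isolate* isolate,
                                        v8::Handle<v8::String> source,
                                        v8::Handle<v8::String> name) {
      v8::ScriptOrigin origin(name);
      v8::ScriptCompiler::Source script_source(source, origin);
      v8::Handle<v8::UnboundScript> script =
          v8::ScriptCompiler::CompileUnbound(isolate, &script_source);
      if (script.IsEmpty()) return v8::Handle<v8::Value>();  // compile error
      // An UnboundScript is context-independent; bind it to the entered context to run.
      return script->BindToCurrentContext()->Run();
    }

The point of the split is that a single UnboundScript can later be bound to different contexts, which is what Shell::RealmEval above relies on when it enters a realm before binding and running.
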
diff --git a/deps/v8/src/d8.h b/deps/v8/src/d8.h
index db2edb93c9..3edd8a7307 100644
--- a/deps/v8/src/d8.h
+++ b/deps/v8/src/d8.h
@@ -233,7 +233,8 @@ class ShellOptions {
expected_to_throw(false),
mock_arraybuffer_allocator(false),
num_isolates(1),
- isolate_sources(NULL) { }
+ isolate_sources(NULL),
+ icu_data_file(NULL) { }
~ShellOptions() {
#ifndef V8_SHARED
@@ -258,6 +259,7 @@ class ShellOptions {
bool mock_arraybuffer_allocator;
int num_isolates;
SourceGroup* isolate_sources;
+ const char* icu_data_file;
};
#ifdef V8_SHARED
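
Together with the d8.cc changes above, the new icu_data_file member lets the shell forward a --icu-data-file=... path to V8::InitializeICU, which still accepts NULL for the previous default behaviour. A short sketch of that flag handling, kept separate from the ShellOptions plumbing for clarity:

    #include <cstring>
    #include "v8.h"

    // Mirrors the handling added in Shell::SetOptions and Shell::Main.
    void InitializeICUFromArgs(int argc, char* argv[]) {
      const char* icu_data_file = NULL;  // NULL keeps the old built-in default
      for (int i = 1; i < argc; ++i) {
        if (strncmp(argv[i], "--icu-data-file=", 16) == 0) {
          icu_data_file = argv[i] + 16;  // path after "--icu-data-file="
        }
      }
      v8::V8::InitializeICU(icu_data_file);
    }
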
diff --git a/deps/v8/src/date.cc b/deps/v8/src/date.cc
index 4afd8dc60c..70d6be989f 100644
--- a/deps/v8/src/date.cc
+++ b/deps/v8/src/date.cc
@@ -62,6 +62,7 @@ void DateCache::ResetDateCache() {
after_ = &dst_[1];
local_offset_ms_ = kInvalidLocalOffsetInMs;
ymd_valid_ = false;
+ OS::ClearTimezoneCache(tz_cache_);
}
diff --git a/deps/v8/src/date.h b/deps/v8/src/date.h
index fcd61db046..e9c9d9cb06 100644
--- a/deps/v8/src/date.h
+++ b/deps/v8/src/date.h
@@ -62,11 +62,14 @@ class DateCache {
// It is an invariant of DateCache that cache stamp is non-negative.
static const int kInvalidStamp = -1;
- DateCache() : stamp_(0) {
+ DateCache() : stamp_(0), tz_cache_(OS::CreateTimezoneCache()) {
ResetDateCache();
}
- virtual ~DateCache() {}
+ virtual ~DateCache() {
+ OS::DisposeTimezoneCache(tz_cache_);
+ tz_cache_ = NULL;
+ }
// Clears cached timezone information and increments the cache stamp.
@@ -113,7 +116,7 @@ class DateCache {
if (time_ms < 0 || time_ms > kMaxEpochTimeInMs) {
time_ms = EquivalentTime(time_ms);
}
- return OS::LocalTimezone(static_cast<double>(time_ms));
+ return OS::LocalTimezone(static_cast<double>(time_ms), tz_cache_);
}
// ECMA 262 - 15.9.5.26
@@ -182,11 +185,11 @@ class DateCache {
// These functions are virtual so that we can override them when testing.
virtual int GetDaylightSavingsOffsetFromOS(int64_t time_sec) {
double time_ms = static_cast<double>(time_sec * 1000);
- return static_cast<int>(OS::DaylightSavingsOffset(time_ms));
+ return static_cast<int>(OS::DaylightSavingsOffset(time_ms, tz_cache_));
}
virtual int GetLocalOffsetFromOS() {
- double offset = OS::LocalTimeOffset();
+ double offset = OS::LocalTimeOffset(tz_cache_);
ASSERT(offset < kInvalidLocalOffsetInMs);
return static_cast<int>(offset);
}
@@ -253,6 +256,8 @@ class DateCache {
int ymd_year_;
int ymd_month_;
int ymd_day_;
+
+ TimezoneCache* tz_cache_;
};
} } // namespace v8::internal
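
The date.h hunks above make every OS timezone query take an explicit TimezoneCache that the caller creates, clears and disposes. A sketch of that lifecycle outside DateCache follows; the return types are inferred from the call sites above (LocalTimezone as a name string, the offsets as milliseconds in a double) and should be checked against platform.h.

    // Same create / query / clear / dispose lifecycle DateCache now follows.
    class ScopedTimezoneCache {
     public:
      ScopedTimezoneCache() : cache_(OS::CreateTimezoneCache()) {}
      ~ScopedTimezoneCache() { OS::DisposeTimezoneCache(cache_); }

      const char* Name(double time_ms) {          // e.g. "CET"; inferred return type
        return OS::LocalTimezone(time_ms, cache_);
      }
      double LocalOffsetMs() { return OS::LocalTimeOffset(cache_); }
      double DstOffsetMs(double time_ms) {
        return OS::DaylightSavingsOffset(time_ms, cache_);
      }
      void Reset() { OS::ClearTimezoneCache(cache_); }  // as in ResetDateCache()

     private:
      TimezoneCache* cache_;
    };
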
diff --git a/deps/v8/src/date.js b/deps/v8/src/date.js
index f3d4af244f..b7ecbeb397 100644
--- a/deps/v8/src/date.js
+++ b/deps/v8/src/date.js
@@ -46,6 +46,7 @@ var timezone_cache_timezone;
function LocalTimezone(t) {
if (NUMBER_IS_NAN(t)) return "";
+ CheckDateCacheCurrent();
if (t == timezone_cache_time) {
return timezone_cache_timezone;
}
@@ -156,6 +157,7 @@ function DateConstructor(year, month, date, hours, minutes, seconds, ms) {
} else if (IS_STRING(year)) {
// Probe the Date cache. If we already have a time value for the
// given time, we re-use that instead of parsing the string again.
+ CheckDateCacheCurrent();
var cache = Date_cache;
if (cache.string === year) {
value = cache.time;
@@ -743,15 +745,26 @@ function DateToJSON(key) {
}
-function ResetDateCache() {
+var date_cache_version_holder;
+var date_cache_version = NAN;
+
+
+function CheckDateCacheCurrent() {
+ if (!date_cache_version_holder) {
+ date_cache_version_holder = %DateCacheVersion();
+ }
+ if (date_cache_version_holder[0] == date_cache_version) {
+ return;
+ }
+ date_cache_version = date_cache_version_holder[0];
+
// Reset the timezone cache:
timezone_cache_time = NAN;
- timezone_cache_timezone = undefined;
+ timezone_cache_timezone = UNDEFINED;
// Reset the date cache:
- cache = Date_cache;
- cache.time = NAN;
- cache.string = null;
+ Date_cache.time = NAN;
+ Date_cache.string = null;
}
diff --git a/deps/v8/src/dateparser.h b/deps/v8/src/dateparser.h
index 27584ce39e..7dc489de34 100644
--- a/deps/v8/src/dateparser.h
+++ b/deps/v8/src/dateparser.h
@@ -122,7 +122,7 @@ class DateParser : public AllStatic {
}
bool SkipWhiteSpace() {
- if (unicode_cache_->IsWhiteSpace(ch_)) {
+ if (unicode_cache_->IsWhiteSpaceOrLineTerminator(ch_)) {
Next();
return true;
}
diff --git a/deps/v8/src/debug.cc b/deps/v8/src/debug.cc
index d474e2059c..d7667f19c8 100644
--- a/deps/v8/src/debug.cc
+++ b/deps/v8/src/debug.cc
@@ -754,6 +754,7 @@ bool Debug::CompileDebuggerScript(Isolate* isolate, int index) {
isolate->bootstrapper()->NativesSourceLookup(index);
Vector<const char> name = Natives::GetScriptName(index);
Handle<String> script_name = factory->NewStringFromAscii(name);
+ ASSERT(!script_name.is_null());
Handle<Context> context = isolate->native_context();
// Compile the script.
@@ -762,8 +763,7 @@ bool Debug::CompileDebuggerScript(Isolate* isolate, int index) {
script_name, 0, 0,
false,
context,
- NULL, NULL,
- Handle<String>::null(),
+ NULL, NULL, NO_CACHED_DATA,
NATIVES_CODE);
// Silently ignore stack overflows during compilation.
@@ -792,7 +792,7 @@ bool Debug::CompileDebuggerScript(Isolate* isolate, int index) {
isolate->ComputeLocation(&computed_location);
Handle<Object> message = MessageHandler::MakeMessageObject(
isolate, "error_loading_debugger", &computed_location,
- Vector<Handle<Object> >::empty(), Handle<String>(), Handle<JSArray>());
+ Vector<Handle<Object> >::empty(), Handle<JSArray>());
ASSERT(!isolate->has_pending_exception());
if (!exception.is_null()) {
isolate->set_pending_exception(*exception);
@@ -853,7 +853,7 @@ bool Debug::Load() {
key,
Handle<Object>(global->builtins(), isolate_),
NONE,
- kNonStrictMode),
+ SLOPPY),
false);
// Compile the JavaScript for the debugger in the debugger context.
@@ -1900,30 +1900,34 @@ static void RedirectActivationsToRecompiledCodeOnThread(
}
// Iterate over the RelocInfo in the original code to compute the sum of the
- // constant pools sizes. (See Assembler::CheckConstPool())
- // Note that this is only useful for architectures using constant pools.
- int constpool_mask = RelocInfo::ModeMask(RelocInfo::CONST_POOL);
- int frame_const_pool_size = 0;
- for (RelocIterator it(*frame_code, constpool_mask); !it.done(); it.next()) {
+ // constant pools and veneer pools sizes. (See Assembler::CheckConstPool()
+ // and Assembler::CheckVeneerPool())
+ // Note that this is only useful for architectures using constant pools or
+ // veneer pools.
+ int pool_mask = RelocInfo::ModeMask(RelocInfo::CONST_POOL) |
+ RelocInfo::ModeMask(RelocInfo::VENEER_POOL);
+ int frame_pool_size = 0;
+ for (RelocIterator it(*frame_code, pool_mask); !it.done(); it.next()) {
RelocInfo* info = it.rinfo();
if (info->pc() >= frame->pc()) break;
- frame_const_pool_size += static_cast<int>(info->data());
+ frame_pool_size += static_cast<int>(info->data());
}
intptr_t frame_offset =
- frame->pc() - frame_code->instruction_start() - frame_const_pool_size;
+ frame->pc() - frame_code->instruction_start() - frame_pool_size;
// Iterate over the RelocInfo for new code to find the number of bytes
// generated for debug slots and constant pools.
int debug_break_slot_bytes = 0;
- int new_code_const_pool_size = 0;
+ int new_code_pool_size = 0;
int mask = RelocInfo::ModeMask(RelocInfo::DEBUG_BREAK_SLOT) |
- RelocInfo::ModeMask(RelocInfo::CONST_POOL);
+ RelocInfo::ModeMask(RelocInfo::CONST_POOL) |
+ RelocInfo::ModeMask(RelocInfo::VENEER_POOL);
for (RelocIterator it(*new_code, mask); !it.done(); it.next()) {
// Check if the pc in the new code with debug break
// slots is before this slot.
RelocInfo* info = it.rinfo();
intptr_t new_offset = info->pc() - new_code->instruction_start() -
- new_code_const_pool_size - debug_break_slot_bytes;
+ new_code_pool_size - debug_break_slot_bytes;
if (new_offset >= frame_offset) {
break;
}
@@ -1932,14 +1936,14 @@ static void RedirectActivationsToRecompiledCodeOnThread(
debug_break_slot_bytes += Assembler::kDebugBreakSlotLength;
} else {
ASSERT(RelocInfo::IsConstPool(info->rmode()));
- // The size of the constant pool is encoded in the data.
- new_code_const_pool_size += static_cast<int>(info->data());
+ // The size of the pools is encoded in the data.
+ new_code_pool_size += static_cast<int>(info->data());
}
}
// Compute the equivalent pc in the new code.
byte* new_pc = new_code->instruction_start() + frame_offset +
- debug_break_slot_bytes + new_code_const_pool_size;
+ debug_break_slot_bytes + new_code_pool_size;
if (FLAG_trace_deopt) {
PrintF("Replacing code %08" V8PRIxPTR " - %08" V8PRIxPTR " (%d) "
@@ -2360,7 +2364,7 @@ void Debug::SetAfterBreakTarget(JavaScriptFrame* frame) {
// Continue just after the slot.
thread_local_.after_break_target_ = addr + Assembler::kDebugBreakSlotLength;
- } else if (IsDebugBreak(Assembler::target_address_at(addr))) {
+ } else if (IsDebugBreak(Assembler::target_address_at(addr, *code))) {
// We now know that there is still a debug break call at the target address,
// so the break point is still there and the original code will hold the
// address to jump to in order to complete the call which is replaced by a
@@ -2371,13 +2375,15 @@ void Debug::SetAfterBreakTarget(JavaScriptFrame* frame) {
// Install jump to the call address in the original code. This will be the
// call which was overwritten by the call to DebugBreakXXX.
- thread_local_.after_break_target_ = Assembler::target_address_at(addr);
+ thread_local_.after_break_target_ =
+ Assembler::target_address_at(addr, *original_code);
} else {
// There is no longer a break point present. Don't try to look in the
// original code as the running code will have the right address. This takes
// care of the case where the last break point is removed from the function
// and therefore no "original code" is available.
- thread_local_.after_break_target_ = Assembler::target_address_at(addr);
+ thread_local_.after_break_target_ =
+ Assembler::target_address_at(addr, *code);
}
}
@@ -2594,6 +2600,7 @@ Handle<Object> Debugger::MakeJSObject(Vector<const char> constructor_name,
// Create the execution state object.
Handle<String> constructor_str =
isolate_->factory()->InternalizeUtf8String(constructor_name);
+ ASSERT(!constructor_str.is_null());
Handle<Object> constructor(
isolate_->global_object()->GetPropertyNoExceptionThrown(*constructor_str),
isolate_);
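
The debug.cc hunks above extend the pc remapping in RedirectActivationsToRecompiledCodeOnThread so that veneer pools are skipped alongside constant pools. The accounting they perform can be distilled into one helper, built only from the RelocInfo calls visible above:

    // Bytes of constant-pool and veneer-pool data emitted before `pc` in `code`.
    static int PoolBytesBefore(Code* code, Address pc) {
      int pool_mask = RelocInfo::ModeMask(RelocInfo::CONST_POOL) |
                      RelocInfo::ModeMask(RelocInfo::VENEER_POOL);
      int pool_bytes = 0;
      for (RelocIterator it(code, pool_mask); !it.done(); it.next()) {
        RelocInfo* info = it.rinfo();
        if (info->pc() >= pc) break;
        // For both pool kinds the pool size is encoded in the reloc data.
        pool_bytes += static_cast<int>(info->data());
      }
      return pool_bytes;
    }
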
diff --git a/deps/v8/src/deoptimizer.cc b/deps/v8/src/deoptimizer.cc
index 29575d8c00..4d5e60573d 100644
--- a/deps/v8/src/deoptimizer.cc
+++ b/deps/v8/src/deoptimizer.cc
@@ -342,7 +342,6 @@ void Deoptimizer::DeoptimizeMarkedCodeForContext(Context* context) {
// Unlink this function and evict from optimized code map.
SharedFunctionInfo* shared = function->shared();
function->set_code(shared->code());
- shared->EvictFromOptimizedCodeMap(code, "deoptimized function");
if (FLAG_trace_deopt) {
CodeTracer::Scope scope(code->GetHeap()->isolate()->GetCodeTracer());
@@ -358,9 +357,41 @@ void Deoptimizer::DeoptimizeMarkedCodeForContext(Context* context) {
SelectedCodeUnlinker unlinker;
VisitAllOptimizedFunctionsForContext(context, &unlinker);
+ Isolate* isolate = context->GetHeap()->isolate();
+#ifdef DEBUG
+ Code* topmost_optimized_code = NULL;
+ bool safe_to_deopt_topmost_optimized_code = false;
+ // Make sure all activations of optimized code can deopt at their current PC.
+ // The topmost optimized code has special handling because it cannot be
+ // deoptimized due to weak object dependency.
+ for (StackFrameIterator it(isolate, isolate->thread_local_top());
+ !it.done(); it.Advance()) {
+ StackFrame::Type type = it.frame()->type();
+ if (type == StackFrame::OPTIMIZED) {
+ Code* code = it.frame()->LookupCode();
+ if (FLAG_trace_deopt) {
+ JSFunction* function =
+ static_cast<OptimizedFrame*>(it.frame())->function();
+ CodeTracer::Scope scope(isolate->GetCodeTracer());
+ PrintF(scope.file(), "[deoptimizer found activation of function: ");
+ function->PrintName(scope.file());
+ PrintF(scope.file(),
+ " / %" V8PRIxPTR "]\n", reinterpret_cast<intptr_t>(function));
+ }
+ SafepointEntry safepoint = code->GetSafepointEntry(it.frame()->pc());
+ int deopt_index = safepoint.deoptimization_index();
+ bool safe_to_deopt = deopt_index != Safepoint::kNoDeoptimizationIndex;
+ CHECK(topmost_optimized_code == NULL || safe_to_deopt);
+ if (topmost_optimized_code == NULL) {
+ topmost_optimized_code = code;
+ safe_to_deopt_topmost_optimized_code = safe_to_deopt;
+ }
+ }
+ }
+#endif
+
// Move marked code from the optimized code list to the deoptimized
// code list, collecting them into a ZoneList.
- Isolate* isolate = context->GetHeap()->isolate();
Zone zone(isolate);
ZoneList<Code*> codes(10, &zone);
@@ -393,35 +424,17 @@ void Deoptimizer::DeoptimizeMarkedCodeForContext(Context* context) {
element = next;
}
-#ifdef DEBUG
- // Make sure all activations of optimized code can deopt at their current PC.
- for (StackFrameIterator it(isolate, isolate->thread_local_top());
- !it.done(); it.Advance()) {
- StackFrame::Type type = it.frame()->type();
- if (type == StackFrame::OPTIMIZED) {
- Code* code = it.frame()->LookupCode();
- if (FLAG_trace_deopt) {
- JSFunction* function =
- static_cast<OptimizedFrame*>(it.frame())->function();
- CodeTracer::Scope scope(isolate->GetCodeTracer());
- PrintF(scope.file(), "[deoptimizer patches for lazy deopt: ");
- function->PrintName(scope.file());
- PrintF(scope.file(),
- " / %" V8PRIxPTR "]\n", reinterpret_cast<intptr_t>(function));
- }
- SafepointEntry safepoint = code->GetSafepointEntry(it.frame()->pc());
- int deopt_index = safepoint.deoptimization_index();
- CHECK(deopt_index != Safepoint::kNoDeoptimizationIndex);
- }
- }
-#endif
-
// TODO(titzer): we need a handle scope only because of the macro assembler,
// which is only used in EnsureCodeForDeoptimizationEntry.
HandleScope scope(isolate);
// Now patch all the codes for deoptimization.
for (int i = 0; i < codes.length(); i++) {
+#ifdef DEBUG
+ if (codes[i] == topmost_optimized_code) {
+ ASSERT(safe_to_deopt_topmost_optimized_code);
+ }
+#endif
// It is finally time to die, code object.
// Do platform-specific patching to force any activations to lazy deopt.
PatchCodeForDeoptimization(isolate, codes[i]);
@@ -755,6 +768,12 @@ void Deoptimizer::DoComputeOutputFrames() {
LOG(isolate(), CodeDeoptEvent(compiled_code_));
}
ElapsedTimer timer;
+
+ // Determine basic deoptimization information. The optimized frame is
+ // described by the input data.
+ DeoptimizationInputData* input_data =
+ DeoptimizationInputData::cast(compiled_code_->deoptimization_data());
+
if (trace_scope_ != NULL) {
timer.Start();
PrintF(trace_scope_->file(),
@@ -763,7 +782,8 @@ void Deoptimizer::DoComputeOutputFrames() {
reinterpret_cast<intptr_t>(function_));
PrintFunctionName();
PrintF(trace_scope_->file(),
- " @%d, FP to SP delta: %d]\n",
+ " (opt #%d) @%d, FP to SP delta: %d]\n",
+ input_data->OptimizationId()->value(),
bailout_id_,
fp_to_sp_delta_);
if (bailout_type_ == EAGER || bailout_type_ == SOFT) {
@@ -771,10 +791,6 @@ void Deoptimizer::DoComputeOutputFrames() {
}
}
- // Determine basic deoptimization information. The optimized frame is
- // described by the input data.
- DeoptimizationInputData* input_data =
- DeoptimizationInputData::cast(compiled_code_->deoptimization_data());
BailoutId node_id = input_data->AstId(bailout_id_);
ByteArray* translations = input_data->TranslationByteArray();
unsigned translation_index =
@@ -990,24 +1006,19 @@ void Deoptimizer::DoComputeJSFrame(TranslationIterator* iterator,
if (FLAG_enable_ool_constant_pool) {
// For the bottommost output frame the constant pool pointer can be gotten
- // from the input frame. For subsequent output frames, it can be gotten from
- // the function's code.
- Register constant_pool_reg =
- JavaScriptFrame::constant_pool_pointer_register();
+ // from the input frame. For subsequent output frames, it can be read from
+ // the previous frame.
output_offset -= kPointerSize;
input_offset -= kPointerSize;
if (is_bottommost) {
value = input_->GetFrameSlot(input_offset);
} else {
- value = reinterpret_cast<intptr_t>(
- function->shared()->code()->constant_pool());
+ value = output_[frame_index - 1]->GetConstantPool();
}
- output_frame->SetFrameSlot(output_offset, value);
- output_frame->SetConstantPool(value);
- if (is_topmost) output_frame->SetRegister(constant_pool_reg.code(), value);
+ output_frame->SetCallerConstantPool(output_offset, value);
if (trace_scope_) {
PrintF(" 0x%08" V8PRIxPTR ": [top + %d] <- 0x%08"
- V8PRIxPTR "; constant_pool\n",
+ V8PRIxPTR "; caller's constant_pool\n",
top_address + output_offset, output_offset, value);
}
}
@@ -1065,6 +1076,18 @@ void Deoptimizer::DoComputeJSFrame(TranslationIterator* iterator,
intptr_t pc_value = reinterpret_cast<intptr_t>(start + pc_offset);
output_frame->SetPc(pc_value);
+ // Update constant pool.
+ if (FLAG_enable_ool_constant_pool) {
+ intptr_t constant_pool_value =
+ reinterpret_cast<intptr_t>(non_optimized_code->constant_pool());
+ output_frame->SetConstantPool(constant_pool_value);
+ if (is_topmost) {
+ Register constant_pool_reg =
+ JavaScriptFrame::constant_pool_pointer_register();
+ output_frame->SetRegister(constant_pool_reg.code(), constant_pool_value);
+ }
+ }
+
FullCodeGenerator::State state =
FullCodeGenerator::StateField::decode(pc_and_state);
output_frame->SetState(Smi::FromInt(state));
@@ -1148,15 +1171,14 @@ void Deoptimizer::DoComputeArgumentsAdaptorFrame(TranslationIterator* iterator,
}
if (FLAG_enable_ool_constant_pool) {
- // A marker value is used in place of the constant pool.
+ // Read the caller's constant pool from the previous frame.
output_offset -= kPointerSize;
- intptr_t constant_pool = reinterpret_cast<intptr_t>(
- Smi::FromInt(StackFrame::ARGUMENTS_ADAPTOR));
- output_frame->SetFrameSlot(output_offset, constant_pool);
+ value = output_[frame_index - 1]->GetConstantPool();
+ output_frame->SetCallerConstantPool(output_offset, value);
if (trace_scope_) {
PrintF(" 0x%08" V8PRIxPTR ": [top + %d] <- 0x%08"
- V8PRIxPTR " ; constant_pool (adaptor sentinel)\n",
- top_address + output_offset, output_offset, constant_pool);
+ V8PRIxPTR "; caller's constant_pool\n",
+ top_address + output_offset, output_offset, value);
}
}
@@ -1203,6 +1225,11 @@ void Deoptimizer::DoComputeArgumentsAdaptorFrame(TranslationIterator* iterator,
adaptor_trampoline->instruction_start() +
isolate_->heap()->arguments_adaptor_deopt_pc_offset()->value());
output_frame->SetPc(pc_value);
+ if (FLAG_enable_ool_constant_pool) {
+ intptr_t constant_pool_value =
+ reinterpret_cast<intptr_t>(adaptor_trampoline->constant_pool());
+ output_frame->SetConstantPool(constant_pool_value);
+ }
}
@@ -1278,13 +1305,13 @@ void Deoptimizer::DoComputeConstructStubFrame(TranslationIterator* iterator,
}
if (FLAG_enable_ool_constant_pool) {
- // The constant pool pointer can be gotten from the previous frame.
+ // Read the caller's constant pool from the previous frame.
output_offset -= kPointerSize;
value = output_[frame_index - 1]->GetConstantPool();
- output_frame->SetFrameSlot(output_offset, value);
+ output_frame->SetCallerConstantPool(output_offset, value);
if (trace_scope_) {
PrintF(" 0x%08" V8PRIxPTR ": [top + %d] <- 0x%08"
- V8PRIxPTR " ; constant pool\n",
+ V8PRIxPTR " ; caller's constant pool\n",
top_address + output_offset, output_offset, value);
}
}
@@ -1365,6 +1392,11 @@ void Deoptimizer::DoComputeConstructStubFrame(TranslationIterator* iterator,
construct_stub->instruction_start() +
isolate_->heap()->construct_stub_deopt_pc_offset()->value());
output_frame->SetPc(pc);
+ if (FLAG_enable_ool_constant_pool) {
+ intptr_t constant_pool_value =
+ reinterpret_cast<intptr_t>(construct_stub->constant_pool());
+ output_frame->SetConstantPool(constant_pool_value);
+ }
}
@@ -1436,13 +1468,13 @@ void Deoptimizer::DoComputeAccessorStubFrame(TranslationIterator* iterator,
}
if (FLAG_enable_ool_constant_pool) {
- // The constant pool pointer can be gotten from the previous frame.
+ // Read the caller's constant pool from the previous frame.
output_offset -= kPointerSize;
value = output_[frame_index - 1]->GetConstantPool();
- output_frame->SetFrameSlot(output_offset, value);
+ output_frame->SetCallerConstantPool(output_offset, value);
if (trace_scope_) {
PrintF(" 0x%08" V8PRIxPTR ": [top + %d] <- 0x%08"
- V8PRIxPTR " ; constant pool\n",
+ V8PRIxPTR " ; caller's constant pool\n",
top_address + output_offset, output_offset, value);
}
}
@@ -1504,6 +1536,11 @@ void Deoptimizer::DoComputeAccessorStubFrame(TranslationIterator* iterator,
intptr_t pc = reinterpret_cast<intptr_t>(
accessor_stub->instruction_start() + offset->value());
output_frame->SetPc(pc);
+ if (FLAG_enable_ool_constant_pool) {
+ intptr_t constant_pool_value =
+ reinterpret_cast<intptr_t>(accessor_stub->constant_pool());
+ output_frame->SetConstantPool(constant_pool_value);
+ }
}
@@ -1607,17 +1644,14 @@ void Deoptimizer::DoComputeCompiledStubFrame(TranslationIterator* iterator,
}
if (FLAG_enable_ool_constant_pool) {
- // The constant pool pointer can be gotten from the input frame.
- Register constant_pool_pointer_register =
- StubFailureTrampolineFrame::constant_pool_pointer_register();
+ // Read the caller's constant pool from the input frame.
input_frame_offset -= kPointerSize;
value = input_->GetFrameSlot(input_frame_offset);
- output_frame->SetRegister(constant_pool_pointer_register.code(), value);
output_frame_offset -= kPointerSize;
- output_frame->SetFrameSlot(output_frame_offset, value);
+ output_frame->SetCallerConstantPool(output_frame_offset, value);
if (trace_scope_) {
PrintF(" 0x%08" V8PRIxPTR ": [top + %d] <- 0x%08"
- V8PRIxPTR " ; constant_pool_pointer\n",
+ V8PRIxPTR " ; caller's constant_pool\n",
top_address + output_frame_offset, output_frame_offset, value);
}
}
@@ -1751,6 +1785,14 @@ void Deoptimizer::DoComputeCompiledStubFrame(TranslationIterator* iterator,
ASSERT(trampoline != NULL);
output_frame->SetPc(reinterpret_cast<intptr_t>(
trampoline->instruction_start()));
+ if (FLAG_enable_ool_constant_pool) {
+ Register constant_pool_reg =
+ StubFailureTrampolineFrame::constant_pool_pointer_register();
+ intptr_t constant_pool_value =
+ reinterpret_cast<intptr_t>(trampoline->constant_pool());
+ output_frame->SetConstantPool(constant_pool_value);
+ output_frame->SetRegister(constant_pool_reg.code(), constant_pool_value);
+ }
output_frame->SetState(Smi::FromInt(FullCodeGenerator::NO_REGISTERS));
Code* notify_failure = NotifyStubFailureBuiltin();
output_frame->SetContinuation(
@@ -2716,6 +2758,9 @@ FrameDescription::FrameDescription(uint32_t frame_size,
constant_pool_(kZapUint32) {
// Zap all the registers.
for (int r = 0; r < Register::kNumRegisters; r++) {
+ // TODO(jbramley): It isn't safe to use kZapUint32 here. If the register
+ // isn't used before the next safepoint, the GC will try to scan it as a
+ // tagged value. kZapUint32 looks like a valid tagged pointer, but it isn't.
SetRegister(r, kZapUint32);
}
@@ -3306,6 +3351,13 @@ Handle<Object> SlotRefValueBuilder::GetNext(Isolate* isolate, int lvl) {
// tagged and skip materializing the HeapNumber explicitly.
Handle<Object> object = GetNext(isolate, lvl + 1);
materialized_objects_.Add(object);
+ // On 32-bit architectures, there is an extra slot there because
+ // the escape analysis calculates the number of slots as
+ // object-size/pointer-size. To account for this, we read out
+ // any extra slots.
+ for (int i = 0; i < length - 2; i++) {
+ GetNext(isolate, lvl + 1);
+ }
return object;
}
case JS_OBJECT_TYPE: {
@@ -3360,7 +3412,7 @@ Handle<Object> SlotRefValueBuilder::GetNext(Isolate* isolate, int lvl) {
void SlotRefValueBuilder::Finish(Isolate* isolate) {
- // We should have processed all slot
+ // We should have processed all the slots
ASSERT(slot_refs_.length() == current_slot_);
if (materialized_objects_.length() > prev_materialized_count_) {
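
With out-of-line constant pools, each DoCompute*Frame method above now writes two distinct pool values: the caller's pool into the frame slot (read from the input frame for the bottommost frame, otherwise from the previously built output frame) and the frame's own pool taken from the code it resumes in. A condensed sketch of that pattern, written as it would sit inside DoComputeJSFrame and reusing that method's locals rather than standing alone:

    if (FLAG_enable_ool_constant_pool) {
      // 1. The caller's pool goes into the frame slot, read either from the
      //    input frame (bottommost) or from the previously built output frame.
      intptr_t caller_pool = is_bottommost
          ? input_->GetFrameSlot(input_offset)
          : output_[frame_index - 1]->GetConstantPool();
      output_frame->SetCallerConstantPool(output_offset, caller_pool);

      // 2. The frame's own pool comes from the code it resumes in; the topmost
      //    frame also needs it materialized in the constant pool register.
      intptr_t own_pool =
          reinterpret_cast<intptr_t>(non_optimized_code->constant_pool());
      output_frame->SetConstantPool(own_pool);
      if (is_topmost) {
        output_frame->SetRegister(
            JavaScriptFrame::constant_pool_pointer_register().code(), own_pool);
      }
    }
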
diff --git a/deps/v8/src/deoptimizer.h b/deps/v8/src/deoptimizer.h
index 67690ded0d..a36362fc93 100644
--- a/deps/v8/src/deoptimizer.h
+++ b/deps/v8/src/deoptimizer.h
@@ -134,7 +134,7 @@ class Deoptimizer : public Malloced {
static const int kBailoutTypesWithCodeEntry = SOFT + 1;
- struct JumpTableEntry {
+ struct JumpTableEntry : public ZoneObject {
inline JumpTableEntry(Address entry,
Deoptimizer::BailoutType type,
bool frame)
@@ -508,6 +508,8 @@ class FrameDescription {
void SetCallerFp(unsigned offset, intptr_t value);
+ void SetCallerConstantPool(unsigned offset, intptr_t value);
+
intptr_t GetRegister(unsigned n) const {
#if DEBUG
// This convoluted ASSERT is needed to work around a gcc problem that
diff --git a/deps/v8/src/disassembler.cc b/deps/v8/src/disassembler.cc
index f02d43ad8a..2af64228c6 100644
--- a/deps/v8/src/disassembler.cc
+++ b/deps/v8/src/disassembler.cc
@@ -200,7 +200,7 @@ static int DecodeIt(Isolate* isolate,
// Print all the reloc info for this instruction which are not comments.
for (int i = 0; i < pcs.length(); i++) {
// Put together the reloc info
- RelocInfo relocinfo(pcs[i], rmodes[i], datas[i], NULL);
+ RelocInfo relocinfo(pcs[i], rmodes[i], datas[i], converter.code());
// Indent the printing of the reloc info.
if (i == 0) {
diff --git a/deps/v8/src/elements-kind.cc b/deps/v8/src/elements-kind.cc
index d2abb0442a..ff458e0ea1 100644
--- a/deps/v8/src/elements-kind.cc
+++ b/deps/v8/src/elements-kind.cc
@@ -66,7 +66,7 @@ int ElementsKindToShiftSize(ElementsKind elements_kind) {
case FAST_HOLEY_SMI_ELEMENTS:
case FAST_HOLEY_ELEMENTS:
case DICTIONARY_ELEMENTS:
- case NON_STRICT_ARGUMENTS_ELEMENTS:
+ case SLOPPY_ARGUMENTS_ELEMENTS:
return kPointerSizeLog2;
}
UNREACHABLE();
@@ -142,14 +142,27 @@ int GetSequenceIndexFromFastElementsKind(ElementsKind elements_kind) {
}
+ElementsKind GetNextTransitionElementsKind(ElementsKind kind) {
+ switch (kind) {
+#define FIXED_TYPED_ARRAY_CASE(Type, type, TYPE, ctype, size) \
+ case TYPE##_ELEMENTS: return EXTERNAL_##TYPE##_ELEMENTS;
+
+ TYPED_ARRAYS(FIXED_TYPED_ARRAY_CASE)
+#undef FIXED_TYPED_ARRAY_CASE
+ default: {
+ int index = GetSequenceIndexFromFastElementsKind(kind);
+ return GetFastElementsKindFromSequenceIndex(index + 1);
+ }
+ }
+}
+
+
ElementsKind GetNextMoreGeneralFastElementsKind(ElementsKind elements_kind,
bool allow_only_packed) {
ASSERT(IsFastElementsKind(elements_kind));
ASSERT(elements_kind != TERMINAL_FAST_ELEMENTS_KIND);
while (true) {
- int index =
- GetSequenceIndexFromFastElementsKind(elements_kind) + 1;
- elements_kind = GetFastElementsKindFromSequenceIndex(index);
+ elements_kind = GetNextTransitionElementsKind(elements_kind);
if (!IsFastHoleyElementsKind(elements_kind) || !allow_only_packed) {
return elements_kind;
}
diff --git a/deps/v8/src/elements-kind.h b/deps/v8/src/elements-kind.h
index 5a3f00dcce..d2605e8b06 100644
--- a/deps/v8/src/elements-kind.h
+++ b/deps/v8/src/elements-kind.h
@@ -51,7 +51,7 @@ enum ElementsKind {
// The "slow" kind.
DICTIONARY_ELEMENTS,
- NON_STRICT_ARGUMENTS_ELEMENTS,
+ SLOPPY_ARGUMENTS_ELEMENTS,
// The "fast" kind for external arrays
EXTERNAL_INT8_ELEMENTS,
EXTERNAL_UINT8_ELEMENTS,
@@ -100,10 +100,10 @@ void PrintElementsKind(FILE* out, ElementsKind kind);
ElementsKind GetInitialFastElementsKind();
-ElementsKind GetFastElementsKindFromSequenceIndex(int sequence_index);
-
+ElementsKind GetFastElementsKindFromSequenceIndex(int sequence_number);
int GetSequenceIndexFromFastElementsKind(ElementsKind elements_kind);
+ElementsKind GetNextTransitionElementsKind(ElementsKind elements_kind);
inline bool IsDictionaryElementsKind(ElementsKind kind) {
return kind == DICTIONARY_ELEMENTS;
@@ -116,6 +116,12 @@ inline bool IsExternalArrayElementsKind(ElementsKind kind) {
}
+inline bool IsTerminalElementsKind(ElementsKind kind) {
+ return kind == TERMINAL_FAST_ELEMENTS_KIND ||
+ IsExternalArrayElementsKind(kind);
+}
+
+
inline bool IsFixedTypedArrayElementsKind(ElementsKind kind) {
return kind >= FIRST_FIXED_TYPED_ARRAY_ELEMENTS_KIND &&
kind <= LAST_FIXED_TYPED_ARRAY_ELEMENTS_KIND;
@@ -128,6 +134,11 @@ inline bool IsFastElementsKind(ElementsKind kind) {
}
+inline bool IsTransitionElementsKind(ElementsKind kind) {
+ return IsFastElementsKind(kind) || IsFixedTypedArrayElementsKind(kind);
+}
+
+
inline bool IsFastDoubleElementsKind(ElementsKind kind) {
return kind == FAST_DOUBLE_ELEMENTS ||
kind == FAST_HOLEY_DOUBLE_ELEMENTS;
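
The new IsTerminalElementsKind / IsTransitionElementsKind predicates and GetNextTransitionElementsKind extend the transition walk beyond the fast kinds: fixed typed-array kinds now step to their external counterparts. A self-contained toy model (its own mini enum, deliberately not V8's) illustrating that rule:

    #include <cassert>

    enum ToyKind {
      TOY_FAST_SMI, TOY_FAST_HOLEY_SMI, TOY_FAST, TOY_FAST_HOLEY,  // fast sequence
      TOY_FIXED_INT8, TOY_EXTERNAL_INT8                            // typed-array pair
    };

    static bool ToyIsTerminal(ToyKind k) {
      // Mirrors IsTerminalElementsKind: last fast kind or an external kind.
      return k == TOY_FAST_HOLEY || k == TOY_EXTERNAL_INT8;
    }

    static ToyKind ToyNextTransition(ToyKind k) {
      assert(!ToyIsTerminal(k));
      // Mirrors GetNextTransitionElementsKind: typed-array kind -> external kind,
      // otherwise advance one step along the fast sequence.
      if (k == TOY_FIXED_INT8) return TOY_EXTERNAL_INT8;
      return static_cast<ToyKind>(k + 1);
    }

Walking ToyNextTransition from any non-terminal kind reaches a terminal kind in finitely many steps, which is the invariant the loop in GetNextMoreGeneralFastElementsKind above depends on.
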
diff --git a/deps/v8/src/elements.cc b/deps/v8/src/elements.cc
index 2e4667d4a0..0624a03621 100644
--- a/deps/v8/src/elements.cc
+++ b/deps/v8/src/elements.cc
@@ -68,7 +68,7 @@
// - FixedFloat64ElementsAccessor
// - FixedUint8ClampedElementsAccessor
// - DictionaryElementsAccessor
-// - NonStrictArgumentsElementsAccessor
+// - SloppyArgumentsElementsAccessor
namespace v8 {
@@ -95,7 +95,7 @@ static const int kPackedSizeNotKnown = -1;
FixedDoubleArray) \
V(DictionaryElementsAccessor, DICTIONARY_ELEMENTS, \
SeededNumberDictionary) \
- V(NonStrictArgumentsElementsAccessor, NON_STRICT_ARGUMENTS_ELEMENTS, \
+ V(SloppyArgumentsElementsAccessor, SLOPPY_ARGUMENTS_ELEMENTS, \
FixedArray) \
V(ExternalInt8ElementsAccessor, EXTERNAL_INT8_ELEMENTS, \
ExternalInt8Array) \
@@ -160,18 +160,18 @@ static bool HasKey(FixedArray* array, Object* key) {
}
-static Failure* ThrowArrayLengthRangeError(Heap* heap) {
- HandleScope scope(heap->isolate());
- return heap->isolate()->Throw(
- *heap->isolate()->factory()->NewRangeError("invalid_array_length",
- HandleVector<Object>(NULL, 0)));
+static Handle<Object> ThrowArrayLengthRangeError(Isolate* isolate) {
+ isolate->Throw(
+ *isolate->factory()->NewRangeError("invalid_array_length",
+ HandleVector<Object>(NULL, 0)));
+ return Handle<Object>();
}
-static void CopyObjectToObjectElements(FixedArrayBase* from_base,
+static void CopyObjectToObjectElements(Handle<FixedArrayBase> from_base,
ElementsKind from_kind,
uint32_t from_start,
- FixedArrayBase* to_base,
+ Handle<FixedArrayBase> to_base,
ElementsKind to_kind,
uint32_t to_start,
int raw_copy_size) {
@@ -189,7 +189,7 @@ static void CopyObjectToObjectElements(FixedArrayBase* from_base,
int length = to_base->length() - start;
if (length > 0) {
Heap* heap = from_base->GetHeap();
- MemsetPointer(FixedArray::cast(to_base)->data_start() + start,
+ MemsetPointer(Handle<FixedArray>::cast(to_base)->data_start() + start,
heap->the_hole_value(), length);
}
}
@@ -197,8 +197,8 @@ static void CopyObjectToObjectElements(FixedArrayBase* from_base,
ASSERT((copy_size + static_cast<int>(to_start)) <= to_base->length() &&
(copy_size + static_cast<int>(from_start)) <= from_base->length());
if (copy_size == 0) return;
- FixedArray* from = FixedArray::cast(from_base);
- FixedArray* to = FixedArray::cast(to_base);
+ Handle<FixedArray> from = Handle<FixedArray>::cast(from_base);
+ Handle<FixedArray> to = Handle<FixedArray>::cast(to_base);
ASSERT(IsFastSmiOrObjectElementsKind(from_kind));
ASSERT(IsFastSmiOrObjectElementsKind(to_kind));
Address to_address = to->address() + FixedArray::kHeaderSize;
@@ -209,23 +209,24 @@ static void CopyObjectToObjectElements(FixedArrayBase* from_base,
if (IsFastObjectElementsKind(from_kind) &&
IsFastObjectElementsKind(to_kind)) {
Heap* heap = from->GetHeap();
- if (!heap->InNewSpace(to)) {
+ if (!heap->InNewSpace(*to)) {
heap->RecordWrites(to->address(),
to->OffsetOfElementAt(to_start),
copy_size);
}
- heap->incremental_marking()->RecordWrites(to);
+ heap->incremental_marking()->RecordWrites(*to);
}
}
-static void CopyDictionaryToObjectElements(FixedArrayBase* from_base,
+static void CopyDictionaryToObjectElements(Handle<FixedArrayBase> from_base,
uint32_t from_start,
- FixedArrayBase* to_base,
+ Handle<FixedArrayBase> to_base,
ElementsKind to_kind,
uint32_t to_start,
int raw_copy_size) {
- SeededNumberDictionary* from = SeededNumberDictionary::cast(from_base);
+ Handle<SeededNumberDictionary> from =
+ Handle<SeededNumberDictionary>::cast(from_base);
DisallowHeapAllocation no_allocation;
int copy_size = raw_copy_size;
Heap* heap = from->GetHeap();
@@ -238,15 +239,15 @@ static void CopyDictionaryToObjectElements(FixedArrayBase* from_base,
int length = to_base->length() - start;
if (length > 0) {
Heap* heap = from->GetHeap();
- MemsetPointer(FixedArray::cast(to_base)->data_start() + start,
+ MemsetPointer(Handle<FixedArray>::cast(to_base)->data_start() + start,
heap->the_hole_value(), length);
}
}
}
- ASSERT(to_base != from_base);
+ ASSERT(*to_base != *from_base);
ASSERT(IsFastSmiOrObjectElementsKind(to_kind));
if (copy_size == 0) return;
- FixedArray* to = FixedArray::cast(to_base);
+ Handle<FixedArray> to = Handle<FixedArray>::cast(to_base);
uint32_t to_length = to->length();
if (to_start + copy_size > to_length) {
copy_size = to_length - to_start;
@@ -262,23 +263,22 @@ static void CopyDictionaryToObjectElements(FixedArrayBase* from_base,
}
}
if (IsFastObjectElementsKind(to_kind)) {
- if (!heap->InNewSpace(to)) {
+ if (!heap->InNewSpace(*to)) {
heap->RecordWrites(to->address(),
to->OffsetOfElementAt(to_start),
copy_size);
}
- heap->incremental_marking()->RecordWrites(to);
+ heap->incremental_marking()->RecordWrites(*to);
}
}
-MUST_USE_RESULT static MaybeObject* CopyDoubleToObjectElements(
- FixedArrayBase* from_base,
- uint32_t from_start,
- FixedArrayBase* to_base,
- ElementsKind to_kind,
- uint32_t to_start,
- int raw_copy_size) {
+static void CopyDoubleToObjectElements(Handle<FixedArrayBase> from_base,
+ uint32_t from_start,
+ Handle<FixedArrayBase> to_base,
+ ElementsKind to_kind,
+ uint32_t to_start,
+ int raw_copy_size) {
ASSERT(IsFastSmiOrObjectElementsKind(to_kind));
int copy_size = raw_copy_size;
if (raw_copy_size < 0) {
@@ -294,49 +294,35 @@ MUST_USE_RESULT static MaybeObject* CopyDoubleToObjectElements(
int length = to_base->length() - start;
if (length > 0) {
Heap* heap = from_base->GetHeap();
- MemsetPointer(FixedArray::cast(to_base)->data_start() + start,
+ MemsetPointer(Handle<FixedArray>::cast(to_base)->data_start() + start,
heap->the_hole_value(), length);
}
}
}
ASSERT((copy_size + static_cast<int>(to_start)) <= to_base->length() &&
(copy_size + static_cast<int>(from_start)) <= from_base->length());
- if (copy_size == 0) return from_base;
- FixedDoubleArray* from = FixedDoubleArray::cast(from_base);
- FixedArray* to = FixedArray::cast(to_base);
+ if (copy_size == 0) return;
+ Handle<FixedDoubleArray> from = Handle<FixedDoubleArray>::cast(from_base);
+ Handle<FixedArray> to = Handle<FixedArray>::cast(to_base);
for (int i = 0; i < copy_size; ++i) {
+ HandleScope scope(from_base->GetIsolate());
if (IsFastSmiElementsKind(to_kind)) {
UNIMPLEMENTED();
- return Failure::Exception();
} else {
- MaybeObject* maybe_value = from->get(i + from_start);
- Object* value;
ASSERT(IsFastObjectElementsKind(to_kind));
- // Because Double -> Object elements transitions allocate HeapObjects
- // iteratively, the allocate must succeed within a single GC cycle,
- // otherwise the retry after the GC will also fail. In order to ensure
- // that no GC is triggered, allocate HeapNumbers from old space if they
- // can't be taken from new space.
- if (!maybe_value->ToObject(&value)) {
- ASSERT(maybe_value->IsRetryAfterGC() || maybe_value->IsOutOfMemory());
- Heap* heap = from->GetHeap();
- MaybeObject* maybe_value_object =
- heap->AllocateHeapNumber(from->get_scalar(i + from_start),
- TENURED);
- if (!maybe_value_object->ToObject(&value)) return maybe_value_object;
- }
- to->set(i + to_start, value, UPDATE_WRITE_BARRIER);
+ Handle<Object> value = from->get_as_handle(i + from_start);
+ to->set(i + to_start, *value, UPDATE_WRITE_BARRIER);
}
}
- return to;
}
-static void CopyDoubleToDoubleElements(FixedArrayBase* from_base,
+static void CopyDoubleToDoubleElements(Handle<FixedArrayBase> from_base,
uint32_t from_start,
- FixedArrayBase* to_base,
+ Handle<FixedArrayBase> to_base,
uint32_t to_start,
int raw_copy_size) {
+ DisallowHeapAllocation no_allocation;
int copy_size = raw_copy_size;
if (raw_copy_size < 0) {
ASSERT(raw_copy_size == ElementsAccessor::kCopyToEnd ||
@@ -345,15 +331,15 @@ static void CopyDoubleToDoubleElements(FixedArrayBase* from_base,
to_base->length() - to_start);
if (raw_copy_size == ElementsAccessor::kCopyToEndAndInitializeToHole) {
for (int i = to_start + copy_size; i < to_base->length(); ++i) {
- FixedDoubleArray::cast(to_base)->set_the_hole(i);
+ Handle<FixedDoubleArray>::cast(to_base)->set_the_hole(i);
}
}
}
ASSERT((copy_size + static_cast<int>(to_start)) <= to_base->length() &&
(copy_size + static_cast<int>(from_start)) <= from_base->length());
if (copy_size == 0) return;
- FixedDoubleArray* from = FixedDoubleArray::cast(from_base);
- FixedDoubleArray* to = FixedDoubleArray::cast(to_base);
+ Handle<FixedDoubleArray> from = Handle<FixedDoubleArray>::cast(from_base);
+ Handle<FixedDoubleArray> to = Handle<FixedDoubleArray>::cast(to_base);
Address to_address = to->address() + FixedDoubleArray::kHeaderSize;
Address from_address = from->address() + FixedDoubleArray::kHeaderSize;
to_address += kDoubleSize * to_start;
@@ -365,11 +351,12 @@ static void CopyDoubleToDoubleElements(FixedArrayBase* from_base,
}
-static void CopySmiToDoubleElements(FixedArrayBase* from_base,
+static void CopySmiToDoubleElements(Handle<FixedArrayBase> from_base,
uint32_t from_start,
- FixedArrayBase* to_base,
+ Handle<FixedArrayBase> to_base,
uint32_t to_start,
int raw_copy_size) {
+ DisallowHeapAllocation no_allocation;
int copy_size = raw_copy_size;
if (raw_copy_size < 0) {
ASSERT(raw_copy_size == ElementsAccessor::kCopyToEnd ||
@@ -377,20 +364,20 @@ static void CopySmiToDoubleElements(FixedArrayBase* from_base,
copy_size = from_base->length() - from_start;
if (raw_copy_size == ElementsAccessor::kCopyToEndAndInitializeToHole) {
for (int i = to_start + copy_size; i < to_base->length(); ++i) {
- FixedDoubleArray::cast(to_base)->set_the_hole(i);
+ Handle<FixedDoubleArray>::cast(to_base)->set_the_hole(i);
}
}
}
ASSERT((copy_size + static_cast<int>(to_start)) <= to_base->length() &&
(copy_size + static_cast<int>(from_start)) <= from_base->length());
if (copy_size == 0) return;
- FixedArray* from = FixedArray::cast(from_base);
- FixedDoubleArray* to = FixedDoubleArray::cast(to_base);
- Object* the_hole = from->GetHeap()->the_hole_value();
+ Handle<FixedArray> from = Handle<FixedArray>::cast(from_base);
+ Handle<FixedDoubleArray> to = Handle<FixedDoubleArray>::cast(to_base);
+ Handle<Object> the_hole = from->GetIsolate()->factory()->the_hole_value();
for (uint32_t from_end = from_start + static_cast<uint32_t>(copy_size);
from_start < from_end; from_start++, to_start++) {
Object* hole_or_smi = from->get(from_start);
- if (hole_or_smi == the_hole) {
+ if (hole_or_smi == *the_hole) {
to->set_the_hole(to_start);
} else {
to->set(to_start, Smi::cast(hole_or_smi)->value());
@@ -399,12 +386,13 @@ static void CopySmiToDoubleElements(FixedArrayBase* from_base,
}
-static void CopyPackedSmiToDoubleElements(FixedArrayBase* from_base,
+static void CopyPackedSmiToDoubleElements(Handle<FixedArrayBase> from_base,
uint32_t from_start,
- FixedArrayBase* to_base,
+ Handle<FixedArrayBase> to_base,
uint32_t to_start,
int packed_size,
int raw_copy_size) {
+ DisallowHeapAllocation no_allocation;
int copy_size = raw_copy_size;
uint32_t to_end;
if (raw_copy_size < 0) {
@@ -414,7 +402,7 @@ static void CopyPackedSmiToDoubleElements(FixedArrayBase* from_base,
if (raw_copy_size == ElementsAccessor::kCopyToEndAndInitializeToHole) {
to_end = to_base->length();
for (uint32_t i = to_start + copy_size; i < to_end; ++i) {
- FixedDoubleArray::cast(to_base)->set_the_hole(i);
+ Handle<FixedDoubleArray>::cast(to_base)->set_the_hole(i);
}
} else {
to_end = to_start + static_cast<uint32_t>(copy_size);
@@ -427,8 +415,8 @@ static void CopyPackedSmiToDoubleElements(FixedArrayBase* from_base,
ASSERT((copy_size + static_cast<int>(to_start)) <= to_base->length() &&
(copy_size + static_cast<int>(from_start)) <= from_base->length());
if (copy_size == 0) return;
- FixedArray* from = FixedArray::cast(from_base);
- FixedDoubleArray* to = FixedDoubleArray::cast(to_base);
+ Handle<FixedArray> from = Handle<FixedArray>::cast(from_base);
+ Handle<FixedDoubleArray> to = Handle<FixedDoubleArray>::cast(to_base);
for (uint32_t from_end = from_start + static_cast<uint32_t>(packed_size);
from_start < from_end; from_start++, to_start++) {
Object* smi = from->get(from_start);
@@ -438,11 +426,12 @@ static void CopyPackedSmiToDoubleElements(FixedArrayBase* from_base,
}
-static void CopyObjectToDoubleElements(FixedArrayBase* from_base,
+static void CopyObjectToDoubleElements(Handle<FixedArrayBase> from_base,
uint32_t from_start,
- FixedArrayBase* to_base,
+ Handle<FixedArrayBase> to_base,
uint32_t to_start,
int raw_copy_size) {
+ DisallowHeapAllocation no_allocation;
int copy_size = raw_copy_size;
if (raw_copy_size < 0) {
ASSERT(raw_copy_size == ElementsAccessor::kCopyToEnd ||
@@ -450,20 +439,20 @@ static void CopyObjectToDoubleElements(FixedArrayBase* from_base,
copy_size = from_base->length() - from_start;
if (raw_copy_size == ElementsAccessor::kCopyToEndAndInitializeToHole) {
for (int i = to_start + copy_size; i < to_base->length(); ++i) {
- FixedDoubleArray::cast(to_base)->set_the_hole(i);
+ Handle<FixedDoubleArray>::cast(to_base)->set_the_hole(i);
}
}
}
ASSERT((copy_size + static_cast<int>(to_start)) <= to_base->length() &&
(copy_size + static_cast<int>(from_start)) <= from_base->length());
if (copy_size == 0) return;
- FixedArray* from = FixedArray::cast(from_base);
- FixedDoubleArray* to = FixedDoubleArray::cast(to_base);
- Object* the_hole = from->GetHeap()->the_hole_value();
+ Handle<FixedArray> from = Handle<FixedArray>::cast(from_base);
+ Handle<FixedDoubleArray> to = Handle<FixedDoubleArray>::cast(to_base);
+ Handle<Object> the_hole = from->GetIsolate()->factory()->the_hole_value();
for (uint32_t from_end = from_start + copy_size;
from_start < from_end; from_start++, to_start++) {
Object* hole_or_object = from->get(from_start);
- if (hole_or_object == the_hole) {
+ if (hole_or_object == *the_hole) {
to->set_the_hole(to_start);
} else {
to->set(to_start, hole_or_object->Number());
@@ -472,12 +461,14 @@ static void CopyObjectToDoubleElements(FixedArrayBase* from_base,
}
-static void CopyDictionaryToDoubleElements(FixedArrayBase* from_base,
+static void CopyDictionaryToDoubleElements(Handle<FixedArrayBase> from_base,
uint32_t from_start,
- FixedArrayBase* to_base,
+ Handle<FixedArrayBase> to_base,
uint32_t to_start,
int raw_copy_size) {
- SeededNumberDictionary* from = SeededNumberDictionary::cast(from_base);
+ Handle<SeededNumberDictionary> from =
+ Handle<SeededNumberDictionary>::cast(from_base);
+ DisallowHeapAllocation no_allocation;
int copy_size = raw_copy_size;
if (copy_size < 0) {
ASSERT(copy_size == ElementsAccessor::kCopyToEnd ||
@@ -485,12 +476,12 @@ static void CopyDictionaryToDoubleElements(FixedArrayBase* from_base,
copy_size = from->max_number_key() + 1 - from_start;
if (raw_copy_size == ElementsAccessor::kCopyToEndAndInitializeToHole) {
for (int i = to_start + copy_size; i < to_base->length(); ++i) {
- FixedDoubleArray::cast(to_base)->set_the_hole(i);
+ Handle<FixedDoubleArray>::cast(to_base)->set_the_hole(i);
}
}
}
if (copy_size == 0) return;
- FixedDoubleArray* to = FixedDoubleArray::cast(to_base);
+ Handle<FixedDoubleArray> to = Handle<FixedDoubleArray>::cast(to_base);
uint32_t to_length = to->length();
if (to_start + copy_size > to_length) {
copy_size = to_length - to_start;
@@ -592,7 +583,9 @@ class ElementsAccessorBase : public ElementsAccessor {
typedef ElementsTraitsParam ElementsTraits;
typedef typename ElementsTraitsParam::BackingStore BackingStore;
- virtual ElementsKind kind() const { return ElementsTraits::Kind; }
+ virtual ElementsKind kind() const V8_FINAL V8_OVERRIDE {
+ return ElementsTraits::Kind;
+ }
static void ValidateContents(JSObject* holder, int length) {
}
@@ -616,7 +609,7 @@ class ElementsAccessorBase : public ElementsAccessor {
ElementsAccessorSubclass::ValidateContents(holder, length);
}
- virtual void Validate(JSObject* holder) {
+ virtual void Validate(JSObject* holder) V8_FINAL V8_OVERRIDE {
ElementsAccessorSubclass::ValidateImpl(holder);
}
@@ -631,7 +624,7 @@ class ElementsAccessorBase : public ElementsAccessor {
virtual bool HasElement(Object* receiver,
JSObject* holder,
uint32_t key,
- FixedArrayBase* backing_store) {
+ FixedArrayBase* backing_store) V8_FINAL V8_OVERRIDE {
if (backing_store == NULL) {
backing_store = holder->elements();
}
@@ -639,10 +632,24 @@ class ElementsAccessorBase : public ElementsAccessor {
receiver, holder, key, backing_store);
}
- MUST_USE_RESULT virtual MaybeObject* Get(Object* receiver,
- JSObject* holder,
- uint32_t key,
- FixedArrayBase* backing_store) {
+ // TODO(ishell): Temporary wrapper until handlified.
+ MUST_USE_RESULT virtual Handle<Object> Get(
+ Handle<Object> receiver,
+ Handle<JSObject> holder,
+ uint32_t key,
+ Handle<FixedArrayBase> backing_store) V8_FINAL V8_OVERRIDE {
+ CALL_HEAP_FUNCTION(holder->GetIsolate(),
+ Get(*receiver, *holder, key,
+ backing_store.is_null()
+ ? NULL : *backing_store),
+ Object);
+ }
+
+ MUST_USE_RESULT virtual MaybeObject* Get(
+ Object* receiver,
+ JSObject* holder,
+ uint32_t key,
+ FixedArrayBase* backing_store) V8_FINAL V8_OVERRIDE {
if (backing_store == NULL) {
backing_store = holder->elements();
}
@@ -674,7 +681,7 @@ class ElementsAccessorBase : public ElementsAccessor {
Object* receiver,
JSObject* holder,
uint32_t key,
- FixedArrayBase* backing_store) {
+ FixedArrayBase* backing_store) V8_FINAL V8_OVERRIDE {
if (backing_store == NULL) {
backing_store = holder->elements();
}
@@ -697,7 +704,7 @@ class ElementsAccessorBase : public ElementsAccessor {
Object* receiver,
JSObject* holder,
uint32_t key,
- FixedArrayBase* backing_store) {
+ FixedArrayBase* backing_store) V8_FINAL V8_OVERRIDE {
if (backing_store == NULL) {
backing_store = holder->elements();
}
@@ -721,7 +728,7 @@ class ElementsAccessorBase : public ElementsAccessor {
Object* receiver,
JSObject* holder,
uint32_t key,
- FixedArrayBase* backing_store) {
+ FixedArrayBase* backing_store) V8_FINAL V8_OVERRIDE {
if (backing_store == NULL) {
backing_store = holder->elements();
}
@@ -737,73 +744,73 @@ class ElementsAccessorBase : public ElementsAccessor {
return NULL;
}
- MUST_USE_RESULT virtual MaybeObject* SetLength(JSArray* array,
- Object* length) {
+ MUST_USE_RESULT virtual Handle<Object> SetLength(
+ Handle<JSArray> array,
+ Handle<Object> length) V8_FINAL V8_OVERRIDE {
return ElementsAccessorSubclass::SetLengthImpl(
- array, length, array->elements());
+ array, length, handle(array->elements()));
}
- MUST_USE_RESULT static MaybeObject* SetLengthImpl(
- JSObject* obj,
- Object* length,
- FixedArrayBase* backing_store);
+ MUST_USE_RESULT static Handle<Object> SetLengthImpl(
+ Handle<JSObject> obj,
+ Handle<Object> length,
+ Handle<FixedArrayBase> backing_store);
- MUST_USE_RESULT virtual MaybeObject* SetCapacityAndLength(
- JSArray* array,
+ virtual void SetCapacityAndLength(
+ Handle<JSArray> array,
int capacity,
- int length) {
- return ElementsAccessorSubclass::SetFastElementsCapacityAndLength(
- array,
- capacity,
- length);
+ int length) V8_FINAL V8_OVERRIDE {
+ ElementsAccessorSubclass::
+ SetFastElementsCapacityAndLength(array, capacity, length);
}
- MUST_USE_RESULT static MaybeObject* SetFastElementsCapacityAndLength(
- JSObject* obj,
+ static void SetFastElementsCapacityAndLength(
+ Handle<JSObject> obj,
int capacity,
int length) {
UNIMPLEMENTED();
- return obj;
}
- MUST_USE_RESULT virtual MaybeObject* Delete(JSObject* obj,
- uint32_t key,
- JSReceiver::DeleteMode mode) = 0;
-
- MUST_USE_RESULT static MaybeObject* CopyElementsImpl(FixedArrayBase* from,
- uint32_t from_start,
- FixedArrayBase* to,
- ElementsKind from_kind,
- uint32_t to_start,
- int packed_size,
- int copy_size) {
+ MUST_USE_RESULT virtual Handle<Object> Delete(
+ Handle<JSObject> obj,
+ uint32_t key,
+ JSReceiver::DeleteMode mode) V8_OVERRIDE = 0;
+
+ static void CopyElementsImpl(Handle<FixedArrayBase> from,
+ uint32_t from_start,
+ Handle<FixedArrayBase> to,
+ ElementsKind from_kind,
+ uint32_t to_start,
+ int packed_size,
+ int copy_size) {
UNREACHABLE();
- return NULL;
}
- MUST_USE_RESULT virtual MaybeObject* CopyElements(JSObject* from_holder,
- uint32_t from_start,
- ElementsKind from_kind,
- FixedArrayBase* to,
- uint32_t to_start,
- int copy_size,
- FixedArrayBase* from) {
+ virtual void CopyElements(
+ Handle<JSObject> from_holder,
+ uint32_t from_start,
+ ElementsKind from_kind,
+ Handle<FixedArrayBase> to,
+ uint32_t to_start,
+ int copy_size,
+ Handle<FixedArrayBase> from) V8_FINAL V8_OVERRIDE {
int packed_size = kPackedSizeNotKnown;
- if (from == NULL) {
- from = from_holder->elements();
+ if (from.is_null()) {
+ from = handle(from_holder->elements());
}
- if (from_holder) {
+ if (!from_holder.is_null()) {
bool is_packed = IsFastPackedElementsKind(from_kind) &&
from_holder->IsJSArray();
if (is_packed) {
- packed_size = Smi::cast(JSArray::cast(from_holder)->length())->value();
+ packed_size =
+ Smi::cast(Handle<JSArray>::cast(from_holder)->length())->value();
if (copy_size >= 0 && packed_size > copy_size) {
packed_size = copy_size;
}
}
}
- return ElementsAccessorSubclass::CopyElementsImpl(
+ ElementsAccessorSubclass::CopyElementsImpl(
from, from_start, to, from_kind, to_start, packed_size, copy_size);
}
@@ -811,7 +818,7 @@ class ElementsAccessorBase : public ElementsAccessor {
Object* receiver,
JSObject* holder,
FixedArray* to,
- FixedArrayBase* from) {
+ FixedArrayBase* from) V8_FINAL V8_OVERRIDE {
int len0 = to->length();
#ifdef ENABLE_SLOW_ASSERTS
if (FLAG_enable_slow_asserts) {
@@ -889,7 +896,8 @@ class ElementsAccessorBase : public ElementsAccessor {
return backing_store->length();
}
- virtual uint32_t GetCapacity(FixedArrayBase* backing_store) {
+ virtual uint32_t GetCapacity(FixedArrayBase* backing_store)
+ V8_FINAL V8_OVERRIDE {
return ElementsAccessorSubclass::GetCapacityImpl(backing_store);
}
@@ -899,7 +907,7 @@ class ElementsAccessorBase : public ElementsAccessor {
}
virtual uint32_t GetKeyForIndex(FixedArrayBase* backing_store,
- uint32_t index) {
+ uint32_t index) V8_FINAL V8_OVERRIDE {
return ElementsAccessorSubclass::GetKeyForIndexImpl(backing_store, index);
}
@@ -920,34 +928,34 @@ class FastElementsAccessor
KindTraits>(name) {}
protected:
friend class ElementsAccessorBase<FastElementsAccessorSubclass, KindTraits>;
- friend class NonStrictArgumentsElementsAccessor;
+ friend class SloppyArgumentsElementsAccessor;
typedef typename KindTraits::BackingStore BackingStore;
// Adjusts the length of the fast backing store or returns the new length or
// undefined in case conversion to a slow backing store should be performed.
- static MaybeObject* SetLengthWithoutNormalize(FixedArrayBase* backing_store,
- JSArray* array,
- Object* length_object,
- uint32_t length) {
+ static Handle<Object> SetLengthWithoutNormalize(
+ Handle<FixedArrayBase> backing_store,
+ Handle<JSArray> array,
+ Handle<Object> length_object,
+ uint32_t length) {
+ Isolate* isolate = array->GetIsolate();
uint32_t old_capacity = backing_store->length();
- Object* old_length = array->length();
+ Handle<Object> old_length(array->length(), isolate);
bool same_or_smaller_size = old_length->IsSmi() &&
- static_cast<uint32_t>(Smi::cast(old_length)->value()) >= length;
+ static_cast<uint32_t>(Handle<Smi>::cast(old_length)->value()) >= length;
ElementsKind kind = array->GetElementsKind();
if (!same_or_smaller_size && IsFastElementsKind(kind) &&
!IsFastHoleyElementsKind(kind)) {
kind = GetHoleyElementsKind(kind);
- MaybeObject* maybe_obj = array->TransitionElementsKind(kind);
- if (maybe_obj->IsFailure()) return maybe_obj;
+ JSObject::TransitionElementsKind(array, kind);
}
// Check whether the backing store should be shrunk.
if (length <= old_capacity) {
if (array->HasFastSmiOrObjectElements()) {
- MaybeObject* maybe_obj = array->EnsureWritableFastElements();
- if (!maybe_obj->To(&backing_store)) return maybe_obj;
+ backing_store = JSObject::EnsureWritableFastElements(array);
}
if (2 * length <= old_capacity) {
// If more than half the elements won't be used, trim the array.
@@ -964,7 +972,7 @@ class FastElementsAccessor
// Otherwise, fill the unused tail with holes.
int old_length = FastD2IChecked(array->length()->Number());
for (int i = length; i < old_length; i++) {
- BackingStore::cast(backing_store)->set_the_hole(i);
+ Handle<BackingStore>::cast(backing_store)->set_the_hole(i);
}
}
return length_object;
@@ -974,53 +982,48 @@ class FastElementsAccessor
uint32_t min = JSObject::NewElementsCapacity(old_capacity);
uint32_t new_capacity = length > min ? length : min;
if (!array->ShouldConvertToSlowElements(new_capacity)) {
- MaybeObject* result = FastElementsAccessorSubclass::
+ FastElementsAccessorSubclass::
SetFastElementsCapacityAndLength(array, new_capacity, length);
- if (result->IsFailure()) return result;
array->ValidateElements();
return length_object;
}
// Request conversion to slow elements.
- return array->GetHeap()->undefined_value();
+ return isolate->factory()->undefined_value();
}
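// ---------------------------------------------------------------------------
// Editorial sketch, not part of the upstream patch. SetLengthWithoutNormalize
// above shrinks or grows a fast backing store: if the new length fits the
// current capacity, the store is trimmed only when more than half of it would
// go unused, otherwise the tail is merely hole-filled; if it does not fit,
// capacity grows unless the array should fall back to slow (dictionary)
// elements. Self-contained model; the growth policy and the 1024 cutoff are
// illustrative stand-ins, not V8's real constants.
#include <algorithm>
#include <cstddef>
#include <optional>
#include <vector>

using Backing = std::vector<std::optional<double>>;  // nullopt models the hole

// Returns true if the array stays fast, false to request dictionary elements.
bool SetFastLength(Backing* store, size_t old_length, size_t new_length,
                   size_t max_fast_capacity = 1024) {
  size_t old_capacity = store->size();
  if (new_length <= old_capacity) {
    if (2 * new_length <= old_capacity) {
      store->resize(new_length);                 // trim: most slots unused
    } else {
      for (size_t i = new_length; i < old_length; ++i) {
        (*store)[i] = std::nullopt;              // hole-fill the unused tail
      }
    }
    return true;
  }
  size_t new_capacity =
      std::max(new_length, old_capacity + old_capacity / 2);  // illustrative
  if (new_capacity > max_fast_capacity) return false;  // request slow elements
  store->resize(new_capacity, std::nullopt);
  return true;
}
// ---------------------------------------------------------------------------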
- static MaybeObject* DeleteCommon(JSObject* obj,
- uint32_t key,
- JSReceiver::DeleteMode mode) {
+ static Handle<Object> DeleteCommon(Handle<JSObject> obj,
+ uint32_t key,
+ JSReceiver::DeleteMode mode) {
ASSERT(obj->HasFastSmiOrObjectElements() ||
obj->HasFastDoubleElements() ||
obj->HasFastArgumentsElements());
+ Isolate* isolate = obj->GetIsolate();
Heap* heap = obj->GetHeap();
- Object* elements = obj->elements();
- if (elements == heap->empty_fixed_array()) {
- return heap->true_value();
- }
- typename KindTraits::BackingStore* backing_store =
- KindTraits::BackingStore::cast(elements);
- bool is_non_strict_arguments_elements_map =
- backing_store->map() == heap->non_strict_arguments_elements_map();
- if (is_non_strict_arguments_elements_map) {
- backing_store = KindTraits::BackingStore::cast(
- FixedArray::cast(backing_store)->get(1));
+ Handle<FixedArrayBase> elements(obj->elements());
+ if (*elements == heap->empty_fixed_array()) {
+ return isolate->factory()->true_value();
+ }
+ Handle<BackingStore> backing_store = Handle<BackingStore>::cast(elements);
+ bool is_sloppy_arguments_elements_map =
+ backing_store->map() == heap->sloppy_arguments_elements_map();
+ if (is_sloppy_arguments_elements_map) {
+ backing_store = handle(
+ BackingStore::cast(Handle<FixedArray>::cast(backing_store)->get(1)));
}
uint32_t length = static_cast<uint32_t>(
obj->IsJSArray()
- ? Smi::cast(JSArray::cast(obj)->length())->value()
+ ? Smi::cast(Handle<JSArray>::cast(obj)->length())->value()
: backing_store->length());
if (key < length) {
- if (!is_non_strict_arguments_elements_map) {
+ if (!is_sloppy_arguments_elements_map) {
ElementsKind kind = KindTraits::Kind;
if (IsFastPackedElementsKind(kind)) {
- MaybeObject* transitioned =
- obj->TransitionElementsKind(GetHoleyElementsKind(kind));
- if (transitioned->IsFailure()) return transitioned;
+ JSObject::TransitionElementsKind(obj, GetHoleyElementsKind(kind));
}
if (IsFastSmiOrObjectElementsKind(KindTraits::Kind)) {
- Object* writable;
- MaybeObject* maybe = obj->EnsureWritableFastElements();
- if (!maybe->ToObject(&writable)) return maybe;
- backing_store = KindTraits::BackingStore::cast(writable);
+ Handle<Object> writable = JSObject::EnsureWritableFastElements(obj);
+ backing_store = Handle<BackingStore>::cast(writable);
}
}
backing_store->set_the_hole(key);
@@ -1030,7 +1033,7 @@ class FastElementsAccessor
// one adjacent hole to the value being deleted.
const int kMinLengthForSparsenessCheck = 64;
if (backing_store->length() >= kMinLengthForSparsenessCheck &&
- !heap->InNewSpace(backing_store) &&
+ !heap->InNewSpace(*backing_store) &&
((key > 0 && backing_store->is_the_hole(key - 1)) ||
(key + 1 < length && backing_store->is_the_hole(key + 1)))) {
int num_used = 0;
@@ -1040,17 +1043,17 @@ class FastElementsAccessor
if (4 * num_used > backing_store->length()) break;
}
if (4 * num_used <= backing_store->length()) {
- MaybeObject* result = obj->NormalizeElements();
- if (result->IsFailure()) return result;
+ JSObject::NormalizeElements(obj);
}
}
}
- return heap->true_value();
+ return isolate->factory()->true_value();
}
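// ---------------------------------------------------------------------------
// Editorial sketch, not part of the upstream patch. The tail of DeleteCommon
// decides whether a fast backing store has become so sparse after a delete
// that it is worth normalizing to dictionary elements: only stores of at
// least 64 entries are considered (the real code also skips new-space
// stores), the scan is only triggered when a neighbour of the deleted slot is
// already a hole, and normalization happens when at most a quarter of the
// slots are in use. Self-contained model of that heuristic:
#include <cstddef>
#include <optional>
#include <vector>

using Backing = std::vector<std::optional<double>>;  // nullopt models the hole

bool ShouldNormalizeAfterDelete(const Backing& store, size_t deleted_key) {
  const size_t kMinLengthForSparsenessCheck = 64;
  if (store.size() < kMinLengthForSparsenessCheck) return false;
  bool neighbour_is_hole =
      (deleted_key > 0 && !store[deleted_key - 1].has_value()) ||
      (deleted_key + 1 < store.size() && !store[deleted_key + 1].has_value());
  if (!neighbour_is_hole) return false;
  size_t num_used = 0;
  for (const auto& slot : store) {
    if (slot.has_value()) ++num_used;
    if (4 * num_used > store.size()) return false;   // dense enough, stop early
  }
  return 4 * num_used <= store.size();               // <= 25% used: normalize
}
// ---------------------------------------------------------------------------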
- virtual MaybeObject* Delete(JSObject* obj,
- uint32_t key,
- JSReceiver::DeleteMode mode) {
+ virtual Handle<Object> Delete(
+ Handle<JSObject> obj,
+ uint32_t key,
+ JSReceiver::DeleteMode mode) V8_FINAL V8_OVERRIDE {
return DeleteCommon(obj, key, mode);
}
@@ -1077,8 +1080,7 @@ class FastElementsAccessor
((map == heap->fixed_array_map() && length == 0) ||
map == heap->fixed_double_array_map())));
for (int i = 0; i < length; i++) {
- typename KindTraits::BackingStore* backing_store =
- KindTraits::BackingStore::cast(elements);
+ BackingStore* backing_store = BackingStore::cast(elements);
ASSERT((!IsFastSmiElementsKind(KindTraits::Kind) ||
static_cast<Object*>(backing_store->get(i))->IsSmi()) ||
(IsFastHoleyElementsKind(KindTraits::Kind) ==
@@ -1128,13 +1130,13 @@ class FastSmiOrObjectElementsAccessor
KindTraits,
kPointerSize>(name) {}
- static MaybeObject* CopyElementsImpl(FixedArrayBase* from,
- uint32_t from_start,
- FixedArrayBase* to,
- ElementsKind from_kind,
- uint32_t to_start,
- int packed_size,
- int copy_size) {
+ static void CopyElementsImpl(Handle<FixedArrayBase> from,
+ uint32_t from_start,
+ Handle<FixedArrayBase> to,
+ ElementsKind from_kind,
+ uint32_t to_start,
+ int packed_size,
+ int copy_size) {
ElementsKind to_kind = KindTraits::Kind;
switch (from_kind) {
case FAST_SMI_ELEMENTS:
@@ -1143,24 +1145,27 @@ class FastSmiOrObjectElementsAccessor
case FAST_HOLEY_ELEMENTS:
CopyObjectToObjectElements(
from, from_kind, from_start, to, to_kind, to_start, copy_size);
- return to->GetHeap()->undefined_value();
+ break;
case FAST_DOUBLE_ELEMENTS:
case FAST_HOLEY_DOUBLE_ELEMENTS:
- return CopyDoubleToObjectElements(
+ CopyDoubleToObjectElements(
from, from_start, to, to_kind, to_start, copy_size);
+ break;
case DICTIONARY_ELEMENTS:
CopyDictionaryToObjectElements(
from, from_start, to, to_kind, to_start, copy_size);
- return to->GetHeap()->undefined_value();
- case NON_STRICT_ARGUMENTS_ELEMENTS: {
+ break;
+ case SLOPPY_ARGUMENTS_ELEMENTS: {
// TODO(verwaest): This is a temporary hack to support extending
- // NON_STRICT_ARGUMENTS_ELEMENTS in SetFastElementsCapacityAndLength.
+ // SLOPPY_ARGUMENTS_ELEMENTS in SetFastElementsCapacityAndLength.
// This case should be UNREACHABLE().
- FixedArray* parameter_map = FixedArray::cast(from);
- FixedArrayBase* arguments = FixedArrayBase::cast(parameter_map->get(1));
- ElementsKind from_kind = ElementsKindForArray(arguments);
- return CopyElementsImpl(arguments, from_start, to, from_kind,
- to_start, packed_size, copy_size);
+ Handle<FixedArray> parameter_map = Handle<FixedArray>::cast(from);
+ Handle<FixedArrayBase> arguments(
+ FixedArrayBase::cast(parameter_map->get(1)));
+ ElementsKind from_kind = ElementsKindForArray(*arguments);
+ CopyElementsImpl(arguments, from_start, to, from_kind,
+ to_start, packed_size, copy_size);
+ break;
}
#define TYPED_ARRAY_CASE(Type, type, TYPE, ctype, size) \
case EXTERNAL_##TYPE##_ELEMENTS: \
@@ -1169,20 +1174,19 @@ class FastSmiOrObjectElementsAccessor
TYPED_ARRAYS(TYPED_ARRAY_CASE)
#undef TYPED_ARRAY_CASE
}
- return NULL;
}
- static MaybeObject* SetFastElementsCapacityAndLength(JSObject* obj,
- uint32_t capacity,
- uint32_t length) {
+ static void SetFastElementsCapacityAndLength(
+ Handle<JSObject> obj,
+ uint32_t capacity,
+ uint32_t length) {
JSObject::SetFastElementsCapacitySmiMode set_capacity_mode =
obj->HasFastSmiElements()
? JSObject::kAllowSmiElements
: JSObject::kDontAllowSmiElements;
- return obj->SetFastElementsCapacityAndLength(capacity,
- length,
- set_capacity_mode);
+ JSObject::SetFastElementsCapacityAndLength(
+ obj, capacity, length, set_capacity_mode);
}
};
@@ -1247,21 +1251,20 @@ class FastDoubleElementsAccessor
KindTraits,
kDoubleSize>(name) {}
- static MaybeObject* SetFastElementsCapacityAndLength(JSObject* obj,
- uint32_t capacity,
- uint32_t length) {
- return obj->SetFastDoubleElementsCapacityAndLength(capacity,
- length);
+ static void SetFastElementsCapacityAndLength(Handle<JSObject> obj,
+ uint32_t capacity,
+ uint32_t length) {
+ JSObject::SetFastDoubleElementsCapacityAndLength(obj, capacity, length);
}
protected:
- static MaybeObject* CopyElementsImpl(FixedArrayBase* from,
- uint32_t from_start,
- FixedArrayBase* to,
- ElementsKind from_kind,
- uint32_t to_start,
- int packed_size,
- int copy_size) {
+ static void CopyElementsImpl(Handle<FixedArrayBase> from,
+ uint32_t from_start,
+ Handle<FixedArrayBase> to,
+ ElementsKind from_kind,
+ uint32_t to_start,
+ int packed_size,
+ int copy_size) {
switch (from_kind) {
case FAST_SMI_ELEMENTS:
CopyPackedSmiToDoubleElements(
@@ -1282,7 +1285,7 @@ class FastDoubleElementsAccessor
CopyDictionaryToDoubleElements(
from, from_start, to, to_start, copy_size);
break;
- case NON_STRICT_ARGUMENTS_ELEMENTS:
+ case SLOPPY_ARGUMENTS_ELEMENTS:
UNREACHABLE();
#define TYPED_ARRAY_CASE(Type, type, TYPE, ctype, size) \
@@ -1292,7 +1295,6 @@ class FastDoubleElementsAccessor
TYPED_ARRAYS(TYPED_ARRAY_CASE)
#undef TYPED_ARRAY_CASE
}
- return to->GetHeap()->undefined_value();
}
};
@@ -1373,20 +1375,21 @@ class TypedElementsAccessor
? FIELD : NONEXISTENT;
}
- MUST_USE_RESULT static MaybeObject* SetLengthImpl(
- JSObject* obj,
- Object* length,
- FixedArrayBase* backing_store) {
+ MUST_USE_RESULT static Handle<Object> SetLengthImpl(
+ Handle<JSObject> obj,
+ Handle<Object> length,
+ Handle<FixedArrayBase> backing_store) {
// External arrays do not support changing their length.
UNREACHABLE();
return obj;
}
- MUST_USE_RESULT virtual MaybeObject* Delete(JSObject* obj,
- uint32_t key,
- JSReceiver::DeleteMode mode) {
+ MUST_USE_RESULT virtual Handle<Object> Delete(
+ Handle<JSObject> obj,
+ uint32_t key,
+ JSReceiver::DeleteMode mode) V8_FINAL V8_OVERRIDE {
// External arrays always ignore deletes.
- return obj->GetHeap()->true_value();
+ return obj->GetIsolate()->factory()->true_value();
}
static bool HasElementImpl(Object* receiver,
@@ -1484,6 +1487,18 @@ class DictionaryElementsAccessor
return length_object;
}
+ // TODO(ishell): Temporary wrapper until handlified.
+ MUST_USE_RESULT static Handle<Object> SetLengthWithoutNormalize(
+ Handle<FixedArrayBase> store,
+ Handle<JSArray> array,
+ Handle<Object> length_object,
+ uint32_t length) {
+ CALL_HEAP_FUNCTION(array->GetIsolate(),
+ SetLengthWithoutNormalize(
+ *store, *array, *length_object, length),
+ Object);
+ }
+
MUST_USE_RESULT static MaybeObject* DeleteCommon(
JSObject* obj,
uint32_t key,
@@ -1492,7 +1507,7 @@ class DictionaryElementsAccessor
Heap* heap = isolate->heap();
FixedArray* backing_store = FixedArray::cast(obj->elements());
bool is_arguments =
- (obj->GetElementsKind() == NON_STRICT_ARGUMENTS_ELEMENTS);
+ (obj->GetElementsKind() == SLOPPY_ARGUMENTS_ELEMENTS);
if (is_arguments) {
backing_store = FixedArray::cast(backing_store->get(1));
}
@@ -1529,15 +1544,24 @@ class DictionaryElementsAccessor
return heap->true_value();
}
- MUST_USE_RESULT static MaybeObject* CopyElementsImpl(FixedArrayBase* from,
- uint32_t from_start,
- FixedArrayBase* to,
- ElementsKind from_kind,
- uint32_t to_start,
- int packed_size,
- int copy_size) {
+ // TODO(ishell): Temporary wrapper until handlified.
+ MUST_USE_RESULT static Handle<Object> DeleteCommon(
+ Handle<JSObject> obj,
+ uint32_t key,
+ JSReceiver::DeleteMode mode) {
+ CALL_HEAP_FUNCTION(obj->GetIsolate(),
+ DeleteCommon(*obj, key, mode),
+ Object);
+ }
+
+ static void CopyElementsImpl(Handle<FixedArrayBase> from,
+ uint32_t from_start,
+ Handle<FixedArrayBase> to,
+ ElementsKind from_kind,
+ uint32_t to_start,
+ int packed_size,
+ int copy_size) {
UNREACHABLE();
- return NULL;
}
@@ -1545,9 +1569,10 @@ class DictionaryElementsAccessor
friend class ElementsAccessorBase<DictionaryElementsAccessor,
ElementsKindTraits<DICTIONARY_ELEMENTS> >;
- MUST_USE_RESULT virtual MaybeObject* Delete(JSObject* obj,
- uint32_t key,
- JSReceiver::DeleteMode mode) {
+ MUST_USE_RESULT virtual Handle<Object> Delete(
+ Handle<JSObject> obj,
+ uint32_t key,
+ JSReceiver::DeleteMode mode) V8_FINAL V8_OVERRIDE {
return DeleteCommon(obj, key, mode);
}
@@ -1632,18 +1657,18 @@ class DictionaryElementsAccessor
};
-class NonStrictArgumentsElementsAccessor : public ElementsAccessorBase<
- NonStrictArgumentsElementsAccessor,
- ElementsKindTraits<NON_STRICT_ARGUMENTS_ELEMENTS> > {
+class SloppyArgumentsElementsAccessor : public ElementsAccessorBase<
+ SloppyArgumentsElementsAccessor,
+ ElementsKindTraits<SLOPPY_ARGUMENTS_ELEMENTS> > {
public:
- explicit NonStrictArgumentsElementsAccessor(const char* name)
+ explicit SloppyArgumentsElementsAccessor(const char* name)
: ElementsAccessorBase<
- NonStrictArgumentsElementsAccessor,
- ElementsKindTraits<NON_STRICT_ARGUMENTS_ELEMENTS> >(name) {}
+ SloppyArgumentsElementsAccessor,
+ ElementsKindTraits<SLOPPY_ARGUMENTS_ELEMENTS> >(name) {}
protected:
friend class ElementsAccessorBase<
- NonStrictArgumentsElementsAccessor,
- ElementsKindTraits<NON_STRICT_ARGUMENTS_ELEMENTS> >;
+ SloppyArgumentsElementsAccessor,
+ ElementsKindTraits<SLOPPY_ARGUMENTS_ELEMENTS> >;
MUST_USE_RESULT static MaybeObject* GetImpl(Object* receiver,
JSObject* obj,
@@ -1727,28 +1752,30 @@ class NonStrictArgumentsElementsAccessor : public ElementsAccessorBase<
}
}
- MUST_USE_RESULT static MaybeObject* SetLengthImpl(
- JSObject* obj,
- Object* length,
- FixedArrayBase* parameter_map) {
+ MUST_USE_RESULT static Handle<Object> SetLengthImpl(
+ Handle<JSObject> obj,
+ Handle<Object> length,
+ Handle<FixedArrayBase> parameter_map) {
// TODO(mstarzinger): This was never implemented but will be used once we
// correctly implement [[DefineOwnProperty]] on arrays.
UNIMPLEMENTED();
return obj;
}
- MUST_USE_RESULT virtual MaybeObject* Delete(JSObject* obj,
- uint32_t key,
- JSReceiver::DeleteMode mode) {
- FixedArray* parameter_map = FixedArray::cast(obj->elements());
- Object* probe = GetParameterMapArg(obj, parameter_map, key);
+ MUST_USE_RESULT virtual Handle<Object> Delete(
+ Handle<JSObject> obj,
+ uint32_t key,
+ JSReceiver::DeleteMode mode) V8_FINAL V8_OVERRIDE {
+ Isolate* isolate = obj->GetIsolate();
+ Handle<FixedArray> parameter_map(FixedArray::cast(obj->elements()));
+ Handle<Object> probe = GetParameterMapArg(obj, parameter_map, key);
if (!probe->IsTheHole()) {
// TODO(kmillikin): We could check if this was the last aliased
// parameter, and revert to normal elements in that case. That
// would enable GC of the context.
parameter_map->set_the_hole(key + 2);
} else {
- FixedArray* arguments = FixedArray::cast(parameter_map->get(1));
+ Handle<FixedArray> arguments(FixedArray::cast(parameter_map->get(1)));
if (arguments->IsDictionary()) {
return DictionaryElementsAccessor::DeleteCommon(obj, key, mode);
} else {
@@ -1758,18 +1785,17 @@ class NonStrictArgumentsElementsAccessor : public ElementsAccessorBase<
return FastHoleyObjectElementsAccessor::DeleteCommon(obj, key, mode);
}
}
- return obj->GetHeap()->true_value();
+ return isolate->factory()->true_value();
}
- MUST_USE_RESULT static MaybeObject* CopyElementsImpl(FixedArrayBase* from,
- uint32_t from_start,
- FixedArrayBase* to,
- ElementsKind from_kind,
- uint32_t to_start,
- int packed_size,
- int copy_size) {
+ static void CopyElementsImpl(Handle<FixedArrayBase> from,
+ uint32_t from_start,
+ Handle<FixedArrayBase> to,
+ ElementsKind from_kind,
+ uint32_t to_start,
+ int packed_size,
+ int copy_size) {
UNREACHABLE();
- return NULL;
}
static uint32_t GetCapacityImpl(FixedArrayBase* backing_store) {
@@ -1801,6 +1827,7 @@ class NonStrictArgumentsElementsAccessor : public ElementsAccessorBase<
}
private:
+ // TODO(ishell): remove when all usages are handlified.
static Object* GetParameterMapArg(JSObject* holder,
FixedArray* parameter_map,
uint32_t key) {
@@ -1811,6 +1838,18 @@ class NonStrictArgumentsElementsAccessor : public ElementsAccessorBase<
? parameter_map->get(key + 2)
: parameter_map->GetHeap()->the_hole_value();
}
+
+ static Handle<Object> GetParameterMapArg(Handle<JSObject> holder,
+ Handle<FixedArray> parameter_map,
+ uint32_t key) {
+ Isolate* isolate = holder->GetIsolate();
+ uint32_t length = holder->IsJSArray()
+ ? Smi::cast(Handle<JSArray>::cast(holder)->length())->value()
+ : parameter_map->length();
+ return key < (length - 2)
+ ? handle(parameter_map->get(key + 2), isolate)
+ : Handle<Object>::cast(isolate->factory()->the_hole_value());
+ }
};
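// ---------------------------------------------------------------------------
// Editorial sketch, not part of the upstream patch. For
// SLOPPY_ARGUMENTS_ELEMENTS the elements array is a "parameter map": slot 0
// holds the context, slot 1 the real arguments backing store, and the mapped
// parameters start at slot 2 (hence the key + 2 offset above). A hole in the
// map means the argument is no longer aliased to a context variable and is
// read from the backing store instead. GetParameterMapArg itself only
// consults the map; this self-contained model folds in the callers' fall-back
// to the backing store, with illustrative types.
#include <cstddef>
#include <optional>
#include <vector>

struct ParameterMap {
  // The context slot (slot 0) is omitted; 'mapped' mirrors slots 2..N.
  std::vector<std::optional<int>> mapped;   // nullopt == the hole (unmapped)
  std::vector<int> arguments_store;         // slot 1: the plain backing store
};

std::optional<int> GetSloppyArgument(const ParameterMap& map, size_t key) {
  if (key < map.mapped.size() && map.mapped[key].has_value()) {
    return map.mapped[key];                 // aliased: read through the map
  }
  if (key < map.arguments_store.size()) {
    return map.arguments_store[key];        // unmapped: read the backing store
  }
  return std::nullopt;                      // absent element
}
// ---------------------------------------------------------------------------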
@@ -1842,30 +1881,39 @@ void ElementsAccessor::TearDown() {
template <typename ElementsAccessorSubclass, typename ElementsKindTraits>
-MUST_USE_RESULT MaybeObject* ElementsAccessorBase<ElementsAccessorSubclass,
- ElementsKindTraits>::
- SetLengthImpl(JSObject* obj,
- Object* length,
- FixedArrayBase* backing_store) {
- JSArray* array = JSArray::cast(obj);
+MUST_USE_RESULT Handle<Object> ElementsAccessorBase<ElementsAccessorSubclass,
+ ElementsKindTraits>::
+ SetLengthImpl(Handle<JSObject> obj,
+ Handle<Object> length,
+ Handle<FixedArrayBase> backing_store) {
+ Isolate* isolate = obj->GetIsolate();
+ Handle<JSArray> array = Handle<JSArray>::cast(obj);
// Fast case: The new length fits into a Smi.
- MaybeObject* maybe_smi_length = length->ToSmi();
- Object* smi_length = Smi::FromInt(0);
- if (maybe_smi_length->ToObject(&smi_length) && smi_length->IsSmi()) {
- const int value = Smi::cast(smi_length)->value();
+ Handle<Object> smi_length = Object::ToSmi(isolate, length);
+
+ if (!smi_length.is_null() && smi_length->IsSmi()) {
+ const int value = Handle<Smi>::cast(smi_length)->value();
if (value >= 0) {
- Object* new_length;
- MaybeObject* result = ElementsAccessorSubclass::
+ Handle<Object> new_length = ElementsAccessorSubclass::
SetLengthWithoutNormalize(backing_store, array, smi_length, value);
- if (!result->ToObject(&new_length)) return result;
- ASSERT(new_length->IsSmi() || new_length->IsUndefined());
+ RETURN_IF_EMPTY_HANDLE_VALUE(isolate, new_length, new_length);
+
+ // even though the proposed length was a smi, new_length could
+ // still be a heap number because SetLengthWithoutNormalize doesn't
+ // allow the array length property to drop below the index of
+ // non-deletable elements.
+ ASSERT(new_length->IsSmi() || new_length->IsHeapNumber() ||
+ new_length->IsUndefined());
if (new_length->IsSmi()) {
- array->set_length(Smi::cast(new_length));
+ array->set_length(*Handle<Smi>::cast(new_length));
+ return array;
+ } else if (new_length->IsHeapNumber()) {
+ array->set_length(*new_length);
return array;
}
} else {
- return ThrowArrayLengthRangeError(array->GetHeap());
+ return ThrowArrayLengthRangeError(isolate);
}
}
@@ -1874,97 +1922,89 @@ MUST_USE_RESULT MaybeObject* ElementsAccessorBase<ElementsAccessorSubclass,
if (length->IsNumber()) {
uint32_t value;
if (length->ToArrayIndex(&value)) {
- SeededNumberDictionary* dictionary;
- MaybeObject* maybe_object = array->NormalizeElements();
- if (!maybe_object->To(&dictionary)) return maybe_object;
- Object* new_length;
- MaybeObject* result = DictionaryElementsAccessor::
+ Handle<SeededNumberDictionary> dictionary =
+ JSObject::NormalizeElements(array);
+ RETURN_IF_EMPTY_HANDLE_VALUE(isolate, dictionary, dictionary);
+
+ Handle<Object> new_length = DictionaryElementsAccessor::
SetLengthWithoutNormalize(dictionary, array, length, value);
- if (!result->ToObject(&new_length)) return result;
+ RETURN_IF_EMPTY_HANDLE_VALUE(isolate, new_length, new_length);
+
ASSERT(new_length->IsNumber());
- array->set_length(new_length);
+ array->set_length(*new_length);
return array;
} else {
- return ThrowArrayLengthRangeError(array->GetHeap());
+ return ThrowArrayLengthRangeError(isolate);
}
}
// Fall-back case: The new length is not a number, so make the array
// size one and set its only element to the given value.
- FixedArray* new_backing_store;
- MaybeObject* maybe_obj = array->GetHeap()->AllocateFixedArray(1);
- if (!maybe_obj->To(&new_backing_store)) return maybe_obj;
- new_backing_store->set(0, length);
- { MaybeObject* result = array->SetContent(new_backing_store);
- if (result->IsFailure()) return result;
- }
+ Handle<FixedArray> new_backing_store = isolate->factory()->NewFixedArray(1);
+ new_backing_store->set(0, *length);
+ JSArray::SetContent(array, new_backing_store);
return array;
}
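// ---------------------------------------------------------------------------
// Editorial sketch, not part of the upstream patch. SetLengthImpl above
// distinguishes three shapes of "array.length = value": a value that fits a
// Smi takes the fast path (SetLengthWithoutNormalize), any other valid array
// index forces dictionary elements first, and a non-numeric value falls back
// to a one-element array holding that value. Out-of-range numbers raise the
// array-length RangeError. Self-contained classification of those cases; the
// Smi bound shown is the illustrative 32-bit value.
#include <cmath>
#include <optional>

enum class LengthPath { kFastSmi, kDictionary, kOneElementFallback, kRangeError };

LengthPath ClassifySetLength(std::optional<double> numeric_value) {
  if (!numeric_value.has_value()) return LengthPath::kOneElementFallback;
  double v = *numeric_value;
  if (v < 0 || v != std::floor(v) || v > 4294967295.0) {
    return LengthPath::kRangeError;           // not a valid array length
  }
  const double kSmiMax = 1073741823.0;        // illustrative Smi bound (2^30 - 1)
  return v <= kSmiMax ? LengthPath::kFastSmi : LengthPath::kDictionary;
}
// ---------------------------------------------------------------------------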
-MUST_USE_RESULT MaybeObject* ArrayConstructInitializeElements(
- JSArray* array, Arguments* args) {
- Heap* heap = array->GetIsolate()->heap();
-
+Handle<Object> ArrayConstructInitializeElements(Handle<JSArray> array,
+ Arguments* args) {
// Optimize the case where there is one argument and the argument is a
// small smi.
if (args->length() == 1) {
- Object* obj = (*args)[0];
+ Handle<Object> obj = args->at<Object>(0);
if (obj->IsSmi()) {
- int len = Smi::cast(obj)->value();
+ int len = Handle<Smi>::cast(obj)->value();
if (len > 0 && len < JSObject::kInitialMaxFastElementArray) {
ElementsKind elements_kind = array->GetElementsKind();
- MaybeObject* maybe_array = array->Initialize(len, len);
- if (maybe_array->IsFailure()) return maybe_array;
+ JSArray::Initialize(array, len, len);
if (!IsFastHoleyElementsKind(elements_kind)) {
elements_kind = GetHoleyElementsKind(elements_kind);
- maybe_array = array->TransitionElementsKind(elements_kind);
- if (maybe_array->IsFailure()) return maybe_array;
+ JSObject::TransitionElementsKind(array, elements_kind);
}
-
return array;
} else if (len == 0) {
- return array->Initialize(JSArray::kPreallocatedArrayElements);
+ JSArray::Initialize(array, JSArray::kPreallocatedArrayElements);
+ return array;
}
}
// Take the argument as the length.
- MaybeObject* maybe_obj = array->Initialize(0);
- if (!maybe_obj->To(&obj)) return maybe_obj;
+ JSArray::Initialize(array, 0);
- return array->SetElementsLength((*args)[0]);
+ return JSArray::SetElementsLength(array, obj);
}
// Optimize the case where there are no parameters passed.
if (args->length() == 0) {
- return array->Initialize(JSArray::kPreallocatedArrayElements);
+ JSArray::Initialize(array, JSArray::kPreallocatedArrayElements);
+ return array;
}
+ Factory* factory = array->GetIsolate()->factory();
+
// Set length and elements on the array.
int number_of_elements = args->length();
- MaybeObject* maybe_object =
- array->EnsureCanContainElements(args, 0, number_of_elements,
- ALLOW_CONVERTED_DOUBLE_ELEMENTS);
- if (maybe_object->IsFailure()) return maybe_object;
+ JSObject::EnsureCanContainElements(
+ array, args, 0, number_of_elements, ALLOW_CONVERTED_DOUBLE_ELEMENTS);
// Allocate an appropriately typed elements array.
- MaybeObject* maybe_elms;
ElementsKind elements_kind = array->GetElementsKind();
+ Handle<FixedArrayBase> elms;
if (IsFastDoubleElementsKind(elements_kind)) {
- maybe_elms = heap->AllocateUninitializedFixedDoubleArray(
- number_of_elements);
+ elms = Handle<FixedArrayBase>::cast(
+ factory->NewFixedDoubleArray(number_of_elements));
} else {
- maybe_elms = heap->AllocateFixedArrayWithHoles(number_of_elements);
+ elms = Handle<FixedArrayBase>::cast(
+ factory->NewFixedArrayWithHoles(number_of_elements));
}
- FixedArrayBase* elms;
- if (!maybe_elms->To(&elms)) return maybe_elms;
// Fill in the content
switch (array->GetElementsKind()) {
case FAST_HOLEY_SMI_ELEMENTS:
case FAST_SMI_ELEMENTS: {
- FixedArray* smi_elms = FixedArray::cast(elms);
+ Handle<FixedArray> smi_elms = Handle<FixedArray>::cast(elms);
for (int index = 0; index < number_of_elements; index++) {
smi_elms->set(index, (*args)[index], SKIP_WRITE_BARRIER);
}
@@ -1974,7 +2014,7 @@ MUST_USE_RESULT MaybeObject* ArrayConstructInitializeElements(
case FAST_ELEMENTS: {
DisallowHeapAllocation no_gc;
WriteBarrierMode mode = elms->GetWriteBarrierMode(no_gc);
- FixedArray* object_elms = FixedArray::cast(elms);
+ Handle<FixedArray> object_elms = Handle<FixedArray>::cast(elms);
for (int index = 0; index < number_of_elements; index++) {
object_elms->set(index, (*args)[index], mode);
}
@@ -1982,7 +2022,8 @@ MUST_USE_RESULT MaybeObject* ArrayConstructInitializeElements(
}
case FAST_HOLEY_DOUBLE_ELEMENTS:
case FAST_DOUBLE_ELEMENTS: {
- FixedDoubleArray* double_elms = FixedDoubleArray::cast(elms);
+ Handle<FixedDoubleArray> double_elms =
+ Handle<FixedDoubleArray>::cast(elms);
for (int index = 0; index < number_of_elements; index++) {
double_elms->set(index, (*args)[index]->Number());
}
@@ -1993,7 +2034,7 @@ MUST_USE_RESULT MaybeObject* ArrayConstructInitializeElements(
break;
}
- array->set_elements(elms);
+ array->set_elements(*elms);
array->set_length(Smi::FromInt(number_of_elements));
return array;
}
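// ---------------------------------------------------------------------------
// Editorial sketch, not part of the upstream patch.
// ArrayConstructInitializeElements above distinguishes three constructor
// shapes: no arguments (small preallocated store), exactly one small integer
// argument (interpreted as the length, elements become holey), and anything
// else (the arguments themselves become the elements). Self-contained model;
// the preallocation constant and the hole encoding are illustrative, and the
// real code additionally caps the one-argument fast path at
// kInitialMaxFastElementArray and routes bad lengths through the length
// setter (RangeError).
#include <cmath>
#include <cstddef>
#include <vector>

struct MiniArray {
  std::vector<double> elements;
  size_t length = 0;
};

void ConstructInitialize(MiniArray* array, const std::vector<double>& args) {
  const size_t kPreallocated = 4;      // stand-in for kPreallocatedArrayElements
  if (args.empty()) {                  // new Array()
    array->elements.reserve(kPreallocated);
    array->length = 0;
    return;
  }
  if (args.size() == 1) {
    double len = args[0];
    if (len >= 0 && len == std::floor(len)) {       // new Array(len)
      array->length = static_cast<size_t>(len);
      array->elements.assign(array->length, 0.0);   // model: 0.0 stands for the hole
      return;
    }
    // A single argument that is not a valid index falls through below.
  }
  array->elements = args;              // new Array(a, b, ...): args are elements
  array->length = args.size();
}
// ---------------------------------------------------------------------------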
diff --git a/deps/v8/src/elements.h b/deps/v8/src/elements.h
index 6353aaecf5..44644abd92 100644
--- a/deps/v8/src/elements.h
+++ b/deps/v8/src/elements.h
@@ -65,6 +65,13 @@ class ElementsAccessor {
// can optionally pass in the backing store to use for the check, which must
// be compatible with the ElementsKind of the ElementsAccessor. If
// backing_store is NULL, the holder->elements() is used as the backing store.
+ MUST_USE_RESULT virtual Handle<Object> Get(
+ Handle<Object> receiver,
+ Handle<JSObject> holder,
+ uint32_t key,
+ Handle<FixedArrayBase> backing_store =
+ Handle<FixedArrayBase>::null()) = 0;
+
MUST_USE_RESULT virtual MaybeObject* Get(
Object* receiver,
JSObject* holder,
@@ -109,8 +116,9 @@ class ElementsAccessor {
// changing array sizes as defined in EcmaScript 5.1 15.4.5.2, i.e. array that
// have non-deletable elements can only be shrunk to the size of highest
// element that is non-deletable.
- MUST_USE_RESULT virtual MaybeObject* SetLength(JSArray* holder,
- Object* new_length) = 0;
+ MUST_USE_RESULT virtual Handle<Object> SetLength(
+ Handle<JSArray> holder,
+ Handle<Object> new_length) = 0;
// Modifies both the length and capacity of a JSArray, resizing the underlying
// backing store as necessary. This method does NOT honor the semantics of
@@ -118,14 +126,16 @@ class ElementsAccessor {
// elements. This method should only be called for array expansion OR by
// runtime JavaScript code that uses InternalArrays and doesn't care about
// EcmaScript 5.1 semantics.
- MUST_USE_RESULT virtual MaybeObject* SetCapacityAndLength(JSArray* array,
- int capacity,
- int length) = 0;
+ virtual void SetCapacityAndLength(
+ Handle<JSArray> array,
+ int capacity,
+ int length) = 0;
// Deletes an element in an object, returning a new elements backing store.
- MUST_USE_RESULT virtual MaybeObject* Delete(JSObject* holder,
- uint32_t key,
- JSReceiver::DeleteMode mode) = 0;
+ MUST_USE_RESULT virtual Handle<Object> Delete(
+ Handle<JSObject> holder,
+ uint32_t key,
+ JSReceiver::DeleteMode mode) = 0;
// If kCopyToEnd is specified as the copy_size to CopyElements, it copies all
// of elements from source after source_start to the destination array.
@@ -140,21 +150,22 @@ class ElementsAccessor {
// the source JSObject or JSArray in source_holder. If the holder's backing
// store is available, it can be passed in source and source_holder is
// ignored.
- MUST_USE_RESULT virtual MaybeObject* CopyElements(
- JSObject* source_holder,
+ virtual void CopyElements(
+ Handle<JSObject> source_holder,
uint32_t source_start,
ElementsKind source_kind,
- FixedArrayBase* destination,
+ Handle<FixedArrayBase> destination,
uint32_t destination_start,
int copy_size,
- FixedArrayBase* source = NULL) = 0;
-
- MUST_USE_RESULT MaybeObject* CopyElements(JSObject* from_holder,
- FixedArrayBase* to,
- ElementsKind from_kind,
- FixedArrayBase* from = NULL) {
- return CopyElements(from_holder, 0, from_kind, to, 0,
- kCopyToEndAndInitializeToHole, from);
+ Handle<FixedArrayBase> source = Handle<FixedArrayBase>::null()) = 0;
+
+ void CopyElements(
+ Handle<JSObject> from_holder,
+ Handle<FixedArrayBase> to,
+ ElementsKind from_kind,
+ Handle<FixedArrayBase> from = Handle<FixedArrayBase>::null()) {
+ CopyElements(from_holder, 0, from_kind, to, 0,
+ kCopyToEndAndInitializeToHole, from);
}
MUST_USE_RESULT virtual MaybeObject* AddElementsToFixedArray(
@@ -175,7 +186,7 @@ class ElementsAccessor {
static void TearDown();
protected:
- friend class NonStrictArgumentsElementsAccessor;
+ friend class SloppyArgumentsElementsAccessor;
virtual uint32_t GetCapacity(FixedArrayBase* backing_store) = 0;
@@ -200,8 +211,8 @@ class ElementsAccessor {
void CheckArrayAbuse(JSObject* obj, const char* op, uint32_t key,
bool allow_appending = false);
-MUST_USE_RESULT MaybeObject* ArrayConstructInitializeElements(
- JSArray* array, Arguments* args);
+Handle<Object> ArrayConstructInitializeElements(Handle<JSArray> array,
+ Arguments* args);
} } // namespace v8::internal
diff --git a/deps/v8/src/execution.cc b/deps/v8/src/execution.cc
index da2d880a49..7442d1732f 100644
--- a/deps/v8/src/execution.cc
+++ b/deps/v8/src/execution.cc
@@ -77,6 +77,13 @@ static Handle<Object> Invoke(bool is_construct,
// Entering JavaScript.
VMState<JS> state(isolate);
+ CHECK(AllowJavascriptExecution::IsAllowed(isolate));
+ if (!ThrowOnJavascriptExecution::IsAllowed(isolate)) {
+ isolate->ThrowIllegalOperation();
+ *has_pending_exception = true;
+ isolate->ReportPendingMessages();
+ return Handle<Object>();
+ }
// Placeholder for return value.
MaybeObject* value = reinterpret_cast<Object*>(kZapValue);
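// ---------------------------------------------------------------------------
// Editorial sketch, not part of the upstream patch. The new guard added to
// Invoke() consults scope flags (AllowJavascriptExecution /
// ThrowOnJavascriptExecution) before entering JS. Such scopes are typically
// RAII objects that flip a per-isolate flag for their lifetime and restore it
// on destruction. Minimal self-contained model with illustrative names:
#include <cassert>

struct MiniIsolate { bool js_allowed = true; };

class NoJavascriptScope {            // illustrative, not the real class name
 public:
  explicit NoJavascriptScope(MiniIsolate* isolate)
      : isolate_(isolate), previous_(isolate->js_allowed) {
    isolate_->js_allowed = false;
  }
  ~NoJavascriptScope() { isolate_->js_allowed = previous_; }
 private:
  MiniIsolate* isolate_;
  bool previous_;
};

int main() {
  MiniIsolate isolate;
  {
    NoJavascriptScope no_js(&isolate);
    assert(!isolate.js_allowed);     // Invoke() would refuse or throw here
  }
  assert(isolate.js_allowed);        // restored when the scope ends
  return 0;
}
// ---------------------------------------------------------------------------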
@@ -128,11 +135,6 @@ static Handle<Object> Invoke(bool is_construct,
ASSERT(*has_pending_exception == isolate->has_pending_exception());
if (*has_pending_exception) {
isolate->ReportPendingMessages();
- if (isolate->pending_exception()->IsOutOfMemory()) {
- if (!isolate->ignore_out_of_memory()) {
- V8::FatalProcessOutOfMemory("JS", true);
- }
- }
#ifdef ENABLE_DEBUGGER_SUPPORT
// Reset stepping state when script exits with uncaught exception.
if (isolate->debugger()->IsDebuggerActive()) {
@@ -163,9 +165,10 @@ Handle<Object> Execution::Call(Isolate* isolate,
}
Handle<JSFunction> func = Handle<JSFunction>::cast(callable);
- // In non-strict mode, convert receiver.
+ // In sloppy mode, convert receiver.
if (convert_receiver && !receiver->IsJSReceiver() &&
- !func->shared()->native() && func->shared()->is_classic_mode()) {
+ !func->shared()->native() &&
+ func->shared()->strict_mode() == SLOPPY) {
if (receiver->IsUndefined() || receiver->IsNull()) {
Object* global = func->context()->global_object()->global_receiver();
// Under some circumstances, 'global' can be the JSBuiltinsObject
@@ -217,9 +220,6 @@ Handle<Object> Execution::TryCall(Handle<JSFunction> func,
ASSERT(catcher.HasCaught());
ASSERT(isolate->has_pending_exception());
ASSERT(isolate->external_caught_exception());
- if (isolate->is_out_of_memory() && !isolate->ignore_out_of_memory()) {
- V8::FatalProcessOutOfMemory("OOM during Execution::TryCall");
- }
if (isolate->pending_exception() ==
isolate->heap()->termination_exception()) {
result = isolate->factory()->termination_exception();
@@ -368,6 +368,20 @@ void Execution::RunMicrotasks(Isolate* isolate) {
}
+void Execution::EnqueueMicrotask(Isolate* isolate, Handle<Object> microtask) {
+ bool threw = false;
+ Handle<Object> args[] = { microtask };
+ Execution::Call(
+ isolate,
+ isolate->enqueue_external_microtask(),
+ isolate->factory()->undefined_value(),
+ 1,
+ args,
+ &threw);
+ ASSERT(!threw);
+}
+
+
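// ---------------------------------------------------------------------------
// Editorial sketch, not part of the upstream patch. Execution::EnqueueMicrotask
// above hands the task to a JS-level builtin (enqueue_external_microtask);
// RunMicrotasks later drains that queue in FIFO order, including tasks that
// were enqueued while draining. Self-contained C++ model of that contract:
#include <deque>
#include <functional>
#include <utility>

class MicrotaskQueue {
 public:
  void Enqueue(std::function<void()> task) { queue_.push_back(std::move(task)); }

  // Drain until empty; tasks enqueued by a running task execute in the same
  // checkpoint, which matches the observable microtask semantics.
  void RunMicrotasks() {
    while (!queue_.empty()) {
      std::function<void()> task = std::move(queue_.front());
      queue_.pop_front();
      task();
    }
  }

 private:
  std::deque<std::function<void()>> queue_;
};
// ---------------------------------------------------------------------------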
bool StackGuard::IsStackOverflow() {
ExecutionAccess access(isolate_);
return (thread_local_.jslimit_ != kInterruptLimit &&
@@ -502,15 +516,15 @@ void StackGuard::FullDeopt() {
}
-bool StackGuard::IsDeoptMarkedCode() {
+bool StackGuard::IsDeoptMarkedAllocationSites() {
ExecutionAccess access(isolate_);
- return (thread_local_.interrupt_flags_ & DEOPT_MARKED_CODE) != 0;
+ return (thread_local_.interrupt_flags_ & DEOPT_MARKED_ALLOCATION_SITES) != 0;
}
-void StackGuard::DeoptMarkedCode() {
+void StackGuard::DeoptMarkedAllocationSites() {
ExecutionAccess access(isolate_);
- thread_local_.interrupt_flags_ |= DEOPT_MARKED_CODE;
+ thread_local_.interrupt_flags_ |= DEOPT_MARKED_ALLOCATION_SITES;
set_interrupt_limits(access);
}
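// ---------------------------------------------------------------------------
// Editorial sketch, not part of the upstream patch. The StackGuard methods
// above follow one pattern per interrupt source: the request ORs a flag bit
// into interrupt_flags_ and arms the stack limit so the next stack check
// traps, the Is...() query tests the bit, and Continue() clears it (the real
// code also takes the ExecutionAccess lock, omitted here). Minimal model of
// that bitmask protocol; EXAMPLE_INTERRUPT is a placeholder, and only the
// 1 << 9 bit mirrors the enum in execution.h further down.
#include <cstdint>

enum InterruptFlag : uint32_t {
  EXAMPLE_INTERRUPT = 1u << 0,                 // placeholder bit
  DEOPT_MARKED_ALLOCATION_SITES = 1u << 9,     // the bit renamed by this patch
};

class MiniStackGuard {
 public:
  void Request(InterruptFlag flag) {
    interrupt_flags_ |= flag;
    limit_armed_ = true;                       // stands in for set_interrupt_limits()
  }
  bool IsRequested(InterruptFlag flag) const {
    return (interrupt_flags_ & flag) != 0;
  }
  void Continue(InterruptFlag flag) {
    interrupt_flags_ &= ~static_cast<uint32_t>(flag);
    if (interrupt_flags_ == 0) limit_armed_ = false;
  }
 private:
  uint32_t interrupt_flags_ = 0;
  bool limit_armed_ = false;
};
// ---------------------------------------------------------------------------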
@@ -797,10 +811,10 @@ Handle<JSFunction> Execution::InstantiateFunction(
if (!data->do_not_cache()) {
// Fast case: see if the function has already been instantiated
int serial_number = Smi::cast(data->serial_number())->value();
- Object* elm =
- isolate->native_context()->function_cache()->
- GetElementNoExceptionThrown(isolate, serial_number);
- if (elm->IsJSFunction()) return Handle<JSFunction>(JSFunction::cast(elm));
+ Handle<JSObject> cache(isolate->native_context()->function_cache());
+ Handle<Object> elm =
+ Object::GetElementNoExceptionThrown(isolate, cache, serial_number);
+ if (elm->IsJSFunction()) return Handle<JSFunction>::cast(elm);
}
// The function has not yet been instantiated in this context; do it.
Handle<Object> args[] = { data };
@@ -1026,9 +1040,9 @@ MaybeObject* Execution::HandleStackGuardInterrupt(Isolate* isolate) {
stack_guard->Continue(FULL_DEOPT);
Deoptimizer::DeoptimizeAll(isolate);
}
- if (stack_guard->IsDeoptMarkedCode()) {
- stack_guard->Continue(DEOPT_MARKED_CODE);
- Deoptimizer::DeoptimizeMarkedCode(isolate);
+ if (stack_guard->IsDeoptMarkedAllocationSites()) {
+ stack_guard->Continue(DEOPT_MARKED_ALLOCATION_SITES);
+ isolate->heap()->DeoptMarkedAllocationSites();
}
if (stack_guard->IsInstallCodeRequest()) {
ASSERT(isolate->concurrent_recompilation_enabled());
diff --git a/deps/v8/src/execution.h b/deps/v8/src/execution.h
index abf4f1dc65..592ecbdb62 100644
--- a/deps/v8/src/execution.h
+++ b/deps/v8/src/execution.h
@@ -45,7 +45,7 @@ enum InterruptFlag {
FULL_DEOPT = 1 << 6,
INSTALL_CODE = 1 << 7,
API_INTERRUPT = 1 << 8,
- DEOPT_MARKED_CODE = 1 << 9
+ DEOPT_MARKED_ALLOCATION_SITES = 1 << 9
};
@@ -175,6 +175,7 @@ class Execution : public AllStatic {
bool* has_pending_exception);
static void RunMicrotasks(Isolate* isolate);
+ static void EnqueueMicrotask(Isolate* isolate, Handle<Object> microtask);
};
@@ -222,8 +223,8 @@ class StackGuard {
void RequestInstallCode();
bool IsFullDeopt();
void FullDeopt();
- bool IsDeoptMarkedCode();
- void DeoptMarkedCode();
+ bool IsDeoptMarkedAllocationSites();
+ void DeoptMarkedAllocationSites();
void Continue(InterruptFlag after_what);
void RequestInterrupt(InterruptCallback callback, void* data);
@@ -281,7 +282,7 @@ class StackGuard {
void EnableInterrupts();
void DisableInterrupts();
-#if V8_TARGET_ARCH_X64
+#if V8_TARGET_ARCH_X64 || V8_TARGET_ARCH_ARM64
static const uintptr_t kInterruptLimit = V8_UINT64_C(0xfffffffffffffffe);
static const uintptr_t kIllegalLimit = V8_UINT64_C(0xfffffffffffffff8);
#else
diff --git a/deps/v8/src/extensions/externalize-string-extension.cc b/deps/v8/src/extensions/externalize-string-extension.cc
index d372cf0125..adc5577d9e 100644
--- a/deps/v8/src/extensions/externalize-string-extension.cc
+++ b/deps/v8/src/extensions/externalize-string-extension.cc
@@ -107,7 +107,7 @@ void ExternalizeStringExtension::Externalize(
SimpleAsciiStringResource* resource = new SimpleAsciiStringResource(
reinterpret_cast<char*>(data), string->length());
result = string->MakeExternal(resource);
- if (result && !string->IsInternalizedString()) {
+ if (result) {
i::Isolate* isolate = reinterpret_cast<i::Isolate*>(args.GetIsolate());
isolate->heap()->external_string_table()->AddString(*string);
}
@@ -118,7 +118,7 @@ void ExternalizeStringExtension::Externalize(
SimpleTwoByteStringResource* resource = new SimpleTwoByteStringResource(
data, string->length());
result = string->MakeExternal(resource);
- if (result && !string->IsInternalizedString()) {
+ if (result) {
i::Isolate* isolate = reinterpret_cast<i::Isolate*>(args.GetIsolate());
isolate->heap()->external_string_table()->AddString(*string);
}
diff --git a/deps/v8/src/factory.cc b/deps/v8/src/factory.cc
index aead7be0cc..0868db851d 100644
--- a/deps/v8/src/factory.cc
+++ b/deps/v8/src/factory.cc
@@ -69,6 +69,14 @@ Handle<FixedArray> Factory::NewFixedArrayWithHoles(int size,
}
+Handle<FixedArray> Factory::NewUninitializedFixedArray(int size) {
+ CALL_HEAP_FUNCTION(
+ isolate(),
+ isolate()->heap()->AllocateUninitializedFixedArray(size),
+ FixedArray);
+}
+
+
Handle<FixedDoubleArray> Factory::NewFixedDoubleArray(int size,
PretenureFlag pretenure) {
ASSERT(0 <= size);
@@ -81,14 +89,16 @@ Handle<FixedDoubleArray> Factory::NewFixedDoubleArray(int size,
Handle<ConstantPoolArray> Factory::NewConstantPoolArray(
int number_of_int64_entries,
- int number_of_ptr_entries,
+ int number_of_code_ptr_entries,
+ int number_of_heap_ptr_entries,
int number_of_int32_entries) {
- ASSERT(number_of_int64_entries > 0 || number_of_ptr_entries > 0 ||
- number_of_int32_entries > 0);
+ ASSERT(number_of_int64_entries > 0 || number_of_code_ptr_entries > 0 ||
+ number_of_heap_ptr_entries > 0 || number_of_int32_entries > 0);
CALL_HEAP_FUNCTION(
isolate(),
isolate()->heap()->AllocateConstantPoolArray(number_of_int64_entries,
- number_of_ptr_entries,
+ number_of_code_ptr_entries,
+ number_of_heap_ptr_entries,
number_of_int32_entries),
ConstantPoolArray);
}
@@ -279,7 +289,7 @@ Handle<String> Factory::NewStringFromTwoByte(Vector<const uc16> string,
Handle<SeqOneByteString> Factory::NewRawOneByteString(int length,
- PretenureFlag pretenure) {
+ PretenureFlag pretenure) {
CALL_HEAP_FUNCTION(
isolate(),
isolate()->heap()->AllocateRawOneByteString(length, pretenure),
@@ -375,9 +385,7 @@ Handle<String> Factory::NewConsString(Handle<String> left,
// Make sure that an out of memory exception is thrown if the length
// of the new cons string is too large.
if (length > String::kMaxLength || length < 0) {
- isolate()->context()->mark_out_of_memory();
- V8::FatalProcessOutOfMemory("String concatenation result too large.");
- UNREACHABLE();
+ isolate()->ThrowInvalidStringLength();
return Handle<String>::null();
}
@@ -403,6 +411,7 @@ Handle<String> Factory::NewConsString(Handle<String> left,
ASSERT(left->IsFlat());
ASSERT(right->IsFlat());
+ STATIC_ASSERT(ConsString::kMinLength <= String::kMaxLength);
if (is_one_byte) {
Handle<SeqOneByteString> result = NewRawOneByteString(length);
DisallowHeapAllocation no_gc;
@@ -488,12 +497,14 @@ Handle<String> Factory::NewProperSubString(Handle<String> str,
if (!FLAG_string_slices || length < SlicedString::kMinLength) {
if (str->IsOneByteRepresentation()) {
Handle<SeqOneByteString> result = NewRawOneByteString(length);
+ ASSERT(!result.is_null());
uint8_t* dest = result->GetChars();
DisallowHeapAllocation no_gc;
String::WriteToFlat(*str, dest, begin, end);
return result;
} else {
Handle<SeqTwoByteString> result = NewRawTwoByteString(length);
+ ASSERT(!result.is_null());
uc16* dest = result->GetChars();
DisallowHeapAllocation no_gc;
String::WriteToFlat(*str, dest, begin, end);
@@ -700,7 +711,6 @@ Handle<Script> Factory::NewScript(Handle<String> source) {
script->set_id(Smi::FromInt(id));
script->set_line_offset(Smi::FromInt(0));
script->set_column_offset(Smi::FromInt(0));
- script->set_data(heap->undefined_value());
script->set_context_data(heap->undefined_value());
script->set_type(Smi::FromInt(Script::TYPE_NORMAL));
script->set_wrapper(*wrapper);
@@ -873,18 +883,17 @@ Handle<Map> Factory::CopyMap(Handle<Map> src) {
}
-Handle<Map> Factory::GetElementsTransitionMap(
- Handle<JSObject> src,
- ElementsKind elements_kind) {
- Isolate* i = isolate();
- CALL_HEAP_FUNCTION(i,
- src->GetElementsTransitionMap(i, elements_kind),
- Map);
+Handle<FixedArray> Factory::CopyFixedArray(Handle<FixedArray> array) {
+ CALL_HEAP_FUNCTION(isolate(), array->Copy(), FixedArray);
}
-Handle<FixedArray> Factory::CopyFixedArray(Handle<FixedArray> array) {
- CALL_HEAP_FUNCTION(isolate(), array->Copy(), FixedArray);
+Handle<FixedArray> Factory::CopyAndTenureFixedCOWArray(
+ Handle<FixedArray> array) {
+ ASSERT(isolate()->heap()->InNewSpace(*array));
+ CALL_HEAP_FUNCTION(isolate(),
+ isolate()->heap()->CopyAndTenureFixedCOWArray(*array),
+ FixedArray);
}
@@ -926,7 +935,7 @@ Handle<JSFunction> Factory::BaseNewFunctionFromSharedFunctionInfo(
static Handle<Map> MapForNewFunction(Isolate *isolate,
Handle<SharedFunctionInfo> function_info) {
Context *context = isolate->context()->native_context();
- int map_index = Context::FunctionMapIndex(function_info->language_mode(),
+ int map_index = Context::FunctionMapIndex(function_info->strict_mode(),
function_info->is_generator());
return Handle<Map>(Map::cast(context->get(map_index)));
}
@@ -967,7 +976,9 @@ Handle<JSFunction> Factory::NewFunctionFromSharedFunctionInfo(
FixedArray* literals =
function_info->GetLiteralsFromOptimizedCodeMap(index);
if (literals != NULL) result->set_literals(literals);
- result->ReplaceCode(function_info->GetCodeFromOptimizedCodeMap(index));
+ Code* code = function_info->GetCodeFromOptimizedCodeMap(index);
+ ASSERT(!code->marked_for_deoptimization());
+ result->ReplaceCode(code);
return result;
}
@@ -1064,6 +1075,12 @@ Handle<Object> Factory::NewReferenceError(const char* message,
}
+Handle<Object> Factory::NewReferenceError(const char* message,
+ Handle<JSArray> args) {
+ return NewError("MakeReferenceError", message, args);
+}
+
+
Handle<Object> Factory::NewReferenceError(Handle<String> message) {
return NewError("$ReferenceError", message);
}
@@ -1113,8 +1130,8 @@ Handle<String> Factory::EmergencyNewError(const char* message,
*p++ = ' ';
space--;
if (space > 0) {
- MaybeObject* maybe_arg = args->GetElement(isolate(), i);
- Handle<String> arg_str(reinterpret_cast<String*>(maybe_arg));
+ Handle<String> arg_str = Handle<String>::cast(
+ Object::GetElementNoExceptionThrown(isolate(), args, i));
SmartArrayPointer<char> arg = arg_str->ToCString();
Vector<char> v2(p, static_cast<int>(space));
OS::StrNCpy(v2, arg.get(), space);
@@ -1247,8 +1264,7 @@ Handle<JSFunction> Factory::NewFunctionWithPrototype(Handle<String> name,
Handle<JSFunction> Factory::NewFunctionWithoutPrototype(Handle<String> name,
Handle<Code> code) {
- Handle<JSFunction> function = NewFunctionWithoutPrototype(name,
- CLASSIC_MODE);
+ Handle<JSFunction> function = NewFunctionWithoutPrototype(name, SLOPPY);
function->shared()->set_code(*code);
function->set_code(*code);
ASSERT(!function->has_initial_map());
@@ -1300,12 +1316,6 @@ Handle<Code> Factory::CopyCode(Handle<Code> code, Vector<byte> reloc_info) {
}
-Handle<String> Factory::InternalizedStringFromString(Handle<String> value) {
- CALL_HEAP_FUNCTION(isolate(),
- isolate()->heap()->InternalizeString(*value), String);
-}
-
-
Handle<JSObject> Factory::NewJSObject(Handle<JSFunction> constructor,
PretenureFlag pretenure) {
JSFunction::EnsureHasInitialMap(constructor);
@@ -1315,6 +1325,17 @@ Handle<JSObject> Factory::NewJSObject(Handle<JSFunction> constructor,
}
+Handle<JSObject> Factory::NewJSObjectWithMemento(
+ Handle<JSFunction> constructor,
+ Handle<AllocationSite> site) {
+ JSFunction::EnsureHasInitialMap(constructor);
+ CALL_HEAP_FUNCTION(
+ isolate(),
+ isolate()->heap()->AllocateJSObject(*constructor, NOT_TENURED, *site),
+ JSObject);
+}
+
+
Handle<JSModule> Factory::NewJSModule(Handle<Context> context,
Handle<ScopeInfo> scope_info) {
CALL_HEAP_FUNCTION(
@@ -1397,18 +1418,26 @@ Handle<GlobalObject> Factory::NewGlobalObject(Handle<JSFunction> constructor) {
}
-Handle<JSObject> Factory::NewJSObjectFromMap(Handle<Map> map,
- PretenureFlag pretenure,
- bool alloc_props) {
+Handle<JSObject> Factory::NewJSObjectFromMap(
+ Handle<Map> map,
+ PretenureFlag pretenure,
+ bool alloc_props,
+ Handle<AllocationSite> allocation_site) {
CALL_HEAP_FUNCTION(
isolate(),
- isolate()->heap()->AllocateJSObjectFromMap(*map, pretenure, alloc_props),
+ isolate()->heap()->AllocateJSObjectFromMap(
+ *map,
+ pretenure,
+ alloc_props,
+ allocation_site.is_null() ? NULL : *allocation_site),
JSObject);
}
-Handle<JSArray> Factory::NewJSArray(int capacity,
- ElementsKind elements_kind,
+Handle<JSArray> Factory::NewJSArray(ElementsKind elements_kind,
+ int length,
+ int capacity,
+ ArrayStorageAllocationMode mode,
PretenureFlag pretenure) {
if (capacity != 0) {
elements_kind = GetHoleyElementsKind(elements_kind);
@@ -1416,9 +1445,9 @@ Handle<JSArray> Factory::NewJSArray(int capacity,
CALL_HEAP_FUNCTION(isolate(),
isolate()->heap()->AllocateJSArrayAndStorage(
elements_kind,
- 0,
+ length,
capacity,
- INITIALIZE_ARRAY_ELEMENTS_WITH_HOLE,
+ mode,
pretenure),
JSArray);
}
@@ -1426,32 +1455,28 @@ Handle<JSArray> Factory::NewJSArray(int capacity,
Handle<JSArray> Factory::NewJSArrayWithElements(Handle<FixedArrayBase> elements,
ElementsKind elements_kind,
+ int length,
PretenureFlag pretenure) {
+ ASSERT(length <= elements->length());
CALL_HEAP_FUNCTION(
isolate(),
isolate()->heap()->AllocateJSArrayWithElements(*elements,
elements_kind,
- elements->length(),
+ length,
pretenure),
JSArray);
}
-void Factory::SetElementsCapacityAndLength(Handle<JSArray> array,
- int capacity,
- int length) {
- ElementsAccessor* accessor = array->GetElementsAccessor();
- CALL_HEAP_FUNCTION_VOID(
- isolate(),
- accessor->SetCapacityAndLength(*array, capacity, length));
-}
-
-
-void Factory::SetContent(Handle<JSArray> array,
- Handle<FixedArrayBase> elements) {
- CALL_HEAP_FUNCTION_VOID(
- isolate(),
- array->SetContent(*elements));
+void Factory::NewJSArrayStorage(Handle<JSArray> array,
+ int length,
+ int capacity,
+ ArrayStorageAllocationMode mode) {
+ CALL_HEAP_FUNCTION_VOID(isolate(),
+ isolate()->heap()->AllocateJSArrayStorage(*array,
+ length,
+ capacity,
+ mode));
}
@@ -1572,7 +1597,6 @@ Handle<JSMessageObject> Factory::NewJSMessageObject(
int start_position,
int end_position,
Handle<Object> script,
- Handle<Object> stack_trace,
Handle<Object> stack_frames) {
CALL_HEAP_FUNCTION(isolate(),
isolate()->heap()->AllocateJSMessageObject(*type,
@@ -1580,7 +1604,6 @@ Handle<JSMessageObject> Factory::NewJSMessageObject(
start_position,
end_position,
*script,
- *stack_trace,
*stack_frames),
JSMessageObject);
}
@@ -1630,7 +1653,7 @@ Handle<JSFunction> Factory::NewFunctionHelper(Handle<String> name,
Handle<SharedFunctionInfo> function_share = NewSharedFunctionInfo(name);
CALL_HEAP_FUNCTION(
isolate(),
- isolate()->heap()->AllocateFunction(*isolate()->function_map(),
+ isolate()->heap()->AllocateFunction(*isolate()->sloppy_function_map(),
*function_share,
*prototype),
JSFunction);
@@ -1647,11 +1670,11 @@ Handle<JSFunction> Factory::NewFunction(Handle<String> name,
Handle<JSFunction> Factory::NewFunctionWithoutPrototypeHelper(
Handle<String> name,
- LanguageMode language_mode) {
+ StrictMode strict_mode) {
Handle<SharedFunctionInfo> function_share = NewSharedFunctionInfo(name);
- Handle<Map> map = (language_mode == CLASSIC_MODE)
- ? isolate()->function_without_prototype_map()
- : isolate()->strict_mode_function_without_prototype_map();
+ Handle<Map> map = strict_mode == SLOPPY
+ ? isolate()->sloppy_function_without_prototype_map()
+ : isolate()->strict_function_without_prototype_map();
CALL_HEAP_FUNCTION(isolate(),
isolate()->heap()->AllocateFunction(
*map,
@@ -1663,9 +1686,8 @@ Handle<JSFunction> Factory::NewFunctionWithoutPrototypeHelper(
Handle<JSFunction> Factory::NewFunctionWithoutPrototype(
Handle<String> name,
- LanguageMode language_mode) {
- Handle<JSFunction> fun =
- NewFunctionWithoutPrototypeHelper(name, language_mode);
+ StrictMode strict_mode) {
+ Handle<JSFunction> fun = NewFunctionWithoutPrototypeHelper(name, strict_mode);
fun->set_context(isolate()->context()->native_context());
return fun;
}
diff --git a/deps/v8/src/factory.h b/deps/v8/src/factory.h
index db25b09a91..00f20ff8ba 100644
--- a/deps/v8/src/factory.h
+++ b/deps/v8/src/factory.h
@@ -44,7 +44,7 @@ class Factory {
Handle<Object> value,
PretenureFlag pretenure = NOT_TENURED);
- // Allocate a new uninitialized fixed array.
+ // Allocates a fixed array initialized with undefined values.
Handle<FixedArray> NewFixedArray(
int size,
PretenureFlag pretenure = NOT_TENURED);
@@ -54,6 +54,9 @@ class Factory {
int size,
PretenureFlag pretenure = NOT_TENURED);
+ // Allocates an uninitialized fixed array. It must be filled by the caller.
+ Handle<FixedArray> NewUninitializedFixedArray(int size);
+
// Allocate a new uninitialized fixed double array.
Handle<FixedDoubleArray> NewFixedDoubleArray(
int size,
@@ -61,7 +64,8 @@ class Factory {
Handle<ConstantPoolArray> NewConstantPoolArray(
int number_of_int64_entries,
- int number_of_ptr_entries,
+ int number_of_code_ptr_entries,
+ int number_of_heap_ptr_entries,
int number_of_int32_entries);
Handle<SeededNumberDictionary> NewSeededNumberDictionary(
@@ -225,9 +229,6 @@ class Factory {
Handle<Context> previous,
Handle<ScopeInfo> scope_info);
- // Return the internalized version of the passed in string.
- Handle<String> InternalizedStringFromString(Handle<String> value);
-
// Allocate a new struct. The struct is pretenured (allocated directly in
// the old generation).
Handle<Struct> NewStruct(InstanceType type);
@@ -287,11 +288,12 @@ class Factory {
Handle<Map> CopyMap(Handle<Map> map, int extra_inobject_props);
Handle<Map> CopyMap(Handle<Map> map);
- Handle<Map> GetElementsTransitionMap(Handle<JSObject> object,
- ElementsKind elements_kind);
-
Handle<FixedArray> CopyFixedArray(Handle<FixedArray> array);
+ // This method expects a COW array in new space, and creates a copy
+ // of it in old space.
+ Handle<FixedArray> CopyAndTenureFixedCOWArray(Handle<FixedArray> array);
+
Handle<FixedArray> CopySizeFixedArray(Handle<FixedArray> array,
int new_length,
PretenureFlag pretenure = NOT_TENURED);
@@ -326,15 +328,20 @@ class Factory {
// runtime.
Handle<JSObject> NewJSObject(Handle<JSFunction> constructor,
PretenureFlag pretenure = NOT_TENURED);
+ // JSObject that should have a memento pointing to the allocation site.
+ Handle<JSObject> NewJSObjectWithMemento(Handle<JSFunction> constructor,
+ Handle<AllocationSite> site);
// Global objects are pretenured and initialized based on a constructor.
Handle<GlobalObject> NewGlobalObject(Handle<JSFunction> constructor);
// JS objects are pretenured when allocated by the bootstrapper and
// runtime.
- Handle<JSObject> NewJSObjectFromMap(Handle<Map> map,
- PretenureFlag pretenure = NOT_TENURED,
- bool allocate_properties = true);
+ Handle<JSObject> NewJSObjectFromMap(
+ Handle<Map> map,
+ PretenureFlag pretenure = NOT_TENURED,
+ bool allocate_properties = true,
+ Handle<AllocationSite> allocation_site = Handle<AllocationSite>::null());
Handle<JSObject> NewJSObjectFromMapForDeoptimizer(
Handle<Map> map, PretenureFlag pretenure = NOT_TENURED);
@@ -345,20 +352,39 @@ class Factory {
// JS arrays are pretenured when allocated by the parser.
Handle<JSArray> NewJSArray(
+ ElementsKind elements_kind,
+ int length,
int capacity,
- ElementsKind elements_kind = TERMINAL_FAST_ELEMENTS_KIND,
+ ArrayStorageAllocationMode mode = INITIALIZE_ARRAY_ELEMENTS_WITH_HOLE,
PretenureFlag pretenure = NOT_TENURED);
+ Handle<JSArray> NewJSArray(
+ int capacity,
+ ElementsKind elements_kind = TERMINAL_FAST_ELEMENTS_KIND,
+ PretenureFlag pretenure = NOT_TENURED) {
+ return NewJSArray(elements_kind, 0, capacity,
+ INITIALIZE_ARRAY_ELEMENTS_WITH_HOLE, pretenure);
+ }
+
Handle<JSArray> NewJSArrayWithElements(
Handle<FixedArrayBase> elements,
- ElementsKind elements_kind = TERMINAL_FAST_ELEMENTS_KIND,
+ ElementsKind elements_kind,
+ int length,
PretenureFlag pretenure = NOT_TENURED);
- void SetElementsCapacityAndLength(Handle<JSArray> array,
- int capacity,
- int length);
+ Handle<JSArray> NewJSArrayWithElements(
+ Handle<FixedArrayBase> elements,
+ ElementsKind elements_kind = TERMINAL_FAST_ELEMENTS_KIND,
+ PretenureFlag pretenure = NOT_TENURED) {
+ return NewJSArrayWithElements(
+ elements, elements_kind, elements->length(), pretenure);
+ }
- void SetContent(Handle<JSArray> array, Handle<FixedArrayBase> elements);
+ void NewJSArrayStorage(
+ Handle<JSArray> array,
+ int length,
+ int capacity,
+ ArrayStorageAllocationMode mode = DONT_INITIALIZE_ARRAY_ELEMENTS);
Handle<JSGeneratorObject> NewJSGeneratorObject(Handle<JSFunction> function);
@@ -379,7 +405,7 @@ class Factory {
Handle<JSFunction> NewFunctionWithoutPrototype(
Handle<String> name,
- LanguageMode language_mode);
+ StrictMode strict_mode);
Handle<JSFunction> NewFunction(Handle<Object> super, bool is_global);
@@ -438,6 +464,7 @@ class Factory {
Handle<Object> NewReferenceError(const char* message,
Vector< Handle<Object> > args);
+ Handle<Object> NewReferenceError(const char* message, Handle<JSArray> args);
Handle<Object> NewReferenceError(Handle<String> message);
Handle<Object> NewEvalError(const char* message,
@@ -528,7 +555,6 @@ class Factory {
int start_position,
int end_position,
Handle<Object> script,
- Handle<Object> stack_trace,
Handle<Object> stack_frames);
Handle<SeededNumberDictionary> DictionaryAtNumberPut(
@@ -582,7 +608,7 @@ class Factory {
Handle<JSFunction> NewFunctionWithoutPrototypeHelper(
Handle<String> name,
- LanguageMode language_mode);
+ StrictMode strict_mode);
// Create a new map cache.
Handle<MapCache> NewMapCache(int at_least_space_for);
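
The factory.h hunk above replaces the single NewJSArray/NewJSArrayWithElements signatures with a fully specified variant plus inline convenience overloads that forward default length and storage mode. The following is a minimal sketch of that "full signature + forwarding overload" pattern only; it uses plain stand-in types rather than the real V8 Handle/ElementsKind machinery, and the names are illustrative, not V8 API.

#include <cstdio>
#include <vector>

enum ArrayStorageAllocationMode {
  DONT_INITIALIZE_ARRAY_ELEMENTS,
  INITIALIZE_ARRAY_ELEMENTS_WITH_HOLE
};

struct ToyArray {
  int length;
  std::vector<int> backing;  // stand-in for the elements store
};

// Fully specified variant: caller chooses length, capacity and storage mode.
ToyArray NewToyArray(int length, int capacity, ArrayStorageAllocationMode mode) {
  ToyArray a;
  a.length = length;
  a.backing.resize(capacity,
                   mode == INITIALIZE_ARRAY_ELEMENTS_WITH_HOLE ? -1 : 0);
  return a;
}

// Convenience overload: forwards defaults (length 0, hole-initialized storage),
// mirroring the inline overloads added in the header.
ToyArray NewToyArray(int capacity) {
  return NewToyArray(0, capacity, INITIALIZE_ARRAY_ELEMENTS_WITH_HOLE);
}

int main() {
  ToyArray a = NewToyArray(8);
  std::printf("length=%d capacity=%zu\n", a.length, a.backing.size());
  return 0;
}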
diff --git a/deps/v8/src/feedback-slots.h b/deps/v8/src/feedback-slots.h
new file mode 100644
index 0000000000..9760c652bc
--- /dev/null
+++ b/deps/v8/src/feedback-slots.h
@@ -0,0 +1,110 @@
+// Copyright 2014 the V8 project authors. All rights reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following
+// disclaimer in the documentation and/or other materials provided
+// with the distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived
+// from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+#ifndef V8_FEEDBACK_SLOTS_H_
+#define V8_FEEDBACK_SLOTS_H_
+
+#include "v8.h"
+
+#include "isolate.h"
+
+namespace v8 {
+namespace internal {
+
+enum ComputablePhase {
+ DURING_PARSE,
+ AFTER_SCOPING
+};
+
+
+class FeedbackSlotInterface {
+ public:
+ static const int kInvalidFeedbackSlot = -1;
+
+ virtual ~FeedbackSlotInterface() {}
+
+ // When can we ask how many feedback slots are necessary?
+ virtual ComputablePhase GetComputablePhase() = 0;
+ virtual int ComputeFeedbackSlotCount(Isolate* isolate) = 0;
+ virtual void SetFirstFeedbackSlot(int slot) = 0;
+};
+
+
+class DeferredFeedbackSlotProcessor {
+ public:
+ DeferredFeedbackSlotProcessor()
+ : slot_nodes_(NULL),
+ slot_count_(0) { }
+
+ void add_slot_node(Zone* zone, FeedbackSlotInterface* slot) {
+ if (slot->GetComputablePhase() == DURING_PARSE) {
+ // No need to add to the list
+ int count = slot->ComputeFeedbackSlotCount(zone->isolate());
+ slot->SetFirstFeedbackSlot(slot_count_);
+ slot_count_ += count;
+ } else {
+ if (slot_nodes_ == NULL) {
+ slot_nodes_ = new(zone) ZoneList<FeedbackSlotInterface*>(10, zone);
+ }
+ slot_nodes_->Add(slot, zone);
+ }
+ }
+
+ void ProcessFeedbackSlots(Isolate* isolate) {
+ // Scope analysis must have been done.
+ if (slot_nodes_ == NULL) {
+ return;
+ }
+
+ int current_slot = slot_count_;
+ for (int i = 0; i < slot_nodes_->length(); i++) {
+ FeedbackSlotInterface* slot_interface = slot_nodes_->at(i);
+ int count = slot_interface->ComputeFeedbackSlotCount(isolate);
+ if (count > 0) {
+ slot_interface->SetFirstFeedbackSlot(current_slot);
+ current_slot += count;
+ }
+ }
+
+ slot_count_ = current_slot;
+ slot_nodes_->Clear();
+ }
+
+ int slot_count() {
+ ASSERT(slot_count_ >= 0);
+ return slot_count_;
+ }
+
+ private:
+ ZoneList<FeedbackSlotInterface*>* slot_nodes_;
+ int slot_count_;
+};
+
+
+} } // namespace v8::internal
+
+#endif // V8_FEEDBACK_SLOTS_H_
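
The new DeferredFeedbackSlotProcessor above numbers feedback-vector slots in two phases: nodes whose slot count is already known during parsing are numbered immediately, while nodes that need scope analysis are queued and numbered later in ProcessFeedbackSlots. Below is a standalone sketch of that two-phase numbering; it uses the STL instead of V8's Zone allocator, and the SlotNode/SlotProcessor names are stand-ins, not V8 types.

#include <cstdio>
#include <vector>

enum ComputablePhase { DURING_PARSE, AFTER_SCOPING };

struct SlotNode {
  ComputablePhase phase;
  int slots_needed;
  int first_slot;  // -1 plays the role of kInvalidFeedbackSlot
};

class SlotProcessor {
 public:
  void AddNode(SlotNode* node) {
    if (node->phase == DURING_PARSE) {
      // Slot count already known: number it immediately.
      node->first_slot = slot_count_;
      slot_count_ += node->slots_needed;
    } else {
      deferred_.push_back(node);  // number it after scope analysis
    }
  }

  void ProcessDeferred() {
    int current = slot_count_;
    for (SlotNode* node : deferred_) {
      if (node->slots_needed > 0) {
        node->first_slot = current;
        current += node->slots_needed;
      }
    }
    slot_count_ = current;
    deferred_.clear();
  }

  int slot_count() const { return slot_count_; }

 private:
  std::vector<SlotNode*> deferred_;
  int slot_count_ = 0;
};

int main() {
  SlotNode a{DURING_PARSE, 2, -1}, b{AFTER_SCOPING, 1, -1}, c{DURING_PARSE, 1, -1};
  SlotProcessor p;
  p.AddNode(&a);
  p.AddNode(&b);
  p.AddNode(&c);
  p.ProcessDeferred();
  // a and c get slots during "parsing", b is numbered afterwards.
  std::printf("a=%d b=%d c=%d total=%d\n",
              a.first_slot, b.first_slot, c.first_slot, p.slot_count());
  return 0;
}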
diff --git a/deps/v8/src/flag-definitions.h b/deps/v8/src/flag-definitions.h
index c0eaf16da2..b93d03b59c 100644
--- a/deps/v8/src/flag-definitions.h
+++ b/deps/v8/src/flag-definitions.h
@@ -167,10 +167,7 @@ struct MaybeBoolFlag {
// Flags for language modes and experimental language features.
DEFINE_bool(use_strict, false, "enforce strict mode")
-DEFINE_bool(es5_readonly, true,
- "activate correct semantics for inheriting readonliness")
-DEFINE_bool(es52_globals, true,
- "activate new semantics for global var declarations")
+DEFINE_bool(es_staging, false, "enable upcoming ES6+ features")
DEFINE_bool(harmony_typeof, false, "enable harmony semantics for typeof")
DEFINE_bool(harmony_scoping, false, "enable harmony block scoping")
@@ -178,12 +175,9 @@ DEFINE_bool(harmony_modules, false,
"enable harmony modules (implies block scoping)")
DEFINE_bool(harmony_symbols, false,
"enable harmony symbols (a.k.a. private names)")
-DEFINE_bool(harmony_promises, false, "enable harmony promises")
DEFINE_bool(harmony_proxies, false, "enable harmony proxies")
DEFINE_bool(harmony_collections, false,
- "enable harmony collections (sets, maps, and weak maps)")
-DEFINE_bool(harmony_observation, false,
- "enable harmony object observation (implies harmony collections")
+ "enable harmony collections (sets, maps)")
DEFINE_bool(harmony_generators, false, "enable harmony generators")
DEFINE_bool(harmony_iteration, false, "enable harmony iteration (for-of)")
DEFINE_bool(harmony_numeric_literals, false,
@@ -192,22 +186,21 @@ DEFINE_bool(harmony_strings, false, "enable harmony string")
DEFINE_bool(harmony_arrays, false, "enable harmony arrays")
DEFINE_bool(harmony_maths, false, "enable harmony math functions")
DEFINE_bool(harmony, false, "enable all harmony features (except typeof)")
+
DEFINE_implication(harmony, harmony_scoping)
DEFINE_implication(harmony, harmony_modules)
DEFINE_implication(harmony, harmony_symbols)
-DEFINE_implication(harmony, harmony_promises)
DEFINE_implication(harmony, harmony_proxies)
DEFINE_implication(harmony, harmony_collections)
-DEFINE_implication(harmony, harmony_observation)
DEFINE_implication(harmony, harmony_generators)
DEFINE_implication(harmony, harmony_iteration)
DEFINE_implication(harmony, harmony_numeric_literals)
DEFINE_implication(harmony, harmony_strings)
DEFINE_implication(harmony, harmony_arrays)
-DEFINE_implication(harmony, harmony_maths)
-DEFINE_implication(harmony_promises, harmony_collections)
DEFINE_implication(harmony_modules, harmony_scoping)
-DEFINE_implication(harmony_observation, harmony_collections)
+
+DEFINE_implication(harmony, es_staging)
+DEFINE_implication(es_staging, harmony_maths)
// Flags for experimental implementation features.
DEFINE_bool(packed_arrays, true, "optimizes arrays that have no holes")
@@ -234,7 +227,6 @@ DEFINE_implication(track_double_fields, track_fields)
DEFINE_implication(track_heap_object_fields, track_fields)
DEFINE_implication(track_computed_fields, track_fields)
DEFINE_bool(smi_binop, true, "support smi representation in binary operations")
-DEFINE_bool(smi_x64_store_opt, false, "optimized stores of smi on x64")
// Flags for optimization types.
DEFINE_bool(optimize_for_size, false,
@@ -248,13 +240,15 @@ DEFINE_bool(string_slices, true, "use string slices")
// Flags for Crankshaft.
DEFINE_bool(crankshaft, true, "use crankshaft")
DEFINE_string(hydrogen_filter, "*", "optimization filter")
-DEFINE_bool(use_range, true, "use hydrogen range analysis")
DEFINE_bool(use_gvn, true, "use hydrogen global value numbering")
DEFINE_int(gvn_iterations, 3, "maximum number of GVN fix-point iterations")
DEFINE_bool(use_canonicalizing, true, "use hydrogen instruction canonicalizing")
DEFINE_bool(use_inlining, true, "use function inlining")
DEFINE_bool(use_escape_analysis, true, "use hydrogen escape analysis")
DEFINE_bool(use_allocation_folding, true, "use allocation folding")
+DEFINE_bool(use_local_allocation_folding, false, "only fold in basic blocks")
+DEFINE_bool(use_write_barrier_elimination, true,
+ "eliminate write barriers targeting allocations in optimized code")
DEFINE_int(max_inlining_levels, 5, "maximum number of inlining levels")
DEFINE_int(max_inlined_source_size, 600,
"maximum source size in bytes considered for a single inlining")
@@ -275,6 +269,7 @@ DEFINE_string(trace_hydrogen_file, NULL, "trace hydrogen to given file name")
DEFINE_string(trace_phase, "HLZ", "trace generated IR for specified phases")
DEFINE_bool(trace_inlining, false, "trace inlining decisions")
DEFINE_bool(trace_load_elimination, false, "trace load elimination")
+DEFINE_bool(trace_store_elimination, false, "trace store elimination")
DEFINE_bool(trace_alloc, false, "trace register allocator")
DEFINE_bool(trace_all_uses, false, "trace all use positions")
DEFINE_bool(trace_range, false, "trace range analysis")
@@ -301,6 +296,7 @@ DEFINE_bool(polymorphic_inlining, true, "polymorphic inlining")
DEFINE_bool(use_osr, true, "use on-stack replacement")
DEFINE_bool(array_bounds_checks_elimination, true,
"perform array bounds checks elimination")
+DEFINE_bool(trace_bce, false, "trace array bounds check elimination")
DEFINE_bool(array_bounds_checks_hoisting, false,
"perform array bounds checks hoisting")
DEFINE_bool(array_index_dehoisting, true,
@@ -309,6 +305,7 @@ DEFINE_bool(analyze_environment_liveness, true,
"analyze liveness of environment slots and zap dead values")
DEFINE_bool(load_elimination, true, "use load elimination")
DEFINE_bool(check_elimination, true, "use check elimination")
+DEFINE_bool(store_elimination, false, "use store elimination")
DEFINE_bool(dead_code_elimination, true, "use dead code elimination")
DEFINE_bool(fold_constants, true, "use constant folding")
DEFINE_bool(trace_dead_code_elimination, false, "trace dead code elimination")
@@ -353,6 +350,9 @@ DEFINE_bool(omit_map_checks_for_leaf_maps, true,
"do not emit check maps for constant values that have a leaf map, "
"deoptimize the optimized code if the layout of the maps changes.")
+DEFINE_int(typed_array_max_size_in_heap, 64,
+ "threshold for in-heap typed array")
+
// Profiler flags.
DEFINE_int(frame_count, 1, "number of stack frames inspected by the profiler")
// 0x1800 fits in the immediate field of an ARM instruction.
@@ -396,6 +396,8 @@ DEFINE_bool(enable_32dregs, ENABLE_32DREGS_DEFAULT,
"enable use of d16-d31 registers on ARM - this requires VFP3")
DEFINE_bool(enable_vldr_imm, false,
"enable use of constant pools for double immediate (ARM only)")
+DEFINE_bool(force_long_branches, false,
+ "force all emitted branches to be in long mode (MIPS only)")
// bootstrapper.cc
DEFINE_string(expose_natives_as, NULL, "expose natives in global object")
@@ -416,10 +418,6 @@ DEFINE_bool(disable_native_files, false, "disable builtin natives files")
// builtins-ia32.cc
DEFINE_bool(inline_new, true, "use fast inline allocation")
-// checks.cc
-DEFINE_bool(stack_trace_on_abort, true,
- "print a stack trace if an assertion failure occurs")
-
// codegen-ia32.cc / codegen-arm.cc
DEFINE_bool(trace_codegen, false,
"print name of functions for which code is generated")
@@ -470,7 +468,7 @@ DEFINE_bool(debugger_auto_break, true,
"automatically set the debug break flag when debugger commands are "
"in the queue")
DEFINE_bool(enable_liveedit, true, "enable liveedit experimental feature")
-DEFINE_bool(break_on_abort, true, "always cause a debug break before aborting")
+DEFINE_bool(hard_abort, true, "abort by crashing")
// execution.cc
// Slightly less than 1MB on 64-bit, since Windows' default stack size for
@@ -535,6 +533,7 @@ DEFINE_bool(parallel_sweeping, true, "enable parallel sweeping")
DEFINE_bool(concurrent_sweeping, false, "enable concurrent sweeping")
DEFINE_int(sweeper_threads, 0,
"number of parallel and concurrent sweeping threads")
+DEFINE_bool(job_based_sweeping, false, "enable job based sweeping")
#ifdef VERIFY_HEAP
DEFINE_bool(verify_heap, false, "verify heap pointers before and after GC")
#endif
@@ -571,6 +570,8 @@ DEFINE_bool(cleanup_code_caches_at_gc, true,
DEFINE_bool(use_marking_progress_bar, true,
"Use a progress bar to scan large objects in increments when "
"incremental marking is active.")
+DEFINE_bool(zap_code_space, true,
+ "Zap free memory in code space with 0xCC while sweeping.")
DEFINE_int(random_seed, 0,
"Default seed for initializing random generator "
"(0, the default, means to use system random).")
@@ -582,19 +583,36 @@ DEFINE_bool(use_verbose_printer, true, "allows verbose printing")
DEFINE_bool(allow_natives_syntax, false, "allow natives syntax")
DEFINE_bool(trace_parse, false, "trace parsing and preparsing")
-// simulator-arm.cc and simulator-mips.cc
+// simulator-arm.cc, simulator-arm64.cc and simulator-mips.cc
DEFINE_bool(trace_sim, false, "Trace simulator execution")
+DEFINE_bool(debug_sim, false, "Enable debugging the simulator")
DEFINE_bool(check_icache, false,
"Check icache flushes in ARM and MIPS simulator")
DEFINE_int(stop_sim_at, 0, "Simulator stop after x number of instructions")
+#ifdef V8_TARGET_ARCH_ARM64
+DEFINE_int(sim_stack_alignment, 16,
+ "Stack alignment in bytes in simulator. This must be a power of two "
+ "and it must be at least 16. 16 is default.")
+#else
DEFINE_int(sim_stack_alignment, 8,
"Stack alingment in bytes in simulator (4 or 8, 8 is default)")
+#endif
+DEFINE_int(sim_stack_size, 2 * MB / KB,
+ "Stack size of the ARM64 simulator in kBytes (default is 2 MB)")
+DEFINE_bool(log_regs_modified, true,
+ "When logging register values, only print modified registers.")
+DEFINE_bool(log_colour, true,
+ "When logging, try to use coloured output.")
+DEFINE_bool(ignore_asm_unimplemented_break, false,
+ "Don't break for ASM_UNIMPLEMENTED_BREAK macros.")
+DEFINE_bool(trace_sim_messages, false,
+ "Trace simulator debug messages. Implied by --trace-sim.")
// isolate.cc
+DEFINE_bool(stack_trace_on_illegal, false,
+ "print stack trace when an illegal exception is thrown")
DEFINE_bool(abort_on_uncaught_exception, false,
"abort program (dump core) when an uncaught exception is thrown")
-DEFINE_bool(trace_exception, false,
- "print stack trace when throwing exceptions")
DEFINE_bool(randomize_hashes, true,
"randomize hashes to avoid predictable hash collisions "
"(with snapshots this option cannot override the baked-in seed)")
@@ -633,7 +651,6 @@ DEFINE_bool(profile_hydrogen_code_stub_compilation, false,
"Print the time it takes to lazily compile hydrogen code stubs.")
DEFINE_bool(predictable, false, "enable predictable mode")
-DEFINE_neg_implication(predictable, randomize_hashes)
DEFINE_neg_implication(predictable, concurrent_recompilation)
DEFINE_neg_implication(predictable, concurrent_osr)
DEFINE_neg_implication(predictable, concurrent_sweeping)
@@ -799,6 +816,11 @@ DEFINE_bool(log_timer_events, false,
"Time events including external callbacks.")
DEFINE_implication(log_timer_events, log_internal_timer_events)
DEFINE_implication(log_internal_timer_events, prof)
+DEFINE_bool(log_instruction_stats, false, "Log AArch64 instruction statistics.")
+DEFINE_string(log_instruction_file, "arm64_inst.csv",
+ "AArch64 instruction statistics log file.")
+DEFINE_int(log_instruction_period, 1 << 22,
+ "AArch64 instruction statistics logging period.")
DEFINE_bool(redirect_code_traces, false,
"output deopt information and disassembly into file "
@@ -806,6 +828,9 @@ DEFINE_bool(redirect_code_traces, false,
DEFINE_string(redirect_code_traces_to, NULL,
"output deopt information and disassembly into the given file")
+DEFINE_bool(hydrogen_track_positions, false,
+ "track source code positions when building IR")
+
//
// Disassembler only flags
//
@@ -838,8 +863,6 @@ DEFINE_bool(print_unopt_code, false, "print unoptimized code before "
"printing optimized code based on it")
DEFINE_bool(print_code_verbose, false, "print more information for code")
DEFINE_bool(print_builtin_code, false, "print generated code for builtins")
-DEFINE_bool(emit_opt_code_positions, false,
- "annotate optimize code with source code positions")
#ifdef ENABLE_DISASSEMBLER
DEFINE_bool(sodium, false, "print generated code output suitable for use with "
@@ -848,7 +871,7 @@ DEFINE_bool(sodium, false, "print generated code output suitable for use with "
DEFINE_implication(sodium, print_code_stubs)
DEFINE_implication(sodium, print_code)
DEFINE_implication(sodium, print_opt_code)
-DEFINE_implication(sodium, emit_opt_code_positions)
+DEFINE_implication(sodium, hydrogen_track_positions)
DEFINE_implication(sodium, code_comments)
DEFINE_bool(print_all_code, false, "enable all flags related to printing code")
@@ -871,7 +894,7 @@ DEFINE_implication(print_all_code, trace_codegen)
#define FLAG FLAG_READONLY
// assembler-arm.h
-DEFINE_bool(enable_ool_constant_pool, false,
+DEFINE_bool(enable_ool_constant_pool, V8_OOL_CONSTANT_POOL,
"enable use of out-of-line constant pools (ARM only)")
// Cleanup...
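
The flag changes above rewire implications: --harmony now turns on --es_staging, which in turn enables --harmony_maths. A tiny sketch of how a DEFINE_implication-style pass can be resolved after command-line parsing is shown below; the Flags struct and flag names are illustrative only and not V8's actual flag machinery.

#include <cstdio>

struct Flags {
  bool harmony = false;
  bool es_staging = false;
  bool harmony_maths = false;
};

// Run once after parsing: if the premise flag is set, force the conclusion on.
void EnforceImplications(Flags* f) {
  if (f->harmony) f->es_staging = true;
  if (f->es_staging) f->harmony_maths = true;
}

int main() {
  Flags f;
  f.harmony = true;  // as if --harmony had been passed
  EnforceImplications(&f);
  std::printf("es_staging=%d harmony_maths=%d\n", f.es_staging, f.harmony_maths);
  return 0;
}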
diff --git a/deps/v8/src/frames-inl.h b/deps/v8/src/frames-inl.h
index 2b15bfffab..aacb5664a2 100644
--- a/deps/v8/src/frames-inl.h
+++ b/deps/v8/src/frames-inl.h
@@ -36,6 +36,8 @@
#include "ia32/frames-ia32.h"
#elif V8_TARGET_ARCH_X64
#include "x64/frames-x64.h"
+#elif V8_TARGET_ARCH_ARM64
+#include "arm64/frames-arm64.h"
#elif V8_TARGET_ARCH_ARM
#include "arm/frames-arm.h"
#elif V8_TARGET_ARCH_MIPS
@@ -199,6 +201,11 @@ inline Address StandardFrame::ComputePCAddress(Address fp) {
}
+inline Address StandardFrame::ComputeConstantPoolAddress(Address fp) {
+ return fp + StandardFrameConstants::kConstantPoolOffset;
+}
+
+
inline bool StandardFrame::IsArgumentsAdaptorFrame(Address fp) {
Object* marker =
Memory::Object_at(fp + StandardFrameConstants::kContextOffset);
diff --git a/deps/v8/src/frames.cc b/deps/v8/src/frames.cc
index 3b55c276cf..0c47de910d 100644
--- a/deps/v8/src/frames.cc
+++ b/deps/v8/src/frames.cc
@@ -531,6 +531,10 @@ void ExitFrame::ComputeCallerState(State* state) const {
state->fp = Memory::Address_at(fp() + ExitFrameConstants::kCallerFPOffset);
state->pc_address = ResolveReturnAddressLocation(
reinterpret_cast<Address*>(fp() + ExitFrameConstants::kCallerPCOffset));
+ if (FLAG_enable_ool_constant_pool) {
+ state->constant_pool_address = reinterpret_cast<Address*>(
+ fp() + ExitFrameConstants::kConstantPoolOffset);
+ }
}
@@ -574,6 +578,8 @@ void ExitFrame::FillState(Address fp, Address sp, State* state) {
state->fp = fp;
state->pc_address = ResolveReturnAddressLocation(
reinterpret_cast<Address*>(sp - 1 * kPCOnStackSize));
+ state->constant_pool_address =
+ reinterpret_cast<Address*>(fp + ExitFrameConstants::kConstantPoolOffset);
}
@@ -610,6 +616,8 @@ void StandardFrame::ComputeCallerState(State* state) const {
state->fp = caller_fp();
state->pc_address = ResolveReturnAddressLocation(
reinterpret_cast<Address*>(ComputePCAddress(fp())));
+ state->constant_pool_address =
+ reinterpret_cast<Address*>(ComputeConstantPoolAddress(fp()));
}
diff --git a/deps/v8/src/frames.h b/deps/v8/src/frames.h
index e5b6d3dd02..17f0cb35ab 100644
--- a/deps/v8/src/frames.h
+++ b/deps/v8/src/frames.h
@@ -35,7 +35,11 @@
namespace v8 {
namespace internal {
+#if V8_TARGET_ARCH_ARM64
+typedef uint64_t RegList;
+#else
typedef uint32_t RegList;
+#endif
// Get the number of registers in a given register list.
int NumRegs(RegList list);
@@ -221,10 +225,12 @@ class StackFrame BASE_EMBEDDED {
};
struct State {
- State() : sp(NULL), fp(NULL), pc_address(NULL) { }
+ State() : sp(NULL), fp(NULL), pc_address(NULL),
+ constant_pool_address(NULL) { }
Address sp;
Address fp;
Address* pc_address;
+ Address* constant_pool_address;
};
// Copy constructor; it breaks the connection to host iterator
@@ -266,6 +272,11 @@ class StackFrame BASE_EMBEDDED {
Address pc() const { return *pc_address(); }
void set_pc(Address pc) { *pc_address() = pc; }
+ Address constant_pool() const { return *constant_pool_address(); }
+ void set_constant_pool(ConstantPoolArray* constant_pool) {
+ *constant_pool_address() = reinterpret_cast<Address>(constant_pool);
+ }
+
virtual void SetCallerFp(Address caller_fp) = 0;
// Manually changes value of fp in this object.
@@ -273,6 +284,10 @@ class StackFrame BASE_EMBEDDED {
Address* pc_address() const { return state_.pc_address; }
+ Address* constant_pool_address() const {
+ return state_.constant_pool_address;
+ }
+
// Get the id of this stack frame.
Id id() const { return static_cast<Id>(OffsetFrom(caller_sp())); }
@@ -492,6 +507,10 @@ class StandardFrame: public StackFrame {
// by the provided frame pointer.
static inline Address ComputePCAddress(Address fp);
+ // Computes the address of the constant pool field in the standard
+ // frame given by the provided frame pointer.
+ static inline Address ComputeConstantPoolAddress(Address fp);
+
// Iterate over expression stack including stack handlers, locals,
// and parts of the fixed part including context and code fields.
void IterateExpressions(ObjectVisitor* v) const;
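
ComputeConstantPoolAddress mirrors ComputePCAddress: the constant-pool slot of a standard frame lives at a fixed offset from the frame pointer, so its address is simply fp plus that offset. The freestanding sketch below shows that address arithmetic; the offset value is invented for illustration, whereas V8 takes the real one from the per-architecture StandardFrameConstants.

#include <cstdint>
#include <cstdio>

using Address = uint8_t*;

// Hypothetical frame-layout constant (not the real V8 value).
const int kConstantPoolOffset = -2 * static_cast<int>(sizeof(void*));

inline Address ComputeConstantPoolAddress(Address fp) {
  return fp + kConstantPoolOffset;
}

int main() {
  // Pretend this buffer is a stack frame and the pointer below is its fp.
  uint8_t frame[64] = {};
  Address fp = frame + 32;
  std::printf("fp=%p constant_pool_slot=%p\n",
              static_cast<void*>(fp),
              static_cast<void*>(ComputeConstantPoolAddress(fp)));
  return 0;
}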
diff --git a/deps/v8/src/full-codegen.cc b/deps/v8/src/full-codegen.cc
index e14afefda4..fa9ecf41bc 100644
--- a/deps/v8/src/full-codegen.cc
+++ b/deps/v8/src/full-codegen.cc
@@ -345,7 +345,6 @@ bool FullCodeGenerator::MakeCode(CompilationInfo* info) {
info->function()->scope()->AllowsLazyCompilation());
cgen.PopulateDeoptimizationData(code);
cgen.PopulateTypeFeedbackInfo(code);
- cgen.PopulateTypeFeedbackCells(code);
code->set_has_deoptimization_support(info->HasDeoptimizationSupport());
code->set_handler_table(*cgen.handler_table());
#ifdef ENABLE_DEBUGGER_SUPPORT
@@ -387,6 +386,18 @@ unsigned FullCodeGenerator::EmitBackEdgeTable() {
}
+void FullCodeGenerator::InitializeFeedbackVector() {
+ int length = info_->function()->slot_count();
+ feedback_vector_ = isolate()->factory()->NewFixedArray(length, TENURED);
+ Handle<Object> sentinel = TypeFeedbackInfo::UninitializedSentinel(isolate());
+ // Ensure that it's safe to set without using a write barrier.
+ ASSERT_EQ(isolate()->heap()->uninitialized_symbol(), *sentinel);
+ for (int i = 0; i < length; i++) {
+ feedback_vector_->set(i, *sentinel, SKIP_WRITE_BARRIER);
+ }
+}
+
+
void FullCodeGenerator::PopulateDeoptimizationData(Handle<Code> code) {
// Fill in the deoptimization information.
ASSERT(info_->HasDeoptimizationSupport() || bailout_entries_.is_empty());
@@ -405,6 +416,7 @@ void FullCodeGenerator::PopulateDeoptimizationData(Handle<Code> code) {
void FullCodeGenerator::PopulateTypeFeedbackInfo(Handle<Code> code) {
Handle<TypeFeedbackInfo> info = isolate()->factory()->NewTypeFeedbackInfo();
info->set_ic_total_count(ic_total_count_);
+ info->set_feedback_vector(*FeedbackVector());
ASSERT(!isolate()->heap()->InNewSpace(*info));
code->set_type_feedback_info(*info);
}
@@ -425,21 +437,6 @@ void FullCodeGenerator::Initialize() {
}
-void FullCodeGenerator::PopulateTypeFeedbackCells(Handle<Code> code) {
- if (type_feedback_cells_.is_empty()) return;
- int length = type_feedback_cells_.length();
- int array_size = TypeFeedbackCells::LengthOfFixedArray(length);
- Handle<TypeFeedbackCells> cache = Handle<TypeFeedbackCells>::cast(
- isolate()->factory()->NewFixedArray(array_size, TENURED));
- for (int i = 0; i < length; i++) {
- cache->SetAstId(i, type_feedback_cells_[i].ast_id);
- cache->SetCell(i, *type_feedback_cells_[i].cell);
- }
- TypeFeedbackInfo::cast(code->type_feedback_info())->set_type_feedback_cells(
- *cache);
-}
-
-
void FullCodeGenerator::PrepareForBailout(Expression* node, State state) {
PrepareForBailoutForId(node->id(), state);
}
@@ -449,13 +446,13 @@ void FullCodeGenerator::CallLoadIC(ContextualMode contextual_mode,
TypeFeedbackId id) {
ExtraICState extra_state = LoadIC::ComputeExtraICState(contextual_mode);
Handle<Code> ic = LoadIC::initialize_stub(isolate(), extra_state);
- CallIC(ic, contextual_mode, id);
+ CallIC(ic, id);
}
-void FullCodeGenerator::CallStoreIC(ContextualMode mode, TypeFeedbackId id) {
+void FullCodeGenerator::CallStoreIC(TypeFeedbackId id) {
Handle<Code> ic = StoreIC::initialize_stub(isolate(), strict_mode());
- CallIC(ic, mode, id);
+ CallIC(ic, id);
}
@@ -490,13 +487,6 @@ void FullCodeGenerator::PrepareForBailoutForId(BailoutId id, State state) {
}
-void FullCodeGenerator::RecordTypeFeedbackCell(
- TypeFeedbackId id, Handle<Cell> cell) {
- TypeFeedbackCellEntry entry = { id, cell };
- type_feedback_cells_.Add(entry, zone());
-}
-
-
void FullCodeGenerator::RecordBackEdge(BailoutId ast_id) {
// The pc offset does not need to be encoded and packed together with a state.
ASSERT(masm_->pc_offset() > 0);
@@ -634,7 +624,7 @@ void FullCodeGenerator::AllocateModules(ZoneList<Declaration*>* declarations) {
ASSERT(scope->interface()->Index() >= 0);
__ Push(Smi::FromInt(scope->interface()->Index()));
__ Push(scope->GetScopeInfo());
- __ CallRuntime(Runtime::kPushModuleContext, 2);
+ __ CallRuntime(Runtime::kHiddenPushModuleContext, 2);
StoreToFrameField(StandardFrameConstants::kContextOffset,
context_register());
@@ -774,7 +764,7 @@ void FullCodeGenerator::VisitModuleLiteral(ModuleLiteral* module) {
ASSERT(interface->Index() >= 0);
__ Push(Smi::FromInt(interface->Index()));
__ Push(Smi::FromInt(0));
- __ CallRuntime(Runtime::kPushModuleContext, 2);
+ __ CallRuntime(Runtime::kHiddenPushModuleContext, 2);
StoreToFrameField(StandardFrameConstants::kContextOffset, context_register());
{
@@ -825,10 +815,10 @@ void FullCodeGenerator::VisitModuleUrl(ModuleUrl* module) {
int FullCodeGenerator::DeclareGlobalsFlags() {
- ASSERT(DeclareGlobalsLanguageMode::is_valid(language_mode()));
+ ASSERT(DeclareGlobalsStrictMode::is_valid(strict_mode()));
return DeclareGlobalsEvalFlag::encode(is_eval()) |
DeclareGlobalsNativeFlag::encode(is_native()) |
- DeclareGlobalsLanguageMode::encode(language_mode());
+ DeclareGlobalsStrictMode::encode(strict_mode());
}
@@ -893,7 +883,7 @@ void FullCodeGenerator::SetExpressionPosition(Expression* expr) {
}
}
#else
- CodeGenerator::RecordPositions(masm_, pos);
+ CodeGenerator::RecordPositions(masm_, expr->position());
#endif
}
@@ -918,7 +908,6 @@ void FullCodeGenerator::SetSourcePosition(int pos) {
const FullCodeGenerator::InlineFunctionGenerator
FullCodeGenerator::kInlineFunctionGenerators[] = {
INLINE_FUNCTION_LIST(INLINE_FUNCTION_GENERATOR_ADDRESS)
- INLINE_RUNTIME_FUNCTION_LIST(INLINE_FUNCTION_GENERATOR_ADDRESS)
};
#undef INLINE_FUNCTION_GENERATOR_ADDRESS
@@ -1102,7 +1091,7 @@ void FullCodeGenerator::VisitBlock(Block* stmt) {
{ Comment cmnt(masm_, "[ Extend block context");
__ Push(scope_->GetScopeInfo());
PushFunctionArgumentForContextAllocation();
- __ CallRuntime(Runtime::kPushBlockContext, 2);
+ __ CallRuntime(Runtime::kHiddenPushBlockContext, 2);
// Replace the context stored in the frame.
StoreToFrameField(StandardFrameConstants::kContextOffset,
@@ -1134,7 +1123,7 @@ void FullCodeGenerator::VisitModuleStatement(ModuleStatement* stmt) {
__ Push(Smi::FromInt(stmt->proxy()->interface()->Index()));
__ Push(Smi::FromInt(0));
- __ CallRuntime(Runtime::kPushModuleContext, 2);
+ __ CallRuntime(Runtime::kHiddenPushModuleContext, 2);
StoreToFrameField(
StandardFrameConstants::kContextOffset, context_register());
@@ -1273,7 +1262,7 @@ void FullCodeGenerator::VisitWithStatement(WithStatement* stmt) {
VisitForStackValue(stmt->expression());
PushFunctionArgumentForContextAllocation();
- __ CallRuntime(Runtime::kPushWithContext, 2);
+ __ CallRuntime(Runtime::kHiddenPushWithContext, 2);
StoreToFrameField(StandardFrameConstants::kContextOffset, context_register());
Scope* saved_scope = scope();
@@ -1426,7 +1415,7 @@ void FullCodeGenerator::VisitTryCatchStatement(TryCatchStatement* stmt) {
__ Push(stmt->variable()->name());
__ Push(result_register());
PushFunctionArgumentForContextAllocation();
- __ CallRuntime(Runtime::kPushCatchContext, 3);
+ __ CallRuntime(Runtime::kHiddenPushCatchContext, 3);
StoreToFrameField(StandardFrameConstants::kContextOffset,
context_register());
}
@@ -1490,7 +1479,7 @@ void FullCodeGenerator::VisitTryFinallyStatement(TryFinallyStatement* stmt) {
// rethrow the exception if it returns.
__ Call(&finally_entry);
__ Push(result_register());
- __ CallRuntime(Runtime::kReThrow, 1);
+ __ CallRuntime(Runtime::kHiddenReThrow, 1);
// Finally block implementation.
__ bind(&finally_entry);
@@ -1616,7 +1605,7 @@ void FullCodeGenerator::VisitNativeFunctionLiteral(
void FullCodeGenerator::VisitThrow(Throw* expr) {
Comment cmnt(masm_, "[ Throw");
VisitForStackValue(expr->exception());
- __ CallRuntime(Runtime::kThrow, 1);
+ __ CallRuntime(Runtime::kHiddenThrow, 1);
// Never returns here.
}
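
InitializeFeedbackVector above allocates one tenured fixed array per function and pre-fills every slot with the uninitialized sentinel, so ICs can later tell "never executed" apart from real feedback. The sketch below shows just that pre-fill step with plain pointers instead of heap handles; Object and kUninitializedSentinel are stand-ins, not V8 objects.

#include <cstdio>
#include <vector>

struct Object {};                       // stand-in for a heap object
static Object kUninitializedSentinel;   // shared "no feedback yet" marker

std::vector<Object*> InitializeFeedbackVector(int slot_count) {
  std::vector<Object*> vector(slot_count);
  for (int i = 0; i < slot_count; i++) {
    // Every slot starts out pointing at the same sentinel; ICs overwrite a
    // slot with real feedback the first time they run.
    vector[i] = &kUninitializedSentinel;
  }
  return vector;
}

int main() {
  std::vector<Object*> feedback = InitializeFeedbackVector(4);
  std::printf("slots=%zu all_sentinel=%d\n", feedback.size(),
              feedback[0] == &kUninitializedSentinel);
  return 0;
}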
diff --git a/deps/v8/src/full-codegen.h b/deps/v8/src/full-codegen.h
index d52f3c410c..0d0a6ffedc 100644
--- a/deps/v8/src/full-codegen.h
+++ b/deps/v8/src/full-codegen.h
@@ -96,9 +96,6 @@ class FullCodeGenerator: public AstVisitor {
? info->function()->ast_node_count() : 0,
info->zone()),
back_edges_(2, info->zone()),
- type_feedback_cells_(info->HasDeoptimizationSupport()
- ? info->function()->ast_node_count() : 0,
- info->zone()),
ic_total_count_(0) {
Initialize();
}
@@ -130,6 +127,9 @@ class FullCodeGenerator: public AstVisitor {
static const int kCodeSizeMultiplier = 162;
#elif V8_TARGET_ARCH_ARM
static const int kCodeSizeMultiplier = 142;
+#elif V8_TARGET_ARCH_ARM64
+// TODO(all): Copied ARM value. Check this is sensible for ARM64.
+ static const int kCodeSizeMultiplier = 142;
#elif V8_TARGET_ARCH_MIPS
static const int kCodeSizeMultiplier = 142;
#else
@@ -434,9 +434,15 @@ class FullCodeGenerator: public AstVisitor {
void PrepareForBailout(Expression* node, State state);
void PrepareForBailoutForId(BailoutId id, State state);
- // Cache cell support. This associates AST ids with global property cells
- // that will be cleared during GC and collected by the type-feedback oracle.
- void RecordTypeFeedbackCell(TypeFeedbackId id, Handle<Cell> cell);
+ // Feedback slot support. The feedback vector will be cleared during gc and
+ // collected by the type-feedback oracle.
+ Handle<FixedArray> FeedbackVector() {
+ return feedback_vector_;
+ }
+ void StoreFeedbackVectorSlot(int slot, Handle<Object> object) {
+ feedback_vector_->set(slot, *object);
+ }
+ void InitializeFeedbackVector();
// Record a call's return site offset, used to rebuild the frame if the
// called function was inlined at the site.
@@ -491,7 +497,6 @@ class FullCodeGenerator: public AstVisitor {
#define EMIT_INLINE_RUNTIME_CALL(name, x, y) \
void Emit##name(CallRuntime* expr);
INLINE_FUNCTION_LIST(EMIT_INLINE_RUNTIME_CALL)
- INLINE_RUNTIME_FUNCTION_LIST(EMIT_INLINE_RUNTIME_CALL)
#undef EMIT_INLINE_RUNTIME_CALL
// Platform-specific code for resuming generators.
@@ -552,6 +557,11 @@ class FullCodeGenerator: public AstVisitor {
void EmitVariableAssignment(Variable* var,
Token::Value op);
+ // Helper functions to EmitVariableAssignment
+ void EmitStoreToStackLocalOrContextSlot(Variable* var,
+ MemOperand location);
+ void EmitCallStoreContextSlot(Handle<String> name, StrictMode strict_mode);
+
// Complete a named property assignment. The receiver is expected on top
// of the stack and the right-hand-side value in the accumulator.
void EmitNamedPropertyAssignment(Assignment* expr);
@@ -562,13 +572,11 @@ class FullCodeGenerator: public AstVisitor {
void EmitKeyedPropertyAssignment(Assignment* expr);
void CallIC(Handle<Code> code,
- ContextualMode mode = NOT_CONTEXTUAL,
TypeFeedbackId id = TypeFeedbackId::None());
void CallLoadIC(ContextualMode mode,
TypeFeedbackId id = TypeFeedbackId::None());
- void CallStoreIC(ContextualMode mode,
- TypeFeedbackId id = TypeFeedbackId::None());
+ void CallStoreIC(TypeFeedbackId id = TypeFeedbackId::None());
void SetFunctionPosition(FunctionLiteral* fun);
void SetReturnPosition(FunctionLiteral* fun);
@@ -598,11 +606,7 @@ class FullCodeGenerator: public AstVisitor {
Handle<Script> script() { return info_->script(); }
bool is_eval() { return info_->is_eval(); }
bool is_native() { return info_->is_native(); }
- bool is_classic_mode() { return language_mode() == CLASSIC_MODE; }
- StrictModeFlag strict_mode() {
- return is_classic_mode() ? kNonStrictMode : kStrictMode;
- }
- LanguageMode language_mode() { return function()->language_mode(); }
+ StrictMode strict_mode() { return function()->strict_mode(); }
FunctionLiteral* function() { return info_->function(); }
Scope* scope() { return scope_; }
@@ -635,7 +639,6 @@ class FullCodeGenerator: public AstVisitor {
void Generate();
void PopulateDeoptimizationData(Handle<Code> code);
void PopulateTypeFeedbackInfo(Handle<Code> code);
- void PopulateTypeFeedbackCells(Handle<Code> code);
Handle<FixedArray> handler_table() { return handler_table_; }
@@ -650,12 +653,6 @@ class FullCodeGenerator: public AstVisitor {
uint32_t loop_depth;
};
- struct TypeFeedbackCellEntry {
- TypeFeedbackId ast_id;
- Handle<Cell> cell;
- };
-
-
class ExpressionContext BASE_EMBEDDED {
public:
explicit ExpressionContext(FullCodeGenerator* codegen)
@@ -845,9 +842,9 @@ class FullCodeGenerator: public AstVisitor {
ZoneList<BailoutEntry> bailout_entries_;
GrowableBitVector prepared_bailout_ids_;
ZoneList<BackEdgeEntry> back_edges_;
- ZoneList<TypeFeedbackCellEntry> type_feedback_cells_;
int ic_total_count_;
Handle<FixedArray> handler_table_;
+ Handle<FixedArray> feedback_vector_;
Handle<Cell> profiling_counter_;
bool generate_debug_code_;
diff --git a/deps/v8/src/func-name-inferrer.cc b/deps/v8/src/func-name-inferrer.cc
index 5409a4e180..441113b7d8 100644
--- a/deps/v8/src/func-name-inferrer.cc
+++ b/deps/v8/src/func-name-inferrer.cc
@@ -83,11 +83,14 @@ Handle<String> FuncNameInferrer::MakeNameFromStackHelper(int pos,
return MakeNameFromStackHelper(pos + 1, prev);
} else {
if (prev->length() > 0) {
+ Handle<String> name = names_stack_.at(pos).name;
+ if (prev->length() + name->length() + 1 > String::kMaxLength) return prev;
Factory* factory = isolate()->factory();
- Handle<String> curr = factory->NewConsString(
- factory->dot_string(), names_stack_.at(pos).name);
- return MakeNameFromStackHelper(pos + 1,
- factory->NewConsString(prev, curr));
+ Handle<String> curr = factory->NewConsString(factory->dot_string(), name);
+ CHECK_NOT_EMPTY_HANDLE(isolate(), curr);
+ curr = factory->NewConsString(prev, curr);
+ CHECK_NOT_EMPTY_HANDLE(isolate(), curr);
+ return MakeNameFromStackHelper(pos + 1, curr);
} else {
return MakeNameFromStackHelper(pos + 1, names_stack_.at(pos).name);
}
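
The func-name-inferrer change above guards the cons-string concatenation against String::kMaxLength: if appending "." plus the next name would overflow the limit, the helper returns the prefix built so far instead of failing. Below is a standalone sketch of that bail-out using std::string and a deliberately tiny limit in place of String::kMaxLength; the iterative shape differs from V8's recursive helper but the guard is the same.

#include <cstdio>
#include <string>
#include <vector>

const size_t kMaxLength = 16;  // stand-in for String::kMaxLength

std::string MakeNameFromStack(const std::vector<std::string>& names) {
  std::string result;
  for (const std::string& name : names) {
    if (result.empty()) {
      result = name;
      continue;
    }
    // Bail out and keep what we have if "." + name would overflow the limit.
    if (result.length() + name.length() + 1 > kMaxLength) return result;
    result += "." + name;
  }
  return result;
}

int main() {
  // Prints "outer.inner": the last segment would exceed the limit.
  std::printf("%s\n",
              MakeNameFromStack({"outer", "inner", "veryLongTrailingName"}).c_str());
  return 0;
}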
diff --git a/deps/v8/src/func-name-inferrer.h b/deps/v8/src/func-name-inferrer.h
index f57e778604..41953ffed9 100644
--- a/deps/v8/src/func-name-inferrer.h
+++ b/deps/v8/src/func-name-inferrer.h
@@ -28,9 +28,13 @@
#ifndef V8_FUNC_NAME_INFERRER_H_
#define V8_FUNC_NAME_INFERRER_H_
+#include "handles.h"
+#include "zone.h"
+
namespace v8 {
namespace internal {
+class FunctionLiteral;
class Isolate;
// FuncNameInferrer is a stateful class that is used to perform name
diff --git a/deps/v8/src/global-handles.cc b/deps/v8/src/global-handles.cc
index 09449791f4..e06f794828 100644
--- a/deps/v8/src/global-handles.cc
+++ b/deps/v8/src/global-handles.cc
@@ -235,10 +235,12 @@ class GlobalHandles::Node {
weak_callback_ = weak_callback;
}
- void ClearWeakness() {
+ void* ClearWeakness() {
ASSERT(state() != FREE);
+ void* p = parameter();
set_state(NORMAL);
set_parameter(NULL);
+ return p;
}
bool PostGarbageCollectionProcessing(Isolate* isolate) {
@@ -271,7 +273,7 @@ class GlobalHandles::Node {
}
// Absence of explicit cleanup or revival of weak handle
// in most of the cases would lead to memory leak.
- ASSERT(state() != NEAR_DEATH);
+ CHECK(state() != NEAR_DEATH);
return true;
}
@@ -502,8 +504,8 @@ void GlobalHandles::MakeWeak(Object** location,
}
-void GlobalHandles::ClearWeakness(Object** location) {
- Node::FromLocation(location)->ClearWeakness();
+void* GlobalHandles::ClearWeakness(Object** location) {
+ return Node::FromLocation(location)->ClearWeakness();
}
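
ClearWeakness now returns the parameter that was registered when the handle was made weak, so the caller can reclaim (typically delete) the context object it passed in. The toy sketch below illustrates that "clear and hand back the old parameter" contract; ToyHandle and its states are invented for illustration and are not the real GlobalHandles::Node.

#include <cassert>
#include <cstdio>

struct ToyHandle {
  enum State { NORMAL, WEAK } state = NORMAL;
  void* parameter = nullptr;

  void MakeWeak(void* param) {
    state = WEAK;
    parameter = param;
  }

  // Returns the parameter that was passed to MakeWeak so the caller can
  // free or reuse it.
  void* ClearWeakness() {
    assert(state == WEAK);
    void* p = parameter;
    state = NORMAL;
    parameter = nullptr;
    return p;
  }
};

int main() {
  int context = 42;
  ToyHandle h;
  h.MakeWeak(&context);
  int* recovered = static_cast<int*>(h.ClearWeakness());
  std::printf("recovered=%d\n", *recovered);
  return 0;
}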
diff --git a/deps/v8/src/global-handles.h b/deps/v8/src/global-handles.h
index a40645199c..13fc111d81 100644
--- a/deps/v8/src/global-handles.h
+++ b/deps/v8/src/global-handles.h
@@ -161,7 +161,7 @@ class GlobalHandles {
}
// Clear the weakness of a global handle.
- static void ClearWeakness(Object** location);
+ static void* ClearWeakness(Object** location);
// Clear the weakness of a global handle.
static void MarkIndependent(Object** location);
@@ -340,6 +340,7 @@ class EternalHandles {
enum SingletonHandle {
I18N_TEMPLATE_ONE,
I18N_TEMPLATE_TWO,
+ DATE_CACHE_VERSION,
NUMBER_OF_SINGLETON_HANDLES
};
diff --git a/deps/v8/src/globals.h b/deps/v8/src/globals.h
index b9437f2ac4..db666d804b 100644
--- a/deps/v8/src/globals.h
+++ b/deps/v8/src/globals.h
@@ -71,6 +71,10 @@ namespace internal {
#define V8_HOST_ARCH_IA32 1
#define V8_HOST_ARCH_32_BIT 1
#define V8_HOST_CAN_READ_UNALIGNED 1
+#elif defined(__AARCH64EL__)
+#define V8_HOST_ARCH_ARM64 1
+#define V8_HOST_ARCH_64_BIT 1
+#define V8_HOST_CAN_READ_UNALIGNED 1
#elif defined(__ARMEL__)
#define V8_HOST_ARCH_ARM 1
#define V8_HOST_ARCH_32_BIT 1
@@ -78,7 +82,7 @@ namespace internal {
#define V8_HOST_ARCH_MIPS 1
#define V8_HOST_ARCH_32_BIT 1
#else
-#error Host architecture was not detected as supported by v8
+#error "Host architecture was not detected as supported by v8"
#endif
#if defined(__ARM_ARCH_7A__) || \
@@ -95,11 +99,13 @@ namespace internal {
// in the same way as the host architecture, that is, target the native
// environment as presented by the compiler.
#if !V8_TARGET_ARCH_X64 && !V8_TARGET_ARCH_IA32 && \
- !V8_TARGET_ARCH_ARM && !V8_TARGET_ARCH_MIPS
+ !V8_TARGET_ARCH_ARM && !V8_TARGET_ARCH_ARM64 && !V8_TARGET_ARCH_MIPS
#if defined(_M_X64) || defined(__x86_64__)
#define V8_TARGET_ARCH_X64 1
#elif defined(_M_IX86) || defined(__i386__)
#define V8_TARGET_ARCH_IA32 1
+#elif defined(__AARCH64EL__)
+#define V8_TARGET_ARCH_ARM64 1
#elif defined(__ARMEL__)
#define V8_TARGET_ARCH_ARM 1
#elif defined(__MIPSEL__)
@@ -119,6 +125,9 @@ namespace internal {
#if (V8_TARGET_ARCH_ARM && !(V8_HOST_ARCH_IA32 || V8_HOST_ARCH_ARM))
#error Target architecture arm is only supported on arm and ia32 host
#endif
+#if (V8_TARGET_ARCH_ARM64 && !(V8_HOST_ARCH_X64 || V8_HOST_ARCH_ARM64))
+#error Target architecture arm64 is only supported on arm64 and x64 host
+#endif
#if (V8_TARGET_ARCH_MIPS && !(V8_HOST_ARCH_IA32 || V8_HOST_ARCH_MIPS))
#error Target architecture mips is only supported on mips and ia32 host
#endif
@@ -127,6 +136,9 @@ namespace internal {
// Setting USE_SIMULATOR explicitly from the build script will force
// the use of a simulated environment.
#if !defined(USE_SIMULATOR)
+#if (V8_TARGET_ARCH_ARM64 && !V8_HOST_ARCH_ARM64)
+#define USE_SIMULATOR 1
+#endif
#if (V8_TARGET_ARCH_ARM && !V8_HOST_ARCH_ARM)
#define USE_SIMULATOR 1
#endif
@@ -142,12 +154,17 @@ namespace internal {
#define V8_TARGET_LITTLE_ENDIAN 1
#elif V8_TARGET_ARCH_ARM
#define V8_TARGET_LITTLE_ENDIAN 1
+#elif V8_TARGET_ARCH_ARM64
+#define V8_TARGET_LITTLE_ENDIAN 1
#elif V8_TARGET_ARCH_MIPS
#define V8_TARGET_LITTLE_ENDIAN 1
#else
#error Unknown target architecture endiannes
#endif
+// Determine whether the architecture uses an out-of-line constant pool.
+#define V8_OOL_CONSTANT_POOL 0
+
// Support for alternative bool type. This is only enabled if the code is
// compiled with USE_MYBOOL defined. This catches some nasty type bugs.
// For instance, 'bool b = "false";' results in b == true! This is a hidden
@@ -376,6 +393,12 @@ F FUNCTION_CAST(Address addr) {
#define DISABLE_ASAN
#endif
+#if V8_CC_GNU
+#define V8_IMMEDIATE_CRASH() __builtin_trap()
+#else
+#define V8_IMMEDIATE_CRASH() ((void(*)())0)()
+#endif
+
// -----------------------------------------------------------------------------
// Forward declarations for frequently used classes
@@ -387,34 +410,9 @@ template <typename T, class P = FreeStoreAllocationPolicy> class List;
// -----------------------------------------------------------------------------
// Declarations for use in both the preparser and the rest of V8.
-// The different language modes that V8 implements. ES5 defines two language
-// modes: an unrestricted mode respectively a strict mode which are indicated by
-// CLASSIC_MODE respectively STRICT_MODE in the enum. The harmony spec drafts
-// for the next ES standard specify a new third mode which is called 'extended
-// mode'. The extended mode is only available if the harmony flag is set. It is
-// based on the 'strict mode' and adds new functionality to it. This means that
-// most of the semantics of these two modes coincide.
-//
-// In the current draft the term 'base code' is used to refer to code that is
-// neither in strict nor extended mode. However, the more distinguishing term
-// 'classic mode' is used in V8 instead to avoid mix-ups.
-
-enum LanguageMode {
- CLASSIC_MODE,
- STRICT_MODE,
- EXTENDED_MODE
-};
-
-
// The Strict Mode (ECMA-262 5th edition, 4.2.2).
-//
-// This flag is used in the backend to represent the language mode. So far
-// there is no semantic difference between the strict and the extended mode in
-// the backend, so both modes are represented by the kStrictMode value.
-enum StrictModeFlag {
- kNonStrictMode,
- kStrictMode
-};
+
+enum StrictMode { SLOPPY, STRICT };
} } // namespace v8::internal
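
Earlier in this globals.h hunk, V8_IMMEDIATE_CRASH prefers __builtin_trap() on GCC-compatible compilers and falls back to calling a null function pointer elsewhere; both force an immediate fault rather than a normal unwind. The sketch below reproduces that conditional under an illustrative macro name, guarded so the crash path only runs when an argument is passed.

#include <cstdio>

#if defined(__GNUC__)
#define IMMEDIATE_CRASH() __builtin_trap()
#else
#define IMMEDIATE_CRASH() ((void (*)())0)()
#endif

int main(int argc, char** argv) {
  (void)argv;
  if (argc > 1) {
    std::printf("crashing on purpose...\n");
    IMMEDIATE_CRASH();  // never returns
  }
  std::printf("pass any argument to trigger the crash\n");
  return 0;
}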
diff --git a/deps/v8/src/handles-inl.h b/deps/v8/src/handles-inl.h
index 22bbd7cd7c..a25b4a2266 100644
--- a/deps/v8/src/handles-inl.h
+++ b/deps/v8/src/handles-inl.h
@@ -97,7 +97,8 @@ bool Handle<T>::IsDereferenceAllowed(DereferenceCheckMode mode) const {
if (!AllowHandleDereference::IsAllowed()) return false;
if (mode == INCLUDE_DEFERRED_CHECK &&
!AllowDeferredHandleDereference::IsAllowed()) {
- // Accessing maps and internalized strings is safe.
+ // Accessing cells, maps and internalized strings is safe.
+ if (heap_object->IsCell()) return true;
if (heap_object->IsMap()) return true;
if (heap_object->IsInternalizedString()) return true;
return !heap->isolate()->IsDeferredHandle(handle);
diff --git a/deps/v8/src/handles.cc b/deps/v8/src/handles.cc
index 830eb09602..398a68265c 100644
--- a/deps/v8/src/handles.cc
+++ b/deps/v8/src/handles.cc
@@ -509,7 +509,7 @@ Handle<FixedArray> GetKeysInFixedArrayFor(Handle<JSReceiver> object,
Isolate* isolate = object->GetIsolate();
Handle<FixedArray> content = isolate->factory()->empty_fixed_array();
Handle<JSObject> arguments_boilerplate = Handle<JSObject>(
- isolate->context()->native_context()->arguments_boilerplate(),
+ isolate->context()->native_context()->sloppy_arguments_boilerplate(),
isolate);
Handle<JSFunction> arguments_function = Handle<JSFunction>(
JSFunction::cast(arguments_boilerplate->map()->constructor()),
@@ -537,10 +537,10 @@ Handle<FixedArray> GetKeysInFixedArrayFor(Handle<JSReceiver> object,
// Check access rights if required.
if (current->IsAccessCheckNeeded() &&
- !isolate->MayNamedAccess(*current,
- isolate->heap()->undefined_value(),
- v8::ACCESS_KEYS)) {
- isolate->ReportFailedAccessCheck(*current, v8::ACCESS_KEYS);
+ !isolate->MayNamedAccessWrapper(current,
+ isolate->factory()->undefined_value(),
+ v8::ACCESS_KEYS)) {
+ isolate->ReportFailedAccessCheckWrapper(current, v8::ACCESS_KEYS);
if (isolate->has_scheduled_exception()) {
isolate->PromoteScheduledException();
*threw = true;
@@ -712,35 +712,12 @@ Handle<FixedArray> GetEnumPropertyKeys(Handle<JSObject> object,
return ReduceFixedArrayTo(storage, enum_size);
} else {
Handle<NameDictionary> dictionary(object->property_dictionary());
-
- int length = dictionary->NumberOfElements();
+ int length = dictionary->NumberOfEnumElements();
if (length == 0) {
return Handle<FixedArray>(isolate->heap()->empty_fixed_array());
}
-
- // The enumeration array is generated by allocating an array big enough to
- // hold all properties that have been seen, whether they are are deleted or
- // not. Subsequently all visible properties are added to the array. If some
- // properties were not visible, the array is trimmed so it only contains
- // visible properties. This improves over adding elements and sorting by
- // index by having linear complexity rather than n*log(n).
-
- // By comparing the monotonous NextEnumerationIndex to the NumberOfElements,
- // we can predict the number of holes in the final array. If there will be
- // more than 50% holes, regenerate the enumeration indices to reduce the
- // number of holes to a minimum. This avoids allocating a large array if
- // many properties were added but subsequently deleted.
- int next_enumeration = dictionary->NextEnumerationIndex();
- if (!object->IsGlobalObject() && next_enumeration > (length * 3) / 2) {
- NameDictionary::DoGenerateNewEnumerationIndices(dictionary);
- next_enumeration = dictionary->NextEnumerationIndex();
- }
-
- Handle<FixedArray> storage =
- isolate->factory()->NewFixedArray(next_enumeration);
-
- storage = Handle<FixedArray>(dictionary->CopyEnumKeysTo(*storage));
- ASSERT(storage->length() == object->NumberOfLocalProperties(DONT_SHOW));
+ Handle<FixedArray> storage = isolate->factory()->NewFixedArray(length);
+ dictionary->CopyEnumKeysTo(*storage);
return storage;
}
}
diff --git a/deps/v8/src/harmony-array.js b/deps/v8/src/harmony-array.js
index 2cedebaae1..d37d875385 100644
--- a/deps/v8/src/harmony-array.js
+++ b/deps/v8/src/harmony-array.js
@@ -51,7 +51,7 @@ function ArrayFind(predicate /* thisArg */) { // length == 1
if (IS_NULL_OR_UNDEFINED(thisArg)) {
thisArg = %GetDefaultReceiver(predicate) || thisArg;
- } else if (!IS_SPEC_OBJECT(thisArg) && %IsClassicModeFunction(predicate)) {
+ } else if (!IS_SPEC_OBJECT(thisArg) && %IsSloppyModeFunction(predicate)) {
thisArg = ToObject(thisArg);
}
@@ -86,7 +86,7 @@ function ArrayFindIndex(predicate /* thisArg */) { // length == 1
if (IS_NULL_OR_UNDEFINED(thisArg)) {
thisArg = %GetDefaultReceiver(predicate) || thisArg;
- } else if (!IS_SPEC_OBJECT(thisArg) && %IsClassicModeFunction(predicate)) {
+ } else if (!IS_SPEC_OBJECT(thisArg) && %IsSloppyModeFunction(predicate)) {
thisArg = ToObject(thisArg);
}
diff --git a/deps/v8/src/harmony-math.js b/deps/v8/src/harmony-math.js
index d57a104042..298fa58cb2 100644
--- a/deps/v8/src/harmony-math.js
+++ b/deps/v8/src/harmony-math.js
@@ -59,8 +59,7 @@ function MathSinh(x) {
// ES6 draft 09-27-13, section 20.2.2.12.
function MathCosh(x) {
if (!IS_NUMBER(x)) x = NonNumberToNumber(x);
- // Idempotent for NaN and +/-Infinity.
- if (!NUMBER_IS_FINITE(x)) return x;
+ if (!NUMBER_IS_FINITE(x)) return MathAbs(x);
return (MathExp(x) + MathExp(-x)) / 2;
}
@@ -110,19 +109,19 @@ function MathAtanh(x) {
}
-//ES6 draft 09-27-13, section 20.2.2.21.
+// ES6 draft 09-27-13, section 20.2.2.21.
function MathLog10(x) {
return MathLog(x) * 0.434294481903251828; // log10(x) = log(x)/log(10).
}
-//ES6 draft 09-27-13, section 20.2.2.22.
+// ES6 draft 09-27-13, section 20.2.2.22.
function MathLog2(x) {
return MathLog(x) * 1.442695040888963407; // log2(x) = log(x)/log(2).
}
-//ES6 draft 09-27-13, section 20.2.2.17.
+// ES6 draft 09-27-13, section 20.2.2.17.
function MathHypot(x, y) { // Function length is 2.
// We may want to introduce fast paths for two arguments and when
// normalization to avoid overflow is not necessary. For now, we
@@ -155,6 +154,93 @@ function MathHypot(x, y) { // Function length is 2.
}
+// ES6 draft 09-27-13, section 20.2.2.16.
+function MathFround(x) {
+ return %Math_fround(TO_NUMBER_INLINE(x));
+}
+
+
+function MathClz32(x) {
+ x = ToUint32(TO_NUMBER_INLINE(x));
+ if (x == 0) return 32;
+ var result = 0;
+ // Binary search.
+ if ((x & 0xFFFF0000) === 0) { x <<= 16; result += 16; };
+ if ((x & 0xFF000000) === 0) { x <<= 8; result += 8; };
+ if ((x & 0xF0000000) === 0) { x <<= 4; result += 4; };
+ if ((x & 0xC0000000) === 0) { x <<= 2; result += 2; };
+ if ((x & 0x80000000) === 0) { x <<= 1; result += 1; };
+ return result;
+}
+
+
+// ES6 draft 09-27-13, section 20.2.2.9.
+// Cube root approximation, refer to: http://metamerist.com/cbrt/cbrt.htm
+// Using initial approximation adapted from Kahan's cbrt and 4 iterations
+// of Newton's method.
+function MathCbrt(x) {
+ if (!IS_NUMBER(x)) x = NonNumberToNumber(x);
+ if (x == 0 || !NUMBER_IS_FINITE(x)) return x;
+ return x >= 0 ? CubeRoot(x) : -CubeRoot(-x);
+}
+
+macro NEWTON_ITERATION_CBRT(x, approx)
+ (1.0 / 3.0) * (x / (approx * approx) + 2 * approx);
+endmacro
+
+function CubeRoot(x) {
+ var approx_hi = MathFloor(%_DoubleHi(x) / 3) + 0x2A9F7893;
+ var approx = %_ConstructDouble(approx_hi, 0);
+ approx = NEWTON_ITERATION_CBRT(x, approx);
+ approx = NEWTON_ITERATION_CBRT(x, approx);
+ approx = NEWTON_ITERATION_CBRT(x, approx);
+ return NEWTON_ITERATION_CBRT(x, approx);
+}
+
+
+
+// ES6 draft 09-27-13, section 20.2.2.14.
+// Use Taylor series to approximate.
+// exp(x) - 1 at 0 == -1 + exp(0) + exp'(0)*x/1! + exp''(0)*x^2/2! + ...
+// == x/1! + x^2/2! + x^3/3! + ...
+// The closer x is to 0, the fewer terms are required.
+function MathExpm1(x) {
+ if (!IS_NUMBER(x)) x = NonNumberToNumber(x);
+ var xabs = MathAbs(x);
+ if (xabs < 2E-7) {
+ return x * (1 + x * (1/2));
+ } else if (xabs < 6E-5) {
+ return x * (1 + x * (1/2 + x * (1/6)));
+ } else if (xabs < 2E-2) {
+ return x * (1 + x * (1/2 + x * (1/6 +
+ x * (1/24 + x * (1/120 + x * (1/720))))));
+ } else { // Use regular exp if not close enough to 0.
+ return MathExp(x) - 1;
+ }
+}
+
+
+// ES6 draft 09-27-13, section 20.2.2.20.
+// Use Taylor series to approximate. With y = x + 1;
+// log(y) at 1 == log(1) + log'(1)(y-1)/1! + log''(1)(y-1)^2/2! + ...
+// == 0 + x - x^2/2 + x^3/3 ...
+// The closer x is to 0, the fewer terms are required.
+function MathLog1p(x) {
+ if (!IS_NUMBER(x)) x = NonNumberToNumber(x);
+ var xabs = MathAbs(x);
+ if (xabs < 1E-7) {
+ return x * (1 - x * (1/2));
+ } else if (xabs < 3E-5) {
+ return x * (1 - x * (1/2 - x * (1/3)));
+ } else if (xabs < 7E-3) {
+ return x * (1 - x * (1/2 - x * (1/3 - x * (1/4 -
+ x * (1/5 - x * (1/6 - x * (1/7)))))));
+ } else { // Use regular log if not close enough to 0.
+ return MathLog(1 + x);
+ }
+}
+
+
function ExtendMath() {
%CheckIsBootstrapping();
@@ -170,8 +256,14 @@ function ExtendMath() {
"atanh", MathAtanh,
"log10", MathLog10,
"log2", MathLog2,
- "hypot", MathHypot
+ "hypot", MathHypot,
+ "fround", MathFround,
+ "clz32", MathClz32,
+ "cbrt", MathCbrt,
+ "log1p", MathLog1p,
+ "expm1", MathExpm1
));
}
+
ExtendMath();
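
The new Math functions above lean on classic numeric tricks: MathClz32 counts leading zeros by halving binary search, MathCbrt seeds a rough cube-root guess and refines it with Newton iterations, and MathExpm1/MathLog1p switch between short Taylor expansions near zero and plain exp/log elsewhere. The C++ sketch below reimplements two of these (clz32 and a Newton-refined cbrt) against uint32_t/double; the cbrt seed uses pow() for brevity instead of the exponent-bit trick in the JS version above.

#include <cmath>
#include <cstdint>
#include <cstdio>

// Count leading zeros of a 32-bit value by binary search, mirroring MathClz32.
int Clz32(uint32_t x) {
  if (x == 0) return 32;
  int result = 0;
  if ((x & 0xFFFF0000u) == 0) { x <<= 16; result += 16; }
  if ((x & 0xFF000000u) == 0) { x <<= 8;  result += 8;  }
  if ((x & 0xF0000000u) == 0) { x <<= 4;  result += 4;  }
  if ((x & 0xC0000000u) == 0) { x <<= 2;  result += 2;  }
  if ((x & 0x80000000u) == 0) { x <<= 1;  result += 1;  }
  return result;
}

// One Newton step for f(r) = r^3 - x: r <- (x / r^2 + 2r) / 3.
static double NewtonCbrtStep(double x, double approx) {
  return (x / (approx * approx) + 2.0 * approx) / 3.0;
}

// Cube root via a rough initial guess refined by four Newton iterations,
// in the spirit of MathCbrt/CubeRoot above.
double Cbrt(double x) {
  if (x == 0.0 || !std::isfinite(x)) return x;
  double ax = std::fabs(x);
  double approx = std::pow(2.0, std::ceil(std::log2(ax)) / 3.0);  // rough seed
  for (int i = 0; i < 4; i++) approx = NewtonCbrtStep(ax, approx);
  return x < 0 ? -approx : approx;
}

int main() {
  std::printf("clz32(1)=%d clz32(0x80000000)=%d\n",
              Clz32(1), Clz32(0x80000000u));
  std::printf("cbrt(27)=%.6f cbrt(-8)=%.6f\n", Cbrt(27.0), Cbrt(-8.0));
  return 0;
}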
diff --git a/deps/v8/src/heap-inl.h b/deps/v8/src/heap-inl.h
index 35bad4af39..063cf30ff3 100644
--- a/deps/v8/src/heap-inl.h
+++ b/deps/v8/src/heap-inl.h
@@ -137,8 +137,8 @@ MaybeObject* Heap::AllocateInternalizedStringImpl(
MaybeObject* Heap::AllocateOneByteInternalizedString(Vector<const uint8_t> str,
uint32_t hash_field) {
- if (str.length() > SeqOneByteString::kMaxLength) {
- return Failure::OutOfMemoryException(0x2);
+ if (str.length() > String::kMaxLength) {
+ return isolate()->ThrowInvalidStringLength();
}
// Compute map and object size.
Map* map = ascii_internalized_string_map();
@@ -170,8 +170,8 @@ MaybeObject* Heap::AllocateOneByteInternalizedString(Vector<const uint8_t> str,
MaybeObject* Heap::AllocateTwoByteInternalizedString(Vector<const uc16> str,
uint32_t hash_field) {
- if (str.length() > SeqTwoByteString::kMaxLength) {
- return Failure::OutOfMemoryException(0x3);
+ if (str.length() > String::kMaxLength) {
+ return isolate()->ThrowInvalidStringLength();
}
// Compute map and object size.
Map* map = internalized_string_map();
@@ -223,7 +223,7 @@ MaybeObject* Heap::AllocateRaw(int size_in_bytes,
HeapProfiler* profiler = isolate_->heap_profiler();
#ifdef DEBUG
if (FLAG_gc_interval >= 0 &&
- !disallow_allocation_failure_ &&
+ AllowAllocationFailure::IsAllowed(isolate_) &&
Heap::allocation_timeout_-- <= 0) {
return Failure::RetryAfterGC(space);
}
@@ -490,7 +490,8 @@ void Heap::ScavengePointer(HeapObject** p) {
}
-void Heap::UpdateAllocationSiteFeedback(HeapObject* object) {
+void Heap::UpdateAllocationSiteFeedback(HeapObject* object,
+ ScratchpadSlotMode mode) {
Heap* heap = object->GetHeap();
ASSERT(heap->InFromSpace(object));
@@ -518,7 +519,7 @@ void Heap::UpdateAllocationSiteFeedback(HeapObject* object) {
if (!memento->IsValid()) return;
if (memento->GetAllocationSite()->IncrementMementoFoundCount()) {
- heap->AddAllocationSiteToScratchpad(memento->GetAllocationSite());
+ heap->AddAllocationSiteToScratchpad(memento->GetAllocationSite(), mode);
}
}
@@ -541,7 +542,7 @@ void Heap::ScavengeObject(HeapObject** p, HeapObject* object) {
return;
}
- UpdateAllocationSiteFeedback(object);
+ UpdateAllocationSiteFeedback(object, IGNORE_SCRATCHPAD_SLOT);
// AllocationMementos are unrooted and shouldn't survive a scavenge
ASSERT(object->map() != object->GetHeap()->allocation_memento_map());
@@ -640,35 +641,26 @@ Isolate* Heap::isolate() {
// Warning: Do not use the identifiers __object__, __maybe_object__ or
// __scope__ in a call to this macro.
-#define CALL_AND_RETRY(ISOLATE, FUNCTION_CALL, RETURN_VALUE, RETURN_EMPTY, OOM)\
+#define CALL_AND_RETRY(ISOLATE, FUNCTION_CALL, RETURN_VALUE, RETURN_EMPTY) \
do { \
GC_GREEDY_CHECK(ISOLATE); \
MaybeObject* __maybe_object__ = FUNCTION_CALL; \
Object* __object__ = NULL; \
if (__maybe_object__->ToObject(&__object__)) RETURN_VALUE; \
- if (__maybe_object__->IsOutOfMemory()) { \
- OOM; \
- } \
if (!__maybe_object__->IsRetryAfterGC()) RETURN_EMPTY; \
(ISOLATE)->heap()->CollectGarbage(Failure::cast(__maybe_object__)-> \
allocation_space(), \
"allocation failure"); \
__maybe_object__ = FUNCTION_CALL; \
if (__maybe_object__->ToObject(&__object__)) RETURN_VALUE; \
- if (__maybe_object__->IsOutOfMemory()) { \
- OOM; \
- } \
if (!__maybe_object__->IsRetryAfterGC()) RETURN_EMPTY; \
(ISOLATE)->counters()->gc_last_resort_from_handles()->Increment(); \
(ISOLATE)->heap()->CollectAllAvailableGarbage("last resort gc"); \
{ \
- AlwaysAllocateScope __scope__; \
+ AlwaysAllocateScope __scope__(ISOLATE); \
__maybe_object__ = FUNCTION_CALL; \
} \
if (__maybe_object__->ToObject(&__object__)) RETURN_VALUE; \
- if (__maybe_object__->IsOutOfMemory()) { \
- OOM; \
- } \
if (__maybe_object__->IsRetryAfterGC()) { \
/* TODO(1181417): Fix this. */ \
v8::internal::Heap::FatalProcessOutOfMemory("CALL_AND_RETRY_LAST", true);\
@@ -682,8 +674,7 @@ Isolate* Heap::isolate() {
ISOLATE, \
FUNCTION_CALL, \
RETURN_VALUE, \
- RETURN_EMPTY, \
- v8::internal::Heap::FatalProcessOutOfMemory("CALL_AND_RETRY", true))
+ RETURN_EMPTY)
#define CALL_HEAP_FUNCTION(ISOLATE, FUNCTION_CALL, TYPE) \
CALL_AND_RETRY_OR_DIE(ISOLATE, \
@@ -700,7 +691,6 @@ Isolate* Heap::isolate() {
CALL_AND_RETRY(ISOLATE, \
FUNCTION_CALL, \
return __object__, \
- return __maybe_object__, \
return __maybe_object__)
@@ -777,21 +767,20 @@ void Heap::CompletelyClearInstanceofCache() {
}
-AlwaysAllocateScope::AlwaysAllocateScope() {
+AlwaysAllocateScope::AlwaysAllocateScope(Isolate* isolate)
+ : heap_(isolate->heap()), daf_(isolate) {
// We shouldn't hit any nested scopes, because that requires
// non-handle code to call handle code. The code still works but
// performance will degrade, so we want to catch this situation
// in debug mode.
- Isolate* isolate = Isolate::Current();
- ASSERT(isolate->heap()->always_allocate_scope_depth_ == 0);
- isolate->heap()->always_allocate_scope_depth_++;
+ ASSERT(heap_->always_allocate_scope_depth_ == 0);
+ heap_->always_allocate_scope_depth_++;
}
AlwaysAllocateScope::~AlwaysAllocateScope() {
- Isolate* isolate = Isolate::Current();
- isolate->heap()->always_allocate_scope_depth_--;
- ASSERT(isolate->heap()->always_allocate_scope_depth_ == 0);
+ heap_->always_allocate_scope_depth_--;
+ ASSERT(heap_->always_allocate_scope_depth_ == 0);
}
@@ -809,6 +798,21 @@ NoWeakObjectVerificationScope::~NoWeakObjectVerificationScope() {
#endif
+GCCallbacksScope::GCCallbacksScope(Heap* heap) : heap_(heap) {
+ heap_->gc_callbacks_depth_++;
+}
+
+
+GCCallbacksScope::~GCCallbacksScope() {
+ heap_->gc_callbacks_depth_--;
+}
+
+
+bool GCCallbacksScope::CheckReenter() {
+ return heap_->gc_callbacks_depth_ == 1;
+}
+
+
void VerifyPointersVisitor::VisitPointers(Object** start, Object** end) {
for (Object** current = start; current < end; current++) {
if ((*current)->IsHeapObject()) {
@@ -820,25 +824,15 @@ void VerifyPointersVisitor::VisitPointers(Object** start, Object** end) {
}
-double GCTracer::SizeOfHeapObjects() {
- return (static_cast<double>(heap_->SizeOfObjects())) / MB;
-}
-
-
-DisallowAllocationFailure::DisallowAllocationFailure() {
-#ifdef DEBUG
- Isolate* isolate = Isolate::Current();
- old_state_ = isolate->heap()->disallow_allocation_failure_;
- isolate->heap()->disallow_allocation_failure_ = true;
-#endif
+void VerifySmisVisitor::VisitPointers(Object** start, Object** end) {
+ for (Object** current = start; current < end; current++) {
+ CHECK((*current)->IsSmi());
+ }
}
-DisallowAllocationFailure::~DisallowAllocationFailure() {
-#ifdef DEBUG
- Isolate* isolate = Isolate::Current();
- isolate->heap()->disallow_allocation_failure_ = old_state_;
-#endif
+double GCTracer::SizeOfHeapObjects() {
+ return (static_cast<double>(heap_->SizeOfObjects())) / MB;
}
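
The GCCallbacksScope added above is a small RAII depth counter. A minimal sketch of the same pattern outside V8 (all names here are illustrative, not V8 API): only the outermost scope, where the depth is exactly 1, is allowed to run the user callbacks, so re-entrant collections triggered from inside a callback skip them.

struct CallbackState {
  int gc_callbacks_depth = 0;  // stands in for Heap::gc_callbacks_depth_
};

class CallbacksScopeSketch {
 public:
  explicit CallbacksScopeSketch(CallbackState* state) : state_(state) {
    ++state_->gc_callbacks_depth;
  }
  ~CallbacksScopeSketch() { --state_->gc_callbacks_depth; }
  // True only for the outermost scope, mirroring GCCallbacksScope::CheckReenter.
  bool CheckReenter() const { return state_->gc_callbacks_depth == 1; }

 private:
  CallbackState* state_;
};

// Usage: a nested scope sees CheckReenter() == false and skips the callbacks.
void CollectGarbageSketch(CallbackState* state) {
  CallbacksScopeSketch scope(state);
  if (scope.CheckReenter()) {
    // invoke prologue/epilogue callbacks here
  }
}

int main() {
  CallbackState state;
  CollectGarbageSketch(&state);
  return 0;
}
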
diff --git a/deps/v8/src/heap-profiler.cc b/deps/v8/src/heap-profiler.cc
index 7413b6e688..1dc1113214 100644
--- a/deps/v8/src/heap-profiler.cc
+++ b/deps/v8/src/heap-profiler.cc
@@ -168,7 +168,10 @@ SnapshotObjectId HeapProfiler::GetSnapshotObjectId(Handle<Object> obj) {
void HeapProfiler::ObjectMoveEvent(Address from, Address to, int size) {
- ids_->MoveObject(from, to, size);
+ bool known_object = ids_->MoveObject(from, to, size);
+ if (!known_object && !allocation_tracker_.is_empty()) {
+ allocation_tracker_->address_to_trace()->MoveObject(from, to, size);
+ }
}
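
The ObjectMoveEvent change above forwards moves of objects the id map has never seen to the allocation tracker. A minimal sketch of that split, with a plain std::unordered_map standing in for V8's internal tables (names and simplifications are mine):

#include <cstdint>
#include <unordered_map>

using Address = uintptr_t;

class ObjectIdMapSketch {
 public:
  // Returns true if |from| was a known object, false otherwise -- the same
  // contract the patch gives HeapObjectsMap::MoveObject.
  bool MoveObject(Address from, Address to) {
    auto it = map_.find(from);
    if (it == map_.end()) return false;
    int id = it->second;
    map_.erase(it);
    map_[to] = id;
    return true;
  }
  void Add(Address addr, int id) { map_[addr] = id; }

 private:
  std::unordered_map<Address, int> map_;
};

class AddressToTraceSketch {
 public:
  void MoveObject(Address /*from*/, Address /*to*/) {
    // placeholder for updating the allocation-trace mapping
  }
};

// Mirrors HeapProfiler::ObjectMoveEvent: only unknown objects are forwarded
// to the allocation-trace side.
void ObjectMoveEventSketch(ObjectIdMapSketch* ids, AddressToTraceSketch* traces,
                           Address from, Address to) {
  bool known_object = ids->MoveObject(from, to);
  if (!known_object && traces != nullptr) {
    traces->MoveObject(from, to);
  }
}

int main() {
  ObjectIdMapSketch ids;
  AddressToTraceSketch traces;
  ids.Add(0x1000, 1);
  ObjectMoveEventSketch(&ids, &traces, 0x1000, 0x2000);  // known: ids updated
  ObjectMoveEventSketch(&ids, &traces, 0x3000, 0x4000);  // unknown: traces only
  return 0;
}
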
diff --git a/deps/v8/src/heap-snapshot-generator.cc b/deps/v8/src/heap-snapshot-generator.cc
index ccfbfb8d03..332d0dbf6f 100644
--- a/deps/v8/src/heap-snapshot-generator.cc
+++ b/deps/v8/src/heap-snapshot-generator.cc
@@ -34,6 +34,7 @@
#include "heap-profiler.h"
#include "debug.h"
#include "types.h"
+#include "v8conversions.h"
namespace v8 {
namespace internal {
@@ -72,14 +73,16 @@ HeapEntry::HeapEntry(HeapSnapshot* snapshot,
Type type,
const char* name,
SnapshotObjectId id,
- int self_size)
+ size_t self_size,
+ unsigned trace_node_id)
: type_(type),
children_count_(0),
children_index_(-1),
self_size_(self_size),
- id_(id),
snapshot_(snapshot),
- name_(name) { }
+ name_(name),
+ id_(id),
+ trace_node_id_(trace_node_id) { }
void HeapEntry::SetNamedReference(HeapGraphEdge::Type type,
@@ -103,7 +106,7 @@ void HeapEntry::SetIndexedReference(HeapGraphEdge::Type type,
void HeapEntry::Print(
const char* prefix, const char* edge_name, int max_depth, int indent) {
STATIC_CHECK(sizeof(unsigned) == sizeof(id()));
- OS::Print("%6d @%6u %*c %s%s: ",
+ OS::Print("%6" V8PRIuPTR " @%6u %*c %s%s: ",
self_size(), id(), indent, ' ', prefix, edge_name);
if (type() != kString) {
OS::Print("%s %.40s\n", TypeAsString(), name_);
@@ -188,12 +191,12 @@ template <size_t ptr_size> struct SnapshotSizeConstants;
template <> struct SnapshotSizeConstants<4> {
static const int kExpectedHeapGraphEdgeSize = 12;
- static const int kExpectedHeapEntrySize = 24;
+ static const int kExpectedHeapEntrySize = 28;
};
template <> struct SnapshotSizeConstants<8> {
static const int kExpectedHeapGraphEdgeSize = 24;
- static const int kExpectedHeapEntrySize = 32;
+ static const int kExpectedHeapEntrySize = 40;
};
} // namespace
@@ -242,6 +245,7 @@ HeapEntry* HeapSnapshot::AddRootEntry() {
HeapEntry* entry = AddEntry(HeapEntry::kSynthetic,
"",
HeapObjectsMap::kInternalRootObjectId,
+ 0,
0);
root_index_ = entry->index();
ASSERT(root_index_ == 0);
@@ -254,6 +258,7 @@ HeapEntry* HeapSnapshot::AddGcRootsEntry() {
HeapEntry* entry = AddEntry(HeapEntry::kSynthetic,
"(GC roots)",
HeapObjectsMap::kGcRootsObjectId,
+ 0,
0);
gc_roots_index_ = entry->index();
return entry;
@@ -267,6 +272,7 @@ HeapEntry* HeapSnapshot::AddGcSubrootEntry(int tag) {
HeapEntry::kSynthetic,
VisitorSynchronization::kTagNames[tag],
HeapObjectsMap::GetNthGcSubrootId(tag),
+ 0,
0);
gc_subroot_indexes_[tag] = entry->index();
return entry;
@@ -276,8 +282,9 @@ HeapEntry* HeapSnapshot::AddGcSubrootEntry(int tag) {
HeapEntry* HeapSnapshot::AddEntry(HeapEntry::Type type,
const char* name,
SnapshotObjectId id,
- int size) {
- HeapEntry entry(this, type, name, id, size);
+ size_t size,
+ unsigned trace_node_id) {
+ HeapEntry entry(this, type, name, id, size, trace_node_id);
entries_.Add(entry);
return &entries_.last();
}
@@ -389,10 +396,10 @@ HeapObjectsMap::HeapObjectsMap(Heap* heap)
}
-void HeapObjectsMap::MoveObject(Address from, Address to, int object_size) {
+bool HeapObjectsMap::MoveObject(Address from, Address to, int object_size) {
ASSERT(to != NULL);
ASSERT(from != NULL);
- if (from == to) return;
+ if (from == to) return false;
void* from_value = entries_map_.Remove(from, ComputePointerHash(from));
if (from_value == NULL) {
// It may occur that some untracked object moves to an address X and there
@@ -433,6 +440,7 @@ void HeapObjectsMap::MoveObject(Address from, Address to, int object_size) {
entries_.at(from_entry_info_index).size = object_size;
to_entry->value = from_value;
}
+ return from_value != NULL;
}
@@ -899,17 +907,88 @@ HeapEntry* V8HeapExplorer::AddEntry(HeapObject* object) {
HeapEntry* V8HeapExplorer::AddEntry(HeapObject* object,
HeapEntry::Type type,
const char* name) {
- int object_size = object->Size();
- SnapshotObjectId object_id =
- heap_object_map_->FindOrAddEntry(object->address(), object_size);
- return snapshot_->AddEntry(type, name, object_id, object_size);
+ return AddEntry(object->address(), type, name, object->Size());
+}
+
+
+HeapEntry* V8HeapExplorer::AddEntry(Address address,
+ HeapEntry::Type type,
+ const char* name,
+ size_t size) {
+ SnapshotObjectId object_id = heap_object_map_->FindOrAddEntry(
+ address, static_cast<unsigned int>(size));
+ unsigned trace_node_id = 0;
+ if (AllocationTracker* allocation_tracker =
+ snapshot_->profiler()->allocation_tracker()) {
+ trace_node_id =
+ allocation_tracker->address_to_trace()->GetTraceNodeId(address);
+ }
+ return snapshot_->AddEntry(type, name, object_id, size, trace_node_id);
}
+class SnapshotFiller {
+ public:
+ explicit SnapshotFiller(HeapSnapshot* snapshot, HeapEntriesMap* entries)
+ : snapshot_(snapshot),
+ names_(snapshot->profiler()->names()),
+ entries_(entries) { }
+ HeapEntry* AddEntry(HeapThing ptr, HeapEntriesAllocator* allocator) {
+ HeapEntry* entry = allocator->AllocateEntry(ptr);
+ entries_->Pair(ptr, entry->index());
+ return entry;
+ }
+ HeapEntry* FindEntry(HeapThing ptr) {
+ int index = entries_->Map(ptr);
+ return index != HeapEntry::kNoEntry ? &snapshot_->entries()[index] : NULL;
+ }
+ HeapEntry* FindOrAddEntry(HeapThing ptr, HeapEntriesAllocator* allocator) {
+ HeapEntry* entry = FindEntry(ptr);
+ return entry != NULL ? entry : AddEntry(ptr, allocator);
+ }
+ void SetIndexedReference(HeapGraphEdge::Type type,
+ int parent,
+ int index,
+ HeapEntry* child_entry) {
+ HeapEntry* parent_entry = &snapshot_->entries()[parent];
+ parent_entry->SetIndexedReference(type, index, child_entry);
+ }
+ void SetIndexedAutoIndexReference(HeapGraphEdge::Type type,
+ int parent,
+ HeapEntry* child_entry) {
+ HeapEntry* parent_entry = &snapshot_->entries()[parent];
+ int index = parent_entry->children_count() + 1;
+ parent_entry->SetIndexedReference(type, index, child_entry);
+ }
+ void SetNamedReference(HeapGraphEdge::Type type,
+ int parent,
+ const char* reference_name,
+ HeapEntry* child_entry) {
+ HeapEntry* parent_entry = &snapshot_->entries()[parent];
+ parent_entry->SetNamedReference(type, reference_name, child_entry);
+ }
+ void SetNamedAutoIndexReference(HeapGraphEdge::Type type,
+ int parent,
+ HeapEntry* child_entry) {
+ HeapEntry* parent_entry = &snapshot_->entries()[parent];
+ int index = parent_entry->children_count() + 1;
+ parent_entry->SetNamedReference(
+ type,
+ names_->GetName(index),
+ child_entry);
+ }
+
+ private:
+ HeapSnapshot* snapshot_;
+ StringsStorage* names_;
+ HeapEntriesMap* entries_;
+};
+
+
class GcSubrootsEnumerator : public ObjectVisitor {
public:
GcSubrootsEnumerator(
- SnapshotFillerInterface* filler, V8HeapExplorer* explorer)
+ SnapshotFiller* filler, V8HeapExplorer* explorer)
: filler_(filler),
explorer_(explorer),
previous_object_count_(0),
@@ -926,14 +1005,14 @@ class GcSubrootsEnumerator : public ObjectVisitor {
}
}
private:
- SnapshotFillerInterface* filler_;
+ SnapshotFiller* filler_;
V8HeapExplorer* explorer_;
intptr_t previous_object_count_;
intptr_t object_count_;
};
-void V8HeapExplorer::AddRootEntries(SnapshotFillerInterface* filler) {
+void V8HeapExplorer::AddRootEntries(SnapshotFiller* filler) {
filler->AddEntry(kInternalRootObject, this);
filler->AddEntry(kGcRootsObject, this);
GcSubrootsEnumerator enumerator(filler, this);
@@ -1029,6 +1108,8 @@ void V8HeapExplorer::ExtractReferences(HeapObject* obj) {
if (obj->IsJSGlobalProxy()) {
ExtractJSGlobalProxyReferences(entry, JSGlobalProxy::cast(obj));
+ } else if (obj->IsJSArrayBuffer()) {
+ ExtractJSArrayBufferReferences(entry, JSArrayBuffer::cast(obj));
} else if (obj->IsJSObject()) {
ExtractJSObjectReferences(entry, JSObject::cast(obj));
} else if (obj->IsString()) {
@@ -1147,13 +1228,6 @@ void V8HeapExplorer::ExtractJSObjectReferences(
JSArrayBufferView::kBufferOffset);
SetWeakReference(view, entry, "weak_next", view->weak_next(),
JSArrayBufferView::kWeakNextOffset);
- } else if (obj->IsJSArrayBuffer()) {
- JSArrayBuffer* buffer = JSArrayBuffer::cast(obj);
- SetWeakReference(buffer, entry, "weak_next", buffer->weak_next(),
- JSArrayBuffer::kWeakNextOffset);
- SetWeakReference(buffer, entry,
- "weak_first_view", buffer->weak_first_view(),
- JSArrayBuffer::kWeakFirstViewOffset);
}
TagObject(js_obj->properties(), "(object properties)");
SetInternalReference(obj, entry,
@@ -1204,7 +1278,8 @@ void V8HeapExplorer::ExtractContextReferences(int entry, Context* context) {
}
#define EXTRACT_CONTEXT_FIELD(index, type, name) \
- if (Context::index < Context::FIRST_WEAK_SLOT) { \
+ if (Context::index < Context::FIRST_WEAK_SLOT || \
+ Context::index == Context::MAP_CACHE_INDEX) { \
SetInternalReference(context, entry, #name, context->get(Context::index), \
FixedArray::OffsetOfElementAt(Context::index)); \
} else { \
@@ -1339,9 +1414,6 @@ void V8HeapExplorer::ExtractScriptReferences(int entry, Script* script) {
"name", script->name(),
Script::kNameOffset);
SetInternalReference(obj, entry,
- "data", script->data(),
- Script::kDataOffset);
- SetInternalReference(obj, entry,
"context_data", script->context_data(),
Script::kContextOffset);
TagObject(script->line_ends(), "(script line ends)");
@@ -1454,6 +1526,42 @@ void V8HeapExplorer::ExtractAllocationSiteReferences(int entry,
}
+class JSArrayBufferDataEntryAllocator : public HeapEntriesAllocator {
+ public:
+ JSArrayBufferDataEntryAllocator(size_t size, V8HeapExplorer* explorer)
+ : size_(size)
+ , explorer_(explorer) {
+ }
+ virtual HeapEntry* AllocateEntry(HeapThing ptr) {
+ return explorer_->AddEntry(
+ static_cast<Address>(ptr),
+ HeapEntry::kNative, "system / JSArrayBufferData", size_);
+ }
+ private:
+ size_t size_;
+ V8HeapExplorer* explorer_;
+};
+
+
+void V8HeapExplorer::ExtractJSArrayBufferReferences(
+ int entry, JSArrayBuffer* buffer) {
+ SetWeakReference(buffer, entry, "weak_next", buffer->weak_next(),
+ JSArrayBuffer::kWeakNextOffset);
+ SetWeakReference(buffer, entry,
+ "weak_first_view", buffer->weak_first_view(),
+ JSArrayBuffer::kWeakFirstViewOffset);
+ // Set up a reference to a native memory backing_store object.
+ if (!buffer->backing_store())
+ return;
+ size_t data_size = NumberToSize(heap_->isolate(), buffer->byte_length());
+ JSArrayBufferDataEntryAllocator allocator(data_size, this);
+ HeapEntry* data_entry =
+ filler_->FindOrAddEntry(buffer->backing_store(), &allocator);
+ filler_->SetNamedReference(HeapGraphEdge::kInternal,
+ entry, "backing_store", data_entry);
+}
+
+
void V8HeapExplorer::ExtractClosureReferences(JSObject* js_obj, int entry) {
if (!js_obj->IsJSFunction()) return;
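
ExtractJSArrayBufferReferences above attaches a synthetic entry for the native backing store through filler_->FindOrAddEntry with a one-off allocator. A compilable sketch of that find-or-add-with-allocator pattern, with std::unordered_map standing in for HeapEntriesMap (all names illustrative):

#include <cstdio>
#include <string>
#include <unordered_map>
#include <vector>

struct EntrySketch {
  std::string name;
  size_t size;
};

// Roughly analogous to HeapEntriesAllocator: knows how to build an entry for
// a pointer it is handed.
struct EntryAllocatorSketch {
  std::string name;
  size_t size;
  EntrySketch Allocate(const void* /*ptr*/) const {
    return EntrySketch{name, size};
  }
};

class FillerSketch {
 public:
  // Allocate only on a miss, exactly once per pointer; returns the index.
  size_t FindOrAddEntry(const void* ptr, const EntryAllocatorSketch& alloc) {
    auto it = index_.find(ptr);
    if (it != index_.end()) return it->second;
    entries_.push_back(alloc.Allocate(ptr));
    size_t index = entries_.size() - 1;
    index_[ptr] = index;
    return index;
  }
  const EntrySketch& at(size_t index) const { return entries_[index]; }

 private:
  std::vector<EntrySketch> entries_;
  std::unordered_map<const void*, size_t> index_;
};

int main() {
  FillerSketch filler;
  static char backing_store[64];
  EntryAllocatorSketch alloc{"system / JSArrayBufferData",
                             sizeof(backing_store)};
  size_t i1 = filler.FindOrAddEntry(backing_store, alloc);
  size_t i2 = filler.FindOrAddEntry(backing_store, alloc);  // reuses the entry
  std::printf("%s %zu same=%d\n", filler.at(i1).name.c_str(),
              filler.at(i1).size, static_cast<int>(i1 == i2));
  return 0;
}
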
@@ -1712,7 +1820,7 @@ class RootsReferencesExtractor : public ObjectVisitor {
bool V8HeapExplorer::IterateAndExtractReferences(
- SnapshotFillerInterface* filler) {
+ SnapshotFiller* filler) {
filler_ = filler;
// Make sure builtin code objects get their builtin tags
@@ -2104,7 +2212,8 @@ HeapEntry* BasicHeapEntriesAllocator::AllocateEntry(HeapThing ptr) {
entries_type_,
name,
heap_object_map_->GenerateId(info),
- size != -1 ? static_cast<int>(size) : 0);
+ size != -1 ? static_cast<int>(size) : 0,
+ 0);
}
@@ -2222,7 +2331,7 @@ List<HeapObject*>* NativeObjectsExplorer::GetListMaybeDisposeInfo(
bool NativeObjectsExplorer::IterateAndExtractReferences(
- SnapshotFillerInterface* filler) {
+ SnapshotFiller* filler) {
filler_ = filler;
FillRetainedObjects();
FillImplicitReferences();
@@ -2349,64 +2458,6 @@ void NativeObjectsExplorer::VisitSubtreeWrapper(Object** p, uint16_t class_id) {
}
-class SnapshotFiller : public SnapshotFillerInterface {
- public:
- explicit SnapshotFiller(HeapSnapshot* snapshot, HeapEntriesMap* entries)
- : snapshot_(snapshot),
- names_(snapshot->profiler()->names()),
- entries_(entries) { }
- HeapEntry* AddEntry(HeapThing ptr, HeapEntriesAllocator* allocator) {
- HeapEntry* entry = allocator->AllocateEntry(ptr);
- entries_->Pair(ptr, entry->index());
- return entry;
- }
- HeapEntry* FindEntry(HeapThing ptr) {
- int index = entries_->Map(ptr);
- return index != HeapEntry::kNoEntry ? &snapshot_->entries()[index] : NULL;
- }
- HeapEntry* FindOrAddEntry(HeapThing ptr, HeapEntriesAllocator* allocator) {
- HeapEntry* entry = FindEntry(ptr);
- return entry != NULL ? entry : AddEntry(ptr, allocator);
- }
- void SetIndexedReference(HeapGraphEdge::Type type,
- int parent,
- int index,
- HeapEntry* child_entry) {
- HeapEntry* parent_entry = &snapshot_->entries()[parent];
- parent_entry->SetIndexedReference(type, index, child_entry);
- }
- void SetIndexedAutoIndexReference(HeapGraphEdge::Type type,
- int parent,
- HeapEntry* child_entry) {
- HeapEntry* parent_entry = &snapshot_->entries()[parent];
- int index = parent_entry->children_count() + 1;
- parent_entry->SetIndexedReference(type, index, child_entry);
- }
- void SetNamedReference(HeapGraphEdge::Type type,
- int parent,
- const char* reference_name,
- HeapEntry* child_entry) {
- HeapEntry* parent_entry = &snapshot_->entries()[parent];
- parent_entry->SetNamedReference(type, reference_name, child_entry);
- }
- void SetNamedAutoIndexReference(HeapGraphEdge::Type type,
- int parent,
- HeapEntry* child_entry) {
- HeapEntry* parent_entry = &snapshot_->entries()[parent];
- int index = parent_entry->children_count() + 1;
- parent_entry->SetNamedReference(
- type,
- names_->GetName(index),
- child_entry);
- }
-
- private:
- HeapSnapshot* snapshot_;
- StringsStorage* names_;
- HeapEntriesMap* entries_;
-};
-
-
HeapSnapshotGenerator::HeapSnapshotGenerator(
HeapSnapshot* snapshot,
v8::ActivityControl* control,
@@ -2603,8 +2654,8 @@ class OutputStreamWriter {
// type, name|index, to_node.
const int HeapSnapshotJSONSerializer::kEdgeFieldsCount = 3;
-// type, name, id, self_size, children_index.
-const int HeapSnapshotJSONSerializer::kNodeFieldsCount = 5;
+// type, name, id, self_size, edge_count, trace_node_id.
+const int HeapSnapshotJSONSerializer::kNodeFieldsCount = 6;
void HeapSnapshotJSONSerializer::Serialize(v8::OutputStream* stream) {
if (AllocationTracker* allocation_tracker =
@@ -2663,9 +2714,26 @@ int HeapSnapshotJSONSerializer::GetStringId(const char* s) {
}
-static int utoa(unsigned value, const Vector<char>& buffer, int buffer_pos) {
+namespace {
+
+template<size_t size> struct ToUnsigned;
+
+template<> struct ToUnsigned<4> {
+ typedef uint32_t Type;
+};
+
+template<> struct ToUnsigned<8> {
+ typedef uint64_t Type;
+};
+
+} // namespace
+
+
+template<typename T>
+static int utoa_impl(T value, const Vector<char>& buffer, int buffer_pos) {
+ STATIC_CHECK(static_cast<T>(-1) > 0); // Check that T is unsigned
int number_of_digits = 0;
- unsigned t = value;
+ T t = value;
do {
++number_of_digits;
} while (t /= 10);
@@ -2673,7 +2741,7 @@ static int utoa(unsigned value, const Vector<char>& buffer, int buffer_pos) {
buffer_pos += number_of_digits;
int result = buffer_pos;
do {
- int last_digit = value % 10;
+ int last_digit = static_cast<int>(value % 10);
buffer[--buffer_pos] = '0' + last_digit;
value /= 10;
} while (value);
@@ -2681,6 +2749,14 @@ static int utoa(unsigned value, const Vector<char>& buffer, int buffer_pos) {
}
+template<typename T>
+static int utoa(T value, const Vector<char>& buffer, int buffer_pos) {
+ typename ToUnsigned<sizeof(value)>::Type unsigned_value = value;
+ STATIC_CHECK(sizeof(value) == sizeof(unsigned_value));
+ return utoa_impl(unsigned_value, buffer, buffer_pos);
+}
+
+
void HeapSnapshotJSONSerializer::SerializeEdge(HeapGraphEdge* edge,
bool first_edge) {
// The buffer needs space for 3 unsigned ints, 3 commas, \n and \0
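
The utoa rewrite above picks a same-width unsigned type through a size-keyed trait so that size_t values (the new self_size) run through the same digit loop as 32-bit ids. A self-contained sketch of that trait trick outside V8 (buffer handling simplified; names are mine):

#include <cstddef>
#include <cstdint>
#include <cstdio>

template <size_t size> struct ToUnsignedSketch;
template <> struct ToUnsignedSketch<4> { typedef uint32_t Type; };
template <> struct ToUnsignedSketch<8> { typedef uint64_t Type; };

// Writes the decimal digits of an unsigned value into buffer starting at
// buffer_pos and returns the position just past the last digit.
template <typename T>
static int UtoaImplSketch(T value, char* buffer, int buffer_pos) {
  static_assert(static_cast<T>(-1) > 0, "T must be unsigned");
  int number_of_digits = 0;
  T t = value;
  do {
    ++number_of_digits;
  } while (t /= 10);
  buffer_pos += number_of_digits;
  int result = buffer_pos;
  do {
    buffer[--buffer_pos] = static_cast<char>('0' + value % 10);
    value /= 10;
  } while (value);
  return result;
}

// Maps the argument to the unsigned type of the same width first.
template <typename T>
static int UtoaSketch(T value, char* buffer, int buffer_pos) {
  typename ToUnsignedSketch<sizeof(value)>::Type unsigned_value = value;
  return UtoaImplSketch(unsigned_value, buffer, buffer_pos);
}

int main() {
  char buffer[32] = {};
  uint64_t self_size = 1234567890123ULL;  // would not fit a 32-bit unsigned
  int end = UtoaSketch(self_size, buffer, 0);
  buffer[end] = '\0';
  std::printf("%s\n", buffer);
  return 0;
}
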
@@ -2717,10 +2793,11 @@ void HeapSnapshotJSONSerializer::SerializeEdges() {
void HeapSnapshotJSONSerializer::SerializeNode(HeapEntry* entry) {
- // The buffer needs space for 5 unsigned ints, 5 commas, \n and \0
+ // The buffer needs space for 4 unsigned ints, 1 size_t, 5 commas, \n and \0
static const int kBufferSize =
5 * MaxDecimalDigitsIn<sizeof(unsigned)>::kUnsigned // NOLINT
- + 5 + 1 + 1;
+ + MaxDecimalDigitsIn<sizeof(size_t)>::kUnsigned // NOLINT
+ + 6 + 1 + 1;
EmbeddedVector<char, kBufferSize> buffer;
int buffer_pos = 0;
if (entry_index(entry) != 0) {
@@ -2735,6 +2812,8 @@ void HeapSnapshotJSONSerializer::SerializeNode(HeapEntry* entry) {
buffer_pos = utoa(entry->self_size(), buffer, buffer_pos);
buffer[buffer_pos++] = ',';
buffer_pos = utoa(entry->children_count(), buffer, buffer_pos);
+ buffer[buffer_pos++] = ',';
+ buffer_pos = utoa(entry->trace_node_id(), buffer, buffer_pos);
buffer[buffer_pos++] = '\n';
buffer[buffer_pos++] = '\0';
writer_->AddString(buffer.start());
@@ -2768,7 +2847,8 @@ void HeapSnapshotJSONSerializer::SerializeSnapshot() {
JSON_S("name") ","
JSON_S("id") ","
JSON_S("self_size") ","
- JSON_S("edge_count")) ","
+ JSON_S("edge_count") ","
+ JSON_S("trace_node_id")) ","
JSON_S("node_types") ":" JSON_A(
JSON_A(
JSON_S("hidden") ","
@@ -2813,7 +2893,7 @@ void HeapSnapshotJSONSerializer::SerializeSnapshot() {
JSON_S("column")) ","
JSON_S("trace_node_fields") ":" JSON_A(
JSON_S("id") ","
- JSON_S("function_id") ","
+ JSON_S("function_info_index") ","
JSON_S("count") ","
JSON_S("size") ","
JSON_S("children"))));
@@ -2828,7 +2908,7 @@ void HeapSnapshotJSONSerializer::SerializeSnapshot() {
uint32_t count = 0;
AllocationTracker* tracker = snapshot_->profiler()->allocation_tracker();
if (tracker) {
- count = tracker->id_to_function_info()->occupancy();
+ count = tracker->function_info_list().length();
}
writer_->AddNumber(count);
}
@@ -2861,7 +2941,7 @@ void HeapSnapshotJSONSerializer::SerializeTraceNode(AllocationTraceNode* node) {
int buffer_pos = 0;
buffer_pos = utoa(node->id(), buffer, buffer_pos);
buffer[buffer_pos++] = ',';
- buffer_pos = utoa(node->function_id(), buffer, buffer_pos);
+ buffer_pos = utoa(node->function_info_index(), buffer, buffer_pos);
buffer[buffer_pos++] = ',';
buffer_pos = utoa(node->allocation_count(), buffer, buffer_pos);
buffer[buffer_pos++] = ',';
@@ -2903,22 +2983,18 @@ void HeapSnapshotJSONSerializer::SerializeTraceNodeInfos() {
6 * MaxDecimalDigitsIn<sizeof(unsigned)>::kUnsigned // NOLINT
+ 6 + 1 + 1;
EmbeddedVector<char, kBufferSize> buffer;
- HashMap* id_to_function_info = tracker->id_to_function_info();
+ const List<AllocationTracker::FunctionInfo*>& list =
+ tracker->function_info_list();
bool first_entry = true;
- for (HashMap::Entry* p = id_to_function_info->Start();
- p != NULL;
- p = id_to_function_info->Next(p)) {
- SnapshotObjectId id =
- static_cast<SnapshotObjectId>(reinterpret_cast<intptr_t>(p->key));
- AllocationTracker::FunctionInfo* info =
- reinterpret_cast<AllocationTracker::FunctionInfo* >(p->value);
+ for (int i = 0; i < list.length(); i++) {
+ AllocationTracker::FunctionInfo* info = list[i];
int buffer_pos = 0;
if (first_entry) {
first_entry = false;
} else {
buffer[buffer_pos++] = ',';
}
- buffer_pos = utoa(id, buffer, buffer_pos);
+ buffer_pos = utoa(info->function_id, buffer, buffer_pos);
buffer[buffer_pos++] = ',';
buffer_pos = utoa(GetStringId(info->name), buffer, buffer_pos);
buffer[buffer_pos++] = ',';
diff --git a/deps/v8/src/heap-snapshot-generator.h b/deps/v8/src/heap-snapshot-generator.h
index e209eeabb1..634ede19ab 100644
--- a/deps/v8/src/heap-snapshot-generator.h
+++ b/deps/v8/src/heap-snapshot-generator.h
@@ -37,6 +37,7 @@ class AllocationTracker;
class AllocationTraceNode;
class HeapEntry;
class HeapSnapshot;
+class SnapshotFiller;
class HeapGraphEdge BASE_EMBEDDED {
public:
@@ -114,14 +115,16 @@ class HeapEntry BASE_EMBEDDED {
Type type,
const char* name,
SnapshotObjectId id,
- int self_size);
+ size_t self_size,
+ unsigned trace_node_id);
HeapSnapshot* snapshot() { return snapshot_; }
Type type() { return static_cast<Type>(type_); }
const char* name() { return name_; }
void set_name(const char* name) { name_ = name; }
inline SnapshotObjectId id() { return id_; }
- int self_size() { return self_size_; }
+ size_t self_size() { return self_size_; }
+ unsigned trace_node_id() const { return trace_node_id_; }
INLINE(int index() const);
int children_count() const { return children_count_; }
INLINE(int set_children_index(int index));
@@ -146,10 +149,12 @@ class HeapEntry BASE_EMBEDDED {
unsigned type_: 4;
int children_count_: 28;
int children_index_;
- int self_size_;
- SnapshotObjectId id_;
+ size_t self_size_;
HeapSnapshot* snapshot_;
const char* name_;
+ SnapshotObjectId id_;
+ // id of allocation stack trace top node
+ unsigned trace_node_id_;
};
@@ -186,7 +191,8 @@ class HeapSnapshot {
HeapEntry* AddEntry(HeapEntry::Type type,
const char* name,
SnapshotObjectId id,
- int size);
+ size_t size,
+ unsigned trace_node_id);
HeapEntry* AddRootEntry();
HeapEntry* AddGcRootsEntry();
HeapEntry* AddGcSubrootEntry(int tag);
@@ -228,7 +234,7 @@ class HeapObjectsMap {
SnapshotObjectId FindOrAddEntry(Address addr,
unsigned int size,
bool accessed = true);
- void MoveObject(Address from, Address to, int size);
+ bool MoveObject(Address from, Address to, int size);
void UpdateObjectSize(Address addr, int size);
SnapshotObjectId last_assigned_id() const {
return next_id_ - kObjectIdStep;
@@ -338,32 +344,6 @@ class HeapObjectsSet {
};
-// An interface used to populate a snapshot with nodes and edges.
-class SnapshotFillerInterface {
- public:
- virtual ~SnapshotFillerInterface() { }
- virtual HeapEntry* AddEntry(HeapThing ptr,
- HeapEntriesAllocator* allocator) = 0;
- virtual HeapEntry* FindEntry(HeapThing ptr) = 0;
- virtual HeapEntry* FindOrAddEntry(HeapThing ptr,
- HeapEntriesAllocator* allocator) = 0;
- virtual void SetIndexedReference(HeapGraphEdge::Type type,
- int parent_entry,
- int index,
- HeapEntry* child_entry) = 0;
- virtual void SetIndexedAutoIndexReference(HeapGraphEdge::Type type,
- int parent_entry,
- HeapEntry* child_entry) = 0;
- virtual void SetNamedReference(HeapGraphEdge::Type type,
- int parent_entry,
- const char* reference_name,
- HeapEntry* child_entry) = 0;
- virtual void SetNamedAutoIndexReference(HeapGraphEdge::Type type,
- int parent_entry,
- HeapEntry* child_entry) = 0;
-};
-
-
class SnapshottingProgressReportingInterface {
public:
virtual ~SnapshottingProgressReportingInterface() { }
@@ -380,12 +360,16 @@ class V8HeapExplorer : public HeapEntriesAllocator {
v8::HeapProfiler::ObjectNameResolver* resolver);
virtual ~V8HeapExplorer();
virtual HeapEntry* AllocateEntry(HeapThing ptr);
- void AddRootEntries(SnapshotFillerInterface* filler);
+ void AddRootEntries(SnapshotFiller* filler);
int EstimateObjectsCount(HeapIterator* iterator);
- bool IterateAndExtractReferences(SnapshotFillerInterface* filler);
+ bool IterateAndExtractReferences(SnapshotFiller* filler);
void TagGlobalObjects();
void TagCodeObject(Code* code);
void TagBuiltinCodeObject(Code* code, const char* name);
+ HeapEntry* AddEntry(Address address,
+ HeapEntry::Type type,
+ const char* name,
+ size_t size);
static String* GetConstructorName(JSObject* object);
@@ -396,6 +380,7 @@ class V8HeapExplorer : public HeapEntriesAllocator {
HeapEntry* AddEntry(HeapObject* object,
HeapEntry::Type type,
const char* name);
+
const char* GetSystemEntryName(HeapObject* object);
void ExtractReferences(HeapObject* obj);
@@ -414,6 +399,7 @@ class V8HeapExplorer : public HeapEntriesAllocator {
void ExtractCellReferences(int entry, Cell* cell);
void ExtractPropertyCellReferences(int entry, PropertyCell* cell);
void ExtractAllocationSiteReferences(int entry, AllocationSite* site);
+ void ExtractJSArrayBufferReferences(int entry, JSArrayBuffer* buffer);
void ExtractClosureReferences(JSObject* js_obj, int entry);
void ExtractPropertyReferences(JSObject* js_obj, int entry);
bool ExtractAccessorPairProperty(JSObject* js_obj, int entry,
@@ -477,7 +463,7 @@ class V8HeapExplorer : public HeapEntriesAllocator {
StringsStorage* names_;
HeapObjectsMap* heap_object_map_;
SnapshottingProgressReportingInterface* progress_;
- SnapshotFillerInterface* filler_;
+ SnapshotFiller* filler_;
HeapObjectsSet objects_tags_;
HeapObjectsSet strong_gc_subroot_names_;
HeapObjectsSet user_roots_;
@@ -504,9 +490,9 @@ class NativeObjectsExplorer {
NativeObjectsExplorer(HeapSnapshot* snapshot,
SnapshottingProgressReportingInterface* progress);
virtual ~NativeObjectsExplorer();
- void AddRootEntries(SnapshotFillerInterface* filler);
+ void AddRootEntries(SnapshotFiller* filler);
int EstimateObjectsCount();
- bool IterateAndExtractReferences(SnapshotFillerInterface* filler);
+ bool IterateAndExtractReferences(SnapshotFiller* filler);
private:
void FillRetainedObjects();
@@ -546,7 +532,7 @@ class NativeObjectsExplorer {
HeapEntriesAllocator* synthetic_entries_allocator_;
HeapEntriesAllocator* native_entries_allocator_;
// Used during references extraction.
- SnapshotFillerInterface* filler_;
+ SnapshotFiller* filler_;
static HeapThing const kNativesRootObject;
diff --git a/deps/v8/src/heap.cc b/deps/v8/src/heap.cc
index 82cf45f742..6374433bbd 100644
--- a/deps/v8/src/heap.cc
+++ b/deps/v8/src/heap.cc
@@ -105,7 +105,6 @@ Heap::Heap()
unflattened_strings_length_(0),
#ifdef DEBUG
allocation_timeout_(0),
- disallow_allocation_failure_(false),
#endif // DEBUG
new_space_high_promotion_mode_active_(false),
old_generation_allocation_limit_(kMinimumOldGenerationAllocationLimit),
@@ -155,7 +154,7 @@ Heap::Heap()
configured_(false),
external_string_table_(this),
chunks_queued_for_free_(NULL),
- relocation_mutex_(NULL) {
+ gc_callbacks_depth_(0) {
// Allow build-time customization of the max semispace size. Building
// V8 with snapshots and a non-default max semispace size is much
// easier if you can define it as part of the build environment.
@@ -545,7 +544,9 @@ void Heap::ProcessPretenuringFeedback() {
}
}
- if (trigger_deoptimization) isolate_->stack_guard()->DeoptMarkedCode();
+ if (trigger_deoptimization) {
+ isolate_->stack_guard()->DeoptMarkedAllocationSites();
+ }
FlushAllocationSitesScratchpad();
@@ -567,6 +568,25 @@ void Heap::ProcessPretenuringFeedback() {
}
+void Heap::DeoptMarkedAllocationSites() {
+ // TODO(hpayer): If iterating over the allocation sites list becomes a
+ // performance issue, use a cache heap data structure instead (similar to the
+ // allocation sites scratchpad).
+ Object* list_element = allocation_sites_list();
+ while (list_element->IsAllocationSite()) {
+ AllocationSite* site = AllocationSite::cast(list_element);
+ if (site->deopt_dependent_code()) {
+ site->dependent_code()->MarkCodeForDeoptimization(
+ isolate_,
+ DependentCode::kAllocationSiteTenuringChangedGroup);
+ site->set_deopt_dependent_code(false);
+ }
+ list_element = site->weak_next();
+ }
+ Deoptimizer::DeoptimizeMarkedCode(isolate_);
+}
+
+
void Heap::GarbageCollectionEpilogue() {
store_buffer()->GCEpilogue();
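
DeoptMarkedAllocationSites above is a single pass over the weak allocation-sites list, acting on the nodes whose deopt flag was set earlier and then clearing it. A minimal sketch of that walk on a plain singly linked list (invented types, not V8's object model):

#include <cstdio>

struct AllocationSiteSketch {
  bool deopt_dependent_code = false;
  AllocationSiteSketch* weak_next = nullptr;
};

// Walks the list once; every flagged site is "deoptimized" (here: counted)
// and its flag cleared, mirroring Heap::DeoptMarkedAllocationSites.
int DeoptMarkedAllocationSitesSketch(AllocationSiteSketch* list_head) {
  int deoptimized = 0;
  for (AllocationSiteSketch* site = list_head; site != nullptr;
       site = site->weak_next) {
    if (site->deopt_dependent_code) {
      ++deoptimized;  // stand-in for MarkCodeForDeoptimization
      site->deopt_dependent_code = false;
    }
  }
  return deoptimized;
}

int main() {
  AllocationSiteSketch a, b, c;
  a.weak_next = &b;
  b.weak_next = &c;
  b.deopt_dependent_code = true;
  std::printf("%d\n", DeoptMarkedAllocationSitesSketch(&a));  // prints 1
  return 0;
}
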
@@ -575,6 +595,9 @@ void Heap::GarbageCollectionEpilogue() {
ZapFromSpace();
}
+ // Process pretenuring feedback and update allocation sites.
+ ProcessPretenuringFeedback();
+
#ifdef VERIFY_HEAP
if (FLAG_verify_heap) {
Verify();
@@ -752,6 +775,21 @@ void Heap::CollectAllAvailableGarbage(const char* gc_reason) {
}
+void Heap::EnsureFillerObjectAtTop() {
+ // There may be an allocation memento behind every object in new space.
+ // If we evacuate a not full new space or if we are on the last page of
+ // the new space, then there may be uninitialized memory behind the top
+ // pointer of the new space page. We store a filler object there to
+ // identify the unused space.
+ Address from_top = new_space_.top();
+ Address from_limit = new_space_.limit();
+ if (from_top < from_limit) {
+ int remaining_in_page = static_cast<int>(from_limit - from_top);
+ CreateFillerObjectAt(from_top, remaining_in_page);
+ }
+}
+
+
bool Heap::CollectGarbage(GarbageCollector collector,
const char* gc_reason,
const char* collector_reason,
@@ -768,17 +806,7 @@ bool Heap::CollectGarbage(GarbageCollector collector,
allocation_timeout_ = Max(6, FLAG_gc_interval);
#endif
- // There may be an allocation memento behind every object in new space.
- // If we evacuate a not full new space or if we are on the last page of
- // the new space, then there may be uninitialized memory behind the top
- // pointer of the new space page. We store a filler object there to
- // identify the unused space.
- Address from_top = new_space_.top();
- Address from_limit = new_space_.limit();
- if (from_top < from_limit) {
- int remaining_in_page = static_cast<int>(from_limit - from_top);
- CreateFillerObjectAt(from_top, remaining_in_page);
- }
+ EnsureFillerObjectAtTop();
if (collector == SCAVENGER && !incremental_marking()->IsStopped()) {
if (FLAG_trace_incremental_marking) {
@@ -852,16 +880,6 @@ int Heap::NotifyContextDisposed() {
}
-void Heap::PerformScavenge() {
- GCTracer tracer(this, NULL, NULL);
- if (incremental_marking()->IsStopped()) {
- PerformGarbageCollection(SCAVENGER, &tracer);
- } else {
- PerformGarbageCollection(MARK_COMPACTOR, &tracer);
- }
-}
-
-
void Heap::MoveElements(FixedArray* array,
int dst_index,
int src_index,
@@ -1068,11 +1086,14 @@ bool Heap::PerformGarbageCollection(
GCType gc_type =
collector == MARK_COMPACTOR ? kGCTypeMarkSweepCompact : kGCTypeScavenge;
- {
- GCTracer::Scope scope(tracer, GCTracer::Scope::EXTERNAL);
- VMState<EXTERNAL> state(isolate_);
- HandleScope handle_scope(isolate_);
- CallGCPrologueCallbacks(gc_type, kNoGCCallbackFlags);
+ { GCCallbacksScope scope(this);
+ if (scope.CheckReenter()) {
+ AllowHeapAllocation allow_allocation;
+ GCTracer::Scope scope(tracer, GCTracer::Scope::EXTERNAL);
+ VMState<EXTERNAL> state(isolate_);
+ HandleScope handle_scope(isolate_);
+ CallGCPrologueCallbacks(gc_type, kNoGCCallbackFlags);
+ }
}
EnsureFromSpaceIsCommitted();
@@ -1177,11 +1198,14 @@ bool Heap::PerformGarbageCollection(
amount_of_external_allocated_memory_;
}
- {
- GCTracer::Scope scope(tracer, GCTracer::Scope::EXTERNAL);
- VMState<EXTERNAL> state(isolate_);
- HandleScope handle_scope(isolate_);
- CallGCEpilogueCallbacks(gc_type, gc_callback_flags);
+ { GCCallbacksScope scope(this);
+ if (scope.CheckReenter()) {
+ AllowHeapAllocation allow_allocation;
+ GCTracer::Scope scope(tracer, GCTracer::Scope::EXTERNAL);
+ VMState<EXTERNAL> state(isolate_);
+ HandleScope handle_scope(isolate_);
+ CallGCEpilogueCallbacks(gc_type, gc_callback_flags);
+ }
}
#ifdef VERIFY_HEAP
@@ -1621,8 +1645,6 @@ void Heap::Scavenge() {
IncrementYoungSurvivorsCounter(static_cast<int>(
(PromotedSpaceSizeOfObjects() - survived_watermark) + new_space_.Size()));
- ProcessPretenuringFeedback();
-
LOG(isolate_, ResourceEvent("scavenge", "end"));
gc_state_ = NOT_IN_GC;
@@ -1753,6 +1775,18 @@ static Object* VisitWeakList(Heap* heap,
}
+template <class T>
+static void ClearWeakList(Heap* heap,
+ Object* list) {
+ Object* undefined = heap->undefined_value();
+ while (list != undefined) {
+ T* candidate = reinterpret_cast<T*>(list);
+ list = WeakListVisitor<T>::WeakNext(candidate);
+ WeakListVisitor<T>::SetWeakNext(candidate, undefined);
+ }
+}
+
+
template<>
struct WeakListVisitor<JSFunction> {
static void SetWeakNext(JSFunction* function, Object* next) {
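
The ClearWeakList template added above drives list clearing through the per-type WeakListVisitor trait, so the same loop works for the JSFunction and Code lists of a dying context. A small self-contained sketch of that trait-driven clearing (types invented for illustration; V8 writes the undefined sentinel where this uses nullptr):

#include <cstdio>

struct FunctionNodeSketch { FunctionNodeSketch* next = nullptr; };
struct CodeNodeSketch { CodeNodeSketch* next = nullptr; };

// Per-type trait, analogous to WeakListVisitor<T>: says where the weak link
// lives and how to rewrite it.
template <class T> struct WeakListVisitorSketch;

template <> struct WeakListVisitorSketch<FunctionNodeSketch> {
  static FunctionNodeSketch* WeakNext(FunctionNodeSketch* n) { return n->next; }
  static void SetWeakNext(FunctionNodeSketch* n, FunctionNodeSketch* v) {
    n->next = v;
  }
};

template <> struct WeakListVisitorSketch<CodeNodeSketch> {
  static CodeNodeSketch* WeakNext(CodeNodeSketch* n) { return n->next; }
  static void SetWeakNext(CodeNodeSketch* n, CodeNodeSketch* v) { n->next = v; }
};

// Severs every weak link in the list, like ClearWeakList<T> in the patch.
template <class T>
void ClearWeakListSketch(T* list) {
  while (list != nullptr) {
    T* candidate = list;
    list = WeakListVisitorSketch<T>::WeakNext(candidate);
    WeakListVisitorSketch<T>::SetWeakNext(candidate, nullptr);
  }
}

int main() {
  FunctionNodeSketch f1, f2;
  f1.next = &f2;
  ClearWeakListSketch(&f1);
  std::printf("%d\n", f1.next == nullptr);  // prints 1
  return 0;
}
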
@@ -1846,7 +1880,11 @@ struct WeakListVisitor<Context> {
}
}
- static void VisitPhantomObject(Heap*, Context*) {
+ static void VisitPhantomObject(Heap* heap, Context* context) {
+ ClearWeakList<JSFunction>(heap,
+ context->get(Context::OPTIMIZED_FUNCTIONS_LIST));
+ ClearWeakList<Code>(heap, context->get(Context::OPTIMIZED_CODE_LIST));
+ ClearWeakList<Code>(heap, context->get(Context::DEOPTIMIZED_CODE_LIST));
}
static int WeakNextOffset() {
@@ -2002,14 +2040,12 @@ void Heap::ResetAllAllocationSitesDependentCode(PretenureFlag flag) {
AllocationSite* casted = AllocationSite::cast(cur);
if (casted->GetPretenureMode() == flag) {
casted->ResetPretenureDecision();
- bool got_marked = casted->dependent_code()->MarkCodeForDeoptimization(
- isolate_,
- DependentCode::kAllocationSiteTenuringChangedGroup);
- if (got_marked) marked = true;
+ casted->set_deopt_dependent_code(true);
+ marked = true;
}
cur = casted->weak_next();
}
- if (marked) isolate_->stack_guard()->DeoptMarkedCode();
+ if (marked) isolate_->stack_guard()->DeoptMarkedAllocationSites();
}
@@ -2672,8 +2708,7 @@ MaybeObject* Heap::AllocateTypeFeedbackInfo() {
if (!maybe_info->To(&info)) return maybe_info;
}
info->initialize_storage();
- info->set_type_feedback_cells(TypeFeedbackCells::cast(empty_fixed_array()),
- SKIP_WRITE_BARRIER);
+ info->set_feedback_vector(empty_fixed_array(), SKIP_WRITE_BARRIER);
return info;
}
@@ -2856,7 +2891,7 @@ bool Heap::CreateInitialMaps() {
TYPED_ARRAYS(ALLOCATE_FIXED_TYPED_ARRAY_MAP)
#undef ALLOCATE_FIXED_TYPED_ARRAY_MAP
- ALLOCATE_VARSIZE_MAP(FIXED_ARRAY_TYPE, non_strict_arguments_elements)
+ ALLOCATE_VARSIZE_MAP(FIXED_ARRAY_TYPE, sloppy_arguments_elements)
ALLOCATE_VARSIZE_MAP(CODE_TYPE, code)
@@ -2915,6 +2950,16 @@ bool Heap::CreateInitialMaps() {
TYPED_ARRAYS(ALLOCATE_EMPTY_EXTERNAL_ARRAY)
#undef ALLOCATE_EMPTY_EXTERNAL_ARRAY
+
+#define ALLOCATE_EMPTY_FIXED_TYPED_ARRAY(Type, type, TYPE, ctype, size) \
+ { FixedTypedArrayBase* obj; \
+ if (!AllocateEmptyFixedTypedArray(kExternal##Type##Array)->To(&obj)) \
+ return false; \
+ set_empty_fixed_##type##_array(obj); \
+ }
+
+ TYPED_ARRAYS(ALLOCATE_EMPTY_FIXED_TYPED_ARRAY)
+#undef ALLOCATE_EMPTY_FIXED_TYPED_ARRAY
}
ASSERT(!InNewSpace(empty_fixed_array()));
return true;
@@ -3055,6 +3100,17 @@ void Heap::CreateFixedStubs() {
// This eliminates the need for doing dictionary lookup in the
// stub cache for these stubs.
HandleScope scope(isolate());
+
+ // Create stubs that should be there, so we don't unexpectedly have to
+ // create them if we need them during the creation of another stub.
+ // Stub creation mixes raw pointers and handles in an unsafe manner so
+ // we cannot create stubs while we are creating stubs.
+ CodeStub::GenerateStubsAheadOfTime(isolate());
+
+ // MacroAssembler::Abort calls (usually enabled with --debug-code) depend on
+ // CEntryStub, so we need to call GenerateStubsAheadOfTime before JSEntryStub
+ // is created.
+
// gcc-4.4 has problem generating correct code of following snippet:
// { JSEntryStub stub;
// js_entry_code_ = *stub.GetCode();
@@ -3065,12 +3121,6 @@ void Heap::CreateFixedStubs() {
// To workaround the problem, make separate functions without inlining.
Heap::CreateJSEntryStub();
Heap::CreateJSConstructEntryStub();
-
- // Create stubs that should be there, so we don't unexpectedly have to
- // create them if we need them during the creation of another stub.
- // Stub creation mixes raw pointers and handles in an unsafe manner so
- // we cannot create stubs while we are creating stubs.
- CodeStub::GenerateStubsAheadOfTime(isolate());
}
@@ -3263,6 +3313,9 @@ bool Heap::CreateInitialObjects() {
}
set_undefined_cell(Cell::cast(obj));
+ // The symbol registry is initialized lazily.
+ set_symbol_registry(undefined_value());
+
// Allocate object to hold object observation state.
{ MaybeObject* maybe_obj = AllocateMap(JS_OBJECT_TYPE, JSObject::kHeaderSize);
if (!maybe_obj->ToObject(&obj)) return false;
@@ -3272,6 +3325,15 @@ bool Heap::CreateInitialObjects() {
}
set_observation_state(JSObject::cast(obj));
+ // Allocate object to hold object microtask state.
+ { MaybeObject* maybe_obj = AllocateMap(JS_OBJECT_TYPE, JSObject::kHeaderSize);
+ if (!maybe_obj->ToObject(&obj)) return false;
+ }
+ { MaybeObject* maybe_obj = AllocateJSObjectFromMap(Map::cast(obj));
+ if (!maybe_obj->ToObject(&obj)) return false;
+ }
+ set_microtask_state(JSObject::cast(obj));
+
{ MaybeObject* maybe_obj = AllocateSymbol();
if (!maybe_obj->ToObject(&obj)) return false;
}
@@ -3282,8 +3344,26 @@ bool Heap::CreateInitialObjects() {
if (!maybe_obj->ToObject(&obj)) return false;
}
Symbol::cast(obj)->set_is_private(true);
+ set_nonexistent_symbol(Symbol::cast(obj));
+
+ { MaybeObject* maybe_obj = AllocateSymbol();
+ if (!maybe_obj->ToObject(&obj)) return false;
+ }
+ Symbol::cast(obj)->set_is_private(true);
set_elements_transition_symbol(Symbol::cast(obj));
+ { MaybeObject* maybe_obj = AllocateSymbol();
+ if (!maybe_obj->ToObject(&obj)) return false;
+ }
+ Symbol::cast(obj)->set_is_private(true);
+ set_uninitialized_symbol(Symbol::cast(obj));
+
+ { MaybeObject* maybe_obj = AllocateSymbol();
+ if (!maybe_obj->ToObject(&obj)) return false;
+ }
+ Symbol::cast(obj)->set_is_private(true);
+ set_megamorphic_symbol(Symbol::cast(obj));
+
{ MaybeObject* maybe_obj = SeededNumberDictionary::Allocate(this, 0, TENURED);
if (!maybe_obj->ToObject(&obj)) return false;
}
@@ -3302,7 +3382,7 @@ bool Heap::CreateInitialObjects() {
set_materialized_objects(FixedArray::cast(obj));
// Handling of script id generation is in Factory::NewScript.
- set_last_script_id(Smi::FromInt(v8::Script::kNoScriptId));
+ set_last_script_id(Smi::FromInt(v8::UnboundScript::kNoScriptId));
{ MaybeObject* maybe_obj = AllocateAllocationSitesScratchpad();
if (!maybe_obj->ToObject(&obj)) return false;
@@ -3623,10 +3703,25 @@ void Heap::InitializeAllocationSitesScratchpad() {
}
-void Heap::AddAllocationSiteToScratchpad(AllocationSite* site) {
+void Heap::AddAllocationSiteToScratchpad(AllocationSite* site,
+ ScratchpadSlotMode mode) {
if (allocation_sites_scratchpad_length_ < kAllocationSiteScratchpadSize) {
+ // We cannot use the normal write-barrier because slots need to be
+ // recorded with non-incremental marking as well. We have to explicitly
+ // record the slot to take evacuation candidates into account.
allocation_sites_scratchpad()->set(
- allocation_sites_scratchpad_length_, site);
+ allocation_sites_scratchpad_length_, site, SKIP_WRITE_BARRIER);
+ Object** slot = allocation_sites_scratchpad()->RawFieldOfElementAt(
+ allocation_sites_scratchpad_length_);
+
+ if (mode == RECORD_SCRATCHPAD_SLOT) {
+ // We need to allow slots buffer overflow here since the evacuation
+ // candidates are not part of the global list of old space pages and
+ // releasing an evacuation candidate due to a slots buffer overflow
+ // results in lost pages.
+ mark_compact_collector()->RecordSlot(
+ slot, slot, *slot, SlotsBuffer::IGNORE_OVERFLOW);
+ }
allocation_sites_scratchpad_length_++;
}
}
@@ -3693,12 +3788,34 @@ Heap::RootListIndex Heap::RootIndexForEmptyExternalArray(
}
+Heap::RootListIndex Heap::RootIndexForEmptyFixedTypedArray(
+ ElementsKind elementsKind) {
+ switch (elementsKind) {
+#define ELEMENT_KIND_TO_ROOT_INDEX(Type, type, TYPE, ctype, size) \
+ case TYPE##_ELEMENTS: \
+ return kEmptyFixed##Type##ArrayRootIndex;
+
+ TYPED_ARRAYS(ELEMENT_KIND_TO_ROOT_INDEX)
+#undef ELEMENT_KIND_TO_ROOT_INDEX
+ default:
+ UNREACHABLE();
+ return kUndefinedValueRootIndex;
+ }
+}
+
+
ExternalArray* Heap::EmptyExternalArrayForMap(Map* map) {
return ExternalArray::cast(
roots_[RootIndexForEmptyExternalArray(map->elements_kind())]);
}
+FixedTypedArrayBase* Heap::EmptyFixedTypedArrayForMap(Map* map) {
+ return FixedTypedArrayBase::cast(
+ roots_[RootIndexForEmptyFixedTypedArray(map->elements_kind())]);
+}
+
+
MaybeObject* Heap::NumberFromDouble(double value, PretenureFlag pretenure) {
// We need to distinguish the minus zero value and this cannot be
// done after conversion to int. Doing this by comparing bit
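
RootIndexForEmptyFixedTypedArray above is generated from the TYPED_ARRAYS X-macro, so every typed-array kind stays in sync with its root index without hand-written cases. A compilable sketch of that X-macro-to-switch pattern with a made-up, shortened list (the real TYPED_ARRAYS list and parameters differ):

#include <cstdio>

// Stand-in for V8's TYPED_ARRAYS list macro: one invocation of V per kind.
#define TYPED_ARRAYS_SKETCH(V) \
  V(Uint8)                     \
  V(Int32)                     \
  V(Float64)

enum ElementsKindSketch {
#define DECLARE_KIND(Type) k##Type##Elements,
  TYPED_ARRAYS_SKETCH(DECLARE_KIND)
#undef DECLARE_KIND
  kUnknownElements
};

enum RootIndexSketch {
#define DECLARE_ROOT(Type) kEmptyFixed##Type##ArrayRootIndex,
  TYPED_ARRAYS_SKETCH(DECLARE_ROOT)
#undef DECLARE_ROOT
  kUndefinedValueRootIndex
};

// Same shape as Heap::RootIndexForEmptyFixedTypedArray: each case is expanded
// from the list macro, so adding a kind updates every such switch at once.
RootIndexSketch RootIndexForEmptyFixedTypedArraySketch(ElementsKindSketch kind) {
  switch (kind) {
#define KIND_TO_ROOT(Type) \
  case k##Type##Elements:  \
    return kEmptyFixed##Type##ArrayRootIndex;
    TYPED_ARRAYS_SKETCH(KIND_TO_ROOT)
#undef KIND_TO_ROOT
    default:
      return kUndefinedValueRootIndex;
  }
}

int main() {
  std::printf("%d\n", RootIndexForEmptyFixedTypedArraySketch(kInt32Elements));
  return 0;
}
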
@@ -3773,7 +3890,6 @@ MaybeObject* Heap::AllocateJSMessageObject(String* type,
int start_position,
int end_position,
Object* script,
- Object* stack_trace,
Object* stack_frames) {
Object* result;
{ MaybeObject* maybe_result = Allocate(message_object_map(), NEW_SPACE);
@@ -3788,7 +3904,6 @@ MaybeObject* Heap::AllocateJSMessageObject(String* type,
message->set_start_position(start_position);
message->set_end_position(end_position);
message->set_script(script);
- message->set_stack_trace(stack_trace);
message->set_stack_frames(stack_frames);
return result;
}
@@ -3798,8 +3913,7 @@ MaybeObject* Heap::AllocateExternalStringFromAscii(
const ExternalAsciiString::Resource* resource) {
size_t length = resource->length();
if (length > static_cast<size_t>(String::kMaxLength)) {
- isolate()->context()->mark_out_of_memory();
- return Failure::OutOfMemoryException(0x5);
+ return isolate()->ThrowInvalidStringLength();
}
Map* map = external_ascii_string_map();
@@ -3821,8 +3935,7 @@ MaybeObject* Heap::AllocateExternalStringFromTwoByte(
const ExternalTwoByteString::Resource* resource) {
size_t length = resource->length();
if (length > static_cast<size_t>(String::kMaxLength)) {
- isolate()->context()->mark_out_of_memory();
- return Failure::OutOfMemoryException(0x6);
+ return isolate()->ThrowInvalidStringLength();
}
// For small strings we check whether the resource contains only
@@ -3873,7 +3986,7 @@ MaybeObject* Heap::LookupSingleCharacterStringFromCode(uint16_t code) {
MaybeObject* Heap::AllocateByteArray(int length, PretenureFlag pretenure) {
if (length < 0 || length > ByteArray::kMaxLength) {
- return Failure::OutOfMemoryException(0x7);
+ v8::internal::Heap::FatalProcessOutOfMemory("invalid array length", true);
}
int size = ByteArray::SizeFor(length);
AllocationSpace space = SelectSpace(size, OLD_DATA_SPACE, pretenure);
@@ -3903,6 +4016,33 @@ void Heap::CreateFillerObjectAt(Address addr, int size) {
}
+bool Heap::CanMoveObjectStart(HeapObject* object) {
+ Address address = object->address();
+ bool is_in_old_pointer_space = InOldPointerSpace(address);
+ bool is_in_old_data_space = InOldDataSpace(address);
+
+ if (lo_space()->Contains(object)) return false;
+
+ // We cannot move the object start if the given old space page is
+ // concurrently swept.
+ return (!is_in_old_pointer_space && !is_in_old_data_space) ||
+ Page::FromAddress(address)->parallel_sweeping() <=
+ MemoryChunk::PARALLEL_SWEEPING_FINALIZE;
+}
+
+
+void Heap::AdjustLiveBytes(Address address, int by, InvocationMode mode) {
+ if (incremental_marking()->IsMarking() &&
+ Marking::IsBlack(Marking::MarkBitFrom(address))) {
+ if (mode == FROM_GC) {
+ MemoryChunk::IncrementLiveBytesFromGC(address, by);
+ } else {
+ MemoryChunk::IncrementLiveBytesFromMutator(address, by);
+ }
+ }
+}
+
+
MaybeObject* Heap::AllocateExternalArray(int length,
ExternalArrayType array_type,
void* external_pointer,
@@ -3971,6 +4111,7 @@ MaybeObject* Heap::AllocateFixedTypedArray(int length,
reinterpret_cast<FixedTypedArrayBase*>(object);
elements->set_map(MapForFixedTypedArray(array_type));
elements->set_length(length);
+ memset(elements->DataPtr(), 0, elements->DataSize());
return elements;
}
@@ -3981,12 +4122,20 @@ MaybeObject* Heap::CreateCode(const CodeDesc& desc,
bool immovable,
bool crankshafted,
int prologue_offset) {
- // Allocate ByteArray before the Code object, so that we do not risk
- // leaving uninitialized Code object (and breaking the heap).
+ // Allocate ByteArray and ConstantPoolArray before the Code object, so that we
+ // do not risk leaving uninitialized Code object (and breaking the heap).
ByteArray* reloc_info;
MaybeObject* maybe_reloc_info = AllocateByteArray(desc.reloc_size, TENURED);
if (!maybe_reloc_info->To(&reloc_info)) return maybe_reloc_info;
+ ConstantPoolArray* constant_pool;
+ if (FLAG_enable_ool_constant_pool) {
+ MaybeObject* maybe_constant_pool = desc.origin->AllocateConstantPool(this);
+ if (!maybe_constant_pool->To(&constant_pool)) return maybe_constant_pool;
+ } else {
+ constant_pool = empty_constant_pool_array();
+ }
+
// Compute size.
int body_size = RoundUp(desc.instr_size, kObjectAlignment);
int obj_size = Code::SizeFor(body_size);
@@ -4026,6 +4175,7 @@ MaybeObject* Heap::CreateCode(const CodeDesc& desc,
code->set_is_crankshafted(crankshafted);
code->set_deoptimization_data(empty_fixed_array(), SKIP_WRITE_BARRIER);
code->set_raw_type_feedback_info(undefined_value());
+ code->set_next_code_link(undefined_value());
code->set_handler_table(empty_fixed_array(), SKIP_WRITE_BARRIER);
code->set_gc_metadata(Smi::FromInt(0));
code->set_ic_age(global_ic_age_);
@@ -4033,7 +4183,11 @@ MaybeObject* Heap::CreateCode(const CodeDesc& desc,
if (code->kind() == Code::OPTIMIZED_FUNCTION) {
code->set_marked_for_deoptimization(false);
}
- code->set_constant_pool(empty_constant_pool_array());
+
+ if (FLAG_enable_ool_constant_pool) {
+ desc.origin->PopulateConstantPool(constant_pool);
+ }
+ code->set_constant_pool(constant_pool);
#ifdef ENABLE_DEBUGGER_SUPPORT
if (code->kind() == Code::FUNCTION) {
@@ -4064,9 +4218,20 @@ MaybeObject* Heap::CreateCode(const CodeDesc& desc,
MaybeObject* Heap::CopyCode(Code* code) {
+ MaybeObject* maybe_result;
+ Object* new_constant_pool;
+ if (FLAG_enable_ool_constant_pool &&
+ code->constant_pool() != empty_constant_pool_array()) {
+ // Copy the constant pool, since edits to the copied code may modify
+ // the constant pool.
+ maybe_result = CopyConstantPoolArray(code->constant_pool());
+ if (!maybe_result->ToObject(&new_constant_pool)) return maybe_result;
+ } else {
+ new_constant_pool = empty_constant_pool_array();
+ }
+
// Allocate an object the same size as the code object.
int obj_size = code->Size();
- MaybeObject* maybe_result;
if (obj_size > code_space()->AreaSize()) {
maybe_result = lo_space_->AllocateRaw(obj_size, EXECUTABLE);
} else {
@@ -4080,8 +4245,12 @@ MaybeObject* Heap::CopyCode(Code* code) {
Address old_addr = code->address();
Address new_addr = reinterpret_cast<HeapObject*>(result)->address();
CopyBlock(new_addr, old_addr, obj_size);
- // Relocate the copy.
Code* new_code = Code::cast(result);
+
+ // Update the constant pool.
+ new_code->set_constant_pool(new_constant_pool);
+
+ // Relocate the copy.
ASSERT(!isolate_->code_range()->exists() ||
isolate_->code_range()->contains(code->address()));
new_code->Relocate(new_addr - old_addr);
@@ -4090,8 +4259,8 @@ MaybeObject* Heap::CopyCode(Code* code) {
MaybeObject* Heap::CopyCode(Code* code, Vector<byte> reloc_info) {
- // Allocate ByteArray before the Code object, so that we do not risk
- // leaving uninitialized Code object (and breaking the heap).
+ // Allocate ByteArray and ConstantPoolArray before the Code object, so that we
+ // do not risk leaving uninitialized Code object (and breaking the heap).
Object* reloc_info_array;
{ MaybeObject* maybe_reloc_info_array =
AllocateByteArray(reloc_info.length(), TENURED);
@@ -4099,6 +4268,18 @@ MaybeObject* Heap::CopyCode(Code* code, Vector<byte> reloc_info) {
return maybe_reloc_info_array;
}
}
+ Object* new_constant_pool;
+ if (FLAG_enable_ool_constant_pool &&
+ code->constant_pool() != empty_constant_pool_array()) {
+ // Copy the constant pool, since edits to the copied code may modify
+ // the constant pool.
+ MaybeObject* maybe_constant_pool =
+ CopyConstantPoolArray(code->constant_pool());
+ if (!maybe_constant_pool->ToObject(&new_constant_pool))
+ return maybe_constant_pool;
+ } else {
+ new_constant_pool = empty_constant_pool_array();
+ }
int new_body_size = RoundUp(code->instruction_size(), kObjectAlignment);
@@ -4128,6 +4309,9 @@ MaybeObject* Heap::CopyCode(Code* code, Vector<byte> reloc_info) {
Code* new_code = Code::cast(result);
new_code->set_relocation_info(ByteArray::cast(reloc_info_array));
+ // Update constant pool.
+ new_code->set_constant_pool(new_constant_pool);
+
// Copy patched rinfo.
CopyBytes(new_code->relocation_start(),
reloc_info.start(),
@@ -4158,28 +4342,8 @@ void Heap::InitializeAllocationMemento(AllocationMemento* memento,
}
-MaybeObject* Heap::AllocateWithAllocationSite(Map* map, AllocationSpace space,
- Handle<AllocationSite> allocation_site) {
- ASSERT(gc_state_ == NOT_IN_GC);
- ASSERT(map->instance_type() != MAP_TYPE);
- // If allocation failures are disallowed, we may allocate in a different
- // space when new space is full and the object is not a large object.
- AllocationSpace retry_space =
- (space != NEW_SPACE) ? space : TargetSpaceId(map->instance_type());
- int size = map->instance_size() + AllocationMemento::kSize;
- Object* result;
- MaybeObject* maybe_result = AllocateRaw(size, space, retry_space);
- if (!maybe_result->ToObject(&result)) return maybe_result;
- // No need for write barrier since object is white and map is in old space.
- HeapObject::cast(result)->set_map_no_write_barrier(map);
- AllocationMemento* alloc_memento = reinterpret_cast<AllocationMemento*>(
- reinterpret_cast<Address>(result) + map->instance_size());
- InitializeAllocationMemento(alloc_memento, *allocation_site);
- return result;
-}
-
-
-MaybeObject* Heap::Allocate(Map* map, AllocationSpace space) {
+MaybeObject* Heap::Allocate(Map* map, AllocationSpace space,
+ AllocationSite* allocation_site) {
ASSERT(gc_state_ == NOT_IN_GC);
ASSERT(map->instance_type() != MAP_TYPE);
// If allocation failures are disallowed, we may allocate in a different
@@ -4187,11 +4351,19 @@ MaybeObject* Heap::Allocate(Map* map, AllocationSpace space) {
AllocationSpace retry_space =
(space != NEW_SPACE) ? space : TargetSpaceId(map->instance_type());
int size = map->instance_size();
+ if (allocation_site != NULL) {
+ size += AllocationMemento::kSize;
+ }
Object* result;
MaybeObject* maybe_result = AllocateRaw(size, space, retry_space);
if (!maybe_result->ToObject(&result)) return maybe_result;
// No need for write barrier since object is white and map is in old space.
HeapObject::cast(result)->set_map_no_write_barrier(map);
+ if (allocation_site != NULL) {
+ AllocationMemento* alloc_memento = reinterpret_cast<AllocationMemento*>(
+ reinterpret_cast<Address>(result) + map->instance_size());
+ InitializeAllocationMemento(alloc_memento, allocation_site);
+ }
return result;
}
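
The merged Heap::Allocate above folds the old AllocateWithAllocationSite variant into one path: when a site is passed, the request grows by the memento size and the memento is written directly behind the object. A minimal layout sketch of that idea (malloc stands in for AllocateRaw; all names and sizes are illustrative):

#include <cstdio>
#include <cstdlib>
#include <cstring>

struct AllocationSiteSketch { int id; };

// Size of the memento we append; in V8 this is AllocationMemento::kSize.
static const size_t kMementoSize = sizeof(AllocationSiteSketch*);

// Allocates |instance_size| bytes for the object and, when a site is given,
// kMementoSize extra bytes right behind it -- the same size bump and
// placement the patch folds into Heap::Allocate.
void* AllocateSketch(size_t instance_size, AllocationSiteSketch* site) {
  size_t size = instance_size + (site != nullptr ? kMementoSize : 0);
  void* result = std::malloc(size);
  if (result == nullptr) return nullptr;
  if (site != nullptr) {
    // The "memento" here is just the site pointer stored after the object.
    std::memcpy(static_cast<char*>(result) + instance_size, &site,
                sizeof(site));
  }
  return result;
}

int main() {
  AllocationSiteSketch site = {42};
  void* with_site = AllocateSketch(32, &site);
  void* plain = AllocateSketch(32, nullptr);
  std::printf("%p %p\n", with_site, plain);
  std::free(with_site);
  std::free(plain);
  return 0;
}
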
@@ -4233,16 +4405,15 @@ MaybeObject* Heap::AllocateArgumentsObject(Object* callee, int length) {
JSObject* boilerplate;
int arguments_object_size;
bool strict_mode_callee = callee->IsJSFunction() &&
- !JSFunction::cast(callee)->shared()->is_classic_mode();
+ JSFunction::cast(callee)->shared()->strict_mode() == STRICT;
if (strict_mode_callee) {
boilerplate =
- isolate()->context()->native_context()->
- strict_mode_arguments_boilerplate();
- arguments_object_size = kArgumentsObjectSizeStrict;
+ isolate()->context()->native_context()->strict_arguments_boilerplate();
+ arguments_object_size = kStrictArgumentsObjectSize;
} else {
boilerplate =
- isolate()->context()->native_context()->arguments_boilerplate();
- arguments_object_size = kArgumentsObjectSize;
+ isolate()->context()->native_context()->sloppy_arguments_boilerplate();
+ arguments_object_size = kSloppyArgumentsObjectSize;
}
// Check that the size of the boilerplate matches our
@@ -4268,7 +4439,7 @@ MaybeObject* Heap::AllocateArgumentsObject(Object* callee, int length) {
JSObject::cast(result)->InObjectPropertyAtPut(kArgumentsLengthIndex,
Smi::FromInt(length),
SKIP_WRITE_BARRIER);
- // Set the callee property for non-strict mode arguments object only.
+ // Set the callee property for sloppy mode arguments object only.
if (!strict_mode_callee) {
JSObject::cast(result)->InObjectPropertyAtPut(kArgumentsCalleeIndex,
callee);
@@ -4315,7 +4486,10 @@ void Heap::InitializeJSObjectFromMap(JSObject* obj,
MaybeObject* Heap::AllocateJSObjectFromMap(
- Map* map, PretenureFlag pretenure, bool allocate_properties) {
+ Map* map,
+ PretenureFlag pretenure,
+ bool allocate_properties,
+ AllocationSite* allocation_site) {
// JSFunctions should be allocated using AllocateFunction to be
// properly initialized.
ASSERT(map->instance_type() != JS_FUNCTION_TYPE);
@@ -4341,90 +4515,28 @@ MaybeObject* Heap::AllocateJSObjectFromMap(
int size = map->instance_size();
AllocationSpace space = SelectSpace(size, OLD_POINTER_SPACE, pretenure);
Object* obj;
- MaybeObject* maybe_obj = Allocate(map, space);
+ MaybeObject* maybe_obj = Allocate(map, space, allocation_site);
if (!maybe_obj->To(&obj)) return maybe_obj;
// Initialize the JSObject.
InitializeJSObjectFromMap(JSObject::cast(obj), properties, map);
ASSERT(JSObject::cast(obj)->HasFastElements() ||
- JSObject::cast(obj)->HasExternalArrayElements());
- return obj;
-}
-
-
-MaybeObject* Heap::AllocateJSObjectFromMapWithAllocationSite(
- Map* map, Handle<AllocationSite> allocation_site) {
- // JSFunctions should be allocated using AllocateFunction to be
- // properly initialized.
- ASSERT(map->instance_type() != JS_FUNCTION_TYPE);
-
- // Both types of global objects should be allocated using
- // AllocateGlobalObject to be properly initialized.
- ASSERT(map->instance_type() != JS_GLOBAL_OBJECT_TYPE);
- ASSERT(map->instance_type() != JS_BUILTINS_OBJECT_TYPE);
-
- // Allocate the backing storage for the properties.
- int prop_size = map->InitialPropertiesLength();
- ASSERT(prop_size >= 0);
- FixedArray* properties;
- { MaybeObject* maybe_properties = AllocateFixedArray(prop_size);
- if (!maybe_properties->To(&properties)) return maybe_properties;
- }
-
- // Allocate the JSObject.
- int size = map->instance_size();
- AllocationSpace space = SelectSpace(size, OLD_POINTER_SPACE, NOT_TENURED);
- Object* obj;
- MaybeObject* maybe_obj =
- AllocateWithAllocationSite(map, space, allocation_site);
- if (!maybe_obj->To(&obj)) return maybe_obj;
-
- // Initialize the JSObject.
- InitializeJSObjectFromMap(JSObject::cast(obj), properties, map);
- ASSERT(JSObject::cast(obj)->HasFastElements());
+ JSObject::cast(obj)->HasExternalArrayElements() ||
+ JSObject::cast(obj)->HasFixedTypedArrayElements());
return obj;
}
MaybeObject* Heap::AllocateJSObject(JSFunction* constructor,
- PretenureFlag pretenure) {
+ PretenureFlag pretenure,
+ AllocationSite* allocation_site) {
ASSERT(constructor->has_initial_map());
- // Allocate the object based on the constructors initial map.
- MaybeObject* result = AllocateJSObjectFromMap(
- constructor->initial_map(), pretenure);
-#ifdef DEBUG
- // Make sure result is NOT a global object if valid.
- Object* non_failure;
- ASSERT(!result->ToObject(&non_failure) || !non_failure->IsGlobalObject());
-#endif
- return result;
-}
-
-MaybeObject* Heap::AllocateJSObjectWithAllocationSite(JSFunction* constructor,
- Handle<AllocationSite> allocation_site) {
- ASSERT(constructor->has_initial_map());
- // Allocate the object based on the constructors initial map, or the payload
- // advice
- Map* initial_map = constructor->initial_map();
-
- ElementsKind to_kind = allocation_site->GetElementsKind();
- AllocationSiteMode mode = TRACK_ALLOCATION_SITE;
- if (to_kind != initial_map->elements_kind()) {
- MaybeObject* maybe_new_map = initial_map->AsElementsKind(to_kind);
- if (!maybe_new_map->To(&initial_map)) return maybe_new_map;
- // Possibly alter the mode, since we found an updated elements kind
- // in the type info cell.
- mode = AllocationSite::GetMode(to_kind);
- }
-
- MaybeObject* result;
- if (mode == TRACK_ALLOCATION_SITE) {
- result = AllocateJSObjectFromMapWithAllocationSite(initial_map,
- allocation_site);
- } else {
- result = AllocateJSObjectFromMap(initial_map, NOT_TENURED);
- }
+ // Allocate the object based on the constructor's initial map.
+ MaybeObject* result = AllocateJSObjectFromMap(constructor->initial_map(),
+ pretenure,
+ true,
+ allocation_site);
#ifdef DEBUG
// Make sure result is NOT a global object if valid.
Object* non_failure;
@@ -4926,16 +5038,13 @@ MaybeObject* Heap::AllocateInternalizedStringImpl(
int size;
Map* map;
+ if (chars < 0 || chars > String::kMaxLength) {
+ return isolate()->ThrowInvalidStringLength();
+ }
if (is_one_byte) {
- if (chars > SeqOneByteString::kMaxLength) {
- return Failure::OutOfMemoryException(0x9);
- }
map = ascii_internalized_string_map();
size = SeqOneByteString::SizeFor(chars);
} else {
- if (chars > SeqTwoByteString::kMaxLength) {
- return Failure::OutOfMemoryException(0xa);
- }
map = internalized_string_map();
size = SeqTwoByteString::SizeFor(chars);
}
@@ -4977,8 +5086,8 @@ MaybeObject* Heap::AllocateInternalizedStringImpl<false>(
MaybeObject* Heap::AllocateRawOneByteString(int length,
PretenureFlag pretenure) {
- if (length < 0 || length > SeqOneByteString::kMaxLength) {
- return Failure::OutOfMemoryException(0xb);
+ if (length < 0 || length > String::kMaxLength) {
+ return isolate()->ThrowInvalidStringLength();
}
int size = SeqOneByteString::SizeFor(length);
ASSERT(size <= SeqOneByteString::kMaxSize);
@@ -5001,8 +5110,8 @@ MaybeObject* Heap::AllocateRawOneByteString(int length,
MaybeObject* Heap::AllocateRawTwoByteString(int length,
PretenureFlag pretenure) {
- if (length < 0 || length > SeqTwoByteString::kMaxLength) {
- return Failure::OutOfMemoryException(0xc);
+ if (length < 0 || length > String::kMaxLength) {
+ return isolate()->ThrowInvalidStringLength();
}
int size = SeqTwoByteString::SizeFor(length);
ASSERT(size <= SeqTwoByteString::kMaxSize);
@@ -5054,6 +5163,38 @@ MaybeObject* Heap::AllocateEmptyExternalArray(ExternalArrayType array_type) {
}
+MaybeObject* Heap::CopyAndTenureFixedCOWArray(FixedArray* src) {
+ if (!InNewSpace(src)) {
+ return src;
+ }
+
+ int len = src->length();
+ Object* obj;
+ { MaybeObject* maybe_obj = AllocateRawFixedArray(len, TENURED);
+ if (!maybe_obj->ToObject(&obj)) return maybe_obj;
+ }
+ HeapObject::cast(obj)->set_map_no_write_barrier(fixed_array_map());
+ FixedArray* result = FixedArray::cast(obj);
+ result->set_length(len);
+
+ // Copy the content
+ DisallowHeapAllocation no_gc;
+ WriteBarrierMode mode = result->GetWriteBarrierMode(no_gc);
+ for (int i = 0; i < len; i++) result->set(i, src->get(i), mode);
+
+ // TODO(mvstanton): The map is set twice because of protection against calling
+ // set() on a COW FixedArray. Issue v8:3221 created to track this, and
+ // we might then be able to remove this whole method.
+ HeapObject::cast(obj)->set_map_no_write_barrier(fixed_cow_array_map());
+ return result;
+}
+
+
+MaybeObject* Heap::AllocateEmptyFixedTypedArray(ExternalArrayType array_type) {
+ return AllocateFixedTypedArray(0, array_type, TENURED);
+}
+
+
MaybeObject* Heap::CopyFixedArrayWithMap(FixedArray* src, Map* map) {
int len = src->length();
Object* obj;
@@ -5100,27 +5241,30 @@ MaybeObject* Heap::CopyFixedDoubleArrayWithMap(FixedDoubleArray* src,
MaybeObject* Heap::CopyConstantPoolArrayWithMap(ConstantPoolArray* src,
Map* map) {
int int64_entries = src->count_of_int64_entries();
- int ptr_entries = src->count_of_ptr_entries();
+ int code_ptr_entries = src->count_of_code_ptr_entries();
+ int heap_ptr_entries = src->count_of_heap_ptr_entries();
int int32_entries = src->count_of_int32_entries();
Object* obj;
{ MaybeObject* maybe_obj =
- AllocateConstantPoolArray(int64_entries, ptr_entries, int32_entries);
+ AllocateConstantPoolArray(int64_entries, code_ptr_entries,
+ heap_ptr_entries, int32_entries);
if (!maybe_obj->ToObject(&obj)) return maybe_obj;
}
HeapObject* dst = HeapObject::cast(obj);
dst->set_map_no_write_barrier(map);
+ int size = ConstantPoolArray::SizeFor(
+ int64_entries, code_ptr_entries, heap_ptr_entries, int32_entries);
CopyBlock(
dst->address() + ConstantPoolArray::kLengthOffset,
src->address() + ConstantPoolArray::kLengthOffset,
- ConstantPoolArray::SizeFor(int64_entries, ptr_entries, int32_entries)
- - ConstantPoolArray::kLengthOffset);
+ size - ConstantPoolArray::kLengthOffset);
return obj;
}
MaybeObject* Heap::AllocateRawFixedArray(int length, PretenureFlag pretenure) {
if (length < 0 || length > FixedArray::kMaxLength) {
- return Failure::OutOfMemoryException(0xe);
+ v8::internal::Heap::FatalProcessOutOfMemory("invalid array length", true);
}
int size = FixedArray::SizeFor(length);
AllocationSpace space = SelectSpace(size, OLD_POINTER_SPACE, pretenure);
@@ -5232,7 +5376,7 @@ MaybeObject* Heap::AllocateFixedDoubleArrayWithHoles(
MaybeObject* Heap::AllocateRawFixedDoubleArray(int length,
PretenureFlag pretenure) {
if (length < 0 || length > FixedDoubleArray::kMaxLength) {
- return Failure::OutOfMemoryException(0xf);
+ v8::internal::Heap::FatalProcessOutOfMemory("invalid array length", true);
}
int size = FixedDoubleArray::SizeFor(length);
#ifndef V8_HOST_ARCH_64_BIT
@@ -5250,12 +5394,14 @@ MaybeObject* Heap::AllocateRawFixedDoubleArray(int length,
MaybeObject* Heap::AllocateConstantPoolArray(int number_of_int64_entries,
- int number_of_ptr_entries,
+ int number_of_code_ptr_entries,
+ int number_of_heap_ptr_entries,
int number_of_int32_entries) {
- ASSERT(number_of_int64_entries > 0 || number_of_ptr_entries > 0 ||
- number_of_int32_entries > 0);
+ ASSERT(number_of_int64_entries > 0 || number_of_code_ptr_entries > 0 ||
+ number_of_heap_ptr_entries > 0 || number_of_int32_entries > 0);
int size = ConstantPoolArray::SizeFor(number_of_int64_entries,
- number_of_ptr_entries,
+ number_of_code_ptr_entries,
+ number_of_heap_ptr_entries,
number_of_int32_entries);
#ifndef V8_HOST_ARCH_64_BIT
size += kPointerSize;
@@ -5272,29 +5418,38 @@ MaybeObject* Heap::AllocateConstantPoolArray(int number_of_int64_entries,
ConstantPoolArray* constant_pool =
reinterpret_cast<ConstantPoolArray*>(object);
constant_pool->SetEntryCounts(number_of_int64_entries,
- number_of_ptr_entries,
+ number_of_code_ptr_entries,
+ number_of_heap_ptr_entries,
number_of_int32_entries);
- if (number_of_ptr_entries > 0) {
+ if (number_of_code_ptr_entries > 0) {
+ int offset =
+ constant_pool->OffsetOfElementAt(constant_pool->first_code_ptr_index());
+ MemsetPointer(
+ reinterpret_cast<Address*>(HeapObject::RawField(constant_pool, offset)),
+ isolate()->builtins()->builtin(Builtins::kIllegal)->entry(),
+ number_of_code_ptr_entries);
+ }
+ if (number_of_heap_ptr_entries > 0) {
+ int offset =
+ constant_pool->OffsetOfElementAt(constant_pool->first_heap_ptr_index());
MemsetPointer(
- HeapObject::RawField(
- constant_pool,
- constant_pool->OffsetOfElementAt(constant_pool->first_ptr_index())),
+ HeapObject::RawField(constant_pool, offset),
undefined_value(),
- number_of_ptr_entries);
+ number_of_heap_ptr_entries);
}
return constant_pool;
}
MaybeObject* Heap::AllocateEmptyConstantPoolArray() {
- int size = ConstantPoolArray::SizeFor(0, 0, 0);
+ int size = ConstantPoolArray::SizeFor(0, 0, 0, 0);
Object* result;
{ MaybeObject* maybe_result =
AllocateRaw(size, OLD_DATA_SPACE, OLD_DATA_SPACE);
if (!maybe_result->ToObject(&result)) return maybe_result;
}
HeapObject::cast(result)->set_map_no_write_barrier(constant_pool_array_map());
- ConstantPoolArray::cast(result)->SetEntryCounts(0, 0, 0);
+ ConstantPoolArray::cast(result)->SetEntryCounts(0, 0, 0, 0);
return result;
}
@@ -5826,6 +5981,9 @@ void Heap::Verify() {
VerifyPointersVisitor visitor;
IterateRoots(&visitor, VISIT_ONLY_STRONG);
+ VerifySmisVisitor smis_visitor;
+ IterateSmiRoots(&smis_visitor);
+
new_space_.Verify();
old_pointer_space_->Verify(&visitor);
@@ -6123,6 +6281,14 @@ void Heap::IterateWeakRoots(ObjectVisitor* v, VisitMode mode) {
}
+void Heap::IterateSmiRoots(ObjectVisitor* v) {
+ // Acquire execution access since we are going to read stack limit values.
+ ExecutionAccess access(isolate());
+ v->VisitPointers(&roots_[kSmiRootsStart], &roots_[kRootListLength]);
+ v->Synchronize(VisitorSynchronization::kSmiRootList);
+}
+
+
void Heap::IterateStrongRoots(ObjectVisitor* v, VisitMode mode) {
v->VisitPointers(&roots_[0], &roots_[kStrongRootListLength]);
v->Synchronize(VisitorSynchronization::kStrongRootList);
@@ -6345,7 +6511,7 @@ intptr_t Heap::PromotedSpaceSizeOfObjects() {
bool Heap::AdvanceSweepers(int step_size) {
- ASSERT(isolate()->num_sweeper_threads() == 0);
+ ASSERT(!mark_compact_collector()->AreSweeperThreadsActivated());
bool sweeping_complete = old_data_space()->AdvanceSweeper(step_size);
sweeping_complete &= old_pointer_space()->AdvanceSweeper(step_size);
return sweeping_complete;
@@ -6499,8 +6665,6 @@ bool Heap::SetUp() {
mark_compact_collector()->SetUp();
- if (FLAG_concurrent_recompilation) relocation_mutex_ = new Mutex;
-
return true;
}
@@ -6642,9 +6806,6 @@ void Heap::TearDown() {
incremental_marking()->TearDown();
isolate_->memory_allocator()->TearDown();
-
- delete relocation_mutex_;
- relocation_mutex_ = NULL;
}
@@ -7386,8 +7547,9 @@ GCTracer::~GCTracer() {
PrintF("external=%.1f ", scopes_[Scope::EXTERNAL]);
PrintF("mark=%.1f ", scopes_[Scope::MC_MARK]);
- PrintF("sweep=%.1f ", scopes_[Scope::MC_SWEEP]);
- PrintF("sweepns=%.1f ", scopes_[Scope::MC_SWEEP_NEWSPACE]);
+ PrintF("sweep=%.2f ", scopes_[Scope::MC_SWEEP]);
+ PrintF("sweepns=%.2f ", scopes_[Scope::MC_SWEEP_NEWSPACE]);
+ PrintF("sweepos=%.2f ", scopes_[Scope::MC_SWEEP_OLDSPACE]);
PrintF("evacuate=%.1f ", scopes_[Scope::MC_EVACUATE_PAGES]);
PrintF("new_new=%.1f ", scopes_[Scope::MC_UPDATE_NEW_TO_NEW_POINTERS]);
PrintF("root_new=%.1f ", scopes_[Scope::MC_UPDATE_ROOT_TO_NEW_POINTERS]);
@@ -7518,7 +7680,7 @@ void DescriptorLookupCache::Clear() {
void Heap::GarbageCollectionGreedyCheck() {
ASSERT(FLAG_gc_greedy);
if (isolate_->bootstrapper()->IsActive()) return;
- if (disallow_allocation_failure()) return;
+ if (!AllowAllocationFailure::IsAllowed(isolate_)) return;
CollectGarbage(NEW_SPACE);
}
#endif
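
[Editorial sketch] The heap.cc hunks above fold the former AllocateJSObjectWithAllocationSite / AllocateJSObjectFromMapWithAllocationSite entry points into the plain allocators by giving them a trailing AllocationSite* parameter that defaults to NULL. A rough standalone illustration of that pattern follows; Site and Thing are hypothetical stand-ins, not V8 classes.

#include <cstdio>
#include <cstddef>

struct Site { int feedback; };

struct Thing {
  int size;
  const Site* memento;
};

// One allocator covers both the plain case and the site-tracking case; the
// defaulted pointer plays the role of the removed *WithAllocationSite variant.
static Thing AllocateThing(int size, const Site* site = NULL) {
  Thing t;
  t.size = size;
  t.memento = site;  // non-NULL means "remember the site", like a memento
  return t;
}

int main() {
  Site site = { 42 };
  Thing plain = AllocateThing(16);           // old AllocateJSObject-style call
  Thing tracked = AllocateThing(16, &site);  // old *WithAllocationSite-style call
  std::printf("plain has memento: %d, tracked has memento: %d\n",
              plain.memento != NULL, tracked.memento != NULL);
  return 0;
}

A defaulted pointer keeps every existing call site compiling while letting allocation-site-aware callers opt in, which is why the duplicate methods could be deleted.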
diff --git a/deps/v8/src/heap.h b/deps/v8/src/heap.h
index 266cdb9684..0f586e9284 100644
--- a/deps/v8/src/heap.h
+++ b/deps/v8/src/heap.h
@@ -78,7 +78,6 @@ namespace internal {
V(ByteArray, empty_byte_array, EmptyByteArray) \
V(DescriptorArray, empty_descriptor_array, EmptyDescriptorArray) \
V(ConstantPoolArray, empty_constant_pool_array, EmptyConstantPoolArray) \
- V(Smi, stack_limit, StackLimit) \
V(Oddball, arguments_marker, ArgumentsMarker) \
/* The roots above this line should be boring from a GC point of view. */ \
/* This means they are never in new space and never on a page that is */ \
@@ -165,7 +164,17 @@ namespace internal {
V(Map, fixed_float32_array_map, FixedFloat32ArrayMap) \
V(Map, fixed_float64_array_map, FixedFloat64ArrayMap) \
V(Map, fixed_uint8_clamped_array_map, FixedUint8ClampedArrayMap) \
- V(Map, non_strict_arguments_elements_map, NonStrictArgumentsElementsMap) \
+ V(FixedTypedArrayBase, empty_fixed_uint8_array, EmptyFixedUint8Array) \
+ V(FixedTypedArrayBase, empty_fixed_int8_array, EmptyFixedInt8Array) \
+ V(FixedTypedArrayBase, empty_fixed_uint16_array, EmptyFixedUint16Array) \
+ V(FixedTypedArrayBase, empty_fixed_int16_array, EmptyFixedInt16Array) \
+ V(FixedTypedArrayBase, empty_fixed_uint32_array, EmptyFixedUint32Array) \
+ V(FixedTypedArrayBase, empty_fixed_int32_array, EmptyFixedInt32Array) \
+ V(FixedTypedArrayBase, empty_fixed_float32_array, EmptyFixedFloat32Array) \
+ V(FixedTypedArrayBase, empty_fixed_float64_array, EmptyFixedFloat64Array) \
+ V(FixedTypedArrayBase, empty_fixed_uint8_clamped_array, \
+ EmptyFixedUint8ClampedArray) \
+ V(Map, sloppy_arguments_elements_map, SloppyArgumentsElementsMap) \
V(Map, function_context_map, FunctionContextMap) \
V(Map, catch_context_map, CatchContextMap) \
V(Map, with_context_map, WithContextMap) \
@@ -186,27 +195,37 @@ namespace internal {
V(Code, js_entry_code, JsEntryCode) \
V(Code, js_construct_entry_code, JsConstructEntryCode) \
V(FixedArray, natives_source_cache, NativesSourceCache) \
- V(Smi, last_script_id, LastScriptId) \
V(Script, empty_script, EmptyScript) \
- V(Smi, real_stack_limit, RealStackLimit) \
V(NameDictionary, intrinsic_function_names, IntrinsicFunctionNames) \
- V(Smi, arguments_adaptor_deopt_pc_offset, ArgumentsAdaptorDeoptPCOffset) \
- V(Smi, construct_stub_deopt_pc_offset, ConstructStubDeoptPCOffset) \
- V(Smi, getter_stub_deopt_pc_offset, GetterStubDeoptPCOffset) \
- V(Smi, setter_stub_deopt_pc_offset, SetterStubDeoptPCOffset) \
V(Cell, undefined_cell, UndefineCell) \
V(JSObject, observation_state, ObservationState) \
V(Map, external_map, ExternalMap) \
+ V(Object, symbol_registry, SymbolRegistry) \
V(Symbol, frozen_symbol, FrozenSymbol) \
+ V(Symbol, nonexistent_symbol, NonExistentSymbol) \
V(Symbol, elements_transition_symbol, ElementsTransitionSymbol) \
V(SeededNumberDictionary, empty_slow_element_dictionary, \
EmptySlowElementDictionary) \
V(Symbol, observed_symbol, ObservedSymbol) \
+ V(Symbol, uninitialized_symbol, UninitializedSymbol) \
+ V(Symbol, megamorphic_symbol, MegamorphicSymbol) \
V(FixedArray, materialized_objects, MaterializedObjects) \
- V(FixedArray, allocation_sites_scratchpad, AllocationSitesScratchpad)
+ V(FixedArray, allocation_sites_scratchpad, AllocationSitesScratchpad) \
+ V(JSObject, microtask_state, MicrotaskState)
+
+// Entries in this list are limited to Smis and are not visited during GC.
+#define SMI_ROOT_LIST(V) \
+ V(Smi, stack_limit, StackLimit) \
+ V(Smi, real_stack_limit, RealStackLimit) \
+ V(Smi, last_script_id, LastScriptId) \
+ V(Smi, arguments_adaptor_deopt_pc_offset, ArgumentsAdaptorDeoptPCOffset) \
+ V(Smi, construct_stub_deopt_pc_offset, ConstructStubDeoptPCOffset) \
+ V(Smi, getter_stub_deopt_pc_offset, GetterStubDeoptPCOffset) \
+ V(Smi, setter_stub_deopt_pc_offset, SetterStubDeoptPCOffset)
#define ROOT_LIST(V) \
STRONG_ROOT_LIST(V) \
+ SMI_ROOT_LIST(V) \
V(StringTable, string_table, StringTable)
// Heap roots that are known to be immortal immovable, for which we can safely
@@ -242,7 +261,7 @@ namespace internal {
V(empty_constant_pool_array) \
V(arguments_marker) \
V(symbol_map) \
- V(non_strict_arguments_elements_map) \
+ V(sloppy_arguments_elements_map) \
V(function_context_map) \
V(catch_context_map) \
V(with_context_map) \
@@ -297,6 +316,11 @@ namespace internal {
V(String_string, "String") \
V(symbol_string, "symbol") \
V(Symbol_string, "Symbol") \
+ V(for_string, "for") \
+ V(for_api_string, "for_api") \
+ V(for_intern_string, "for_intern") \
+ V(private_api_string, "private_api") \
+ V(private_intern_string, "private_intern") \
V(Date_string, "Date") \
V(this_string, "this") \
V(to_string_string, "toString") \
@@ -325,10 +349,6 @@ namespace internal {
V(MakeReferenceError_string, "MakeReferenceError") \
V(MakeSyntaxError_string, "MakeSyntaxError") \
V(MakeTypeError_string, "MakeTypeError") \
- V(invalid_lhs_in_assignment_string, "invalid_lhs_in_assignment") \
- V(invalid_lhs_in_for_in_string, "invalid_lhs_in_for_in") \
- V(invalid_lhs_in_postfix_op_string, "invalid_lhs_in_postfix_op") \
- V(invalid_lhs_in_prefix_op_string, "invalid_lhs_in_prefix_op") \
V(illegal_return_string, "illegal_return") \
V(illegal_break_string, "illegal_break") \
V(illegal_continue_string, "illegal_continue") \
@@ -678,14 +698,13 @@ class Heap {
// constructor.
// Returns Failure::RetryAfterGC(requested_bytes, space) if the allocation
// failed.
+ // If allocation_site is non-null, then a memento is emitted after the object
+ // that points to the site.
// Please note this does not perform a garbage collection.
MUST_USE_RESULT MaybeObject* AllocateJSObject(
JSFunction* constructor,
- PretenureFlag pretenure = NOT_TENURED);
-
- MUST_USE_RESULT MaybeObject* AllocateJSObjectWithAllocationSite(
- JSFunction* constructor,
- Handle<AllocationSite> allocation_site);
+ PretenureFlag pretenure = NOT_TENURED,
+ AllocationSite* allocation_site = NULL);
MUST_USE_RESULT MaybeObject* AllocateJSModule(Context* context,
ScopeInfo* scope_info);
@@ -765,21 +784,21 @@ class Heap {
// Allocates and initializes a new JavaScript object based on a map.
// Returns Failure::RetryAfterGC(requested_bytes, space) if the allocation
// failed.
+ // Passing an allocation site means that a memento will be created that
+ // points to the site.
// Please note this does not perform a garbage collection.
MUST_USE_RESULT MaybeObject* AllocateJSObjectFromMap(
- Map* map, PretenureFlag pretenure = NOT_TENURED, bool alloc_props = true);
-
- MUST_USE_RESULT MaybeObject* AllocateJSObjectFromMapWithAllocationSite(
- Map* map, Handle<AllocationSite> allocation_site);
+ Map* map,
+ PretenureFlag pretenure = NOT_TENURED,
+ bool alloc_props = true,
+ AllocationSite* allocation_site = NULL);
// Allocates a heap object based on the map.
// Returns Failure::RetryAfterGC(requested_bytes, space) if the allocation
// failed.
// Please note this function does not perform a garbage collection.
- MUST_USE_RESULT MaybeObject* Allocate(Map* map, AllocationSpace space);
-
- MUST_USE_RESULT MaybeObject* AllocateWithAllocationSite(Map* map,
- AllocationSpace space, Handle<AllocationSite> allocation_site);
+ MUST_USE_RESULT MaybeObject* Allocate(Map* map, AllocationSpace space,
+ AllocationSite* allocation_site = NULL);
// Allocates a JS Map in the heap.
// Returns Failure::RetryAfterGC(requested_bytes, space) if the allocation
@@ -972,6 +991,10 @@ class Heap {
// Failure::RetryAfterGC(requested_bytes, space) if the allocation failed.
MUST_USE_RESULT inline MaybeObject* CopyFixedArray(FixedArray* src);
+ // Make a copy of src and return it. Returns
+ // Failure::RetryAfterGC(requested_bytes, space) if the allocation failed.
+ MUST_USE_RESULT MaybeObject* CopyAndTenureFixedCOWArray(FixedArray* src);
+
// Make a copy of src, set the map, and return the copy. Returns
// Failure::RetryAfterGC(requested_bytes, space) if the allocation failed.
MUST_USE_RESULT MaybeObject* CopyFixedArrayWithMap(FixedArray* src, Map* map);
@@ -1005,9 +1028,10 @@ class Heap {
PretenureFlag pretenure = NOT_TENURED);
MUST_USE_RESULT MaybeObject* AllocateConstantPoolArray(
- int first_int64_index,
- int first_ptr_index,
- int first_int32_index);
+ int number_of_int64_entries,
+ int number_of_code_ptr_entries,
+ int number_of_heap_ptr_entries,
+ int number_of_int32_entries);
// Allocates a fixed double array with uninitialized values. Returns
// Failure::RetryAfterGC(requested_bytes, space) if the allocation failed.
@@ -1070,15 +1094,15 @@ class Heap {
Object* prototype,
PretenureFlag pretenure = TENURED);
- // Arguments object size.
- static const int kArgumentsObjectSize =
+ // Sloppy mode arguments object size.
+ static const int kSloppyArgumentsObjectSize =
JSObject::kHeaderSize + 2 * kPointerSize;
// The strict mode arguments object has no callee, so it is smaller.
- static const int kArgumentsObjectSizeStrict =
+ static const int kStrictArgumentsObjectSize =
JSObject::kHeaderSize + 1 * kPointerSize;
// Indices for direct access into argument objects.
static const int kArgumentsLengthIndex = 0;
- // callee is only valid in non-strict mode.
+ // callee is only valid in sloppy mode.
static const int kArgumentsCalleeIndex = 1;
// Allocates an arguments object - optionally with an elements array.
@@ -1134,7 +1158,6 @@ class Heap {
int start_position,
int end_position,
Object* script,
- Object* stack_trace,
Object* stack_frames);
// Allocate a new external string object, which is backed by a string
@@ -1164,6 +1187,13 @@ class Heap {
// when shortening objects.
void CreateFillerObjectAt(Address addr, int size);
+ bool CanMoveObjectStart(HeapObject* object);
+
+ enum InvocationMode { FROM_GC, FROM_MUTATOR };
+
+ // Maintain marking consistency for IncrementalMarking.
+ void AdjustLiveBytes(Address address, int by, InvocationMode mode);
+
// Makes a new native code object
// Returns Failure::RetryAfterGC(requested_bytes, space) if the allocation
// failed. On success, the pointer to the Code object is stored in the
@@ -1255,10 +1285,6 @@ class Heap {
// Notify the heap that a context has been disposed.
int NotifyContextDisposed();
- // Utility to invoke the scavenger. This is needed in test code to
- // ensure correct callback for weak global handles.
- void PerformScavenge();
-
inline void increment_scan_on_scavenge_pages() {
scan_on_scavenge_pages_++;
if (FLAG_gc_verbose) {
@@ -1347,6 +1373,9 @@ class Heap {
void IterateRoots(ObjectVisitor* v, VisitMode mode);
// Iterates over all strong roots in the heap.
void IterateStrongRoots(ObjectVisitor* v, VisitMode mode);
+ // Iterates over entries in the smi roots list. Only interesting to the
+ // serializer/deserializer, since GC does not care about smis.
+ void IterateSmiRoots(ObjectVisitor* v);
// Iterates over all the other roots in the heap.
void IterateWeakRoots(ObjectVisitor* v, VisitMode mode);
@@ -1485,10 +1514,6 @@ class Heap {
allocation_timeout_ = timeout;
}
- bool disallow_allocation_failure() {
- return disallow_allocation_failure_;
- }
-
void TracePathToObjectFrom(Object* target, Object* root);
void TracePathToObject(Object* target);
void TracePathToGlobal();
@@ -1501,10 +1526,16 @@ class Heap {
static inline void ScavengePointer(HeapObject** p);
static inline void ScavengeObject(HeapObject** p, HeapObject* object);
+ enum ScratchpadSlotMode {
+ IGNORE_SCRATCHPAD_SLOT,
+ RECORD_SCRATCHPAD_SLOT
+ };
+
// An object may have an AllocationSite associated with it through a trailing
// AllocationMemento. Its feedback should be updated when objects are found
// in the heap.
- static inline void UpdateAllocationSiteFeedback(HeapObject* object);
+ static inline void UpdateAllocationSiteFeedback(
+ HeapObject* object, ScratchpadSlotMode mode);
// Support for partial snapshots. After calling this we have a linear
// space to write objects in each space.
@@ -1582,7 +1613,7 @@ class Heap {
// Implements the corresponding V8 API function.
bool IdleNotification(int hint);
- // Declare all the root indices.
+ // Declare all the root indices. This defines the root list order.
enum RootListIndex {
#define ROOT_INDEX_DECLARATION(type, name, camel_name) k##camel_name##RootIndex,
STRONG_ROOT_LIST(ROOT_INDEX_DECLARATION)
@@ -1598,8 +1629,14 @@ class Heap {
#undef DECLARE_STRUCT_MAP
kStringTableRootIndex,
+
+#define ROOT_INDEX_DECLARATION(type, name, camel_name) k##camel_name##RootIndex,
+ SMI_ROOT_LIST(ROOT_INDEX_DECLARATION)
+#undef ROOT_INDEX_DECLARATION
+
+ kRootListLength,
kStrongRootListLength = kStringTableRootIndex,
- kRootListLength
+ kSmiRootsStart = kStringTableRootIndex + 1
};
STATIC_CHECK(kUndefinedValueRootIndex == Internals::kUndefinedValueRootIndex);
@@ -1628,7 +1665,9 @@ class Heap {
ExternalArrayType array_type);
RootListIndex RootIndexForEmptyExternalArray(ElementsKind kind);
+ RootListIndex RootIndexForEmptyFixedTypedArray(ElementsKind kind);
ExternalArray* EmptyExternalArrayForMap(Map* map);
+ FixedTypedArrayBase* EmptyFixedTypedArrayForMap(Map* map);
void RecordStats(HeapStats* stats, bool take_snapshot = false);
@@ -1834,6 +1873,8 @@ class Heap {
return amount_of_external_allocated_memory_;
}
+ void DeoptMarkedAllocationSites();
+
// ObjectStats are kept in two arrays, counts and sizes. Related stats are
// stored in a contiguous linear buffer. Stats groups are stored one after
// another.
@@ -1879,16 +1920,12 @@ class Heap {
class RelocationLock {
public:
explicit RelocationLock(Heap* heap) : heap_(heap) {
- if (FLAG_concurrent_recompilation) {
- heap_->relocation_mutex_->Lock();
- }
+ heap_->relocation_mutex_.Lock();
}
~RelocationLock() {
- if (FLAG_concurrent_recompilation) {
- heap_->relocation_mutex_->Unlock();
- }
+ heap_->relocation_mutex_.Unlock();
}
private:
@@ -1984,10 +2021,6 @@ class Heap {
// variable holds the value indicating the number of allocations
// remain until the next failure and garbage collection.
int allocation_timeout_;
-
- // Do we expect to be able to handle allocation failure at this
- // time?
- bool disallow_allocation_failure_;
#endif // DEBUG
// Indicates that the new space should be kept small due to high promotion
@@ -2120,6 +2153,11 @@ class Heap {
GarbageCollector SelectGarbageCollector(AllocationSpace space,
const char** reason);
+ // Make sure there is a filler value behind the top of the new space
+ // so that the GC does not confuse some uninitialized/stale memory
+ // with the allocation memento of the object at the top
+ void EnsureFillerObjectAtTop();
+
// Performs garbage collection operation.
// Returns whether there is a chance that another major GC could
// collect more garbage.
@@ -2195,6 +2233,10 @@ class Heap {
MUST_USE_RESULT MaybeObject* AllocateEmptyExternalArray(
ExternalArrayType array_type);
+ // Allocate empty fixed typed array of given type.
+ MUST_USE_RESULT MaybeObject* AllocateEmptyFixedTypedArray(
+ ExternalArrayType array_type);
+
// Allocate empty fixed double array.
MUST_USE_RESULT MaybeObject* AllocateEmptyFixedDoubleArray();
@@ -2296,7 +2338,8 @@ class Heap {
void InitializeAllocationSitesScratchpad();
// Adds an allocation site to the scratchpad if there is space left.
- void AddAllocationSiteToScratchpad(AllocationSite* site);
+ void AddAllocationSiteToScratchpad(AllocationSite* site,
+ ScratchpadSlotMode mode);
void UpdateSurvivalRateTrend(int start_new_space_size);
@@ -2489,14 +2532,12 @@ class Heap {
MemoryChunk* chunks_queued_for_free_;
- Mutex* relocation_mutex_;
-#ifdef DEBUG
- bool relocation_mutex_locked_by_optimizer_thread_;
-#endif // DEBUG;
+ Mutex relocation_mutex_;
+
+ int gc_callbacks_depth_;
friend class Factory;
friend class GCTracer;
- friend class DisallowAllocationFailure;
friend class AlwaysAllocateScope;
friend class Page;
friend class Isolate;
@@ -2506,6 +2547,7 @@ class Heap {
#ifdef VERIFY_HEAP
friend class NoWeakObjectVerificationScope;
#endif
+ friend class GCCallbacksScope;
DISALLOW_COPY_AND_ASSIGN(Heap);
};
@@ -2546,26 +2588,15 @@ class HeapStats {
};
-class DisallowAllocationFailure {
- public:
- inline DisallowAllocationFailure();
- inline ~DisallowAllocationFailure();
-
-#ifdef DEBUG
- private:
- bool old_state_;
-#endif
-};
-
-
class AlwaysAllocateScope {
public:
- inline AlwaysAllocateScope();
+ explicit inline AlwaysAllocateScope(Isolate* isolate);
inline ~AlwaysAllocateScope();
private:
// Implicitly disable artificial allocation failures.
- DisallowAllocationFailure disallow_allocation_failure_;
+ Heap* heap_;
+ DisallowAllocationFailure daf_;
};
@@ -2578,6 +2609,18 @@ class NoWeakObjectVerificationScope {
#endif
+class GCCallbacksScope {
+ public:
+ explicit inline GCCallbacksScope(Heap* heap);
+ inline ~GCCallbacksScope();
+
+ inline bool CheckReenter();
+
+ private:
+ Heap* heap_;
+};
+
+
// Visitor class to verify interior pointers in spaces that do not contain
// or care about intergenerational references. All heap object pointers have to
// point into the heap to a location that has a map pointer at its first word.
@@ -2589,6 +2632,13 @@ class VerifyPointersVisitor: public ObjectVisitor {
};
+// Verify that all objects are Smis.
+class VerifySmisVisitor: public ObjectVisitor {
+ public:
+ inline void VisitPointers(Object** start, Object** end);
+};
+
+
// Space iterator for iterating over all spaces of the heap. Returns each space
// in turn, and null when it is done.
class AllSpaces BASE_EMBEDDED {
@@ -2829,6 +2879,7 @@ class GCTracer BASE_EMBEDDED {
MC_MARK,
MC_SWEEP,
MC_SWEEP_NEWSPACE,
+ MC_SWEEP_OLDSPACE,
MC_EVACUATE_PAGES,
MC_UPDATE_NEW_TO_NEW_POINTERS,
MC_UPDATE_ROOT_TO_NEW_POINTERS,
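
[Editorial sketch] The heap.h hunks above move the Smi-valued roots into their own SMI_ROOT_LIST X-macro and derive kStrongRootListLength, kSmiRootsStart and kRootListLength from the expansion order of the enum. A minimal sketch of how that layout produces the index boundaries, with a made-up two-entry list in place of the real one (the real V(...) entries also carry a type and an accessor name):

#include <cstdio>

#define STRONG_ROOT_LIST(V) \
  V(UndefinedValue)         \
  V(TheHoleValue)

#define SMI_ROOT_LIST(V) \
  V(StackLimit)          \
  V(LastScriptId)

enum RootListIndex {
#define ROOT_INDEX_DECLARATION(camel_name) k##camel_name##RootIndex,
  STRONG_ROOT_LIST(ROOT_INDEX_DECLARATION)
  kStringTableRootIndex,
  SMI_ROOT_LIST(ROOT_INDEX_DECLARATION)
#undef ROOT_INDEX_DECLARATION
  kRootListLength,
  kStrongRootListLength = kStringTableRootIndex,
  kSmiRootsStart = kStringTableRootIndex + 1
};

int main() {
  // GC-style visitors stop at kStrongRootListLength; IterateSmiRoots-style
  // visitors walk only the range [kSmiRootsStart, kRootListLength).
  std::printf("strong roots: [0, %d)  smi roots: [%d, %d)\n",
              static_cast<int>(kStrongRootListLength),
              static_cast<int>(kSmiRootsStart),
              static_cast<int>(kRootListLength));
  return 0;
}

Because the same list expands into both the enum and the accessor declarations, adding a Smi root in one place keeps the serializer's iteration range consistent automatically.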
diff --git a/deps/v8/src/hydrogen-bce.cc b/deps/v8/src/hydrogen-bce.cc
index c98a03cb5a..dd71078a23 100644
--- a/deps/v8/src/hydrogen-bce.cc
+++ b/deps/v8/src/hydrogen-bce.cc
@@ -30,6 +30,7 @@
namespace v8 {
namespace internal {
+
// We try to "factor up" HBoundsCheck instructions towards the root of the
// dominator tree.
// For now we handle checks where the index is like "exp + int32value".
@@ -135,7 +136,7 @@ class BoundsCheckBbData: public ZoneObject {
void UpdateUpperOffsets(HBoundsCheck* check, int32_t offset) {
BoundsCheckBbData* data = FatherInDominatorTree();
while (data != NULL && data->UpperCheck() == check) {
- ASSERT(data->upper_offset_ <= offset);
+ ASSERT(data->upper_offset_ < offset);
data->upper_offset_ = offset;
data = data->FatherInDominatorTree();
}
@@ -173,7 +174,7 @@ class BoundsCheckBbData: public ZoneObject {
keep_new_check = true;
upper_check_ = new_check;
} else {
- TightenCheck(upper_check_, new_check);
+ TightenCheck(upper_check_, new_check, new_offset);
UpdateUpperOffsets(upper_check_, upper_offset_);
}
} else if (new_offset < lower_offset_) {
@@ -182,7 +183,7 @@ class BoundsCheckBbData: public ZoneObject {
keep_new_check = true;
lower_check_ = new_check;
} else {
- TightenCheck(lower_check_, new_check);
+ TightenCheck(lower_check_, new_check, new_offset);
UpdateLowerOffsets(lower_check_, lower_offset_);
}
} else {
@@ -191,12 +192,20 @@ class BoundsCheckBbData: public ZoneObject {
}
if (!keep_new_check) {
+ if (FLAG_trace_bce) {
+ OS::Print("Eliminating check #%d after tightening\n",
+ new_check->id());
+ }
new_check->block()->graph()->isolate()->counters()->
bounds_checks_eliminated()->Increment();
new_check->DeleteAndReplaceWith(new_check->ActualValue());
} else {
HBoundsCheck* first_check = new_check == lower_check_ ? upper_check_
: lower_check_;
+ if (FLAG_trace_bce) {
+ OS::Print("Moving second check #%d after first check #%d\n",
+ new_check->id(), first_check->id());
+ }
// The length is guaranteed to be live at first_check.
ASSERT(new_check->length() == first_check->length());
HInstruction* old_position = new_check->next();
@@ -275,11 +284,16 @@ class BoundsCheckBbData: public ZoneObject {
}
void TightenCheck(HBoundsCheck* original_check,
- HBoundsCheck* tighter_check) {
+ HBoundsCheck* tighter_check,
+ int32_t new_offset) {
ASSERT(original_check->length() == tighter_check->length());
MoveIndexIfNecessary(tighter_check->index(), original_check, tighter_check);
original_check->ReplaceAllUsesWith(original_check->index());
original_check->SetOperandAt(0, tighter_check->index());
+ if (FLAG_trace_bce) {
+ OS::Print("Tightened check #%d with offset %d from #%d\n",
+ original_check->id(), new_offset, tighter_check->id());
+ }
}
DISALLOW_COPY_AND_ASSIGN(BoundsCheckBbData);
@@ -389,11 +403,32 @@ BoundsCheckBbData* HBoundsCheckEliminationPhase::PreProcessBlock(
bb_data_list,
NULL);
*data_p = bb_data_list;
+ if (FLAG_trace_bce) {
+ OS::Print("Fresh bounds check data for block #%d: [%d]\n",
+ bb->block_id(), offset);
+ }
} else if (data->OffsetIsCovered(offset)) {
bb->graph()->isolate()->counters()->
bounds_checks_eliminated()->Increment();
+ if (FLAG_trace_bce) {
+ OS::Print("Eliminating bounds check #%d, offset %d is covered\n",
+ check->id(), offset);
+ }
check->DeleteAndReplaceWith(check->ActualValue());
} else if (data->BasicBlock() == bb) {
+ // TODO(jkummerow): I think the following logic would be preferable:
+ // if (data->Basicblock() == bb ||
+ // graph()->use_optimistic_licm() ||
+ // bb->IsLoopSuccessorDominator()) {
+ // data->CoverCheck(check, offset)
+ // } else {
+ // /* add pristine BCBbData like in (data == NULL) case above */
+ // }
+ // Even better would be: distinguish between read-only dominator-imposed
+ // knowledge and modifiable upper/lower checks.
+ // What happens currently is that the first bounds check in a dominated
+ // block will stay around while any further checks are hoisted out,
+ // which doesn't make sense. Investigate/fix this in a future CL.
data->CoverCheck(check, offset);
} else if (graph()->use_optimistic_licm() ||
bb->IsLoopSuccessorDominator()) {
@@ -411,6 +446,10 @@ BoundsCheckBbData* HBoundsCheckEliminationPhase::PreProcessBlock(
data->UpperCheck(),
bb_data_list,
data);
+ if (FLAG_trace_bce) {
+ OS::Print("Updated bounds check data for block #%d: [%d - %d]\n",
+ bb->block_id(), new_lower_offset, new_upper_offset);
+ }
table_.Insert(key, bb_data_list, zone());
}
}
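
[Editorial sketch] In the bounds-check elimination changes above, the per-block data essentially records an offset window that the dominating checks already cover; a later check whose offset falls inside the window is redundant, and one outside it widens the window instead. A simplified standalone illustration of that covering test (OffsetWindow is a made-up stand-in for BoundsCheckBbData):

#include <cstdio>

struct OffsetWindow {
  int lower;  // smallest index offset already checked against the length
  int upper;  // largest index offset already checked against the length
  bool Covers(int offset) const { return lower <= offset && offset <= upper; }
};

int main() {
  OffsetWindow window = { -2, 7 };  // checks for index-2 .. index+7 already done
  int offsets[] = { 0, 7, 9 };
  for (int i = 0; i < 3; ++i) {
    std::printf("offset %d: %s\n", offsets[i],
                window.Covers(offsets[i]) ? "eliminate" : "keep / widen window");
  }
  return 0;
}

The new FLAG_trace_bce output in the hunks above prints exactly these decisions (fresh window, covered and eliminated, or tightened/widened), which makes the pass much easier to debug.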
diff --git a/deps/v8/src/hydrogen-check-elimination.cc b/deps/v8/src/hydrogen-check-elimination.cc
index e12f14a13f..52a549299a 100644
--- a/deps/v8/src/hydrogen-check-elimination.cc
+++ b/deps/v8/src/hydrogen-check-elimination.cc
@@ -48,12 +48,12 @@ typedef UniqueSet<Map>* MapSet;
struct HCheckTableEntry {
HValue* object_; // The object being approximated. NULL => invalid entry.
- HValue* check_; // The last check instruction.
- MapSet maps_; // The set of known maps for the object.
+ HInstruction* check_; // The last check instruction.
+ MapSet maps_; // The set of known maps for the object.
};
-// The main datastructure used during check elimination, which stores a
+// The main data structure used during check elimination, which stores a
// set of known maps for each object.
class HCheckTable : public ZoneObject {
public:
@@ -88,6 +88,10 @@ class HCheckTable : public ZoneObject {
ReduceCompareMap(HCompareMap::cast(instr));
break;
}
+ case HValue::kCompareObjectEqAndBranch: {
+ ReduceCompareObjectEqAndBranch(HCompareObjectEqAndBranch::cast(instr));
+ break;
+ }
case HValue::kTransitionElementsKind: {
ReduceTransitionElementsKind(
HTransitionElementsKind::cast(instr));
@@ -103,8 +107,8 @@ class HCheckTable : public ZoneObject {
}
default: {
// If the instruction changes maps uncontrollably, drop everything.
- if (instr->CheckGVNFlag(kChangesMaps) ||
- instr->CheckGVNFlag(kChangesOsrEntries)) {
+ if (instr->CheckChangesFlag(kMaps) ||
+ instr->CheckChangesFlag(kOsrEntries)) {
Kill();
}
}
@@ -116,39 +120,105 @@ class HCheckTable : public ZoneObject {
return this;
}
- // Global analysis: Copy state to successor block.
+ // Support for global analysis with HFlowEngine: Merge given state with
+ // the other incoming state.
+ static HCheckTable* Merge(HCheckTable* succ_state, HBasicBlock* succ_block,
+ HCheckTable* pred_state, HBasicBlock* pred_block,
+ Zone* zone) {
+ if (pred_state == NULL || pred_block->IsUnreachable()) {
+ return succ_state;
+ }
+ if (succ_state == NULL) {
+ return pred_state->Copy(succ_block, pred_block, zone);
+ } else {
+ return succ_state->Merge(succ_block, pred_state, pred_block, zone);
+ }
+ }
+
+ // Support for global analysis with HFlowEngine: Given state merged with all
+ // the other incoming states, prepare it for use.
+ static HCheckTable* Finish(HCheckTable* state, HBasicBlock* block,
+ Zone* zone) {
+ if (state == NULL) {
+ block->MarkUnreachable();
+ } else if (block->IsUnreachable()) {
+ state = NULL;
+ }
+ if (FLAG_trace_check_elimination) {
+ PrintF("Processing B%d, checkmaps-table:\n", block->block_id());
+ Print(state);
+ }
+ return state;
+ }
+
+ private:
+ // Copy state to successor block.
HCheckTable* Copy(HBasicBlock* succ, HBasicBlock* from_block, Zone* zone) {
HCheckTable* copy = new(phase_->zone()) HCheckTable(phase_);
for (int i = 0; i < size_; i++) {
HCheckTableEntry* old_entry = &entries_[i];
+ ASSERT(old_entry->maps_->size() > 0);
HCheckTableEntry* new_entry = &copy->entries_[i];
- // TODO(titzer): keep the check if this block dominates the successor?
new_entry->object_ = old_entry->object_;
- new_entry->check_ = NULL;
new_entry->maps_ = old_entry->maps_->Copy(phase_->zone());
+ // Keep the check if the existing check's block dominates the successor.
+ if (old_entry->check_ != NULL &&
+ old_entry->check_->block()->Dominates(succ)) {
+ new_entry->check_ = old_entry->check_;
+ } else {
+ // Leave it NULL till we meet a new check instruction for this object
+ // in the control flow.
+ new_entry->check_ = NULL;
+ }
}
copy->cursor_ = cursor_;
copy->size_ = size_;
+ // Create entries for succ block's phis.
+ if (!succ->IsLoopHeader() && succ->phis()->length() > 0) {
+ int pred_index = succ->PredecessorIndexOf(from_block);
+ for (int phi_index = 0;
+ phi_index < succ->phis()->length();
+ ++phi_index) {
+ HPhi* phi = succ->phis()->at(phi_index);
+ HValue* phi_operand = phi->OperandAt(pred_index);
+
+ HCheckTableEntry* pred_entry = copy->Find(phi_operand);
+ if (pred_entry != NULL) {
+ // Create an entry for a phi in the table.
+ copy->Insert(phi, NULL, pred_entry->maps_->Copy(phase_->zone()));
+ }
+ }
+ }
+
// Branch-sensitive analysis for certain comparisons may add more facts
// to the state for the successor on the true branch.
bool learned = false;
- HControlInstruction* end = succ->predecessors()->at(0)->end();
- if (succ->predecessors()->length() == 1 && end->SuccessorAt(0) == succ) {
+ if (succ->predecessors()->length() == 1) {
+ HControlInstruction* end = succ->predecessors()->at(0)->end();
+ bool is_true_branch = end->SuccessorAt(0) == succ;
if (end->IsCompareMap()) {
- // Learn on the true branch of if(CompareMap(x)).
HCompareMap* cmp = HCompareMap::cast(end);
HValue* object = cmp->value()->ActualValue();
HCheckTableEntry* entry = copy->Find(object);
- if (entry == NULL) {
- copy->Insert(object, cmp->map());
+ if (is_true_branch) {
+ // Learn on the true branch of if(CompareMap(x)).
+ if (entry == NULL) {
+ copy->Insert(object, cmp, cmp->map());
+ } else {
+ MapSet list = new(phase_->zone()) UniqueSet<Map>();
+ list->Add(cmp->map(), phase_->zone());
+ entry->maps_ = list;
+ entry->check_ = cmp;
+ }
} else {
- MapSet list = new(phase_->zone()) UniqueSet<Map>();
- list->Add(cmp->map(), phase_->zone());
- entry->maps_ = list;
+ // Learn on the false branch of if(CompareMap(x)).
+ if (entry != NULL) {
+ entry->maps_->Remove(cmp->map());
+ }
}
learned = true;
- } else if (end->IsCompareObjectEqAndBranch()) {
+ } else if (is_true_branch && end->IsCompareObjectEqAndBranch()) {
// Learn on the true branch of if(CmpObjectEq(x, y)).
HCompareObjectEqAndBranch* cmp =
HCompareObjectEqAndBranch::cast(end);
@@ -177,44 +247,54 @@ class HCheckTable : public ZoneObject {
succ->block_id(),
learned ? "learned" : "copied",
from_block->block_id());
- copy->Print();
+ Print(copy);
}
return copy;
}
- // Global analysis: Merge this state with the other incoming state.
+ // Merge this state with the other incoming state.
HCheckTable* Merge(HBasicBlock* succ, HCheckTable* that,
- HBasicBlock* that_block, Zone* zone) {
- if (that_block->IsReachable()) {
- if (that->size_ == 0) {
- // If the other state is empty, simply reset.
- size_ = 0;
- cursor_ = 0;
- } else {
- bool compact = false;
- for (int i = 0; i < size_; i++) {
- HCheckTableEntry* this_entry = &entries_[i];
- HCheckTableEntry* that_entry = that->Find(this_entry->object_);
- if (that_entry == NULL) {
- this_entry->object_ = NULL;
- compact = true;
- } else {
- this_entry->maps_ =
- this_entry->maps_->Union(that_entry->maps_, phase_->zone());
- if (this_entry->check_ != that_entry->check_) {
- this_entry->check_ = NULL;
- }
- ASSERT(this_entry->maps_->size() > 0);
+ HBasicBlock* pred_block, Zone* zone) {
+ if (that->size_ == 0) {
+ // If the other state is empty, simply reset.
+ size_ = 0;
+ cursor_ = 0;
+ } else {
+ int pred_index = succ->PredecessorIndexOf(pred_block);
+ bool compact = false;
+ for (int i = 0; i < size_; i++) {
+ HCheckTableEntry* this_entry = &entries_[i];
+ HCheckTableEntry* that_entry;
+ if (this_entry->object_->IsPhi() &&
+ this_entry->object_->block() == succ) {
+ HPhi* phi = HPhi::cast(this_entry->object_);
+ HValue* phi_operand = phi->OperandAt(pred_index);
+ that_entry = that->Find(phi_operand);
+
+ } else {
+ that_entry = that->Find(this_entry->object_);
+ }
+
+ if (that_entry == NULL) {
+ this_entry->object_ = NULL;
+ compact = true;
+ } else {
+ this_entry->maps_ =
+ this_entry->maps_->Union(that_entry->maps_, phase_->zone());
+ if (this_entry->check_ != that_entry->check_) {
+ this_entry->check_ = NULL;
}
+ ASSERT(this_entry->maps_->size() > 0);
}
- if (compact) Compact();
}
+ if (compact) Compact();
}
+
if (FLAG_trace_check_elimination) {
PrintF("B%d checkmaps-table merged with B%d table:\n",
- succ->block_id(), that_block->block_id());
- Print();
+ succ->block_id(), pred_block->block_id());
+ Print(this);
}
return this;
}
@@ -244,14 +324,43 @@ class HCheckTable : public ZoneObject {
}
return;
}
- i = i->Intersect(a, phase_->zone());
- if (i->size() == 0) {
+ MapSet intersection = i->Intersect(a, phase_->zone());
+ if (intersection->size() == 0) {
// Intersection is empty; probably megamorphic, which is likely to
// deopt anyway, so just leave things as they are.
INC_STAT(empty_);
} else {
- // TODO(titzer): replace the first check with a more strict check
- INC_STAT(narrowed_);
+ // Update set of maps in the entry.
+ entry->maps_ = intersection;
+ if (intersection->size() != i->size()) {
+ // Narrow set of maps in the second check maps instruction.
+ HGraph* graph = instr->block()->graph();
+ if (entry->check_ != NULL &&
+ entry->check_->block() == instr->block() &&
+ entry->check_->IsCheckMaps()) {
+ // There is a check in the same block so replace it with a more
+ // strict check and eliminate the second check entirely.
+ HCheckMaps* check = HCheckMaps::cast(entry->check_);
+ TRACE(("CheckMaps #%d at B%d narrowed\n", check->id(),
+ check->block()->block_id()));
+ // Update map set and ensure that the check is alive.
+ check->set_map_set(intersection, graph->zone());
+ check->ClearFlag(HValue::kIsDead);
+ TRACE(("Replacing redundant CheckMaps #%d at B%d with #%d\n",
+ instr->id(), instr->block()->block_id(), entry->check_->id()));
+ instr->DeleteAndReplaceWith(entry->check_);
+ } else {
+ TRACE(("CheckMaps #%d at B%d narrowed\n", instr->id(),
+ instr->block()->block_id()));
+ instr->set_map_set(intersection, graph->zone());
+ entry->check_ = instr;
+ }
+
+ if (FLAG_trace_check_elimination) {
+ Print(this);
+ }
+ INC_STAT(narrowed_);
+ }
}
} else {
// No entry; insert a new one.
@@ -292,22 +401,32 @@ class HCheckTable : public ZoneObject {
HValue* object = instr->value()->ActualValue();
// Match a HCheckMapValue(object, HConstant(map))
Unique<Map> map = MapConstant(instr->map());
- MapSet maps = FindMaps(object);
- if (maps != NULL) {
+
+ HCheckTableEntry* entry = Find(object);
+ if (entry != NULL) {
+ MapSet maps = entry->maps_;
if (maps->Contains(map)) {
if (maps->size() == 1) {
// Object is known to have exactly this map.
- instr->DeleteAndReplaceWith(NULL);
+ if (entry->check_ != NULL) {
+ instr->DeleteAndReplaceWith(entry->check_);
+ } else {
+ // Mark check as dead but leave it in the graph as a checkpoint for
+ // subsequent checks.
+ instr->SetFlag(HValue::kIsDead);
+ entry->check_ = instr;
+ }
INC_STAT(removed_);
} else {
// Only one map survives the check.
maps->Clear();
maps->Add(map, phase_->zone());
+ entry->check_ = instr;
}
}
} else {
// No prior information.
- Insert(object, map);
+ Insert(object, instr, map);
}
}
@@ -324,34 +443,61 @@ class HCheckTable : public ZoneObject {
if (instr->has_transition()) {
// This store transitions the object to a new map.
Kill(object);
- Insert(object, MapConstant(instr->transition()));
+ Insert(object, NULL, MapConstant(instr->transition()));
} else if (IsMapAccess(instr->access())) {
// This is a store directly to the map field of the object.
Kill(object);
if (!instr->value()->IsConstant()) return;
- Insert(object, MapConstant(instr->value()));
+ Insert(object, NULL, MapConstant(instr->value()));
} else {
// If the instruction changes maps, it should be handled above.
- CHECK(!instr->CheckGVNFlag(kChangesMaps));
+ CHECK(!instr->CheckChangesFlag(kMaps));
}
}
void ReduceCompareMap(HCompareMap* instr) {
MapSet maps = FindMaps(instr->value()->ActualValue());
if (maps == NULL) return;
+
+ int succ;
if (maps->Contains(instr->map())) {
- if (maps->size() == 1) {
- TRACE(("Marking redundant CompareMap #%d at B%d as true\n",
- instr->id(), instr->block()->block_id()));
- instr->set_known_successor_index(0);
- INC_STAT(compares_true_);
+ if (maps->size() != 1) {
+ TRACE(("CompareMap #%d for #%d at B%d can't be eliminated: "
+ "ambiguous set of maps\n", instr->id(), instr->value()->id(),
+ instr->block()->block_id()));
+ return;
}
+ succ = 0;
+ INC_STAT(compares_true_);
} else {
- TRACE(("Marking redundant CompareMap #%d at B%d as false\n",
- instr->id(), instr->block()->block_id()));
- instr->set_known_successor_index(1);
+ succ = 1;
INC_STAT(compares_false_);
}
+
+ TRACE(("Marking redundant CompareMap #%d for #%d at B%d as %s\n",
+ instr->id(), instr->value()->id(), instr->block()->block_id(),
+ succ == 0 ? "true" : "false"));
+ instr->set_known_successor_index(succ);
+
+ int unreachable_succ = 1 - succ;
+ instr->block()->MarkSuccEdgeUnreachable(unreachable_succ);
+ }
+
+ void ReduceCompareObjectEqAndBranch(HCompareObjectEqAndBranch* instr) {
+ MapSet maps_left = FindMaps(instr->left()->ActualValue());
+ if (maps_left == NULL) return;
+ MapSet maps_right = FindMaps(instr->right()->ActualValue());
+ if (maps_right == NULL) return;
+ MapSet intersection = maps_left->Intersect(maps_right, phase_->zone());
+ if (intersection->size() > 0) return;
+
+ TRACE(("Marking redundant CompareObjectEqAndBranch #%d at B%d as false\n",
+ instr->id(), instr->block()->block_id()));
+ int succ = 1;
+ instr->set_known_successor_index(succ);
+
+ int unreachable_succ = 1 - succ;
+ instr->block()->MarkSuccEdgeUnreachable(unreachable_succ);
}
void ReduceTransitionElementsKind(HTransitionElementsKind* instr) {
@@ -422,11 +568,17 @@ class HCheckTable : public ZoneObject {
cursor_ = size_; // Move cursor to end.
}
- void Print() {
- for (int i = 0; i < size_; i++) {
- HCheckTableEntry* entry = &entries_[i];
+ static void Print(HCheckTable* table) {
+ if (table == NULL) {
+ PrintF(" unreachable\n");
+ return;
+ }
+
+ for (int i = 0; i < table->size_; i++) {
+ HCheckTableEntry* entry = &table->entries_[i];
ASSERT(entry->object_ != NULL);
- PrintF(" checkmaps-table @%d: object #%d ", i, entry->object_->id());
+ PrintF(" checkmaps-table @%d: %s #%d ", i,
+ entry->object_->IsPhi() ? "phi" : "object", entry->object_->id());
if (entry->check_ != NULL) {
PrintF("check #%d ", entry->check_->id());
}
@@ -440,7 +592,6 @@ class HCheckTable : public ZoneObject {
}
}
- private:
HCheckTableEntry* Find(HValue* object) {
for (int i = size_ - 1; i >= 0; i--) {
// Search from most-recently-inserted to least-recently-inserted.
@@ -456,13 +607,13 @@ class HCheckTable : public ZoneObject {
return entry == NULL ? NULL : entry->maps_;
}
- void Insert(HValue* object, Unique<Map> map) {
+ void Insert(HValue* object, HInstruction* check, Unique<Map> map) {
MapSet list = new(phase_->zone()) UniqueSet<Map>();
list->Add(map, phase_->zone());
- Insert(object, NULL, list);
+ Insert(object, check, list);
}
- void Insert(HValue* object, HCheckMaps* check, MapSet maps) {
+ void Insert(HValue* object, HInstruction* check, MapSet maps) {
HCheckTableEntry* entry = &entries_[cursor_++];
entry->object_ = object;
entry->check_ = check;
@@ -481,6 +632,7 @@ class HCheckTable : public ZoneObject {
}
friend class HCheckMapsEffects;
+ friend class HCheckEliminationPhase;
HCheckEliminationPhase* phase_;
HCheckTableEntry entries_[kMaxTrackedObjects];
@@ -514,8 +666,8 @@ class HCheckMapsEffects : public ZoneObject {
maps_stored_ = true;
}
default: {
- maps_stored_ |= (instr->CheckGVNFlag(kChangesMaps) |
- instr->CheckGVNFlag(kChangesElementsKind));
+ maps_stored_ |= (instr->CheckChangesFlag(kMaps) |
+ instr->CheckChangesFlag(kElementsKind));
}
}
}
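
[Editorial sketch] The check-elimination hunks above work on per-object map sets: when a second CheckMaps is seen, the maps the table already knows for the object are intersected with the maps the instruction tests for, and an empty intersection is treated as probably megamorphic and left alone. A rough sketch of just that intersection step, using plain integers in place of Unique<Map>; it does not model the reuse of a dominating check in the same block:

#include <cstdio>
#include <set>
#include <algorithm>
#include <iterator>

typedef std::set<int> MapSet;  // map identities stand in for Unique<Map>

static MapSet Intersect(const MapSet& a, const MapSet& b) {
  MapSet out;
  std::set_intersection(a.begin(), a.end(), b.begin(), b.end(),
                        std::inserter(out, out.begin()));
  return out;
}

int main() {
  MapSet known;   known.insert(1); known.insert(2);      // from an earlier check
  MapSet checked; checked.insert(2); checked.insert(3);  // second CheckMaps
  MapSet narrowed = Intersect(known, checked);
  if (narrowed.empty()) {
    std::printf("empty intersection: probably megamorphic, leave it alone\n");
  } else {
    std::printf("%d map(s) survive both the table and the check\n",
                static_cast<int>(narrowed.size()));
  }
  return 0;
}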
diff --git a/deps/v8/src/hydrogen-flow-engine.h b/deps/v8/src/hydrogen-flow-engine.h
index fe786a5c5c..99a2f841a7 100644
--- a/deps/v8/src/hydrogen-flow-engine.h
+++ b/deps/v8/src/hydrogen-flow-engine.h
@@ -122,9 +122,10 @@ class HFlowEngine {
// Skip blocks not dominated by the root node.
if (SkipNonDominatedBlock(root, block)) continue;
- State* state = StateAt(block);
+ State* state = State::Finish(StateAt(block), block, zone_);
if (block->IsReachable()) {
+ ASSERT(state != NULL);
if (block->IsLoopHeader()) {
// Apply loop effects before analyzing loop body.
ComputeLoopEffects(block)->Apply(state);
@@ -144,18 +145,14 @@ class HFlowEngine {
for (int i = 0; i < max; i++) {
HBasicBlock* succ = block->end()->SuccessorAt(i);
IncrementPredecessorCount(succ);
- if (StateAt(succ) == NULL) {
- // This is the first state to reach the successor.
- if (max == 1 && succ->predecessors()->length() == 1) {
- // Optimization: successor can inherit this state.
- SetStateAt(succ, state);
- } else {
- // Successor needs a copy of the state.
- SetStateAt(succ, state->Copy(succ, block, zone_));
- }
+
+ if (max == 1 && succ->predecessors()->length() == 1) {
+ // Optimization: successor can inherit this state.
+ SetStateAt(succ, state);
} else {
// Merge the current state with the state already at the successor.
- SetStateAt(succ, StateAt(succ)->Merge(succ, state, block, zone_));
+ SetStateAt(succ,
+ State::Merge(StateAt(succ), succ, state, block, zone_));
}
}
}
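
[Editorial sketch] The flow-engine change above routes state propagation through static State::Merge and State::Finish hooks so that a NULL state can stand for "nothing has reached this block yet" or "unreachable". A much-simplified sketch of that protocol; the real hooks also take the successor and predecessor blocks plus a Zone, which are omitted here, and the merge below uses a minimum as a stand-in for a real set intersection:

#include <cstdio>
#include <cstddef>

struct State {
  int known_facts;

  static State* Merge(State* succ_state, State* pred_state) {
    if (pred_state == NULL) return succ_state;  // predecessor was unreachable
    if (succ_state == NULL) return pred_state;  // first state to arrive
    // Keep only what both incoming states agree on.
    if (pred_state->known_facts < succ_state->known_facts) {
      succ_state->known_facts = pred_state->known_facts;
    }
    return succ_state;
  }

  static State* Finish(State* state) {
    if (state == NULL) std::printf("block never reached: mark unreachable\n");
    return state;
  }
};

int main() {
  State a = { 3 };
  State b = { 5 };
  State* merged = State::Merge(State::Merge(NULL, &a), &b);
  merged = State::Finish(merged);
  if (merged != NULL) std::printf("facts after merge: %d\n", merged->known_facts);
  return 0;
}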
diff --git a/deps/v8/src/hydrogen-gvn.cc b/deps/v8/src/hydrogen-gvn.cc
index bc836890bb..4c98015bee 100644
--- a/deps/v8/src/hydrogen-gvn.cc
+++ b/deps/v8/src/hydrogen-gvn.cc
@@ -32,39 +32,39 @@
namespace v8 {
namespace internal {
-class HValueMap: public ZoneObject {
+class HInstructionMap V8_FINAL : public ZoneObject {
public:
- explicit HValueMap(Zone* zone)
+ HInstructionMap(Zone* zone, SideEffectsTracker* side_effects_tracker)
: array_size_(0),
lists_size_(0),
count_(0),
- present_flags_(0),
array_(NULL),
lists_(NULL),
- free_list_head_(kNil) {
+ free_list_head_(kNil),
+ side_effects_tracker_(side_effects_tracker) {
ResizeLists(kInitialSize, zone);
Resize(kInitialSize, zone);
}
- void Kill(GVNFlagSet flags);
+ void Kill(SideEffects side_effects);
- void Add(HValue* value, Zone* zone) {
- present_flags_.Add(value->gvn_flags());
- Insert(value, zone);
+ void Add(HInstruction* instr, Zone* zone) {
+ present_depends_on_.Add(side_effects_tracker_->ComputeDependsOn(instr));
+ Insert(instr, zone);
}
- HValue* Lookup(HValue* value) const;
+ HInstruction* Lookup(HInstruction* instr) const;
- HValueMap* Copy(Zone* zone) const {
- return new(zone) HValueMap(zone, this);
+ HInstructionMap* Copy(Zone* zone) const {
+ return new(zone) HInstructionMap(zone, this);
}
bool IsEmpty() const { return count_ == 0; }
private:
- // A linked list of HValue* values. Stored in arrays.
- struct HValueMapListElement {
- HValue* value;
+ // A linked list of HInstruction* values. Stored in arrays.
+ struct HInstructionMapListElement {
+ HInstruction* instr;
int next; // Index in the array of the next list element.
};
static const int kNil = -1; // The end of a linked list
@@ -72,34 +72,36 @@ class HValueMap: public ZoneObject {
// Must be a power of 2.
static const int kInitialSize = 16;
- HValueMap(Zone* zone, const HValueMap* other);
+ HInstructionMap(Zone* zone, const HInstructionMap* other);
void Resize(int new_size, Zone* zone);
void ResizeLists(int new_size, Zone* zone);
- void Insert(HValue* value, Zone* zone);
+ void Insert(HInstruction* instr, Zone* zone);
uint32_t Bound(uint32_t value) const { return value & (array_size_ - 1); }
int array_size_;
int lists_size_;
- int count_; // The number of values stored in the HValueMap.
- GVNFlagSet present_flags_; // All flags that are in any value in the
- // HValueMap.
- HValueMapListElement* array_; // Primary store - contains the first value
+ int count_; // The number of values stored in the HInstructionMap.
+ SideEffects present_depends_on_;
+ HInstructionMapListElement* array_;
+ // Primary store - contains the first value
// with a given hash. Colliding elements are stored in linked lists.
- HValueMapListElement* lists_; // The linked lists containing hash collisions.
+ HInstructionMapListElement* lists_;
+ // The linked lists containing hash collisions.
int free_list_head_; // Unused elements in lists_ are on the free list.
+ SideEffectsTracker* side_effects_tracker_;
};
-class HSideEffectMap BASE_EMBEDDED {
+class HSideEffectMap V8_FINAL BASE_EMBEDDED {
public:
HSideEffectMap();
explicit HSideEffectMap(HSideEffectMap* other);
HSideEffectMap& operator= (const HSideEffectMap& other);
- void Kill(GVNFlagSet flags);
+ void Kill(SideEffects side_effects);
- void Store(GVNFlagSet flags, HInstruction* instr);
+ void Store(SideEffects side_effects, HInstruction* instr);
bool IsEmpty() const { return count_ == 0; }
@@ -152,35 +154,36 @@ void TraceGVN(const char* msg, ...) {
}
-HValueMap::HValueMap(Zone* zone, const HValueMap* other)
+HInstructionMap::HInstructionMap(Zone* zone, const HInstructionMap* other)
: array_size_(other->array_size_),
lists_size_(other->lists_size_),
count_(other->count_),
- present_flags_(other->present_flags_),
- array_(zone->NewArray<HValueMapListElement>(other->array_size_)),
- lists_(zone->NewArray<HValueMapListElement>(other->lists_size_)),
- free_list_head_(other->free_list_head_) {
+ present_depends_on_(other->present_depends_on_),
+ array_(zone->NewArray<HInstructionMapListElement>(other->array_size_)),
+ lists_(zone->NewArray<HInstructionMapListElement>(other->lists_size_)),
+ free_list_head_(other->free_list_head_),
+ side_effects_tracker_(other->side_effects_tracker_) {
OS::MemCopy(
- array_, other->array_, array_size_ * sizeof(HValueMapListElement));
+ array_, other->array_, array_size_ * sizeof(HInstructionMapListElement));
OS::MemCopy(
- lists_, other->lists_, lists_size_ * sizeof(HValueMapListElement));
+ lists_, other->lists_, lists_size_ * sizeof(HInstructionMapListElement));
}
-void HValueMap::Kill(GVNFlagSet flags) {
- GVNFlagSet depends_flags = HValue::ConvertChangesToDependsFlags(flags);
- if (!present_flags_.ContainsAnyOf(depends_flags)) return;
- present_flags_.RemoveAll();
+void HInstructionMap::Kill(SideEffects changes) {
+ if (!present_depends_on_.ContainsAnyOf(changes)) return;
+ present_depends_on_.RemoveAll();
for (int i = 0; i < array_size_; ++i) {
- HValue* value = array_[i].value;
- if (value != NULL) {
+ HInstruction* instr = array_[i].instr;
+ if (instr != NULL) {
// Clear list of collisions first, so we know if it becomes empty.
int kept = kNil; // List of kept elements.
int next;
for (int current = array_[i].next; current != kNil; current = next) {
next = lists_[current].next;
- HValue* value = lists_[current].value;
- if (value->gvn_flags().ContainsAnyOf(depends_flags)) {
+ HInstruction* instr = lists_[current].instr;
+ SideEffects depends_on = side_effects_tracker_->ComputeDependsOn(instr);
+ if (depends_on.ContainsAnyOf(changes)) {
// Drop it.
count_--;
lists_[current].next = free_list_head_;
@@ -189,40 +192,41 @@ void HValueMap::Kill(GVNFlagSet flags) {
// Keep it.
lists_[current].next = kept;
kept = current;
- present_flags_.Add(value->gvn_flags());
+ present_depends_on_.Add(depends_on);
}
}
array_[i].next = kept;
// Now possibly drop directly indexed element.
- value = array_[i].value;
- if (value->gvn_flags().ContainsAnyOf(depends_flags)) { // Drop it.
+ instr = array_[i].instr;
+ SideEffects depends_on = side_effects_tracker_->ComputeDependsOn(instr);
+ if (depends_on.ContainsAnyOf(changes)) { // Drop it.
count_--;
int head = array_[i].next;
if (head == kNil) {
- array_[i].value = NULL;
+ array_[i].instr = NULL;
} else {
- array_[i].value = lists_[head].value;
+ array_[i].instr = lists_[head].instr;
array_[i].next = lists_[head].next;
lists_[head].next = free_list_head_;
free_list_head_ = head;
}
} else {
- present_flags_.Add(value->gvn_flags()); // Keep it.
+ present_depends_on_.Add(depends_on); // Keep it.
}
}
}
}
-HValue* HValueMap::Lookup(HValue* value) const {
- uint32_t hash = static_cast<uint32_t>(value->Hashcode());
+HInstruction* HInstructionMap::Lookup(HInstruction* instr) const {
+ uint32_t hash = static_cast<uint32_t>(instr->Hashcode());
uint32_t pos = Bound(hash);
- if (array_[pos].value != NULL) {
- if (array_[pos].value->Equals(value)) return array_[pos].value;
+ if (array_[pos].instr != NULL) {
+ if (array_[pos].instr->Equals(instr)) return array_[pos].instr;
int next = array_[pos].next;
while (next != kNil) {
- if (lists_[next].value->Equals(value)) return lists_[next].value;
+ if (lists_[next].instr->Equals(instr)) return lists_[next].instr;
next = lists_[next].next;
}
}
@@ -230,7 +234,7 @@ HValue* HValueMap::Lookup(HValue* value) const {
}
-void HValueMap::Resize(int new_size, Zone* zone) {
+void HInstructionMap::Resize(int new_size, Zone* zone) {
ASSERT(new_size > count_);
// Hashing the values into the new array has no more collisions than in the
// old hash map, so we can use the existing lists_ array, if we are careful.
@@ -240,33 +244,33 @@ void HValueMap::Resize(int new_size, Zone* zone) {
ResizeLists(lists_size_ << 1, zone);
}
- HValueMapListElement* new_array =
- zone->NewArray<HValueMapListElement>(new_size);
- memset(new_array, 0, sizeof(HValueMapListElement) * new_size);
+ HInstructionMapListElement* new_array =
+ zone->NewArray<HInstructionMapListElement>(new_size);
+ memset(new_array, 0, sizeof(HInstructionMapListElement) * new_size);
- HValueMapListElement* old_array = array_;
+ HInstructionMapListElement* old_array = array_;
int old_size = array_size_;
int old_count = count_;
count_ = 0;
- // Do not modify present_flags_. It is currently correct.
+ // Do not modify present_depends_on_. It is currently correct.
array_size_ = new_size;
array_ = new_array;
if (old_array != NULL) {
// Iterate over all the elements in lists, rehashing them.
for (int i = 0; i < old_size; ++i) {
- if (old_array[i].value != NULL) {
+ if (old_array[i].instr != NULL) {
int current = old_array[i].next;
while (current != kNil) {
- Insert(lists_[current].value, zone);
+ Insert(lists_[current].instr, zone);
int next = lists_[current].next;
lists_[current].next = free_list_head_;
free_list_head_ = current;
current = next;
}
- // Rehash the directly stored value.
- Insert(old_array[i].value, zone);
+ // Rehash the directly stored instruction.
+ Insert(old_array[i].instr, zone);
}
}
}
@@ -275,21 +279,22 @@ void HValueMap::Resize(int new_size, Zone* zone) {
}
-void HValueMap::ResizeLists(int new_size, Zone* zone) {
+void HInstructionMap::ResizeLists(int new_size, Zone* zone) {
ASSERT(new_size > lists_size_);
- HValueMapListElement* new_lists =
- zone->NewArray<HValueMapListElement>(new_size);
- memset(new_lists, 0, sizeof(HValueMapListElement) * new_size);
+ HInstructionMapListElement* new_lists =
+ zone->NewArray<HInstructionMapListElement>(new_size);
+ memset(new_lists, 0, sizeof(HInstructionMapListElement) * new_size);
- HValueMapListElement* old_lists = lists_;
+ HInstructionMapListElement* old_lists = lists_;
int old_size = lists_size_;
lists_size_ = new_size;
lists_ = new_lists;
if (old_lists != NULL) {
- OS::MemCopy(lists_, old_lists, old_size * sizeof(HValueMapListElement));
+ OS::MemCopy(
+ lists_, old_lists, old_size * sizeof(HInstructionMapListElement));
}
for (int i = old_size; i < lists_size_; ++i) {
lists_[i].next = free_list_head_;
@@ -298,15 +303,15 @@ void HValueMap::ResizeLists(int new_size, Zone* zone) {
}
-void HValueMap::Insert(HValue* value, Zone* zone) {
- ASSERT(value != NULL);
+void HInstructionMap::Insert(HInstruction* instr, Zone* zone) {
+ ASSERT(instr != NULL);
// Resizing when half of the hashtable is filled up.
if (count_ >= array_size_ >> 1) Resize(array_size_ << 1, zone);
ASSERT(count_ < array_size_);
count_++;
- uint32_t pos = Bound(static_cast<uint32_t>(value->Hashcode()));
- if (array_[pos].value == NULL) {
- array_[pos].value = value;
+ uint32_t pos = Bound(static_cast<uint32_t>(instr->Hashcode()));
+ if (array_[pos].instr == NULL) {
+ array_[pos].instr = instr;
array_[pos].next = kNil;
} else {
if (free_list_head_ == kNil) {
@@ -315,9 +320,9 @@ void HValueMap::Insert(HValue* value, Zone* zone) {
int new_element_pos = free_list_head_;
ASSERT(new_element_pos != kNil);
free_list_head_ = lists_[free_list_head_].next;
- lists_[new_element_pos].value = value;
+ lists_[new_element_pos].instr = instr;
lists_[new_element_pos].next = array_[pos].next;
- ASSERT(array_[pos].next == kNil || lists_[array_[pos].next].value != NULL);
+ ASSERT(array_[pos].next == kNil || lists_[array_[pos].next].instr != NULL);
array_[pos].next = new_element_pos;
}
}
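
For readers skimming the hash map code above: HInstructionMap stores instructions in a directly indexed bucket array and keeps collision chains in a second array threaded through a free list. The stand-alone sketch below models only that storage scheme with plain integers; the class and member names are illustrative, resizing is omitted, and keys are assumed non-negative. It is not code from the patch.

#include <cassert>
#include <vector>

class BucketMap {
 public:
  static constexpr int kNil = -1;
  explicit BucketMap(int size) : array_(size), lists_(size), free_head_(0) {
    for (int i = 0; i < size; ++i) {
      array_[i].key = kNil;                            // Empty bucket marker.
      array_[i].next = kNil;
      lists_[i].next = (i + 1 < size) ? i + 1 : kNil;  // Build the free list.
    }
  }
  void Insert(int key) {
    int pos = Bound(key);
    if (array_[pos].key == kNil) {
      array_[pos].key = key;                  // Store directly in the bucket.
    } else {
      assert(free_head_ != kNil);             // No resizing in this sketch.
      int node = free_head_;                  // Take a node from the free list
      free_head_ = lists_[node].next;
      lists_[node].key = key;                 // and prepend it to the bucket's
      lists_[node].next = array_[pos].next;   // collision chain.
      array_[pos].next = node;
    }
  }
  bool Lookup(int key) const {
    int pos = Bound(key);
    if (array_[pos].key == key) return true;
    for (int n = array_[pos].next; n != kNil; n = lists_[n].next) {
      if (lists_[n].key == key) return true;
    }
    return false;
  }

 private:
  struct Element { int key; int next; };
  int Bound(int key) const { return key % static_cast<int>(array_.size()); }
  std::vector<Element> array_;  // Directly indexed elements.
  std::vector<Element> lists_;  // Overflow nodes, linked per bucket.
  int free_head_;               // Head of the free list inside lists_.
};
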
@@ -341,10 +346,9 @@ HSideEffectMap& HSideEffectMap::operator= (const HSideEffectMap& other) {
}
-void HSideEffectMap::Kill(GVNFlagSet flags) {
+void HSideEffectMap::Kill(SideEffects side_effects) {
for (int i = 0; i < kNumberOfTrackedSideEffects; i++) {
- GVNFlag changes_flag = HValue::ChangesFlagFromInt(i);
- if (flags.Contains(changes_flag)) {
+ if (side_effects.ContainsFlag(GVNFlagFromInt(i))) {
if (data_[i] != NULL) count_--;
data_[i] = NULL;
}
@@ -352,10 +356,9 @@ void HSideEffectMap::Kill(GVNFlagSet flags) {
}
-void HSideEffectMap::Store(GVNFlagSet flags, HInstruction* instr) {
+void HSideEffectMap::Store(SideEffects side_effects, HInstruction* instr) {
for (int i = 0; i < kNumberOfTrackedSideEffects; i++) {
- GVNFlag changes_flag = HValue::ChangesFlagFromInt(i);
- if (flags.Contains(changes_flag)) {
+ if (side_effects.ContainsFlag(GVNFlagFromInt(i))) {
if (data_[i] == NULL) count_++;
data_[i] = instr;
}
@@ -363,6 +366,152 @@ void HSideEffectMap::Store(GVNFlagSet flags, HInstruction* instr) {
}
+SideEffects SideEffectsTracker::ComputeChanges(HInstruction* instr) {
+ int index;
+ SideEffects result(instr->ChangesFlags());
+ if (result.ContainsFlag(kGlobalVars)) {
+ if (instr->IsStoreGlobalCell() &&
+ ComputeGlobalVar(HStoreGlobalCell::cast(instr)->cell(), &index)) {
+ result.RemoveFlag(kGlobalVars);
+ result.AddSpecial(GlobalVar(index));
+ } else {
+ for (index = 0; index < kNumberOfGlobalVars; ++index) {
+ result.AddSpecial(GlobalVar(index));
+ }
+ }
+ }
+ if (result.ContainsFlag(kInobjectFields)) {
+ if (instr->IsStoreNamedField() &&
+ ComputeInobjectField(HStoreNamedField::cast(instr)->access(), &index)) {
+ result.RemoveFlag(kInobjectFields);
+ result.AddSpecial(InobjectField(index));
+ } else {
+ for (index = 0; index < kNumberOfInobjectFields; ++index) {
+ result.AddSpecial(InobjectField(index));
+ }
+ }
+ }
+ return result;
+}
+
+
+SideEffects SideEffectsTracker::ComputeDependsOn(HInstruction* instr) {
+ int index;
+ SideEffects result(instr->DependsOnFlags());
+ if (result.ContainsFlag(kGlobalVars)) {
+ if (instr->IsLoadGlobalCell() &&
+ ComputeGlobalVar(HLoadGlobalCell::cast(instr)->cell(), &index)) {
+ result.RemoveFlag(kGlobalVars);
+ result.AddSpecial(GlobalVar(index));
+ } else {
+ for (index = 0; index < kNumberOfGlobalVars; ++index) {
+ result.AddSpecial(GlobalVar(index));
+ }
+ }
+ }
+ if (result.ContainsFlag(kInobjectFields)) {
+ if (instr->IsLoadNamedField() &&
+ ComputeInobjectField(HLoadNamedField::cast(instr)->access(), &index)) {
+ result.RemoveFlag(kInobjectFields);
+ result.AddSpecial(InobjectField(index));
+ } else {
+ for (index = 0; index < kNumberOfInobjectFields; ++index) {
+ result.AddSpecial(InobjectField(index));
+ }
+ }
+ }
+ return result;
+}
+
+
+void SideEffectsTracker::PrintSideEffectsTo(StringStream* stream,
+ SideEffects side_effects) const {
+ const char* separator = "";
+ stream->Add("[");
+ for (int bit = 0; bit < kNumberOfFlags; ++bit) {
+ GVNFlag flag = GVNFlagFromInt(bit);
+ if (side_effects.ContainsFlag(flag)) {
+ stream->Add(separator);
+ separator = ", ";
+ switch (flag) {
+#define DECLARE_FLAG(Type) \
+ case k##Type: \
+ stream->Add(#Type); \
+ break;
+GVN_TRACKED_FLAG_LIST(DECLARE_FLAG)
+GVN_UNTRACKED_FLAG_LIST(DECLARE_FLAG)
+#undef DECLARE_FLAG
+ default:
+ break;
+ }
+ }
+ }
+ for (int index = 0; index < num_global_vars_; ++index) {
+ if (side_effects.ContainsSpecial(GlobalVar(index))) {
+ stream->Add(separator);
+ separator = ", ";
+ stream->Add("[%p]", *global_vars_[index].handle());
+ }
+ }
+ for (int index = 0; index < num_inobject_fields_; ++index) {
+ if (side_effects.ContainsSpecial(InobjectField(index))) {
+ stream->Add(separator);
+ separator = ", ";
+ inobject_fields_[index].PrintTo(stream);
+ }
+ }
+ stream->Add("]");
+}
+
+
+bool SideEffectsTracker::ComputeGlobalVar(Unique<Cell> cell, int* index) {
+ for (int i = 0; i < num_global_vars_; ++i) {
+ if (cell == global_vars_[i]) {
+ *index = i;
+ return true;
+ }
+ }
+ if (num_global_vars_ < kNumberOfGlobalVars) {
+ if (FLAG_trace_gvn) {
+ HeapStringAllocator allocator;
+ StringStream stream(&allocator);
+ stream.Add("Tracking global var [%p] (mapped to index %d)\n",
+ *cell.handle(), num_global_vars_);
+ stream.OutputToStdOut();
+ }
+ *index = num_global_vars_;
+ global_vars_[num_global_vars_++] = cell;
+ return true;
+ }
+ return false;
+}
+
+
+bool SideEffectsTracker::ComputeInobjectField(HObjectAccess access,
+ int* index) {
+ for (int i = 0; i < num_inobject_fields_; ++i) {
+ if (access.Equals(inobject_fields_[i])) {
+ *index = i;
+ return true;
+ }
+ }
+ if (num_inobject_fields_ < kNumberOfInobjectFields) {
+ if (FLAG_trace_gvn) {
+ HeapStringAllocator allocator;
+ StringStream stream(&allocator);
+ stream.Add("Tracking inobject field access ");
+ access.PrintTo(&stream);
+ stream.Add(" (mapped to index %d)\n", num_inobject_fields_);
+ stream.OutputToStdOut();
+ }
+ *index = num_inobject_fields_;
+ inobject_fields_[num_inobject_fields_++] = access;
+ return true;
+ }
+ return false;
+}
+
+
HGlobalValueNumberingPhase::HGlobalValueNumberingPhase(HGraph* graph)
: HPhase("H_Global value numbering", graph),
removed_side_effects_(false),
@@ -370,10 +519,10 @@ HGlobalValueNumberingPhase::HGlobalValueNumberingPhase(HGraph* graph)
loop_side_effects_(graph->blocks()->length(), zone()),
visited_on_paths_(graph->blocks()->length(), zone()) {
ASSERT(!AllowHandleAllocation::IsAllowed());
- block_side_effects_.AddBlock(GVNFlagSet(), graph->blocks()->length(),
- zone());
- loop_side_effects_.AddBlock(GVNFlagSet(), graph->blocks()->length(),
- zone());
+ block_side_effects_.AddBlock(
+ SideEffects(), graph->blocks()->length(), zone());
+ loop_side_effects_.AddBlock(
+ SideEffects(), graph->blocks()->length(), zone());
}
@@ -409,12 +558,12 @@ void HGlobalValueNumberingPhase::ComputeBlockSideEffects() {
for (int i = graph()->blocks()->length() - 1; i >= 0; --i) {
// Compute side effects for the block.
HBasicBlock* block = graph()->blocks()->at(i);
- GVNFlagSet side_effects;
+ SideEffects side_effects;
if (block->IsReachable() && !block->IsDeoptimizing()) {
int id = block->block_id();
for (HInstructionIterator it(block); !it.Done(); it.Advance()) {
HInstruction* instr = it.Current();
- side_effects.Add(instr->ChangesFlags());
+ side_effects.Add(side_effects_tracker_.ComputeChanges(instr));
}
block_side_effects_[id].Add(side_effects);
@@ -438,103 +587,22 @@ void HGlobalValueNumberingPhase::ComputeBlockSideEffects() {
}
-SmartArrayPointer<char> GetGVNFlagsString(GVNFlagSet flags) {
- char underlying_buffer[kNumberOfFlags * 128];
- Vector<char> buffer(underlying_buffer, sizeof(underlying_buffer));
-#if DEBUG
- int offset = 0;
- const char* separator = "";
- const char* comma = ", ";
- buffer[0] = 0;
- uint32_t set_depends_on = 0;
- uint32_t set_changes = 0;
- for (int bit = 0; bit < kNumberOfFlags; ++bit) {
- if (flags.Contains(static_cast<GVNFlag>(bit))) {
- if (bit % 2 == 0) {
- set_changes++;
- } else {
- set_depends_on++;
- }
- }
- }
- bool positive_changes = set_changes < (kNumberOfFlags / 2);
- bool positive_depends_on = set_depends_on < (kNumberOfFlags / 2);
- if (set_changes > 0) {
- if (positive_changes) {
- offset += OS::SNPrintF(buffer + offset, "changes [");
- } else {
- offset += OS::SNPrintF(buffer + offset, "changes all except [");
- }
- for (int bit = 0; bit < kNumberOfFlags; ++bit) {
- if (flags.Contains(static_cast<GVNFlag>(bit)) == positive_changes) {
- switch (static_cast<GVNFlag>(bit)) {
-#define DECLARE_FLAG(type) \
- case kChanges##type: \
- offset += OS::SNPrintF(buffer + offset, separator); \
- offset += OS::SNPrintF(buffer + offset, #type); \
- separator = comma; \
- break;
-GVN_TRACKED_FLAG_LIST(DECLARE_FLAG)
-GVN_UNTRACKED_FLAG_LIST(DECLARE_FLAG)
-#undef DECLARE_FLAG
- default:
- break;
- }
- }
- }
- offset += OS::SNPrintF(buffer + offset, "]");
- }
- if (set_depends_on > 0) {
- separator = "";
- if (set_changes > 0) {
- offset += OS::SNPrintF(buffer + offset, ", ");
- }
- if (positive_depends_on) {
- offset += OS::SNPrintF(buffer + offset, "depends on [");
- } else {
- offset += OS::SNPrintF(buffer + offset, "depends on all except [");
- }
- for (int bit = 0; bit < kNumberOfFlags; ++bit) {
- if (flags.Contains(static_cast<GVNFlag>(bit)) == positive_depends_on) {
- switch (static_cast<GVNFlag>(bit)) {
-#define DECLARE_FLAG(type) \
- case kDependsOn##type: \
- offset += OS::SNPrintF(buffer + offset, separator); \
- offset += OS::SNPrintF(buffer + offset, #type); \
- separator = comma; \
- break;
-GVN_TRACKED_FLAG_LIST(DECLARE_FLAG)
-GVN_UNTRACKED_FLAG_LIST(DECLARE_FLAG)
-#undef DECLARE_FLAG
- default:
- break;
- }
- }
- }
- offset += OS::SNPrintF(buffer + offset, "]");
- }
-#else
- OS::SNPrintF(buffer, "0x%08X", flags.ToIntegral());
-#endif
- size_t string_len = strlen(underlying_buffer) + 1;
- ASSERT(string_len <= sizeof(underlying_buffer));
- char* result = new char[strlen(underlying_buffer) + 1];
- OS::MemCopy(result, underlying_buffer, string_len);
- return SmartArrayPointer<char>(result);
-}
-
-
void HGlobalValueNumberingPhase::LoopInvariantCodeMotion() {
TRACE_GVN_1("Using optimistic loop invariant code motion: %s\n",
graph()->use_optimistic_licm() ? "yes" : "no");
for (int i = graph()->blocks()->length() - 1; i >= 0; --i) {
HBasicBlock* block = graph()->blocks()->at(i);
if (block->IsLoopHeader()) {
- GVNFlagSet side_effects = loop_side_effects_[block->block_id()];
- TRACE_GVN_2("Try loop invariant motion for block B%d %s\n",
- block->block_id(),
- GetGVNFlagsString(side_effects).get());
-
+ SideEffects side_effects = loop_side_effects_[block->block_id()];
+ if (FLAG_trace_gvn) {
+ HeapStringAllocator allocator;
+ StringStream stream(&allocator);
+ stream.Add("Try loop invariant motion for block B%d changes ",
+ block->block_id());
+ side_effects_tracker_.PrintSideEffectsTo(&stream, side_effects);
+ stream.Add("\n");
+ stream.OutputToStdOut();
+ }
HBasicBlock* last = block->loop_information()->GetLastBackEdge();
for (int j = block->block_id(); j <= last->block_id(); ++j) {
ProcessLoopBlock(graph()->blocks()->at(j), block, side_effects);
@@ -547,22 +615,37 @@ void HGlobalValueNumberingPhase::LoopInvariantCodeMotion() {
void HGlobalValueNumberingPhase::ProcessLoopBlock(
HBasicBlock* block,
HBasicBlock* loop_header,
- GVNFlagSet loop_kills) {
+ SideEffects loop_kills) {
HBasicBlock* pre_header = loop_header->predecessors()->at(0);
- GVNFlagSet depends_flags = HValue::ConvertChangesToDependsFlags(loop_kills);
- TRACE_GVN_2("Loop invariant motion for B%d %s\n",
- block->block_id(),
- GetGVNFlagsString(depends_flags).get());
+ if (FLAG_trace_gvn) {
+ HeapStringAllocator allocator;
+ StringStream stream(&allocator);
+ stream.Add("Loop invariant code motion for B%d depends on ",
+ block->block_id());
+ side_effects_tracker_.PrintSideEffectsTo(&stream, loop_kills);
+ stream.Add("\n");
+ stream.OutputToStdOut();
+ }
HInstruction* instr = block->first();
while (instr != NULL) {
HInstruction* next = instr->next();
if (instr->CheckFlag(HValue::kUseGVN)) {
- TRACE_GVN_4("Checking instruction %d (%s) %s. Loop %s\n",
- instr->id(),
- instr->Mnemonic(),
- GetGVNFlagsString(instr->gvn_flags()).get(),
- GetGVNFlagsString(loop_kills).get());
- bool can_hoist = !instr->gvn_flags().ContainsAnyOf(depends_flags);
+ SideEffects changes = side_effects_tracker_.ComputeChanges(instr);
+ SideEffects depends_on = side_effects_tracker_.ComputeDependsOn(instr);
+ if (FLAG_trace_gvn) {
+ HeapStringAllocator allocator;
+ StringStream stream(&allocator);
+ stream.Add("Checking instruction i%d (%s) changes ",
+ instr->id(), instr->Mnemonic());
+ side_effects_tracker_.PrintSideEffectsTo(&stream, changes);
+ stream.Add(", depends on ");
+ side_effects_tracker_.PrintSideEffectsTo(&stream, depends_on);
+ stream.Add(". Loop changes ");
+ side_effects_tracker_.PrintSideEffectsTo(&stream, loop_kills);
+ stream.Add("\n");
+ stream.OutputToStdOut();
+ }
+ bool can_hoist = !depends_on.ContainsAnyOf(loop_kills);
if (can_hoist && !graph()->use_optimistic_licm()) {
can_hoist = block->IsLoopSuccessorDominator();
}
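
Restated compactly, the hoisting decision above is an emptiness test on the intersection of two side-effect sets, plus a dominance requirement when optimistic LICM is off. The sketch below reduces SideEffects to plain 64-bit masks; the function and parameter names are illustrative, not part of the patch.

#include <cstdint>

// An instruction may be hoisted to the preheader only if nothing the loop
// changes overlaps with what the instruction depends on. Without optimistic
// LICM the block must additionally dominate the loop successors, so the
// hoisted instruction would have executed on every path anyway.
inline bool CanHoist(uint64_t depends_on, uint64_t loop_changes,
                     bool optimistic_licm, bool dominates_loop_successors) {
  if ((depends_on & loop_changes) != 0) return false;
  return optimistic_licm || dominates_loop_successors;
}
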
@@ -604,10 +687,10 @@ bool HGlobalValueNumberingPhase::ShouldMove(HInstruction* instr,
}
-GVNFlagSet
+SideEffects
HGlobalValueNumberingPhase::CollectSideEffectsOnPathsToDominatedBlock(
HBasicBlock* dominator, HBasicBlock* dominated) {
- GVNFlagSet side_effects;
+ SideEffects side_effects;
for (int i = 0; i < dominated->predecessors()->length(); ++i) {
HBasicBlock* block = dominated->predecessors()->at(i);
if (dominator->block_id() < block->block_id() &&
@@ -636,13 +719,13 @@ class GvnBasicBlockState: public ZoneObject {
public:
static GvnBasicBlockState* CreateEntry(Zone* zone,
HBasicBlock* entry_block,
- HValueMap* entry_map) {
+ HInstructionMap* entry_map) {
return new(zone)
GvnBasicBlockState(NULL, entry_block, entry_map, NULL, zone);
}
HBasicBlock* block() { return block_; }
- HValueMap* map() { return map_; }
+ HInstructionMap* map() { return map_; }
HSideEffectMap* dominators() { return &dominators_; }
GvnBasicBlockState* next_in_dominator_tree_traversal(
@@ -669,7 +752,7 @@ class GvnBasicBlockState: public ZoneObject {
private:
void Initialize(HBasicBlock* block,
- HValueMap* map,
+ HInstructionMap* map,
HSideEffectMap* dominators,
bool copy_map,
Zone* zone) {
@@ -685,7 +768,7 @@ class GvnBasicBlockState: public ZoneObject {
GvnBasicBlockState(GvnBasicBlockState* previous,
HBasicBlock* block,
- HValueMap* map,
+ HInstructionMap* map,
HSideEffectMap* dominators,
Zone* zone)
: previous_(previous), next_(NULL) {
@@ -732,7 +815,7 @@ class GvnBasicBlockState: public ZoneObject {
GvnBasicBlockState* previous_;
GvnBasicBlockState* next_;
HBasicBlock* block_;
- HValueMap* map_;
+ HInstructionMap* map_;
HSideEffectMap dominators_;
int dominated_index_;
int length_;
@@ -745,13 +828,14 @@ class GvnBasicBlockState: public ZoneObject {
// GvnBasicBlockState instances.
void HGlobalValueNumberingPhase::AnalyzeGraph() {
HBasicBlock* entry_block = graph()->entry_block();
- HValueMap* entry_map = new(zone()) HValueMap(zone());
+ HInstructionMap* entry_map =
+ new(zone()) HInstructionMap(zone(), &side_effects_tracker_);
GvnBasicBlockState* current =
GvnBasicBlockState::CreateEntry(zone(), entry_block, entry_map);
while (current != NULL) {
HBasicBlock* block = current->block();
- HValueMap* map = current->map();
+ HInstructionMap* map = current->map();
HSideEffectMap* dominators = current->dominators();
TRACE_GVN_2("Analyzing block B%d%s\n",
@@ -770,17 +854,15 @@ void HGlobalValueNumberingPhase::AnalyzeGraph() {
if (instr->CheckFlag(HValue::kTrackSideEffectDominators)) {
for (int i = 0; i < kNumberOfTrackedSideEffects; i++) {
HValue* other = dominators->at(i);
- GVNFlag changes_flag = HValue::ChangesFlagFromInt(i);
- GVNFlag depends_on_flag = HValue::DependsOnFlagFromInt(i);
- if (instr->DependsOnFlags().Contains(depends_on_flag) &&
- (other != NULL)) {
+ GVNFlag flag = GVNFlagFromInt(i);
+ if (instr->DependsOnFlags().Contains(flag) && other != NULL) {
TRACE_GVN_5("Side-effect #%d in %d (%s) is dominated by %d (%s)\n",
i,
instr->id(),
instr->Mnemonic(),
other->id(),
other->Mnemonic());
- if (instr->HandleSideEffectDominator(changes_flag, other)) {
+ if (instr->HandleSideEffectDominator(flag, other)) {
removed_side_effects_ = true;
}
}
@@ -789,21 +871,27 @@ void HGlobalValueNumberingPhase::AnalyzeGraph() {
// Instruction was unlinked during graph traversal.
if (!instr->IsLinked()) continue;
- GVNFlagSet flags = instr->ChangesFlags();
- if (!flags.IsEmpty()) {
+ SideEffects changes = side_effects_tracker_.ComputeChanges(instr);
+ if (!changes.IsEmpty()) {
// Clear all instructions in the map that are affected by side effects.
// Store instruction as the dominating one for tracked side effects.
- map->Kill(flags);
- dominators->Store(flags, instr);
- TRACE_GVN_2("Instruction %d %s\n", instr->id(),
- GetGVNFlagsString(flags).get());
+ map->Kill(changes);
+ dominators->Store(changes, instr);
+ if (FLAG_trace_gvn) {
+ HeapStringAllocator allocator;
+ StringStream stream(&allocator);
+ stream.Add("Instruction i%d changes ", instr->id());
+ side_effects_tracker_.PrintSideEffectsTo(&stream, changes);
+ stream.Add("\n");
+ stream.OutputToStdOut();
+ }
}
if (instr->CheckFlag(HValue::kUseGVN)) {
ASSERT(!instr->HasObservableSideEffects());
- HValue* other = map->Lookup(instr);
+ HInstruction* other = map->Lookup(instr);
if (other != NULL) {
ASSERT(instr->Equals(other) && other->Equals(instr));
- TRACE_GVN_4("Replacing value %d (%s) with value %d (%s)\n",
+ TRACE_GVN_4("Replacing instruction i%d (%s) with i%d (%s)\n",
instr->id(),
instr->Mnemonic(),
other->id(),
@@ -823,7 +911,7 @@ void HGlobalValueNumberingPhase::AnalyzeGraph() {
if (next != NULL) {
HBasicBlock* dominated = next->block();
- HValueMap* successor_map = next->map();
+ HInstructionMap* successor_map = next->map();
HSideEffectMap* successor_dominators = next->dominators();
// Kill everything killed on any path between this block and the
@@ -834,7 +922,7 @@ void HGlobalValueNumberingPhase::AnalyzeGraph() {
if ((!successor_map->IsEmpty() || !successor_dominators->IsEmpty()) &&
dominator_block->block_id() + 1 < dominated->block_id()) {
visited_on_paths_.Clear();
- GVNFlagSet side_effects_on_all_paths =
+ SideEffects side_effects_on_all_paths =
CollectSideEffectsOnPathsToDominatedBlock(dominator_block,
dominated);
successor_map->Kill(side_effects_on_all_paths);
diff --git a/deps/v8/src/hydrogen-gvn.h b/deps/v8/src/hydrogen-gvn.h
index 30333cca61..d00dd05585 100644
--- a/deps/v8/src/hydrogen-gvn.h
+++ b/deps/v8/src/hydrogen-gvn.h
@@ -36,15 +36,97 @@
namespace v8 {
namespace internal {
+// This class extends GVNFlagSet with additional "special" dynamic side effects,
+// which can be used to represent side effects that cannot be expressed using
+// the GVNFlags of an HInstruction. These special side effects are tracked by a
+// SideEffectsTracker (see below).
+class SideEffects V8_FINAL {
+ public:
+ static const int kNumberOfSpecials = 64 - kNumberOfFlags;
+
+ SideEffects() : bits_(0) {
+ ASSERT(kNumberOfFlags + kNumberOfSpecials == sizeof(bits_) * CHAR_BIT);
+ }
+ explicit SideEffects(GVNFlagSet flags) : bits_(flags.ToIntegral()) {}
+ bool IsEmpty() const { return bits_ == 0; }
+ bool ContainsFlag(GVNFlag flag) const {
+ return (bits_ & MaskFlag(flag)) != 0;
+ }
+ bool ContainsSpecial(int special) const {
+ return (bits_ & MaskSpecial(special)) != 0;
+ }
+ bool ContainsAnyOf(SideEffects set) const { return (bits_ & set.bits_) != 0; }
+ void Add(SideEffects set) { bits_ |= set.bits_; }
+ void AddSpecial(int special) { bits_ |= MaskSpecial(special); }
+ void RemoveFlag(GVNFlag flag) { bits_ &= ~MaskFlag(flag); }
+ void RemoveAll() { bits_ = 0; }
+ uint64_t ToIntegral() const { return bits_; }
+ void PrintTo(StringStream* stream) const;
+
+ private:
+ uint64_t MaskFlag(GVNFlag flag) const {
+ return static_cast<uint64_t>(1) << static_cast<unsigned>(flag);
+ }
+ uint64_t MaskSpecial(int special) const {
+ ASSERT(special >= 0);
+ ASSERT(special < kNumberOfSpecials);
+ return static_cast<uint64_t>(1) << static_cast<unsigned>(
+ special + kNumberOfFlags);
+ }
+
+ uint64_t bits_;
+};
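+
+// The encoding above is a single 64-bit word: the low bits mirror the static
+// GVN flags one-to-one, and the remaining bits are handed out as "special"
+// slots by the tracker. The sketch below shows that split in isolation;
+// kNumFlags is a made-up stand-in for kNumberOfFlags and the snippet is an
+// illustration, not the class itself.
+//
+//   #include <cassert>
+//   #include <cstdint>
+//
+//   constexpr int kNumFlags = 22;                 // Hypothetical flag count.
+//   constexpr int kNumSpecials = 64 - kNumFlags;  // Bits left for specials.
+//
+//   inline uint64_t MaskFlag(int flag) {
+//     assert(flag >= 0 && flag < kNumFlags);
+//     return uint64_t{1} << flag;                   // Bits [0, kNumFlags).
+//   }
+//
+//   inline uint64_t MaskSpecial(int special) {
+//     assert(special >= 0 && special < kNumSpecials);
+//     return uint64_t{1} << (special + kNumFlags);  // Bits [kNumFlags, 64).
+//   }
+//
+//   int main() {
+//     uint64_t bits = 0;
+//     bits |= MaskFlag(3);     // Some coarse flag, e.g. a maps dependency.
+//     bits |= MaskSpecial(0);  // The first dynamically tracked slot.
+//     assert((bits & MaskFlag(3)) != 0);
+//     assert((bits & MaskSpecial(1)) == 0);
+//     return 0;
+//   }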
+
+
+// Tracks global variable and inobject field loads/stores in a fine-grained
+// fashion, and represents them using the "special" dynamic side effects of the
+// SideEffects class (see above). This way unrelated global variable/inobject
+// field stores don't prevent hoisting and merging of global variable/inobject
+// field loads.
+class SideEffectsTracker V8_FINAL BASE_EMBEDDED {
+ public:
+ SideEffectsTracker() : num_global_vars_(0), num_inobject_fields_(0) {}
+ SideEffects ComputeChanges(HInstruction* instr);
+ SideEffects ComputeDependsOn(HInstruction* instr);
+ void PrintSideEffectsTo(StringStream* stream, SideEffects side_effects) const;
+
+ private:
+ bool ComputeGlobalVar(Unique<Cell> cell, int* index);
+ bool ComputeInobjectField(HObjectAccess access, int* index);
+
+ static int GlobalVar(int index) {
+ ASSERT(index >= 0);
+ ASSERT(index < kNumberOfGlobalVars);
+ return index;
+ }
+ static int InobjectField(int index) {
+ ASSERT(index >= 0);
+ ASSERT(index < kNumberOfInobjectFields);
+ return index + kNumberOfGlobalVars;
+ }
+
+ // Track up to four global vars.
+ static const int kNumberOfGlobalVars = 4;
+ Unique<Cell> global_vars_[kNumberOfGlobalVars];
+ int num_global_vars_;
+
+  // Track inobject fields in the special bits left over after the global vars.
+ static const int kNumberOfInobjectFields =
+ SideEffects::kNumberOfSpecials - kNumberOfGlobalVars;
+ HObjectAccess inobject_fields_[kNumberOfInobjectFields];
+ int num_inobject_fields_;
+};
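
ComputeGlobalVar and ComputeInobjectField amount to a tiny interning table: a key seen before reuses its index, a new key takes the next free slot while capacity lasts, and once the table is full the caller keeps the coarse flag instead. A generic sketch of that policy, with illustrative names and not taken from the patch, could look like this:

template <typename Key, int kCapacity>
class SmallInterner {
 public:
  SmallInterner() : count_(0) {}
  // Returns true and sets *index if |key| already had or just received a
  // slot; returns false once all kCapacity slots belong to other keys.
  bool Intern(const Key& key, int* index) {
    for (int i = 0; i < count_; ++i) {
      if (keys_[i] == key) { *index = i; return true; }
    }
    if (count_ < kCapacity) {
      keys_[count_] = key;
      *index = count_++;
      return true;
    }
    return false;
  }

 private:
  Key keys_[kCapacity];
  int count_;
};

Instantiated with kCapacity equal to 4 this mirrors the GlobalVar slots: the first four distinct cells each get their own special bit, and any further cell falls back to the blanket kGlobalVars behaviour.
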
+
+
// Perform common subexpression elimination and loop-invariant code motion.
-class HGlobalValueNumberingPhase : public HPhase {
+class HGlobalValueNumberingPhase V8_FINAL : public HPhase {
public:
explicit HGlobalValueNumberingPhase(HGraph* graph);
void Run();
private:
- GVNFlagSet CollectSideEffectsOnPathsToDominatedBlock(
+ SideEffects CollectSideEffectsOnPathsToDominatedBlock(
HBasicBlock* dominator,
HBasicBlock* dominated);
void AnalyzeGraph();
@@ -52,17 +134,18 @@ class HGlobalValueNumberingPhase : public HPhase {
void LoopInvariantCodeMotion();
void ProcessLoopBlock(HBasicBlock* block,
HBasicBlock* before_loop,
- GVNFlagSet loop_kills);
+ SideEffects loop_kills);
bool AllowCodeMotion();
bool ShouldMove(HInstruction* instr, HBasicBlock* loop_header);
+ SideEffectsTracker side_effects_tracker_;
bool removed_side_effects_;
// A map of block IDs to their side effects.
- ZoneList<GVNFlagSet> block_side_effects_;
+ ZoneList<SideEffects> block_side_effects_;
// A map of loop header block IDs to their loop's side effects.
- ZoneList<GVNFlagSet> loop_side_effects_;
+ ZoneList<SideEffects> loop_side_effects_;
// Used when collecting side effects on paths from dominator to
// dominated.
@@ -71,7 +154,6 @@ class HGlobalValueNumberingPhase : public HPhase {
DISALLOW_COPY_AND_ASSIGN(HGlobalValueNumberingPhase);
};
-
} } // namespace v8::internal
#endif // V8_HYDROGEN_GVN_H_
diff --git a/deps/v8/src/hydrogen-instructions.cc b/deps/v8/src/hydrogen-instructions.cc
index 2ca0c54a55..84dcb18248 100644
--- a/deps/v8/src/hydrogen-instructions.cc
+++ b/deps/v8/src/hydrogen-instructions.cc
@@ -30,11 +30,14 @@
#include "double.h"
#include "factory.h"
#include "hydrogen-infer-representation.h"
+#include "property-details-inl.h"
#if V8_TARGET_ARCH_IA32
#include "ia32/lithium-ia32.h"
#elif V8_TARGET_ARCH_X64
#include "x64/lithium-x64.h"
+#elif V8_TARGET_ARCH_ARM64
+#include "arm64/lithium-arm64.h"
#elif V8_TARGET_ARCH_ARM
#include "arm/lithium-arm.h"
#elif V8_TARGET_ARCH_MIPS
@@ -604,11 +607,11 @@ void HValue::PrintChangesTo(StringStream* stream) {
stream->Add("*");
} else {
bool add_comma = false;
-#define PRINT_DO(type) \
- if (changes_flags.Contains(kChanges##type)) { \
- if (add_comma) stream->Add(","); \
- add_comma = true; \
- stream->Add(#type); \
+#define PRINT_DO(Type) \
+ if (changes_flags.Contains(k##Type)) { \
+ if (add_comma) stream->Add(","); \
+ add_comma = true; \
+ stream->Add(#Type); \
}
GVN_TRACKED_FLAG_LIST(PRINT_DO);
GVN_UNTRACKED_FLAG_LIST(PRINT_DO);
@@ -680,6 +683,19 @@ void HValue::ComputeInitialRange(Zone* zone) {
}
+void HSourcePosition::PrintTo(FILE* out) {
+ if (IsUnknown()) {
+ PrintF(out, "<?>");
+ } else {
+ if (FLAG_hydrogen_track_positions) {
+ PrintF(out, "<%d:%d>", inlining_id(), position());
+ } else {
+ PrintF(out, "<0:%d>", raw());
+ }
+ }
+}
+
+
void HInstruction::PrintTo(StringStream* stream) {
PrintMnemonicTo(stream);
PrintDataTo(stream);
@@ -736,8 +752,7 @@ void HInstruction::InsertBefore(HInstruction* next) {
next_ = next;
previous_ = prev;
SetBlock(next->block());
- if (position() == RelocInfo::kNoPosition &&
- next->position() != RelocInfo::kNoPosition) {
+ if (!has_position() && next->has_position()) {
set_position(next->position());
}
}
@@ -774,8 +789,7 @@ void HInstruction::InsertAfter(HInstruction* previous) {
if (block->last() == previous) {
block->set_last(this);
}
- if (position() == RelocInfo::kNoPosition &&
- previous->position() != RelocInfo::kNoPosition) {
+ if (!has_position() && previous->has_position()) {
set_position(previous->position());
}
}
@@ -827,6 +841,107 @@ void HInstruction::Verify() {
#endif
+static bool HasPrimitiveRepresentation(HValue* instr) {
+ return instr->representation().IsInteger32() ||
+ instr->representation().IsDouble();
+}
+
+
+bool HInstruction::CanDeoptimize() {
+ // TODO(titzer): make this a virtual method?
+ switch (opcode()) {
+ case HValue::kAccessArgumentsAt:
+ case HValue::kApplyArguments:
+ case HValue::kArgumentsElements:
+ case HValue::kArgumentsLength:
+ case HValue::kArgumentsObject:
+ case HValue::kBoundsCheckBaseIndexInformation:
+ case HValue::kCapturedObject:
+ case HValue::kClampToUint8:
+ case HValue::kConstant:
+ case HValue::kContext:
+ case HValue::kDateField:
+ case HValue::kDebugBreak:
+ case HValue::kDeclareGlobals:
+ case HValue::kDiv:
+ case HValue::kDummyUse:
+ case HValue::kEnterInlined:
+ case HValue::kEnvironmentMarker:
+ case HValue::kForInCacheArray:
+ case HValue::kForInPrepareMap:
+ case HValue::kFunctionLiteral:
+ case HValue::kGetCachedArrayIndex:
+ case HValue::kGoto:
+ case HValue::kInnerAllocatedObject:
+ case HValue::kInstanceOf:
+ case HValue::kInstanceOfKnownGlobal:
+ case HValue::kInvokeFunction:
+ case HValue::kLeaveInlined:
+ case HValue::kLoadContextSlot:
+ case HValue::kLoadFieldByIndex:
+ case HValue::kLoadFunctionPrototype:
+ case HValue::kLoadGlobalCell:
+ case HValue::kLoadGlobalGeneric:
+ case HValue::kLoadKeyed:
+ case HValue::kLoadKeyedGeneric:
+ case HValue::kLoadNamedField:
+ case HValue::kLoadNamedGeneric:
+ case HValue::kLoadRoot:
+ case HValue::kMapEnumLength:
+ case HValue::kMathFloorOfDiv:
+ case HValue::kMathMinMax:
+ case HValue::kMod:
+ case HValue::kMul:
+ case HValue::kOsrEntry:
+ case HValue::kParameter:
+ case HValue::kPower:
+ case HValue::kPushArgument:
+ case HValue::kRor:
+ case HValue::kSar:
+ case HValue::kSeqStringGetChar:
+ case HValue::kSeqStringSetChar:
+ case HValue::kShl:
+ case HValue::kShr:
+ case HValue::kSimulate:
+ case HValue::kStackCheck:
+ case HValue::kStoreCodeEntry:
+ case HValue::kStoreContextSlot:
+ case HValue::kStoreGlobalCell:
+ case HValue::kStoreKeyed:
+ case HValue::kStoreKeyedGeneric:
+ case HValue::kStoreNamedField:
+ case HValue::kStoreNamedGeneric:
+ case HValue::kStringAdd:
+ case HValue::kStringCharCodeAt:
+ case HValue::kStringCharFromCode:
+ case HValue::kSub:
+ case HValue::kThisFunction:
+ case HValue::kToFastProperties:
+ case HValue::kTransitionElementsKind:
+ case HValue::kTrapAllocationMemento:
+ case HValue::kTypeof:
+ case HValue::kUnaryMathOperation:
+ case HValue::kUseConst:
+ case HValue::kWrapReceiver:
+ return false;
+ case HValue::kForceRepresentation:
+ case HValue::kAdd:
+ case HValue::kBitwise:
+ case HValue::kChange:
+ case HValue::kCompareGeneric:
+      // These instructions might deoptimize if they or any of their
+      // operands do not have a primitive (int32 or double) representation.
+ if (!HasPrimitiveRepresentation(this)) return true;
+ for (int i = 0; i < OperandCount(); i++) {
+ HValue* input = OperandAt(i);
+ if (!HasPrimitiveRepresentation(input)) return true;
+ }
+ return false;
+ default:
+ return true;
+ }
+}
+
+
void HDummyUse::PrintDataTo(StringStream* stream) {
value()->PrintNameTo(stream);
}
@@ -1134,6 +1249,7 @@ const char* HUnaryMathOperation::OpName() const {
case kMathExp: return "exp";
case kMathSqrt: return "sqrt";
case kMathPowHalf: return "pow-half";
+ case kMathClz32: return "clz32";
default:
UNREACHABLE();
return NULL;
@@ -1143,6 +1259,7 @@ const char* HUnaryMathOperation::OpName() const {
Range* HUnaryMathOperation::InferRange(Zone* zone) {
Representation r = representation();
+ if (op() == kMathClz32) return new(zone) Range(0, 32);
if (r.IsSmiOrInteger32() && value()->HasRange()) {
if (op() == kMathAbs) {
int upper = value()->range()->upper();
@@ -1200,18 +1317,52 @@ void HHasInstanceTypeAndBranch::PrintDataTo(StringStream* stream) {
void HTypeofIsAndBranch::PrintDataTo(StringStream* stream) {
value()->PrintNameTo(stream);
- stream->Add(" == %o", *type_literal_);
+ stream->Add(" == %o", *type_literal_.handle());
HControlInstruction::PrintDataTo(stream);
}
-bool HTypeofIsAndBranch::KnownSuccessorBlock(HBasicBlock** block) {
- if (value()->representation().IsSpecialization()) {
- if (compares_number_type()) {
- *block = FirstSuccessor();
- } else {
- *block = SecondSuccessor();
+static String* TypeOfString(HConstant* constant, Isolate* isolate) {
+ Heap* heap = isolate->heap();
+ if (constant->HasNumberValue()) return heap->number_string();
+ if (constant->IsUndetectable()) return heap->undefined_string();
+ if (constant->HasStringValue()) return heap->string_string();
+ switch (constant->GetInstanceType()) {
+ case ODDBALL_TYPE: {
+ Unique<Object> unique = constant->GetUnique();
+ if (unique.IsKnownGlobal(heap->true_value()) ||
+ unique.IsKnownGlobal(heap->false_value())) {
+ return heap->boolean_string();
+ }
+ if (unique.IsKnownGlobal(heap->null_value())) {
+ return FLAG_harmony_typeof ? heap->null_string()
+ : heap->object_string();
+ }
+ ASSERT(unique.IsKnownGlobal(heap->undefined_value()));
+ return heap->undefined_string();
}
+ case SYMBOL_TYPE:
+ return heap->symbol_string();
+ case JS_FUNCTION_TYPE:
+ case JS_FUNCTION_PROXY_TYPE:
+ return heap->function_string();
+ default:
+ return heap->object_string();
+ }
+}
+
+
+bool HTypeofIsAndBranch::KnownSuccessorBlock(HBasicBlock** block) {
+ if (FLAG_fold_constants && value()->IsConstant()) {
+ HConstant* constant = HConstant::cast(value());
+ String* type_string = TypeOfString(constant, isolate());
+ bool same_type = type_literal_.IsKnownGlobal(type_string);
+ *block = same_type ? FirstSuccessor() : SecondSuccessor();
+ return true;
+ } else if (value()->representation().IsSpecialization()) {
+ bool number_type =
+ type_literal_.IsKnownGlobal(isolate()->heap()->number_string());
+ *block = number_type ? FirstSuccessor() : SecondSuccessor();
return true;
}
*block = NULL;
@@ -1384,19 +1535,19 @@ void HTypeof::PrintDataTo(StringStream* stream) {
HInstruction* HForceRepresentation::New(Zone* zone, HValue* context,
- HValue* value, Representation required_representation) {
+ HValue* value, Representation representation) {
if (FLAG_fold_constants && value->IsConstant()) {
HConstant* c = HConstant::cast(value);
if (c->HasNumberValue()) {
double double_res = c->DoubleValue();
- if (IsInt32Double(double_res)) {
+ if (representation.CanContainDouble(double_res)) {
return HConstant::New(zone, context,
static_cast<int32_t>(double_res),
- required_representation);
+ representation);
}
}
}
- return new(zone) HForceRepresentation(value, required_representation);
+ return new(zone) HForceRepresentation(value, representation);
}
@@ -1516,7 +1667,7 @@ void HCheckInstanceType::GetCheckMaskAndTag(uint8_t* mask, uint8_t* tag) {
bool HCheckMaps::HandleSideEffectDominator(GVNFlag side_effect,
HValue* dominator) {
- ASSERT(side_effect == kChangesMaps);
+ ASSERT(side_effect == kMaps);
// TODO(mstarzinger): For now we specialize on HStoreNamedField, but once
// type information is rich enough we should generalize this to any HType
// for which the map is known.
@@ -1524,7 +1675,7 @@ bool HCheckMaps::HandleSideEffectDominator(GVNFlag side_effect,
HStoreNamedField* store = HStoreNamedField::cast(dominator);
if (!store->has_transition() || store->object() != value()) return false;
HConstant* transition = HConstant::cast(store->transition());
- if (map_set_.Contains(transition->GetUnique())) {
+ if (map_set_.Contains(Unique<Map>::cast(transition->GetUnique()))) {
DeleteAndReplaceWith(NULL);
return true;
}
@@ -1552,9 +1703,7 @@ void HCheckValue::PrintDataTo(StringStream* stream) {
HValue* HCheckValue::Canonicalize() {
return (value()->IsConstant() &&
- HConstant::cast(value())->GetUnique() == object_)
- ? NULL
- : this;
+ HConstant::cast(value())->EqualsUnique(object_)) ? NULL : this;
}
@@ -1624,7 +1773,17 @@ Range* HChange::InferRange(Zone* zone) {
input_range != NULL &&
input_range->IsInSmiRange()))) {
set_type(HType::Smi());
- ClearGVNFlag(kChangesNewSpacePromotion);
+ ClearChangesFlag(kNewSpacePromotion);
+ }
+ if (to().IsSmiOrTagged() &&
+ input_range != NULL &&
+ input_range->IsInSmiRange() &&
+ (!SmiValuesAre32Bits() ||
+ !value()->CheckFlag(HValue::kUint32) ||
+ input_range->upper() != kMaxInt)) {
+ // The Range class can't express upper bounds in the (kMaxInt, kMaxUint32]
+ // interval, so we treat kMaxInt as a sentinel for this entire interval.
+ ClearFlag(kCanOverflow);
}
Range* result = (input_range != NULL)
? input_range->Copy(zone)
@@ -1647,7 +1806,7 @@ Range* HConstant::InferRange(Zone* zone) {
}
-int HPhi::position() const {
+HSourcePosition HPhi::position() const {
return block()->first()->position();
}
@@ -1750,11 +1909,45 @@ Range* HDiv::InferRange(Zone* zone) {
(a->CanBeMinusZero() ||
(a->CanBeZero() && b->CanBeNegative())));
if (!a->Includes(kMinInt) || !b->Includes(-1)) {
- ClearFlag(HValue::kCanOverflow);
+ ClearFlag(kCanOverflow);
}
if (!b->CanBeZero()) {
- ClearFlag(HValue::kCanBeDivByZero);
+ ClearFlag(kCanBeDivByZero);
+ }
+ return result;
+ } else {
+ return HValue::InferRange(zone);
+ }
+}
+
+
+Range* HMathFloorOfDiv::InferRange(Zone* zone) {
+ if (representation().IsInteger32()) {
+ Range* a = left()->range();
+ Range* b = right()->range();
+ Range* result = new(zone) Range();
+ result->set_can_be_minus_zero(!CheckFlag(kAllUsesTruncatingToInt32) &&
+ (a->CanBeMinusZero() ||
+ (a->CanBeZero() && b->CanBeNegative())));
+ if (!a->Includes(kMinInt)) {
+ ClearFlag(kLeftCanBeMinInt);
+ }
+
+ if (!a->CanBeNegative()) {
+ ClearFlag(HValue::kLeftCanBeNegative);
+ }
+
+ if (!a->CanBePositive()) {
+ ClearFlag(HValue::kLeftCanBePositive);
+ }
+
+ if (!a->Includes(kMinInt) || !b->Includes(-1)) {
+ ClearFlag(kCanOverflow);
+ }
+
+ if (!b->CanBeZero()) {
+ ClearFlag(kCanBeDivByZero);
}
return result;
} else {
@@ -1781,6 +1974,10 @@ Range* HMod::InferRange(Zone* zone) {
result->set_can_be_minus_zero(!CheckFlag(kAllUsesTruncatingToInt32) &&
left_can_be_negative);
+ if (!a->CanBeNegative()) {
+ ClearFlag(HValue::kLeftCanBeNegative);
+ }
+
if (!a->Includes(kMinInt) || !b->Includes(-1)) {
ClearFlag(HValue::kCanOverflow);
}
@@ -2487,13 +2684,16 @@ HConstant::HConstant(Handle<Object> handle, Representation r)
has_int32_value_(false),
has_double_value_(false),
has_external_reference_value_(false),
- is_internalized_string_(false),
is_not_in_new_space_(true),
- is_cell_(false),
- boolean_value_(handle->BooleanValue()) {
+ boolean_value_(handle->BooleanValue()),
+ is_undetectable_(false),
+ instance_type_(kUnknownInstanceType) {
if (handle->IsHeapObject()) {
- Heap* heap = Handle<HeapObject>::cast(handle)->GetHeap();
+ Handle<HeapObject> heap_obj = Handle<HeapObject>::cast(handle);
+ Heap* heap = heap_obj->GetHeap();
is_not_in_new_space_ = !heap->InNewSpace(*handle);
+ instance_type_ = heap_obj->map()->instance_type();
+ is_undetectable_ = heap_obj->map()->is_undetectable();
}
if (handle->IsNumber()) {
double n = handle->Number();
@@ -2503,12 +2703,8 @@ HConstant::HConstant(Handle<Object> handle, Representation r)
double_value_ = n;
has_double_value_ = true;
// TODO(titzer): if this heap number is new space, tenure a new one.
- } else {
- is_internalized_string_ = handle->IsInternalizedString();
}
- is_cell_ = !handle.is_null() &&
- (handle->IsCell() || handle->IsPropertyCell());
Initialize(r);
}
@@ -2516,20 +2712,20 @@ HConstant::HConstant(Handle<Object> handle, Representation r)
HConstant::HConstant(Unique<Object> unique,
Representation r,
HType type,
- bool is_internalize_string,
bool is_not_in_new_space,
- bool is_cell,
- bool boolean_value)
+ bool boolean_value,
+ bool is_undetectable,
+ InstanceType instance_type)
: HTemplateInstruction<0>(type),
object_(unique),
has_smi_value_(false),
has_int32_value_(false),
has_double_value_(false),
has_external_reference_value_(false),
- is_internalized_string_(is_internalize_string),
is_not_in_new_space_(is_not_in_new_space),
- is_cell_(is_cell),
- boolean_value_(boolean_value) {
+ boolean_value_(boolean_value),
+ is_undetectable_(is_undetectable),
+ instance_type_(instance_type) {
ASSERT(!unique.handle().is_null());
ASSERT(!type.IsTaggedNumber());
Initialize(r);
@@ -2545,12 +2741,12 @@ HConstant::HConstant(int32_t integer_value,
has_int32_value_(true),
has_double_value_(true),
has_external_reference_value_(false),
- is_internalized_string_(false),
is_not_in_new_space_(is_not_in_new_space),
- is_cell_(false),
boolean_value_(integer_value != 0),
+ is_undetectable_(false),
int32_value_(integer_value),
- double_value_(FastI2D(integer_value)) {
+ double_value_(FastI2D(integer_value)),
+ instance_type_(kUnknownInstanceType) {
// It's possible to create a constant with a value in Smi-range but stored
// in a (pre-existing) HeapNumber. See crbug.com/349878.
bool could_be_heapobject = r.IsTagged() && !object.handle().is_null();
@@ -2568,12 +2764,12 @@ HConstant::HConstant(double double_value,
has_int32_value_(IsInteger32(double_value)),
has_double_value_(true),
has_external_reference_value_(false),
- is_internalized_string_(false),
is_not_in_new_space_(is_not_in_new_space),
- is_cell_(false),
boolean_value_(double_value != 0 && !std::isnan(double_value)),
+ is_undetectable_(false),
int32_value_(DoubleToInt32(double_value)),
- double_value_(double_value) {
+ double_value_(double_value),
+ instance_type_(kUnknownInstanceType) {
has_smi_value_ = has_int32_value_ && Smi::IsValid(int32_value_);
// It's possible to create a constant with a value in Smi-range but stored
// in a (pre-existing) HeapNumber. See crbug.com/349878.
@@ -2591,11 +2787,11 @@ HConstant::HConstant(ExternalReference reference)
has_int32_value_(false),
has_double_value_(false),
has_external_reference_value_(true),
- is_internalized_string_(false),
is_not_in_new_space_(true),
- is_cell_(false),
boolean_value_(true),
- external_reference_value_(reference) {
+ is_undetectable_(false),
+ external_reference_value_(reference),
+ instance_type_(kUnknownInstanceType) {
Initialize(Representation::External());
}
@@ -2694,10 +2890,10 @@ HConstant* HConstant::CopyToRepresentation(Representation r, Zone* zone) const {
return new(zone) HConstant(object_,
r,
type_,
- is_internalized_string_,
is_not_in_new_space_,
- is_cell_,
- boolean_value_);
+ boolean_value_,
+ is_undetectable_,
+ instance_type_);
}
@@ -3011,12 +3207,70 @@ void HCompareObjectEqAndBranch::PrintDataTo(StringStream* stream) {
bool HCompareObjectEqAndBranch::KnownSuccessorBlock(HBasicBlock** block) {
- if (left()->IsConstant() && right()->IsConstant()) {
- bool comparison_result =
- HConstant::cast(left())->DataEquals(HConstant::cast(right()));
- *block = comparison_result
- ? FirstSuccessor()
- : SecondSuccessor();
+ if (known_successor_index() != kNoKnownSuccessorIndex) {
+ *block = SuccessorAt(known_successor_index());
+ return true;
+ }
+ if (FLAG_fold_constants && left()->IsConstant() && right()->IsConstant()) {
+ *block = HConstant::cast(left())->DataEquals(HConstant::cast(right()))
+ ? FirstSuccessor() : SecondSuccessor();
+ return true;
+ }
+ *block = NULL;
+ return false;
+}
+
+
+bool ConstantIsObject(HConstant* constant, Isolate* isolate) {
+ if (constant->HasNumberValue()) return false;
+ if (constant->GetUnique().IsKnownGlobal(isolate->heap()->null_value())) {
+ return true;
+ }
+ if (constant->IsUndetectable()) return false;
+ InstanceType type = constant->GetInstanceType();
+ return (FIRST_NONCALLABLE_SPEC_OBJECT_TYPE <= type) &&
+ (type <= LAST_NONCALLABLE_SPEC_OBJECT_TYPE);
+}
+
+
+bool HIsObjectAndBranch::KnownSuccessorBlock(HBasicBlock** block) {
+ if (FLAG_fold_constants && value()->IsConstant()) {
+ *block = ConstantIsObject(HConstant::cast(value()), isolate())
+ ? FirstSuccessor() : SecondSuccessor();
+ return true;
+ }
+ *block = NULL;
+ return false;
+}
+
+
+bool HIsStringAndBranch::KnownSuccessorBlock(HBasicBlock** block) {
+ if (FLAG_fold_constants && value()->IsConstant()) {
+ *block = HConstant::cast(value())->HasStringValue()
+ ? FirstSuccessor() : SecondSuccessor();
+ return true;
+ }
+ *block = NULL;
+ return false;
+}
+
+
+bool HIsUndetectableAndBranch::KnownSuccessorBlock(HBasicBlock** block) {
+ if (FLAG_fold_constants && value()->IsConstant()) {
+ *block = HConstant::cast(value())->IsUndetectable()
+ ? FirstSuccessor() : SecondSuccessor();
+ return true;
+ }
+ *block = NULL;
+ return false;
+}
+
+
+bool HHasInstanceTypeAndBranch::KnownSuccessorBlock(HBasicBlock** block) {
+ if (FLAG_fold_constants && value()->IsConstant()) {
+ InstanceType type = HConstant::cast(value())->GetInstanceType();
+ *block = (from_ <= type) && (type <= to_)
+ ? FirstSuccessor() : SecondSuccessor();
return true;
}
*block = NULL;
@@ -3031,6 +3285,14 @@ void HCompareHoleAndBranch::InferRepresentation(
bool HCompareMinusZeroAndBranch::KnownSuccessorBlock(HBasicBlock** block) {
+ if (FLAG_fold_constants && value()->IsConstant()) {
+ HConstant* constant = HConstant::cast(value());
+ if (constant->HasDoubleValue()) {
+ *block = IsMinusZero(constant->DoubleValue())
+ ? FirstSuccessor() : SecondSuccessor();
+ return true;
+ }
+ }
if (value()->representation().IsSmiOrInteger32()) {
// A Smi or Integer32 cannot contain minus zero.
*block = SecondSuccessor();
@@ -3358,7 +3620,8 @@ void HLoadGlobalGeneric::PrintDataTo(StringStream* stream) {
void HInnerAllocatedObject::PrintDataTo(StringStream* stream) {
base_object()->PrintNameTo(stream);
- stream->Add(" offset %d", offset());
+ stream->Add(" offset ");
+ offset()->PrintTo(stream);
}
@@ -3422,7 +3685,7 @@ Representation HUnaryMathOperation::RepresentationFromInputs() {
bool HAllocate::HandleSideEffectDominator(GVNFlag side_effect,
HValue* dominator) {
- ASSERT(side_effect == kChangesNewSpacePromotion);
+ ASSERT(side_effect == kNewSpacePromotion);
Zone* zone = block()->zone();
if (!FLAG_use_allocation_folding) return false;
@@ -3435,6 +3698,15 @@ bool HAllocate::HandleSideEffectDominator(GVNFlag side_effect,
return false;
}
+ // Check whether we are folding within the same block for local folding.
+ if (FLAG_use_local_allocation_folding && dominator->block() != block()) {
+ if (FLAG_trace_allocation_folding) {
+ PrintF("#%d (%s) cannot fold into #%d (%s), crosses basic blocks\n",
+ id(), Mnemonic(), dominator->id(), dominator->Mnemonic());
+ }
+ return false;
+ }
+
HAllocate* dominator_allocate = HAllocate::cast(dominator);
HValue* dominator_size = dominator_allocate->size();
HValue* current_size = size();
@@ -3683,99 +3955,6 @@ void HAllocate::PrintDataTo(StringStream* stream) {
}
-HValue* HUnaryMathOperation::EnsureAndPropagateNotMinusZero(
- BitVector* visited) {
- visited->Add(id());
- if (representation().IsSmiOrInteger32() &&
- !value()->representation().Equals(representation())) {
- if (value()->range() == NULL || value()->range()->CanBeMinusZero()) {
- SetFlag(kBailoutOnMinusZero);
- }
- }
- if (RequiredInputRepresentation(0).IsSmiOrInteger32() &&
- representation().Equals(RequiredInputRepresentation(0))) {
- return value();
- }
- return NULL;
-}
-
-
-HValue* HChange::EnsureAndPropagateNotMinusZero(BitVector* visited) {
- visited->Add(id());
- if (from().IsSmiOrInteger32()) return NULL;
- if (CanTruncateToInt32()) return NULL;
- if (value()->range() == NULL || value()->range()->CanBeMinusZero()) {
- SetFlag(kBailoutOnMinusZero);
- }
- ASSERT(!from().IsSmiOrInteger32() || !to().IsSmiOrInteger32());
- return NULL;
-}
-
-
-HValue* HForceRepresentation::EnsureAndPropagateNotMinusZero(
- BitVector* visited) {
- visited->Add(id());
- return value();
-}
-
-
-HValue* HMod::EnsureAndPropagateNotMinusZero(BitVector* visited) {
- visited->Add(id());
- if (range() == NULL || range()->CanBeMinusZero()) {
- SetFlag(kBailoutOnMinusZero);
- return left();
- }
- return NULL;
-}
-
-
-HValue* HDiv::EnsureAndPropagateNotMinusZero(BitVector* visited) {
- visited->Add(id());
- if (range() == NULL || range()->CanBeMinusZero()) {
- SetFlag(kBailoutOnMinusZero);
- }
- return NULL;
-}
-
-
-HValue* HMathFloorOfDiv::EnsureAndPropagateNotMinusZero(BitVector* visited) {
- visited->Add(id());
- SetFlag(kBailoutOnMinusZero);
- return NULL;
-}
-
-
-HValue* HMul::EnsureAndPropagateNotMinusZero(BitVector* visited) {
- visited->Add(id());
- if (range() == NULL || range()->CanBeMinusZero()) {
- SetFlag(kBailoutOnMinusZero);
- }
- return NULL;
-}
-
-
-HValue* HSub::EnsureAndPropagateNotMinusZero(BitVector* visited) {
- visited->Add(id());
- // Propagate to the left argument. If the left argument cannot be -0, then
- // the result of the add operation cannot be either.
- if (range() == NULL || range()->CanBeMinusZero()) {
- return left();
- }
- return NULL;
-}
-
-
-HValue* HAdd::EnsureAndPropagateNotMinusZero(BitVector* visited) {
- visited->Add(id());
- // Propagate to the left argument. If the left argument cannot be -0, then
- // the result of the sub operation cannot be either.
- if (range() == NULL || range()->CanBeMinusZero()) {
- return left();
- }
- return NULL;
-}
-
-
bool HStoreKeyed::NeedsCanonicalization() {
// If value is an integer or smi or comes from the result of a keyed load or
// constant then it is either be a non-hole value or in the case of a constant
@@ -3846,9 +4025,15 @@ HInstruction* HStringAdd::New(Zone* zone,
HConstant* c_right = HConstant::cast(right);
HConstant* c_left = HConstant::cast(left);
if (c_left->HasStringValue() && c_right->HasStringValue()) {
- Handle<String> concat = zone->isolate()->factory()->NewFlatConcatString(
- c_left->StringValue(), c_right->StringValue());
- return HConstant::New(zone, context, concat);
+ Handle<String> left_string = c_left->StringValue();
+ Handle<String> right_string = c_right->StringValue();
+      // Prevent a possible exception due to an invalid string length.
+ if (left_string->length() + right_string->length() < String::kMaxLength) {
+ Handle<String> concat = zone->isolate()->factory()->NewFlatConcatString(
+ c_left->StringValue(), c_right->StringValue());
+ ASSERT(!concat.is_null());
+ return HConstant::New(zone, context, concat);
+ }
}
}
return new(zone) HStringAdd(
@@ -3864,6 +4049,7 @@ void HStringAdd::PrintDataTo(StringStream* stream) {
} else if ((flags() & STRING_ADD_CHECK_BOTH) == STRING_ADD_CHECK_RIGHT) {
stream->Add("_CheckRight");
}
+ HBinaryOperation::PrintDataTo(stream);
stream->Add(" (");
if (pretenure_flag() == NOT_TENURED) stream->Add("N");
else if (pretenure_flag() == TENURED) stream->Add("D");
@@ -3913,6 +4099,8 @@ HInstruction* HUnaryMathOperation::New(
case kMathRound:
case kMathFloor:
return H_CONSTANT_DOUBLE(d);
+ case kMathClz32:
+ return H_CONSTANT_INT(32);
default:
UNREACHABLE();
break;
@@ -3938,6 +4126,11 @@ HInstruction* HUnaryMathOperation::New(
return H_CONSTANT_DOUBLE(std::floor(d + 0.5));
case kMathFloor:
return H_CONSTANT_DOUBLE(std::floor(d));
+ case kMathClz32: {
+ uint32_t i = DoubleToUint32(d);
+ return H_CONSTANT_INT(
+ (i == 0) ? 32 : CompilerIntrinsics::CountLeadingZeros(i));
+ }
default:
UNREACHABLE();
break;
@@ -4400,56 +4593,80 @@ HObjectAccess HObjectAccess::ForCellPayload(Isolate* isolate) {
}
-void HObjectAccess::SetGVNFlags(HValue *instr, bool is_store) {
+void HObjectAccess::SetGVNFlags(HValue *instr, PropertyAccessType access_type) {
// set the appropriate GVN flags for a given load or store instruction
- if (is_store) {
+ if (access_type == STORE) {
// track dominating allocations in order to eliminate write barriers
- instr->SetGVNFlag(kDependsOnNewSpacePromotion);
+ instr->SetDependsOnFlag(::v8::internal::kNewSpacePromotion);
instr->SetFlag(HValue::kTrackSideEffectDominators);
} else {
// try to GVN loads, but don't hoist above map changes
instr->SetFlag(HValue::kUseGVN);
- instr->SetGVNFlag(kDependsOnMaps);
+ instr->SetDependsOnFlag(::v8::internal::kMaps);
}
switch (portion()) {
case kArrayLengths:
- instr->SetGVNFlag(is_store
- ? kChangesArrayLengths : kDependsOnArrayLengths);
+ if (access_type == STORE) {
+ instr->SetChangesFlag(::v8::internal::kArrayLengths);
+ } else {
+ instr->SetDependsOnFlag(::v8::internal::kArrayLengths);
+ }
break;
case kStringLengths:
- instr->SetGVNFlag(is_store
- ? kChangesStringLengths : kDependsOnStringLengths);
+ if (access_type == STORE) {
+ instr->SetChangesFlag(::v8::internal::kStringLengths);
+ } else {
+ instr->SetDependsOnFlag(::v8::internal::kStringLengths);
+ }
break;
case kInobject:
- instr->SetGVNFlag(is_store
- ? kChangesInobjectFields : kDependsOnInobjectFields);
+ if (access_type == STORE) {
+ instr->SetChangesFlag(::v8::internal::kInobjectFields);
+ } else {
+ instr->SetDependsOnFlag(::v8::internal::kInobjectFields);
+ }
break;
case kDouble:
- instr->SetGVNFlag(is_store
- ? kChangesDoubleFields : kDependsOnDoubleFields);
+ if (access_type == STORE) {
+ instr->SetChangesFlag(::v8::internal::kDoubleFields);
+ } else {
+ instr->SetDependsOnFlag(::v8::internal::kDoubleFields);
+ }
break;
case kBackingStore:
- instr->SetGVNFlag(is_store
- ? kChangesBackingStoreFields : kDependsOnBackingStoreFields);
+ if (access_type == STORE) {
+ instr->SetChangesFlag(::v8::internal::kBackingStoreFields);
+ } else {
+ instr->SetDependsOnFlag(::v8::internal::kBackingStoreFields);
+ }
break;
case kElementsPointer:
- instr->SetGVNFlag(is_store
- ? kChangesElementsPointer : kDependsOnElementsPointer);
+ if (access_type == STORE) {
+ instr->SetChangesFlag(::v8::internal::kElementsPointer);
+ } else {
+ instr->SetDependsOnFlag(::v8::internal::kElementsPointer);
+ }
break;
case kMaps:
- instr->SetGVNFlag(is_store
- ? kChangesMaps : kDependsOnMaps);
+ if (access_type == STORE) {
+ instr->SetChangesFlag(::v8::internal::kMaps);
+ } else {
+ instr->SetDependsOnFlag(::v8::internal::kMaps);
+ }
break;
case kExternalMemory:
- instr->SetGVNFlag(is_store
- ? kChangesExternalMemory : kDependsOnExternalMemory);
+ if (access_type == STORE) {
+ instr->SetChangesFlag(::v8::internal::kExternalMemory);
+ } else {
+ instr->SetDependsOnFlag(::v8::internal::kExternalMemory);
+ }
break;
}
}
-void HObjectAccess::PrintTo(StringStream* stream) {
+void HObjectAccess::PrintTo(StringStream* stream) const {
stream->Add(".");
switch (portion()) {
diff --git a/deps/v8/src/hydrogen-instructions.h b/deps/v8/src/hydrogen-instructions.h
index a62f3cebf2..1e6ac19bf7 100644
--- a/deps/v8/src/hydrogen-instructions.h
+++ b/deps/v8/src/hydrogen-instructions.h
@@ -102,12 +102,14 @@ class LChunkBuilder;
V(CompareObjectEqAndBranch) \
V(CompareMap) \
V(Constant) \
+ V(ConstructDouble) \
V(Context) \
V(DateField) \
V(DebugBreak) \
V(DeclareGlobals) \
V(Deoptimize) \
V(Div) \
+ V(DoubleBits) \
V(DummyUse) \
V(EnterInlined) \
V(EnvironmentMarker) \
@@ -224,6 +226,9 @@ class LChunkBuilder;
}
+enum PropertyAccessType { LOAD, STORE };
+
+
class Range V8_FINAL : public ZoneObject {
public:
Range()
@@ -473,22 +478,28 @@ class HUseIterator V8_FINAL BASE_EMBEDDED {
};
-// There must be one corresponding kDepends flag for every kChanges flag and
-// the order of the kChanges flags must be exactly the same as of the kDepends
-// flags. All tracked flags should appear before untracked ones.
+// All tracked flags should appear before untracked ones.
enum GVNFlag {
// Declare global value numbering flags.
-#define DECLARE_FLAG(type) kChanges##type, kDependsOn##type,
+#define DECLARE_FLAG(Type) k##Type,
GVN_TRACKED_FLAG_LIST(DECLARE_FLAG)
GVN_UNTRACKED_FLAG_LIST(DECLARE_FLAG)
#undef DECLARE_FLAG
- kNumberOfFlags,
-#define COUNT_FLAG(type) + 1
- kNumberOfTrackedSideEffects = 0 GVN_TRACKED_FLAG_LIST(COUNT_FLAG)
+#define COUNT_FLAG(Type) + 1
+ kNumberOfTrackedSideEffects = 0 GVN_TRACKED_FLAG_LIST(COUNT_FLAG),
+ kNumberOfUntrackedSideEffects = 0 GVN_UNTRACKED_FLAG_LIST(COUNT_FLAG),
#undef COUNT_FLAG
+ kNumberOfFlags = kNumberOfTrackedSideEffects + kNumberOfUntrackedSideEffects
};
+static inline GVNFlag GVNFlagFromInt(int i) {
+ ASSERT(i >= 0);
+ ASSERT(i < kNumberOfFlags);
+ return static_cast<GVNFlag>(i);
+}
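+
+// The kNumberOfTrackedSideEffects and kNumberOfUntrackedSideEffects values
+// above come from a preprocessor counting idiom: expanding the flag list with
+// a macro that emits "+ 1" for every entry. A self-contained example with a
+// made-up three-entry list (not part of the patch) shows the trick:
+//
+//   #define EXAMPLE_FLAG_LIST(V) \
+//     V(Maps)                    \
+//     V(ArrayLengths)            \
+//     V(GlobalVars)
+//
+//   enum ExampleFlag {
+//   #define DECLARE_EXAMPLE_FLAG(Type) kExample##Type,
+//     EXAMPLE_FLAG_LIST(DECLARE_EXAMPLE_FLAG)
+//   #undef DECLARE_EXAMPLE_FLAG
+//   #define COUNT_EXAMPLE_FLAG(Type) + 1
+//     // Expands to 0 + 1 + 1 + 1, i.e. 3.
+//     kExampleNumberOfFlags = 0 EXAMPLE_FLAG_LIST(COUNT_EXAMPLE_FLAG)
+//   #undef COUNT_EXAMPLE_FLAG
+//   };
+//
+//   static_assert(kExampleNumberOfFlags == 3, "counting idiom");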
+
+
class DecompositionResult V8_FINAL BASE_EMBEDDED {
public:
DecompositionResult() : base_(NULL), offset_(0), scale_(0) {}
@@ -534,7 +545,62 @@ class DecompositionResult V8_FINAL BASE_EMBEDDED {
};
-typedef EnumSet<GVNFlag, int64_t> GVNFlagSet;
+typedef EnumSet<GVNFlag, int32_t> GVNFlagSet;
+
+
+// This class encapsulates encoding and decoding of source positions from
+// which hydrogen values originated.
+// When FLAG_hydrogen_track_positions is set, this object encodes the
+// identifier of the inlining and the absolute offset from the start of the
+// inlined function.
+// When the flag is not set, we simply track the absolute offset from the
+// script start.
+class HSourcePosition {
+ public:
+ HSourcePosition(const HSourcePosition& other) : value_(other.value_) { }
+
+ static HSourcePosition Unknown() {
+ return HSourcePosition(RelocInfo::kNoPosition);
+ }
+
+ bool IsUnknown() const { return value_ == RelocInfo::kNoPosition; }
+
+ int position() const { return PositionField::decode(value_); }
+ void set_position(int position) {
+ if (FLAG_hydrogen_track_positions) {
+ value_ = static_cast<int>(PositionField::update(value_, position));
+ } else {
+ value_ = position;
+ }
+ }
+
+ int inlining_id() const { return InliningIdField::decode(value_); }
+ void set_inlining_id(int inlining_id) {
+ if (FLAG_hydrogen_track_positions) {
+ value_ = static_cast<int>(InliningIdField::update(value_, inlining_id));
+ }
+ }
+
+ int raw() const { return value_; }
+
+ void PrintTo(FILE* f);
+
+ private:
+ typedef BitField<int, 0, 9> InliningIdField;
+
+ // Offset from the start of the inlined function.
+ typedef BitField<int, 9, 22> PositionField;
+
+ // Only HPositionInfo can use this constructor.
+ explicit HSourcePosition(int value) : value_(value) { }
+
+ friend class HPositionInfo;
+
+ // If FLAG_hydrogen_track_positions is set, contains the bitfields InliningIdField
+ // and PositionField.
+ // Otherwise contains absolute offset from the script start.
+ int value_;
+};
class HValue : public ZoneObject {
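The HSourcePosition class added above packs two values into a single int whenever FLAG_hydrogen_track_positions is on: a 9-bit inlining identifier and a 22-bit offset from the start of the inlined function. A standalone sketch of that encoding, with field widths taken from the BitField declarations in the patch and helper names invented for illustration:

#include <cassert>
#include <cstdint>

// Bits 0..8 hold the inlining id, bits 9..30 hold the offset from the start
// of the inlined function, mirroring InliningIdField and PositionField.
constexpr int kInliningIdBits = 9;
constexpr int kPositionBits = 22;
constexpr uint32_t kInliningIdMask = (1u << kInliningIdBits) - 1;
constexpr uint32_t kPositionMask = (1u << kPositionBits) - 1;

uint32_t Encode(int inlining_id, int position) {
  assert(static_cast<uint32_t>(inlining_id) <= kInliningIdMask);
  assert(static_cast<uint32_t>(position) <= kPositionMask);
  return (static_cast<uint32_t>(position) << kInliningIdBits) |
         static_cast<uint32_t>(inlining_id);
}

int DecodeInliningId(uint32_t value) {
  return static_cast<int>(value & kInliningIdMask);
}

int DecodePosition(uint32_t value) {
  return static_cast<int>((value >> kInliningIdBits) & kPositionMask);
}

int main() {
  uint32_t packed = Encode(/*inlining_id=*/3, /*position=*/1234);
  assert(DecodeInliningId(packed) == 3);
  assert(DecodePosition(packed) == 1234);
}

Keeping both pieces in one machine word lets every HValue carry a position without extra allocation; when the flag is off, value_ degenerates to the plain script offset as before.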
@@ -556,6 +622,9 @@ class HValue : public ZoneObject {
kCanOverflow,
kBailoutOnMinusZero,
kCanBeDivByZero,
+ kLeftCanBeMinInt,
+ kLeftCanBeNegative,
+ kLeftCanBePositive,
kAllowUndefinedAsNaN,
kIsArguments,
kTruncatingToInt32,
@@ -585,18 +654,6 @@ class HValue : public ZoneObject {
STATIC_ASSERT(kLastFlag < kBitsPerInt);
- static const int kChangesToDependsFlagsLeftShift = 1;
-
- static GVNFlag ChangesFlagFromInt(int x) {
- return static_cast<GVNFlag>(x * 2);
- }
- static GVNFlag DependsOnFlagFromInt(int x) {
- return static_cast<GVNFlag>(x * 2 + 1);
- }
- static GVNFlagSet ConvertChangesToDependsFlags(GVNFlagSet flags) {
- return GVNFlagSet(flags.ToIntegral() << kChangesToDependsFlagsLeftShift);
- }
-
static HValue* cast(HValue* value) { return value; }
enum Opcode {
@@ -630,8 +687,12 @@ class HValue : public ZoneObject {
flags_(0) {}
virtual ~HValue() {}
- virtual int position() const { return RelocInfo::kNoPosition; }
- virtual int operand_position(int index) const { return position(); }
+ virtual HSourcePosition position() const {
+ return HSourcePosition::Unknown();
+ }
+ virtual HSourcePosition operand_position(int index) const {
+ return position();
+ }
HBasicBlock* block() const { return block_; }
void SetBlock(HBasicBlock* block);
@@ -681,21 +742,6 @@ class HValue : public ZoneObject {
return representation_.IsHeapObject() || type_.IsHeapObject();
}
- // An operation needs to override this function iff:
- // 1) it can produce an int32 output.
- // 2) the true value of its output can potentially be minus zero.
- // The implementation must set a flag so that it bails out in the case where
- // it would otherwise output what should be a minus zero as an int32 zero.
- // If the operation also exists in a form that takes int32 and outputs int32
- // then the operation should return its input value so that we can propagate
- // back. There are three operations that need to propagate back to more than
- // one input. They are phi and binary div and mul. They always return NULL
- // and expect the caller to take care of things.
- virtual HValue* EnsureAndPropagateNotMinusZero(BitVector* visited) {
- visited->Add(id());
- return NULL;
- }
-
// There are HInstructions that do not really change a value, they
// only add pieces of information to it (like bounds checks, map checks,
// smi checks...).
@@ -772,43 +818,38 @@ class HValue : public ZoneObject {
// of uses is non-empty.
bool HasAtLeastOneUseWithFlagAndNoneWithout(Flag f) const;
- GVNFlagSet gvn_flags() const { return gvn_flags_; }
- void SetGVNFlag(GVNFlag f) { gvn_flags_.Add(f); }
- void ClearGVNFlag(GVNFlag f) { gvn_flags_.Remove(f); }
- bool CheckGVNFlag(GVNFlag f) const { return gvn_flags_.Contains(f); }
- void SetAllSideEffects() { gvn_flags_.Add(AllSideEffectsFlagSet()); }
+ GVNFlagSet ChangesFlags() const { return changes_flags_; }
+ GVNFlagSet DependsOnFlags() const { return depends_on_flags_; }
+ void SetChangesFlag(GVNFlag f) { changes_flags_.Add(f); }
+ void SetDependsOnFlag(GVNFlag f) { depends_on_flags_.Add(f); }
+ void ClearChangesFlag(GVNFlag f) { changes_flags_.Remove(f); }
+ void ClearDependsOnFlag(GVNFlag f) { depends_on_flags_.Remove(f); }
+ bool CheckChangesFlag(GVNFlag f) const {
+ return changes_flags_.Contains(f);
+ }
+ bool CheckDependsOnFlag(GVNFlag f) const {
+ return depends_on_flags_.Contains(f);
+ }
+ void SetAllSideEffects() { changes_flags_.Add(AllSideEffectsFlagSet()); }
void ClearAllSideEffects() {
- gvn_flags_.Remove(AllSideEffectsFlagSet());
+ changes_flags_.Remove(AllSideEffectsFlagSet());
}
bool HasSideEffects() const {
- return gvn_flags_.ContainsAnyOf(AllSideEffectsFlagSet());
+ return changes_flags_.ContainsAnyOf(AllSideEffectsFlagSet());
}
bool HasObservableSideEffects() const {
return !CheckFlag(kHasNoObservableSideEffects) &&
- gvn_flags_.ContainsAnyOf(AllObservableSideEffectsFlagSet());
- }
-
- GVNFlagSet DependsOnFlags() const {
- GVNFlagSet result = gvn_flags_;
- result.Intersect(AllDependsOnFlagSet());
- return result;
+ changes_flags_.ContainsAnyOf(AllObservableSideEffectsFlagSet());
}
GVNFlagSet SideEffectFlags() const {
- GVNFlagSet result = gvn_flags_;
+ GVNFlagSet result = ChangesFlags();
result.Intersect(AllSideEffectsFlagSet());
return result;
}
- GVNFlagSet ChangesFlags() const {
- GVNFlagSet result = gvn_flags_;
- result.Intersect(AllChangesFlagSet());
- return result;
- }
-
GVNFlagSet ObservableChangesFlags() const {
- GVNFlagSet result = gvn_flags_;
- result.Intersect(AllChangesFlagSet());
+ GVNFlagSet result = ChangesFlags();
result.Intersect(AllObservableSideEffectsFlagSet());
return result;
}
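With the paired kChanges*/kDependsOn* flags gone, each side-effect type is a single GVNFlag and every HValue carries two independent sets, changes_flags_ and depends_on_flags_, queried through the accessors above. A rough sketch of how a GVN-style pass consumes the two sets, with a plain bitset standing in for V8's EnumSet and an invented flag subset:

#include <cassert>
#include <cstdint>

// A handful of flags standing in for GVN_TRACKED/UNTRACKED_FLAG_LIST.
enum GVNFlag { kMaps, kElementsKind, kArrayElements, kNewSpacePromotion };

class GVNFlagSet {
 public:
  void Add(GVNFlag f) { bits_ |= 1u << f; }
  bool ContainsAnyOf(GVNFlagSet other) const {
    return (bits_ & other.bits_) != 0;
  }
 private:
  uint32_t bits_ = 0;
};

struct Value {
  GVNFlagSet changes;     // side effects this instruction produces
  GVNFlagSet depends_on;  // side effects that invalidate this instruction
};

// A cached value may only be reused across `other` if nothing `other`
// changes is something the cached value depends on.
bool CanReuseAcross(const Value& cached, const Value& other) {
  return !cached.depends_on.ContainsAnyOf(other.changes);
}

int main() {
  Value load_field;  load_field.depends_on.Add(kMaps);
  Value map_change;  map_change.changes.Add(kMaps);
  Value allocation;  allocation.changes.Add(kNewSpacePromotion);
  assert(!CanReuseAcross(load_field, map_change));  // the map change kills the load
  assert(CanReuseAcross(load_field, allocation));   // the allocation does not
}

Since each type now needs only one enum value instead of two, the total flag count is halved, which is presumably why the GVNFlagSet typedef earlier in this patch shrinks from int64_t to int32_t.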
@@ -816,11 +857,6 @@ class HValue : public ZoneObject {
Range* range() const { return range_; }
// TODO(svenpanne) We should really use the null object pattern here.
bool HasRange() const { return range_ != NULL; }
- bool CanBeNegative() const { return !HasRange() || range()->CanBeNegative(); }
- bool CanBeZero() const { return !HasRange() || range()->CanBeZero(); }
- bool RangeCanInclude(int value) const {
- return !HasRange() || range()->Includes(value);
- }
void AddNewRange(Range* r, Zone* zone);
void RemoveLastAddedRange();
void ComputeInitialRange(Zone* zone);
@@ -949,20 +985,9 @@ class HValue : public ZoneObject {
representation_ = r;
}
- static GVNFlagSet AllDependsOnFlagSet() {
+ static GVNFlagSet AllFlagSet() {
GVNFlagSet result;
- // Create changes mask.
-#define ADD_FLAG(type) result.Add(kDependsOn##type);
- GVN_TRACKED_FLAG_LIST(ADD_FLAG)
- GVN_UNTRACKED_FLAG_LIST(ADD_FLAG)
-#undef ADD_FLAG
- return result;
- }
-
- static GVNFlagSet AllChangesFlagSet() {
- GVNFlagSet result;
- // Create changes mask.
-#define ADD_FLAG(type) result.Add(kChanges##type);
+#define ADD_FLAG(Type) result.Add(k##Type);
GVN_TRACKED_FLAG_LIST(ADD_FLAG)
GVN_UNTRACKED_FLAG_LIST(ADD_FLAG)
#undef ADD_FLAG
@@ -971,19 +996,19 @@ class HValue : public ZoneObject {
// A flag mask to mark an instruction as having arbitrary side effects.
static GVNFlagSet AllSideEffectsFlagSet() {
- GVNFlagSet result = AllChangesFlagSet();
- result.Remove(kChangesOsrEntries);
+ GVNFlagSet result = AllFlagSet();
+ result.Remove(kOsrEntries);
return result;
}
// A flag mask of all side effects that can make observable changes in
// an executing program (i.e. are not safe to repeat, move or remove);
static GVNFlagSet AllObservableSideEffectsFlagSet() {
- GVNFlagSet result = AllChangesFlagSet();
- result.Remove(kChangesNewSpacePromotion);
- result.Remove(kChangesElementsKind);
- result.Remove(kChangesElementsPointer);
- result.Remove(kChangesMaps);
+ GVNFlagSet result = AllFlagSet();
+ result.Remove(kNewSpacePromotion);
+ result.Remove(kElementsKind);
+ result.Remove(kElementsPointer);
+ result.Remove(kMaps);
return result;
}
@@ -1004,7 +1029,8 @@ class HValue : public ZoneObject {
HUseListNode* use_list_;
Range* range_;
int flags_;
- GVNFlagSet gvn_flags_;
+ GVNFlagSet changes_flags_;
+ GVNFlagSet depends_on_flags_;
private:
virtual bool IsDeletable() const { return false; }
@@ -1103,25 +1129,22 @@ class HValue : public ZoneObject {
// In the first case it contains the instruction's position as a tagged value.
// In the second case it points to an array which contains instruction's
// position and operands' positions.
-// TODO(vegorov): what we really want to track here is a combination of
-// source position and a script id because cross script inlining can easily
-// result in optimized functions composed of several scripts.
class HPositionInfo {
public:
explicit HPositionInfo(int pos) : data_(TagPosition(pos)) { }
- int position() const {
+ HSourcePosition position() const {
if (has_operand_positions()) {
- return static_cast<int>(operand_positions()[kInstructionPosIndex]);
+ return operand_positions()[kInstructionPosIndex];
}
- return static_cast<int>(UntagPosition(data_));
+ return HSourcePosition(static_cast<int>(UntagPosition(data_)));
}
- void set_position(int pos) {
+ void set_position(HSourcePosition pos) {
if (has_operand_positions()) {
operand_positions()[kInstructionPosIndex] = pos;
} else {
- data_ = TagPosition(pos);
+ data_ = TagPosition(pos.raw());
}
}
@@ -1131,27 +1154,27 @@ class HPositionInfo {
}
const int length = kFirstOperandPosIndex + operand_count;
- intptr_t* positions =
- zone->NewArray<intptr_t>(length);
+ HSourcePosition* positions =
+ zone->NewArray<HSourcePosition>(length);
for (int i = 0; i < length; i++) {
- positions[i] = RelocInfo::kNoPosition;
+ positions[i] = HSourcePosition::Unknown();
}
- const int pos = position();
+ const HSourcePosition pos = position();
data_ = reinterpret_cast<intptr_t>(positions);
set_position(pos);
ASSERT(has_operand_positions());
}
- int operand_position(int idx) const {
+ HSourcePosition operand_position(int idx) const {
if (!has_operand_positions()) {
return position();
}
- return static_cast<int>(*operand_position_slot(idx));
+ return *operand_position_slot(idx);
}
- void set_operand_position(int idx, int pos) {
+ void set_operand_position(int idx, HSourcePosition pos) {
*operand_position_slot(idx) = pos;
}
@@ -1159,7 +1182,7 @@ class HPositionInfo {
static const intptr_t kInstructionPosIndex = 0;
static const intptr_t kFirstOperandPosIndex = 1;
- intptr_t* operand_position_slot(int idx) const {
+ HSourcePosition* operand_position_slot(int idx) const {
ASSERT(has_operand_positions());
return &(operand_positions()[kFirstOperandPosIndex + idx]);
}
@@ -1168,9 +1191,9 @@ class HPositionInfo {
return !IsTaggedPosition(data_);
}
- intptr_t* operand_positions() const {
+ HSourcePosition* operand_positions() const {
ASSERT(has_operand_positions());
- return reinterpret_cast<intptr_t*>(data_);
+ return reinterpret_cast<HSourcePosition*>(data_);
}
static const intptr_t kPositionTag = 1;
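HPositionInfo overloads a single intptr_t: either a position tagged with kPositionTag in the low bit, or a pointer to a zone-allocated array of operand positions (pointers are at least 2-byte aligned, so their low bit is free). A small sketch of that tagging trick, with invented helper names:

#include <cassert>
#include <cstdint>

constexpr intptr_t kTag = 1;

// Small non-negative integers are shifted left and marked with the tag bit...
intptr_t TagInt(int value) {
  assert(value >= 0);
  return (static_cast<intptr_t>(value) << 1) | kTag;
}
int UntagInt(intptr_t data) {
  assert((data & kTag) != 0);
  return static_cast<int>(data >> 1);
}

// ...while aligned pointers are stored as-is, their low bit already zero.
intptr_t TagPointer(const void* p) {
  intptr_t data = reinterpret_cast<intptr_t>(p);
  assert((data & kTag) == 0);
  return data;
}

bool HoldsInt(intptr_t data) { return (data & kTag) != 0; }

int main() {
  int positions[4] = {0, 1, 2, 3};
  intptr_t a = TagInt(42);
  intptr_t b = TagPointer(positions);
  assert(HoldsInt(a) && UntagInt(a) == 42);
  assert(!HoldsInt(b));
}

Most instructions only ever need the single tagged position, so the operand-position array is allocated lazily via ensure_storage_for_operand_positions.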
@@ -1218,23 +1241,23 @@ class HInstruction : public HValue {
}
// The position is a write-once variable.
- virtual int position() const V8_OVERRIDE {
- return position_.position();
+ virtual HSourcePosition position() const V8_OVERRIDE {
+ return HSourcePosition(position_.position());
}
bool has_position() const {
- return position_.position() != RelocInfo::kNoPosition;
+ return !position().IsUnknown();
}
- void set_position(int position) {
+ void set_position(HSourcePosition position) {
ASSERT(!has_position());
- ASSERT(position != RelocInfo::kNoPosition);
+ ASSERT(!position.IsUnknown());
position_.set_position(position);
}
- virtual int operand_position(int index) const V8_OVERRIDE {
- const int pos = position_.operand_position(index);
- return (pos != RelocInfo::kNoPosition) ? pos : position();
+ virtual HSourcePosition operand_position(int index) const V8_OVERRIDE {
+ const HSourcePosition pos = position_.operand_position(index);
+ return pos.IsUnknown() ? position() : pos;
}
- void set_operand_position(Zone* zone, int index, int pos) {
+ void set_operand_position(Zone* zone, int index, HSourcePosition pos) {
ASSERT(0 <= index && index < OperandCount());
position_.ensure_storage_for_operand_positions(zone, OperandCount());
position_.set_operand_position(index, pos);
@@ -1248,6 +1271,8 @@ class HInstruction : public HValue {
virtual void Verify() V8_OVERRIDE;
#endif
+ bool CanDeoptimize();
+
virtual bool HasStackCheck() { return false; }
DECLARE_ABSTRACT_INSTRUCTION(Instruction)
@@ -1258,7 +1283,7 @@ class HInstruction : public HValue {
next_(NULL),
previous_(NULL),
position_(RelocInfo::kNoPosition) {
- SetGVNFlag(kDependsOnOsrEntries);
+ SetDependsOnFlag(kOsrEntries);
}
virtual void DeleteFromGraph() V8_OVERRIDE { Unlink(); }
@@ -1679,9 +1704,6 @@ class HForceRepresentation V8_FINAL : public HTemplateInstruction<1> {
HValue* value() { return OperandAt(0); }
- virtual HValue* EnsureAndPropagateNotMinusZero(
- BitVector* visited) V8_OVERRIDE;
-
virtual Representation RequiredInputRepresentation(int index) V8_OVERRIDE {
return representation(); // Same as the output representation.
}
@@ -1710,6 +1732,7 @@ class HChange V8_FINAL : public HUnaryOperation {
ASSERT(!value->representation().Equals(to));
set_representation(to);
SetFlag(kUseGVN);
+ SetFlag(kCanOverflow);
if (is_truncating_to_smi) {
SetFlag(kTruncatingToSmi);
SetFlag(kTruncatingToInt32);
@@ -1719,7 +1742,7 @@ class HChange V8_FINAL : public HUnaryOperation {
set_type(HType::Smi());
} else {
set_type(HType::TaggedNumber());
- if (to.IsTagged()) SetGVNFlag(kChangesNewSpacePromotion);
+ if (to.IsTagged()) SetChangesFlag(kNewSpacePromotion);
}
}
@@ -1727,8 +1750,6 @@ class HChange V8_FINAL : public HUnaryOperation {
return CheckUsesForFlag(kAllowUndefinedAsNaN);
}
- virtual HValue* EnsureAndPropagateNotMinusZero(
- BitVector* visited) V8_OVERRIDE;
virtual HType CalculateInferredType() V8_OVERRIDE;
virtual HValue* Canonicalize() V8_OVERRIDE;
@@ -1782,6 +1803,65 @@ class HClampToUint8 V8_FINAL : public HUnaryOperation {
};
+class HDoubleBits V8_FINAL : public HUnaryOperation {
+ public:
+ enum Bits { HIGH, LOW };
+ DECLARE_INSTRUCTION_FACTORY_P2(HDoubleBits, HValue*, Bits);
+
+ virtual Representation RequiredInputRepresentation(int index) V8_OVERRIDE {
+ return Representation::Double();
+ }
+
+ DECLARE_CONCRETE_INSTRUCTION(DoubleBits)
+
+ Bits bits() { return bits_; }
+
+ protected:
+ virtual bool DataEquals(HValue* other) V8_OVERRIDE {
+ return other->IsDoubleBits() && HDoubleBits::cast(other)->bits() == bits();
+ }
+
+ private:
+ HDoubleBits(HValue* value, Bits bits)
+ : HUnaryOperation(value), bits_(bits) {
+ set_representation(Representation::Integer32());
+ SetFlag(kUseGVN);
+ }
+
+ virtual bool IsDeletable() const V8_OVERRIDE { return true; }
+
+ Bits bits_;
+};
+
+
+class HConstructDouble V8_FINAL : public HTemplateInstruction<2> {
+ public:
+ DECLARE_INSTRUCTION_FACTORY_P2(HConstructDouble, HValue*, HValue*);
+
+ virtual Representation RequiredInputRepresentation(int index) V8_OVERRIDE {
+ return Representation::Integer32();
+ }
+
+ DECLARE_CONCRETE_INSTRUCTION(ConstructDouble)
+
+ HValue* hi() { return OperandAt(0); }
+ HValue* lo() { return OperandAt(1); }
+
+ protected:
+ virtual bool DataEquals(HValue* other) V8_OVERRIDE { return true; }
+
+ private:
+ explicit HConstructDouble(HValue* hi, HValue* lo) {
+ set_representation(Representation::Double());
+ SetFlag(kUseGVN);
+ SetOperandAt(0, hi);
+ SetOperandAt(1, lo);
+ }
+
+ virtual bool IsDeletable() const V8_OVERRIDE { return true; }
+};
+
+
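The two new instructions above operate on the raw IEEE-754 bit pattern of a double: HDoubleBits extracts the high or low 32-bit word, and HConstructDouble rebuilds a double from two such words. What they compute corresponds to this portable sketch of the semantics, not of the code the backends emit:

#include <cassert>
#include <cstdint>
#include <cstring>

// Extract the high or low 32 bits of a double's IEEE-754 representation,
// mirroring HDoubleBits(HIGH) and HDoubleBits(LOW).
uint32_t DoubleHighBits(double d) {
  uint64_t bits;
  std::memcpy(&bits, &d, sizeof(bits));
  return static_cast<uint32_t>(bits >> 32);
}

uint32_t DoubleLowBits(double d) {
  uint64_t bits;
  std::memcpy(&bits, &d, sizeof(bits));
  return static_cast<uint32_t>(bits);
}

// Rebuild a double from its two halves, mirroring HConstructDouble(hi, lo).
double ConstructDouble(uint32_t hi, uint32_t lo) {
  uint64_t bits = (static_cast<uint64_t>(hi) << 32) | lo;
  double d;
  std::memcpy(&d, &bits, sizeof(d));
  return d;
}

int main() {
  double x = 1.5;
  assert(ConstructDouble(DoubleHighBits(x), DoubleLowBits(x)) == x);
}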
enum RemovableSimulate {
REMOVABLE_SIMULATE,
FIXED_SIMULATE
@@ -1967,7 +2047,7 @@ class HStackCheck V8_FINAL : public HTemplateInstruction<1> {
private:
HStackCheck(HValue* context, Type type) : type_(type) {
SetOperandAt(0, context);
- SetGVNFlag(kChangesNewSpacePromotion);
+ SetChangesFlag(kNewSpacePromotion);
}
Type type_;
@@ -2515,7 +2595,7 @@ class HMapEnumLength V8_FINAL : public HUnaryOperation {
: HUnaryOperation(value, HType::Smi()) {
set_representation(Representation::Smi());
SetFlag(kUseGVN);
- SetGVNFlag(kDependsOnMaps);
+ SetDependsOnFlag(kMaps);
}
virtual bool IsDeletable() const V8_OVERRIDE { return true; }
@@ -2534,9 +2614,6 @@ class HUnaryMathOperation V8_FINAL : public HTemplateInstruction<2> {
virtual void PrintDataTo(StringStream* stream) V8_OVERRIDE;
- virtual HValue* EnsureAndPropagateNotMinusZero(
- BitVector* visited) V8_OVERRIDE;
-
virtual Representation RequiredInputRepresentation(int index) V8_OVERRIDE {
if (index == 0) {
return Representation::Tagged();
@@ -2551,6 +2628,8 @@ class HUnaryMathOperation V8_FINAL : public HTemplateInstruction<2> {
return Representation::Double();
case kMathAbs:
return representation();
+ case kMathClz32:
+ return Representation::Integer32();
default:
UNREACHABLE();
return Representation::None();
@@ -2582,6 +2661,7 @@ class HUnaryMathOperation V8_FINAL : public HTemplateInstruction<2> {
switch (op) {
case kMathFloor:
case kMathRound:
+ case kMathClz32:
set_representation(Representation::Integer32());
break;
case kMathAbs:
@@ -2589,7 +2669,7 @@ class HUnaryMathOperation V8_FINAL : public HTemplateInstruction<2> {
SetFlag(kFlexibleRepresentation);
// TODO(svenpanne) This flag is actually only needed if representation()
// is tagged, and not when it is an unboxed double or unboxed integer.
- SetGVNFlag(kChangesNewSpacePromotion);
+ SetChangesFlag(kNewSpacePromotion);
break;
case kMathLog:
case kMathExp:
@@ -2638,7 +2718,7 @@ class HLoadRoot V8_FINAL : public HTemplateInstruction<0> {
SetFlag(kUseGVN);
// TODO(bmeurer): We'll need kDependsOnRoots once we add the
// corresponding HStoreRoot instruction.
- SetGVNFlag(kDependsOnCalls);
+ SetDependsOnFlag(kCalls);
}
virtual bool IsDeletable() const V8_OVERRIDE { return true; }
@@ -2651,10 +2731,10 @@ class HCheckMaps V8_FINAL : public HTemplateInstruction<2> {
public:
static HCheckMaps* New(Zone* zone, HValue* context, HValue* value,
Handle<Map> map, CompilationInfo* info,
- HValue *typecheck = NULL);
+ HValue* typecheck = NULL);
static HCheckMaps* New(Zone* zone, HValue* context,
HValue* value, SmallMapList* maps,
- HValue *typecheck = NULL) {
+ HValue* typecheck = NULL) {
HCheckMaps* check_map = new(zone) HCheckMaps(value, zone, typecheck);
for (int i = 0; i < maps->length(); i++) {
check_map->Add(maps->at(i), zone);
@@ -2673,10 +2753,18 @@ class HCheckMaps V8_FINAL : public HTemplateInstruction<2> {
virtual void PrintDataTo(StringStream* stream) V8_OVERRIDE;
HValue* value() { return OperandAt(0); }
+ HValue* typecheck() { return OperandAt(1); }
Unique<Map> first_map() const { return map_set_.at(0); }
UniqueSet<Map> map_set() const { return map_set_; }
+ void set_map_set(UniqueSet<Map>* maps, Zone* zone) {
+ map_set_.Clear();
+ for (int i = 0; i < maps->size(); i++) {
+ map_set_.Add(maps->at(i), zone);
+ }
+ }
+
bool has_migration_target() const {
return has_migration_target_;
}
@@ -2693,9 +2781,12 @@ class HCheckMaps V8_FINAL : public HTemplateInstruction<2> {
private:
void Add(Handle<Map> map, Zone* zone) {
map_set_.Add(Unique<Map>(map), zone);
+ SetDependsOnFlag(kMaps);
+ SetDependsOnFlag(kElementsKind);
+
if (!has_migration_target_ && map->is_migration_target()) {
has_migration_target_ = true;
- SetGVNFlag(kChangesNewSpacePromotion);
+ SetChangesFlag(kNewSpacePromotion);
}
}
@@ -2709,8 +2800,6 @@ class HCheckMaps V8_FINAL : public HTemplateInstruction<2> {
set_representation(Representation::Tagged());
SetFlag(kUseGVN);
SetFlag(kTrackSideEffectDominators);
- SetGVNFlag(kDependsOnMaps);
- SetGVNFlag(kDependsOnElementsKind);
}
bool omit_;
@@ -3149,7 +3238,7 @@ class HPhi V8_FINAL : public HValue {
bool IsReceiver() const { return merged_index_ == 0; }
bool HasMergedIndex() const { return merged_index_ != kInvalidMergedIndex; }
- virtual int position() const V8_OVERRIDE;
+ virtual HSourcePosition position() const V8_OVERRIDE;
int merged_index() const { return merged_index_; }
@@ -3314,7 +3403,7 @@ class HCapturedObject V8_FINAL : public HDematerializedObject {
void ReuseSideEffectsFromStore(HInstruction* store) {
ASSERT(store->HasObservableSideEffects());
ASSERT(store->IsStoreNamedField());
- gvn_flags_.Add(store->gvn_flags());
+ changes_flags_.Add(store->ChangesFlags());
}
// Replay effects of this instruction on the given environment.
@@ -3365,8 +3454,8 @@ class HConstant V8_FINAL : public HTemplateInstruction<0> {
bool is_not_in_new_space,
HInstruction* instruction) {
return instruction->Prepend(new(zone) HConstant(
- unique, Representation::Tagged(), HType::Tagged(), false,
- is_not_in_new_space, false, false));
+ unique, Representation::Tagged(), HType::Tagged(),
+ is_not_in_new_space, false, false, kUnknownInstanceType));
}
Handle<Object> handle(Isolate* isolate) {
@@ -3401,7 +3490,7 @@ class HConstant V8_FINAL : public HTemplateInstruction<0> {
bool ImmortalImmovable() const;
bool IsCell() const {
- return is_cell_;
+ return instance_type_ == CELL_TYPE || instance_type_ == PROPERTY_CELL_TYPE;
}
virtual Representation RequiredInputRepresentation(int index) V8_OVERRIDE {
@@ -3449,14 +3538,14 @@ class HConstant V8_FINAL : public HTemplateInstruction<0> {
bool HasStringValue() const {
if (has_double_value_ || has_int32_value_) return false;
ASSERT(!object_.handle().is_null());
- return type_.IsString();
+ return instance_type_ < FIRST_NONSTRING_TYPE;
}
Handle<String> StringValue() const {
ASSERT(HasStringValue());
return Handle<String>::cast(object_.handle());
}
bool HasInternalizedStringValue() const {
- return HasStringValue() && is_internalized_string_;
+ return HasStringValue() && StringShape(instance_type_).IsInternalized();
}
bool HasExternalReferenceValue() const {
@@ -3468,6 +3557,8 @@ class HConstant V8_FINAL : public HTemplateInstruction<0> {
bool HasBooleanValue() const { return type_.IsBoolean(); }
bool BooleanValue() const { return boolean_value_; }
+ bool IsUndetectable() const { return is_undetectable_; }
+ InstanceType GetInstanceType() const { return instance_type_; }
virtual intptr_t Hashcode() V8_OVERRIDE {
if (has_int32_value_) {
@@ -3493,6 +3584,10 @@ class HConstant V8_FINAL : public HTemplateInstruction<0> {
return object_;
}
+ bool EqualsUnique(Unique<Object> other) const {
+ return object_.IsInitialized() && object_ == other;
+ }
+
virtual bool DataEquals(HValue* other) V8_OVERRIDE {
HConstant* other_constant = HConstant::cast(other);
if (has_int32_value_) {
@@ -3540,10 +3635,10 @@ class HConstant V8_FINAL : public HTemplateInstruction<0> {
HConstant(Unique<Object> unique,
Representation r,
HType type,
- bool is_internalized_string,
bool is_not_in_new_space,
- bool is_cell,
- bool boolean_value);
+ bool boolean_value,
+ bool is_undetectable,
+ InstanceType instance_type);
explicit HConstant(ExternalReference reference);
@@ -3566,13 +3661,15 @@ class HConstant V8_FINAL : public HTemplateInstruction<0> {
bool has_int32_value_ : 1;
bool has_double_value_ : 1;
bool has_external_reference_value_ : 1;
- bool is_internalized_string_ : 1; // TODO(yangguo): make this part of HType.
bool is_not_in_new_space_ : 1;
- bool is_cell_ : 1;
bool boolean_value_ : 1;
+ bool is_undetectable_: 1;
int32_t int32_value_;
double double_value_;
ExternalReference external_reference_value_;
+
+ static const InstanceType kUnknownInstanceType = FILLER_TYPE;
+ InstanceType instance_type_;
};
@@ -3654,11 +3751,19 @@ class HBinaryOperation : public HTemplateInstruction<3> {
return representation();
}
- void SetOperandPositions(Zone* zone, int left_pos, int right_pos) {
+ void SetOperandPositions(Zone* zone,
+ HSourcePosition left_pos,
+ HSourcePosition right_pos) {
set_operand_position(zone, 1, left_pos);
set_operand_position(zone, 2, right_pos);
}
+ bool RightIsPowerOf2() {
+ if (!right()->IsInteger32Constant()) return false;
+ int32_t value = right()->GetInteger32Constant();
+ return value != 0 && (IsPowerOf2(value) || IsPowerOf2(-value));
+ }
+
DECLARE_ABSTRACT_INSTRUCTION(BinaryOperation)
private:
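RightIsPowerOf2, moved up to HBinaryOperation in this hunk, accepts a constant right operand equal to plus or minus a power of two so that division and modulus by such constants can be strength-reduced. The underlying test is the usual clear-the-lowest-bit trick; a self-contained sketch with invented helper names (negation is done in unsigned arithmetic so that INT32_MIN does not overflow):

#include <cassert>
#include <cstdint>

// A non-zero value is a power of two iff clearing its lowest set bit yields 0.
bool IsPowerOfTwo(uint32_t v) { return v != 0 && (v & (v - 1)) == 0; }

// Accept both +2^k and -2^k, like RightIsPowerOf2 does for a constant right
// operand. Working on the unsigned magnitude avoids signed overflow.
bool IsPowerOfTwoOrNegation(int32_t value) {
  uint32_t bits = static_cast<uint32_t>(value);
  return IsPowerOfTwo(bits) || IsPowerOfTwo(0u - bits);
}

int main() {
  assert(IsPowerOfTwoOrNegation(8));
  assert(IsPowerOfTwoOrNegation(-16));
  assert(!IsPowerOfTwoOrNegation(12));
  assert(!IsPowerOfTwoOrNegation(0));
}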
@@ -3947,7 +4052,6 @@ class HBitwiseBinaryOperation : public HBinaryOperation {
}
virtual void RepresentationChanged(Representation to) V8_OVERRIDE {
- if (to.IsTagged()) SetGVNFlag(kChangesNewSpacePromotion);
if (to.IsTagged() &&
(left()->ToNumberCanBeObserved() || right()->ToNumberCanBeObserved())) {
SetAllSideEffects();
@@ -3956,6 +4060,7 @@ class HBitwiseBinaryOperation : public HBinaryOperation {
ClearAllSideEffects();
SetFlag(kUseGVN);
}
+ if (to.IsTagged()) SetChangesFlag(kNewSpacePromotion);
}
virtual void UpdateRepresentation(Representation new_rep,
@@ -3990,9 +4095,6 @@ class HMathFloorOfDiv V8_FINAL : public HBinaryOperation {
HValue*,
HValue*);
- virtual HValue* EnsureAndPropagateNotMinusZero(
- BitVector* visited) V8_OVERRIDE;
-
DECLARE_CONCRETE_INSTRUCTION(MathFloorOfDiv)
protected:
@@ -4004,12 +4106,15 @@ class HMathFloorOfDiv V8_FINAL : public HBinaryOperation {
set_representation(Representation::Integer32());
SetFlag(kUseGVN);
SetFlag(kCanOverflow);
- if (!right->IsConstant()) {
- SetFlag(kCanBeDivByZero);
- }
+ SetFlag(kCanBeDivByZero);
+ SetFlag(kLeftCanBeMinInt);
+ SetFlag(kLeftCanBeNegative);
+ SetFlag(kLeftCanBePositive);
SetFlag(kAllowUndefinedAsNaN);
}
+ virtual Range* InferRange(Zone* zone) V8_OVERRIDE;
+
virtual bool IsDeletable() const V8_OVERRIDE { return true; }
};
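HMathFloorOfDiv now unconditionally records its hazards via the new kCanBeDivByZero, kLeftCanBeMinInt, kLeftCanBeNegative and kLeftCanBePositive flags: kMinInt divided by -1 overflows, and negative dividends need a correction step, so the backends want to know when those cases are impossible. For reference, flooring division expressed on top of C++'s truncating division (a sketch, not backend code):

#include <cassert>
#include <cstdint>

// C++ `/` truncates toward zero; Math.floor(a / b) rounds toward -infinity.
// The two differ exactly when there is a remainder and the signs differ.
int32_t FloorDiv(int32_t a, int32_t b) {
  assert(b != 0);                        // kCanBeDivByZero
  assert(!(a == INT32_MIN && b == -1));  // kLeftCanBeMinInt: would overflow
  int32_t q = a / b;
  int32_t r = a % b;
  return (r != 0 && ((r < 0) != (b < 0))) ? q - 1 : q;
}

int main() {
  assert(FloorDiv(7, 2) == 3);
  assert(FloorDiv(-7, 2) == -4);
  assert(FloorDiv(7, -2) == -4);
  assert(FloorDiv(-7, -2) == 3);
}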
@@ -4024,7 +4129,6 @@ class HArithmeticBinaryOperation : public HBinaryOperation {
}
virtual void RepresentationChanged(Representation to) V8_OVERRIDE {
- if (to.IsTagged()) SetGVNFlag(kChangesNewSpacePromotion);
if (to.IsTagged() &&
(left()->ToNumberCanBeObserved() || right()->ToNumberCanBeObserved())) {
SetAllSideEffects();
@@ -4033,12 +4137,7 @@ class HArithmeticBinaryOperation : public HBinaryOperation {
ClearAllSideEffects();
SetFlag(kUseGVN);
}
- }
-
- bool RightIsPowerOf2() {
- if (!right()->IsInteger32Constant()) return false;
- int32_t value = right()->GetInteger32Constant();
- return value != 0 && (IsPowerOf2(value) || IsPowerOf2(-value));
+ if (to.IsTagged()) SetChangesFlag(kNewSpacePromotion);
}
DECLARE_ABSTRACT_INSTRUCTION(ArithmeticBinaryOperation)
@@ -4109,7 +4208,9 @@ class HCompareNumericAndBranch : public HTemplateControlInstruction<2, 2> {
}
virtual void PrintDataTo(StringStream* stream) V8_OVERRIDE;
- void SetOperandPositions(Zone* zone, int left_pos, int right_pos) {
+ void SetOperandPositions(Zone* zone,
+ HSourcePosition left_pos,
+ HSourcePosition right_pos) {
set_operand_position(zone, 0, left_pos);
set_operand_position(zone, 1, right_pos);
}
@@ -4192,6 +4293,12 @@ class HCompareObjectEqAndBranch : public HTemplateControlInstruction<2, 2> {
virtual bool KnownSuccessorBlock(HBasicBlock** block) V8_OVERRIDE;
+ static const int kNoKnownSuccessorIndex = -1;
+ int known_successor_index() const { return known_successor_index_; }
+ void set_known_successor_index(int known_successor_index) {
+ known_successor_index_ = known_successor_index;
+ }
+
HValue* left() { return OperandAt(0); }
HValue* right() { return OperandAt(1); }
@@ -4211,7 +4318,8 @@ class HCompareObjectEqAndBranch : public HTemplateControlInstruction<2, 2> {
HCompareObjectEqAndBranch(HValue* left,
HValue* right,
HBasicBlock* true_target = NULL,
- HBasicBlock* false_target = NULL) {
+ HBasicBlock* false_target = NULL)
+ : known_successor_index_(kNoKnownSuccessorIndex) {
ASSERT(!left->IsConstant() ||
(!HConstant::cast(left)->HasInteger32Value() ||
HConstant::cast(left)->HasSmiValue()));
@@ -4223,6 +4331,8 @@ class HCompareObjectEqAndBranch : public HTemplateControlInstruction<2, 2> {
SetSuccessorAt(0, true_target);
SetSuccessorAt(1, false_target);
}
+
+ int known_successor_index_;
};
@@ -4236,6 +4346,8 @@ class HIsObjectAndBranch V8_FINAL : public HUnaryControlInstruction {
return Representation::Tagged();
}
+ virtual bool KnownSuccessorBlock(HBasicBlock** block) V8_OVERRIDE;
+
DECLARE_CONCRETE_INSTRUCTION(IsObjectAndBranch)
private:
@@ -4256,6 +4368,8 @@ class HIsStringAndBranch V8_FINAL : public HUnaryControlInstruction {
return Representation::Tagged();
}
+ virtual bool KnownSuccessorBlock(HBasicBlock** block) V8_OVERRIDE;
+
DECLARE_CONCRETE_INSTRUCTION(IsStringAndBranch)
protected:
@@ -4289,7 +4403,9 @@ class HIsSmiAndBranch V8_FINAL : public HUnaryControlInstruction {
HIsSmiAndBranch(HValue* value,
HBasicBlock* true_target = NULL,
HBasicBlock* false_target = NULL)
- : HUnaryControlInstruction(value, true_target, false_target) {}
+ : HUnaryControlInstruction(value, true_target, false_target) {
+ set_representation(Representation::Tagged());
+ }
};
@@ -4303,6 +4419,8 @@ class HIsUndetectableAndBranch V8_FINAL : public HUnaryControlInstruction {
return Representation::Tagged();
}
+ virtual bool KnownSuccessorBlock(HBasicBlock** block) V8_OVERRIDE;
+
DECLARE_CONCRETE_INSTRUCTION(IsUndetectableAndBranch)
private:
@@ -4348,7 +4466,7 @@ class HStringCompareAndBranch : public HTemplateControlInstruction<2, 3> {
SetOperandAt(1, left);
SetOperandAt(2, right);
set_representation(Representation::Tagged());
- SetGVNFlag(kChangesNewSpacePromotion);
+ SetChangesFlag(kNewSpacePromotion);
}
Token::Value token_;
@@ -4385,6 +4503,8 @@ class HHasInstanceTypeAndBranch V8_FINAL : public HUnaryControlInstruction {
return Representation::Tagged();
}
+ virtual bool KnownSuccessorBlock(HBasicBlock** block) V8_OVERRIDE;
+
DECLARE_CONCRETE_INSTRUCTION(HasInstanceTypeAndBranch)
private:
@@ -4466,8 +4586,7 @@ class HTypeofIsAndBranch V8_FINAL : public HUnaryControlInstruction {
public:
DECLARE_INSTRUCTION_FACTORY_P2(HTypeofIsAndBranch, HValue*, Handle<String>);
- Handle<String> type_literal() { return type_literal_; }
- bool compares_number_type() { return compares_number_type_; }
+ Handle<String> type_literal() { return type_literal_.handle(); }
virtual void PrintDataTo(StringStream* stream) V8_OVERRIDE;
DECLARE_CONCRETE_INSTRUCTION(TypeofIsAndBranch)
@@ -4478,16 +4597,16 @@ class HTypeofIsAndBranch V8_FINAL : public HUnaryControlInstruction {
virtual bool KnownSuccessorBlock(HBasicBlock** block) V8_OVERRIDE;
+ virtual void FinalizeUniqueness() V8_OVERRIDE {
+ type_literal_ = Unique<String>(type_literal_.handle());
+ }
+
private:
HTypeofIsAndBranch(HValue* value, Handle<String> type_literal)
: HUnaryControlInstruction(value, NULL, NULL),
- type_literal_(type_literal) {
- Heap* heap = type_literal->GetHeap();
- compares_number_type_ = type_literal->Equals(heap->number_string());
- }
+ type_literal_(Unique<String>::CreateUninitialized(type_literal)) { }
- Handle<String> type_literal_;
- bool compares_number_type_ : 1;
+ Unique<String> type_literal_;
};
@@ -4573,7 +4692,7 @@ class HPower V8_FINAL : public HTemplateInstruction<2> {
SetOperandAt(1, right);
set_representation(Representation::Double());
SetFlag(kUseGVN);
- SetGVNFlag(kChangesNewSpacePromotion);
+ SetChangesFlag(kNewSpacePromotion);
}
virtual bool IsDeletable() const V8_OVERRIDE {
@@ -4596,9 +4715,6 @@ class HAdd V8_FINAL : public HArithmeticBinaryOperation {
return !representation().IsTagged() && !representation().IsExternal();
}
- virtual HValue* EnsureAndPropagateNotMinusZero(
- BitVector* visited) V8_OVERRIDE;
-
virtual HValue* Canonicalize() V8_OVERRIDE;
virtual bool TryDecompose(DecompositionResult* decomposition) V8_OVERRIDE {
@@ -4614,10 +4730,6 @@ class HAdd V8_FINAL : public HArithmeticBinaryOperation {
}
virtual void RepresentationChanged(Representation to) V8_OVERRIDE {
- if (to.IsTagged()) {
- SetGVNFlag(kChangesNewSpacePromotion);
- ClearFlag(kAllowUndefinedAsNaN);
- }
if (to.IsTagged() &&
(left()->ToNumberCanBeObserved() || right()->ToNumberCanBeObserved() ||
left()->ToStringCanBeObserved() || right()->ToStringCanBeObserved())) {
@@ -4627,6 +4739,10 @@ class HAdd V8_FINAL : public HArithmeticBinaryOperation {
ClearAllSideEffects();
SetFlag(kUseGVN);
}
+ if (to.IsTagged()) {
+ SetChangesFlag(kNewSpacePromotion);
+ ClearFlag(kAllowUndefinedAsNaN);
+ }
}
virtual Representation RepresentationFromInputs() V8_OVERRIDE;
@@ -4655,9 +4771,6 @@ class HSub V8_FINAL : public HArithmeticBinaryOperation {
HValue* left,
HValue* right);
- virtual HValue* EnsureAndPropagateNotMinusZero(
- BitVector* visited) V8_OVERRIDE;
-
virtual HValue* Canonicalize() V8_OVERRIDE;
virtual bool TryDecompose(DecompositionResult* decomposition) V8_OVERRIDE {
@@ -4704,9 +4817,6 @@ class HMul V8_FINAL : public HArithmeticBinaryOperation {
return mul;
}
- virtual HValue* EnsureAndPropagateNotMinusZero(
- BitVector* visited) V8_OVERRIDE;
-
virtual HValue* Canonicalize() V8_OVERRIDE;
// Only commutative if it is certain that no two objects are being multiplied.
@@ -4744,9 +4854,6 @@ class HMod V8_FINAL : public HArithmeticBinaryOperation {
HValue* left,
HValue* right);
- virtual HValue* EnsureAndPropagateNotMinusZero(
- BitVector* visited) V8_OVERRIDE;
-
virtual HValue* Canonicalize() V8_OVERRIDE;
virtual void UpdateRepresentation(Representation new_rep,
@@ -4769,6 +4876,7 @@ class HMod V8_FINAL : public HArithmeticBinaryOperation {
HValue* right) : HArithmeticBinaryOperation(context, left, right) {
SetFlag(kCanBeDivByZero);
SetFlag(kCanOverflow);
+ SetFlag(kLeftCanBeNegative);
}
};
@@ -4780,9 +4888,6 @@ class HDiv V8_FINAL : public HArithmeticBinaryOperation {
HValue* left,
HValue* right);
- virtual HValue* EnsureAndPropagateNotMinusZero(
- BitVector* visited) V8_OVERRIDE;
-
virtual HValue* Canonicalize() V8_OVERRIDE;
virtual void UpdateRepresentation(Representation new_rep,
@@ -5072,8 +5177,8 @@ class HOsrEntry V8_FINAL : public HTemplateInstruction<0> {
private:
explicit HOsrEntry(BailoutId ast_id) : ast_id_(ast_id) {
- SetGVNFlag(kChangesOsrEntries);
- SetGVNFlag(kChangesNewSpacePromotion);
+ SetChangesFlag(kOsrEntries);
+ SetChangesFlag(kNewSpacePromotion);
}
BailoutId ast_id_;
@@ -5215,7 +5320,7 @@ class HLoadGlobalCell V8_FINAL : public HTemplateInstruction<0> {
: cell_(Unique<Cell>::CreateUninitialized(cell)), details_(details) {
set_representation(Representation::Tagged());
SetFlag(kUseGVN);
- SetGVNFlag(kDependsOnGlobalVars);
+ SetDependsOnFlag(kGlobalVars);
}
virtual bool IsDeletable() const V8_OVERRIDE { return !RequiresHoleCheck(); }
@@ -5367,8 +5472,8 @@ class HAllocate V8_FINAL : public HTemplateInstruction<2> {
SetOperandAt(1, size);
set_representation(Representation::Tagged());
SetFlag(kTrackSideEffectDominators);
- SetGVNFlag(kChangesNewSpacePromotion);
- SetGVNFlag(kDependsOnNewSpacePromotion);
+ SetChangesFlag(kNewSpacePromotion);
+ SetDependsOnFlag(kNewSpacePromotion);
if (FLAG_trace_pretenuring) {
PrintF("HAllocate with AllocationSite %p %s\n",
@@ -5566,7 +5671,7 @@ class HStoreGlobalCell V8_FINAL : public HUnaryOperation {
: HUnaryOperation(value),
cell_(Unique<PropertyCell>::CreateUninitialized(cell)),
details_(details) {
- SetGVNFlag(kChangesGlobalVars);
+ SetChangesFlag(kGlobalVars);
}
Unique<PropertyCell> cell_;
@@ -5594,10 +5699,10 @@ class HLoadContextSlot V8_FINAL : public HUnaryOperation {
ASSERT(var->IsContextSlot());
switch (var->mode()) {
case LET:
- case CONST_HARMONY:
+ case CONST:
mode_ = kCheckDeoptimize;
break;
- case CONST:
+ case CONST_LEGACY:
mode_ = kCheckReturnUndefined;
break;
default:
@@ -5605,7 +5710,7 @@ class HLoadContextSlot V8_FINAL : public HUnaryOperation {
}
set_representation(Representation::Tagged());
SetFlag(kUseGVN);
- SetGVNFlag(kDependsOnContextSlots);
+ SetDependsOnFlag(kContextSlots);
}
int slot_index() const { return slot_index_; }
@@ -5689,7 +5794,7 @@ class HStoreContextSlot V8_FINAL : public HTemplateInstruction<2> {
: slot_index_(slot_index), mode_(mode) {
SetOperandAt(0, context);
SetOperandAt(1, value);
- SetGVNFlag(kChangesContextSlots);
+ SetChangesFlag(kContextSlots);
}
int slot_index_;
@@ -5773,9 +5878,8 @@ class HObjectAccess V8_FINAL {
return HObjectAccess(
kArrayLengths,
JSArray::kLengthOffset,
- IsFastElementsKind(elements_kind) &&
- FLAG_track_fields
- ? Representation::Smi() : Representation::Tagged());
+ IsFastElementsKind(elements_kind)
+ ? Representation::Smi() : Representation::Tagged());
}
static HObjectAccess ForAllocationSiteOffset(int offset);
@@ -5789,7 +5893,7 @@ class HObjectAccess V8_FINAL {
return HObjectAccess(
kArrayLengths,
FixedArray::kLengthOffset,
- FLAG_track_fields ? Representation::Smi() : Representation::Tagged());
+ Representation::Smi());
}
static HObjectAccess ForStringHashField() {
@@ -5803,7 +5907,7 @@ class HObjectAccess V8_FINAL {
return HObjectAccess(
kStringLengths,
String::kLengthOffset,
- FLAG_track_fields ? Representation::Smi() : Representation::Tagged());
+ Representation::Smi());
}
static HObjectAccess ForConsStringFirst() {
@@ -5834,18 +5938,6 @@ class HObjectAccess V8_FINAL {
return HObjectAccess(kInobject, SharedFunctionInfo::kCodeOffset);
}
- static HObjectAccess ForFirstCodeSlot() {
- return HObjectAccess(kInobject, SharedFunctionInfo::kFirstCodeSlot);
- }
-
- static HObjectAccess ForFirstContextSlot() {
- return HObjectAccess(kInobject, SharedFunctionInfo::kFirstContextSlot);
- }
-
- static HObjectAccess ForFirstOsrAstIdSlot() {
- return HObjectAccess(kInobject, SharedFunctionInfo::kFirstOsrAstIdSlot);
- }
-
static HObjectAccess ForOptimizedCodeMap() {
return HObjectAccess(kInobject,
SharedFunctionInfo::kOptimizedCodeMapOffset);
@@ -5967,14 +6059,14 @@ class HObjectAccess V8_FINAL {
return HObjectAccess(kInobject, GlobalObject::kNativeContextOffset);
}
- void PrintTo(StringStream* stream);
+ void PrintTo(StringStream* stream) const;
inline bool Equals(HObjectAccess that) const {
return value_ == that.value_; // portion and offset must match
}
protected:
- void SetGVNFlags(HValue *instr, bool is_store);
+ void SetGVNFlags(HValue *instr, PropertyAccessType access_type);
private:
// internal use only; different parts of an object or array
@@ -5989,6 +6081,8 @@ class HObjectAccess V8_FINAL {
kExternalMemory // some field in external memory
};
+ HObjectAccess() : value_(0) {}
+
HObjectAccess(Portion portion, int offset,
Representation representation = Representation::Tagged(),
Handle<String> name = Handle<String>::null(),
@@ -6021,6 +6115,7 @@ class HObjectAccess V8_FINAL {
friend class HLoadNamedField;
friend class HStoreNamedField;
+ friend class SideEffectsTracker;
inline Portion portion() const {
return PortionField::decode(value_);
@@ -6091,14 +6186,13 @@ class HLoadNamedField V8_FINAL : public HTemplateInstruction<2> {
representation.IsExternal() ||
representation.IsInteger32()) {
set_representation(representation);
- } else if (FLAG_track_heap_object_fields &&
- representation.IsHeapObject()) {
+ } else if (representation.IsHeapObject()) {
set_type(HType::NonPrimitive());
set_representation(Representation::Tagged());
} else {
set_representation(Representation::Tagged());
}
- access.SetGVNFlags(this, false);
+ access.SetGVNFlags(this, LOAD);
}
virtual bool IsDeletable() const V8_OVERRIDE { return true; }
@@ -6157,7 +6251,7 @@ class HLoadFunctionPrototype V8_FINAL : public HUnaryOperation {
: HUnaryOperation(function) {
set_representation(Representation::Tagged());
SetFlag(kUseGVN);
- SetGVNFlag(kDependsOnCalls);
+ SetDependsOnFlag(kCalls);
}
};
@@ -6302,10 +6396,10 @@ class HLoadKeyed V8_FINAL
set_representation(Representation::Tagged());
}
- SetGVNFlag(kDependsOnArrayElements);
+ SetDependsOnFlag(kArrayElements);
} else {
set_representation(Representation::Double());
- SetGVNFlag(kDependsOnDoubleArrayElements);
+ SetDependsOnFlag(kDoubleArrayElements);
}
} else {
if (elements_kind == EXTERNAL_FLOAT32_ELEMENTS ||
@@ -6318,14 +6412,14 @@ class HLoadKeyed V8_FINAL
}
if (is_external()) {
- SetGVNFlag(kDependsOnExternalMemory);
+ SetDependsOnFlag(kExternalMemory);
} else if (is_fixed_typed_array()) {
- SetGVNFlag(kDependsOnTypedArrayElements);
+ SetDependsOnFlag(kTypedArrayElements);
} else {
UNREACHABLE();
}
// Native code could change the specialized array.
- SetGVNFlag(kDependsOnCalls);
+ SetDependsOnFlag(kCalls);
}
SetFlag(kUseGVN);
@@ -6449,7 +6543,8 @@ class HStoreNamedField V8_FINAL : public HTemplateInstruction<3> {
}
virtual bool HandleSideEffectDominator(GVNFlag side_effect,
HValue* dominator) V8_OVERRIDE {
- ASSERT(side_effect == kChangesNewSpacePromotion);
+ ASSERT(side_effect == kNewSpacePromotion);
+ if (!FLAG_use_write_barrier_elimination) return false;
new_space_dominator_ = dominator;
return false;
}
@@ -6489,8 +6584,7 @@ class HStoreNamedField V8_FINAL : public HTemplateInstruction<3> {
}
bool NeedsWriteBarrier() {
- ASSERT(!(FLAG_track_double_fields && field_representation().IsDouble()) ||
- !has_transition());
+ ASSERT(!field_representation().IsDouble() || !has_transition());
if (IsSkipWriteBarrier()) return false;
if (field_representation().IsDouble()) return false;
if (field_representation().IsSmi()) return false;
@@ -6525,7 +6619,6 @@ class HStoreNamedField V8_FINAL : public HTemplateInstruction<3> {
write_barrier_mode_(UPDATE_WRITE_BARRIER),
has_transition_(false),
store_mode_(store_mode) {
- if (!FLAG_smi_x64_store_opt) store_mode_ = INITIALIZING_STORE;
// Stores to a non existing in-object property are allowed only to the
// newly allocated objects (via HAllocate or HInnerAllocatedObject).
ASSERT(!access.IsInobject() || access.existing_inobject_property() ||
@@ -6533,7 +6626,7 @@ class HStoreNamedField V8_FINAL : public HTemplateInstruction<3> {
SetOperandAt(0, obj);
SetOperandAt(1, val);
SetOperandAt(2, obj);
- access.SetGVNFlags(this, true);
+ access.SetGVNFlags(this, STORE);
}
HObjectAccess access_;
@@ -6548,12 +6641,12 @@ class HStoreNamedGeneric V8_FINAL : public HTemplateInstruction<3> {
public:
DECLARE_INSTRUCTION_WITH_CONTEXT_FACTORY_P4(HStoreNamedGeneric, HValue*,
Handle<String>, HValue*,
- StrictModeFlag);
+ StrictMode);
HValue* object() { return OperandAt(0); }
HValue* value() { return OperandAt(1); }
HValue* context() { return OperandAt(2); }
Handle<String> name() { return name_; }
- StrictModeFlag strict_mode_flag() { return strict_mode_flag_; }
+ StrictMode strict_mode() { return strict_mode_; }
virtual void PrintDataTo(StringStream* stream) V8_OVERRIDE;
@@ -6568,9 +6661,9 @@ class HStoreNamedGeneric V8_FINAL : public HTemplateInstruction<3> {
HValue* object,
Handle<String> name,
HValue* value,
- StrictModeFlag strict_mode_flag)
+ StrictMode strict_mode)
: name_(name),
- strict_mode_flag_(strict_mode_flag) {
+ strict_mode_(strict_mode) {
SetOperandAt(0, object);
SetOperandAt(1, value);
SetOperandAt(2, context);
@@ -6578,7 +6671,7 @@ class HStoreNamedGeneric V8_FINAL : public HTemplateInstruction<3> {
}
Handle<String> name_;
- StrictModeFlag strict_mode_flag_;
+ StrictMode strict_mode_;
};
@@ -6681,7 +6774,7 @@ class HStoreKeyed V8_FINAL
virtual bool HandleSideEffectDominator(GVNFlag side_effect,
HValue* dominator) V8_OVERRIDE {
- ASSERT(side_effect == kChangesNewSpacePromotion);
+ ASSERT(side_effect == kNewSpacePromotion);
new_space_dominator_ = dominator;
return false;
}
@@ -6714,7 +6807,6 @@ class HStoreKeyed V8_FINAL
is_uninitialized_(false),
store_mode_(store_mode),
new_space_dominator_(NULL) {
- if (!FLAG_smi_x64_store_opt) store_mode_ = INITIALIZING_STORE;
SetOperandAt(0, obj);
SetOperandAt(1, key);
SetOperandAt(2, val);
@@ -6724,20 +6816,20 @@ class HStoreKeyed V8_FINAL
if (IsFastObjectElementsKind(elements_kind)) {
SetFlag(kTrackSideEffectDominators);
- SetGVNFlag(kDependsOnNewSpacePromotion);
+ SetDependsOnFlag(kNewSpacePromotion);
}
if (is_external()) {
- SetGVNFlag(kChangesExternalMemory);
+ SetChangesFlag(kExternalMemory);
SetFlag(kAllowUndefinedAsNaN);
} else if (IsFastDoubleElementsKind(elements_kind)) {
- SetGVNFlag(kChangesDoubleArrayElements);
+ SetChangesFlag(kDoubleArrayElements);
} else if (IsFastSmiElementsKind(elements_kind)) {
- SetGVNFlag(kChangesArrayElements);
+ SetChangesFlag(kArrayElements);
} else if (is_fixed_typed_array()) {
- SetGVNFlag(kChangesTypedArrayElements);
+ SetChangesFlag(kTypedArrayElements);
SetFlag(kAllowUndefinedAsNaN);
} else {
- SetGVNFlag(kChangesArrayElements);
+ SetChangesFlag(kArrayElements);
}
// EXTERNAL_{UNSIGNED_,}{BYTE,SHORT,INT}_ELEMENTS are truncating.
@@ -6761,13 +6853,13 @@ class HStoreKeyed V8_FINAL
class HStoreKeyedGeneric V8_FINAL : public HTemplateInstruction<4> {
public:
DECLARE_INSTRUCTION_WITH_CONTEXT_FACTORY_P4(HStoreKeyedGeneric, HValue*,
- HValue*, HValue*, StrictModeFlag);
+ HValue*, HValue*, StrictMode);
HValue* object() { return OperandAt(0); }
HValue* key() { return OperandAt(1); }
HValue* value() { return OperandAt(2); }
HValue* context() { return OperandAt(3); }
- StrictModeFlag strict_mode_flag() { return strict_mode_flag_; }
+ StrictMode strict_mode() { return strict_mode_; }
virtual Representation RequiredInputRepresentation(int index) V8_OVERRIDE {
// tagged[tagged] = tagged
@@ -6783,8 +6875,8 @@ class HStoreKeyedGeneric V8_FINAL : public HTemplateInstruction<4> {
HValue* object,
HValue* key,
HValue* value,
- StrictModeFlag strict_mode_flag)
- : strict_mode_flag_(strict_mode_flag) {
+ StrictMode strict_mode)
+ : strict_mode_(strict_mode) {
SetOperandAt(0, object);
SetOperandAt(1, key);
SetOperandAt(2, value);
@@ -6792,7 +6884,7 @@ class HStoreKeyedGeneric V8_FINAL : public HTemplateInstruction<4> {
SetAllSideEffects();
}
- StrictModeFlag strict_mode_flag_;
+ StrictMode strict_mode_;
};
@@ -6829,6 +6921,8 @@ class HTransitionElementsKind V8_FINAL : public HTemplateInstruction<2> {
transitioned_map_ == instr->transitioned_map_;
}
+ virtual int RedefinedOperandIndex() { return 0; }
+
private:
HTransitionElementsKind(HValue* context,
HValue* object,
@@ -6841,10 +6935,10 @@ class HTransitionElementsKind V8_FINAL : public HTemplateInstruction<2> {
SetOperandAt(0, object);
SetOperandAt(1, context);
SetFlag(kUseGVN);
- SetGVNFlag(kChangesElementsKind);
+ SetChangesFlag(kElementsKind);
if (!IsSimpleMapChangeTransition(from_kind_, to_kind_)) {
- SetGVNFlag(kChangesElementsPointer);
- SetGVNFlag(kChangesNewSpacePromotion);
+ SetChangesFlag(kElementsPointer);
+ SetChangesFlag(kNewSpacePromotion);
}
set_representation(Representation::Tagged());
}
@@ -6895,8 +6989,8 @@ class HStringAdd V8_FINAL : public HBinaryOperation {
flags_(flags), pretenure_flag_(pretenure_flag) {
set_representation(Representation::Tagged());
SetFlag(kUseGVN);
- SetGVNFlag(kDependsOnMaps);
- SetGVNFlag(kChangesNewSpacePromotion);
+ SetDependsOnFlag(kMaps);
+ SetChangesFlag(kNewSpacePromotion);
if (FLAG_trace_pretenuring) {
PrintF("HStringAdd with AllocationSite %p %s\n",
allocation_site.is_null()
@@ -6947,9 +7041,9 @@ class HStringCharCodeAt V8_FINAL : public HTemplateInstruction<3> {
SetOperandAt(2, index);
set_representation(Representation::Integer32());
SetFlag(kUseGVN);
- SetGVNFlag(kDependsOnMaps);
- SetGVNFlag(kDependsOnStringChars);
- SetGVNFlag(kChangesNewSpacePromotion);
+ SetDependsOnFlag(kMaps);
+ SetDependsOnFlag(kStringChars);
+ SetChangesFlag(kNewSpacePromotion);
}
// No side effects: runtime function assumes string + number inputs.
@@ -6983,7 +7077,7 @@ class HStringCharFromCode V8_FINAL : public HTemplateInstruction<2> {
SetOperandAt(1, char_code);
set_representation(Representation::Tagged());
SetFlag(kUseGVN);
- SetGVNFlag(kChangesNewSpacePromotion);
+ SetChangesFlag(kNewSpacePromotion);
}
virtual bool IsDeletable() const V8_OVERRIDE {
@@ -7078,7 +7172,7 @@ class HFunctionLiteral V8_FINAL : public HTemplateInstruction<1> {
bool pretenure() const { return pretenure_; }
bool has_no_literals() const { return has_no_literals_; }
bool is_generator() const { return is_generator_; }
- LanguageMode language_mode() const { return language_mode_; }
+ StrictMode strict_mode() const { return strict_mode_; }
private:
HFunctionLiteral(HValue* context,
@@ -7089,10 +7183,10 @@ class HFunctionLiteral V8_FINAL : public HTemplateInstruction<1> {
pretenure_(pretenure),
has_no_literals_(shared->num_literals() == 0),
is_generator_(shared->is_generator()),
- language_mode_(shared->language_mode()) {
+ strict_mode_(shared->strict_mode()) {
SetOperandAt(0, context);
set_representation(Representation::Tagged());
- SetGVNFlag(kChangesNewSpacePromotion);
+ SetChangesFlag(kNewSpacePromotion);
}
virtual bool IsDeletable() const V8_OVERRIDE { return true; }
@@ -7101,7 +7195,7 @@ class HFunctionLiteral V8_FINAL : public HTemplateInstruction<1> {
bool pretenure_ : 1;
bool has_no_literals_ : 1;
bool is_generator_ : 1;
- LanguageMode language_mode_;
+ StrictMode strict_mode_;
};
@@ -7163,7 +7257,7 @@ class HToFastProperties V8_FINAL : public HUnaryOperation {
private:
explicit HToFastProperties(HValue* value) : HUnaryOperation(value) {
set_representation(Representation::Tagged());
- SetGVNFlag(kChangesNewSpacePromotion);
+ SetChangesFlag(kNewSpacePromotion);
// This instruction is not marked as kChangesMaps, but does
// change the map of the input operand. Use it only when creating
@@ -7171,7 +7265,7 @@ class HToFastProperties V8_FINAL : public HUnaryOperation {
ASSERT(value->IsCallRuntime());
#ifdef DEBUG
const Runtime::Function* function = HCallRuntime::cast(value)->function();
- ASSERT(function->function_id == Runtime::kCreateObjectLiteral);
+ ASSERT(function->function_id == Runtime::kHiddenCreateObjectLiteral);
#endif
}
@@ -7242,7 +7336,7 @@ class HSeqStringGetChar V8_FINAL : public HTemplateInstruction<2> {
SetOperandAt(1, index);
set_representation(Representation::Integer32());
SetFlag(kUseGVN);
- SetGVNFlag(kDependsOnStringChars);
+ SetDependsOnFlag(kStringChars);
}
virtual bool IsDeletable() const V8_OVERRIDE { return true; }
@@ -7281,7 +7375,7 @@ class HSeqStringSetChar V8_FINAL : public HTemplateInstruction<4> {
SetOperandAt(2, index);
SetOperandAt(3, value);
set_representation(Representation::Tagged());
- SetGVNFlag(kChangesStringChars);
+ SetChangesFlag(kStringChars);
}
String::Encoding encoding_;
@@ -7321,8 +7415,8 @@ class HCheckMapValue V8_FINAL : public HTemplateInstruction<2> {
SetOperandAt(1, map);
set_representation(Representation::Tagged());
SetFlag(kUseGVN);
- SetGVNFlag(kDependsOnMaps);
- SetGVNFlag(kDependsOnElementsKind);
+ SetDependsOnFlag(kMaps);
+ SetDependsOnFlag(kElementsKind);
}
};
diff --git a/deps/v8/src/hydrogen-load-elimination.cc b/deps/v8/src/hydrogen-load-elimination.cc
index 0c7de85169..f84eac046b 100644
--- a/deps/v8/src/hydrogen-load-elimination.cc
+++ b/deps/v8/src/hydrogen-load-elimination.cc
@@ -100,26 +100,33 @@ class HLoadEliminationTable : public ZoneObject {
}
break;
}
+ case HValue::kTransitionElementsKind: {
+ HTransitionElementsKind* t = HTransitionElementsKind::cast(instr);
+ HValue* object = t->object()->ActualValue();
+ KillFieldInternal(object, FieldOf(JSArray::kElementsOffset), NULL);
+ KillFieldInternal(object, FieldOf(JSObject::kMapOffset), NULL);
+ break;
+ }
default: {
- if (instr->CheckGVNFlag(kChangesInobjectFields)) {
+ if (instr->CheckChangesFlag(kInobjectFields)) {
TRACE((" kill-all i%d\n", instr->id()));
Kill();
break;
}
- if (instr->CheckGVNFlag(kChangesMaps)) {
+ if (instr->CheckChangesFlag(kMaps)) {
TRACE((" kill-maps i%d\n", instr->id()));
KillOffset(JSObject::kMapOffset);
}
- if (instr->CheckGVNFlag(kChangesElementsKind)) {
+ if (instr->CheckChangesFlag(kElementsKind)) {
TRACE((" kill-elements-kind i%d\n", instr->id()));
KillOffset(JSObject::kMapOffset);
KillOffset(JSObject::kElementsOffset);
}
- if (instr->CheckGVNFlag(kChangesElementsPointer)) {
+ if (instr->CheckChangesFlag(kElementsPointer)) {
TRACE((" kill-elements i%d\n", instr->id()));
KillOffset(JSObject::kElementsOffset);
}
- if (instr->CheckGVNFlag(kChangesOsrEntries)) {
+ if (instr->CheckChangesFlag(kOsrEntries)) {
TRACE((" kill-osr i%d\n", instr->id()));
Kill();
}
@@ -134,8 +141,32 @@ class HLoadEliminationTable : public ZoneObject {
return this;
}
- // Support for global analysis with HFlowEngine: Copy state to successor
- // block.
+ // Support for global analysis with HFlowEngine: Merge given state with
+ // the other incoming state.
+ static HLoadEliminationTable* Merge(HLoadEliminationTable* succ_state,
+ HBasicBlock* succ_block,
+ HLoadEliminationTable* pred_state,
+ HBasicBlock* pred_block,
+ Zone* zone) {
+ ASSERT(pred_state != NULL);
+ if (succ_state == NULL) {
+ return pred_state->Copy(succ_block, pred_block, zone);
+ } else {
+ return succ_state->Merge(succ_block, pred_state, pred_block, zone);
+ }
+ }
+
+ // Support for global analysis with HFlowEngine: Given state merged with all
+ // the other incoming states, prepare it for use.
+ static HLoadEliminationTable* Finish(HLoadEliminationTable* state,
+ HBasicBlock* block,
+ Zone* zone) {
+ ASSERT(state != NULL);
+ return state;
+ }
+
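These two static hooks are what the HFlowEngine driver calls while walking the graph: Merge is invoked once per incoming edge (cloning the predecessor state on the first edge, folding it in afterwards) and Finish once all edges are in. A toy sketch of that shape, with a std::set standing in for the field table and plain heap allocation standing in for zone allocation:

#include <set>

struct State {
  std::set<int> facts;  // stand-in for the table of known field values

  State* Copy() const { return new State(*this); }

  State* Merge(const State* pred) {
    std::set<int> kept;  // keep only facts that hold on every incoming edge
    for (int f : facts) {
      if (pred->facts.count(f)) kept.insert(f);
    }
    facts.swap(kept);
    return this;
  }
};

// First edge into the block: clone. Later edges: merge into what we have.
State* MergeEdge(State* succ_state, const State* pred_state) {
  return succ_state == nullptr ? pred_state->Copy()
                               : succ_state->Merge(pred_state);
}

// Called after all edges are merged; nothing to fix up in this sketch.
State* Finish(State* state) { return state; }

int main() {
  State a; a.facts = {1, 2, 3};
  State b; b.facts = {2, 3, 4};
  State* merged = MergeEdge(nullptr, &a);  // copy of a
  merged = MergeEdge(merged, &b);          // intersect with b
  bool ok = (merged->facts == std::set<int>{2, 3});
  delete merged;
  return ok ? 0 : 1;
}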
+ private:
+ // Copy state to successor block.
HLoadEliminationTable* Copy(HBasicBlock* succ, HBasicBlock* from_block,
Zone* zone) {
HLoadEliminationTable* copy =
@@ -151,8 +182,7 @@ class HLoadEliminationTable : public ZoneObject {
return copy;
}
- // Support for global analysis with HFlowEngine: Merge this state with
- // the other incoming state.
+ // Merge this state with the other incoming state.
HLoadEliminationTable* Merge(HBasicBlock* succ, HLoadEliminationTable* that,
HBasicBlock* that_block, Zone* zone) {
if (that->fields_.length() < fields_.length()) {
@@ -432,11 +462,7 @@ class HLoadEliminationTable : public ZoneObject {
class HLoadEliminationEffects : public ZoneObject {
public:
explicit HLoadEliminationEffects(Zone* zone)
- : zone_(zone),
- maps_stored_(false),
- fields_stored_(false),
- elements_stored_(false),
- stores_(5, zone) { }
+ : zone_(zone), stores_(5, zone) { }
inline bool Disabled() {
return false; // Effects are _not_ disabled.
@@ -444,37 +470,25 @@ class HLoadEliminationEffects : public ZoneObject {
// Process a possibly side-effecting instruction.
void Process(HInstruction* instr, Zone* zone) {
- switch (instr->opcode()) {
- case HValue::kStoreNamedField: {
- stores_.Add(HStoreNamedField::cast(instr), zone_);
- break;
- }
- case HValue::kOsrEntry: {
- // Kill everything. Loads must not be hoisted past the OSR entry.
- maps_stored_ = true;
- fields_stored_ = true;
- elements_stored_ = true;
- }
- default: {
- fields_stored_ |= instr->CheckGVNFlag(kChangesInobjectFields);
- maps_stored_ |= instr->CheckGVNFlag(kChangesMaps);
- maps_stored_ |= instr->CheckGVNFlag(kChangesElementsKind);
- elements_stored_ |= instr->CheckGVNFlag(kChangesElementsKind);
- elements_stored_ |= instr->CheckGVNFlag(kChangesElementsPointer);
- }
+ if (instr->IsStoreNamedField()) {
+ stores_.Add(HStoreNamedField::cast(instr), zone_);
+ } else {
+ flags_.Add(instr->ChangesFlags());
}
}
// Apply these effects to the given load elimination table.
void Apply(HLoadEliminationTable* table) {
- if (fields_stored_) {
+ // Loads must not be hoisted past the OSR entry, therefore we kill
+ // everything if we see an OSR entry.
+ if (flags_.Contains(kInobjectFields) || flags_.Contains(kOsrEntries)) {
table->Kill();
return;
}
- if (maps_stored_) {
+ if (flags_.Contains(kElementsKind) || flags_.Contains(kMaps)) {
table->KillOffset(JSObject::kMapOffset);
}
- if (elements_stored_) {
+ if (flags_.Contains(kElementsKind) || flags_.Contains(kElementsPointer)) {
table->KillOffset(JSObject::kElementsOffset);
}
@@ -486,9 +500,7 @@ class HLoadEliminationEffects : public ZoneObject {
// Union these effects with the other effects.
void Union(HLoadEliminationEffects* that, Zone* zone) {
- maps_stored_ |= that->maps_stored_;
- fields_stored_ |= that->fields_stored_;
- elements_stored_ |= that->elements_stored_;
+ flags_.Add(that->flags_);
for (int i = 0; i < that->stores_.length(); i++) {
stores_.Add(that->stores_[i], zone);
}
@@ -496,9 +508,7 @@ class HLoadEliminationEffects : public ZoneObject {
private:
Zone* zone_;
- bool maps_stored_ : 1;
- bool fields_stored_ : 1;
- bool elements_stored_ : 1;
+ GVNFlagSet flags_;
ZoneList<HStoreNamedField*> stores_;
};
diff --git a/deps/v8/src/hydrogen-minus-zero.cc b/deps/v8/src/hydrogen-minus-zero.cc
deleted file mode 100644
index 316e0f5077..0000000000
--- a/deps/v8/src/hydrogen-minus-zero.cc
+++ /dev/null
@@ -1,91 +0,0 @@
-// Copyright 2013 the V8 project authors. All rights reserved.
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-// * Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-// * Redistributions in binary form must reproduce the above
-// copyright notice, this list of conditions and the following
-// disclaimer in the documentation and/or other materials provided
-// with the distribution.
-// * Neither the name of Google Inc. nor the names of its
-// contributors may be used to endorse or promote products derived
-// from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-#include "hydrogen-minus-zero.h"
-
-namespace v8 {
-namespace internal {
-
-void HComputeMinusZeroChecksPhase::Run() {
- const ZoneList<HBasicBlock*>* blocks(graph()->blocks());
- for (int i = 0; i < blocks->length(); ++i) {
- for (HInstructionIterator it(blocks->at(i)); !it.Done(); it.Advance()) {
- HInstruction* current = it.Current();
- if (current->IsChange()) {
- HChange* change = HChange::cast(current);
- // Propagate flags for negative zero checks upwards from conversions
- // int32-to-tagged and int32-to-double.
- Representation from = change->value()->representation();
- ASSERT(from.Equals(change->from()));
- if (from.IsSmiOrInteger32()) {
- ASSERT(change->to().IsTagged() ||
- change->to().IsDouble() ||
- change->to().IsSmiOrInteger32());
- ASSERT(visited_.IsEmpty());
- PropagateMinusZeroChecks(change->value());
- visited_.Clear();
- }
- } else if (current->IsCompareMinusZeroAndBranch()) {
- HCompareMinusZeroAndBranch* check =
- HCompareMinusZeroAndBranch::cast(current);
- if (check->value()->representation().IsSmiOrInteger32()) {
- ASSERT(visited_.IsEmpty());
- PropagateMinusZeroChecks(check->value());
- visited_.Clear();
- }
- }
- }
- }
-}
-
-
-void HComputeMinusZeroChecksPhase::PropagateMinusZeroChecks(HValue* value) {
- for (HValue* current = value;
- current != NULL && !visited_.Contains(current->id());
- current = current->EnsureAndPropagateNotMinusZero(&visited_)) {
- // For phis, we must propagate the check to all of its inputs.
- if (current->IsPhi()) {
- visited_.Add(current->id());
- HPhi* phi = HPhi::cast(current);
- for (int i = 0; i < phi->OperandCount(); ++i) {
- PropagateMinusZeroChecks(phi->OperandAt(i));
- }
- break;
- }
-
- // For multiplication, division, and Math.min/max(), we must propagate
- // to the left and the right side.
- if (current->IsMul() || current->IsDiv() || current->IsMathMinMax()) {
- HBinaryOperation* operation = HBinaryOperation::cast(current);
- operation->EnsureAndPropagateNotMinusZero(&visited_);
- PropagateMinusZeroChecks(operation->left());
- PropagateMinusZeroChecks(operation->right());
- }
- }
-}
-
-} } // namespace v8::internal
diff --git a/deps/v8/src/hydrogen-range-analysis.cc b/deps/v8/src/hydrogen-range-analysis.cc
index 76fd5f35f2..9d58fc89f0 100644
--- a/deps/v8/src/hydrogen-range-analysis.cc
+++ b/deps/v8/src/hydrogen-range-analysis.cc
@@ -78,7 +78,29 @@ void HRangeAnalysisPhase::Run() {
// Go through all instructions of the current block.
for (HInstructionIterator it(block); !it.Done(); it.Advance()) {
- InferRange(it.Current());
+ HValue* value = it.Current();
+ InferRange(value);
+
+ // Compute the bailout-on-minus-zero flag.
+ if (value->IsChange()) {
+ HChange* instr = HChange::cast(value);
+ // Propagate flags for negative zero checks upwards from conversions
+ // int32-to-tagged and int32-to-double.
+ Representation from = instr->value()->representation();
+ ASSERT(from.Equals(instr->from()));
+ if (from.IsSmiOrInteger32()) {
+ ASSERT(instr->to().IsTagged() ||
+ instr->to().IsDouble() ||
+ instr->to().IsSmiOrInteger32());
+ PropagateMinusZeroChecks(instr->value());
+ }
+ } else if (value->IsCompareMinusZeroAndBranch()) {
+ HCompareMinusZeroAndBranch* instr =
+ HCompareMinusZeroAndBranch::cast(value);
+ if (instr->value()->representation().IsSmiOrInteger32()) {
+ PropagateMinusZeroChecks(instr->value());
+ }
+ }
}
// Continue analysis in all dominated blocks.
@@ -197,4 +219,79 @@ void HRangeAnalysisPhase::AddRange(HValue* value, Range* range) {
}
+void HRangeAnalysisPhase::PropagateMinusZeroChecks(HValue* value) {
+ ASSERT(worklist_.is_empty());
+ ASSERT(in_worklist_.IsEmpty());
+
+ AddToWorklist(value);
+ while (!worklist_.is_empty()) {
+ value = worklist_.RemoveLast();
+
+ if (value->IsPhi()) {
+ // For phis, we must propagate the check to all of its inputs.
+ HPhi* phi = HPhi::cast(value);
+ for (int i = 0; i < phi->OperandCount(); ++i) {
+ AddToWorklist(phi->OperandAt(i));
+ }
+ } else if (value->IsUnaryMathOperation()) {
+ HUnaryMathOperation* instr = HUnaryMathOperation::cast(value);
+ if (instr->representation().IsSmiOrInteger32() &&
+ !instr->value()->representation().Equals(instr->representation())) {
+ if (instr->value()->range() == NULL ||
+ instr->value()->range()->CanBeMinusZero()) {
+ instr->SetFlag(HValue::kBailoutOnMinusZero);
+ }
+ }
+ if (instr->RequiredInputRepresentation(0).IsSmiOrInteger32() &&
+ instr->representation().Equals(
+ instr->RequiredInputRepresentation(0))) {
+ AddToWorklist(instr->value());
+ }
+ } else if (value->IsChange()) {
+ HChange* instr = HChange::cast(value);
+ if (!instr->from().IsSmiOrInteger32() &&
+ !instr->CanTruncateToInt32() &&
+ (instr->value()->range() == NULL ||
+ instr->value()->range()->CanBeMinusZero())) {
+ instr->SetFlag(HValue::kBailoutOnMinusZero);
+ }
+ } else if (value->IsForceRepresentation()) {
+ HForceRepresentation* instr = HForceRepresentation::cast(value);
+ AddToWorklist(instr->value());
+ } else if (value->IsMod()) {
+ HMod* instr = HMod::cast(value);
+ if (instr->range() == NULL || instr->range()->CanBeMinusZero()) {
+ instr->SetFlag(HValue::kBailoutOnMinusZero);
+ AddToWorklist(instr->left());
+ }
+ } else if (value->IsDiv() || value->IsMul()) {
+ HBinaryOperation* instr = HBinaryOperation::cast(value);
+ if (instr->range() == NULL || instr->range()->CanBeMinusZero()) {
+ instr->SetFlag(HValue::kBailoutOnMinusZero);
+ }
+ AddToWorklist(instr->right());
+ AddToWorklist(instr->left());
+ } else if (value->IsMathFloorOfDiv()) {
+ HMathFloorOfDiv* instr = HMathFloorOfDiv::cast(value);
+ instr->SetFlag(HValue::kBailoutOnMinusZero);
+ } else if (value->IsAdd() || value->IsSub()) {
+ HBinaryOperation* instr = HBinaryOperation::cast(value);
+ if (instr->range() == NULL || instr->range()->CanBeMinusZero()) {
+ // Propagate to the left argument. If the left argument cannot be -0,
+ // then the result of the add/sub operation cannot be either.
+ AddToWorklist(instr->left());
+ }
+ } else if (value->IsMathMinMax()) {
+ HMathMinMax* instr = HMathMinMax::cast(value);
+ AddToWorklist(instr->right());
+ AddToWorklist(instr->left());
+ }
+ }
+
+ in_worklist_.Clear();
+ ASSERT(in_worklist_.IsEmpty());
+ ASSERT(worklist_.is_empty());
+}
+
+
} } // namespace v8::internal
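
Editor's note: the deleted recursive HComputeMinusZeroChecksPhase is folded into range analysis as an explicit worklist plus an in_worklist_ bitmap, so every value is visited at most once even in the presence of cycles through phis. Below is a standalone sketch of that traversal pattern over a toy operand graph; the Node type and its fields are invented for illustration and are not V8 types.

// Illustrative only: worklist propagation with a visited set, mirroring
// AddToWorklist()/PropagateMinusZeroChecks() above. Not V8 code.
#include <cstdio>
#include <unordered_set>
#include <vector>

struct Node {
  int id = 0;
  bool can_be_minus_zero = true;      // stands in for range()->CanBeMinusZero()
  bool bailout_on_minus_zero = false;
  std::vector<Node*> operands;        // values the check propagates to
};

void PropagateMinusZeroChecks(Node* start) {
  std::vector<Node*> worklist;
  std::unordered_set<int> in_worklist;  // plays the role of the BitVector

  auto add = [&](Node* n) {
    if (in_worklist.insert(n->id).second) worklist.push_back(n);
  };

  add(start);
  while (!worklist.empty()) {
    Node* n = worklist.back();
    worklist.pop_back();
    if (n->can_be_minus_zero) n->bailout_on_minus_zero = true;
    for (Node* op : n->operands) add(op);  // e.g. phi inputs, mul/div operands
  }
}

int main() {
  Node a, b, mul;
  a.id = 1; b.id = 2; mul.id = 3;
  mul.operands = {&a, &b};
  a.operands = {&mul};                // a cycle: the visited set stops looping
  PropagateMinusZeroChecks(&mul);
  std::printf("a needs -0 bailout: %d\n", a.bailout_on_minus_zero);
  return 0;
}
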
diff --git a/deps/v8/src/hydrogen-range-analysis.h b/deps/v8/src/hydrogen-range-analysis.h
index a1e9737c5e..e0cc3c5dad 100644
--- a/deps/v8/src/hydrogen-range-analysis.h
+++ b/deps/v8/src/hydrogen-range-analysis.h
@@ -37,7 +37,9 @@ namespace internal {
class HRangeAnalysisPhase : public HPhase {
public:
explicit HRangeAnalysisPhase(HGraph* graph)
- : HPhase("H_Range analysis", graph), changed_ranges_(16, zone()) { }
+ : HPhase("H_Range analysis", graph), changed_ranges_(16, zone()),
+ in_worklist_(graph->GetMaximumValueID(), zone()),
+ worklist_(32, zone()) {}
void Run();
@@ -49,8 +51,19 @@ class HRangeAnalysisPhase : public HPhase {
void InferRange(HValue* value);
void RollBackTo(int index);
void AddRange(HValue* value, Range* range);
+ void AddToWorklist(HValue* value) {
+ if (in_worklist_.Contains(value->id())) return;
+ in_worklist_.Add(value->id());
+ worklist_.Add(value, zone());
+ }
+ void PropagateMinusZeroChecks(HValue* value);
ZoneList<HValue*> changed_ranges_;
+
+ BitVector in_worklist_;
+ ZoneList<HValue*> worklist_;
+
+ DISALLOW_COPY_AND_ASSIGN(HRangeAnalysisPhase);
};
diff --git a/deps/v8/src/hydrogen-representation-changes.cc b/deps/v8/src/hydrogen-representation-changes.cc
index 07fc8be38c..0b87d12eb3 100644
--- a/deps/v8/src/hydrogen-representation-changes.cc
+++ b/deps/v8/src/hydrogen-representation-changes.cc
@@ -61,10 +61,11 @@ void HRepresentationChangesPhase::InsertRepresentationChangeForUse(
if (new_value == NULL) {
new_value = new(graph()->zone()) HChange(
value, to, is_truncating_to_smi, is_truncating_to_int);
- if (use_value->operand_position(use_index) != RelocInfo::kNoPosition) {
+ if (!use_value->operand_position(use_index).IsUnknown()) {
new_value->set_position(use_value->operand_position(use_index));
} else {
- ASSERT(!FLAG_emit_opt_code_positions || !graph()->info()->IsOptimizing());
+ ASSERT(!FLAG_hydrogen_track_positions ||
+ !graph()->info()->IsOptimizing());
}
}
@@ -77,7 +78,10 @@ void HRepresentationChangesPhase::InsertRepresentationChangesForValue(
HValue* value) {
Representation r = value->representation();
if (r.IsNone()) return;
- if (value->HasNoUses()) return;
+ if (value->HasNoUses()) {
+ if (value->IsForceRepresentation()) value->DeleteAndReplaceWith(NULL);
+ return;
+ }
for (HUseIterator it(value->uses()); !it.Done(); it.Advance()) {
HValue* use_value = it.value();
diff --git a/deps/v8/src/hydrogen-store-elimination.cc b/deps/v8/src/hydrogen-store-elimination.cc
new file mode 100644
index 0000000000..2e6ee51387
--- /dev/null
+++ b/deps/v8/src/hydrogen-store-elimination.cc
@@ -0,0 +1,139 @@
+// Copyright 2013 the V8 project authors. All rights reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following
+// disclaimer in the documentation and/or other materials provided
+// with the distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived
+// from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+#include "hydrogen-store-elimination.h"
+#include "hydrogen-instructions.h"
+
+namespace v8 {
+namespace internal {
+
+#define TRACE(x) if (FLAG_trace_store_elimination) PrintF x
+
+// Performs a block-by-block local analysis for removable stores.
+void HStoreEliminationPhase::Run() {
+ GVNFlagSet flags; // Use GVN flags as an approximation for some instructions.
+ flags.RemoveAll();
+
+ flags.Add(kArrayElements);
+ flags.Add(kArrayLengths);
+ flags.Add(kStringLengths);
+ flags.Add(kBackingStoreFields);
+ flags.Add(kDoubleArrayElements);
+ flags.Add(kDoubleFields);
+ flags.Add(kElementsPointer);
+ flags.Add(kInobjectFields);
+ flags.Add(kExternalMemory);
+ flags.Add(kStringChars);
+ flags.Add(kTypedArrayElements);
+
+ for (int i = 0; i < graph()->blocks()->length(); i++) {
+ unobserved_.Rewind(0);
+ HBasicBlock* block = graph()->blocks()->at(i);
+ for (HInstructionIterator it(block); !it.Done(); it.Advance()) {
+ HInstruction* instr = it.Current();
+
+ // TODO(titzer): eliminate unobserved HStoreKeyed instructions too.
+ switch (instr->opcode()) {
+ case HValue::kStoreNamedField:
+ // Remove any unobserved stores overwritten by this store.
+ ProcessStore(HStoreNamedField::cast(instr));
+ break;
+ case HValue::kLoadNamedField:
+ // Observe any unobserved stores on this object + field.
+ ProcessLoad(HLoadNamedField::cast(instr));
+ break;
+ default:
+ ProcessInstr(instr, flags);
+ break;
+ }
+ }
+ }
+}
+
+
+void HStoreEliminationPhase::ProcessStore(HStoreNamedField* store) {
+ HValue* object = store->object()->ActualValue();
+ int i = 0;
+ while (i < unobserved_.length()) {
+ HStoreNamedField* prev = unobserved_.at(i);
+ if (aliasing_->MustAlias(object, prev->object()->ActualValue()) &&
+ store->access().Equals(prev->access())) {
+ // This store is guaranteed to overwrite the previous store.
+ prev->DeleteAndReplaceWith(NULL);
+ TRACE(("++ Unobserved store S%d overwritten by S%d\n",
+ prev->id(), store->id()));
+ unobserved_.Remove(i);
+ } else {
+ // TODO(titzer): remove map word clearing from folded allocations.
+ i++;
+ }
+ }
+ // Only non-transitioning stores are removable.
+ if (!store->has_transition()) {
+ TRACE(("-- Might remove store S%d\n", store->id()));
+ unobserved_.Add(store, zone());
+ }
+}
+
+
+void HStoreEliminationPhase::ProcessLoad(HLoadNamedField* load) {
+ HValue* object = load->object()->ActualValue();
+ int i = 0;
+ while (i < unobserved_.length()) {
+ HStoreNamedField* prev = unobserved_.at(i);
+ if (aliasing_->MayAlias(object, prev->object()->ActualValue()) &&
+ load->access().Equals(prev->access())) {
+ TRACE(("-- Observed store S%d by load L%d\n", prev->id(), load->id()));
+ unobserved_.Remove(i);
+ } else {
+ i++;
+ }
+ }
+}
+
+
+void HStoreEliminationPhase::ProcessInstr(HInstruction* instr,
+ GVNFlagSet flags) {
+ if (unobserved_.length() == 0) return; // Nothing to do.
+ if (instr->CanDeoptimize()) {
+ TRACE(("-- Observed stores at I%d (might deoptimize)\n", instr->id()));
+ unobserved_.Rewind(0);
+ return;
+ }
+ if (instr->CheckChangesFlag(kNewSpacePromotion)) {
+ TRACE(("-- Observed stores at I%d (might GC)\n", instr->id()));
+ unobserved_.Rewind(0);
+ return;
+ }
+ if (instr->ChangesFlags().ContainsAnyOf(flags)) {
+ TRACE(("-- Observed stores at I%d (GVN flags)\n", instr->id()));
+ unobserved_.Rewind(0);
+ return;
+ }
+}
+
+} } // namespace v8::internal
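
Editor's note: the new pass above is purely block-local. A store sits in the unobserved_ list until a later store to the same object and field overwrites it (making it dead) or until a load, a deopt point, or a GC-triggering instruction observes it. Below is a compact standalone model of that logic on a toy instruction list; the Instr/Kind types are invented, and plain equality stands in for V8's MustAlias/MayAlias and access-equality checks.

// Illustrative only: block-local dead-store removal in the spirit of
// HStoreEliminationPhase::Run() above. Not V8 code.
#include <cstdio>
#include <vector>

enum class Kind { Store, Load, SideEffect };

struct Instr {
  Instr(Kind k, int obj, int fld) : kind(k), object(obj), field(fld) {}
  Kind kind;
  int object;        // stands in for object()->ActualValue()
  int field;         // stands in for the HObjectAccess
  bool dead = false;
};

void EliminateDeadStores(std::vector<Instr>& block) {
  std::vector<Instr*> unobserved;
  for (Instr& instr : block) {
    switch (instr.kind) {
      case Kind::Store:
        // A later store to the same object+field overwrites earlier ones.
        for (size_t i = 0; i < unobserved.size();) {
          if (unobserved[i]->object == instr.object &&
              unobserved[i]->field == instr.field) {
            unobserved[i]->dead = true;
            unobserved.erase(unobserved.begin() + i);
          } else {
            ++i;
          }
        }
        unobserved.push_back(&instr);
        break;
      case Kind::Load:
        // A load of the same object+field observes the pending store.
        for (size_t i = 0; i < unobserved.size();) {
          if (unobserved[i]->object == instr.object &&
              unobserved[i]->field == instr.field) {
            unobserved.erase(unobserved.begin() + i);
          } else {
            ++i;
          }
        }
        break;
      case Kind::SideEffect:
        // Deopt points, GC, or aliasing writes observe everything.
        unobserved.clear();
        break;
    }
  }
}

int main() {
  std::vector<Instr> block = {
      {Kind::Store, 1, 0},   // overwritten below without being read: dead
      {Kind::Store, 1, 0},
      {Kind::Load, 1, 0},
  };
  EliminateDeadStores(block);
  std::printf("first store dead: %d\n", block[0].dead);
  return 0;
}
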
diff --git a/deps/v8/src/hydrogen-minus-zero.h b/deps/v8/src/hydrogen-store-elimination.h
index d23ec1196b..7dc871c9ba 100644
--- a/deps/v8/src/hydrogen-minus-zero.h
+++ b/deps/v8/src/hydrogen-store-elimination.h
@@ -25,32 +25,33 @@
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-#ifndef V8_HYDROGEN_MINUS_ZERO_H_
-#define V8_HYDROGEN_MINUS_ZERO_H_
+#ifndef V8_HYDROGEN_STORE_ELIMINATION_H_
+#define V8_HYDROGEN_STORE_ELIMINATION_H_
#include "hydrogen.h"
+#include "hydrogen-alias-analysis.h"
namespace v8 {
namespace internal {
-
-class HComputeMinusZeroChecksPhase : public HPhase {
+class HStoreEliminationPhase : public HPhase {
public:
- explicit HComputeMinusZeroChecksPhase(HGraph* graph)
- : HPhase("H_Compute minus zero checks", graph),
- visited_(graph->GetMaximumValueID(), zone()) { }
+ explicit HStoreEliminationPhase(HGraph* graph)
+ : HPhase("H_Store elimination", graph),
+ unobserved_(10, zone()),
+ aliasing_() { }
void Run();
-
private:
- void PropagateMinusZeroChecks(HValue* value);
-
- BitVector visited_;
+ ZoneList<HStoreNamedField*> unobserved_;
+ HAliasAnalyzer* aliasing_;
- DISALLOW_COPY_AND_ASSIGN(HComputeMinusZeroChecksPhase);
+ void ProcessStore(HStoreNamedField* store);
+ void ProcessLoad(HLoadNamedField* load);
+ void ProcessInstr(HInstruction* instr, GVNFlagSet flags);
};
} } // namespace v8::internal
-#endif // V8_HYDROGEN_MINUS_ZERO_H_
+#endif
diff --git a/deps/v8/src/hydrogen.cc b/deps/v8/src/hydrogen.cc
index 16096ccf9f..a7ef0cbd05 100644
--- a/deps/v8/src/hydrogen.cc
+++ b/deps/v8/src/hydrogen.cc
@@ -48,13 +48,13 @@
#include "hydrogen-gvn.h"
#include "hydrogen-mark-deoptimize.h"
#include "hydrogen-mark-unreachable.h"
-#include "hydrogen-minus-zero.h"
#include "hydrogen-osr.h"
#include "hydrogen-range-analysis.h"
#include "hydrogen-redundant-phi.h"
#include "hydrogen-removable-simulates.h"
#include "hydrogen-representation-changes.h"
#include "hydrogen-sce.h"
+#include "hydrogen-store-elimination.h"
#include "hydrogen-uint32-analysis.h"
#include "lithium-allocator.h"
#include "parser.h"
@@ -68,6 +68,8 @@
#include "ia32/lithium-codegen-ia32.h"
#elif V8_TARGET_ARCH_X64
#include "x64/lithium-codegen-x64.h"
+#elif V8_TARGET_ARCH_ARM64
+#include "arm64/lithium-codegen-arm64.h"
#elif V8_TARGET_ARCH_ARM
#include "arm/lithium-codegen-arm.h"
#elif V8_TARGET_ARCH_MIPS
@@ -141,12 +143,13 @@ void HBasicBlock::RemovePhi(HPhi* phi) {
}
-void HBasicBlock::AddInstruction(HInstruction* instr, int position) {
+void HBasicBlock::AddInstruction(HInstruction* instr,
+ HSourcePosition position) {
ASSERT(!IsStartBlock() || !IsFinished());
ASSERT(!instr->IsLinked());
ASSERT(!IsFinished());
- if (position != RelocInfo::kNoPosition) {
+ if (!position.IsUnknown()) {
instr->set_position(position);
}
if (first_ == NULL) {
@@ -154,10 +157,10 @@ void HBasicBlock::AddInstruction(HInstruction* instr, int position) {
ASSERT(!last_environment()->ast_id().IsNone());
HBlockEntry* entry = new(zone()) HBlockEntry();
entry->InitializeAsFirst(this);
- if (position != RelocInfo::kNoPosition) {
+ if (!position.IsUnknown()) {
entry->set_position(position);
} else {
- ASSERT(!FLAG_emit_opt_code_positions ||
+ ASSERT(!FLAG_hydrogen_track_positions ||
!graph()->info()->IsOptimizing());
}
first_ = last_ = entry;
@@ -210,7 +213,7 @@ HSimulate* HBasicBlock::CreateSimulate(BailoutId ast_id,
}
-void HBasicBlock::Finish(HControlInstruction* end, int position) {
+void HBasicBlock::Finish(HControlInstruction* end, HSourcePosition position) {
ASSERT(!IsFinished());
AddInstruction(end, position);
end_ = end;
@@ -221,7 +224,7 @@ void HBasicBlock::Finish(HControlInstruction* end, int position) {
void HBasicBlock::Goto(HBasicBlock* block,
- int position,
+ HSourcePosition position,
FunctionState* state,
bool add_simulate) {
bool drop_extra = state != NULL &&
@@ -244,7 +247,7 @@ void HBasicBlock::Goto(HBasicBlock* block,
void HBasicBlock::AddLeaveInlined(HValue* return_value,
FunctionState* state,
- int position) {
+ HSourcePosition position) {
HBasicBlock* target = state->function_return();
bool drop_extra = state->inlining_kind() == NORMAL_RETURN;
@@ -337,6 +340,15 @@ void HBasicBlock::PostProcessLoopHeader(IterationStatement* stmt) {
}
+void HBasicBlock::MarkSuccEdgeUnreachable(int succ) {
+ ASSERT(IsFinished());
+ HBasicBlock* succ_block = end()->SuccessorAt(succ);
+
+ ASSERT(succ_block->predecessors()->length() == 1);
+ succ_block->MarkUnreachable();
+}
+
+
void HBasicBlock::RegisterPredecessor(HBasicBlock* pred) {
if (HasPredecessor()) {
// Only loop header blocks can have a predecessor added after
@@ -696,10 +708,10 @@ HConstant* HGraph::GetConstant##Name() { \
Unique<Object>::CreateImmovable(isolate()->factory()->name##_value()), \
Representation::Tagged(), \
htype, \
- false, \
true, \
+ boolean_value, \
false, \
- boolean_value); \
+ ODDBALL_TYPE); \
constant->InsertAfter(entry_block()->first()); \
constant_##name##_.set(constant); \
} \
@@ -1032,9 +1044,9 @@ void HGraphBuilder::IfBuilder::End() {
current = merge_at_join_blocks_;
while (current != NULL) {
if (current->deopt_ && current->block_ != NULL) {
- builder_->PadEnvironmentForContinuation(current->block_,
- merge_block);
- builder_->GotoNoSimulate(current->block_, merge_block);
+ current->block_->FinishExit(
+ HAbnormalExit::New(builder_->zone(), NULL),
+ HSourcePosition::Unknown());
}
current = current->next_;
}
@@ -1167,9 +1179,10 @@ HGraph* HGraphBuilder::CreateGraph() {
HInstruction* HGraphBuilder::AddInstruction(HInstruction* instr) {
ASSERT(current_block() != NULL);
- ASSERT(!FLAG_emit_opt_code_positions ||
- position_ != RelocInfo::kNoPosition || !info_->IsOptimizing());
- current_block()->AddInstruction(instr, position_);
+ ASSERT(!FLAG_hydrogen_track_positions ||
+ !position_.IsUnknown() ||
+ !info_->IsOptimizing());
+ current_block()->AddInstruction(instr, source_position());
if (graph()->IsInsideNoSideEffectsScope()) {
instr->SetFlag(HValue::kHasNoObservableSideEffects);
}
@@ -1178,9 +1191,10 @@ HInstruction* HGraphBuilder::AddInstruction(HInstruction* instr) {
void HGraphBuilder::FinishCurrentBlock(HControlInstruction* last) {
- ASSERT(!FLAG_emit_opt_code_positions || !info_->IsOptimizing() ||
- position_ != RelocInfo::kNoPosition);
- current_block()->Finish(last, position_);
+ ASSERT(!FLAG_hydrogen_track_positions ||
+ !info_->IsOptimizing() ||
+ !position_.IsUnknown());
+ current_block()->Finish(last, source_position());
if (last->IsReturn() || last->IsAbnormalExit()) {
set_current_block(NULL);
}
@@ -1188,9 +1202,9 @@ void HGraphBuilder::FinishCurrentBlock(HControlInstruction* last) {
void HGraphBuilder::FinishExitCurrentBlock(HControlInstruction* instruction) {
- ASSERT(!FLAG_emit_opt_code_positions || !info_->IsOptimizing() ||
- position_ != RelocInfo::kNoPosition);
- current_block()->FinishExit(instruction, position_);
+ ASSERT(!FLAG_hydrogen_track_positions || !info_->IsOptimizing() ||
+ !position_.IsUnknown());
+ current_block()->FinishExit(instruction, source_position());
if (instruction->IsReturn() || instruction->IsAbnormalExit()) {
set_current_block(NULL);
}
@@ -1214,7 +1228,7 @@ void HGraphBuilder::AddSimulate(BailoutId id,
RemovableSimulate removable) {
ASSERT(current_block() != NULL);
ASSERT(!graph()->IsInsideNoSideEffectsScope());
- current_block()->AddNewSimulate(id, position_, removable);
+ current_block()->AddNewSimulate(id, source_position(), removable);
}
@@ -1240,38 +1254,9 @@ HValue* HGraphBuilder::BuildCheckHeapObject(HValue* obj) {
}
-void HGraphBuilder::FinishExitWithHardDeoptimization(
- const char* reason, HBasicBlock* continuation) {
- PadEnvironmentForContinuation(current_block(), continuation);
+void HGraphBuilder::FinishExitWithHardDeoptimization(const char* reason) {
Add<HDeoptimize>(reason, Deoptimizer::EAGER);
- if (graph()->IsInsideNoSideEffectsScope()) {
- GotoNoSimulate(continuation);
- } else {
- Goto(continuation);
- }
-}
-
-
-void HGraphBuilder::PadEnvironmentForContinuation(
- HBasicBlock* from,
- HBasicBlock* continuation) {
- if (continuation->last_environment() != NULL) {
- // When merging from a deopt block to a continuation, resolve differences in
- // environment by pushing constant 0 and popping extra values so that the
- // environments match during the join. Push 0 since it has the most specific
- // representation, and will not influence representation inference of the
- // phi.
- int continuation_env_length = continuation->last_environment()->length();
- while (continuation_env_length != from->last_environment()->length()) {
- if (continuation_env_length > from->last_environment()->length()) {
- from->last_environment()->Push(graph()->GetConstant0());
- } else {
- from->last_environment()->Pop();
- }
- }
- } else {
- ASSERT(continuation->predecessors()->length() == 0);
- }
+ FinishExitCurrentBlock(New<HAbnormalExit>());
}
@@ -1298,19 +1283,20 @@ HValue* HGraphBuilder::BuildWrapReceiver(HValue* object, HValue* function) {
Handle<JSFunction> f = Handle<JSFunction>::cast(
HConstant::cast(function)->handle(isolate()));
SharedFunctionInfo* shared = f->shared();
- if (!shared->is_classic_mode() || shared->native()) return object;
+ if (shared->strict_mode() == STRICT || shared->native()) return object;
}
return Add<HWrapReceiver>(object, function);
}
-HValue* HGraphBuilder::BuildCheckForCapacityGrow(HValue* object,
- HValue* elements,
- ElementsKind kind,
- HValue* length,
- HValue* key,
- bool is_js_array,
- bool is_store) {
+HValue* HGraphBuilder::BuildCheckForCapacityGrow(
+ HValue* object,
+ HValue* elements,
+ ElementsKind kind,
+ HValue* length,
+ HValue* key,
+ bool is_js_array,
+ PropertyAccessType access_type) {
IfBuilder length_checker(this);
Token::Value token = IsHoleyElementsKind(kind) ? Token::GTE : Token::EQ;
@@ -1353,7 +1339,7 @@ HValue* HGraphBuilder::BuildCheckForCapacityGrow(HValue* object,
new_length);
}
- if (is_store && kind == FAST_SMI_ELEMENTS) {
+ if (access_type == STORE && kind == FAST_SMI_ELEMENTS) {
HValue* checked_elements = environment()->Top();
// Write zero to ensure that the new element is initialized with some smi.
@@ -1464,7 +1450,7 @@ HValue* HGraphBuilder::BuildUncheckedDictionaryElementLoadHelper(
HValue* candidate_key = Add<HLoadKeyed>(elements, key_index,
static_cast<HValue*>(NULL),
- FAST_SMI_ELEMENTS);
+ FAST_ELEMENTS);
IfBuilder key_compare(this);
key_compare.IfNot<HCompareObjectEqAndBranch>(key, candidate_key);
@@ -1490,7 +1476,7 @@ HValue* HGraphBuilder::BuildUncheckedDictionaryElementLoadHelper(
HValue* details = Add<HLoadKeyed>(elements, details_index,
static_cast<HValue*>(NULL),
- FAST_SMI_ELEMENTS);
+ FAST_ELEMENTS);
IfBuilder details_compare(this);
details_compare.If<HCompareNumericAndBranch>(details,
graph()->GetConstant0(),
@@ -1560,7 +1546,7 @@ HValue* HGraphBuilder::BuildUncheckedDictionaryElementLoad(HValue* receiver,
elements,
Add<HConstant>(NameDictionary::kCapacityIndex),
static_cast<HValue*>(NULL),
- FAST_SMI_ELEMENTS);
+ FAST_ELEMENTS);
HValue* mask = AddUncasted<HSub>(capacity, graph()->GetConstant1());
mask->ChangeRepresentation(Representation::Integer32());
@@ -1689,7 +1675,7 @@ HValue* HGraphBuilder::BuildNumberToString(HValue* object, Type* type) {
}
if_objectissmi.Else();
{
- if (type->Is(Type::Smi())) {
+ if (type->Is(Type::SignedSmall())) {
if_objectissmi.Deopt("Expected smi");
} else {
// Check if the object is a heap number.
@@ -1768,7 +1754,7 @@ HValue* HGraphBuilder::BuildNumberToString(HValue* object, Type* type) {
Add<HPushArgument>(object);
Push(Add<HCallRuntime>(
isolate()->factory()->empty_string(),
- Runtime::FunctionForId(Runtime::kNumberToStringSkipCache),
+ Runtime::FunctionForId(Runtime::kHiddenNumberToStringSkipCache),
1));
}
if_found.End();
@@ -1806,19 +1792,10 @@ HAllocate* HGraphBuilder::BuildAllocate(
HValue* HGraphBuilder::BuildAddStringLengths(HValue* left_length,
HValue* right_length) {
- // Compute the combined string length. If the result is larger than the max
- // supported string length, we bailout to the runtime. This is done implicitly
- // when converting the result back to a smi in case the max string length
- // equals the max smi value. Otherwise, for platforms with 32-bit smis, we do
+ // Compute the combined string length and check against max string length.
HValue* length = AddUncasted<HAdd>(left_length, right_length);
- STATIC_ASSERT(String::kMaxLength <= Smi::kMaxValue);
- if (String::kMaxLength != Smi::kMaxValue) {
- IfBuilder if_nooverflow(this);
- if_nooverflow.If<HCompareNumericAndBranch>(
- length, Add<HConstant>(String::kMaxLength), Token::LTE);
- if_nooverflow.Then();
- if_nooverflow.ElseDeopt("String length exceeds limit");
- }
+ HValue* max_length = Add<HConstant>(String::kMaxLength);
+ Add<HBoundsCheck>(length, max_length);
return length;
}
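
Editor's note: the hunk above drops the conditional overflow handling and always emits a bounds check of the combined length against String::kMaxLength. A plain-integer sketch of that shape follows; the limit constant is an arbitrary stand-in, not V8's actual String::kMaxLength.

// Illustrative only: combined-length check in the spirit of the hunk above.
#include <cstdio>
#include <stdexcept>

constexpr int kMaxLength = 1 << 28;  // stand-in value for the example

int AddStringLengths(int left, int right) {
  int length = left + right;
  if (length < 0 || length > kMaxLength) {  // plays the role of HBoundsCheck
    throw std::length_error("combined string length exceeds limit");
  }
  return length;
}

int main() {
  std::printf("%d\n", AddStringLengths(3, 4));
  return 0;
}
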
@@ -1927,6 +1904,19 @@ void HGraphBuilder::BuildCopySeqStringChars(HValue* src,
}
+HValue* HGraphBuilder::BuildObjectSizeAlignment(
+ HValue* unaligned_size, int header_size) {
+ ASSERT((header_size & kObjectAlignmentMask) == 0);
+ HValue* size = AddUncasted<HAdd>(
+ unaligned_size, Add<HConstant>(static_cast<int32_t>(
+ header_size + kObjectAlignmentMask)));
+ size->ClearFlag(HValue::kCanOverflow);
+ return AddUncasted<HBitwise>(
+ Token::BIT_AND, size, Add<HConstant>(static_cast<int32_t>(
+ ~kObjectAlignmentMask)));
+}
+
+
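
Editor's note: BuildObjectSizeAlignment factors out the usual round-up-to-alignment pattern used by the string allocation below: add the header plus (alignment - 1), then mask off the low bits. Here is the same arithmetic worked on plain integers; the 8-byte alignment and 16-byte header are assumptions for the example, not V8's constants on every platform.

// Illustrative only: the rounding arithmetic factored out above.
#include <cstdint>
#include <cstdio>

constexpr uint32_t kAlignment = 8;
constexpr uint32_t kAlignmentMask = kAlignment - 1;
constexpr uint32_t kHeaderSize = 16;  // must itself be aligned

uint32_t AlignedObjectSize(uint32_t unaligned_payload) {
  // (payload + header + mask) & ~mask rounds up to the next multiple of 8.
  return (unaligned_payload + kHeaderSize + kAlignmentMask) & ~kAlignmentMask;
}

int main() {
  static_assert((kHeaderSize & kAlignmentMask) == 0, "header must be aligned");
  // 13 payload bytes + 16 header bytes = 29, rounded up to 32.
  std::printf("%u\n", AlignedObjectSize(13));
  return 0;
}
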
HValue* HGraphBuilder::BuildUncheckedStringAdd(
HValue* left,
HValue* right,
@@ -2027,13 +2017,7 @@ HValue* HGraphBuilder::BuildUncheckedStringAdd(
// Calculate the number of bytes needed for the characters in the
// string while observing object alignment.
STATIC_ASSERT((SeqString::kHeaderSize & kObjectAlignmentMask) == 0);
- HValue* size = Pop();
- size = AddUncasted<HAdd>(size, Add<HConstant>(static_cast<int32_t>(
- SeqString::kHeaderSize + kObjectAlignmentMask)));
- size->ClearFlag(HValue::kCanOverflow);
- size = AddUncasted<HBitwise>(
- Token::BIT_AND, size, Add<HConstant>(static_cast<int32_t>(
- ~kObjectAlignmentMask)));
+ HValue* size = BuildObjectSizeAlignment(Pop(), SeqString::kHeaderSize);
// Allocate the string object. HAllocate does not care whether we pass
// STRING_TYPE or ASCII_STRING_TYPE here, so we just use STRING_TYPE here.
@@ -2092,9 +2076,10 @@ HValue* HGraphBuilder::BuildUncheckedStringAdd(
// Fallback to the runtime to add the two strings.
Add<HPushArgument>(left);
Add<HPushArgument>(right);
- Push(Add<HCallRuntime>(isolate()->factory()->empty_string(),
- Runtime::FunctionForId(Runtime::kStringAdd),
- 2));
+ Push(Add<HCallRuntime>(
+ isolate()->factory()->empty_string(),
+ Runtime::FunctionForId(Runtime::kHiddenStringAdd),
+ 2));
}
if_sameencodingandsequential.End();
}
@@ -2159,7 +2144,7 @@ HInstruction* HGraphBuilder::BuildUncheckedMonomorphicElementAccess(
HValue* val,
bool is_js_array,
ElementsKind elements_kind,
- bool is_store,
+ PropertyAccessType access_type,
LoadKeyedHoleMode load_mode,
KeyedAccessStoreMode store_mode) {
ASSERT((!IsExternalArrayElementsKind(elements_kind) &&
@@ -2172,18 +2157,18 @@ HInstruction* HGraphBuilder::BuildUncheckedMonomorphicElementAccess(
// for FAST_ELEMENTS, since a transition to HOLEY elements won't change the
// generated store code.
if ((elements_kind == FAST_HOLEY_ELEMENTS) ||
- (elements_kind == FAST_ELEMENTS && is_store)) {
- checked_object->ClearGVNFlag(kDependsOnElementsKind);
+ (elements_kind == FAST_ELEMENTS && access_type == STORE)) {
+ checked_object->ClearDependsOnFlag(kElementsKind);
}
bool fast_smi_only_elements = IsFastSmiElementsKind(elements_kind);
bool fast_elements = IsFastObjectElementsKind(elements_kind);
HValue* elements = AddLoadElements(checked_object);
- if (is_store && (fast_elements || fast_smi_only_elements) &&
+ if (access_type == STORE && (fast_elements || fast_smi_only_elements) &&
store_mode != STORE_NO_TRANSITION_HANDLE_COW) {
HCheckMaps* check_cow_map = Add<HCheckMaps>(
elements, isolate()->factory()->fixed_array_map(), top_info());
- check_cow_map->ClearGVNFlag(kDependsOnElementsKind);
+ check_cow_map->ClearDependsOnFlag(kElementsKind);
}
HInstruction* length = NULL;
if (is_js_array) {
@@ -2215,7 +2200,7 @@ HInstruction* HGraphBuilder::BuildUncheckedMonomorphicElementAccess(
key, graph()->GetConstant0(), Token::GTE);
negative_checker.Then();
HInstruction* result = AddElementAccess(
- backing_store, key, val, bounds_check, elements_kind, is_store);
+ backing_store, key, val, bounds_check, elements_kind, access_type);
negative_checker.ElseDeopt("Negative key encountered");
negative_checker.End();
length_checker.End();
@@ -2225,7 +2210,7 @@ HInstruction* HGraphBuilder::BuildUncheckedMonomorphicElementAccess(
checked_key = Add<HBoundsCheck>(key, length);
return AddElementAccess(
backing_store, checked_key, val,
- checked_object, elements_kind, is_store);
+ checked_object, elements_kind, access_type);
}
}
ASSERT(fast_smi_only_elements ||
@@ -2235,7 +2220,7 @@ HInstruction* HGraphBuilder::BuildUncheckedMonomorphicElementAccess(
// In case val is stored into a fast smi array, assure that the value is a smi
// before manipulating the backing store. Otherwise the actual store may
// deopt, leaving the backing store in an invalid state.
- if (is_store && IsFastSmiElementsKind(elements_kind) &&
+ if (access_type == STORE && IsFastSmiElementsKind(elements_kind) &&
!val->type().IsSmi()) {
val = AddUncasted<HForceRepresentation>(val, Representation::Smi());
}
@@ -2244,12 +2229,12 @@ HInstruction* HGraphBuilder::BuildUncheckedMonomorphicElementAccess(
NoObservableSideEffectsScope no_effects(this);
elements = BuildCheckForCapacityGrow(checked_object, elements,
elements_kind, length, key,
- is_js_array, is_store);
+ is_js_array, access_type);
checked_key = key;
} else {
checked_key = Add<HBoundsCheck>(key, length);
- if (is_store && (fast_elements || fast_smi_only_elements)) {
+ if (access_type == STORE && (fast_elements || fast_smi_only_elements)) {
if (store_mode == STORE_NO_TRANSITION_HANDLE_COW) {
NoObservableSideEffectsScope no_effects(this);
elements = BuildCopyElementsOnWrite(checked_object, elements,
@@ -2257,12 +2242,12 @@ HInstruction* HGraphBuilder::BuildUncheckedMonomorphicElementAccess(
} else {
HCheckMaps* check_cow_map = Add<HCheckMaps>(
elements, isolate()->factory()->fixed_array_map(), top_info());
- check_cow_map->ClearGVNFlag(kDependsOnElementsKind);
+ check_cow_map->ClearDependsOnFlag(kElementsKind);
}
}
}
return AddElementAccess(elements, checked_key, val, checked_object,
- elements_kind, is_store, load_mode);
+ elements_kind, access_type, load_mode);
}
@@ -2333,7 +2318,7 @@ HValue* HGraphBuilder::BuildAllocateElements(ElementsKind kind,
PretenureFlag pretenure_flag = !FLAG_allocation_site_pretenuring ?
isolate()->heap()->GetPretenureMode() : NOT_TENURED;
- return Add<HAllocate>(total_size, HType::JSArray(), pretenure_flag,
+ return Add<HAllocate>(total_size, HType::Tagged(), pretenure_flag,
instance_type);
}
@@ -2404,9 +2389,9 @@ HInstruction* HGraphBuilder::AddElementAccess(
HValue* val,
HValue* dependency,
ElementsKind elements_kind,
- bool is_store,
+ PropertyAccessType access_type,
LoadKeyedHoleMode load_mode) {
- if (is_store) {
+ if (access_type == STORE) {
ASSERT(val != NULL);
if (elements_kind == EXTERNAL_UINT8_CLAMPED_ELEMENTS ||
elements_kind == UINT8_CLAMPED_ELEMENTS) {
@@ -2418,7 +2403,7 @@ HInstruction* HGraphBuilder::AddElementAccess(
: INITIALIZING_STORE);
}
- ASSERT(!is_store);
+ ASSERT(access_type == LOAD);
ASSERT(val == NULL);
HLoadKeyed* load = Add<HLoadKeyed>(
elements, checked_key, dependency, elements_kind, load_mode);
@@ -2639,11 +2624,11 @@ HValue* HGraphBuilder::BuildCloneShallowArray(HValue* boilerplate,
HValue* object_elements;
if (IsFastDoubleElementsKind(kind)) {
HValue* elems_size = Add<HConstant>(FixedDoubleArray::SizeFor(length));
- object_elements = Add<HAllocate>(elems_size, HType::JSArray(),
+ object_elements = Add<HAllocate>(elems_size, HType::Tagged(),
NOT_TENURED, FIXED_DOUBLE_ARRAY_TYPE);
} else {
HValue* elems_size = Add<HConstant>(FixedArray::SizeFor(length));
- object_elements = Add<HAllocate>(elems_size, HType::JSArray(),
+ object_elements = Add<HAllocate>(elems_size, HType::Tagged(),
NOT_TENURED, FIXED_ARRAY_TYPE);
}
Add<HStoreNamedField>(object, HObjectAccess::ForElementsPointer(),
@@ -2834,7 +2819,8 @@ HValue* HGraphBuilder::JSArrayBuilder::EmitMapCode() {
// No need for a context lookup if the kind_ matches the initial
// map, because we can just load the map in that case.
HObjectAccess access = HObjectAccess::ForPrototypeOrInitialMap();
- return builder()->AddLoadNamedField(constructor_function_, access);
+ return builder()->Add<HLoadNamedField>(
+ constructor_function_, static_cast<HValue*>(NULL), access);
}
// TODO(mvstanton): we should always have a constructor function if we
@@ -2859,7 +2845,8 @@ HValue* HGraphBuilder::JSArrayBuilder::EmitMapCode() {
HValue* HGraphBuilder::JSArrayBuilder::EmitInternalMapCode() {
// Find the map near the constructor function
HObjectAccess access = HObjectAccess::ForPrototypeOrInitialMap();
- return builder()->AddLoadNamedField(constructor_function_, access);
+ return builder()->Add<HLoadNamedField>(
+ constructor_function_, static_cast<HValue*>(NULL), access);
}
@@ -2993,7 +2980,7 @@ HValue* HGraphBuilder::AddLoadJSBuiltin(Builtins::JavaScript builtin) {
HOptimizedGraphBuilder::HOptimizedGraphBuilder(CompilationInfo* info)
: HGraphBuilder(info),
function_state_(NULL),
- initial_function_state_(this, info, NORMAL_RETURN),
+ initial_function_state_(this, info, NORMAL_RETURN, 0),
ast_context_(NULL),
break_scope_(NULL),
inlined_count_(0),
@@ -3005,7 +2992,7 @@ HOptimizedGraphBuilder::HOptimizedGraphBuilder(CompilationInfo* info)
// to know it's the initial state.
function_state_= &initial_function_state_;
InitializeAstVisitor(info->zone());
- if (FLAG_emit_opt_code_positions) {
+ if (FLAG_hydrogen_track_positions) {
SetSourcePosition(info->shared_info()->start_position());
}
}
@@ -3074,7 +3061,8 @@ HBasicBlock* HOptimizedGraphBuilder::BuildLoopEntry(
}
-void HBasicBlock::FinishExit(HControlInstruction* instruction, int position) {
+void HBasicBlock::FinishExit(HControlInstruction* instruction,
+ HSourcePosition position) {
Finish(instruction, position);
ClearEnvironment();
}
@@ -3097,7 +3085,9 @@ HGraph::HGraph(CompilationInfo* info)
type_change_checksum_(0),
maximum_environment_size_(0),
no_side_effects_scope_count_(0),
- disallow_adding_new_values_(false) {
+ disallow_adding_new_values_(false),
+ next_inline_id_(0),
+ inlined_functions_(5, info->zone()) {
if (info->IsStub()) {
HydrogenCodeStub* stub = info->code_stub();
CodeStubInterfaceDescriptor* descriptor =
@@ -3105,6 +3095,7 @@ HGraph::HGraph(CompilationInfo* info)
start_environment_ =
new(zone_) HEnvironment(zone_, descriptor->environment_length());
} else {
+ TraceInlinedFunction(info->shared_info(), HSourcePosition::Unknown());
start_environment_ =
new(zone_) HEnvironment(NULL, info->scope(), info->closure(), zone_);
}
@@ -3132,6 +3123,81 @@ void HGraph::FinalizeUniqueness() {
}
+int HGraph::TraceInlinedFunction(
+ Handle<SharedFunctionInfo> shared,
+ HSourcePosition position) {
+ if (!FLAG_hydrogen_track_positions) {
+ return 0;
+ }
+
+ int id = 0;
+ for (; id < inlined_functions_.length(); id++) {
+ if (inlined_functions_[id].shared().is_identical_to(shared)) {
+ break;
+ }
+ }
+
+ if (id == inlined_functions_.length()) {
+ inlined_functions_.Add(InlinedFunctionInfo(shared), zone());
+
+ if (!shared->script()->IsUndefined()) {
+ Handle<Script> script(Script::cast(shared->script()));
+ if (!script->source()->IsUndefined()) {
+ CodeTracer::Scope tracing_scope(isolate()->GetCodeTracer());
+ PrintF(tracing_scope.file(),
+ "--- FUNCTION SOURCE (%s) id{%d,%d} ---\n",
+ shared->DebugName()->ToCString().get(),
+ info()->optimization_id(),
+ id);
+
+ {
+ ConsStringIteratorOp op;
+ StringCharacterStream stream(String::cast(script->source()),
+ &op,
+ shared->start_position());
+ // fun->end_position() points to the last character in the stream. We
+ // need to compensate by adding one to calculate the length.
+ int source_len =
+ shared->end_position() - shared->start_position() + 1;
+ for (int i = 0; i < source_len; i++) {
+ if (stream.HasMore()) {
+ PrintF(tracing_scope.file(), "%c", stream.GetNext());
+ }
+ }
+ }
+
+ PrintF(tracing_scope.file(), "\n--- END ---\n");
+ }
+ }
+ }
+
+ int inline_id = next_inline_id_++;
+
+ if (inline_id != 0) {
+ CodeTracer::Scope tracing_scope(isolate()->GetCodeTracer());
+ PrintF(tracing_scope.file(), "INLINE (%s) id{%d,%d} AS %d AT ",
+ shared->DebugName()->ToCString().get(),
+ info()->optimization_id(),
+ id,
+ inline_id);
+ position.PrintTo(tracing_scope.file());
+ PrintF(tracing_scope.file(), "\n");
+ }
+
+ return inline_id;
+}
+
+
+int HGraph::SourcePositionToScriptPosition(HSourcePosition pos) {
+ if (!FLAG_hydrogen_track_positions || pos.IsUnknown()) {
+ return pos.raw();
+ }
+
+ return inlined_functions_[pos.inlining_id()].start_position() +
+ pos.position();
+}
+
+
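
Editor's note: SourcePositionToScriptPosition resolves a tracked position back to an absolute script offset by adding the recorded start position of the function the position's inlining id points into. A small standalone sketch of that lookup follows; the SourcePos struct and the table are invented for illustration (V8 keeps these fields inside HSourcePosition and inlined_functions_).

// Illustrative only: mapping (inlining id, offset-in-function) back to an
// absolute script position, as SourcePositionToScriptPosition does above.
#include <cstdio>
#include <vector>

struct SourcePos {
  int inlining_id;  // index into the inlined-functions table
  int position;     // offset relative to that function's start
};

struct InlinedFunctionInfo {
  int start_position;  // where the function's source starts in the script
};

int ToScriptPosition(const std::vector<InlinedFunctionInfo>& inlined,
                     SourcePos pos) {
  return inlined[pos.inlining_id].start_position + pos.position;
}

int main() {
  // Function 0 is the outermost function; function 1 was inlined into it
  // and starts at script offset 120 (made-up numbers).
  std::vector<InlinedFunctionInfo> inlined = {{0}, {120}};
  SourcePos pos{1, 7};  // 7 characters into the inlined function
  std::printf("script position: %d\n", ToScriptPosition(inlined, pos));
  return 0;
}
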
// Block ordering was implemented with two mutually recursive methods,
// HGraph::Postorder and HGraph::PostorderLoopBlocks.
// The recursion could lead to stack overflow so the algorithm has been
@@ -3510,7 +3576,8 @@ void HGraph::CollectPhis() {
// a (possibly inlined) function.
FunctionState::FunctionState(HOptimizedGraphBuilder* owner,
CompilationInfo* info,
- InliningKind inlining_kind)
+ InliningKind inlining_kind,
+ int inlining_id)
: owner_(owner),
compilation_info_(info),
call_context_(NULL),
@@ -3520,6 +3587,8 @@ FunctionState::FunctionState(HOptimizedGraphBuilder* owner,
entry_(NULL),
arguments_object_(NULL),
arguments_elements_(NULL),
+ inlining_id_(inlining_id),
+ outer_source_position_(HSourcePosition::Unknown()),
outer_(owner->function_state()) {
if (outer_ != NULL) {
// State for an inline function.
@@ -3543,12 +3612,27 @@ FunctionState::FunctionState(HOptimizedGraphBuilder* owner,
// Push on the state stack.
owner->set_function_state(this);
+
+ if (FLAG_hydrogen_track_positions) {
+ outer_source_position_ = owner->source_position();
+ owner->EnterInlinedSource(
+ info->shared_info()->start_position(),
+ inlining_id);
+ owner->SetSourcePosition(info->shared_info()->start_position());
+ }
}
FunctionState::~FunctionState() {
delete test_context_;
owner_->set_function_state(outer_);
+
+ if (FLAG_hydrogen_track_positions) {
+ owner_->set_source_position(outer_source_position_);
+ owner_->EnterInlinedSource(
+ outer_->compilation_info()->shared_info()->start_position(),
+ outer_->inlining_id());
+ }
}
@@ -3807,7 +3891,6 @@ void HOptimizedGraphBuilder::VisitForTypeOf(Expression* expr) {
}
-
void HOptimizedGraphBuilder::VisitForControl(Expression* expr,
HBasicBlock* true_block,
HBasicBlock* false_block) {
@@ -3816,20 +3899,6 @@ void HOptimizedGraphBuilder::VisitForControl(Expression* expr,
}
-void HOptimizedGraphBuilder::VisitArgument(Expression* expr) {
- CHECK_ALIVE(VisitForValue(expr));
- Push(Add<HPushArgument>(Pop()));
-}
-
-
-void HOptimizedGraphBuilder::VisitArgumentList(
- ZoneList<Expression*>* arguments) {
- for (int i = 0; i < arguments->length(); i++) {
- CHECK_ALIVE(VisitArgument(arguments->at(i)));
- }
-}
-
-
void HOptimizedGraphBuilder::VisitExpressions(
ZoneList<Expression*>* exprs) {
for (int i = 0; i < exprs->length(); ++i) {
@@ -3979,10 +4048,11 @@ bool HGraph::Optimize(BailoutReason* bailout_reason) {
if (FLAG_check_elimination) Run<HCheckEliminationPhase>();
- if (FLAG_use_range) Run<HRangeAnalysisPhase>();
+ if (FLAG_store_elimination) Run<HStoreEliminationPhase>();
+
+ Run<HRangeAnalysisPhase>();
Run<HComputeChangeUndefinedToNaN>();
- Run<HComputeMinusZeroChecksPhase>();
// Eliminate redundant stack checks on backwards branches.
Run<HStackCheckEliminationPhase>();
@@ -4304,7 +4374,12 @@ void HOptimizedGraphBuilder::VisitReturnStatement(ReturnStatement* stmt) {
TestContext* test = TestContext::cast(context);
VisitForControl(stmt->expression(), test->if_true(), test->if_false());
} else if (context->IsEffect()) {
- CHECK_ALIVE(VisitForEffect(stmt->expression()));
+ // Visit in value context and ignore the result. This is needed to keep
+ // environment in sync with full-codegen since some visitors (e.g.
+ // VisitCountOperation) use the operand stack differently depending on
+ // context.
+ CHECK_ALIVE(VisitForValue(stmt->expression()));
+ Pop();
Goto(function_return(), state);
} else {
ASSERT(context->IsValue());
@@ -4361,8 +4436,10 @@ void HOptimizedGraphBuilder::VisitSwitchStatement(SwitchStatement* stmt) {
Type* combined_type = clause->compare_type();
HControlInstruction* compare = BuildCompareInstruction(
Token::EQ_STRICT, tag_value, label_value, tag_type, label_type,
- combined_type, stmt->tag()->position(), clause->label()->position(),
- clause->id());
+ combined_type,
+ ScriptPositionToSourcePosition(stmt->tag()->position()),
+ ScriptPositionToSourcePosition(clause->label()->position()),
+ PUSH_BEFORE_SIMULATE, clause->id());
HBasicBlock* next_test_block = graph()->CreateBasicBlock();
HBasicBlock* body_block = graph()->CreateBasicBlock();
@@ -4782,14 +4859,14 @@ void HOptimizedGraphBuilder::VisitConditional(Conditional* expr) {
HOptimizedGraphBuilder::GlobalPropertyAccess
HOptimizedGraphBuilder::LookupGlobalProperty(
- Variable* var, LookupResult* lookup, bool is_store) {
+ Variable* var, LookupResult* lookup, PropertyAccessType access_type) {
if (var->is_this() || !current_info()->has_global_object()) {
return kUseGeneric;
}
Handle<GlobalObject> global(current_info()->global_object());
global->Lookup(*var->name(), lookup);
if (!lookup->IsNormal() ||
- (is_store && lookup->IsReadOnly()) ||
+ (access_type == STORE && lookup->IsReadOnly()) ||
lookup->holder() != *global) {
return kUseGeneric;
}
@@ -4803,8 +4880,9 @@ HValue* HOptimizedGraphBuilder::BuildContextChainWalk(Variable* var) {
HValue* context = environment()->context();
int length = current_info()->scope()->ContextChainLength(var->scope());
while (length-- > 0) {
- context = AddLoadNamedField(
- context, HObjectAccess::ForContextSlot(Context::PREVIOUS_INDEX));
+ context = Add<HLoadNamedField>(
+ context, static_cast<HValue*>(NULL),
+ HObjectAccess::ForContextSlot(Context::PREVIOUS_INDEX));
}
return context;
}
@@ -4835,8 +4913,7 @@ void HOptimizedGraphBuilder::VisitVariableProxy(VariableProxy* expr) {
}
LookupResult lookup(isolate());
- GlobalPropertyAccess type =
- LookupGlobalProperty(variable, &lookup, false);
+ GlobalPropertyAccess type = LookupGlobalProperty(variable, &lookup, LOAD);
if (type == kUseCell &&
current_info()->global_object()->IsAccessCheckNeeded()) {
@@ -5038,7 +5115,7 @@ void HOptimizedGraphBuilder::VisitObjectLiteral(ObjectLiteral* expr) {
// TODO(mvstanton): Add a flag to turn off creation of any
// AllocationMementos for this call: we are in crankshaft and should have
// learned enough about transition behavior to stop emitting mementos.
- Runtime::FunctionId function_id = Runtime::kCreateObjectLiteral;
+ Runtime::FunctionId function_id = Runtime::kHiddenCreateObjectLiteral;
literal = Add<HCallRuntime>(isolate()->factory()->empty_string(),
Runtime::FunctionForId(function_id),
4);
@@ -5071,7 +5148,8 @@ void HOptimizedGraphBuilder::VisitObjectLiteral(ObjectLiteral* expr) {
HInstruction* store;
if (map.is_null()) {
// If we don't know the monomorphic type, do a generic store.
- CHECK_ALIVE(store = BuildStoreNamedGeneric(literal, name, value));
+ CHECK_ALIVE(store = BuildNamedGeneric(
+ STORE, literal, name, value));
} else {
PropertyAccessInfo info(this, STORE, ToType(map), name);
if (info.CanAccessMonomorphic()) {
@@ -5081,8 +5159,8 @@ void HOptimizedGraphBuilder::VisitObjectLiteral(ObjectLiteral* expr) {
&info, literal, checked_literal, value,
BailoutId::None(), BailoutId::None());
} else {
- CHECK_ALIVE(
- store = BuildStoreNamedGeneric(literal, name, value));
+ CHECK_ALIVE(store = BuildNamedGeneric(
+ STORE, literal, name, value));
}
}
AddInstruction(store);
@@ -5194,7 +5272,7 @@ void HOptimizedGraphBuilder::VisitArrayLiteral(ArrayLiteral* expr) {
// TODO(mvstanton): Consider a flag to turn off creation of any
// AllocationMementos for this call: we are in crankshaft and should have
// learned enough about transition behavior to stop emitting mementos.
- Runtime::FunctionId function_id = Runtime::kCreateArrayLiteral;
+ Runtime::FunctionId function_id = Runtime::kHiddenCreateArrayLiteral;
literal = Add<HCallRuntime>(isolate()->factory()->empty_string(),
Runtime::FunctionForId(function_id),
4);
@@ -5258,6 +5336,24 @@ HCheckMaps* HOptimizedGraphBuilder::AddCheckMap(HValue* object,
}
+HInstruction* HOptimizedGraphBuilder::BuildLoadNamedField(
+ PropertyAccessInfo* info,
+ HValue* checked_object) {
+ HObjectAccess access = info->access();
+ if (access.representation().IsDouble()) {
+ // Load the heap number.
+ checked_object = Add<HLoadNamedField>(
+ checked_object, static_cast<HValue*>(NULL),
+ access.WithRepresentation(Representation::Tagged()));
+ checked_object->set_type(HType::HeapNumber());
+ // Load the double value from it.
+ access = HObjectAccess::ForHeapNumberValue();
+ }
+ return New<HLoadNamedField>(
+ checked_object, static_cast<HValue*>(NULL), access);
+}
+
+
HInstruction* HOptimizedGraphBuilder::BuildStoreNamedField(
PropertyAccessInfo* info,
HValue* checked_object,
@@ -5268,7 +5364,7 @@ HInstruction* HOptimizedGraphBuilder::BuildStoreNamedField(
info->map(), info->lookup(), info->name());
HStoreNamedField *instr;
- if (FLAG_track_double_fields && field_access.representation().IsDouble()) {
+ if (field_access.representation().IsDouble()) {
HObjectAccess heap_number_access =
field_access.WithRepresentation(Representation::Tagged());
if (transition_to_field) {
@@ -5308,30 +5404,12 @@ HInstruction* HOptimizedGraphBuilder::BuildStoreNamedField(
if (transition_to_field) {
HConstant* transition_constant = Add<HConstant>(info->transition());
instr->SetTransition(transition_constant, top_info());
- instr->SetGVNFlag(kChangesMaps);
+ instr->SetChangesFlag(kMaps);
}
return instr;
}
-HInstruction* HOptimizedGraphBuilder::BuildStoreNamedGeneric(
- HValue* object,
- Handle<String> name,
- HValue* value,
- bool is_uninitialized) {
- if (is_uninitialized) {
- Add<HDeoptimize>("Insufficient type feedback for property assignment",
- Deoptimizer::SOFT);
- }
-
- return New<HStoreNamedGeneric>(
- object,
- name,
- value,
- function_strict_mode_flag());
-}
-
-
bool HOptimizedGraphBuilder::PropertyAccessInfo::IsCompatible(
PropertyAccessInfo* info) {
if (!CanInlinePropertyAccess(type_)) return false;
@@ -5513,7 +5591,7 @@ bool HOptimizedGraphBuilder::PropertyAccessInfo::CanAccessAsMonomorphic(
static bool NeedsWrappingFor(Type* type, Handle<JSFunction> target) {
return type->Is(Type::NumberOrString()) &&
- target->shared()->is_classic_mode() &&
+ target->shared()->strict_mode() == SLOPPY &&
!target->shared()->native();
}
@@ -5546,7 +5624,7 @@ HInstruction* HOptimizedGraphBuilder::BuildMonomorphicAccess(
if (info->lookup()->IsField()) {
if (info->IsLoad()) {
- return BuildLoadNamedField(checked_holder, info->access());
+ return BuildLoadNamedField(info, checked_holder);
} else {
return BuildStoreNamedField(info, checked_object, value);
}
@@ -5642,7 +5720,7 @@ void HOptimizedGraphBuilder::HandlePolymorphicNamedFieldAccess(
smi_check = New<HIsSmiAndBranch>(
object, empty_smi_block, not_smi_block);
FinishCurrentBlock(smi_check);
- Goto(empty_smi_block, number_block);
+ GotoNoSimulate(empty_smi_block, number_block);
set_current_block(not_smi_block);
} else {
BuildCheckHeapObject(object);
@@ -5668,9 +5746,8 @@ void HOptimizedGraphBuilder::HandlePolymorphicNamedFieldAccess(
FinishCurrentBlock(compare);
if (info.type()->Is(Type::Number())) {
- Goto(if_true, number_block);
+ GotoNoSimulate(if_true, number_block);
if_true = number_block;
- number_block->SetJoinId(ast_id);
}
set_current_block(if_true);
@@ -5704,32 +5781,11 @@ void HOptimizedGraphBuilder::HandlePolymorphicNamedFieldAccess(
// know about and do not want to handle ones we've never seen. Otherwise
// use a generic IC.
if (count == types->length() && FLAG_deoptimize_uncommon_cases) {
- // Because the deopt may be the only path in the polymorphic load, make sure
- // that the environment stack matches the depth on deopt that it otherwise
- // would have had after a successful load.
- if (!ast_context()->IsEffect()) Push(graph()->GetConstant0());
- const char* message = "";
- switch (access_type) {
- case LOAD:
- message = "Unknown map in polymorphic load";
- break;
- case STORE:
- message = "Unknown map in polymorphic store";
- break;
- }
- FinishExitWithHardDeoptimization(message, join);
+ FinishExitWithHardDeoptimization("Unknown map in polymorphic access");
} else {
- HValue* result = NULL;
- switch (access_type) {
- case LOAD:
- result = Add<HLoadNamedGeneric>(object, name);
- break;
- case STORE:
- AddInstruction(BuildStoreNamedGeneric(object, name, value));
- result = value;
- break;
- }
- if (!ast_context()->IsEffect()) Push(result);
+ HInstruction* instr = BuildNamedGeneric(access_type, object, name, value);
+ AddInstruction(instr);
+ if (!ast_context()->IsEffect()) Push(access_type == LOAD ? instr : value);
if (join != NULL) {
Goto(join);
@@ -5741,9 +5797,13 @@ void HOptimizedGraphBuilder::HandlePolymorphicNamedFieldAccess(
}
ASSERT(join != NULL);
- join->SetJoinId(ast_id);
- set_current_block(join);
- if (!ast_context()->IsEffect()) ast_context()->ReturnValue(Pop());
+ if (join->HasPredecessor()) {
+ join->SetJoinId(ast_id);
+ set_current_block(join);
+ if (!ast_context()->IsEffect()) ast_context()->ReturnValue(Pop());
+ } else {
+ set_current_block(NULL);
+ }
}
@@ -5784,8 +5844,7 @@ void HOptimizedGraphBuilder::BuildStore(Expression* expr,
HValue* object = environment()->ExpressionStackAt(2);
bool has_side_effects = false;
HandleKeyedElementAccess(object, key, value, expr,
- true, // is_store
- &has_side_effects);
+ STORE, &has_side_effects);
Drop(3);
Push(value);
Add<HSimulate>(return_id, REMOVABLE_SIMULATE);
@@ -5835,7 +5894,7 @@ void HOptimizedGraphBuilder::HandleGlobalVariableAssignment(
HValue* value,
BailoutId ast_id) {
LookupResult lookup(isolate());
- GlobalPropertyAccess type = LookupGlobalProperty(var, &lookup, true);
+ GlobalPropertyAccess type = LookupGlobalProperty(var, &lookup, STORE);
if (type == kUseCell) {
Handle<GlobalObject> global(current_info()->global_object());
Handle<PropertyCell> cell(global->GetPropertyCell(&lookup));
@@ -5873,7 +5932,7 @@ void HOptimizedGraphBuilder::HandleGlobalVariableAssignment(
HObjectAccess::ForContextSlot(Context::GLOBAL_OBJECT_INDEX));
HStoreNamedGeneric* instr =
Add<HStoreNamedGeneric>(global_object, var->name(),
- value, function_strict_mode_flag());
+ value, function_strict_mode());
USE(instr);
ASSERT(instr->HasObservableSideEffects());
Add<HSimulate>(ast_id, REMOVABLE_SIMULATE);
@@ -5908,7 +5967,7 @@ void HOptimizedGraphBuilder::HandleCompoundAssignment(Assignment* expr) {
case Variable::PARAMETER:
case Variable::LOCAL:
- if (var->mode() == CONST) {
+ if (var->mode() == CONST_LEGACY) {
return Bailout(kUnsupportedConstCompoundAssignment);
}
BindIfLive(var, Top());
@@ -5937,11 +5996,11 @@ void HOptimizedGraphBuilder::HandleCompoundAssignment(Assignment* expr) {
mode = HStoreContextSlot::kCheckDeoptimize;
break;
case CONST:
- return ast_context()->ReturnValue(Pop());
- case CONST_HARMONY:
// This case is checked statically so no need to
// perform checks here
UNREACHABLE();
+ case CONST_LEGACY:
+ return ast_context()->ReturnValue(Pop());
default:
mode = HStoreContextSlot::kNoCheck;
}
@@ -6006,6 +6065,10 @@ void HOptimizedGraphBuilder::VisitAssignment(Assignment* expr) {
if (var->mode() == CONST) {
if (expr->op() != Token::INIT_CONST) {
+ return Bailout(kNonInitializerAssignmentToConst);
+ }
+ } else if (var->mode() == CONST_LEGACY) {
+ if (expr->op() != Token::INIT_CONST_LEGACY) {
CHECK_ALIVE(VisitForValue(expr->value()));
return ast_context()->ReturnValue(Pop());
}
@@ -6016,10 +6079,6 @@ void HOptimizedGraphBuilder::VisitAssignment(Assignment* expr) {
HValue* old_value = environment()->Lookup(var);
Add<HUseConst>(old_value);
}
- } else if (var->mode() == CONST_HARMONY) {
- if (expr->op() != Token::INIT_CONST_HARMONY) {
- return Bailout(kNonInitializerAssignmentToConst);
- }
}
if (proxy->IsArguments()) return Bailout(kAssignmentToArguments);
@@ -6075,20 +6134,20 @@ void HOptimizedGraphBuilder::VisitAssignment(Assignment* expr) {
mode = HStoreContextSlot::kCheckDeoptimize;
break;
case CONST:
- return ast_context()->ReturnValue(Pop());
- case CONST_HARMONY:
// This case is checked statically so no need to
// perform checks here
UNREACHABLE();
+ case CONST_LEGACY:
+ return ast_context()->ReturnValue(Pop());
default:
mode = HStoreContextSlot::kNoCheck;
}
} else if (expr->op() == Token::INIT_VAR ||
expr->op() == Token::INIT_LET ||
- expr->op() == Token::INIT_CONST_HARMONY) {
+ expr->op() == Token::INIT_CONST) {
mode = HStoreContextSlot::kNoCheck;
} else {
- ASSERT(expr->op() == Token::INIT_CONST);
+ ASSERT(expr->op() == Token::INIT_CONST_LEGACY);
mode = HStoreContextSlot::kCheckIgnoreAssignment;
}
@@ -6128,10 +6187,10 @@ void HOptimizedGraphBuilder::VisitThrow(Throw* expr) {
CHECK_ALIVE(VisitForValue(expr->exception()));
HValue* value = environment()->Pop();
- if (!FLAG_emit_opt_code_positions) SetSourcePosition(expr->position());
+ if (!FLAG_hydrogen_track_positions) SetSourcePosition(expr->position());
Add<HPushArgument>(value);
Add<HCallRuntime>(isolate()->factory()->empty_string(),
- Runtime::FunctionForId(Runtime::kThrow), 1);
+ Runtime::FunctionForId(Runtime::kHiddenThrow), 1);
Add<HSimulate>(expr->id());
// If the throw definitely exits the function, we can finish with a dummy
@@ -6143,29 +6202,6 @@ void HOptimizedGraphBuilder::VisitThrow(Throw* expr) {
}
-HLoadNamedField* HGraphBuilder::BuildLoadNamedField(HValue* object,
- HObjectAccess access) {
- if (FLAG_track_double_fields && access.representation().IsDouble()) {
- // load the heap number
- HLoadNamedField* heap_number = Add<HLoadNamedField>(
- object, static_cast<HValue*>(NULL),
- access.WithRepresentation(Representation::Tagged()));
- heap_number->set_type(HType::HeapNumber());
- // load the double value from it
- return New<HLoadNamedField>(
- heap_number, static_cast<HValue*>(NULL),
- HObjectAccess::ForHeapNumberValue());
- }
- return New<HLoadNamedField>(object, static_cast<HValue*>(NULL), access);
-}
-
-
-HInstruction* HGraphBuilder::AddLoadNamedField(HValue* object,
- HObjectAccess access) {
- return AddInstruction(BuildLoadNamedField(object, access));
-}
-
-
HInstruction* HGraphBuilder::AddLoadStringInstanceType(HValue* string) {
if (string->IsConstant()) {
HConstant* c_string = HConstant::cast(string);
@@ -6173,9 +6209,10 @@ HInstruction* HGraphBuilder::AddLoadStringInstanceType(HValue* string) {
return Add<HConstant>(c_string->StringValue()->map()->instance_type());
}
}
- return AddLoadNamedField(
- AddLoadNamedField(string, HObjectAccess::ForMap()),
- HObjectAccess::ForMapInstanceType());
+ return Add<HLoadNamedField>(
+ Add<HLoadNamedField>(string, static_cast<HValue*>(NULL),
+ HObjectAccess::ForMap()),
+ static_cast<HValue*>(NULL), HObjectAccess::ForMapInstanceType());
}
@@ -6186,26 +6223,40 @@ HInstruction* HGraphBuilder::AddLoadStringLength(HValue* string) {
return Add<HConstant>(c_string->StringValue()->length());
}
}
- return AddLoadNamedField(string, HObjectAccess::ForStringLength());
+ return Add<HLoadNamedField>(string, static_cast<HValue*>(NULL),
+ HObjectAccess::ForStringLength());
}
-HInstruction* HOptimizedGraphBuilder::BuildLoadNamedGeneric(
+HInstruction* HOptimizedGraphBuilder::BuildNamedGeneric(
+ PropertyAccessType access_type,
HValue* object,
Handle<String> name,
+ HValue* value,
bool is_uninitialized) {
if (is_uninitialized) {
- Add<HDeoptimize>("Insufficient type feedback for generic named load",
+ Add<HDeoptimize>("Insufficient type feedback for generic named access",
Deoptimizer::SOFT);
}
- return New<HLoadNamedGeneric>(object, name);
+ if (access_type == LOAD) {
+ return New<HLoadNamedGeneric>(object, name);
+ } else {
+ return New<HStoreNamedGeneric>(object, name, value, function_strict_mode());
+ }
}
-HInstruction* HOptimizedGraphBuilder::BuildLoadKeyedGeneric(HValue* object,
- HValue* key) {
- return New<HLoadKeyedGeneric>(object, key);
+HInstruction* HOptimizedGraphBuilder::BuildKeyedGeneric(
+ PropertyAccessType access_type,
+ HValue* object,
+ HValue* key,
+ HValue* value) {
+ if (access_type == LOAD) {
+ return New<HLoadKeyedGeneric>(object, key);
+ } else {
+ return New<HStoreKeyedGeneric>(object, key, value, function_strict_mode());
+ }
}
@@ -6231,15 +6282,15 @@ HInstruction* HOptimizedGraphBuilder::BuildMonomorphicElementAccess(
HValue* val,
HValue* dependency,
Handle<Map> map,
- bool is_store,
+ PropertyAccessType access_type,
KeyedAccessStoreMode store_mode) {
HCheckMaps* checked_object = Add<HCheckMaps>(object, map, top_info(),
dependency);
if (dependency) {
- checked_object->ClearGVNFlag(kDependsOnElementsKind);
+ checked_object->ClearDependsOnFlag(kElementsKind);
}
- if (is_store && map->prototype()->IsJSObject()) {
+ if (access_type == STORE && map->prototype()->IsJSObject()) {
// monomorphic stores need a prototype chain check because shape
// changes could allow callbacks on elements in the chain that
// aren't compatible with monomorphic keyed stores.
@@ -6258,7 +6309,7 @@ HInstruction* HOptimizedGraphBuilder::BuildMonomorphicElementAccess(
return BuildUncheckedMonomorphicElementAccess(
checked_object, key, val,
map->instance_type() == JS_ARRAY_TYPE,
- map->elements_kind(), is_store,
+ map->elements_kind(), access_type,
load_mode, store_mode);
}
@@ -6324,7 +6375,7 @@ HInstruction* HOptimizedGraphBuilder::TryBuildConsolidatedElementLoad(
checked_object, key, val,
most_general_consolidated_map->instance_type() == JS_ARRAY_TYPE,
consolidated_elements_kind,
- false, NEVER_RETURN_HOLE, STANDARD_STORE);
+ LOAD, NEVER_RETURN_HOLE, STANDARD_STORE);
return instr;
}
@@ -6334,13 +6385,13 @@ HValue* HOptimizedGraphBuilder::HandlePolymorphicElementAccess(
HValue* key,
HValue* val,
SmallMapList* maps,
- bool is_store,
+ PropertyAccessType access_type,
KeyedAccessStoreMode store_mode,
bool* has_side_effects) {
*has_side_effects = false;
BuildCheckHeapObject(object);
- if (!is_store) {
+ if (access_type == LOAD) {
HInstruction* consolidated_load =
TryBuildConsolidatedElementLoad(object, key, val, maps);
if (consolidated_load != NULL) {
@@ -6360,6 +6411,11 @@ HValue* HOptimizedGraphBuilder::HandlePolymorphicElementAccess(
elements_kind != GetInitialFastElementsKind()) {
possible_transitioned_maps.Add(map);
}
+ if (elements_kind == SLOPPY_ARGUMENTS_ELEMENTS) {
+ HInstruction* result = BuildKeyedGeneric(access_type, object, key, val);
+ *has_side_effects = result->HasObservableSideEffects();
+ return AddInstruction(result);
+ }
}
// Get transition target for each map (NULL == no transition).
for (int i = 0; i < maps->length(); ++i) {
@@ -6393,15 +6449,14 @@ HValue* HOptimizedGraphBuilder::HandlePolymorphicElementAccess(
HInstruction* instr = NULL;
if (untransitionable_map->has_slow_elements_kind() ||
!untransitionable_map->IsJSObjectMap()) {
- instr = AddInstruction(is_store ? BuildStoreKeyedGeneric(object, key, val)
- : BuildLoadKeyedGeneric(object, key));
+ instr = AddInstruction(BuildKeyedGeneric(access_type, object, key, val));
} else {
instr = BuildMonomorphicElementAccess(
- object, key, val, transition, untransitionable_map, is_store,
+ object, key, val, transition, untransitionable_map, access_type,
store_mode);
}
*has_side_effects |= instr->HasObservableSideEffects();
- return is_store ? NULL : instr;
+ return access_type == STORE ? NULL : instr;
}
HBasicBlock* join = graph()->CreateBasicBlock();
@@ -6419,25 +6474,24 @@ HValue* HOptimizedGraphBuilder::HandlePolymorphicElementAccess(
set_current_block(this_map);
HInstruction* access = NULL;
if (IsDictionaryElementsKind(elements_kind)) {
- access = is_store
- ? AddInstruction(BuildStoreKeyedGeneric(object, key, val))
- : AddInstruction(BuildLoadKeyedGeneric(object, key));
+ access = AddInstruction(BuildKeyedGeneric(access_type, object, key, val));
} else {
ASSERT(IsFastElementsKind(elements_kind) ||
- IsExternalArrayElementsKind(elements_kind));
+ IsExternalArrayElementsKind(elements_kind) ||
+ IsFixedTypedArrayElementsKind(elements_kind));
LoadKeyedHoleMode load_mode = BuildKeyedHoleMode(map);
// Happily, mapcompare is a checked object.
access = BuildUncheckedMonomorphicElementAccess(
mapcompare, key, val,
map->instance_type() == JS_ARRAY_TYPE,
- elements_kind, is_store,
+ elements_kind, access_type,
load_mode,
store_mode);
}
*has_side_effects |= access->HasObservableSideEffects();
// The caller will use has_side_effects and add a correct Simulate.
access->SetFlag(HValue::kHasNoObservableSideEffects);
- if (!is_store) {
+ if (access_type == LOAD) {
Push(access);
}
NoObservableSideEffectsScope scope(this);
@@ -6445,12 +6499,16 @@ HValue* HOptimizedGraphBuilder::HandlePolymorphicElementAccess(
set_current_block(other_map);
}
+ // Ensure that we visited at least one map above that goes to join. This is
+ // necessary because FinishExitWithHardDeoptimization does an AbnormalExit
+ // rather than joining the join block. If this becomes an issue, insert a
+ // generic access in the case length() == 0.
+ ASSERT(join->predecessors()->length() > 0);
// Deopt if none of the cases matched.
NoObservableSideEffectsScope scope(this);
- FinishExitWithHardDeoptimization("Unknown map in polymorphic element access",
- join);
+ FinishExitWithHardDeoptimization("Unknown map in polymorphic element access");
set_current_block(join);
- return is_store ? NULL : Pop();
+ return access_type == STORE ? NULL : Pop();
}
@@ -6459,7 +6517,7 @@ HValue* HOptimizedGraphBuilder::HandleKeyedElementAccess(
HValue* key,
HValue* val,
Expression* expr,
- bool is_store,
+ PropertyAccessType access_type,
bool* has_side_effects) {
ASSERT(!expr->IsPropertyName());
HInstruction* instr = NULL;
@@ -6468,7 +6526,8 @@ HValue* HOptimizedGraphBuilder::HandleKeyedElementAccess(
bool monomorphic = ComputeReceiverTypes(expr, obj, &types, zone());
bool force_generic = false;
- if (is_store && (monomorphic || (types != NULL && !types->is_empty()))) {
+ if (access_type == STORE &&
+ (monomorphic || (types != NULL && !types->is_empty()))) {
// Stores can't be mono/polymorphic if their prototype chain has dictionary
// elements. However a receiver map that has dictionary elements itself
// should be left to normal mono/poly behavior (the other maps may benefit
@@ -6486,52 +6545,36 @@ HValue* HOptimizedGraphBuilder::HandleKeyedElementAccess(
if (monomorphic) {
Handle<Map> map = types->first();
if (map->has_slow_elements_kind() || !map->IsJSObjectMap()) {
- instr = is_store ? BuildStoreKeyedGeneric(obj, key, val)
- : BuildLoadKeyedGeneric(obj, key);
- AddInstruction(instr);
+ instr = AddInstruction(BuildKeyedGeneric(access_type, obj, key, val));
} else {
BuildCheckHeapObject(obj);
instr = BuildMonomorphicElementAccess(
- obj, key, val, NULL, map, is_store, expr->GetStoreMode());
+ obj, key, val, NULL, map, access_type, expr->GetStoreMode());
}
} else if (!force_generic && (types != NULL && !types->is_empty())) {
return HandlePolymorphicElementAccess(
- obj, key, val, types, is_store,
+ obj, key, val, types, access_type,
expr->GetStoreMode(), has_side_effects);
} else {
- if (is_store) {
+ if (access_type == STORE) {
if (expr->IsAssignment() &&
expr->AsAssignment()->HasNoTypeInformation()) {
Add<HDeoptimize>("Insufficient type feedback for keyed store",
Deoptimizer::SOFT);
}
- instr = BuildStoreKeyedGeneric(obj, key, val);
} else {
if (expr->AsProperty()->HasNoTypeInformation()) {
Add<HDeoptimize>("Insufficient type feedback for keyed load",
Deoptimizer::SOFT);
}
- instr = BuildLoadKeyedGeneric(obj, key);
}
- AddInstruction(instr);
+ instr = AddInstruction(BuildKeyedGeneric(access_type, obj, key, val));
}
*has_side_effects = instr->HasObservableSideEffects();
return instr;
}
-HInstruction* HOptimizedGraphBuilder::BuildStoreKeyedGeneric(
- HValue* object,
- HValue* key,
- HValue* value) {
- return New<HStoreKeyedGeneric>(
- object,
- key,
- value,
- function_strict_mode_flag());
-}
-
-
void HOptimizedGraphBuilder::EnsureArgumentsArePushedForAccess() {
// Outermost function already has arguments on the stack.
if (function_state()->outer() == NULL) return;
@@ -6644,11 +6687,7 @@ HInstruction* HOptimizedGraphBuilder::BuildNamedAccess(
&info, object, checked_object, value, ast_id, return_id);
}
- if (access == LOAD) {
- return BuildLoadNamedGeneric(object, name, is_uninitialized);
- } else {
- return BuildStoreNamedGeneric(object, name, value, is_uninitialized);
- }
+ return BuildNamedGeneric(access, object, name, value, is_uninitialized);
}
@@ -6692,9 +6731,7 @@ void HOptimizedGraphBuilder::BuildLoad(Property* expr,
bool has_side_effects = false;
HValue* load = HandleKeyedElementAccess(
- obj, key, NULL, expr,
- false, // is_store
- &has_side_effects);
+ obj, key, NULL, expr, LOAD, &has_side_effects);
if (has_side_effects) {
if (ast_context()->IsEffect()) {
Add<HSimulate>(ast_id, REMOVABLE_SIMULATE);
@@ -6740,7 +6777,7 @@ HInstruction* HGraphBuilder::BuildConstantMapCheck(Handle<JSObject> constant,
AddInstruction(constant_value);
HCheckMaps* check =
Add<HCheckMaps>(constant_value, handle(constant->map()), info);
- check->ClearGVNFlag(kDependsOnElementsKind);
+ check->ClearDependsOnFlag(kElementsKind);
return check;
}
@@ -6824,44 +6861,13 @@ HInstruction* HOptimizedGraphBuilder::BuildCallConstantFunction(
}
-class FunctionSorter {
- public:
- FunctionSorter() : index_(0), ticks_(0), ast_length_(0), src_length_(0) { }
- FunctionSorter(int index, int ticks, int ast_length, int src_length)
- : index_(index),
- ticks_(ticks),
- ast_length_(ast_length),
- src_length_(src_length) { }
-
- int index() const { return index_; }
- int ticks() const { return ticks_; }
- int ast_length() const { return ast_length_; }
- int src_length() const { return src_length_; }
-
- private:
- int index_;
- int ticks_;
- int ast_length_;
- int src_length_;
-};
-
-
-inline bool operator<(const FunctionSorter& lhs, const FunctionSorter& rhs) {
- int diff = lhs.ticks() - rhs.ticks();
- if (diff != 0) return diff > 0;
- diff = lhs.ast_length() - rhs.ast_length();
- if (diff != 0) return diff < 0;
- return lhs.src_length() < rhs.src_length();
-}
-
-
void HOptimizedGraphBuilder::HandlePolymorphicCallNamed(
Call* expr,
HValue* receiver,
SmallMapList* types,
Handle<String> name) {
int argument_count = expr->arguments()->length() + 1; // Includes receiver.
- FunctionSorter order[kMaxCallPolymorphism];
+ int order[kMaxCallPolymorphism];
bool handle_smi = false;
bool handled_string = false;
@@ -6883,23 +6889,17 @@ void HOptimizedGraphBuilder::HandlePolymorphicCallNamed(
handle_smi = true;
}
expr->set_target(target);
- order[ordered_functions++] =
- FunctionSorter(i,
- expr->target()->shared()->profiler_ticks(),
- InliningAstSize(expr->target()),
- expr->target()->shared()->SourceSize());
+ order[ordered_functions++] = i;
}
}
- std::sort(order, order + ordered_functions);
-
HBasicBlock* number_block = NULL;
HBasicBlock* join = NULL;
handled_string = false;
int count = 0;
for (int fn = 0; fn < ordered_functions; ++fn) {
- int i = order[fn].index();
+ int i = order[fn];
PropertyAccessInfo info(this, LOAD, ToType(types->at(i)), name);
if (info.type()->Is(Type::String())) {
if (handled_string) continue;
@@ -6919,7 +6919,7 @@ void HOptimizedGraphBuilder::HandlePolymorphicCallNamed(
number_block = graph()->CreateBasicBlock();
FinishCurrentBlock(New<HIsSmiAndBranch>(
receiver, empty_smi_block, not_smi_block));
- Goto(empty_smi_block, number_block);
+ GotoNoSimulate(empty_smi_block, number_block);
set_current_block(not_smi_block);
} else {
BuildCheckHeapObject(receiver);
@@ -6942,9 +6942,8 @@ void HOptimizedGraphBuilder::HandlePolymorphicCallNamed(
FinishCurrentBlock(compare);
if (info.type()->Is(Type::Number())) {
- Goto(if_true, number_block);
+ GotoNoSimulate(if_true, number_block);
if_true = number_block;
- number_block->SetJoinId(expr->id());
}
set_current_block(if_true);
@@ -6992,16 +6991,11 @@ void HOptimizedGraphBuilder::HandlePolymorphicCallNamed(
// know about and do not want to handle ones we've never seen. Otherwise
// use a generic IC.
if (ordered_functions == types->length() && FLAG_deoptimize_uncommon_cases) {
- // Because the deopt may be the only path in the polymorphic call, make sure
- // that the environment stack matches the depth on deopt that it otherwise
- // would have had after a successful call.
- Drop(1); // Drop receiver.
- if (!ast_context()->IsEffect()) Push(graph()->GetConstant0());
- FinishExitWithHardDeoptimization("Unknown map in polymorphic call", join);
+ FinishExitWithHardDeoptimization("Unknown map in polymorphic call");
} else {
Property* prop = expr->expression()->AsProperty();
- HInstruction* function = BuildLoadNamedGeneric(
- receiver, name, prop->IsUninitialized());
+ HInstruction* function = BuildNamedGeneric(
+ LOAD, receiver, name, NULL, prop->IsUninitialized());
AddInstruction(function);
Push(function);
AddSimulate(prop->LoadId(), REMOVABLE_SIMULATE);
@@ -7105,7 +7099,8 @@ bool HOptimizedGraphBuilder::TryInline(Handle<JSFunction> target,
HValue* implicit_return_value,
BailoutId ast_id,
BailoutId return_id,
- InliningKind inlining_kind) {
+ InliningKind inlining_kind,
+ HSourcePosition position) {
int nodes_added = InliningAstSize(target);
if (nodes_added == kNotInlinable) return false;
@@ -7237,11 +7232,13 @@ bool HOptimizedGraphBuilder::TryInline(Handle<JSFunction> target,
ASSERT(target_shared->has_deoptimization_support());
AstTyper::Run(&target_info);
+ int function_id = graph()->TraceInlinedFunction(target_shared, position);
+
// Save the pending call context. Set up new one for the inlined function.
// The function state is new-allocated because we need to delete it
// in two different places.
FunctionState* target_state = new FunctionState(
- this, &target_info, inlining_kind);
+ this, &target_info, inlining_kind, function_id);
HConstant* undefined = graph()->GetConstantUndefined();
@@ -7388,7 +7385,8 @@ bool HOptimizedGraphBuilder::TryInlineCall(Call* expr) {
NULL,
expr->id(),
expr->ReturnId(),
- NORMAL_RETURN);
+ NORMAL_RETURN,
+ ScriptPositionToSourcePosition(expr->position()));
}
@@ -7399,7 +7397,8 @@ bool HOptimizedGraphBuilder::TryInlineConstruct(CallNew* expr,
implicit_return_value,
expr->id(),
expr->ReturnId(),
- CONSTRUCT_CALL_RETURN);
+ CONSTRUCT_CALL_RETURN,
+ ScriptPositionToSourcePosition(expr->position()));
}
@@ -7413,7 +7412,8 @@ bool HOptimizedGraphBuilder::TryInlineGetter(Handle<JSFunction> getter,
NULL,
ast_id,
return_id,
- GETTER_CALL_RETURN);
+ GETTER_CALL_RETURN,
+ source_position());
}
@@ -7427,7 +7427,8 @@ bool HOptimizedGraphBuilder::TryInlineSetter(Handle<JSFunction> setter,
1,
implicit_return_value,
id, assignment_id,
- SETTER_CALL_RETURN);
+ SETTER_CALL_RETURN,
+ source_position());
}
@@ -7439,7 +7440,8 @@ bool HOptimizedGraphBuilder::TryInlineApply(Handle<JSFunction> function,
NULL,
expr->id(),
expr->ReturnId(),
- NORMAL_RETURN);
+ NORMAL_RETURN,
+ ScriptPositionToSourcePosition(expr->position()));
}
@@ -7455,6 +7457,7 @@ bool HOptimizedGraphBuilder::TryInlineBuiltinFunctionCall(Call* expr) {
case kMathAbs:
case kMathSqrt:
case kMathLog:
+ case kMathClz32:
if (expr->arguments()->length() == 1) {
HValue* argument = Pop();
Drop(2); // Receiver and function.
@@ -7525,6 +7528,7 @@ bool HOptimizedGraphBuilder::TryInlineBuiltinMethodCall(
case kMathAbs:
case kMathSqrt:
case kMathLog:
+ case kMathClz32:
if (argument_count == 2) {
HValue* argument = Pop();
Drop(2); // Receiver and function.
@@ -7623,7 +7627,7 @@ bool HOptimizedGraphBuilder::TryInlineBuiltinMethodCall(
}
reduced_length = AddUncasted<HSub>(length, graph()->GetConstant1());
result = AddElementAccess(elements, reduced_length, NULL,
- bounds_check, elements_kind, false);
+ bounds_check, elements_kind, LOAD);
Factory* factory = isolate()->factory();
double nan_double = FixedDoubleArray::hole_nan_as_double();
HValue* hole = IsFastSmiOrObjectElementsKind(elements_kind)
@@ -7633,7 +7637,7 @@ bool HOptimizedGraphBuilder::TryInlineBuiltinMethodCall(
elements_kind = FAST_HOLEY_ELEMENTS;
}
AddElementAccess(
- elements, reduced_length, hole, bounds_check, elements_kind, true);
+ elements, reduced_length, hole, bounds_check, elements_kind, STORE);
Add<HStoreNamedField>(
checked_object, HObjectAccess::ForArrayLength(elements_kind),
reduced_length, STORE_TO_INITIALIZED_ENTRY);
@@ -7778,6 +7782,7 @@ bool HOptimizedGraphBuilder::TryInlineApiCall(Handle<JSFunction> function,
}
bool drop_extra = false;
+ bool is_store = false;
switch (call_type) {
case kCallApiFunction:
case kCallApiMethod:
@@ -7804,6 +7809,7 @@ bool HOptimizedGraphBuilder::TryInlineApiCall(Handle<JSFunction> function,
break;
case kCallApiSetter:
{
+ is_store = true;
// Receiver and prototype chain cannot have changed.
ASSERT_EQ(1, argc);
ASSERT_EQ(NULL, receiver);
@@ -7849,7 +7855,7 @@ bool HOptimizedGraphBuilder::TryInlineApiCall(Handle<JSFunction> function,
CallInterfaceDescriptor* descriptor =
isolate()->call_descriptor(Isolate::ApiFunctionCall);
- CallApiFunctionStub stub(true, call_data_is_undefined, argc);
+ CallApiFunctionStub stub(is_store, call_data_is_undefined, argc);
Handle<Code> code = stub.GetCode(isolate());
HConstant* code_value = Add<HConstant>(code);
@@ -7941,7 +7947,7 @@ bool HOptimizedGraphBuilder::TryCallApply(Call* expr) {
HValue* HOptimizedGraphBuilder::ImplicitReceiverFor(HValue* function,
Handle<JSFunction> target) {
SharedFunctionInfo* shared = target->shared();
- if (shared->is_classic_mode() && !shared->native()) {
+ if (shared->strict_mode() == SLOPPY && !shared->native()) {
// Cannot embed a direct reference to the global proxy
// as is it dropped on deserialization.
CHECK(!Serializer::enabled());
@@ -7987,6 +7993,8 @@ void HOptimizedGraphBuilder::VisitCall(Call* expr) {
CHECK_ALIVE(PushLoad(prop, receiver, key));
HValue* function = Pop();
+ if (FLAG_hydrogen_track_positions) SetSourcePosition(expr->position());
+
// Push the function under the receiver.
environment()->SetExpressionStackAt(0, function);
@@ -8041,6 +8049,10 @@ void HOptimizedGraphBuilder::VisitCall(Call* expr) {
return Bailout(kPossibleDirectCallToEval);
}
+ // The function is on the stack in the unoptimized code during
+ // evaluation of the arguments.
+ CHECK_ALIVE(VisitForValue(expr->expression()));
+ HValue* function = Top();
bool global_call = proxy != NULL && proxy->var()->IsUnallocated();
if (global_call) {
Variable* var = proxy->var();
@@ -8049,14 +8061,12 @@ void HOptimizedGraphBuilder::VisitCall(Call* expr) {
// access check is not enabled we assume that the function will not change
// and generate optimized code for calling the function.
LookupResult lookup(isolate());
- GlobalPropertyAccess type = LookupGlobalProperty(var, &lookup, false);
+ GlobalPropertyAccess type = LookupGlobalProperty(var, &lookup, LOAD);
if (type == kUseCell &&
!current_info()->global_object()->IsAccessCheckNeeded()) {
Handle<GlobalObject> global(current_info()->global_object());
known_global_function = expr->ComputeGlobalTarget(global, &lookup);
}
- CHECK_ALIVE(VisitForValue(expr->expression()));
- HValue* function = Top();
if (known_global_function) {
Add<HCheckValue>(function, expr->target());
@@ -8083,18 +8093,13 @@ void HOptimizedGraphBuilder::VisitCall(Call* expr) {
PushArgumentsFromEnvironment(argument_count);
call = BuildCallConstantFunction(expr->target(), argument_count);
} else {
- Push(Add<HPushArgument>(graph()->GetConstantUndefined()));
- CHECK_ALIVE(VisitArgumentList(expr->arguments()));
+ Push(graph()->GetConstantUndefined());
+ CHECK_ALIVE(VisitExpressions(expr->arguments()));
+ PushArgumentsFromEnvironment(argument_count);
call = New<HCallFunction>(function, argument_count);
- Drop(argument_count);
}
} else if (expr->IsMonomorphic()) {
- // The function is on the stack in the unoptimized code during
- // evaluation of the arguments.
- CHECK_ALIVE(VisitForValue(expr->expression()));
- HValue* function = Top();
-
Add<HCheckValue>(function, expr->target());
Push(graph()->GetConstantUndefined());
@@ -8120,13 +8125,10 @@ void HOptimizedGraphBuilder::VisitCall(Call* expr) {
function, expr->target(), argument_count));
} else {
- CHECK_ALIVE(VisitForValue(expr->expression()));
- HValue* function = Top();
- HValue* receiver = graph()->GetConstantUndefined();
- Push(Add<HPushArgument>(receiver));
- CHECK_ALIVE(VisitArgumentList(expr->arguments()));
+ Push(graph()->GetConstantUndefined());
+ CHECK_ALIVE(VisitExpressions(expr->arguments()));
+ PushArgumentsFromEnvironment(argument_count);
call = New<HCallFunction>(function, argument_count);
- Drop(argument_count);
}
}
@@ -8211,9 +8213,8 @@ static bool IsAllocationInlineable(Handle<JSFunction> constructor) {
bool HOptimizedGraphBuilder::IsCallNewArrayInlineable(CallNew* expr) {
- bool inline_ok = false;
Handle<JSFunction> caller = current_info()->closure();
- Handle<JSFunction> target(isolate()->global_context()->array_function(),
+ Handle<JSFunction> target(isolate()->native_context()->array_function(),
isolate());
int argument_count = expr->arguments()->length();
// We should have the function plus array arguments on the environment stack.
@@ -8221,6 +8222,7 @@ bool HOptimizedGraphBuilder::IsCallNewArrayInlineable(CallNew* expr) {
Handle<AllocationSite> site = expr->allocation_site();
ASSERT(!site.is_null());
+ bool inline_ok = false;
if (site->CanInlineCall()) {
// We also want to avoid inlining in certain 1 argument scenarios.
if (argument_count == 1) {
@@ -8259,7 +8261,7 @@ void HOptimizedGraphBuilder::VisitCallNew(CallNew* expr) {
ASSERT(!HasStackOverflow());
ASSERT(current_block() != NULL);
ASSERT(current_block()->HasPredecessor());
- if (!FLAG_emit_opt_code_positions) SetSourcePosition(expr->position());
+ if (!FLAG_hydrogen_track_positions) SetSourcePosition(expr->position());
int argument_count = expr->arguments()->length() + 1; // Plus constructor.
Factory* factory = isolate()->factory();
@@ -8289,12 +8291,25 @@ void HOptimizedGraphBuilder::VisitCallNew(CallNew* expr) {
// Allocate an instance of the implicit receiver object.
HValue* size_in_bytes = Add<HConstant>(instance_size);
- PretenureFlag pretenure_flag =
- (FLAG_pretenuring_call_new && !FLAG_allocation_site_pretenuring) ?
- isolate()->heap()->GetPretenureMode() : NOT_TENURED;
+ HAllocationMode allocation_mode;
+ if (FLAG_pretenuring_call_new) {
+ if (FLAG_allocation_site_pretenuring) {
+ // Try to use pretenuring feedback.
+ Handle<AllocationSite> allocation_site = expr->allocation_site();
+ allocation_mode = HAllocationMode(allocation_site);
+ // Take a dependency on allocation site.
+ AllocationSite::AddDependentCompilationInfo(allocation_site,
+ AllocationSite::TENURING,
+ top_info());
+ } else {
+ allocation_mode = HAllocationMode(
+ isolate()->heap()->GetPretenureMode());
+ }
+ }
+
HAllocate* receiver =
- Add<HAllocate>(size_in_bytes, HType::JSObject(), pretenure_flag,
- JS_OBJECT_TYPE);
+ BuildAllocate(size_in_bytes, HType::JSObject(), JS_OBJECT_TYPE,
+ allocation_mode);
receiver->set_known_initial_map(initial_map);
// Load the initial map from the constructor.
@@ -8360,7 +8375,7 @@ void HOptimizedGraphBuilder::VisitCallNew(CallNew* expr) {
// The constructor function is both an operand to the instruction and an
// argument to the construct call.
Handle<JSFunction> array_function(
- isolate()->global_context()->array_function(), isolate());
+ isolate()->native_context()->array_function(), isolate());
bool use_call_new_array = expr->target().is_identical_to(array_function);
if (use_call_new_array && IsCallNewArrayInlineable(expr)) {
// Verify we are still calling the array function for our native context.
@@ -8394,7 +8409,7 @@ void HOptimizedGraphBuilder::VisitCallNew(CallNew* expr) {
const HOptimizedGraphBuilder::InlineFunctionGenerator
HOptimizedGraphBuilder::kInlineFunctionGenerators[] = {
INLINE_FUNCTION_LIST(INLINE_FUNCTION_GENERATOR_ADDRESS)
- INLINE_RUNTIME_FUNCTION_LIST(INLINE_FUNCTION_GENERATOR_ADDRESS)
+ INLINE_OPTIMIZED_FUNCTION_LIST(INLINE_FUNCTION_GENERATOR_ADDRESS)
};
#undef INLINE_FUNCTION_GENERATOR_ADDRESS
@@ -8416,9 +8431,6 @@ void HGraphBuilder::BuildArrayBufferViewInitialization(
Add<HStoreNamedField>(
obj,
- HObjectAccess::ForJSArrayBufferViewBuffer(), buffer);
- Add<HStoreNamedField>(
- obj,
HObjectAccess::ForJSArrayBufferViewByteOffset(),
byte_offset);
Add<HStoreNamedField>(
@@ -8426,18 +8438,31 @@ void HGraphBuilder::BuildArrayBufferViewInitialization(
HObjectAccess::ForJSArrayBufferViewByteLength(),
byte_length);
- HObjectAccess weak_first_view_access =
- HObjectAccess::ForJSArrayBufferWeakFirstView();
- Add<HStoreNamedField>(obj,
- HObjectAccess::ForJSArrayBufferViewWeakNext(),
- Add<HLoadNamedField>(buffer, static_cast<HValue*>(NULL),
- weak_first_view_access));
- Add<HStoreNamedField>(
- buffer, weak_first_view_access, obj);
+ if (buffer != NULL) {
+ Add<HStoreNamedField>(
+ obj,
+ HObjectAccess::ForJSArrayBufferViewBuffer(), buffer);
+ HObjectAccess weak_first_view_access =
+ HObjectAccess::ForJSArrayBufferWeakFirstView();
+ Add<HStoreNamedField>(obj,
+ HObjectAccess::ForJSArrayBufferViewWeakNext(),
+ Add<HLoadNamedField>(buffer,
+ static_cast<HValue*>(NULL),
+ weak_first_view_access));
+ Add<HStoreNamedField>(buffer, weak_first_view_access, obj);
+ } else {
+ Add<HStoreNamedField>(
+ obj,
+ HObjectAccess::ForJSArrayBufferViewBuffer(),
+ Add<HConstant>(static_cast<int32_t>(0)));
+ Add<HStoreNamedField>(obj,
+ HObjectAccess::ForJSArrayBufferViewWeakNext(),
+ graph()->GetConstantUndefined());
+ }
}
-void HOptimizedGraphBuilder::VisitDataViewInitialize(
+void HOptimizedGraphBuilder::GenerateDataViewInitialize(
CallRuntime* expr) {
ZoneList<Expression*>* arguments = expr->arguments();
@@ -8460,7 +8485,116 @@ void HOptimizedGraphBuilder::VisitDataViewInitialize(
}
-void HOptimizedGraphBuilder::VisitTypedArrayInitialize(
+static Handle<Map> TypedArrayMap(Isolate* isolate,
+ ExternalArrayType array_type,
+ ElementsKind target_kind) {
+ Handle<Context> native_context = isolate->native_context();
+ Handle<JSFunction> fun;
+ switch (array_type) {
+#define TYPED_ARRAY_CASE(Type, type, TYPE, ctype, size) \
+ case kExternal##Type##Array: \
+ fun = Handle<JSFunction>(native_context->type##_array_fun()); \
+ break;
+
+ TYPED_ARRAYS(TYPED_ARRAY_CASE)
+#undef TYPED_ARRAY_CASE
+ }
+ Handle<Map> map(fun->initial_map());
+ return Map::AsElementsKind(map, target_kind);
+}
+
+
+HValue* HOptimizedGraphBuilder::BuildAllocateExternalElements(
+ ExternalArrayType array_type,
+ bool is_zero_byte_offset,
+ HValue* buffer, HValue* byte_offset, HValue* length) {
+ Handle<Map> external_array_map(
+ isolate()->heap()->MapForExternalArrayType(array_type));
+ HValue* elements =
+ Add<HAllocate>(
+ Add<HConstant>(ExternalArray::kAlignedSize),
+ HType::Tagged(),
+ NOT_TENURED,
+ external_array_map->instance_type());
+
+ AddStoreMapConstant(elements, external_array_map);
+
+ HValue* backing_store = Add<HLoadNamedField>(
+ buffer, static_cast<HValue*>(NULL),
+ HObjectAccess::ForJSArrayBufferBackingStore());
+
+ HValue* typed_array_start;
+ if (is_zero_byte_offset) {
+ typed_array_start = backing_store;
+ } else {
+ HInstruction* external_pointer =
+ AddUncasted<HAdd>(backing_store, byte_offset);
+    // Arguments are checked prior to the call to TypedArrayInitialize,
+    // including byte_offset.
+ external_pointer->ClearFlag(HValue::kCanOverflow);
+ typed_array_start = external_pointer;
+ }
+
+
+ Add<HStoreNamedField>(elements,
+ HObjectAccess::ForExternalArrayExternalPointer(),
+ typed_array_start);
+
+ Add<HStoreNamedField>(elements,
+ HObjectAccess::ForFixedArrayLength(), length);
+ return elements;
+}
+
+
+HValue* HOptimizedGraphBuilder::BuildAllocateFixedTypedArray(
+ ExternalArrayType array_type, size_t element_size,
+ ElementsKind fixed_elements_kind,
+ HValue* byte_length, HValue* length) {
+ STATIC_ASSERT(
+ (FixedTypedArrayBase::kHeaderSize & kObjectAlignmentMask) == 0);
+ HValue* total_size;
+
+  // If the fixed array's elements are not aligned to object alignment,
+  // we need to align the whole array to object alignment.
+ if (element_size % kObjectAlignment != 0) {
+ total_size = BuildObjectSizeAlignment(
+ byte_length, FixedTypedArrayBase::kHeaderSize);
+ } else {
+ total_size = AddUncasted<HAdd>(byte_length,
+ Add<HConstant>(FixedTypedArrayBase::kHeaderSize));
+ total_size->ClearFlag(HValue::kCanOverflow);
+ }
+
+ Handle<Map> fixed_typed_array_map(
+ isolate()->heap()->MapForFixedTypedArray(array_type));
+ HValue* elements =
+ Add<HAllocate>(total_size, HType::Tagged(),
+ NOT_TENURED,
+ fixed_typed_array_map->instance_type());
+ AddStoreMapConstant(elements, fixed_typed_array_map);
+
+ Add<HStoreNamedField>(elements,
+ HObjectAccess::ForFixedArrayLength(),
+ length);
+ HValue* filler = Add<HConstant>(static_cast<int32_t>(0));
+
+ {
+ LoopBuilder builder(this, context(), LoopBuilder::kPostIncrement);
+
+ HValue* key = builder.BeginBody(
+ Add<HConstant>(static_cast<int32_t>(0)),
+ length, Token::LT);
+ Add<HStoreKeyed>(elements, key, filler, fixed_elements_kind);
+
+ builder.EndBody();
+ }
+ Add<HStoreNamedField>(
+ elements, HObjectAccess::ForFixedArrayLength(), length);
+ return elements;
+}
+
+
+void HOptimizedGraphBuilder::GenerateTypedArrayInitialize(
CallRuntime* expr) {
ZoneList<Expression*>* arguments = expr->arguments();
@@ -8483,8 +8617,13 @@ void HOptimizedGraphBuilder::VisitTypedArrayInitialize(
ASSERT(value->IsSmi());
int array_id = Smi::cast(*value)->value();
- CHECK_ALIVE(VisitForValue(arguments->at(kBufferArg)));
- HValue* buffer = Pop();
+ HValue* buffer;
+ if (!arguments->at(kBufferArg)->IsNullLiteral()) {
+ CHECK_ALIVE(VisitForValue(arguments->at(kBufferArg)));
+ buffer = Pop();
+ } else {
+ buffer = NULL;
+ }
HValue* byte_offset;
bool is_zero_byte_offset;
@@ -8498,6 +8637,7 @@ void HOptimizedGraphBuilder::VisitTypedArrayInitialize(
CHECK_ALIVE(VisitForValue(arguments->at(kByteOffsetArg)));
byte_offset = Pop();
is_zero_byte_offset = false;
+ ASSERT(buffer != NULL);
}
CHECK_ALIVE(VisitForValue(arguments->at(kByteLengthArg)));
@@ -8510,13 +8650,24 @@ void HOptimizedGraphBuilder::VisitTypedArrayInitialize(
byte_offset_smi.Then();
}
+ ExternalArrayType array_type =
+ kExternalInt8Array; // Bogus initialization.
+ size_t element_size = 1; // Bogus initialization.
+ ElementsKind external_elements_kind = // Bogus initialization.
+ EXTERNAL_INT8_ELEMENTS;
+ ElementsKind fixed_elements_kind = // Bogus initialization.
+ INT8_ELEMENTS;
+ Runtime::ArrayIdToTypeAndSize(array_id,
+ &array_type,
+ &external_elements_kind,
+ &fixed_elements_kind,
+ &element_size);
+
+
{ // byte_offset is Smi.
BuildArrayBufferViewInitialization<JSTypedArray>(
obj, buffer, byte_offset, byte_length);
- ExternalArrayType array_type = kExternalInt8Array; // Bogus initialization.
- size_t element_size = 1; // Bogus initialization.
- Runtime::ArrayIdToTypeAndSize(array_id, &array_type, &element_size);
HInstruction* length = AddUncasted<HDiv>(byte_length,
Add<HConstant>(static_cast<int32_t>(element_size)));
@@ -8525,40 +8676,19 @@ void HOptimizedGraphBuilder::VisitTypedArrayInitialize(
HObjectAccess::ForJSTypedArrayLength(),
length);
- Handle<Map> external_array_map(
- isolate()->heap()->MapForExternalArrayType(array_type));
-
- HValue* elements =
- Add<HAllocate>(
- Add<HConstant>(ExternalArray::kAlignedSize),
- HType::JSArray(),
- NOT_TENURED,
- external_array_map->instance_type());
-
- AddStoreMapConstant(elements, external_array_map);
-
- HValue* backing_store = Add<HLoadNamedField>(
- buffer, static_cast<HValue*>(NULL),
- HObjectAccess::ForJSArrayBufferBackingStore());
-
- HValue* typed_array_start;
- if (is_zero_byte_offset) {
- typed_array_start = backing_store;
+ HValue* elements;
+ if (buffer != NULL) {
+ elements = BuildAllocateExternalElements(
+ array_type, is_zero_byte_offset, buffer, byte_offset, length);
+ Handle<Map> obj_map = TypedArrayMap(
+ isolate(), array_type, external_elements_kind);
+ AddStoreMapConstant(obj, obj_map);
} else {
- HInstruction* external_pointer =
- AddUncasted<HAdd>(backing_store, byte_offset);
- // Arguments are checked prior to call to TypedArrayInitialize,
- // including byte_offset.
- external_pointer->ClearFlag(HValue::kCanOverflow);
- typed_array_start = external_pointer;
- }
-
- Add<HStoreNamedField>(elements,
- HObjectAccess::ForExternalArrayExternalPointer(),
- typed_array_start);
- Add<HStoreNamedField>(elements,
- HObjectAccess::ForFixedArrayLength(),
- length);
+ ASSERT(is_zero_byte_offset);
+ elements = BuildAllocateFixedTypedArray(
+ array_type, element_size, fixed_elements_kind,
+ byte_length, length);
+ }
Add<HStoreNamedField>(
obj, HObjectAccess::ForElementsPointer(), elements);
}
@@ -8566,19 +8696,35 @@ void HOptimizedGraphBuilder::VisitTypedArrayInitialize(
if (!is_zero_byte_offset) {
byte_offset_smi.Else();
{ // byte_offset is not Smi.
- Push(Add<HPushArgument>(obj));
- VisitArgument(arguments->at(kArrayIdArg));
- Push(Add<HPushArgument>(buffer));
- Push(Add<HPushArgument>(byte_offset));
- Push(Add<HPushArgument>(byte_length));
+ Push(obj);
+ CHECK_ALIVE(VisitForValue(arguments->at(kArrayIdArg)));
+ Push(buffer);
+ Push(byte_offset);
+ Push(byte_length);
+ PushArgumentsFromEnvironment(kArgsLength);
Add<HCallRuntime>(expr->name(), expr->function(), kArgsLength);
- Drop(kArgsLength);
}
}
byte_offset_smi.End();
}
+void HOptimizedGraphBuilder::GenerateMaxSmi(CallRuntime* expr) {
+ ASSERT(expr->arguments()->length() == 0);
+ HConstant* max_smi = New<HConstant>(static_cast<int32_t>(Smi::kMaxValue));
+ return ast_context()->ReturnInstruction(max_smi, expr->id());
+}
+
+
+void HOptimizedGraphBuilder::GenerateTypedArrayMaxSizeInHeap(
+ CallRuntime* expr) {
+ ASSERT(expr->arguments()->length() == 0);
+ HConstant* result = New<HConstant>(static_cast<int32_t>(
+ FLAG_typed_array_max_size_in_heap));
+ return ast_context()->ReturnInstruction(result, expr->id());
+}
+
+
void HOptimizedGraphBuilder::VisitCallRuntime(CallRuntime* expr) {
ASSERT(!HasStackOverflow());
ASSERT(current_block() != NULL);
@@ -8590,21 +8736,8 @@ void HOptimizedGraphBuilder::VisitCallRuntime(CallRuntime* expr) {
const Runtime::Function* function = expr->function();
ASSERT(function != NULL);
- if (function->function_id == Runtime::kDataViewInitialize) {
- return VisitDataViewInitialize(expr);
- }
-
- if (function->function_id == Runtime::kTypedArrayInitialize) {
- return VisitTypedArrayInitialize(expr);
- }
-
- if (function->function_id == Runtime::kMaxSmi) {
- ASSERT(expr->arguments()->length() == 0);
- HConstant* max_smi = New<HConstant>(static_cast<int32_t>(Smi::kMaxValue));
- return ast_context()->ReturnInstruction(max_smi, expr->id());
- }
-
- if (function->intrinsic_type == Runtime::INLINE) {
+ if (function->intrinsic_type == Runtime::INLINE ||
+ function->intrinsic_type == Runtime::INLINE_OPTIMIZED) {
ASSERT(expr->name()->length() > 0);
ASSERT(expr->name()->Get(0) == '_');
// Call to an inline function.
@@ -8619,13 +8752,12 @@ void HOptimizedGraphBuilder::VisitCallRuntime(CallRuntime* expr) {
(this->*generator)(expr);
} else {
ASSERT(function->intrinsic_type == Runtime::RUNTIME);
- CHECK_ALIVE(VisitArgumentList(expr->arguments()));
-
Handle<String> name = expr->name();
int argument_count = expr->arguments()->length();
+ CHECK_ALIVE(VisitExpressions(expr->arguments()));
+ PushArgumentsFromEnvironment(argument_count);
HCallRuntime* call = New<HCallRuntime>(name, function,
argument_count);
- Drop(argument_count);
return ast_context()->ReturnInstruction(call, expr->id());
}
}
@@ -8656,7 +8788,7 @@ void HOptimizedGraphBuilder::VisitDelete(UnaryOperation* expr) {
HValue* function = AddLoadJSBuiltin(Builtins::DELETE);
Add<HPushArgument>(obj);
Add<HPushArgument>(key);
- Add<HPushArgument>(Add<HConstant>(function_strict_mode_flag()));
+ Add<HPushArgument>(Add<HConstant>(function_strict_mode()));
// TODO(olivf) InvokeFunction produces a check for the parameter count,
// even though we are certain to pass the correct number of arguments here.
HInstruction* instr = New<HInvokeFunction>(function, 3);
@@ -8802,7 +8934,7 @@ void HOptimizedGraphBuilder::VisitCountOperation(CountOperation* expr) {
ASSERT(!HasStackOverflow());
ASSERT(current_block() != NULL);
ASSERT(current_block()->HasPredecessor());
- if (!FLAG_emit_opt_code_positions) SetSourcePosition(expr->position());
+ if (!FLAG_hydrogen_track_positions) SetSourcePosition(expr->position());
Expression* target = expr->expression();
VariableProxy* proxy = target->AsVariableProxy();
Property* prop = target->AsProperty();
@@ -8820,7 +8952,7 @@ void HOptimizedGraphBuilder::VisitCountOperation(CountOperation* expr) {
if (proxy != NULL) {
Variable* var = proxy->var();
- if (var->mode() == CONST) {
+ if (var->mode() == CONST_LEGACY) {
return Bailout(kUnsupportedCountOperationWithConst);
}
// Argument of the count operation is a variable, not a property.
@@ -8944,13 +9076,7 @@ static bool ShiftAmountsAllowReplaceByRotate(HValue* sa,
}
if (!const32_minus_sa->IsSub()) return false;
HSub* sub = HSub::cast(const32_minus_sa);
- if (sa != sub->right()) return false;
- HValue* const32 = sub->left();
- if (!const32->IsConstant() ||
- HConstant::cast(const32)->Integer32Value() != 32) {
- return false;
- }
- return (sub->right() == sa);
+ return sub->left()->EqualsInteger32Constant(32) && sub->right() == sa;
}
@@ -8999,7 +9125,7 @@ bool CanBeZero(HValue* right) {
HValue* HGraphBuilder::EnforceNumberType(HValue* number,
Type* expected) {
- if (expected->Is(Type::Smi())) {
+ if (expected->Is(Type::SignedSmall())) {
return AddUncasted<HForceRepresentation>(number, Representation::Smi());
}
if (expected->Is(Type::Signed32())) {
@@ -9042,7 +9168,7 @@ HValue* HGraphBuilder::TruncateToNumber(HValue* value, Type** expected) {
if (expected_obj->Is(Type::Undefined(zone()))) {
// This is already done by HChange.
- *expected = Type::Union(expected_number, Type::Double(zone()), zone());
+ *expected = Type::Union(expected_number, Type::Float(zone()), zone());
return value;
}
@@ -9078,13 +9204,12 @@ HValue* HOptimizedGraphBuilder::BuildBinaryOperation(
// after phis, which are the result of BuildBinaryOperation when we
// inlined some complex subgraph.
if (result->HasObservableSideEffects() || result->IsPhi()) {
- if (push_sim_result == NO_PUSH_BEFORE_SIMULATE) {
- Add<HSimulate>(expr->id(), REMOVABLE_SIMULATE);
- } else {
- ASSERT(push_sim_result == PUSH_BEFORE_SIMULATE);
+ if (push_sim_result == PUSH_BEFORE_SIMULATE) {
Push(result);
Add<HSimulate>(expr->id(), REMOVABLE_SIMULATE);
Drop(1);
+ } else {
+ Add<HSimulate>(expr->id(), REMOVABLE_SIMULATE);
}
}
return result;
@@ -9238,21 +9363,15 @@ HValue* HGraphBuilder::BuildBinaryOperation(
instr = AddUncasted<HMul>(left, right);
break;
case Token::MOD: {
- if (fixed_right_arg.has_value) {
- if (right->IsConstant()) {
- HConstant* c_right = HConstant::cast(right);
- if (c_right->HasInteger32Value()) {
- ASSERT_EQ(fixed_right_arg.value, c_right->Integer32Value());
- }
- } else {
- HConstant* fixed_right = Add<HConstant>(
- static_cast<int>(fixed_right_arg.value));
- IfBuilder if_same(this);
- if_same.If<HCompareNumericAndBranch>(right, fixed_right, Token::EQ);
- if_same.Then();
- if_same.ElseDeopt("Unexpected RHS of binary operation");
- right = fixed_right;
- }
+ if (fixed_right_arg.has_value &&
+ !right->EqualsInteger32Constant(fixed_right_arg.value)) {
+ HConstant* fixed_right = Add<HConstant>(
+ static_cast<int>(fixed_right_arg.value));
+ IfBuilder if_same(this);
+ if_same.If<HCompareNumericAndBranch>(right, fixed_right, Token::EQ);
+ if_same.Then();
+ if_same.ElseDeopt("Unexpected RHS of binary operation");
+ right = fixed_right;
}
instr = AddUncasted<HMod>(left, right);
break;
@@ -9469,9 +9588,11 @@ void HOptimizedGraphBuilder::VisitArithmeticExpression(BinaryOperation* expr) {
BuildBinaryOperation(expr, left, right,
ast_context()->IsEffect() ? NO_PUSH_BEFORE_SIMULATE
: PUSH_BEFORE_SIMULATE);
- if (FLAG_emit_opt_code_positions && result->IsBinaryOperation()) {
+ if (FLAG_hydrogen_track_positions && result->IsBinaryOperation()) {
HBinaryOperation::cast(result)->SetOperandPositions(
- zone(), expr->left()->position(), expr->right()->position());
+ zone(),
+ ScriptPositionToSourcePosition(expr->left()->position()),
+ ScriptPositionToSourcePosition(expr->right()->position()));
}
return ast_context()->ReturnValue(result);
}
@@ -9505,7 +9626,7 @@ void HOptimizedGraphBuilder::VisitCompareOperation(CompareOperation* expr) {
ASSERT(current_block() != NULL);
ASSERT(current_block()->HasPredecessor());
- if (!FLAG_emit_opt_code_positions) SetSourcePosition(expr->position());
+ if (!FLAG_hydrogen_track_positions) SetSourcePosition(expr->position());
// Check for a few fast cases. The AST visiting behavior must be in sync
// with the full codegen: We don't push both left and right values onto
@@ -9540,7 +9661,7 @@ void HOptimizedGraphBuilder::VisitCompareOperation(CompareOperation* expr) {
CHECK_ALIVE(VisitForValue(expr->left()));
CHECK_ALIVE(VisitForValue(expr->right()));
- if (FLAG_emit_opt_code_positions) SetSourcePosition(expr->position());
+ if (FLAG_hydrogen_track_positions) SetSourcePosition(expr->position());
HValue* right = Pop();
HValue* left = Pop();
@@ -9600,9 +9721,14 @@ void HOptimizedGraphBuilder::VisitCompareOperation(CompareOperation* expr) {
return ast_context()->ReturnInstruction(result, expr->id());
}
+ PushBeforeSimulateBehavior push_behavior =
+ ast_context()->IsEffect() ? NO_PUSH_BEFORE_SIMULATE
+ : PUSH_BEFORE_SIMULATE;
HControlInstruction* compare = BuildCompareInstruction(
op, left, right, left_type, right_type, combined_type,
- expr->left()->position(), expr->right()->position(), expr->id());
+ ScriptPositionToSourcePosition(expr->left()->position()),
+ ScriptPositionToSourcePosition(expr->right()->position()),
+ push_behavior, expr->id());
if (compare == NULL) return; // Bailed out.
return ast_context()->ReturnControl(compare, expr->id());
}
@@ -9615,8 +9741,9 @@ HControlInstruction* HOptimizedGraphBuilder::BuildCompareInstruction(
Type* left_type,
Type* right_type,
Type* combined_type,
- int left_position,
- int right_position,
+ HSourcePosition left_position,
+ HSourcePosition right_position,
+ PushBeforeSimulateBehavior push_sim_result,
BailoutId bailout_id) {
// Cases handled below depend on collected type feedback. They should
// soft deoptimize when there is no type feedback.
@@ -9641,7 +9768,7 @@ HControlInstruction* HOptimizedGraphBuilder::BuildCompareInstruction(
AddCheckMap(operand_to_check, map);
HCompareObjectEqAndBranch* result =
New<HCompareObjectEqAndBranch>(left, right);
- if (FLAG_emit_opt_code_positions) {
+ if (FLAG_hydrogen_track_positions) {
result->set_operand_position(zone(), 0, left_position);
result->set_operand_position(zone(), 1, right_position);
}
@@ -9681,9 +9808,13 @@ HControlInstruction* HOptimizedGraphBuilder::BuildCompareInstruction(
result->set_observed_input_representation(1, left_rep);
result->set_observed_input_representation(2, right_rep);
if (result->HasObservableSideEffects()) {
- Push(result);
- AddSimulate(bailout_id, REMOVABLE_SIMULATE);
- Drop(1);
+ if (push_sim_result == PUSH_BEFORE_SIMULATE) {
+ Push(result);
+ AddSimulate(bailout_id, REMOVABLE_SIMULATE);
+ Drop(1);
+ } else {
+ AddSimulate(bailout_id, REMOVABLE_SIMULATE);
+ }
}
// TODO(jkummerow): Can we make this more efficient?
HBranch* branch = New<HBranch>(result);
@@ -9692,7 +9823,7 @@ HControlInstruction* HOptimizedGraphBuilder::BuildCompareInstruction(
HCompareNumericAndBranch* result =
New<HCompareNumericAndBranch>(left, right, op);
result->set_observed_input_representation(left_rep, right_rep);
- if (FLAG_emit_opt_code_positions) {
+ if (FLAG_hydrogen_track_positions) {
result->SetOperandPositions(zone(), left_position, right_position);
}
return result;
@@ -9708,7 +9839,7 @@ void HOptimizedGraphBuilder::HandleLiteralCompareNil(CompareOperation* expr,
ASSERT(current_block() != NULL);
ASSERT(current_block()->HasPredecessor());
ASSERT(expr->op() == Token::EQ || expr->op() == Token::EQ_STRICT);
- if (!FLAG_emit_opt_code_positions) SetSourcePosition(expr->position());
+ if (!FLAG_hydrogen_track_positions) SetSourcePosition(expr->position());
CHECK_ALIVE(VisitForValue(sub_expr));
HValue* value = Pop();
if (expr->op() == Token::EQ_STRICT) {
@@ -9780,22 +9911,26 @@ HInstruction* HOptimizedGraphBuilder::BuildFastLiteral(
elements->map() != isolate()->heap()->fixed_cow_array_map()) ?
elements->Size() : 0;
+ if (pretenure_flag == TENURED &&
+ elements->map() == isolate()->heap()->fixed_cow_array_map() &&
+ isolate()->heap()->InNewSpace(*elements)) {
+ // If we would like to pretenure a fixed cow array, we must ensure that the
+ // array is already in old space, otherwise we'll create too many old-to-
+ // new-space pointers (overflowing the store buffer).
+ elements = Handle<FixedArrayBase>(
+ isolate()->factory()->CopyAndTenureFixedCOWArray(
+ Handle<FixedArray>::cast(elements)));
+ boilerplate_object->set_elements(*elements);
+ }
+
HInstruction* object_elements = NULL;
if (elements_size > 0) {
HValue* object_elements_size = Add<HConstant>(elements_size);
if (boilerplate_object->HasFastDoubleElements()) {
- // Allocation folding will not be able to fold |object| and
- // |object_elements| together if they are pre-tenured.
- if (pretenure_flag == TENURED) {
- HConstant* empty_fixed_array = Add<HConstant>(
- isolate()->factory()->empty_fixed_array());
- Add<HStoreNamedField>(object, HObjectAccess::ForElementsPointer(),
- empty_fixed_array);
- }
- object_elements = Add<HAllocate>(object_elements_size, HType::JSObject(),
+ object_elements = Add<HAllocate>(object_elements_size, HType::Tagged(),
pretenure_flag, FIXED_DOUBLE_ARRAY_TYPE, site_context->current());
} else {
- object_elements = Add<HAllocate>(object_elements_size, HType::JSObject(),
+ object_elements = Add<HAllocate>(object_elements_size, HType::Tagged(),
pretenure_flag, FIXED_ARRAY_TYPE, site_context->current());
}
}
@@ -10028,7 +10163,7 @@ void HOptimizedGraphBuilder::VisitDeclarations(
for (int i = 0; i < globals_.length(); ++i) array->set(i, *globals_.at(i));
int flags = DeclareGlobalsEvalFlag::encode(current_info()->is_eval()) |
DeclareGlobalsNativeFlag::encode(current_info()->is_native()) |
- DeclareGlobalsLanguageMode::encode(current_info()->language_mode());
+ DeclareGlobalsStrictMode::encode(current_info()->strict_mode());
Add<HDeclareGlobals>(array, flags);
globals_.Clear();
}
@@ -10040,7 +10175,7 @@ void HOptimizedGraphBuilder::VisitVariableDeclaration(
VariableProxy* proxy = declaration->proxy();
VariableMode mode = declaration->mode();
Variable* variable = proxy->var();
- bool hole_init = mode == CONST || mode == CONST_HARMONY || mode == LET;
+ bool hole_init = mode == LET || mode == CONST || mode == CONST_LEGACY;
switch (variable->location()) {
case Variable::UNALLOCATED:
globals_.Add(variable->name(), zone());
@@ -10350,12 +10485,13 @@ void HOptimizedGraphBuilder::GenerateDateField(CallRuntime* call) {
void HOptimizedGraphBuilder::GenerateOneByteSeqStringSetChar(
CallRuntime* call) {
ASSERT(call->arguments()->length() == 3);
- CHECK_ALIVE(VisitForValue(call->arguments()->at(0)));
+ // We need to follow the evaluation order of full codegen.
CHECK_ALIVE(VisitForValue(call->arguments()->at(1)));
CHECK_ALIVE(VisitForValue(call->arguments()->at(2)));
+ CHECK_ALIVE(VisitForValue(call->arguments()->at(0)));
+ HValue* string = Pop();
HValue* value = Pop();
HValue* index = Pop();
- HValue* string = Pop();
Add<HSeqStringSetChar>(String::ONE_BYTE_ENCODING, string,
index, value);
Add<HSimulate>(call->id(), FIXED_SIMULATE);
@@ -10366,12 +10502,13 @@ void HOptimizedGraphBuilder::GenerateOneByteSeqStringSetChar(
void HOptimizedGraphBuilder::GenerateTwoByteSeqStringSetChar(
CallRuntime* call) {
ASSERT(call->arguments()->length() == 3);
- CHECK_ALIVE(VisitForValue(call->arguments()->at(0)));
+ // We need to follow the evaluation order of full codegen.
CHECK_ALIVE(VisitForValue(call->arguments()->at(1)));
CHECK_ALIVE(VisitForValue(call->arguments()->at(2)));
+ CHECK_ALIVE(VisitForValue(call->arguments()->at(0)));
+ HValue* string = Pop();
HValue* value = Pop();
HValue* index = Pop();
- HValue* string = Pop();
Add<HSeqStringSetChar>(String::TWO_BYTE_ENCODING, string,
index, value);
Add<HSimulate>(call->id(), FIXED_SIMULATE);
@@ -10395,14 +10532,23 @@ void HOptimizedGraphBuilder::GenerateSetValueOf(CallRuntime* call) {
Add<HStoreNamedField>(object,
HObjectAccess::ForObservableJSObjectOffset(JSValue::kValueOffset),
value);
+ if (!ast_context()->IsEffect()) {
+ Push(value);
+ }
Add<HSimulate>(call->id(), FIXED_SIMULATE);
}
if_objectisvalue.Else();
{
// Nothing to do in this case.
+ if (!ast_context()->IsEffect()) {
+ Push(value);
+ }
Add<HSimulate>(call->id(), FIXED_SIMULATE);
}
if_objectisvalue.End();
+ if (!ast_context()->IsEffect()) {
+ Drop(1);
+ }
return ast_context()->ReturnValue(value);
}
@@ -10477,9 +10623,9 @@ void HOptimizedGraphBuilder::GenerateStringAdd(CallRuntime* call) {
// Fast support for SubString.
void HOptimizedGraphBuilder::GenerateSubString(CallRuntime* call) {
ASSERT_EQ(3, call->arguments()->length());
- CHECK_ALIVE(VisitArgumentList(call->arguments()));
+ CHECK_ALIVE(VisitExpressions(call->arguments()));
+ PushArgumentsFromEnvironment(call->arguments()->length());
HCallStub* result = New<HCallStub>(CodeStub::SubString, 3);
- Drop(3);
return ast_context()->ReturnInstruction(result, call->id());
}
@@ -10487,9 +10633,9 @@ void HOptimizedGraphBuilder::GenerateSubString(CallRuntime* call) {
// Fast support for StringCompare.
void HOptimizedGraphBuilder::GenerateStringCompare(CallRuntime* call) {
ASSERT_EQ(2, call->arguments()->length());
- CHECK_ALIVE(VisitArgumentList(call->arguments()));
+ CHECK_ALIVE(VisitExpressions(call->arguments()));
+ PushArgumentsFromEnvironment(call->arguments()->length());
HCallStub* result = New<HCallStub>(CodeStub::StringCompare, 2);
- Drop(2);
return ast_context()->ReturnInstruction(result, call->id());
}
@@ -10497,9 +10643,38 @@ void HOptimizedGraphBuilder::GenerateStringCompare(CallRuntime* call) {
// Support for direct calls from JavaScript to native RegExp code.
void HOptimizedGraphBuilder::GenerateRegExpExec(CallRuntime* call) {
ASSERT_EQ(4, call->arguments()->length());
- CHECK_ALIVE(VisitArgumentList(call->arguments()));
+ CHECK_ALIVE(VisitExpressions(call->arguments()));
+ PushArgumentsFromEnvironment(call->arguments()->length());
HCallStub* result = New<HCallStub>(CodeStub::RegExpExec, 4);
- Drop(4);
+ return ast_context()->ReturnInstruction(result, call->id());
+}
+
+
+void HOptimizedGraphBuilder::GenerateDoubleLo(CallRuntime* call) {
+ ASSERT_EQ(1, call->arguments()->length());
+ CHECK_ALIVE(VisitForValue(call->arguments()->at(0)));
+ HValue* value = Pop();
+ HInstruction* result = NewUncasted<HDoubleBits>(value, HDoubleBits::LOW);
+ return ast_context()->ReturnInstruction(result, call->id());
+}
+
+
+void HOptimizedGraphBuilder::GenerateDoubleHi(CallRuntime* call) {
+ ASSERT_EQ(1, call->arguments()->length());
+ CHECK_ALIVE(VisitForValue(call->arguments()->at(0)));
+ HValue* value = Pop();
+ HInstruction* result = NewUncasted<HDoubleBits>(value, HDoubleBits::HIGH);
+ return ast_context()->ReturnInstruction(result, call->id());
+}
+
+
+void HOptimizedGraphBuilder::GenerateConstructDouble(CallRuntime* call) {
+ ASSERT_EQ(2, call->arguments()->length());
+ CHECK_ALIVE(VisitForValue(call->arguments()->at(0)));
+ CHECK_ALIVE(VisitForValue(call->arguments()->at(1)));
+ HValue* lo = Pop();
+ HValue* hi = Pop();
+ HInstruction* result = NewUncasted<HConstructDouble>(hi, lo);
return ast_context()->ReturnInstruction(result, call->id());
}
@@ -10540,12 +10715,11 @@ void HOptimizedGraphBuilder::GenerateCallFunction(CallRuntime* call) {
int arg_count = call->arguments()->length() - 1;
ASSERT(arg_count >= 1); // There's always at least a receiver.
- for (int i = 0; i < arg_count; ++i) {
- CHECK_ALIVE(VisitArgument(call->arguments()->at(i)));
- }
- CHECK_ALIVE(VisitForValue(call->arguments()->last()));
-
+ CHECK_ALIVE(VisitExpressions(call->arguments()));
+  // The function is the last argument.
HValue* function = Pop();
+  // Push the arguments to the stack.
+ PushArgumentsFromEnvironment(arg_count);
IfBuilder if_is_jsfunction(this);
if_is_jsfunction.If<HHasInstanceTypeAndBranch>(function, JS_FUNCTION_TYPE);
@@ -10554,7 +10728,6 @@ void HOptimizedGraphBuilder::GenerateCallFunction(CallRuntime* call) {
{
HInstruction* invoke_result =
Add<HInvokeFunction>(function, arg_count);
- Drop(arg_count);
if (!ast_context()->IsEffect()) {
Push(invoke_result);
}
@@ -10565,7 +10738,6 @@ void HOptimizedGraphBuilder::GenerateCallFunction(CallRuntime* call) {
{
HInstruction* call_result =
Add<HCallFunction>(function, arg_count);
- Drop(arg_count);
if (!ast_context()->IsEffect()) {
Push(call_result);
}
@@ -10945,7 +11117,10 @@ void HTracer::TraceCompilation(CompilationInfo* info) {
if (info->IsOptimizing()) {
Handle<String> name = info->function()->debug_name();
PrintStringProperty("name", name->ToCString().get());
- PrintStringProperty("method", name->ToCString().get());
+ PrintIndent();
+ trace_.Add("method \"%s:%d\"\n",
+ name->ToCString().get(),
+ info->optimization_id());
} else {
CodeStub::Major major_key = info->code_stub()->MajorKey();
PrintStringProperty("name", CodeStub::MajorName(major_key, false));
@@ -11059,14 +11234,22 @@ void HTracer::Trace(const char* name, HGraph* graph, LChunk* chunk) {
Tag HIR_tag(this, "HIR");
for (HInstructionIterator it(current); !it.Done(); it.Advance()) {
HInstruction* instruction = it.Current();
- int bci = FLAG_emit_opt_code_positions && instruction->has_position() ?
- instruction->position() : 0;
int uses = instruction->UseCount();
PrintIndent();
- trace_.Add("%d %d ", bci, uses);
+ trace_.Add("0 %d ", uses);
instruction->PrintNameTo(&trace_);
trace_.Add(" ");
instruction->PrintTo(&trace_);
+ if (FLAG_hydrogen_track_positions &&
+ instruction->has_position() &&
+ instruction->position().raw() != 0) {
+ const HSourcePosition pos = instruction->position();
+ trace_.Add(" pos:");
+ if (pos.inlining_id() != 0) {
+ trace_.Add("%d_", pos.inlining_id());
+ }
+ trace_.Add("%d", pos.position());
+ }
trace_.Add(" <|@\n");
}
}
diff --git a/deps/v8/src/hydrogen.h b/deps/v8/src/hydrogen.h
index b8344ef9c4..6d81307e2c 100644
--- a/deps/v8/src/hydrogen.h
+++ b/deps/v8/src/hydrogen.h
@@ -110,7 +110,7 @@ class HBasicBlock V8_FINAL : public ZoneObject {
bool IsFinished() const { return end_ != NULL; }
void AddPhi(HPhi* phi);
void RemovePhi(HPhi* phi);
- void AddInstruction(HInstruction* instr, int position);
+ void AddInstruction(HInstruction* instr, HSourcePosition position);
bool Dominates(HBasicBlock* other) const;
bool EqualToOrDominates(HBasicBlock* other) const;
int LoopNestingDepth() const;
@@ -137,7 +137,7 @@ class HBasicBlock V8_FINAL : public ZoneObject {
int PredecessorIndexOf(HBasicBlock* predecessor) const;
HPhi* AddNewPhi(int merged_index);
HSimulate* AddNewSimulate(BailoutId ast_id,
- int position,
+ HSourcePosition position,
RemovableSimulate removable = FIXED_SIMULATE) {
HSimulate* instr = CreateSimulate(ast_id, removable);
AddInstruction(instr, position);
@@ -174,6 +174,8 @@ class HBasicBlock V8_FINAL : public ZoneObject {
dominates_loop_successors_ = true;
}
+ void MarkSuccEdgeUnreachable(int succ);
+
inline Zone* zone() const;
#ifdef DEBUG
@@ -184,13 +186,13 @@ class HBasicBlock V8_FINAL : public ZoneObject {
friend class HGraphBuilder;
HSimulate* CreateSimulate(BailoutId ast_id, RemovableSimulate removable);
- void Finish(HControlInstruction* last, int position);
- void FinishExit(HControlInstruction* instruction, int position);
+ void Finish(HControlInstruction* last, HSourcePosition position);
+ void FinishExit(HControlInstruction* instruction, HSourcePosition position);
void Goto(HBasicBlock* block,
- int position,
+ HSourcePosition position,
FunctionState* state = NULL,
bool add_simulate = true);
- void GotoNoSimulate(HBasicBlock* block, int position) {
+ void GotoNoSimulate(HBasicBlock* block, HSourcePosition position) {
Goto(block, position, NULL, false);
}
@@ -198,7 +200,7 @@ class HBasicBlock V8_FINAL : public ZoneObject {
// instruction and updating the bailout environment.
void AddLeaveInlined(HValue* return_value,
FunctionState* state,
- int position);
+ HSourcePosition position);
private:
void RegisterPredecessor(HBasicBlock* pred);
@@ -469,6 +471,16 @@ class HGraph V8_FINAL : public ZoneObject {
void DecrementInNoSideEffectsScope() { no_side_effects_scope_count_--; }
bool IsInsideNoSideEffectsScope() { return no_side_effects_scope_count_ > 0; }
+ // If we are tracking source positions then this function assigns a unique
+ // identifier to each inlining and dumps function source if it was inlined
+ // for the first time during the current optimization.
+ int TraceInlinedFunction(Handle<SharedFunctionInfo> shared,
+ HSourcePosition position);
+
+ // Converts given HSourcePosition to the absolute offset from the start of
+ // the corresponding script.
+ int SourcePositionToScriptPosition(HSourcePosition position);
+
private:
HConstant* ReinsertConstantIfNecessary(HConstant* constant);
HConstant* GetConstant(SetOncePointer<HConstant>* pointer,
@@ -514,6 +526,23 @@ class HGraph V8_FINAL : public ZoneObject {
int no_side_effects_scope_count_;
bool disallow_adding_new_values_;
+ class InlinedFunctionInfo {
+ public:
+ explicit InlinedFunctionInfo(Handle<SharedFunctionInfo> shared)
+ : shared_(shared), start_position_(shared->start_position()) {
+ }
+
+ Handle<SharedFunctionInfo> shared() const { return shared_; }
+ int start_position() const { return start_position_; }
+
+ private:
+ Handle<SharedFunctionInfo> shared_;
+ int start_position_;
+ };
+
+ int next_inline_id_;
+ ZoneList<InlinedFunctionInfo> inlined_functions_;
+
DISALLOW_COPY_AND_ASSIGN(HGraph);
};
@@ -880,7 +909,8 @@ class FunctionState V8_FINAL {
public:
FunctionState(HOptimizedGraphBuilder* owner,
CompilationInfo* info,
- InliningKind inlining_kind);
+ InliningKind inlining_kind,
+ int inlining_id);
~FunctionState();
CompilationInfo* compilation_info() { return compilation_info_; }
@@ -910,6 +940,8 @@ class FunctionState V8_FINAL {
bool arguments_pushed() { return arguments_elements() != NULL; }
+ int inlining_id() const { return inlining_id_; }
+
private:
HOptimizedGraphBuilder* owner_;
@@ -939,6 +971,9 @@ class FunctionState V8_FINAL {
HArgumentsObject* arguments_object_;
HArgumentsElements* arguments_elements_;
+ int inlining_id_;
+ HSourcePosition outer_source_position_;
+
FunctionState* outer_;
};
@@ -996,6 +1031,8 @@ class HAllocationMode V8_FINAL BASE_EMBEDDED {
: current_site_(current_site), pretenure_flag_(NOT_TENURED) {}
explicit HAllocationMode(PretenureFlag pretenure_flag)
: current_site_(NULL), pretenure_flag_(pretenure_flag) {}
+ HAllocationMode()
+ : current_site_(NULL), pretenure_flag_(NOT_TENURED) {}
HValue* current_site() const { return current_site_; }
Handle<AllocationSite> feedback_site() const { return feedback_site_; }
@@ -1022,7 +1059,8 @@ class HGraphBuilder {
: info_(info),
graph_(NULL),
current_block_(NULL),
- position_(RelocInfo::kNoPosition) {}
+ position_(HSourcePosition::Unknown()),
+ start_position_(0) {}
virtual ~HGraphBuilder() {}
HBasicBlock* current_block() const { return current_block_; }
@@ -1052,7 +1090,7 @@ class HGraphBuilder {
HBasicBlock* target,
FunctionState* state = NULL,
bool add_simulate = true) {
- from->Goto(target, position_, state, add_simulate);
+ from->Goto(target, source_position(), state, add_simulate);
}
void Goto(HBasicBlock* target,
FunctionState* state = NULL,
@@ -1068,7 +1106,7 @@ class HGraphBuilder {
void AddLeaveInlined(HBasicBlock* block,
HValue* return_value,
FunctionState* state) {
- block->AddLeaveInlined(return_value, state, position_);
+ block->AddLeaveInlined(return_value, state, source_position());
}
void AddLeaveInlined(HValue* return_value, FunctionState* state) {
return AddLeaveInlined(current_block(), return_value, state);
@@ -1274,8 +1312,6 @@ class HGraphBuilder {
void AddSimulate(BailoutId id, RemovableSimulate removable = FIXED_SIMULATE);
- int position() const { return position_; }
-
protected:
virtual bool BuildGraph() = 0;
@@ -1294,7 +1330,7 @@ class HGraphBuilder {
HValue* length,
HValue* key,
bool is_js_array,
- bool is_store);
+ PropertyAccessType access_type);
HValue* BuildCopyElementsOnWrite(HValue* object,
HValue* elements,
@@ -1336,6 +1372,10 @@ class HGraphBuilder {
HValue* dst_offset,
String::Encoding dst_encoding,
HValue* length);
+
+ // Align an object size to object alignment boundary
+ HValue* BuildObjectSizeAlignment(HValue* unaligned_size, int header_size);
+
// Both operands are non-empty strings.
HValue* BuildUncheckedStringAdd(HValue* left,
HValue* right,
@@ -1351,7 +1391,7 @@ class HGraphBuilder {
HValue* val,
bool is_js_array,
ElementsKind elements_kind,
- bool is_store,
+ PropertyAccessType access_type,
LoadKeyedHoleMode load_mode,
KeyedAccessStoreMode store_mode);
@@ -1361,11 +1401,9 @@ class HGraphBuilder {
HValue* val,
HValue* dependency,
ElementsKind elements_kind,
- bool is_store,
+ PropertyAccessType access_type,
LoadKeyedHoleMode load_mode = NEVER_RETURN_HOLE);
- HLoadNamedField* BuildLoadNamedField(HValue* object, HObjectAccess access);
- HInstruction* AddLoadNamedField(HValue* object, HObjectAccess access);
HInstruction* AddLoadStringInstanceType(HValue* string);
HInstruction* AddLoadStringLength(HValue* string);
HStoreNamedField* AddStoreMapNoWriteBarrier(HValue* object, HValue* map) {
@@ -1404,8 +1442,7 @@ class HGraphBuilder {
HValue* EnforceNumberType(HValue* number, Type* expected);
HValue* TruncateToNumber(HValue* value, Type** expected);
- void FinishExitWithHardDeoptimization(const char* reason,
- HBasicBlock* continuation);
+ void FinishExitWithHardDeoptimization(const char* reason);
void AddIncrementCounter(StatsCounter* counter);
@@ -1777,6 +1814,27 @@ class HGraphBuilder {
protected:
void SetSourcePosition(int position) {
ASSERT(position != RelocInfo::kNoPosition);
+ position_.set_position(position - start_position_);
+ }
+
+ void EnterInlinedSource(int start_position, int id) {
+ if (FLAG_hydrogen_track_positions) {
+ start_position_ = start_position;
+ position_.set_inlining_id(id);
+ }
+ }
+
+ // Convert the given absolute offset from the start of the script to
+ // the HSourcePosition assuming that this position corresponds to the
+ // same function as current position_.
+ HSourcePosition ScriptPositionToSourcePosition(int position) {
+ HSourcePosition pos = position_;
+ pos.set_position(position - start_position_);
+ return pos;
+ }
+
+ HSourcePosition source_position() { return position_; }
+ void set_source_position(HSourcePosition position) {
position_ = position;
}
@@ -1796,9 +1854,6 @@ class HGraphBuilder {
HValue* mask,
int current_probe);
- void PadEnvironmentForContinuation(HBasicBlock* from,
- HBasicBlock* continuation);
-
template <class I>
I* AddInstructionTyped(I* instr) {
return I::cast(AddInstruction(instr));
@@ -1807,7 +1862,8 @@ class HGraphBuilder {
CompilationInfo* info_;
HGraph* graph_;
HBasicBlock* current_block_;
- int position_;
+ HSourcePosition position_;
+ int start_position_;
};
@@ -2059,9 +2115,8 @@ class HOptimizedGraphBuilder : public HGraphBuilder, public AstVisitor {
void ClearInlinedTestContext() {
function_state()->ClearInlinedTestContext();
}
- StrictModeFlag function_strict_mode_flag() {
- return function_state()->compilation_info()->is_classic_mode()
- ? kNonStrictMode : kStrictMode;
+ StrictMode function_strict_mode() {
+ return function_state()->compilation_info()->strict_mode();
}
// Generators for inline runtime functions.
@@ -2069,7 +2124,7 @@ class HOptimizedGraphBuilder : public HGraphBuilder, public AstVisitor {
void Generate##Name(CallRuntime* call);
INLINE_FUNCTION_LIST(INLINE_FUNCTION_GENERATOR_DECLARATION)
- INLINE_RUNTIME_FUNCTION_LIST(INLINE_FUNCTION_GENERATOR_DECLARATION)
+ INLINE_OPTIMIZED_FUNCTION_LIST(INLINE_FUNCTION_GENERATOR_DECLARATION)
#undef INLINE_FUNCTION_GENERATOR_DECLARATION
void VisitDelete(UnaryOperation* expr);
@@ -2164,11 +2219,6 @@ class HOptimizedGraphBuilder : public HGraphBuilder, public AstVisitor {
HBasicBlock* true_block,
HBasicBlock* false_block);
- // Visit an argument subexpression and emit a push to the outgoing arguments.
- void VisitArgument(Expression* expr);
-
- void VisitArgumentList(ZoneList<Expression*>* arguments);
-
// Visit a list of expressions from left to right, each in a value context.
void VisitExpressions(ZoneList<Expression*>* exprs);
@@ -2187,8 +2237,6 @@ class HOptimizedGraphBuilder : public HGraphBuilder, public AstVisitor {
Type* ToType(Handle<Map> map) { return IC::MapToType<Type>(map, zone()); }
private:
- enum PropertyAccessType { LOAD, STORE };
-
// Helpers for flow graph construction.
enum GlobalPropertyAccess {
kUseCell,
@@ -2196,7 +2244,7 @@ class HOptimizedGraphBuilder : public HGraphBuilder, public AstVisitor {
};
GlobalPropertyAccess LookupGlobalProperty(Variable* var,
LookupResult* lookup,
- bool is_store);
+ PropertyAccessType access_type);
void EnsureArgumentsArePushedForAccess();
bool TryArgumentsAccess(Property* expr);
@@ -2213,7 +2261,8 @@ class HOptimizedGraphBuilder : public HGraphBuilder, public AstVisitor {
HValue* implicit_return_value,
BailoutId ast_id,
BailoutId return_id,
- InliningKind inlining_kind);
+ InliningKind inlining_kind,
+ HSourcePosition position);
bool TryInlineCall(Call* expr);
bool TryInlineConstruct(CallNew* expr, HValue* implicit_return_value);
@@ -2277,13 +2326,18 @@ class HOptimizedGraphBuilder : public HGraphBuilder, public AstVisitor {
SmallMapList* types,
Handle<String> name);
- void VisitTypedArrayInitialize(CallRuntime* expr);
+ HValue* BuildAllocateExternalElements(
+ ExternalArrayType array_type,
+ bool is_zero_byte_offset,
+ HValue* buffer, HValue* byte_offset, HValue* length);
+ HValue* BuildAllocateFixedTypedArray(
+ ExternalArrayType array_type, size_t element_size,
+ ElementsKind fixed_elements_kind,
+ HValue* byte_length, HValue* length);
bool IsCallNewArrayInlineable(CallNew* expr);
void BuildInlinedCallNewArray(CallNew* expr);
- void VisitDataViewInitialize(CallRuntime* expr);
-
class PropertyAccessInfo {
public:
PropertyAccessInfo(HOptimizedGraphBuilder* builder,
@@ -2416,23 +2470,27 @@ class HOptimizedGraphBuilder : public HGraphBuilder, public AstVisitor {
void HandleLiteralCompareNil(CompareOperation* expr,
Expression* sub_expr,
NilValue nil);
- HControlInstruction* BuildCompareInstruction(Token::Value op,
- HValue* left,
- HValue* right,
- Type* left_type,
- Type* right_type,
- Type* combined_type,
- int left_position,
- int right_position,
- BailoutId bailout_id);
-
- HInstruction* BuildStringCharCodeAt(HValue* string,
- HValue* index);
enum PushBeforeSimulateBehavior {
PUSH_BEFORE_SIMULATE,
NO_PUSH_BEFORE_SIMULATE
};
+
+ HControlInstruction* BuildCompareInstruction(
+ Token::Value op,
+ HValue* left,
+ HValue* right,
+ Type* left_type,
+ Type* right_type,
+ Type* combined_type,
+ HSourcePosition left_position,
+ HSourcePosition right_position,
+ PushBeforeSimulateBehavior push_sim_result,
+ BailoutId bailout_id);
+
+ HInstruction* BuildStringCharCodeAt(HValue* string,
+ HValue* index);
+
HValue* BuildBinaryOperation(
BinaryOperation* expr,
HValue* left,
@@ -2440,8 +2498,10 @@ class HOptimizedGraphBuilder : public HGraphBuilder, public AstVisitor {
PushBeforeSimulateBehavior push_sim_result);
HInstruction* BuildIncrement(bool returns_original_input,
CountOperation* expr);
- HInstruction* BuildLoadKeyedGeneric(HValue* object,
- HValue* key);
+ HInstruction* BuildKeyedGeneric(PropertyAccessType access_type,
+ HValue* object,
+ HValue* key,
+ HValue* value);
HInstruction* TryBuildConsolidatedElementLoad(HValue* object,
HValue* key,
@@ -2455,14 +2515,14 @@ class HOptimizedGraphBuilder : public HGraphBuilder, public AstVisitor {
HValue* val,
HValue* dependency,
Handle<Map> map,
- bool is_store,
+ PropertyAccessType access_type,
KeyedAccessStoreMode store_mode);
HValue* HandlePolymorphicElementAccess(HValue* object,
HValue* key,
HValue* val,
SmallMapList* maps,
- bool is_store,
+ PropertyAccessType access_type,
KeyedAccessStoreMode store_mode,
bool* has_side_effects);
@@ -2470,12 +2530,14 @@ class HOptimizedGraphBuilder : public HGraphBuilder, public AstVisitor {
HValue* key,
HValue* val,
Expression* expr,
- bool is_store,
+ PropertyAccessType access_type,
bool* has_side_effects);
- HInstruction* BuildLoadNamedGeneric(HValue* object,
- Handle<String> name,
- bool is_uninitialized = false);
+ HInstruction* BuildNamedGeneric(PropertyAccessType access,
+ HValue* object,
+ Handle<String> name,
+ HValue* value,
+ bool is_uninitialized = false);
HCheckMaps* AddCheckMap(HValue* object, Handle<Map> map);
@@ -2499,16 +2561,11 @@ class HOptimizedGraphBuilder : public HGraphBuilder, public AstVisitor {
BailoutId return_id,
bool is_uninitialized = false);
+ HInstruction* BuildLoadNamedField(PropertyAccessInfo* info,
+ HValue* checked_object);
HInstruction* BuildStoreNamedField(PropertyAccessInfo* info,
HValue* checked_object,
HValue* value);
- HInstruction* BuildStoreNamedGeneric(HValue* object,
- Handle<String> name,
- HValue* value,
- bool is_uninitialized = false);
- HInstruction* BuildStoreKeyedGeneric(HValue* object,
- HValue* key,
- HValue* value);
HValue* BuildContextChainWalk(Variable* var);
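The hydrogen.h hunks above replace plain integer positions with HSourcePosition values that carry both an inlining id and an offset relative to the start of the inlined function (see SetSourcePosition, EnterInlinedSource, and the "%d_%d" trace format in HTracer::Trace). A minimal sketch of such a packed position type follows; the bit split and helper names are assumptions for illustration, not V8's exact layout.

// Illustrative packed source position: high bits = inlining id,
// low bits = offset from the start of the inlined function.
#include <stdint.h>

class SourcePosition {
 public:
  static SourcePosition Unknown() { return SourcePosition(); }
  SourcePosition() : value_(0) {}

  int inlining_id() const { return static_cast<int>(value_ >> kPositionBits); }
  int position() const { return static_cast<int>(value_ & kPositionMask); }
  uint32_t raw() const { return value_; }

  void set_inlining_id(int id) {
    value_ = (static_cast<uint32_t>(id) << kPositionBits) |
             (value_ & kPositionMask);
  }
  void set_position(int pos) {
    value_ = (value_ & ~kPositionMask) |
             (static_cast<uint32_t>(pos) & kPositionMask);
  }

 private:
  static const int kPositionBits = 23;  // assumed 9/23 split
  static const uint32_t kPositionMask = (1u << kPositionBits) - 1;
  uint32_t value_;
};

// Mirrors ScriptPositionToSourcePosition: a script-absolute offset becomes
// an offset relative to the inlined function's start, tagged with the
// current inlining id; the reverse direction adds the start back, as
// SourcePositionToScriptPosition does via the recorded start position.
inline SourcePosition ScriptOffsetToPosition(int script_offset,
                                             int inlining_id,
                                             int function_start) {
  SourcePosition pos;
  pos.set_inlining_id(inlining_id);
  pos.set_position(script_offset - function_start);
  return pos;
}

inline int PositionToScriptOffset(const SourcePosition& pos,
                                  int function_start) {
  return function_start + pos.position();
}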
diff --git a/deps/v8/src/i18n.cc b/deps/v8/src/i18n.cc
index 5c97c6b8ee..d5ea77dbdf 100644
--- a/deps/v8/src/i18n.cc
+++ b/deps/v8/src/i18n.cc
@@ -163,7 +163,7 @@ void SetResolvedDateSettings(Isolate* isolate,
reinterpret_cast<const uint16_t*>(pattern.getBuffer()),
pattern.length())),
NONE,
- kNonStrictMode);
+ SLOPPY);
// Set time zone and calendar.
const icu::Calendar* calendar = date_format->getCalendar();
@@ -173,7 +173,7 @@ void SetResolvedDateSettings(Isolate* isolate,
isolate->factory()->NewStringFromAscii(CStrVector("calendar")),
isolate->factory()->NewStringFromAscii(CStrVector(calendar_name)),
NONE,
- kNonStrictMode);
+ SLOPPY);
const icu::TimeZone& tz = calendar->getTimeZone();
icu::UnicodeString time_zone;
@@ -188,7 +188,7 @@ void SetResolvedDateSettings(Isolate* isolate,
isolate->factory()->NewStringFromAscii(CStrVector("timeZone")),
isolate->factory()->NewStringFromAscii(CStrVector("UTC")),
NONE,
- kNonStrictMode);
+ SLOPPY);
} else {
JSObject::SetProperty(
resolved,
@@ -199,7 +199,7 @@ void SetResolvedDateSettings(Isolate* isolate,
canonical_time_zone.getBuffer()),
canonical_time_zone.length())),
NONE,
- kNonStrictMode);
+ SLOPPY);
}
}
@@ -216,14 +216,14 @@ void SetResolvedDateSettings(Isolate* isolate,
isolate->factory()->NewStringFromAscii(CStrVector("numberingSystem")),
isolate->factory()->NewStringFromAscii(CStrVector(ns)),
NONE,
- kNonStrictMode);
+ SLOPPY);
} else {
JSObject::SetProperty(
resolved,
isolate->factory()->NewStringFromAscii(CStrVector("numberingSystem")),
isolate->factory()->undefined_value(),
NONE,
- kNonStrictMode);
+ SLOPPY);
}
delete numbering_system;
@@ -238,7 +238,7 @@ void SetResolvedDateSettings(Isolate* isolate,
isolate->factory()->NewStringFromAscii(CStrVector("locale")),
isolate->factory()->NewStringFromAscii(CStrVector(result)),
NONE,
- kNonStrictMode);
+ SLOPPY);
} else {
// This would never happen, since we got the locale from ICU.
JSObject::SetProperty(
@@ -246,7 +246,7 @@ void SetResolvedDateSettings(Isolate* isolate,
isolate->factory()->NewStringFromAscii(CStrVector("locale")),
isolate->factory()->NewStringFromAscii(CStrVector("und")),
NONE,
- kNonStrictMode);
+ SLOPPY);
}
}
@@ -389,7 +389,7 @@ void SetResolvedNumberSettings(Isolate* isolate,
reinterpret_cast<const uint16_t*>(pattern.getBuffer()),
pattern.length())),
NONE,
- kNonStrictMode);
+ SLOPPY);
// Set resolved currency code in options.currency if not empty.
icu::UnicodeString currency(number_format->getCurrency());
@@ -402,7 +402,7 @@ void SetResolvedNumberSettings(Isolate* isolate,
reinterpret_cast<const uint16_t*>(currency.getBuffer()),
currency.length())),
NONE,
- kNonStrictMode);
+ SLOPPY);
}
// Ugly hack. ICU doesn't expose numbering system in any way, so we have
@@ -418,14 +418,14 @@ void SetResolvedNumberSettings(Isolate* isolate,
isolate->factory()->NewStringFromAscii(CStrVector("numberingSystem")),
isolate->factory()->NewStringFromAscii(CStrVector(ns)),
NONE,
- kNonStrictMode);
+ SLOPPY);
} else {
JSObject::SetProperty(
resolved,
isolate->factory()->NewStringFromAscii(CStrVector("numberingSystem")),
isolate->factory()->undefined_value(),
NONE,
- kNonStrictMode);
+ SLOPPY);
}
delete numbering_system;
@@ -434,7 +434,7 @@ void SetResolvedNumberSettings(Isolate* isolate,
isolate->factory()->NewStringFromAscii(CStrVector("useGrouping")),
isolate->factory()->ToBoolean(number_format->isGroupingUsed()),
NONE,
- kNonStrictMode);
+ SLOPPY);
JSObject::SetProperty(
resolved,
@@ -443,7 +443,7 @@ void SetResolvedNumberSettings(Isolate* isolate,
isolate->factory()->NewNumberFromInt(
number_format->getMinimumIntegerDigits()),
NONE,
- kNonStrictMode);
+ SLOPPY);
JSObject::SetProperty(
resolved,
@@ -452,7 +452,7 @@ void SetResolvedNumberSettings(Isolate* isolate,
isolate->factory()->NewNumberFromInt(
number_format->getMinimumFractionDigits()),
NONE,
- kNonStrictMode);
+ SLOPPY);
JSObject::SetProperty(
resolved,
@@ -461,7 +461,7 @@ void SetResolvedNumberSettings(Isolate* isolate,
isolate->factory()->NewNumberFromInt(
number_format->getMaximumFractionDigits()),
NONE,
- kNonStrictMode);
+ SLOPPY);
Handle<String> key = isolate->factory()->NewStringFromAscii(
CStrVector("minimumSignificantDigits"));
@@ -473,7 +473,7 @@ void SetResolvedNumberSettings(Isolate* isolate,
isolate->factory()->NewNumberFromInt(
number_format->getMinimumSignificantDigits()),
NONE,
- kNonStrictMode);
+ SLOPPY);
}
key = isolate->factory()->NewStringFromAscii(
@@ -486,7 +486,7 @@ void SetResolvedNumberSettings(Isolate* isolate,
isolate->factory()->NewNumberFromInt(
number_format->getMaximumSignificantDigits()),
NONE,
- kNonStrictMode);
+ SLOPPY);
}
// Set the locale
@@ -500,7 +500,7 @@ void SetResolvedNumberSettings(Isolate* isolate,
isolate->factory()->NewStringFromAscii(CStrVector("locale")),
isolate->factory()->NewStringFromAscii(CStrVector(result)),
NONE,
- kNonStrictMode);
+ SLOPPY);
} else {
// This would never happen, since we got the locale from ICU.
JSObject::SetProperty(
@@ -508,7 +508,7 @@ void SetResolvedNumberSettings(Isolate* isolate,
isolate->factory()->NewStringFromAscii(CStrVector("locale")),
isolate->factory()->NewStringFromAscii(CStrVector("und")),
NONE,
- kNonStrictMode);
+ SLOPPY);
}
}
@@ -589,7 +589,7 @@ void SetResolvedCollatorSettings(Isolate* isolate,
isolate->factory()->ToBoolean(
collator->getAttribute(UCOL_NUMERIC_COLLATION, status) == UCOL_ON),
NONE,
- kNonStrictMode);
+ SLOPPY);
switch (collator->getAttribute(UCOL_CASE_FIRST, status)) {
case UCOL_LOWER_FIRST:
@@ -598,7 +598,7 @@ void SetResolvedCollatorSettings(Isolate* isolate,
isolate->factory()->NewStringFromAscii(CStrVector("caseFirst")),
isolate->factory()->NewStringFromAscii(CStrVector("lower")),
NONE,
- kNonStrictMode);
+ SLOPPY);
break;
case UCOL_UPPER_FIRST:
JSObject::SetProperty(
@@ -606,7 +606,7 @@ void SetResolvedCollatorSettings(Isolate* isolate,
isolate->factory()->NewStringFromAscii(CStrVector("caseFirst")),
isolate->factory()->NewStringFromAscii(CStrVector("upper")),
NONE,
- kNonStrictMode);
+ SLOPPY);
break;
default:
JSObject::SetProperty(
@@ -614,7 +614,7 @@ void SetResolvedCollatorSettings(Isolate* isolate,
isolate->factory()->NewStringFromAscii(CStrVector("caseFirst")),
isolate->factory()->NewStringFromAscii(CStrVector("false")),
NONE,
- kNonStrictMode);
+ SLOPPY);
}
switch (collator->getAttribute(UCOL_STRENGTH, status)) {
@@ -624,7 +624,7 @@ void SetResolvedCollatorSettings(Isolate* isolate,
isolate->factory()->NewStringFromAscii(CStrVector("strength")),
isolate->factory()->NewStringFromAscii(CStrVector("primary")),
NONE,
- kNonStrictMode);
+ SLOPPY);
// case level: true + s1 -> case, s1 -> base.
if (UCOL_ON == collator->getAttribute(UCOL_CASE_LEVEL, status)) {
@@ -633,14 +633,14 @@ void SetResolvedCollatorSettings(Isolate* isolate,
isolate->factory()->NewStringFromAscii(CStrVector("sensitivity")),
isolate->factory()->NewStringFromAscii(CStrVector("case")),
NONE,
- kNonStrictMode);
+ SLOPPY);
} else {
JSObject::SetProperty(
resolved,
isolate->factory()->NewStringFromAscii(CStrVector("sensitivity")),
isolate->factory()->NewStringFromAscii(CStrVector("base")),
NONE,
- kNonStrictMode);
+ SLOPPY);
}
break;
}
@@ -650,13 +650,13 @@ void SetResolvedCollatorSettings(Isolate* isolate,
isolate->factory()->NewStringFromAscii(CStrVector("strength")),
isolate->factory()->NewStringFromAscii(CStrVector("secondary")),
NONE,
- kNonStrictMode);
+ SLOPPY);
JSObject::SetProperty(
resolved,
isolate->factory()->NewStringFromAscii(CStrVector("sensitivity")),
isolate->factory()->NewStringFromAscii(CStrVector("accent")),
NONE,
- kNonStrictMode);
+ SLOPPY);
break;
case UCOL_TERTIARY:
JSObject::SetProperty(
@@ -664,13 +664,13 @@ void SetResolvedCollatorSettings(Isolate* isolate,
isolate->factory()->NewStringFromAscii(CStrVector("strength")),
isolate->factory()->NewStringFromAscii(CStrVector("tertiary")),
NONE,
- kNonStrictMode);
+ SLOPPY);
JSObject::SetProperty(
resolved,
isolate->factory()->NewStringFromAscii(CStrVector("sensitivity")),
isolate->factory()->NewStringFromAscii(CStrVector("variant")),
NONE,
- kNonStrictMode);
+ SLOPPY);
break;
case UCOL_QUATERNARY:
// We shouldn't get quaternary and identical from ICU, but if we do
@@ -680,13 +680,13 @@ void SetResolvedCollatorSettings(Isolate* isolate,
isolate->factory()->NewStringFromAscii(CStrVector("strength")),
isolate->factory()->NewStringFromAscii(CStrVector("quaternary")),
NONE,
- kNonStrictMode);
+ SLOPPY);
JSObject::SetProperty(
resolved,
isolate->factory()->NewStringFromAscii(CStrVector("sensitivity")),
isolate->factory()->NewStringFromAscii(CStrVector("variant")),
NONE,
- kNonStrictMode);
+ SLOPPY);
break;
default:
JSObject::SetProperty(
@@ -694,13 +694,13 @@ void SetResolvedCollatorSettings(Isolate* isolate,
isolate->factory()->NewStringFromAscii(CStrVector("strength")),
isolate->factory()->NewStringFromAscii(CStrVector("identical")),
NONE,
- kNonStrictMode);
+ SLOPPY);
JSObject::SetProperty(
resolved,
isolate->factory()->NewStringFromAscii(CStrVector("sensitivity")),
isolate->factory()->NewStringFromAscii(CStrVector("variant")),
NONE,
- kNonStrictMode);
+ SLOPPY);
}
JSObject::SetProperty(
@@ -709,7 +709,7 @@ void SetResolvedCollatorSettings(Isolate* isolate,
isolate->factory()->ToBoolean(collator->getAttribute(
UCOL_ALTERNATE_HANDLING, status) == UCOL_SHIFTED),
NONE,
- kNonStrictMode);
+ SLOPPY);
// Set the locale
char result[ULOC_FULLNAME_CAPACITY];
@@ -722,7 +722,7 @@ void SetResolvedCollatorSettings(Isolate* isolate,
isolate->factory()->NewStringFromAscii(CStrVector("locale")),
isolate->factory()->NewStringFromAscii(CStrVector(result)),
NONE,
- kNonStrictMode);
+ SLOPPY);
} else {
// This would never happen, since we got the locale from ICU.
JSObject::SetProperty(
@@ -730,7 +730,7 @@ void SetResolvedCollatorSettings(Isolate* isolate,
isolate->factory()->NewStringFromAscii(CStrVector("locale")),
isolate->factory()->NewStringFromAscii(CStrVector("und")),
NONE,
- kNonStrictMode);
+ SLOPPY);
}
}
@@ -785,7 +785,7 @@ void SetResolvedBreakIteratorSettings(Isolate* isolate,
isolate->factory()->NewStringFromAscii(CStrVector("locale")),
isolate->factory()->NewStringFromAscii(CStrVector(result)),
NONE,
- kNonStrictMode);
+ SLOPPY);
} else {
// This would never happen, since we got the locale from ICU.
JSObject::SetProperty(
@@ -793,7 +793,7 @@ void SetResolvedBreakIteratorSettings(Isolate* isolate,
isolate->factory()->NewStringFromAscii(CStrVector("locale")),
isolate->factory()->NewStringFromAscii(CStrVector("und")),
NONE,
- kNonStrictMode);
+ SLOPPY);
}
}
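Every kNonStrictMode that turns into SLOPPY above is part of the same rename applied in the hydrogen.h hunk earlier (StrictModeFlag / function_strict_mode_flag becoming StrictMode / function_strict_mode). Sketched from the diff, the enum change reads as follows; the exact declaration lives in V8's globals.h and may differ in detail:

enum StrictModeFlag { kNonStrictMode, kStrictMode };  // before
enum StrictMode { SLOPPY, STRICT };                   // after

// Call sites simply pass the language mode through, e.g.
//   JSObject::SetProperty(resolved, key, value, NONE, SLOPPY);
// and helpers such as function_strict_mode() now return StrictMode
// directly instead of mapping is_classic_mode() onto the old flags.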
diff --git a/deps/v8/src/ia32/assembler-ia32-inl.h b/deps/v8/src/ia32/assembler-ia32-inl.h
index ee5d991e38..8022f0592b 100644
--- a/deps/v8/src/ia32/assembler-ia32-inl.h
+++ b/deps/v8/src/ia32/assembler-ia32-inl.h
@@ -85,7 +85,7 @@ void RelocInfo::apply(intptr_t delta) {
Address RelocInfo::target_address() {
ASSERT(IsCodeTarget(rmode_) || IsRuntimeEntry(rmode_));
- return Assembler::target_address_at(pc_);
+ return Assembler::target_address_at(pc_, host_);
}
@@ -97,13 +97,19 @@ Address RelocInfo::target_address_address() {
}
+Address RelocInfo::constant_pool_entry_address() {
+ UNREACHABLE();
+ return NULL;
+}
+
+
int RelocInfo::target_address_size() {
return Assembler::kSpecialTargetSize;
}
void RelocInfo::set_target_address(Address target, WriteBarrierMode mode) {
- Assembler::set_target_address_at(pc_, target);
+ Assembler::set_target_address_at(pc_, host_, target);
ASSERT(IsCodeTarget(rmode_) || IsRuntimeEntry(rmode_));
if (mode == UPDATE_WRITE_BARRIER && host() != NULL && IsCodeTarget(rmode_)) {
Object* target_code = Code::GetCodeFromTargetAddress(target);
@@ -196,28 +202,28 @@ Code* RelocInfo::code_age_stub() {
ASSERT(rmode_ == RelocInfo::CODE_AGE_SEQUENCE);
ASSERT(*pc_ == kCallOpcode);
return Code::GetCodeFromTargetAddress(
- Assembler::target_address_at(pc_ + 1));
+ Assembler::target_address_at(pc_ + 1, host_));
}
void RelocInfo::set_code_age_stub(Code* stub) {
ASSERT(*pc_ == kCallOpcode);
ASSERT(rmode_ == RelocInfo::CODE_AGE_SEQUENCE);
- Assembler::set_target_address_at(pc_ + 1, stub->instruction_start());
+ Assembler::set_target_address_at(pc_ + 1, host_, stub->instruction_start());
}
Address RelocInfo::call_address() {
ASSERT((IsJSReturn(rmode()) && IsPatchedReturnSequence()) ||
(IsDebugBreakSlot(rmode()) && IsPatchedDebugBreakSlotSequence()));
- return Assembler::target_address_at(pc_ + 1);
+ return Assembler::target_address_at(pc_ + 1, host_);
}
void RelocInfo::set_call_address(Address target) {
ASSERT((IsJSReturn(rmode()) && IsPatchedReturnSequence()) ||
(IsDebugBreakSlot(rmode()) && IsPatchedDebugBreakSlotSequence()));
- Assembler::set_target_address_at(pc_ + 1, target);
+ Assembler::set_target_address_at(pc_ + 1, host_, target);
if (host() != NULL) {
Object* target_code = Code::GetCodeFromTargetAddress(target);
host()->GetHeap()->incremental_marking()->RecordWriteIntoCode(
@@ -248,7 +254,7 @@ void RelocInfo::WipeOut() {
Memory::Address_at(pc_) = NULL;
} else if (IsCodeTarget(rmode_) || IsRuntimeEntry(rmode_)) {
// Effectively write zero into the relocation.
- Assembler::set_target_address_at(pc_, pc_ + sizeof(int32_t));
+ Assembler::set_target_address_at(pc_, host_, pc_ + sizeof(int32_t));
} else {
UNREACHABLE();
}
@@ -439,12 +445,15 @@ void Assembler::emit_w(const Immediate& x) {
}
-Address Assembler::target_address_at(Address pc) {
+Address Assembler::target_address_at(Address pc,
+ ConstantPoolArray* constant_pool) {
return pc + sizeof(int32_t) + *reinterpret_cast<int32_t*>(pc);
}
-void Assembler::set_target_address_at(Address pc, Address target) {
+void Assembler::set_target_address_at(Address pc,
+ ConstantPoolArray* constant_pool,
+ Address target) {
int32_t* p = reinterpret_cast<int32_t*>(pc);
*p = target - (pc + sizeof(int32_t));
CPU::FlushICache(p, sizeof(int32_t));
diff --git a/deps/v8/src/ia32/assembler-ia32.cc b/deps/v8/src/ia32/assembler-ia32.cc
index 733432028a..3a4f590c8f 100644
--- a/deps/v8/src/ia32/assembler-ia32.cc
+++ b/deps/v8/src/ia32/assembler-ia32.cc
@@ -160,6 +160,11 @@ bool RelocInfo::IsCodedSpecially() {
}
+bool RelocInfo::IsInConstantPool() {
+ return false;
+}
+
+
void RelocInfo::PatchCode(byte* instructions, int instruction_count) {
// Patch the code at the current address with the supplied instructions.
for (int i = 0; i < instruction_count; i++) {
@@ -1259,6 +1264,14 @@ void Assembler::bts(const Operand& dst, Register src) {
}
+void Assembler::bsr(Register dst, const Operand& src) {
+ EnsureSpace ensure_space(this);
+ EMIT(0x0F);
+ EMIT(0xBD);
+ emit_operand(dst, src);
+}
+
+
void Assembler::hlt() {
EnsureSpace ensure_space(this);
EMIT(0xF4);
@@ -2555,7 +2568,7 @@ void Assembler::RecordComment(const char* msg, bool force) {
void Assembler::GrowBuffer() {
- ASSERT(overflow());
+ ASSERT(buffer_overflow());
if (!own_buffer_) FATAL("external code buffer is too small");
// Compute new buffer size.
@@ -2614,7 +2627,7 @@ void Assembler::GrowBuffer() {
}
}
- ASSERT(!overflow());
+ ASSERT(!buffer_overflow());
}
@@ -2704,6 +2717,19 @@ void Assembler::RecordRelocInfo(RelocInfo::Mode rmode, intptr_t data) {
}
+MaybeObject* Assembler::AllocateConstantPool(Heap* heap) {
+ // No out-of-line constant pool support.
+ UNREACHABLE();
+ return NULL;
+}
+
+
+void Assembler::PopulateConstantPool(ConstantPoolArray* constant_pool) {
+ // No out-of-line constant pool support.
+ UNREACHABLE();
+}
+
+
#ifdef GENERATED_CODE_COVERAGE
static FILE* coverage_log = NULL;
diff --git a/deps/v8/src/ia32/assembler-ia32.h b/deps/v8/src/ia32/assembler-ia32.h
index 6ed0bc6d66..27e5302db3 100644
--- a/deps/v8/src/ia32/assembler-ia32.h
+++ b/deps/v8/src/ia32/assembler-ia32.h
@@ -624,8 +624,21 @@ class Assembler : public AssemblerBase {
void GetCode(CodeDesc* desc);
// Read/Modify the code target in the branch/call instruction at pc.
- inline static Address target_address_at(Address pc);
- inline static void set_target_address_at(Address pc, Address target);
+ inline static Address target_address_at(Address pc,
+ ConstantPoolArray* constant_pool);
+ inline static void set_target_address_at(Address pc,
+ ConstantPoolArray* constant_pool,
+ Address target);
+ static inline Address target_address_at(Address pc, Code* code) {
+ ConstantPoolArray* constant_pool = code ? code->constant_pool() : NULL;
+ return target_address_at(pc, constant_pool);
+ }
+ static inline void set_target_address_at(Address pc,
+ Code* code,
+ Address target) {
+ ConstantPoolArray* constant_pool = code ? code->constant_pool() : NULL;
+ set_target_address_at(pc, constant_pool, target);
+ }
// Return the code target address at a call site from the return address
// of that call in the instruction stream.
@@ -634,8 +647,8 @@ class Assembler : public AssemblerBase {
// This sets the branch destination (which is in the instruction on x86).
// This is for calls and branches within generated code.
inline static void deserialization_set_special_target_at(
- Address instruction_payload, Address target) {
- set_target_address_at(instruction_payload, target);
+ Address instruction_payload, Code* code, Address target) {
+ set_target_address_at(instruction_payload, code, target);
}
static const int kSpecialTargetSize = kPointerSize;
@@ -882,6 +895,8 @@ class Assembler : public AssemblerBase {
void bt(const Operand& dst, Register src);
void bts(Register dst, Register src) { bts(Operand(dst), src); }
void bts(const Operand& dst, Register src);
+ void bsr(Register dst, Register src) { bsr(dst, Operand(src)); }
+ void bsr(Register dst, const Operand& src);
// Miscellaneous
void hlt();
@@ -1155,7 +1170,9 @@ class Assembler : public AssemblerBase {
// Check if there is less than kGap bytes available in the buffer.
// If this is the case, we need to grow the buffer before emitting
// an instruction or relocation information.
- inline bool overflow() const { return pc_ >= reloc_info_writer.pos() - kGap; }
+ inline bool buffer_overflow() const {
+ return pc_ >= reloc_info_writer.pos() - kGap;
+ }
// Get the number of bytes available in the buffer.
inline int available_space() const { return reloc_info_writer.pos() - pc_; }
@@ -1174,6 +1191,12 @@ class Assembler : public AssemblerBase {
byte byte_at(int pos) { return buffer_[pos]; }
void set_byte_at(int pos, byte value) { buffer_[pos] = value; }
+ // Allocate a constant pool of the correct size for the generated code.
+ MaybeObject* AllocateConstantPool(Heap* heap);
+
+ // Generate the constant pool for the generated code.
+ void PopulateConstantPool(ConstantPoolArray* constant_pool);
+
protected:
void emit_sse_operand(XMMRegister reg, const Operand& adr);
void emit_sse_operand(XMMRegister dst, XMMRegister src);
@@ -1251,7 +1274,7 @@ class Assembler : public AssemblerBase {
class EnsureSpace BASE_EMBEDDED {
public:
explicit EnsureSpace(Assembler* assembler) : assembler_(assembler) {
- if (assembler_->overflow()) assembler_->GrowBuffer();
+ if (assembler_->buffer_overflow()) assembler_->GrowBuffer();
#ifdef DEBUG
space_before_ = assembler_->available_space();
#endif
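The assembler-ia32 changes above thread a constant pool through the code-target accessors: target_address_at / set_target_address_at now take a ConstantPoolArray* (or a host Code*, from which the pool is derived), and the RelocInfo call sites forward host_ accordingly. On ia32 the pool is never consulted (AllocateConstantPool and PopulateConstantPool are UNREACHABLE); the extra parameter only keeps the interface uniform with architectures that keep call targets in an out-of-line constant pool. A simplified sketch of the forwarding pattern, with placeholder types standing in for V8's Address, Code and ConstantPoolArray:

#include <stdint.h>
#include <cstddef>

typedef unsigned char byte;
typedef byte* Address;

class ConstantPoolArray {};

class Code {
 public:
  Code() : constant_pool_(NULL) {}
  ConstantPoolArray* constant_pool() { return constant_pool_; }
 private:
  ConstantPoolArray* constant_pool_;
};

struct Assembler {
  // ia32: the 32-bit displacement after pc encodes the target, so the
  // constant pool argument is accepted but ignored.
  static Address target_address_at(Address pc, ConstantPoolArray*) {
    return pc + sizeof(int32_t) + *reinterpret_cast<int32_t*>(pc);
  }
  // Convenience overload matching the new header: derive the pool from
  // the host Code object (NULL when there is no host code).
  static Address target_address_at(Address pc, Code* code) {
    ConstantPoolArray* pool = code ? code->constant_pool() : NULL;
    return target_address_at(pc, pool);
  }
};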
diff --git a/deps/v8/src/ia32/builtins-ia32.cc b/deps/v8/src/ia32/builtins-ia32.cc
index d748d23622..785c5fd61c 100644
--- a/deps/v8/src/ia32/builtins-ia32.cc
+++ b/deps/v8/src/ia32/builtins-ia32.cc
@@ -115,7 +115,7 @@ void Builtins::Generate_InOptimizationQueue(MacroAssembler* masm) {
__ cmp(esp, Operand::StaticVariable(stack_limit));
__ j(above_equal, &ok, Label::kNear);
- CallRuntimePassFunction(masm, Runtime::kTryInstallOptimizedCode);
+ CallRuntimePassFunction(masm, Runtime::kHiddenTryInstallOptimizedCode);
GenerateTailCallToReturnedCode(masm);
__ bind(&ok);
@@ -125,19 +125,32 @@ void Builtins::Generate_InOptimizationQueue(MacroAssembler* masm) {
static void Generate_JSConstructStubHelper(MacroAssembler* masm,
bool is_api_function,
- bool count_constructions) {
+ bool count_constructions,
+ bool create_memento) {
// ----------- S t a t e -------------
// -- eax: number of arguments
// -- edi: constructor function
+ // -- ebx: allocation site or undefined
// -----------------------------------
// Should never count constructions for api objects.
ASSERT(!is_api_function || !count_constructions);
+ // Should never create mementos for api functions.
+ ASSERT(!is_api_function || !create_memento);
+
+ // Should never create mementos before slack tracking is finished.
+ ASSERT(!count_constructions || !create_memento);
+
// Enter a construct frame.
{
FrameScope scope(masm, StackFrame::CONSTRUCT);
+ if (create_memento) {
+ __ AssertUndefinedOrAllocationSite(ebx);
+ __ push(ebx);
+ }
+
// Store a smi-tagged arguments count on the stack.
__ SmiTag(eax);
__ push(eax);
@@ -189,7 +202,7 @@ static void Generate_JSConstructStubHelper(MacroAssembler* masm,
__ push(edi); // constructor
// The call will replace the stub, so the countdown is only done once.
- __ CallRuntime(Runtime::kFinalizeInstanceSize, 1);
+ __ CallRuntime(Runtime::kHiddenFinalizeInstanceSize, 1);
__ pop(edi);
__ pop(eax);
@@ -202,20 +215,26 @@ static void Generate_JSConstructStubHelper(MacroAssembler* masm,
// eax: initial map
__ movzx_b(edi, FieldOperand(eax, Map::kInstanceSizeOffset));
__ shl(edi, kPointerSizeLog2);
+ if (create_memento) {
+ __ add(edi, Immediate(AllocationMemento::kSize));
+ }
+
__ Allocate(edi, ebx, edi, no_reg, &rt_call, NO_ALLOCATION_FLAGS);
+
+ Factory* factory = masm->isolate()->factory();
+
// Allocated the JSObject, now initialize the fields.
// eax: initial map
// ebx: JSObject
- // edi: start of next object
+ // edi: start of next object (including memento if create_memento)
__ mov(Operand(ebx, JSObject::kMapOffset), eax);
- Factory* factory = masm->isolate()->factory();
__ mov(ecx, factory->empty_fixed_array());
__ mov(Operand(ebx, JSObject::kPropertiesOffset), ecx);
__ mov(Operand(ebx, JSObject::kElementsOffset), ecx);
// Set extra fields in the newly allocated object.
// eax: initial map
// ebx: JSObject
- // edi: start of next object
+ // edi: start of next object (including memento if create_memento)
__ lea(ecx, Operand(ebx, JSObject::kHeaderSize));
__ mov(edx, factory->undefined_value());
if (count_constructions) {
@@ -231,8 +250,23 @@ static void Generate_JSConstructStubHelper(MacroAssembler* masm,
}
__ InitializeFieldsWithFiller(ecx, esi, edx);
__ mov(edx, factory->one_pointer_filler_map());
+ __ InitializeFieldsWithFiller(ecx, edi, edx);
+ } else if (create_memento) {
+ __ lea(esi, Operand(edi, -AllocationMemento::kSize));
+ __ InitializeFieldsWithFiller(ecx, esi, edx);
+
+ // Fill in memento fields if necessary.
+ // esi: points to the allocated but uninitialized memento.
+ Handle<Map> allocation_memento_map = factory->allocation_memento_map();
+ __ mov(Operand(esi, AllocationMemento::kMapOffset),
+ allocation_memento_map);
+ // Get the cell or undefined.
+ __ mov(edx, Operand(esp, kPointerSize*2));
+ __ mov(Operand(esi, AllocationMemento::kAllocationSiteOffset),
+ edx);
+ } else {
+ __ InitializeFieldsWithFiller(ecx, edi, edx);
}
- __ InitializeFieldsWithFiller(ecx, edi, edx);
// Add the object tag to make the JSObject real, so that we can continue
// and jump into the continuation code at any time from now on. Any
@@ -323,16 +357,48 @@ static void Generate_JSConstructStubHelper(MacroAssembler* masm,
// Allocate the new receiver object using the runtime call.
__ bind(&rt_call);
+ int offset = 0;
+ if (create_memento) {
+ // Get the cell or allocation site.
+ __ mov(edi, Operand(esp, kPointerSize * 2));
+ __ push(edi);
+ offset = kPointerSize;
+ }
+
// Must restore edi (constructor) before calling runtime.
- __ mov(edi, Operand(esp, 0));
+ __ mov(edi, Operand(esp, offset));
// edi: function (constructor)
__ push(edi);
- __ CallRuntime(Runtime::kNewObject, 1);
+ if (create_memento) {
+ __ CallRuntime(Runtime::kHiddenNewObjectWithAllocationSite, 2);
+ } else {
+ __ CallRuntime(Runtime::kHiddenNewObject, 1);
+ }
__ mov(ebx, eax); // store result in ebx
+ // If we ended up using the runtime, and we want a memento, then the
+ // runtime call made it for us, and we shouldn't do create count
+ // increment.
+ Label count_incremented;
+ if (create_memento) {
+ __ jmp(&count_incremented);
+ }
+
// New object allocated.
// ebx: newly allocated object
__ bind(&allocated);
+
+ if (create_memento) {
+ __ mov(ecx, Operand(esp, kPointerSize * 2));
+ __ cmp(ecx, masm->isolate()->factory()->undefined_value());
+ __ j(equal, &count_incremented);
+ // ecx is an AllocationSite. We are creating a memento from it, so we
+ // need to increment the memento create count.
+ __ add(FieldOperand(ecx, AllocationSite::kPretenureCreateCountOffset),
+ Immediate(Smi::FromInt(1)));
+ __ bind(&count_incremented);
+ }
+
// Retrieve the function from the stack.
__ pop(edi);
@@ -415,17 +481,17 @@ static void Generate_JSConstructStubHelper(MacroAssembler* masm,
void Builtins::Generate_JSConstructStubCountdown(MacroAssembler* masm) {
- Generate_JSConstructStubHelper(masm, false, true);
+ Generate_JSConstructStubHelper(masm, false, true, false);
}
void Builtins::Generate_JSConstructStubGeneric(MacroAssembler* masm) {
- Generate_JSConstructStubHelper(masm, false, false);
+ Generate_JSConstructStubHelper(masm, false, false, FLAG_pretenuring_call_new);
}
void Builtins::Generate_JSConstructStubApi(MacroAssembler* masm) {
- Generate_JSConstructStubHelper(masm, true, false);
+ Generate_JSConstructStubHelper(masm, true, false, false);
}
@@ -434,7 +500,7 @@ static void Generate_JSEntryTrampolineHelper(MacroAssembler* masm,
ProfileEntryHookStub::MaybeCallEntryHook(masm);
// Clear the context before we push it when entering the internal frame.
- __ Set(esi, Immediate(0));
+ __ Move(esi, Immediate(0));
{
FrameScope scope(masm, StackFrame::INTERNAL);
@@ -456,7 +522,7 @@ static void Generate_JSEntryTrampolineHelper(MacroAssembler* masm,
// Copy arguments to the stack in a loop.
Label loop, entry;
- __ Set(ecx, Immediate(0));
+ __ Move(ecx, Immediate(0));
__ jmp(&entry);
__ bind(&loop);
__ mov(edx, Operand(ebx, ecx, times_4, 0)); // push parameter from argv
@@ -473,9 +539,7 @@ static void Generate_JSEntryTrampolineHelper(MacroAssembler* masm,
// Invoke the code.
if (is_construct) {
// No type feedback cell is available
- Handle<Object> undefined_sentinel(
- masm->isolate()->heap()->undefined_value(), masm->isolate());
- __ mov(ebx, Immediate(undefined_sentinel));
+ __ mov(ebx, masm->isolate()->factory()->undefined_value());
CallConstructStub stub(NO_CALL_FUNCTION_FLAGS);
__ CallStub(&stub);
} else {
@@ -503,7 +567,7 @@ void Builtins::Generate_JSConstructEntryTrampoline(MacroAssembler* masm) {
void Builtins::Generate_CompileUnoptimized(MacroAssembler* masm) {
- CallRuntimePassFunction(masm, Runtime::kCompileUnoptimized);
+ CallRuntimePassFunction(masm, Runtime::kHiddenCompileUnoptimized);
GenerateTailCallToReturnedCode(masm);
}
@@ -518,7 +582,7 @@ static void CallCompileOptimized(MacroAssembler* masm, bool concurrent) {
// Whether to compile in a background thread.
__ Push(masm->isolate()->factory()->ToBoolean(concurrent));
- __ CallRuntime(Runtime::kCompileOptimized, 2);
+ __ CallRuntime(Runtime::kHiddenCompileOptimized, 2);
// Restore receiver.
__ pop(edi);
}
@@ -622,7 +686,7 @@ static void Generate_NotifyStubFailureHelper(MacroAssembler* masm,
// stubs that tail call the runtime on deopts passing their parameters in
// registers.
__ pushad();
- __ CallRuntime(Runtime::kNotifyStubFailure, 0, save_doubles);
+ __ CallRuntime(Runtime::kHiddenNotifyStubFailure, 0, save_doubles);
__ popad();
// Tear down internal frame.
}
@@ -654,7 +718,7 @@ static void Generate_NotifyDeoptimizedHelper(MacroAssembler* masm,
// Pass deoptimization type to the runtime system.
__ push(Immediate(Smi::FromInt(static_cast<int>(type))));
- __ CallRuntime(Runtime::kNotifyDeoptimized, 1);
+ __ CallRuntime(Runtime::kHiddenNotifyDeoptimized, 1);
// Tear down internal frame.
}
@@ -721,7 +785,7 @@ void Builtins::Generate_FunctionCall(MacroAssembler* masm) {
// 3a. Patch the first argument if necessary when calling a function.
Label shift_arguments;
- __ Set(edx, Immediate(0)); // indicate regular JS_FUNCTION
+ __ Move(edx, Immediate(0)); // indicate regular JS_FUNCTION
{ Label convert_to_object, use_global_receiver, patch_receiver;
// Change context eagerly in case we need the global receiver.
__ mov(esi, FieldOperand(edi, JSFunction::kContextOffset));
@@ -737,7 +801,7 @@ void Builtins::Generate_FunctionCall(MacroAssembler* masm) {
1 << SharedFunctionInfo::kNativeBitWithinByte);
__ j(not_equal, &shift_arguments);
- // Compute the receiver in non-strict mode.
+ // Compute the receiver in sloppy mode.
__ mov(ebx, Operand(esp, eax, times_4, 0)); // First argument.
// Call ToObject on the receiver if it is not an object, or use the
@@ -761,7 +825,7 @@ void Builtins::Generate_FunctionCall(MacroAssembler* masm) {
__ push(ebx);
__ InvokeBuiltin(Builtins::TO_OBJECT, CALL_FUNCTION);
__ mov(ebx, eax);
- __ Set(edx, Immediate(0)); // restore
+ __ Move(edx, Immediate(0)); // restore
__ pop(eax);
__ SmiUntag(eax);
@@ -784,11 +848,11 @@ void Builtins::Generate_FunctionCall(MacroAssembler* masm) {
// 3b. Check for function proxy.
__ bind(&slow);
- __ Set(edx, Immediate(1)); // indicate function proxy
+ __ Move(edx, Immediate(1)); // indicate function proxy
__ CmpInstanceType(ecx, JS_FUNCTION_PROXY_TYPE);
__ j(equal, &shift_arguments);
__ bind(&non_function);
- __ Set(edx, Immediate(2)); // indicate non-function
+ __ Move(edx, Immediate(2)); // indicate non-function
// 3c. Patch the first argument when calling a non-function. The
// CALL_NON_FUNCTION builtin expects the non-function callee as
@@ -816,7 +880,7 @@ void Builtins::Generate_FunctionCall(MacroAssembler* masm) {
{ Label function, non_proxy;
__ test(edx, edx);
__ j(zero, &function);
- __ Set(ebx, Immediate(0));
+ __ Move(ebx, Immediate(0));
__ cmp(edx, Immediate(1));
__ j(not_equal, &non_proxy);
@@ -923,7 +987,7 @@ void Builtins::Generate_FunctionApply(MacroAssembler* masm) {
1 << SharedFunctionInfo::kNativeBitWithinByte);
__ j(not_equal, &push_receiver);
- // Compute the receiver in non-strict mode.
+ // Compute the receiver in sloppy mode.
// Call ToObject on the receiver if it is not an object, or use the
// global object if it is null or undefined.
__ JumpIfSmi(ebx, &call_to_object);
@@ -994,7 +1058,7 @@ void Builtins::Generate_FunctionApply(MacroAssembler* masm) {
__ bind(&call_proxy);
__ push(edi); // add function proxy as last argument
__ inc(eax);
- __ Set(ebx, Immediate(0));
+ __ Move(ebx, Immediate(0));
__ GetBuiltinEntry(edx, Builtins::CALL_FUNCTION_PROXY);
__ call(masm->isolate()->builtins()->ArgumentsAdaptorTrampoline(),
RelocInfo::CODE_TARGET);
@@ -1057,10 +1121,7 @@ void Builtins::Generate_ArrayCode(MacroAssembler* masm) {
// Run the native code for the Array function called as a normal function.
// tail call a stub
- Handle<Object> undefined_sentinel(
- masm->isolate()->heap()->undefined_value(),
- masm->isolate());
- __ mov(ebx, Immediate(undefined_sentinel));
+ __ mov(ebx, masm->isolate()->factory()->undefined_value());
ArrayConstructorStub stub(masm->isolate());
__ TailCallStub(&stub);
}
@@ -1131,7 +1192,7 @@ void Builtins::Generate_StringConstructCode(MacroAssembler* masm) {
// Set properties and elements.
Factory* factory = masm->isolate()->factory();
- __ Set(ecx, Immediate(factory->empty_fixed_array()));
+ __ Move(ecx, Immediate(factory->empty_fixed_array()));
__ mov(FieldOperand(eax, JSObject::kPropertiesOffset), ecx);
__ mov(FieldOperand(eax, JSObject::kElementsOffset), ecx);
@@ -1172,7 +1233,7 @@ void Builtins::Generate_StringConstructCode(MacroAssembler* masm) {
// Load the empty string into ebx, remove the receiver from the
// stack, and jump back to the case where the argument is a string.
__ bind(&no_arguments);
- __ Set(ebx, Immediate(factory->empty_string()));
+ __ Move(ebx, Immediate(factory->empty_string()));
__ pop(ecx);
__ lea(esp, Operand(esp, kPointerSize));
__ push(ecx);
@@ -1358,7 +1419,7 @@ void Builtins::Generate_OsrAfterStackCheck(MacroAssembler* masm) {
__ j(above_equal, &ok, Label::kNear);
{
FrameScope scope(masm, StackFrame::INTERNAL);
- __ CallRuntime(Runtime::kStackGuard, 0);
+ __ CallRuntime(Runtime::kHiddenStackGuard, 0);
}
__ jmp(masm->isolate()->builtins()->OnStackReplacement(),
RelocInfo::CODE_TARGET);
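In the Generate_JSConstructStubHelper change above, when create_memento is set the stub grows the allocation by AllocationMemento::kSize and writes an AllocationMemento (its map plus the AllocationSite taken from the stack) immediately behind the object's in-object fields, then bumps the site's pretenure create count unless the runtime path already created the memento for it. A rough picture of the result, expressed as a plain struct with placeholder field types; the real offsets and kSize come from V8's AllocationMemento, not from this sketch:

// Two tagged words appended to the same allocation as the JSObject.
struct AllocationMementoLayout {
  void* map;              // factory->allocation_memento_map()
  void* allocation_site;  // AllocationSite pulled from esp[2 * kPointerSize]
};

// Resulting memory, annotated with the registers used in the stub:
//   ebx -> [ JSObject map | properties | elements | in-object fields... ]
//   esi -> [ AllocationMementoLayout ]            (esi = edi - kSize)
//   edi -> end of the allocation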
diff --git a/deps/v8/src/ia32/code-stubs-ia32.cc b/deps/v8/src/ia32/code-stubs-ia32.cc
index e280c50e79..ab29167e9a 100644
--- a/deps/v8/src/ia32/code-stubs-ia32.cc
+++ b/deps/v8/src/ia32/code-stubs-ia32.cc
@@ -50,7 +50,7 @@ void FastNewClosureStub::InitializeInterfaceDescriptor(
descriptor->register_param_count_ = 1;
descriptor->register_params_ = registers;
descriptor->deoptimization_handler_ =
- Runtime::FunctionForId(Runtime::kNewClosureFromStubFailure)->entry;
+ Runtime::FunctionForId(Runtime::kHiddenNewClosureFromStubFailure)->entry;
}
@@ -81,7 +81,7 @@ void NumberToStringStub::InitializeInterfaceDescriptor(
descriptor->register_param_count_ = 1;
descriptor->register_params_ = registers;
descriptor->deoptimization_handler_ =
- Runtime::FunctionForId(Runtime::kNumberToString)->entry;
+ Runtime::FunctionForId(Runtime::kHiddenNumberToString)->entry;
}
@@ -92,7 +92,8 @@ void FastCloneShallowArrayStub::InitializeInterfaceDescriptor(
descriptor->register_param_count_ = 3;
descriptor->register_params_ = registers;
descriptor->deoptimization_handler_ =
- Runtime::FunctionForId(Runtime::kCreateArrayLiteralStubBailout)->entry;
+ Runtime::FunctionForId(
+ Runtime::kHiddenCreateArrayLiteralStubBailout)->entry;
}
@@ -103,15 +104,15 @@ void FastCloneShallowObjectStub::InitializeInterfaceDescriptor(
descriptor->register_param_count_ = 4;
descriptor->register_params_ = registers;
descriptor->deoptimization_handler_ =
- Runtime::FunctionForId(Runtime::kCreateObjectLiteral)->entry;
+ Runtime::FunctionForId(Runtime::kHiddenCreateObjectLiteral)->entry;
}
void CreateAllocationSiteStub::InitializeInterfaceDescriptor(
Isolate* isolate,
CodeStubInterfaceDescriptor* descriptor) {
- static Register registers[] = { ebx };
- descriptor->register_param_count_ = 1;
+ static Register registers[] = { ebx, edx };
+ descriptor->register_param_count_ = 2;
descriptor->register_params_ = registers;
descriptor->deoptimization_handler_ = NULL;
}
@@ -146,7 +147,7 @@ void RegExpConstructResultStub::InitializeInterfaceDescriptor(
descriptor->register_param_count_ = 3;
descriptor->register_params_ = registers;
descriptor->deoptimization_handler_ =
- Runtime::FunctionForId(Runtime::kRegExpConstructResult)->entry;
+ Runtime::FunctionForId(Runtime::kHiddenRegExpConstructResult)->entry;
}
@@ -170,6 +171,26 @@ void KeyedLoadFieldStub::InitializeInterfaceDescriptor(
}
+void StringLengthStub::InitializeInterfaceDescriptor(
+ Isolate* isolate,
+ CodeStubInterfaceDescriptor* descriptor) {
+ static Register registers[] = { edx, ecx };
+ descriptor->register_param_count_ = 2;
+ descriptor->register_params_ = registers;
+ descriptor->deoptimization_handler_ = NULL;
+}
+
+
+void KeyedStringLengthStub::InitializeInterfaceDescriptor(
+ Isolate* isolate,
+ CodeStubInterfaceDescriptor* descriptor) {
+ static Register registers[] = { edx, ecx };
+ descriptor->register_param_count_ = 2;
+ descriptor->register_params_ = registers;
+ descriptor->deoptimization_handler_ = NULL;
+}
+
+
void KeyedStoreFastElementStub::InitializeInterfaceDescriptor(
Isolate* isolate,
CodeStubInterfaceDescriptor* descriptor) {
@@ -217,7 +238,7 @@ static void InitializeArrayConstructorDescriptor(
descriptor->hint_stack_parameter_count_ = constant_stack_parameter_count;
descriptor->function_mode_ = JS_FUNCTION_STUB_MODE;
descriptor->deoptimization_handler_ =
- Runtime::FunctionForId(Runtime::kArrayConstructor)->entry;
+ Runtime::FunctionForId(Runtime::kHiddenArrayConstructor)->entry;
}
@@ -245,7 +266,7 @@ static void InitializeInternalArrayConstructorDescriptor(
descriptor->hint_stack_parameter_count_ = constant_stack_parameter_count;
descriptor->function_mode_ = JS_FUNCTION_STUB_MODE;
descriptor->deoptimization_handler_ =
- Runtime::FunctionForId(Runtime::kInternalArrayConstructor)->entry;
+ Runtime::FunctionForId(Runtime::kHiddenInternalArrayConstructor)->entry;
}
@@ -368,7 +389,7 @@ void StringAddStub::InitializeInterfaceDescriptor(
descriptor->register_param_count_ = 2;
descriptor->register_params_ = registers;
descriptor->deoptimization_handler_ =
- Runtime::FunctionForId(Runtime::kStringAdd)->entry;
+ Runtime::FunctionForId(Runtime::kHiddenStringAdd)->entry;
}
@@ -825,8 +846,8 @@ void MathPowStub::Generate(MacroAssembler* masm) {
__ bind(&try_arithmetic_simplification);
// Skip to runtime if possibly NaN (indicated by the indefinite integer).
__ cvttsd2si(exponent, Operand(double_exponent));
- __ cmp(exponent, Immediate(0x80000000u));
- __ j(equal, &call_runtime);
+ __ cmp(exponent, Immediate(0x1));
+ __ j(overflow, &call_runtime);
if (exponent_type_ == ON_STACK) {
// Detect square root case. Crankshaft detects constant +/-0.5 at
@@ -1046,91 +1067,6 @@ void FunctionPrototypeStub::Generate(MacroAssembler* masm) {
}
-void StringLengthStub::Generate(MacroAssembler* masm) {
- // ----------- S t a t e -------------
- // -- ecx : name
- // -- edx : receiver
- // -- esp[0] : return address
- // -----------------------------------
- Label miss;
-
- if (kind() == Code::KEYED_LOAD_IC) {
- __ cmp(ecx, Immediate(masm->isolate()->factory()->length_string()));
- __ j(not_equal, &miss);
- }
-
- StubCompiler::GenerateLoadStringLength(masm, edx, eax, ebx, &miss);
- __ bind(&miss);
- StubCompiler::TailCallBuiltin(
- masm, BaseLoadStoreStubCompiler::MissBuiltin(kind()));
-}
-
-
-void StoreArrayLengthStub::Generate(MacroAssembler* masm) {
- // ----------- S t a t e -------------
- // -- eax : value
- // -- ecx : name
- // -- edx : receiver
- // -- esp[0] : return address
- // -----------------------------------
- //
- // This accepts as a receiver anything JSArray::SetElementsLength accepts
- // (currently anything except for external arrays which means anything with
- // elements of FixedArray type). Value must be a number, but only smis are
- // accepted as the most common case.
-
- Label miss;
-
- Register receiver = edx;
- Register value = eax;
- Register scratch = ebx;
-
- if (kind() == Code::KEYED_STORE_IC) {
- __ cmp(ecx, Immediate(masm->isolate()->factory()->length_string()));
- __ j(not_equal, &miss);
- }
-
- // Check that the receiver isn't a smi.
- __ JumpIfSmi(receiver, &miss);
-
- // Check that the object is a JS array.
- __ CmpObjectType(receiver, JS_ARRAY_TYPE, scratch);
- __ j(not_equal, &miss);
-
- // Check that elements are FixedArray.
- // We rely on StoreIC_ArrayLength below to deal with all types of
- // fast elements (including COW).
- __ mov(scratch, FieldOperand(receiver, JSArray::kElementsOffset));
- __ CmpObjectType(scratch, FIXED_ARRAY_TYPE, scratch);
- __ j(not_equal, &miss);
-
- // Check that the array has fast properties, otherwise the length
- // property might have been redefined.
- __ mov(scratch, FieldOperand(receiver, JSArray::kPropertiesOffset));
- __ CompareRoot(FieldOperand(scratch, FixedArray::kMapOffset),
- Heap::kHashTableMapRootIndex);
- __ j(equal, &miss);
-
- // Check that value is a smi.
- __ JumpIfNotSmi(value, &miss);
-
- // Prepare tail call to StoreIC_ArrayLength.
- __ pop(scratch);
- __ push(receiver);
- __ push(value);
- __ push(scratch); // return address
-
- ExternalReference ref =
- ExternalReference(IC_Utility(IC::kStoreIC_ArrayLength), masm->isolate());
- __ TailCallExternalReference(ref, 2, 1);
-
- __ bind(&miss);
-
- StubCompiler::TailCallBuiltin(
- masm, BaseLoadStoreStubCompiler::MissBuiltin(kind()));
-}
-
-
void ArgumentsAccessStub::GenerateReadElement(MacroAssembler* masm) {
// The key is in edx and the parameter count is in eax.
@@ -1190,7 +1126,7 @@ void ArgumentsAccessStub::GenerateReadElement(MacroAssembler* masm) {
}
-void ArgumentsAccessStub::GenerateNewNonStrictSlow(MacroAssembler* masm) {
+void ArgumentsAccessStub::GenerateNewSloppySlow(MacroAssembler* masm) {
// esp[0] : return address
// esp[4] : number of parameters
// esp[8] : receiver displacement
@@ -1211,11 +1147,11 @@ void ArgumentsAccessStub::GenerateNewNonStrictSlow(MacroAssembler* masm) {
__ mov(Operand(esp, 2 * kPointerSize), edx);
__ bind(&runtime);
- __ TailCallRuntime(Runtime::kNewArgumentsFast, 3, 1);
+ __ TailCallRuntime(Runtime::kHiddenNewArgumentsFast, 3, 1);
}
-void ArgumentsAccessStub::GenerateNewNonStrictFast(MacroAssembler* masm) {
+void ArgumentsAccessStub::GenerateNewSloppyFast(MacroAssembler* masm) {
Isolate* isolate = masm->isolate();
// esp[0] : return address
@@ -1275,7 +1211,7 @@ void ArgumentsAccessStub::GenerateNewNonStrictFast(MacroAssembler* masm) {
__ lea(ebx, Operand(ebx, ecx, times_2, FixedArray::kHeaderSize));
// 3. Arguments object.
- __ add(ebx, Immediate(Heap::kArgumentsObjectSize));
+ __ add(ebx, Immediate(Heap::kSloppyArgumentsObjectSize));
// Do the allocation of all three objects in one go.
__ Allocate(ebx, eax, edx, edi, &runtime, TAG_OBJECT);
@@ -1293,7 +1229,7 @@ void ArgumentsAccessStub::GenerateNewNonStrictFast(MacroAssembler* masm) {
__ test(ebx, ebx);
__ j(not_zero, &has_mapped_parameters, Label::kNear);
__ mov(edi, Operand(edi,
- Context::SlotOffset(Context::ARGUMENTS_BOILERPLATE_INDEX)));
+ Context::SlotOffset(Context::SLOPPY_ARGUMENTS_BOILERPLATE_INDEX)));
__ jmp(&copy, Label::kNear);
__ bind(&has_mapped_parameters);
@@ -1330,7 +1266,7 @@ void ArgumentsAccessStub::GenerateNewNonStrictFast(MacroAssembler* masm) {
// Set up the elements pointer in the allocated arguments object.
// If we allocated a parameter map, edi will point there, otherwise to the
// backing store.
- __ lea(edi, Operand(eax, Heap::kArgumentsObjectSize));
+ __ lea(edi, Operand(eax, Heap::kSloppyArgumentsObjectSize));
__ mov(FieldOperand(eax, JSObject::kElementsOffset), edi);
// eax = address of new object (tagged)
@@ -1349,7 +1285,7 @@ void ArgumentsAccessStub::GenerateNewNonStrictFast(MacroAssembler* masm) {
__ j(zero, &skip_parameter_map);
__ mov(FieldOperand(edi, FixedArray::kMapOffset),
- Immediate(isolate->factory()->non_strict_arguments_elements_map()));
+ Immediate(isolate->factory()->sloppy_arguments_elements_map()));
__ lea(eax, Operand(ebx, reinterpret_cast<intptr_t>(Smi::FromInt(2))));
__ mov(FieldOperand(edi, FixedArray::kLengthOffset), eax);
__ mov(FieldOperand(edi, FixedArray::kHeaderSize + 0 * kPointerSize), esi);
@@ -1436,7 +1372,7 @@ void ArgumentsAccessStub::GenerateNewNonStrictFast(MacroAssembler* masm) {
__ bind(&runtime);
__ pop(eax); // Remove saved parameter count.
__ mov(Operand(esp, 1 * kPointerSize), ecx); // Patch argument count.
- __ TailCallRuntime(Runtime::kNewArgumentsFast, 3, 1);
+ __ TailCallRuntime(Runtime::kHiddenNewArgumentsFast, 3, 1);
}
@@ -1475,7 +1411,7 @@ void ArgumentsAccessStub::GenerateNewStrict(MacroAssembler* masm) {
__ j(zero, &add_arguments_object, Label::kNear);
__ lea(ecx, Operand(ecx, times_2, FixedArray::kHeaderSize));
__ bind(&add_arguments_object);
- __ add(ecx, Immediate(Heap::kArgumentsObjectSizeStrict));
+ __ add(ecx, Immediate(Heap::kStrictArgumentsObjectSize));
// Do the allocation of both objects in one go.
__ Allocate(ecx, eax, edx, ebx, &runtime, TAG_OBJECT);
@@ -1484,7 +1420,7 @@ void ArgumentsAccessStub::GenerateNewStrict(MacroAssembler* masm) {
__ mov(edi, Operand(esi, Context::SlotOffset(Context::GLOBAL_OBJECT_INDEX)));
__ mov(edi, FieldOperand(edi, GlobalObject::kNativeContextOffset));
const int offset =
- Context::SlotOffset(Context::STRICT_MODE_ARGUMENTS_BOILERPLATE_INDEX);
+ Context::SlotOffset(Context::STRICT_ARGUMENTS_BOILERPLATE_INDEX);
__ mov(edi, Operand(edi, offset));
// Copy the JS object part.
@@ -1510,7 +1446,7 @@ void ArgumentsAccessStub::GenerateNewStrict(MacroAssembler* masm) {
// Set up the elements pointer in the allocated arguments object and
// initialize the header in the elements fixed array.
- __ lea(edi, Operand(eax, Heap::kArgumentsObjectSizeStrict));
+ __ lea(edi, Operand(eax, Heap::kStrictArgumentsObjectSize));
__ mov(FieldOperand(eax, JSObject::kElementsOffset), edi);
__ mov(FieldOperand(edi, FixedArray::kMapOffset),
Immediate(isolate->factory()->fixed_array_map()));
@@ -1535,7 +1471,7 @@ void ArgumentsAccessStub::GenerateNewStrict(MacroAssembler* masm) {
// Do the runtime call to allocate the arguments object.
__ bind(&runtime);
- __ TailCallRuntime(Runtime::kNewStrictArgumentsFast, 3, 1);
+ __ TailCallRuntime(Runtime::kHiddenNewStrictArgumentsFast, 3, 1);
}
@@ -1544,7 +1480,7 @@ void RegExpExecStub::Generate(MacroAssembler* masm) {
// time or if regexp entry in generated code is turned off runtime switch or
// at compilation.
#ifdef V8_INTERPRETED_REGEXP
- __ TailCallRuntime(Runtime::kRegExpExec, 4, 1);
+ __ TailCallRuntime(Runtime::kHiddenRegExpExec, 4, 1);
#else // V8_INTERPRETED_REGEXP
// Stack frame on entry.
@@ -1607,7 +1543,7 @@ void RegExpExecStub::Generate(MacroAssembler* masm) {
__ j(above, &runtime);
// Reset offset for possibly sliced string.
- __ Set(edi, Immediate(0));
+ __ Move(edi, Immediate(0));
__ mov(eax, Operand(esp, kSubjectOffset));
__ JumpIfSmi(eax, &runtime);
__ mov(edx, eax); // Make a copy of the original subject string.
@@ -1701,7 +1637,7 @@ void RegExpExecStub::Generate(MacroAssembler* masm) {
__ cmp(ebx, FieldOperand(edx, String::kLengthOffset));
__ j(above_equal, &runtime);
__ mov(edx, FieldOperand(ecx, JSRegExp::kDataAsciiCodeOffset));
- __ Set(ecx, Immediate(1)); // Type is one byte.
+ __ Move(ecx, Immediate(1)); // Type is one byte.
// (E) Carry on. String handling is done.
__ bind(&check_code);
@@ -1928,7 +1864,7 @@ void RegExpExecStub::Generate(MacroAssembler* masm) {
// Do the runtime call to execute the regexp.
__ bind(&runtime);
- __ TailCallRuntime(Runtime::kRegExpExec, 4, 1);
+ __ TailCallRuntime(Runtime::kHiddenRegExpExec, 4, 1);
// Deferred code for string handling.
// (7) Not a long external string? If yes, go to (10).
@@ -1969,7 +1905,7 @@ void RegExpExecStub::Generate(MacroAssembler* masm) {
__ cmp(ebx, FieldOperand(edx, String::kLengthOffset));
__ j(above_equal, &runtime);
__ mov(edx, FieldOperand(ecx, JSRegExp::kDataUC16CodeOffset));
- __ Set(ecx, Immediate(0)); // Type is two byte.
+ __ Move(ecx, Immediate(0)); // Type is two byte.
__ jmp(&check_code); // Go to (E).
// (10) Not a string or a short external string? If yes, bail out to runtime.
@@ -2066,7 +2002,7 @@ void ICCompareStub::GenerateGeneric(MacroAssembler* masm) {
Label check_for_nan;
__ cmp(edx, masm->isolate()->factory()->undefined_value());
__ j(not_equal, &check_for_nan, Label::kNear);
- __ Set(eax, Immediate(Smi::FromInt(NegativeComparisonResult(cc))));
+ __ Move(eax, Immediate(Smi::FromInt(NegativeComparisonResult(cc))));
__ ret(0);
__ bind(&check_for_nan);
}
@@ -2081,7 +2017,7 @@ void ICCompareStub::GenerateGeneric(MacroAssembler* masm) {
__ CmpObjectType(eax, FIRST_SPEC_OBJECT_TYPE, ecx);
__ j(above_equal, &not_identical);
}
- __ Set(eax, Immediate(Smi::FromInt(EQUAL)));
+ __ Move(eax, Immediate(Smi::FromInt(EQUAL)));
__ ret(0);
@@ -2195,7 +2131,7 @@ void ICCompareStub::GenerateGeneric(MacroAssembler* masm) {
__ j(below, &below_label, Label::kNear);
__ j(above, &above_label, Label::kNear);
- __ Set(eax, Immediate(0));
+ __ Move(eax, Immediate(0));
__ ret(0);
__ bind(&below_label);
@@ -2287,7 +2223,7 @@ void ICCompareStub::GenerateGeneric(MacroAssembler* masm) {
__ j(zero, &return_unequal, Label::kNear);
// The objects are both undetectable, so they both compare as the value
// undefined, and are equal.
- __ Set(eax, Immediate(EQUAL));
+ __ Move(eax, Immediate(EQUAL));
__ bind(&return_unequal);
// Return non-equal by returning the non-zero object pointer in eax,
// or return equal if we fell through to here.
@@ -2322,95 +2258,115 @@ void ICCompareStub::GenerateGeneric(MacroAssembler* masm) {
static void GenerateRecordCallTarget(MacroAssembler* masm) {
- // Cache the called function in a global property cell. Cache states
+ // Cache the called function in a feedback vector slot. Cache states
// are uninitialized, monomorphic (indicated by a JSFunction), and
// megamorphic.
// eax : number of arguments to the construct function
- // ebx : cache cell for call target
+ // ebx : Feedback vector
+ // edx : slot in feedback vector (Smi)
// edi : the function to call
Isolate* isolate = masm->isolate();
Label initialize, done, miss, megamorphic, not_array_function;
// Load the cache state into ecx.
- __ mov(ecx, FieldOperand(ebx, Cell::kValueOffset));
+ __ mov(ecx, FieldOperand(ebx, edx, times_half_pointer_size,
+ FixedArray::kHeaderSize));
// A monomorphic cache hit or an already megamorphic state: invoke the
// function without changing the state.
__ cmp(ecx, edi);
- __ j(equal, &done);
- __ cmp(ecx, Immediate(TypeFeedbackCells::MegamorphicSentinel(isolate)));
- __ j(equal, &done);
-
- // If we came here, we need to see if we are the array function.
- // If we didn't have a matching function, and we didn't find the megamorph
- // sentinel, then we have in the cell either some other function or an
- // AllocationSite. Do a map check on the object in ecx.
- Handle<Map> allocation_site_map =
- masm->isolate()->factory()->allocation_site_map();
- __ cmp(FieldOperand(ecx, 0), Immediate(allocation_site_map));
- __ j(not_equal, &miss);
-
- // Load the global or builtins object from the current context
- __ LoadGlobalContext(ecx);
- // Make sure the function is the Array() function
- __ cmp(edi, Operand(ecx,
- Context::SlotOffset(Context::ARRAY_FUNCTION_INDEX)));
- __ j(not_equal, &megamorphic);
- __ jmp(&done);
+ __ j(equal, &done, Label::kFar);
+ __ cmp(ecx, Immediate(TypeFeedbackInfo::MegamorphicSentinel(isolate)));
+ __ j(equal, &done, Label::kFar);
+
+ if (!FLAG_pretenuring_call_new) {
+ // If we came here, we need to see if we are the array function.
+ // If we didn't have a matching function, and we didn't find the megamorph
+ // sentinel, then we have in the slot either some other function or an
+ // AllocationSite. Do a map check on the object in ecx.
+ Handle<Map> allocation_site_map =
+ masm->isolate()->factory()->allocation_site_map();
+ __ cmp(FieldOperand(ecx, 0), Immediate(allocation_site_map));
+ __ j(not_equal, &miss);
+
+ // Make sure the function is the Array() function
+ __ LoadGlobalFunction(Context::ARRAY_FUNCTION_INDEX, ecx);
+ __ cmp(edi, ecx);
+ __ j(not_equal, &megamorphic);
+ __ jmp(&done, Label::kFar);
+ }
__ bind(&miss);
// A monomorphic miss (i.e, here the cache is not uninitialized) goes
// megamorphic.
- __ cmp(ecx, Immediate(TypeFeedbackCells::UninitializedSentinel(isolate)));
+ __ cmp(ecx, Immediate(TypeFeedbackInfo::UninitializedSentinel(isolate)));
__ j(equal, &initialize);
// MegamorphicSentinel is an immortal immovable object (undefined) so no
// write-barrier is needed.
__ bind(&megamorphic);
- __ mov(FieldOperand(ebx, Cell::kValueOffset),
- Immediate(TypeFeedbackCells::MegamorphicSentinel(isolate)));
- __ jmp(&done, Label::kNear);
+ __ mov(FieldOperand(ebx, edx, times_half_pointer_size,
+ FixedArray::kHeaderSize),
+ Immediate(TypeFeedbackInfo::MegamorphicSentinel(isolate)));
+ __ jmp(&done, Label::kFar);
// An uninitialized cache is patched with the function or sentinel to
// indicate the ElementsKind if function is the Array constructor.
__ bind(&initialize);
- __ LoadGlobalContext(ecx);
- // Make sure the function is the Array() function
- __ cmp(edi, Operand(ecx,
- Context::SlotOffset(Context::ARRAY_FUNCTION_INDEX)));
- __ j(not_equal, &not_array_function);
-
- // The target function is the Array constructor,
- // Create an AllocationSite if we don't already have it, store it in the cell
- {
- FrameScope scope(masm, StackFrame::INTERNAL);
+ if (!FLAG_pretenuring_call_new) {
+ // Make sure the function is the Array() function
+ __ LoadGlobalFunction(Context::ARRAY_FUNCTION_INDEX, ecx);
+ __ cmp(edi, ecx);
+ __ j(not_equal, &not_array_function);
+
+ // The target function is the Array constructor,
+ // Create an AllocationSite if we don't already have it, store it in the
+ // slot.
+ {
+ FrameScope scope(masm, StackFrame::INTERNAL);
- // Arguments register must be smi-tagged to call out.
- __ SmiTag(eax);
- __ push(eax);
- __ push(edi);
- __ push(ebx);
+ // Arguments register must be smi-tagged to call out.
+ __ SmiTag(eax);
+ __ push(eax);
+ __ push(edi);
+ __ push(edx);
+ __ push(ebx);
- CreateAllocationSiteStub create_stub;
- __ CallStub(&create_stub);
+ CreateAllocationSiteStub create_stub;
+ __ CallStub(&create_stub);
- __ pop(ebx);
- __ pop(edi);
- __ pop(eax);
- __ SmiUntag(eax);
+ __ pop(ebx);
+ __ pop(edx);
+ __ pop(edi);
+ __ pop(eax);
+ __ SmiUntag(eax);
+ }
+ __ jmp(&done);
+
+ __ bind(&not_array_function);
}
- __ jmp(&done);
- __ bind(&not_array_function);
- __ mov(FieldOperand(ebx, Cell::kValueOffset), edi);
- // No need for a write barrier here - cells are rescanned.
+ __ mov(FieldOperand(ebx, edx, times_half_pointer_size,
+ FixedArray::kHeaderSize),
+ edi);
+ // We won't need edx or ebx anymore; just save edi.
+ __ push(edi);
+ __ push(ebx);
+ __ push(edx);
+ __ RecordWriteArray(ebx, edi, edx, kDontSaveFPRegs,
+ EMIT_REMEMBERED_SET, OMIT_SMI_CHECK);
+ __ pop(edx);
+ __ pop(ebx);
+ __ pop(edi);
__ bind(&done);
}
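
The FieldOperand(ebx, edx, times_half_pointer_size, FixedArray::kHeaderSize) addressing used throughout this rewrite works because edx carries the slot index as a Smi, i.e. already shifted left by one, so scaling it by half a pointer lands on slot * kPointerSize. A minimal sketch of that arithmetic, assuming the usual ia32 values (kPointerSize = 4, kSmiTagSize = 1, kHeapObjectTag = 1, an 8-byte FixedArray header); none of these constants are spelled out in this hunk:

    #include <cassert>
    #include <cstdint>
    #include <cstdio>

    // Assumed ia32 constants, not taken from this diff.
    const int kPointerSize = 4;
    const int kSmiTagSize = 1;                           // Smi = value << 1
    const int kHeapObjectTag = 1;                        // tagged pointers end in 1
    const int kFixedArrayHeaderSize = 2 * kPointerSize;  // map + length

    // Displacement relative to the tagged FixedArray pointer, the way
    // FieldOperand(base, index, times_half_pointer_size, header) builds it.
    int32_t FeedbackSlotOffset(int slot) {
      int32_t smi_index = slot << kSmiTagSize;          // what edx holds
      return smi_index * (kPointerSize / 2)             // times_half_pointer_size
             + kFixedArrayHeaderSize - kHeapObjectTag;  // FieldOperand drops the tag
    }

    int main() {
      for (int slot = 0; slot < 4; ++slot) {
        // Equivalent to the untagged element offset header + slot * kPointerSize.
        assert(FeedbackSlotOffset(slot) + kHeapObjectTag ==
               kFixedArrayHeaderSize + slot * kPointerSize);
        printf("slot %d -> displacement %d\n", slot, FeedbackSlotOffset(slot));
      }
      return 0;
    }

The same addressing pattern is reused below in CallFunctionStub and CallConstructStub when they write the megamorphic sentinel or read the AllocationSite.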
void CallFunctionStub::Generate(MacroAssembler* masm) {
- // ebx : cache cell for call target
+ // ebx : feedback vector
+ // edx : (only if ebx is not the megamorphic symbol) slot in feedback
+ // vector (Smi)
// edi : the function to call
Isolate* isolate = masm->isolate();
Label slow, non_function, wrap, cont;
@@ -2425,6 +2381,10 @@ void CallFunctionStub::Generate(MacroAssembler* masm) {
if (RecordCallTarget()) {
GenerateRecordCallTarget(masm);
+ // Type information was updated. Because we may call Array, which
+ // expects either undefined or an AllocationSite in ebx, we need
+ // to set ebx to undefined.
+ __ mov(ebx, Immediate(isolate->factory()->undefined_value()));
}
}
@@ -2468,9 +2428,10 @@ void CallFunctionStub::Generate(MacroAssembler* masm) {
if (RecordCallTarget()) {
// If there is a call target cache, mark it megamorphic in the
// non-function case. MegamorphicSentinel is an immortal immovable
- // object (undefined) so no write barrier is needed.
- __ mov(FieldOperand(ebx, Cell::kValueOffset),
- Immediate(TypeFeedbackCells::MegamorphicSentinel(isolate)));
+ // object (megamorphic symbol) so no write barrier is needed.
+ __ mov(FieldOperand(ebx, edx, times_half_pointer_size,
+ FixedArray::kHeaderSize),
+ Immediate(TypeFeedbackInfo::MegamorphicSentinel(isolate)));
}
// Check for function proxy.
__ CmpInstanceType(ecx, JS_FUNCTION_PROXY_TYPE);
@@ -2478,8 +2439,8 @@ void CallFunctionStub::Generate(MacroAssembler* masm) {
__ pop(ecx);
__ push(edi); // put proxy as additional argument under return address
__ push(ecx);
- __ Set(eax, Immediate(argc_ + 1));
- __ Set(ebx, Immediate(0));
+ __ Move(eax, Immediate(argc_ + 1));
+ __ Move(ebx, Immediate(0));
__ GetBuiltinEntry(edx, Builtins::CALL_FUNCTION_PROXY);
{
Handle<Code> adaptor = isolate->builtins()->ArgumentsAdaptorTrampoline();
@@ -2490,8 +2451,8 @@ void CallFunctionStub::Generate(MacroAssembler* masm) {
// of the original receiver from the call site).
__ bind(&non_function);
__ mov(Operand(esp, (argc_ + 1) * kPointerSize), edi);
- __ Set(eax, Immediate(argc_));
- __ Set(ebx, Immediate(0));
+ __ Move(eax, Immediate(argc_));
+ __ Move(ebx, Immediate(0));
__ GetBuiltinEntry(edx, Builtins::CALL_NON_FUNCTION);
Handle<Code> adaptor = isolate->builtins()->ArgumentsAdaptorTrampoline();
__ jmp(adaptor, RelocInfo::CODE_TARGET);
@@ -2514,7 +2475,9 @@ void CallFunctionStub::Generate(MacroAssembler* masm) {
void CallConstructStub::Generate(MacroAssembler* masm) {
// eax : number of arguments
- // ebx : cache cell for call target
+ // ebx : feedback vector
+ // edx : (only if ebx is not the megamorphic symbol) slot in feedback
+ // vector (Smi)
// edi : constructor function
Label slow, non_function_call;
@@ -2526,6 +2489,27 @@ void CallConstructStub::Generate(MacroAssembler* masm) {
if (RecordCallTarget()) {
GenerateRecordCallTarget(masm);
+
+ if (FLAG_pretenuring_call_new) {
+ // Put the AllocationSite from the feedback vector into ebx.
+ // By adding kPointerSize we encode that we know the AllocationSite
+ // entry is at the feedback vector slot given by edx + 1.
+ __ mov(ebx, FieldOperand(ebx, edx, times_half_pointer_size,
+ FixedArray::kHeaderSize + kPointerSize));
+ } else {
+ Label feedback_register_initialized;
+ // Put the AllocationSite from the feedback vector into ebx, or undefined.
+ __ mov(ebx, FieldOperand(ebx, edx, times_half_pointer_size,
+ FixedArray::kHeaderSize));
+ Handle<Map> allocation_site_map =
+ masm->isolate()->factory()->allocation_site_map();
+ __ cmp(FieldOperand(ebx, 0), Immediate(allocation_site_map));
+ __ j(equal, &feedback_register_initialized);
+ __ mov(ebx, masm->isolate()->factory()->undefined_value());
+ __ bind(&feedback_register_initialized);
+ }
+
+ __ AssertUndefinedOrAllocationSite(ebx);
}
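
The pretenuring branch above fetches the AllocationSite from the slot after the call's own feedback slot; adding kPointerSize to FixedArray::kHeaderSize in the displacement is the same as indexing slot + 1. A short check of that equivalence, reusing the assumed ia32 constants from the earlier sketch:

    #include <cassert>
    #include <cstdint>

    const int kPointerSize = 4;                          // assumed ia32 values
    const int kSmiTagSize = 1;
    const int kHeapObjectTag = 1;
    const int kFixedArrayHeaderSize = 2 * kPointerSize;

    int32_t SlotDisplacement(int slot, int extra_bytes) {
      return (slot << kSmiTagSize) * (kPointerSize / 2)
             + kFixedArrayHeaderSize + extra_bytes - kHeapObjectTag;
    }

    int main() {
      // Reading "slot" with an extra kPointerSize equals reading "slot + 1".
      for (int slot = 0; slot < 16; ++slot) {
        assert(SlotDisplacement(slot, kPointerSize) == SlotDisplacement(slot + 1, 0));
      }
      return 0;
    }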
// Jump to the function-specific construct stub.
@@ -2550,7 +2534,7 @@ void CallConstructStub::Generate(MacroAssembler* masm) {
__ GetBuiltinEntry(edx, Builtins::CALL_NON_FUNCTION_AS_CONSTRUCTOR);
__ bind(&do_call);
// Set expected number of arguments to zero (not changing eax).
- __ Set(ebx, Immediate(0));
+ __ Move(ebx, Immediate(0));
Handle<Code> arguments_adaptor =
masm->isolate()->builtins()->ArgumentsAdaptorTrampoline();
__ jmp(arguments_adaptor, RelocInfo::CODE_TARGET);
@@ -2600,23 +2584,9 @@ void CEntryStub::GenerateAheadOfTime(Isolate* isolate) {
}
-static void JumpIfOOM(MacroAssembler* masm,
- Register value,
- Register scratch,
- Label* oom_label) {
- __ mov(scratch, value);
- STATIC_ASSERT(Failure::OUT_OF_MEMORY_EXCEPTION == 3);
- STATIC_ASSERT(kFailureTag == 3);
- __ and_(scratch, 0xf);
- __ cmp(scratch, 0xf);
- __ j(equal, oom_label);
-}
-
-
void CEntryStub::GenerateCore(MacroAssembler* masm,
Label* throw_normal_exception,
Label* throw_termination_exception,
- Label* throw_out_of_memory_exception,
bool do_gc,
bool always_allocate_scope) {
// eax: result parameter for PerformGC, if any
@@ -2711,15 +2681,9 @@ void CEntryStub::GenerateCore(MacroAssembler* masm,
__ test(eax, Immediate(((1 << kFailureTypeTagSize) - 1) << kFailureTagSize));
__ j(zero, &retry, Label::kNear);
- // Special handling of out of memory exceptions.
- JumpIfOOM(masm, eax, ecx, throw_out_of_memory_exception);
-
// Retrieve the pending exception.
__ mov(eax, Operand::StaticVariable(pending_exception_address));
- // See if we just retrieved an OOM exception.
- JumpIfOOM(masm, eax, ecx, throw_out_of_memory_exception);
-
// Clear the pending exception.
__ mov(edx, Immediate(masm->isolate()->factory()->the_hole_value()));
__ mov(Operand::StaticVariable(pending_exception_address), edx);
@@ -2763,13 +2727,11 @@ void CEntryStub::Generate(MacroAssembler* masm) {
Label throw_normal_exception;
Label throw_termination_exception;
- Label throw_out_of_memory_exception;
// Call into the runtime system.
GenerateCore(masm,
&throw_normal_exception,
&throw_termination_exception,
- &throw_out_of_memory_exception,
false,
false);
@@ -2777,7 +2739,6 @@ void CEntryStub::Generate(MacroAssembler* masm) {
GenerateCore(masm,
&throw_normal_exception,
&throw_termination_exception,
- &throw_out_of_memory_exception,
true,
false);
@@ -2787,26 +2748,14 @@ void CEntryStub::Generate(MacroAssembler* masm) {
GenerateCore(masm,
&throw_normal_exception,
&throw_termination_exception,
- &throw_out_of_memory_exception,
true,
true);
- __ bind(&throw_out_of_memory_exception);
- // Set external caught exception to false.
- Isolate* isolate = masm->isolate();
- ExternalReference external_caught(Isolate::kExternalCaughtExceptionAddress,
- isolate);
- __ mov(Operand::StaticVariable(external_caught), Immediate(false));
-
- // Set pending exception and eax to out of memory exception.
- ExternalReference pending_exception(Isolate::kPendingExceptionAddress,
- isolate);
- Label already_have_failure;
- JumpIfOOM(masm, eax, ecx, &already_have_failure);
- __ mov(eax, reinterpret_cast<int32_t>(Failure::OutOfMemoryException(0x1)));
- __ bind(&already_have_failure);
- __ mov(Operand::StaticVariable(pending_exception), eax);
- // Fall through to the next label.
+ { FrameScope scope(masm, StackFrame::MANUAL);
+ __ PrepareCallCFunction(0, eax);
+ __ CallCFunction(
+ ExternalReference::out_of_memory_function(masm->isolate()), 0);
+ }
__ bind(&throw_termination_exception);
__ ThrowUncatchable(eax);
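
The deleted JumpIfOOM helper leaned on the old failure encoding: the removed STATIC_ASSERTs pin Failure::OUT_OF_MEMORY_EXCEPTION and kFailureTag to 3, and with a two-bit tag field (an assumption here, the width is not shown) an out-of-memory failure is exactly the word whose low nibble is 0xf, which is what the and/cmp pair tested. A sketch of that encoding:

    #include <cassert>
    #include <cstdint>

    const int kFailureTag = 3;            // asserted in the removed code
    const int kFailureTagSize = 2;        // assumed width of the tag field
    const int kOutOfMemoryException = 3;  // Failure::OUT_OF_MEMORY_EXCEPTION

    // A Failure word packs the failure type above the tag bits.
    uint32_t MakeFailure(int type) {
      return (static_cast<uint32_t>(type) << kFailureTagSize) | kFailureTag;
    }

    int main() {
      // The removed check: (value & 0xf) == 0xf means "failure" and "out of memory".
      assert((MakeFailure(kOutOfMemoryException) & 0xf) == 0xf);
      // Other failure types fall through instead of jumping to the OOM label.
      assert((MakeFailure(1) & 0xf) != 0xf);
      assert((MakeFailure(2) & 0xf) != 0xf);
      return 0;
    }

With this patch the stub no longer special-cases that pattern at all; it reports the condition through a plain C call to out_of_memory_function instead.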
@@ -3041,7 +2990,7 @@ void InstanceofStub::Generate(MacroAssembler* masm) {
}
__ mov(Operand(scratch, kDeltaToMovImmediate), eax);
if (!ReturnTrueFalseObject()) {
- __ Set(eax, Immediate(0));
+ __ Move(eax, Immediate(0));
}
}
__ ret((HasArgsInRegisters() ? 0 : 2) * kPointerSize);
@@ -3061,7 +3010,7 @@ void InstanceofStub::Generate(MacroAssembler* masm) {
}
__ mov(Operand(scratch, kDeltaToMovImmediate), eax);
if (!ReturnTrueFalseObject()) {
- __ Set(eax, Immediate(Smi::FromInt(1)));
+ __ Move(eax, Immediate(Smi::FromInt(1)));
}
}
__ ret((HasArgsInRegisters() ? 0 : 2) * kPointerSize);
@@ -3077,20 +3026,20 @@ void InstanceofStub::Generate(MacroAssembler* masm) {
// Null is not an instance of anything.
__ cmp(object, factory->null_value());
__ j(not_equal, &object_not_null, Label::kNear);
- __ Set(eax, Immediate(Smi::FromInt(1)));
+ __ Move(eax, Immediate(Smi::FromInt(1)));
__ ret((HasArgsInRegisters() ? 0 : 2) * kPointerSize);
__ bind(&object_not_null);
// Smi values are not instances of anything.
__ JumpIfNotSmi(object, &object_not_null_or_smi, Label::kNear);
- __ Set(eax, Immediate(Smi::FromInt(1)));
+ __ Move(eax, Immediate(Smi::FromInt(1)));
__ ret((HasArgsInRegisters() ? 0 : 2) * kPointerSize);
__ bind(&object_not_null_or_smi);
// String values are not instances of anything.
Condition is_string = masm->IsObjectStringType(object, scratch, scratch);
__ j(NegateCondition(is_string), &slow, Label::kNear);
- __ Set(eax, Immediate(Smi::FromInt(1)));
+ __ Move(eax, Immediate(Smi::FromInt(1)));
__ ret((HasArgsInRegisters() ? 0 : 2) * kPointerSize);
// Slow-case: Go through the JavaScript implementation.
@@ -3187,7 +3136,7 @@ void StringCharCodeAtGenerator::GenerateSlow(
} else {
ASSERT(index_flags_ == STRING_INDEX_IS_ARRAY_INDEX);
// NumberToSmi discards numbers that are not exact integers.
- __ CallRuntime(Runtime::kNumberToSmi, 1);
+ __ CallRuntime(Runtime::kHiddenNumberToSmi, 1);
}
if (!index_.is(eax)) {
// Save the conversion result before the pop instructions below
@@ -3213,7 +3162,7 @@ void StringCharCodeAtGenerator::GenerateSlow(
__ push(object_);
__ SmiTag(index_);
__ push(index_);
- __ CallRuntime(Runtime::kStringCharCodeAt, 2);
+ __ CallRuntime(Runtime::kHiddenStringCharCodeAt, 2);
if (!result_.is(eax)) {
__ mov(result_, eax);
}
@@ -3238,7 +3187,7 @@ void StringCharFromCodeGenerator::GenerateFast(MacroAssembler* masm) {
__ j(not_zero, &slow_case_);
Factory* factory = masm->isolate()->factory();
- __ Set(result_, Immediate(factory->single_character_string_cache()));
+ __ Move(result_, Immediate(factory->single_character_string_cache()));
STATIC_ASSERT(kSmiTag == 0);
STATIC_ASSERT(kSmiTagSize == 1);
STATIC_ASSERT(kSmiShiftSize == 0);
@@ -3609,7 +3558,7 @@ void SubStringStub::Generate(MacroAssembler* masm) {
// Just jump to runtime to create the sub string.
__ bind(&runtime);
- __ TailCallRuntime(Runtime::kSubString, 3, 1);
+ __ TailCallRuntime(Runtime::kHiddenSubString, 3, 1);
__ bind(&single_char);
// eax: string
@@ -3637,7 +3586,7 @@ void StringCompareStub::GenerateFlatAsciiStringEquals(MacroAssembler* masm,
__ cmp(length, FieldOperand(right, String::kLengthOffset));
__ j(equal, &check_zero_length, Label::kNear);
__ bind(&strings_not_equal);
- __ Set(eax, Immediate(Smi::FromInt(NOT_EQUAL)));
+ __ Move(eax, Immediate(Smi::FromInt(NOT_EQUAL)));
__ ret(0);
// Check if the length is zero.
@@ -3646,7 +3595,7 @@ void StringCompareStub::GenerateFlatAsciiStringEquals(MacroAssembler* masm,
STATIC_ASSERT(kSmiTag == 0);
__ test(length, length);
__ j(not_zero, &compare_chars, Label::kNear);
- __ Set(eax, Immediate(Smi::FromInt(EQUAL)));
+ __ Move(eax, Immediate(Smi::FromInt(EQUAL)));
__ ret(0);
// Compare characters.
@@ -3655,7 +3604,7 @@ void StringCompareStub::GenerateFlatAsciiStringEquals(MacroAssembler* masm,
&strings_not_equal, Label::kNear);
// Characters are equal.
- __ Set(eax, Immediate(Smi::FromInt(EQUAL)));
+ __ Move(eax, Immediate(Smi::FromInt(EQUAL)));
__ ret(0);
}
@@ -3703,7 +3652,7 @@ void StringCompareStub::GenerateCompareFlatAsciiStrings(MacroAssembler* masm,
// Result is EQUAL.
STATIC_ASSERT(EQUAL == 0);
STATIC_ASSERT(kSmiTag == 0);
- __ Set(eax, Immediate(Smi::FromInt(EQUAL)));
+ __ Move(eax, Immediate(Smi::FromInt(EQUAL)));
__ ret(0);
Label result_greater;
@@ -3716,12 +3665,12 @@ void StringCompareStub::GenerateCompareFlatAsciiStrings(MacroAssembler* masm,
__ bind(&result_less);
// Result is LESS.
- __ Set(eax, Immediate(Smi::FromInt(LESS)));
+ __ Move(eax, Immediate(Smi::FromInt(LESS)));
__ ret(0);
// Result is GREATER.
__ bind(&result_greater);
- __ Set(eax, Immediate(Smi::FromInt(GREATER)));
+ __ Move(eax, Immediate(Smi::FromInt(GREATER)));
__ ret(0);
}
@@ -3772,7 +3721,7 @@ void StringCompareStub::Generate(MacroAssembler* masm) {
__ j(not_equal, &not_same, Label::kNear);
STATIC_ASSERT(EQUAL == 0);
STATIC_ASSERT(kSmiTag == 0);
- __ Set(eax, Immediate(Smi::FromInt(EQUAL)));
+ __ Move(eax, Immediate(Smi::FromInt(EQUAL)));
__ IncrementCounter(masm->isolate()->counters()->string_compare_native(), 1);
__ ret(2 * kPointerSize);
@@ -3791,7 +3740,7 @@ void StringCompareStub::Generate(MacroAssembler* masm) {
// Call the runtime; it returns -1 (less), 0 (equal), or 1 (greater)
// tagged as a small integer.
__ bind(&runtime);
- __ TailCallRuntime(Runtime::kStringCompare, 2, 1);
+ __ TailCallRuntime(Runtime::kHiddenStringCompare, 2, 1);
}
@@ -4178,7 +4127,7 @@ void ICCompareStub::GenerateInternalizedStrings(MacroAssembler* masm) {
__ j(not_equal, &done, Label::kNear);
STATIC_ASSERT(EQUAL == 0);
STATIC_ASSERT(kSmiTag == 0);
- __ Set(eax, Immediate(Smi::FromInt(EQUAL)));
+ __ Move(eax, Immediate(Smi::FromInt(EQUAL)));
__ bind(&done);
__ ret(0);
@@ -4223,7 +4172,7 @@ void ICCompareStub::GenerateUniqueNames(MacroAssembler* masm) {
__ j(not_equal, &done, Label::kNear);
STATIC_ASSERT(EQUAL == 0);
STATIC_ASSERT(kSmiTag == 0);
- __ Set(eax, Immediate(Smi::FromInt(EQUAL)));
+ __ Move(eax, Immediate(Smi::FromInt(EQUAL)));
__ bind(&done);
__ ret(0);
@@ -4269,7 +4218,7 @@ void ICCompareStub::GenerateStrings(MacroAssembler* masm) {
__ j(not_equal, &not_same, Label::kNear);
STATIC_ASSERT(EQUAL == 0);
STATIC_ASSERT(kSmiTag == 0);
- __ Set(eax, Immediate(Smi::FromInt(EQUAL)));
+ __ Move(eax, Immediate(Smi::FromInt(EQUAL)));
__ ret(0);
// Handle not identical strings.
@@ -4314,7 +4263,7 @@ void ICCompareStub::GenerateStrings(MacroAssembler* masm) {
if (equality) {
__ TailCallRuntime(Runtime::kStringEquals, 2, 1);
} else {
- __ TailCallRuntime(Runtime::kStringCompare, 2, 1);
+ __ TailCallRuntime(Runtime::kHiddenStringCompare, 2, 1);
}
__ bind(&miss);
@@ -4676,7 +4625,7 @@ void RecordWriteStub::GenerateIncremental(MacroAssembler* masm, Mode mode) {
masm,
kUpdateRememberedSetOnNoNeedToInformIncrementalMarker,
mode);
- InformIncrementalMarker(masm, mode);
+ InformIncrementalMarker(masm);
regs_.Restore(masm);
__ RememberedSetHelper(object_,
address_,
@@ -4691,13 +4640,13 @@ void RecordWriteStub::GenerateIncremental(MacroAssembler* masm, Mode mode) {
masm,
kReturnOnNoNeedToInformIncrementalMarker,
mode);
- InformIncrementalMarker(masm, mode);
+ InformIncrementalMarker(masm);
regs_.Restore(masm);
__ ret(0);
}
-void RecordWriteStub::InformIncrementalMarker(MacroAssembler* masm, Mode mode) {
+void RecordWriteStub::InformIncrementalMarker(MacroAssembler* masm) {
regs_.SaveCallerSaveRegisters(masm, save_fp_regs_mode_);
int argument_count = 3;
__ PrepareCallCFunction(argument_count, regs_.scratch0());
@@ -4707,18 +4656,11 @@ void RecordWriteStub::InformIncrementalMarker(MacroAssembler* masm, Mode mode) {
Immediate(ExternalReference::isolate_address(masm->isolate())));
AllowExternalCallThatCantCauseGC scope(masm);
- if (mode == INCREMENTAL_COMPACTION) {
- __ CallCFunction(
- ExternalReference::incremental_evacuation_record_write_function(
- masm->isolate()),
- argument_count);
- } else {
- ASSERT(mode == INCREMENTAL);
- __ CallCFunction(
- ExternalReference::incremental_marking_record_write_function(
- masm->isolate()),
- argument_count);
- }
+ __ CallCFunction(
+ ExternalReference::incremental_marking_record_write_function(
+ masm->isolate()),
+ argument_count);
+
regs_.RestoreCallerSaveRegisters(masm, save_fp_regs_mode_);
}
@@ -5137,15 +5079,11 @@ void ArrayConstructorStub::GenerateDispatchToArrayStub(
void ArrayConstructorStub::Generate(MacroAssembler* masm) {
// ----------- S t a t e -------------
// -- eax : argc (only if argument_count_ == ANY)
- // -- ebx : type info cell
+ // -- ebx : AllocationSite or undefined
// -- edi : constructor
// -- esp[0] : return address
// -- esp[4] : last argument
// -----------------------------------
- Handle<Object> undefined_sentinel(
- masm->isolate()->heap()->undefined_value(),
- masm->isolate());
-
if (FLAG_debug_code) {
// The array construct code is only set for the global and natives
// builtin Array functions which always have maps.
@@ -5158,25 +5096,15 @@ void ArrayConstructorStub::Generate(MacroAssembler* masm) {
__ CmpObjectType(ecx, MAP_TYPE, ecx);
__ Assert(equal, kUnexpectedInitialMapForArrayFunction);
- // We should either have undefined in ebx or a valid cell
- Label okay_here;
- Handle<Map> cell_map = masm->isolate()->factory()->cell_map();
- __ cmp(ebx, Immediate(undefined_sentinel));
- __ j(equal, &okay_here);
- __ cmp(FieldOperand(ebx, 0), Immediate(cell_map));
- __ Assert(equal, kExpectedPropertyCellInRegisterEbx);
- __ bind(&okay_here);
+ // We should either have undefined in ebx or a valid AllocationSite
+ __ AssertUndefinedOrAllocationSite(ebx);
}
Label no_info;
- // If the type cell is undefined, or contains anything other than an
- // AllocationSite, call an array constructor that doesn't use AllocationSites.
- __ cmp(ebx, Immediate(undefined_sentinel));
+ // If the feedback vector is the undefined value call an array constructor
+ // that doesn't use AllocationSites.
+ __ cmp(ebx, masm->isolate()->factory()->undefined_value());
__ j(equal, &no_info);
- __ mov(ebx, FieldOperand(ebx, Cell::kValueOffset));
- __ cmp(FieldOperand(ebx, 0), Immediate(
- masm->isolate()->factory()->allocation_site_map()));
- __ j(not_equal, &no_info);
// Only look at the lower 16 bits of the transition info.
__ mov(edx, FieldOperand(ebx, AllocationSite::kTransitionInfoOffset));
@@ -5229,7 +5157,6 @@ void InternalArrayConstructorStub::GenerateCase(
void InternalArrayConstructorStub::Generate(MacroAssembler* masm) {
// ----------- S t a t e -------------
// -- eax : argc
- // -- ebx : type info cell
// -- edi : constructor
// -- esp[0] : return address
// -- esp[4] : last argument
@@ -5301,7 +5228,7 @@ void CallApiFunctionStub::Generate(MacroAssembler* masm) {
Register context = esi;
int argc = ArgumentBits::decode(bit_field_);
- bool restore_context = RestoreContextBits::decode(bit_field_);
+ bool is_store = IsStoreBits::decode(bit_field_);
bool call_data_undefined = CallDataUndefinedBits::decode(bit_field_);
typedef FunctionCallbackArguments FCA;
@@ -5370,9 +5297,9 @@ void CallApiFunctionStub::Generate(MacroAssembler* masm) {
// FunctionCallbackInfo::values_.
__ mov(ApiParameterOperand(3), scratch);
// FunctionCallbackInfo::length_.
- __ Set(ApiParameterOperand(4), Immediate(argc));
+ __ Move(ApiParameterOperand(4), Immediate(argc));
// FunctionCallbackInfo::is_construct_call_.
- __ Set(ApiParameterOperand(5), Immediate(0));
+ __ Move(ApiParameterOperand(5), Immediate(0));
// v8::InvocationCallback's argument.
__ lea(scratch, ApiParameterOperand(2));
@@ -5382,15 +5309,20 @@ void CallApiFunctionStub::Generate(MacroAssembler* masm) {
Operand context_restore_operand(ebp,
(2 + FCA::kContextSaveIndex) * kPointerSize);
- Operand return_value_operand(ebp,
- (2 + FCA::kReturnValueOffset) * kPointerSize);
+ // Stores return the first JS argument (the value that was stored).
+ int return_value_offset = 0;
+ if (is_store) {
+ return_value_offset = 2 + FCA::kArgsLength;
+ } else {
+ return_value_offset = 2 + FCA::kReturnValueOffset;
+ }
+ Operand return_value_operand(ebp, return_value_offset * kPointerSize);
__ CallApiFunctionAndReturn(api_function_address,
thunk_address,
ApiParameterOperand(1),
argc + FCA::kArgsLength + 1,
return_value_operand,
- restore_context ?
- &context_restore_operand : NULL);
+ &context_restore_operand);
}
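
The is_store flag above only changes where the stub picks up its return value from the frame: a store callback must hand back the value that was written, which sits past the implicit FunctionCallbackArguments slots as the first JS argument, while every other callback returns the kReturnValueOffset slot. A small sketch of the ebp-relative selection; the FCA constants are placeholders here since their values are not part of this diff:

    #include <cstdio>

    const int kPointerSize = 4;           // ia32
    const int kFCAArgsLength = 7;         // assumed stand-in for FCA::kArgsLength
    const int kFCAReturnValueOffset = 3;  // assumed stand-in for FCA::kReturnValueOffset

    // Mirrors the branch in CallApiFunctionStub::Generate; the "2 +" skips the
    // saved frame pointer and the return address below the operand's base, ebp.
    int ReturnValueByteOffset(bool is_store) {
      int slots = is_store ? 2 + kFCAArgsLength : 2 + kFCAReturnValueOffset;
      return slots * kPointerSize;
    }

    int main() {
      printf("store callback:   [ebp + %d]\n", ReturnValueByteOffset(true));
      printf("regular callback: [ebp + %d]\n", ReturnValueByteOffset(false));
      return 0;
    }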
diff --git a/deps/v8/src/ia32/code-stubs-ia32.h b/deps/v8/src/ia32/code-stubs-ia32.h
index e383a9d7e9..cf20a11c6d 100644
--- a/deps/v8/src/ia32/code-stubs-ia32.h
+++ b/deps/v8/src/ia32/code-stubs-ia32.h
@@ -29,7 +29,6 @@
#define V8_IA32_CODE_STUBS_IA32_H_
#include "macro-assembler.h"
-#include "code-stubs.h"
#include "ic-inl.h"
namespace v8 {
@@ -428,7 +427,7 @@ class RecordWriteStub: public PlatformCodeStub {
MacroAssembler* masm,
OnNoNeedToInformIncrementalMarker on_no_need,
Mode mode);
- void InformIncrementalMarker(MacroAssembler* masm, Mode mode);
+ void InformIncrementalMarker(MacroAssembler* masm);
Major MajorKey() { return RecordWrite; }
diff --git a/deps/v8/src/ia32/debug-ia32.cc b/deps/v8/src/ia32/debug-ia32.cc
index 76a7003bfe..42284ec75c 100644
--- a/deps/v8/src/ia32/debug-ia32.cc
+++ b/deps/v8/src/ia32/debug-ia32.cc
@@ -138,7 +138,7 @@ static void Generate_DebugBreakCallHelper(MacroAssembler* masm,
#ifdef DEBUG
__ RecordComment("// Calling from debug break to runtime - come in - over");
#endif
- __ Set(eax, Immediate(0)); // No arguments.
+ __ Move(eax, Immediate(0)); // No arguments.
__ mov(ebx, Immediate(ExternalReference::debug_break(masm->isolate())));
CEntryStub ceb(1);
@@ -154,7 +154,7 @@ static void Generate_DebugBreakCallHelper(MacroAssembler* masm,
int r = JSCallerSavedCode(i);
Register reg = { r };
if (FLAG_debug_code) {
- __ Set(reg, Immediate(kDebugZapValue));
+ __ Move(reg, Immediate(kDebugZapValue));
}
bool taken = reg.code() == esi.code();
if ((object_regs & (1 << r)) != 0) {
@@ -280,10 +280,12 @@ void Debug::GenerateCallFunctionStubDebugBreak(MacroAssembler* masm) {
void Debug::GenerateCallFunctionStubRecordDebugBreak(MacroAssembler* masm) {
// Register state for CallFunctionStub (from code-stubs-ia32.cc).
// ----------- S t a t e -------------
- // -- ebx: cache cell for call target
+ // -- ebx: feedback array
+ // -- edx: slot in feedback array
// -- edi: function
// -----------------------------------
- Generate_DebugBreakCallHelper(masm, ebx.bit() | edi.bit(), 0, false);
+ Generate_DebugBreakCallHelper(masm, ebx.bit() | edx.bit() | edi.bit(),
+ 0, false);
}
@@ -306,11 +308,13 @@ void Debug::GenerateCallConstructStubRecordDebugBreak(MacroAssembler* masm) {
// above IC call.
// ----------- S t a t e -------------
// -- eax: number of arguments (not smi)
- // -- ebx: cache cell for call target
+ // -- ebx: feedback array
+ // -- edx: feedback slot (smi)
// -- edi: constructor function
// -----------------------------------
// The number of arguments in eax is not smi encoded.
- Generate_DebugBreakCallHelper(masm, ebx.bit() | edi.bit(), eax.bit(), false);
+ Generate_DebugBreakCallHelper(masm, ebx.bit() | edx.bit() | edi.bit(),
+ eax.bit(), false);
}
diff --git a/deps/v8/src/ia32/deoptimizer-ia32.cc b/deps/v8/src/ia32/deoptimizer-ia32.cc
index 5300dde9a2..711cdf86fb 100644
--- a/deps/v8/src/ia32/deoptimizer-ia32.cc
+++ b/deps/v8/src/ia32/deoptimizer-ia32.cc
@@ -116,6 +116,27 @@ void Deoptimizer::EnsureRelocSpaceForLazyDeoptimization(Handle<Code> code) {
void Deoptimizer::PatchCodeForDeoptimization(Isolate* isolate, Code* code) {
Address code_start_address = code->instruction_start();
+
+ if (FLAG_zap_code_space) {
+ // Fail hard and early if we enter this code object again.
+ byte* pointer = code->FindCodeAgeSequence();
+ if (pointer != NULL) {
+ pointer += kNoCodeAgeSequenceLength;
+ } else {
+ pointer = code->instruction_start();
+ }
+ CodePatcher patcher(pointer, 1);
+ patcher.masm()->int3();
+
+ DeoptimizationInputData* data =
+ DeoptimizationInputData::cast(code->deoptimization_data());
+ int osr_offset = data->OsrPcOffset()->value();
+ if (osr_offset > 0) {
+ CodePatcher osr_patcher(code->instruction_start() + osr_offset, 1);
+ osr_patcher.masm()->int3();
+ }
+ }
+
// We will overwrite the code's relocation info in-place. Relocation info
// is written backward. The relocation info is the payload of a byte
// array. Later on we will slide this to the start of the byte array and
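
The FLAG_zap_code_space block added above makes re-entry into deoptimized code fail loudly: it drops a one-byte int3 at the normal entry point (just past the code-age sequence when one exists) and another at the OSR entry recorded in the deoptimization data. A rough sketch of the idea on a plain byte buffer; kNoCodeAgeSequenceLength and the int3 opcode are assumptions here, not values taken from this hunk:

    #include <cassert>
    #include <cstdint>
    #include <cstring>

    const uint8_t kInt3 = 0xCC;              // x86 breakpoint opcode (assumed)
    const int kNoCodeAgeSequenceLength = 5;  // assumed size of the age-check call

    // Mirrors the shape of the new code: trap at the entry and at the OSR entry.
    void ZapEntries(uint8_t* code, bool has_age_sequence, int osr_offset) {
      uint8_t* entry = code + (has_age_sequence ? kNoCodeAgeSequenceLength : 0);
      *entry = kInt3;
      if (osr_offset > 0) code[osr_offset] = kInt3;
    }

    int main() {
      uint8_t code[64];
      memset(code, 0x90, sizeof(code));      // pretend the body is all nops
      ZapEntries(code, true, 32);
      assert(code[kNoCodeAgeSequenceLength] == kInt3);
      assert(code[32] == kInt3);
      return 0;
    }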
@@ -124,9 +145,6 @@ void Deoptimizer::PatchCodeForDeoptimization(Isolate* isolate, Code* code) {
Address reloc_end_address = reloc_info->address() + reloc_info->Size();
RelocInfoWriter reloc_info_writer(reloc_end_address, code_start_address);
- // For each LLazyBailout instruction insert a call to the corresponding
- // deoptimization entry.
-
// Since the call is a relative encoding, write new
// reloc info. We do not need any of the existing reloc info because the
// existing code will not be used again (we zap it in debug builds).
@@ -134,9 +152,14 @@ void Deoptimizer::PatchCodeForDeoptimization(Isolate* isolate, Code* code) {
// Emit call to lazy deoptimization at all lazy deopt points.
DeoptimizationInputData* deopt_data =
DeoptimizationInputData::cast(code->deoptimization_data());
+ SharedFunctionInfo* shared =
+ SharedFunctionInfo::cast(deopt_data->SharedFunctionInfo());
+ shared->EvictFromOptimizedCodeMap(code, "deoptimized code");
#ifdef DEBUG
Address prev_call_address = NULL;
#endif
+ // For each LLazyBailout instruction insert a call to the corresponding
+ // deoptimization entry.
for (int i = 0; i < deopt_data->DeoptCount(); i++) {
if (deopt_data->Pc(i)->value() == -1) continue;
// Patch lazy deoptimization entry.
@@ -440,6 +463,12 @@ void FrameDescription::SetCallerFp(unsigned offset, intptr_t value) {
}
+void FrameDescription::SetCallerConstantPool(unsigned offset, intptr_t value) {
+ // No out-of-line constant pool support.
+ UNREACHABLE();
+}
+
+
#undef __
diff --git a/deps/v8/src/ia32/disasm-ia32.cc b/deps/v8/src/ia32/disasm-ia32.cc
index 6a7f3bc837..e50a78e345 100644
--- a/deps/v8/src/ia32/disasm-ia32.cc
+++ b/deps/v8/src/ia32/disasm-ia32.cc
@@ -407,10 +407,11 @@ int DisassemblerIA32::PrintRightOperandHelper(
return 2;
} else if (base == ebp) {
int32_t disp = *reinterpret_cast<int32_t*>(modrmp + 2);
- AppendToBuffer("[%s*%d+0x%x]",
+ AppendToBuffer("[%s*%d%s0x%x]",
(this->*register_name)(index),
1 << scale,
- disp);
+ disp < 0 ? "-" : "+",
+ disp < 0 ? -disp : disp);
return 6;
} else if (index != esp && base != ebp) {
// [base+index*scale]
@@ -434,23 +435,30 @@ int DisassemblerIA32::PrintRightOperandHelper(
byte sib = *(modrmp + 1);
int scale, index, base;
get_sib(sib, &scale, &index, &base);
- int disp =
- mod == 2 ? *reinterpret_cast<int32_t*>(modrmp + 2) : *(modrmp + 2);
+ int disp = mod == 2 ? *reinterpret_cast<int32_t*>(modrmp + 2)
+ : *reinterpret_cast<int8_t*>(modrmp + 2);
if (index == base && index == rm /*esp*/ && scale == 0 /*times_1*/) {
- AppendToBuffer("[%s+0x%x]", (this->*register_name)(rm), disp);
+ AppendToBuffer("[%s%s0x%x]",
+ (this->*register_name)(rm),
+ disp < 0 ? "-" : "+",
+ disp < 0 ? -disp : disp);
} else {
- AppendToBuffer("[%s+%s*%d+0x%x]",
+ AppendToBuffer("[%s+%s*%d%s0x%x]",
(this->*register_name)(base),
(this->*register_name)(index),
1 << scale,
- disp);
+ disp < 0 ? "-" : "+",
+ disp < 0 ? -disp : disp);
}
return mod == 2 ? 6 : 3;
} else {
// No sib.
- int disp =
- mod == 2 ? *reinterpret_cast<int32_t*>(modrmp + 1) : *(modrmp + 1);
- AppendToBuffer("[%s+0x%x]", (this->*register_name)(rm), disp);
+ int disp = mod == 2 ? *reinterpret_cast<int32_t*>(modrmp + 1)
+ : *reinterpret_cast<int8_t*>(modrmp + 1);
+ AppendToBuffer("[%s%s0x%x]",
+ (this->*register_name)(rm),
+ disp < 0 ? "-" : "+",
+ disp < 0 ? -disp : disp);
return mod == 2 ? 5 : 2;
}
break;
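
Two fixes are folded into the displacement printing above: one-byte displacements are now read through int8_t so they sign-extend (the old raw byte read turned an encoded -4 into 252), and negative displacements are printed as "-0x4" rather than "+0xfc" or "+0xfffffffc". A compressed sketch of both:

    #include <cstdint>
    #include <cstdio>

    // Formats a displacement the way the patched disassembler does.
    void PrintDisp(const uint8_t* p, bool four_byte) {
      int32_t disp = four_byte ? *reinterpret_cast<const int32_t*>(p)
                               : *reinterpret_cast<const int8_t*>(p);  // sign-extends
      printf("[reg%s0x%x]\n", disp < 0 ? "-" : "+", disp < 0 ? -disp : disp);
    }

    int main() {
      uint8_t byte_disp = 0xfc;                         // mod == 1, encodes -4
      PrintDisp(&byte_disp, false);                     // prints [reg-0x4]
      uint8_t word_disp[4] = {0xc0, 0xff, 0xff, 0xff};  // mod == 2, encodes -64
      PrintDisp(word_disp, true);                       // prints [reg-0x40]
      return 0;
    }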
@@ -881,6 +889,7 @@ static const char* F0Mnem(byte f0byte) {
case 0xAD: return "shrd";
case 0xAC: return "shrd"; // 3-operand version.
case 0xAB: return "bts";
+ case 0xBD: return "bsr";
default: return NULL;
}
}
@@ -1096,22 +1105,26 @@ int DisassemblerIA32::InstructionDecode(v8::internal::Vector<char> out_buffer,
data += SetCC(data);
} else if ((f0byte & 0xF0) == 0x40) {
data += CMov(data);
- } else {
+ } else if (f0byte == 0xAB || f0byte == 0xA5 || f0byte == 0xAD) {
+ // shrd, shld, bts
data += 2;
- if (f0byte == 0xAB || f0byte == 0xA5 || f0byte == 0xAD) {
- // shrd, shld, bts
- AppendToBuffer("%s ", f0mnem);
- int mod, regop, rm;
- get_modrm(*data, &mod, &regop, &rm);
- data += PrintRightOperand(data);
- if (f0byte == 0xAB) {
- AppendToBuffer(",%s", NameOfCPURegister(regop));
- } else {
- AppendToBuffer(",%s,cl", NameOfCPURegister(regop));
- }
+ AppendToBuffer("%s ", f0mnem);
+ int mod, regop, rm;
+ get_modrm(*data, &mod, &regop, &rm);
+ data += PrintRightOperand(data);
+ if (f0byte == 0xAB) {
+ AppendToBuffer(",%s", NameOfCPURegister(regop));
} else {
- UnimplementedInstruction();
+ AppendToBuffer(",%s,cl", NameOfCPURegister(regop));
}
+ } else if (f0byte == 0xBD) {
+ data += 2;
+ int mod, regop, rm;
+ get_modrm(*data, &mod, &regop, &rm);
+ AppendToBuffer("%s %s,", f0mnem, NameOfCPURegister(regop));
+ data += PrintRightOperand(data);
+ } else {
+ UnimplementedInstruction();
}
}
break;
@@ -1606,13 +1619,13 @@ int DisassemblerIA32::InstructionDecode(v8::internal::Vector<char> out_buffer,
get_modrm(*data, &mod, &regop, &rm);
AppendToBuffer("cvtss2sd %s,", NameOfXMMRegister(regop));
data += PrintRightXMMOperand(data);
- } else if (b2 == 0x6F) {
+ } else if (b2 == 0x6F) {
data += 3;
int mod, regop, rm;
get_modrm(*data, &mod, &regop, &rm);
AppendToBuffer("movdqu %s,", NameOfXMMRegister(regop));
data += PrintRightXMMOperand(data);
- } else if (b2 == 0x7F) {
+ } else if (b2 == 0x7F) {
AppendToBuffer("movdqu ");
data += 3;
int mod, regop, rm;
diff --git a/deps/v8/src/ia32/frames-ia32.h b/deps/v8/src/ia32/frames-ia32.h
index e0f3e32f7c..2d6145eeac 100644
--- a/deps/v8/src/ia32/frames-ia32.h
+++ b/deps/v8/src/ia32/frames-ia32.h
@@ -84,6 +84,8 @@ class ExitFrameConstants : public AllStatic {
// FP-relative displacement of the caller's SP. It points just
// below the saved PC.
static const int kCallerSPDisplacement = +2 * kPointerSize;
+
+ static const int kConstantPoolOffset = 0; // Not used
};
diff --git a/deps/v8/src/ia32/full-codegen-ia32.cc b/deps/v8/src/ia32/full-codegen-ia32.cc
index f3125666f8..70a968e8a2 100644
--- a/deps/v8/src/ia32/full-codegen-ia32.cc
+++ b/deps/v8/src/ia32/full-codegen-ia32.cc
@@ -101,6 +101,25 @@ class JumpPatchSite BASE_EMBEDDED {
};
+static void EmitStackCheck(MacroAssembler* masm_,
+ int pointers = 0,
+ Register scratch = esp) {
+ Label ok;
+ Isolate* isolate = masm_->isolate();
+ ExternalReference stack_limit =
+ ExternalReference::address_of_stack_limit(isolate);
+ ASSERT(scratch.is(esp) == (pointers == 0));
+ if (pointers != 0) {
+ __ mov(scratch, esp);
+ __ sub(scratch, Immediate(pointers * kPointerSize));
+ }
+ __ cmp(scratch, Operand::StaticVariable(stack_limit));
+ __ j(above_equal, &ok, Label::kNear);
+ __ call(isolate->builtins()->StackCheck(), RelocInfo::CODE_TARGET);
+ __ bind(&ok);
+}
+
+
// Generate code for a JS function. On entry to the function the receiver
// and arguments have been pushed on the stack left to right, with the
// return address on top of them. The actual argument count matches the
@@ -118,6 +137,9 @@ void FullCodeGenerator::Generate() {
CompilationInfo* info = info_;
handler_table_ =
isolate()->factory()->NewFixedArray(function()->handler_count(), TENURED);
+
+ InitializeFeedbackVector();
+
profiling_counter_ = isolate()->factory()->NewCell(
Handle<Smi>(Smi::FromInt(FLAG_interrupt_budget), isolate()));
SetFunctionPosition(function());
@@ -132,10 +154,10 @@ void FullCodeGenerator::Generate() {
}
#endif
- // Classic mode functions and builtins need to replace the receiver with the
+ // Sloppy mode functions and builtins need to replace the receiver with the
// global proxy when called as functions (without an explicit receiver
// object).
- if (info->is_classic_mode() && !info->is_native()) {
+ if (info->strict_mode() == SLOPPY && !info->is_native()) {
Label ok;
// +1 for return address.
int receiver_offset = (info->scope()->num_parameters() + 1) * kPointerSize;
@@ -168,8 +190,26 @@ void FullCodeGenerator::Generate() {
if (locals_count == 1) {
__ push(Immediate(isolate()->factory()->undefined_value()));
} else if (locals_count > 1) {
+ if (locals_count >= 128) {
+ EmitStackCheck(masm_, locals_count, ecx);
+ }
__ mov(eax, Immediate(isolate()->factory()->undefined_value()));
- for (int i = 0; i < locals_count; i++) {
+ const int kMaxPushes = 32;
+ if (locals_count >= kMaxPushes) {
+ int loop_iterations = locals_count / kMaxPushes;
+ __ mov(ecx, loop_iterations);
+ Label loop_header;
+ __ bind(&loop_header);
+ // Do pushes.
+ for (int i = 0; i < kMaxPushes; i++) {
+ __ push(eax);
+ }
+ __ dec(ecx);
+ __ j(not_zero, &loop_header, Label::kNear);
+ }
+ int remaining = locals_count % kMaxPushes;
+ // Emit the remaining pushes.
+ for (int i = 0; i < remaining; i++) {
__ push(eax);
}
}
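
The new prologue code above does two things for large frames: once locals_count reaches 128 it first checks, via EmitStackCheck with ecx as scratch, that esp minus the whole reservation still clears the stack limit, and it then fills the locals with undefined using locals_count / 32 trips through a 32-push loop plus an unrolled remainder. A quick check that the split always covers every local, using the same kMaxPushes value as the diff:

    #include <cassert>

    int main() {
      const int kMaxPushes = 32;  // same constant as in FullCodeGenerator::Generate
      for (int locals_count = 2; locals_count < 1000; ++locals_count) {
        int pushes = 0;
        if (locals_count >= kMaxPushes) {
          int loop_iterations = locals_count / kMaxPushes;
          pushes += loop_iterations * kMaxPushes;  // the counted loop over ecx
        }
        pushes += locals_count % kMaxPushes;       // the unrolled remainder
        assert(pushes == locals_count);
      }
      return 0;
    }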
@@ -185,13 +225,13 @@ void FullCodeGenerator::Generate() {
if (FLAG_harmony_scoping && info->scope()->is_global_scope()) {
__ push(edi);
__ Push(info->scope()->GetScopeInfo());
- __ CallRuntime(Runtime::kNewGlobalContext, 2);
+ __ CallRuntime(Runtime::kHiddenNewGlobalContext, 2);
} else if (heap_slots <= FastNewContextStub::kMaximumSlots) {
FastNewContextStub stub(heap_slots);
__ CallStub(&stub);
} else {
__ push(edi);
- __ CallRuntime(Runtime::kNewFunctionContext, 1);
+ __ CallRuntime(Runtime::kHiddenNewFunctionContext, 1);
}
function_in_register = false;
// Context is returned in eax. It replaces the context passed to us.
@@ -242,12 +282,12 @@ void FullCodeGenerator::Generate() {
// The stub will rewrite receiver and parameter count if the previous
// stack frame was an arguments adapter frame.
ArgumentsAccessStub::Type type;
- if (!is_classic_mode()) {
+ if (strict_mode() == STRICT) {
type = ArgumentsAccessStub::NEW_STRICT;
} else if (function()->has_duplicate_parameters()) {
- type = ArgumentsAccessStub::NEW_NON_STRICT_SLOW;
+ type = ArgumentsAccessStub::NEW_SLOPPY_SLOW;
} else {
- type = ArgumentsAccessStub::NEW_NON_STRICT_FAST;
+ type = ArgumentsAccessStub::NEW_SLOPPY_FAST;
}
ArgumentsAccessStub stub(type);
__ CallStub(&stub);
@@ -273,7 +313,7 @@ void FullCodeGenerator::Generate() {
if (scope()->is_function_scope() && scope()->function() != NULL) {
VariableDeclaration* function = scope()->function();
ASSERT(function->proxy()->var()->mode() == CONST ||
- function->proxy()->var()->mode() == CONST_HARMONY);
+ function->proxy()->var()->mode() == CONST_LEGACY);
ASSERT(function->proxy()->var()->location() != Variable::UNALLOCATED);
VisitVariableDeclaration(function);
}
@@ -282,13 +322,7 @@ void FullCodeGenerator::Generate() {
{ Comment cmnt(masm_, "[ Stack check");
PrepareForBailoutForId(BailoutId::Declarations(), NO_REGISTERS);
- Label ok;
- ExternalReference stack_limit =
- ExternalReference::address_of_stack_limit(isolate());
- __ cmp(esp, Operand::StaticVariable(stack_limit));
- __ j(above_equal, &ok, Label::kNear);
- __ call(isolate()->builtins()->StackCheck(), RelocInfo::CODE_TARGET);
- __ bind(&ok);
+ EmitStackCheck(masm_);
}
{ Comment cmnt(masm_, "[ Body");
@@ -308,7 +342,7 @@ void FullCodeGenerator::Generate() {
void FullCodeGenerator::ClearAccumulator() {
- __ Set(eax, Immediate(Smi::FromInt(0)));
+ __ Move(eax, Immediate(Smi::FromInt(0)));
}
@@ -467,9 +501,9 @@ void FullCodeGenerator::EffectContext::Plug(Handle<Object> lit) const {
void FullCodeGenerator::AccumulatorValueContext::Plug(
Handle<Object> lit) const {
if (lit->IsSmi()) {
- __ SafeSet(result_register(), Immediate(lit));
+ __ SafeMove(result_register(), Immediate(lit));
} else {
- __ Set(result_register(), Immediate(lit));
+ __ Move(result_register(), Immediate(lit));
}
}
@@ -626,7 +660,7 @@ void FullCodeGenerator::DoTest(Expression* condition,
Label* if_false,
Label* fall_through) {
Handle<Code> ic = ToBooleanStub::GetUninitialized(isolate());
- CallIC(ic, NOT_CONTEXTUAL, condition->test_id());
+ CallIC(ic, condition->test_id());
__ test(result_register(), result_register());
// The stub returns nonzero for true.
Split(not_zero, if_true, if_false, fall_through);
@@ -743,7 +777,7 @@ void FullCodeGenerator::VisitVariableDeclaration(
VariableProxy* proxy = declaration->proxy();
VariableMode mode = declaration->mode();
Variable* variable = proxy->var();
- bool hole_init = mode == CONST || mode == CONST_HARMONY || mode == LET;
+ bool hole_init = mode == LET || mode == CONST || mode == CONST_LEGACY;
switch (variable->location()) {
case Variable::UNALLOCATED:
globals_->Add(variable->name(), zone());
@@ -790,7 +824,7 @@ void FullCodeGenerator::VisitVariableDeclaration(
} else {
__ push(Immediate(Smi::FromInt(0))); // Indicates no initial value.
}
- __ CallRuntime(Runtime::kDeclareContextSlot, 4);
+ __ CallRuntime(Runtime::kHiddenDeclareContextSlot, 4);
break;
}
}
@@ -843,7 +877,7 @@ void FullCodeGenerator::VisitFunctionDeclaration(
__ push(Immediate(variable->name()));
__ push(Immediate(Smi::FromInt(NONE)));
VisitForStackValue(declaration->fun());
- __ CallRuntime(Runtime::kDeclareContextSlot, 4);
+ __ CallRuntime(Runtime::kHiddenDeclareContextSlot, 4);
break;
}
}
@@ -913,7 +947,7 @@ void FullCodeGenerator::DeclareGlobals(Handle<FixedArray> pairs) {
__ push(esi); // The context is the first argument.
__ Push(pairs);
__ Push(Smi::FromInt(DeclareGlobalsFlags()));
- __ CallRuntime(Runtime::kDeclareGlobals, 3);
+ __ CallRuntime(Runtime::kHiddenDeclareGlobals, 3);
// Return value is ignored.
}
@@ -921,7 +955,7 @@ void FullCodeGenerator::DeclareGlobals(Handle<FixedArray> pairs) {
void FullCodeGenerator::DeclareModules(Handle<FixedArray> descriptions) {
// Call the runtime to declare the modules.
__ Push(descriptions);
- __ CallRuntime(Runtime::kDeclareModules, 1);
+ __ CallRuntime(Runtime::kHiddenDeclareModules, 1);
// Return value is ignored.
}
@@ -977,7 +1011,7 @@ void FullCodeGenerator::VisitSwitchStatement(SwitchStatement* stmt) {
// Record position before stub call for type feedback.
SetSourcePosition(clause->position());
Handle<Code> ic = CompareIC::GetUninitialized(isolate(), Token::EQ_STRICT);
- CallIC(ic, NOT_CONTEXTUAL, clause->CompareId());
+ CallIC(ic, clause->CompareId());
patch_site.EmitPatchInfo();
Label skip;
@@ -1021,6 +1055,8 @@ void FullCodeGenerator::VisitSwitchStatement(SwitchStatement* stmt) {
void FullCodeGenerator::VisitForInStatement(ForInStatement* stmt) {
Comment cmnt(masm_, "[ ForInStatement");
+ int slot = stmt->ForInFeedbackSlot();
+
SetStatementPosition(stmt);
Label loop, exit;
@@ -1099,20 +1135,22 @@ void FullCodeGenerator::VisitForInStatement(ForInStatement* stmt) {
Label non_proxy;
__ bind(&fixed_array);
- Handle<Cell> cell = isolate()->factory()->NewCell(
- Handle<Object>(Smi::FromInt(TypeFeedbackCells::kForInFastCaseMarker),
- isolate()));
- RecordTypeFeedbackCell(stmt->ForInFeedbackId(), cell);
- __ LoadHeapObject(ebx, cell);
- __ mov(FieldOperand(ebx, Cell::kValueOffset),
- Immediate(Smi::FromInt(TypeFeedbackCells::kForInSlowCaseMarker)));
+ Handle<Object> feedback = Handle<Object>(
+ Smi::FromInt(TypeFeedbackInfo::kForInFastCaseMarker),
+ isolate());
+ StoreFeedbackVectorSlot(slot, feedback);
+
+ // No need for a write barrier; we are storing a Smi in the feedback vector.
+ __ LoadHeapObject(ebx, FeedbackVector());
+ __ mov(FieldOperand(ebx, FixedArray::OffsetOfElementAt(slot)),
+ Immediate(Smi::FromInt(TypeFeedbackInfo::kForInSlowCaseMarker)));
__ mov(ebx, Immediate(Smi::FromInt(1))); // Smi indicates slow check
__ mov(ecx, Operand(esp, 0 * kPointerSize)); // Get enumerated object
STATIC_ASSERT(FIRST_JS_PROXY_TYPE == FIRST_SPEC_OBJECT_TYPE);
__ CmpObjectType(ecx, LAST_JS_PROXY_TYPE, ecx);
__ j(above, &non_proxy);
- __ Set(ebx, Immediate(Smi::FromInt(0))); // Zero indicates proxy
+ __ Move(ebx, Immediate(Smi::FromInt(0))); // Zero indicates proxy
__ bind(&non_proxy);
__ push(ebx); // Smi
__ push(eax); // Array
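
The "storing a Smi" remark above is why the write barrier can be skipped here: with kSmiTag == 0 and kSmiTagSize == 1 (both asserted elsewhere in this file), a Smi is just an integer shifted left by one with a clear low bit, so it never looks like a heap pointer and the incremental marker has nothing to track for this store. A tiny illustration of that tag test:

    #include <cassert>
    #include <cstdint>

    // kSmiTag == 0, kSmiTagSize == 1; heap pointers carry a set low bit (assumed).
    bool LooksLikeSmi(uint32_t word) { return (word & 1) == 0; }

    uint32_t TagSmi(int32_t value) { return static_cast<uint32_t>(value) << 1; }

    int main() {
      // The ForIn fast/slow case markers written into the feedback vector are Smis.
      assert(LooksLikeSmi(TagSmi(0)));
      assert(LooksLikeSmi(TagSmi(1)));
      // A tagged heap object pointer would have the low bit set and would need
      // the usual RecordWrite path.
      assert(!LooksLikeSmi(0x12345679u));
      return 0;
    }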
@@ -1260,7 +1298,7 @@ void FullCodeGenerator::EmitNewClosure(Handle<SharedFunctionInfo> info,
!pretenure &&
scope()->is_function_scope() &&
info->num_literals() == 0) {
- FastNewClosureStub stub(info->language_mode(), info->is_generator());
+ FastNewClosureStub stub(info->strict_mode(), info->is_generator());
__ mov(ebx, Immediate(info));
__ CallStub(&stub);
} else {
@@ -1269,7 +1307,7 @@ void FullCodeGenerator::EmitNewClosure(Handle<SharedFunctionInfo> info,
__ push(Immediate(pretenure
? isolate()->factory()->true_value()
: isolate()->factory()->false_value()));
- __ CallRuntime(Runtime::kNewClosure, 3);
+ __ CallRuntime(Runtime::kHiddenNewClosure, 3);
}
context()->Plug(eax);
}
@@ -1290,7 +1328,7 @@ void FullCodeGenerator::EmitLoadGlobalCheckExtensions(Variable* var,
Scope* s = scope();
while (s != NULL) {
if (s->num_heap_slots() > 0) {
- if (s->calls_non_strict_eval()) {
+ if (s->calls_sloppy_eval()) {
// Check that extension is NULL.
__ cmp(ContextOperand(context, Context::EXTENSION_INDEX),
Immediate(0));
@@ -1304,7 +1342,7 @@ void FullCodeGenerator::EmitLoadGlobalCheckExtensions(Variable* var,
// If no outer scope calls eval, we do not need to check more
// context extensions. If we have reached an eval scope, we check
// all extensions from this point.
- if (!s->outer_scope_calls_non_strict_eval() || s->is_eval_scope()) break;
+ if (!s->outer_scope_calls_sloppy_eval() || s->is_eval_scope()) break;
s = s->outer_scope();
}
@@ -1349,7 +1387,7 @@ MemOperand FullCodeGenerator::ContextSlotOperandCheckExtensions(Variable* var,
for (Scope* s = scope(); s != var->scope(); s = s->outer_scope()) {
if (s->num_heap_slots() > 0) {
- if (s->calls_non_strict_eval()) {
+ if (s->calls_sloppy_eval()) {
// Check that extension is NULL.
__ cmp(ContextOperand(context, Context::EXTENSION_INDEX),
Immediate(0));
@@ -1386,16 +1424,15 @@ void FullCodeGenerator::EmitDynamicLookupFastCase(Variable* var,
} else if (var->mode() == DYNAMIC_LOCAL) {
Variable* local = var->local_if_not_shadowed();
__ mov(eax, ContextSlotOperandCheckExtensions(local, slow));
- if (local->mode() == LET ||
- local->mode() == CONST ||
- local->mode() == CONST_HARMONY) {
+ if (local->mode() == LET || local->mode() == CONST ||
+ local->mode() == CONST_LEGACY) {
__ cmp(eax, isolate()->factory()->the_hole_value());
__ j(not_equal, done);
- if (local->mode() == CONST) {
+ if (local->mode() == CONST_LEGACY) {
__ mov(eax, isolate()->factory()->undefined_value());
- } else { // LET || CONST_HARMONY
+ } else { // LET || CONST
__ push(Immediate(var->name()));
- __ CallRuntime(Runtime::kThrowReferenceError, 1);
+ __ CallRuntime(Runtime::kHiddenThrowReferenceError, 1);
}
}
__ jmp(done);
@@ -1412,7 +1449,7 @@ void FullCodeGenerator::EmitVariableLoad(VariableProxy* proxy) {
// variables.
switch (var->location()) {
case Variable::UNALLOCATED: {
- Comment cmnt(masm_, "Global variable");
+ Comment cmnt(masm_, "[ Global variable");
// Use inline caching. Variable name is passed in ecx and the global
// object in eax.
__ mov(edx, GlobalObjectOperand());
@@ -1425,9 +1462,8 @@ void FullCodeGenerator::EmitVariableLoad(VariableProxy* proxy) {
case Variable::PARAMETER:
case Variable::LOCAL:
case Variable::CONTEXT: {
- Comment cmnt(masm_, var->IsContextSlot()
- ? "Context variable"
- : "Stack variable");
+ Comment cmnt(masm_, var->IsContextSlot() ? "[ Context variable"
+ : "[ Stack variable");
if (var->binding_needs_init()) {
// var->scope() may be NULL when the proxy is located in eval code and
// refers to a potential outside binding. Currently those bindings are
@@ -1459,7 +1495,7 @@ void FullCodeGenerator::EmitVariableLoad(VariableProxy* proxy) {
// Check that we always have valid source position.
ASSERT(var->initializer_position() != RelocInfo::kNoPosition);
ASSERT(proxy->position() != RelocInfo::kNoPosition);
- skip_init_check = var->mode() != CONST &&
+ skip_init_check = var->mode() != CONST_LEGACY &&
var->initializer_position() < proxy->position();
}
@@ -1469,14 +1505,14 @@ void FullCodeGenerator::EmitVariableLoad(VariableProxy* proxy) {
GetVar(eax, var);
__ cmp(eax, isolate()->factory()->the_hole_value());
__ j(not_equal, &done, Label::kNear);
- if (var->mode() == LET || var->mode() == CONST_HARMONY) {
+ if (var->mode() == LET || var->mode() == CONST) {
// Throw a reference error when using an uninitialized let/const
// binding in harmony mode.
__ push(Immediate(var->name()));
- __ CallRuntime(Runtime::kThrowReferenceError, 1);
+ __ CallRuntime(Runtime::kHiddenThrowReferenceError, 1);
} else {
// Uninitialized const bindings outside of harmony mode are unholed.
- ASSERT(var->mode() == CONST);
+ ASSERT(var->mode() == CONST_LEGACY);
__ mov(eax, isolate()->factory()->undefined_value());
}
__ bind(&done);
@@ -1489,15 +1525,15 @@ void FullCodeGenerator::EmitVariableLoad(VariableProxy* proxy) {
}
case Variable::LOOKUP: {
+ Comment cmnt(masm_, "[ Lookup variable");
Label done, slow;
// Generate code for loading from variables potentially shadowed
// by eval-introduced variables.
EmitDynamicLookupFastCase(var, NOT_INSIDE_TYPEOF, &slow, &done);
__ bind(&slow);
- Comment cmnt(masm_, "Lookup variable");
__ push(esi); // Context.
__ push(Immediate(var->name()));
- __ CallRuntime(Runtime::kLoadContextSlot, 2);
+ __ CallRuntime(Runtime::kHiddenLoadContextSlot, 2);
__ bind(&done);
context()->Plug(eax);
break;
@@ -1528,7 +1564,7 @@ void FullCodeGenerator::VisitRegExpLiteral(RegExpLiteral* expr) {
__ push(Immediate(Smi::FromInt(expr->literal_index())));
__ push(Immediate(expr->pattern()));
__ push(Immediate(expr->flags()));
- __ CallRuntime(Runtime::kMaterializeRegExpLiteral, 4);
+ __ CallRuntime(Runtime::kHiddenMaterializeRegExpLiteral, 4);
__ mov(ebx, eax);
__ bind(&materialized);
@@ -1540,7 +1576,7 @@ void FullCodeGenerator::VisitRegExpLiteral(RegExpLiteral* expr) {
__ bind(&runtime_allocate);
__ push(ebx);
__ push(Immediate(Smi::FromInt(size)));
- __ CallRuntime(Runtime::kAllocateInNewSpace, 1);
+ __ CallRuntime(Runtime::kHiddenAllocateInNewSpace, 1);
__ pop(ebx);
__ bind(&allocated);
@@ -1581,8 +1617,7 @@ void FullCodeGenerator::VisitObjectLiteral(ObjectLiteral* expr) {
? ObjectLiteral::kHasFunction
: ObjectLiteral::kNoFlags;
int properties_count = constant_properties->length() / 2;
- if ((FLAG_track_double_fields && expr->may_store_doubles()) ||
- expr->depth() > 1 || Serializer::enabled() ||
+ if (expr->may_store_doubles() || expr->depth() > 1 || Serializer::enabled() ||
flags != ObjectLiteral::kFastElements ||
properties_count > FastCloneShallowObjectStub::kMaximumClonedProperties) {
__ mov(edi, Operand(ebp, JavaScriptFrameConstants::kFunctionOffset));
@@ -1590,7 +1625,7 @@ void FullCodeGenerator::VisitObjectLiteral(ObjectLiteral* expr) {
__ push(Immediate(Smi::FromInt(expr->literal_index())));
__ push(Immediate(constant_properties));
__ push(Immediate(Smi::FromInt(flags)));
- __ CallRuntime(Runtime::kCreateObjectLiteral, 4);
+ __ CallRuntime(Runtime::kHiddenCreateObjectLiteral, 4);
} else {
__ mov(edi, Operand(ebp, JavaScriptFrameConstants::kFunctionOffset));
__ mov(eax, FieldOperand(edi, JSFunction::kLiteralsOffset));
@@ -1633,7 +1668,7 @@ void FullCodeGenerator::VisitObjectLiteral(ObjectLiteral* expr) {
VisitForAccumulatorValue(value);
__ mov(ecx, Immediate(key->value()));
__ mov(edx, Operand(esp, 0));
- CallStoreIC(NOT_CONTEXTUAL, key->LiteralFeedbackId());
+ CallStoreIC(key->LiteralFeedbackId());
PrepareForBailoutForId(key->id(), NO_REGISTERS);
} else {
VisitForEffect(value);
@@ -1743,7 +1778,7 @@ void FullCodeGenerator::VisitArrayLiteral(ArrayLiteral* expr) {
__ push(Immediate(Smi::FromInt(expr->literal_index())));
__ push(Immediate(constant_elements));
__ push(Immediate(Smi::FromInt(flags)));
- __ CallRuntime(Runtime::kCreateArrayLiteral, 4);
+ __ CallRuntime(Runtime::kHiddenCreateArrayLiteral, 4);
} else {
ASSERT(IsFastSmiOrObjectElementsKind(constant_elements_kind) ||
FLAG_smi_only_arrays);
@@ -1814,13 +1849,9 @@ void FullCodeGenerator::VisitArrayLiteral(ArrayLiteral* expr) {
void FullCodeGenerator::VisitAssignment(Assignment* expr) {
+ ASSERT(expr->target()->IsValidLeftHandSide());
+
Comment cmnt(masm_, "[ Assignment");
- // Invalid left-hand sides are rewritten to have a 'throw ReferenceError'
- // on the left-hand side.
- if (!expr->target()->IsValidLeftHandSide()) {
- VisitForEffect(expr->target());
- return;
- }
// Left-hand side can only be a property, a global or a (parameter or local)
// slot.
@@ -1960,7 +1991,7 @@ void FullCodeGenerator::VisitYield(Yield* expr) {
__ cmp(esp, ebx);
__ j(equal, &post_runtime);
__ push(eax); // generator object
- __ CallRuntime(Runtime::kSuspendJSGeneratorObject, 1);
+ __ CallRuntime(Runtime::kHiddenSuspendJSGeneratorObject, 1);
__ mov(context_register(),
Operand(ebp, StandardFrameConstants::kContextOffset));
__ bind(&post_runtime);
@@ -2028,7 +2059,7 @@ void FullCodeGenerator::VisitYield(Yield* expr) {
__ mov(ecx, esi);
__ RecordWriteField(eax, JSGeneratorObject::kContextOffset, ecx, edx,
kDontSaveFPRegs);
- __ CallRuntime(Runtime::kSuspendJSGeneratorObject, 1);
+ __ CallRuntime(Runtime::kHiddenSuspendJSGeneratorObject, 1);
__ mov(context_register(),
Operand(ebp, StandardFrameConstants::kContextOffset));
__ pop(eax); // result
@@ -2047,7 +2078,7 @@ void FullCodeGenerator::VisitYield(Yield* expr) {
__ bind(&l_call);
__ mov(edx, Operand(esp, kPointerSize));
Handle<Code> ic = isolate()->builtins()->KeyedLoadIC_Initialize();
- CallIC(ic, NOT_CONTEXTUAL, TypeFeedbackId::None());
+ CallIC(ic, TypeFeedbackId::None());
__ mov(edi, eax);
__ mov(Operand(esp, 2 * kPointerSize), edi);
CallFunctionStub stub(1, CALL_AS_METHOD);
@@ -2082,7 +2113,7 @@ void FullCodeGenerator::EmitGeneratorResume(Expression *generator,
Expression *value,
JSGeneratorObject::ResumeMode resume_mode) {
// The value stays in eax, and is ultimately read by the resumed generator, as
- // if the CallRuntime(Runtime::kSuspendJSGeneratorObject) returned it. Or it
+ // if CallRuntime(Runtime::kHiddenSuspendJSGeneratorObject) returned it. Or it
// is read to throw the value when the resumed generator is already closed.
// ebx will hold the generator object until the activation has been resumed.
VisitForStackValue(generator);
@@ -2162,7 +2193,7 @@ void FullCodeGenerator::EmitGeneratorResume(Expression *generator,
__ push(ebx);
__ push(result_register());
__ Push(Smi::FromInt(resume_mode));
- __ CallRuntime(Runtime::kResumeJSGeneratorObject, 3);
+ __ CallRuntime(Runtime::kHiddenResumeJSGeneratorObject, 3);
// Not reached: the runtime call returns elsewhere.
__ Abort(kGeneratorFailedToResume);
@@ -2176,14 +2207,14 @@ void FullCodeGenerator::EmitGeneratorResume(Expression *generator,
} else {
// Throw the provided value.
__ push(eax);
- __ CallRuntime(Runtime::kThrow, 1);
+ __ CallRuntime(Runtime::kHiddenThrow, 1);
}
__ jmp(&done);
// Throw error if we attempt to operate on a running generator.
__ bind(&wrong_state);
__ push(ebx);
- __ CallRuntime(Runtime::kThrowGeneratorStateError, 1);
+ __ CallRuntime(Runtime::kHiddenThrowGeneratorStateError, 1);
__ bind(&done);
context()->Plug(result_register());
@@ -2201,7 +2232,7 @@ void FullCodeGenerator::EmitCreateIteratorResult(bool done) {
__ bind(&gc_required);
__ Push(Smi::FromInt(map->instance_size()));
- __ CallRuntime(Runtime::kAllocateInNewSpace, 1);
+ __ CallRuntime(Runtime::kHiddenAllocateInNewSpace, 1);
__ mov(context_register(),
Operand(ebp, StandardFrameConstants::kContextOffset));
@@ -2237,7 +2268,7 @@ void FullCodeGenerator::EmitNamedPropertyLoad(Property* prop) {
void FullCodeGenerator::EmitKeyedPropertyLoad(Property* prop) {
SetSourcePosition(prop->position());
Handle<Code> ic = isolate()->builtins()->KeyedLoadIC_Initialize();
- CallIC(ic, NOT_CONTEXTUAL, prop->PropertyFeedbackId());
+ CallIC(ic, prop->PropertyFeedbackId());
}
@@ -2258,8 +2289,7 @@ void FullCodeGenerator::EmitInlineSmiBinaryOp(BinaryOperation* expr,
__ bind(&stub_call);
__ mov(eax, ecx);
BinaryOpICStub stub(op, mode);
- CallIC(stub.GetCode(isolate()), NOT_CONTEXTUAL,
- expr->BinaryOperationFeedbackId());
+ CallIC(stub.GetCode(isolate()), expr->BinaryOperationFeedbackId());
patch_site.EmitPatchInfo();
__ jmp(&done, Label::kNear);
@@ -2269,10 +2299,9 @@ void FullCodeGenerator::EmitInlineSmiBinaryOp(BinaryOperation* expr,
switch (op) {
case Token::SAR:
- __ SmiUntag(eax);
__ SmiUntag(ecx);
__ sar_cl(eax); // No checks of result necessary
- __ SmiTag(eax);
+ __ and_(eax, Immediate(~kSmiTagMask));
break;
case Token::SHL: {
Label result_ok;
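The rewritten Token::SAR case a few lines above shifts the still-tagged smi in eax and then clears the tag bit, instead of untagging, shifting, and retagging. A minimal plain-C++ sketch of why that is equivalent, assuming ia32 smi tagging (tagged value == value * 2, tag bit 0) and an arithmetic right shift on negative values; this is illustrative only, not V8 code.

#include <cassert>
#include <cstdint>

// Smi encoding on ia32: tagged = value * 2, low bit is the (zero) tag.
static int32_t SarOnTaggedSmi(int32_t tagged_smi, int shift) {
  int32_t shifted = tagged_smi >> shift;   // arithmetic shift of the tagged value
  return shifted & ~1;                     // clear the tag bit => tagged (value >> shift)
}

int main() {
  for (int32_t v : {5, -5, 0, 1234567, -1234567}) {
    for (int s = 0; s < 31; ++s) {
      assert(SarOnTaggedSmi(v * 2, s) == (v >> s) * 2);
    }
  }
}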
@@ -2344,20 +2373,14 @@ void FullCodeGenerator::EmitBinaryOp(BinaryOperation* expr,
__ pop(edx);
BinaryOpICStub stub(op, mode);
JumpPatchSite patch_site(masm_); // unbound, signals no inlined smi code.
- CallIC(stub.GetCode(isolate()), NOT_CONTEXTUAL,
- expr->BinaryOperationFeedbackId());
+ CallIC(stub.GetCode(isolate()), expr->BinaryOperationFeedbackId());
patch_site.EmitPatchInfo();
context()->Plug(eax);
}
void FullCodeGenerator::EmitAssignment(Expression* expr) {
- // Invalid left-hand sides are rewritten by the parser to have a 'throw
- // ReferenceError' on the left-hand side.
- if (!expr->IsValidLeftHandSide()) {
- VisitForEffect(expr);
- return;
- }
+ ASSERT(expr->IsValidLeftHandSide());
// Left-hand side can only be a property, a global or a (parameter or local)
// slot.
@@ -2383,7 +2406,7 @@ void FullCodeGenerator::EmitAssignment(Expression* expr) {
__ mov(edx, eax);
__ pop(eax); // Restore value.
__ mov(ecx, prop->key()->AsLiteral()->value());
- CallStoreIC(NOT_CONTEXTUAL);
+ CallStoreIC();
break;
}
case KEYED_PROPERTY: {
@@ -2393,7 +2416,7 @@ void FullCodeGenerator::EmitAssignment(Expression* expr) {
__ mov(ecx, eax);
__ pop(edx); // Receiver.
__ pop(eax); // Restore value.
- Handle<Code> ic = is_classic_mode()
+ Handle<Code> ic = strict_mode() == SLOPPY
? isolate()->builtins()->KeyedStoreIC_Initialize()
: isolate()->builtins()->KeyedStoreIC_Initialize_Strict();
CallIC(ic);
@@ -2404,44 +2427,58 @@ void FullCodeGenerator::EmitAssignment(Expression* expr) {
}
+void FullCodeGenerator::EmitStoreToStackLocalOrContextSlot(
+ Variable* var, MemOperand location) {
+ __ mov(location, eax);
+ if (var->IsContextSlot()) {
+ __ mov(edx, eax);
+ int offset = Context::SlotOffset(var->index());
+ __ RecordWriteContextSlot(ecx, offset, edx, ebx, kDontSaveFPRegs);
+ }
+}
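A conceptual sketch of why the new helper above calls RecordWriteContextSlot only in the IsContextSlot() case: a context is a heap object, so storing a pointer into it has to notify the garbage collector's write barrier, while stack slots are rescanned as roots at every collection and need no barrier. The types and the RecordWrite hook below are hypothetical stand-ins, not V8's heap API.

struct Object;
struct Heap {
  // Hypothetical stand-in for the RecordWrite / RecordWriteContextSlot machinery.
  void RecordWrite(Object** slot, Object* value) { (void)slot; (void)value; }
};

// Store into a slot that lives inside a heap object (e.g. a Context slot).
inline void StoreHeapSlot(Heap* heap, Object** slot, Object* value) {
  *slot = value;
  heap->RecordWrite(slot, value);  // keeps incremental/generational GC consistent
}

// Store into a stack slot: no barrier needed, the stack is scanned at GC time.
inline void StoreStackSlot(Object** stack_slot, Object* value) {
  *stack_slot = value;
}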
+
+
+void FullCodeGenerator::EmitCallStoreContextSlot(
+ Handle<String> name, StrictMode strict_mode) {
+ __ push(eax); // Value.
+ __ push(esi); // Context.
+ __ push(Immediate(name));
+ __ push(Immediate(Smi::FromInt(strict_mode)));
+ __ CallRuntime(Runtime::kHiddenStoreContextSlot, 4);
+}
+
+
void FullCodeGenerator::EmitVariableAssignment(Variable* var,
Token::Value op) {
if (var->IsUnallocated()) {
// Global var, const, or let.
__ mov(ecx, var->name());
__ mov(edx, GlobalObjectOperand());
- CallStoreIC(CONTEXTUAL);
- } else if (op == Token::INIT_CONST) {
+ CallStoreIC();
+
+ } else if (op == Token::INIT_CONST_LEGACY) {
// Const initializers need a write barrier.
ASSERT(!var->IsParameter()); // No const parameters.
- if (var->IsStackLocal()) {
- Label skip;
- __ mov(edx, StackOperand(var));
- __ cmp(edx, isolate()->factory()->the_hole_value());
- __ j(not_equal, &skip);
- __ mov(StackOperand(var), eax);
- __ bind(&skip);
- } else {
- ASSERT(var->IsContextSlot() || var->IsLookupSlot());
- // Like var declarations, const declarations are hoisted to function
- // scope. However, unlike var initializers, const initializers are
- // able to drill a hole to that function context, even from inside a
- // 'with' context. We thus bypass the normal static scope lookup for
- // var->IsContextSlot().
+ if (var->IsLookupSlot()) {
__ push(eax);
__ push(esi);
__ push(Immediate(var->name()));
- __ CallRuntime(Runtime::kInitializeConstContextSlot, 3);
+ __ CallRuntime(Runtime::kHiddenInitializeConstContextSlot, 3);
+ } else {
+ ASSERT(var->IsStackLocal() || var->IsContextSlot());
+ Label skip;
+ MemOperand location = VarOperand(var, ecx);
+ __ mov(edx, location);
+ __ cmp(edx, isolate()->factory()->the_hole_value());
+ __ j(not_equal, &skip, Label::kNear);
+ EmitStoreToStackLocalOrContextSlot(var, location);
+ __ bind(&skip);
}
} else if (var->mode() == LET && op != Token::INIT_LET) {
// Non-initializing assignment to let variable needs a write barrier.
if (var->IsLookupSlot()) {
- __ push(eax); // Value.
- __ push(esi); // Context.
- __ push(Immediate(var->name()));
- __ push(Immediate(Smi::FromInt(language_mode())));
- __ CallRuntime(Runtime::kStoreContextSlot, 4);
+ EmitCallStoreContextSlot(var->name(), strict_mode());
} else {
ASSERT(var->IsStackAllocated() || var->IsContextSlot());
Label assign;
@@ -2450,20 +2487,18 @@ void FullCodeGenerator::EmitVariableAssignment(Variable* var,
__ cmp(edx, isolate()->factory()->the_hole_value());
__ j(not_equal, &assign, Label::kNear);
__ push(Immediate(var->name()));
- __ CallRuntime(Runtime::kThrowReferenceError, 1);
+ __ CallRuntime(Runtime::kHiddenThrowReferenceError, 1);
__ bind(&assign);
- __ mov(location, eax);
- if (var->IsContextSlot()) {
- __ mov(edx, eax);
- int offset = Context::SlotOffset(var->index());
- __ RecordWriteContextSlot(ecx, offset, edx, ebx, kDontSaveFPRegs);
- }
+ EmitStoreToStackLocalOrContextSlot(var, location);
}
- } else if (!var->is_const_mode() || op == Token::INIT_CONST_HARMONY) {
+ } else if (!var->is_const_mode() || op == Token::INIT_CONST) {
// Assignment to var or initializing assignment to let/const
// in harmony mode.
- if (var->IsStackAllocated() || var->IsContextSlot()) {
+ if (var->IsLookupSlot()) {
+ EmitCallStoreContextSlot(var->name(), strict_mode());
+ } else {
+ ASSERT(var->IsStackAllocated() || var->IsContextSlot());
MemOperand location = VarOperand(var, ecx);
if (generate_debug_code_ && op == Token::INIT_LET) {
// Check for an uninitialized let binding.
@@ -2471,20 +2506,7 @@ void FullCodeGenerator::EmitVariableAssignment(Variable* var,
__ cmp(edx, isolate()->factory()->the_hole_value());
__ Check(equal, kLetBindingReInitialization);
}
- // Perform the assignment.
- __ mov(location, eax);
- if (var->IsContextSlot()) {
- __ mov(edx, eax);
- int offset = Context::SlotOffset(var->index());
- __ RecordWriteContextSlot(ecx, offset, edx, ebx, kDontSaveFPRegs);
- }
- } else {
- ASSERT(var->IsLookupSlot());
- __ push(eax); // Value.
- __ push(esi); // Context.
- __ push(Immediate(var->name()));
- __ push(Immediate(Smi::FromInt(language_mode())));
- __ CallRuntime(Runtime::kStoreContextSlot, 4);
+ EmitStoreToStackLocalOrContextSlot(var, location);
}
}
// Non-initializing assignments to consts are ignored.
@@ -2504,7 +2526,7 @@ void FullCodeGenerator::EmitNamedPropertyAssignment(Assignment* expr) {
SetSourcePosition(expr->position());
__ mov(ecx, prop->key()->AsLiteral()->value());
__ pop(edx);
- CallStoreIC(NOT_CONTEXTUAL, expr->AssignmentFeedbackId());
+ CallStoreIC(expr->AssignmentFeedbackId());
PrepareForBailoutForId(expr->AssignmentId(), TOS_REG);
context()->Plug(eax);
}
@@ -2520,10 +2542,10 @@ void FullCodeGenerator::EmitKeyedPropertyAssignment(Assignment* expr) {
__ pop(edx);
// Record source code position before IC call.
SetSourcePosition(expr->position());
- Handle<Code> ic = is_classic_mode()
+ Handle<Code> ic = strict_mode() == SLOPPY
? isolate()->builtins()->KeyedStoreIC_Initialize()
: isolate()->builtins()->KeyedStoreIC_Initialize_Strict();
- CallIC(ic, NOT_CONTEXTUAL, expr->AssignmentFeedbackId());
+ CallIC(ic, expr->AssignmentFeedbackId());
PrepareForBailoutForId(expr->AssignmentId(), TOS_REG);
context()->Plug(eax);
@@ -2552,10 +2574,8 @@ void FullCodeGenerator::VisitProperty(Property* expr) {
void FullCodeGenerator::CallIC(Handle<Code> code,
- ContextualMode mode,
TypeFeedbackId ast_id) {
ic_total_count_++;
- ASSERT(mode != CONTEXTUAL || ast_id.IsNone());
__ call(code, RelocInfo::CODE_TARGET, ast_id);
}
@@ -2576,7 +2596,7 @@ void FullCodeGenerator::EmitCallWithIC(Call* expr) {
PrepareForBailout(callee, NO_REGISTERS);
}
// Push undefined as receiver. This is patched in the method prologue if it
- // is a classic mode method.
+ // is a sloppy mode method.
__ push(Immediate(isolate()->factory()->undefined_value()));
flags = NO_CALL_FUNCTION_FLAGS;
} else {
@@ -2668,15 +2688,15 @@ void FullCodeGenerator::EmitCallWithStub(Call* expr) {
SetSourcePosition(expr->position());
Handle<Object> uninitialized =
- TypeFeedbackCells::UninitializedSentinel(isolate());
- Handle<Cell> cell = isolate()->factory()->NewCell(uninitialized);
- RecordTypeFeedbackCell(expr->CallFeedbackId(), cell);
- __ mov(ebx, cell);
+ TypeFeedbackInfo::UninitializedSentinel(isolate());
+ StoreFeedbackVectorSlot(expr->CallFeedbackSlot(), uninitialized);
+ __ LoadHeapObject(ebx, FeedbackVector());
+ __ mov(edx, Immediate(Smi::FromInt(expr->CallFeedbackSlot())));
// Record call targets in unoptimized code.
CallFunctionStub stub(arg_count, RECORD_CALL_TARGET);
__ mov(edi, Operand(esp, (arg_count + 1) * kPointerSize));
- __ CallStub(&stub, expr->CallFeedbackId());
+ __ CallStub(&stub);
RecordJSReturnSite(expr);
// Restore context register.
@@ -2696,13 +2716,13 @@ void FullCodeGenerator::EmitResolvePossiblyDirectEval(int arg_count) {
// Push the receiver of the enclosing function.
__ push(Operand(ebp, (2 + info_->scope()->num_parameters()) * kPointerSize));
// Push the language mode.
- __ push(Immediate(Smi::FromInt(language_mode())));
+ __ push(Immediate(Smi::FromInt(strict_mode())));
// Push the start position of the scope the calls resides in.
__ push(Immediate(Smi::FromInt(scope()->start_position())));
// Do the runtime call.
- __ CallRuntime(Runtime::kResolvePossiblyDirectEval, 5);
+ __ CallRuntime(Runtime::kHiddenResolvePossiblyDirectEval, 5);
}
@@ -2718,8 +2738,8 @@ void FullCodeGenerator::VisitCall(Call* expr) {
Call::CallType call_type = expr->GetCallType(isolate());
if (call_type == Call::POSSIBLY_EVAL_CALL) {
- // In a call to eval, we first call %ResolvePossiblyDirectEval to
- // resolve the function we need to call and the receiver of the call.
+ // In a call to eval, we first call RuntimeHidden_ResolvePossiblyDirectEval
+ // to resolve the function we need to call and the receiver of the call.
// Then we call the resolved function using the given arguments.
ZoneList<Expression*>* args = expr->arguments();
int arg_count = args->length();
@@ -2769,7 +2789,7 @@ void FullCodeGenerator::VisitCall(Call* expr) {
// the object holding it (returned in edx).
__ push(context_register());
__ push(Immediate(proxy->name()));
- __ CallRuntime(Runtime::kLoadContextSlot, 2);
+ __ CallRuntime(Runtime::kHiddenLoadContextSlot, 2);
__ push(eax); // Function.
__ push(edx); // Receiver.
@@ -2843,15 +2863,22 @@ void FullCodeGenerator::VisitCallNew(CallNew* expr) {
SetSourcePosition(expr->position());
// Load function and argument count into edi and eax.
- __ Set(eax, Immediate(arg_count));
+ __ Move(eax, Immediate(arg_count));
__ mov(edi, Operand(esp, arg_count * kPointerSize));
// Record call targets in unoptimized code.
Handle<Object> uninitialized =
- TypeFeedbackCells::UninitializedSentinel(isolate());
- Handle<Cell> cell = isolate()->factory()->NewCell(uninitialized);
- RecordTypeFeedbackCell(expr->CallNewFeedbackId(), cell);
- __ mov(ebx, cell);
+ TypeFeedbackInfo::UninitializedSentinel(isolate());
+ StoreFeedbackVectorSlot(expr->CallNewFeedbackSlot(), uninitialized);
+ if (FLAG_pretenuring_call_new) {
+ StoreFeedbackVectorSlot(expr->AllocationSiteFeedbackSlot(),
+ isolate()->factory()->NewAllocationSite());
+ ASSERT(expr->AllocationSiteFeedbackSlot() ==
+ expr->CallNewFeedbackSlot() + 1);
+ }
+
+ __ LoadHeapObject(ebx, FeedbackVector());
+ __ mov(edx, Immediate(Smi::FromInt(expr->CallNewFeedbackSlot())));
CallConstructStub stub(RECORD_CALL_TARGET);
__ call(stub.GetCode(isolate()), RelocInfo::CONSTRUCT_CALL);
@@ -3108,9 +3135,11 @@ void FullCodeGenerator::EmitIsMinusZero(CallRuntime* expr) {
Handle<Map> map = masm()->isolate()->factory()->heap_number_map();
__ CheckMap(eax, map, if_false, DO_SMI_CHECK);
- __ cmp(FieldOperand(eax, HeapNumber::kExponentOffset), Immediate(0x80000000));
- __ j(not_equal, if_false);
- __ cmp(FieldOperand(eax, HeapNumber::kMantissaOffset), Immediate(0x00000000));
+ // Check if the exponent half is 0x80000000. Comparing against 1 and
+ // checking for overflow is the shortest possible encoding.
+ __ cmp(FieldOperand(eax, HeapNumber::kExponentOffset), Immediate(0x1));
+ __ j(no_overflow, if_false);
+ __ cmp(FieldOperand(eax, HeapNumber::kMantissaOffset), Immediate(0x0));
PrepareForBailoutBeforeSplit(expr, true, if_true, if_false);
Split(equal, if_true, if_false, fall_through);
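The rewritten check above relies on a flags trick: after "cmp reg, 1" the overflow flag is set only when reg held 0x80000000 (INT32_MIN), which is exactly the upper word of -0.0, so a single compare plus a no_overflow branch replaces the 32-bit immediate compare. A minimal plain-C++ sketch of the same predicate (assumes IEEE-754 doubles); this is not V8 code.

#include <cstdint>
#include <cstring>

static bool IsMinusZero(double x) {
  uint64_t bits;
  std::memcpy(&bits, &x, sizeof bits);                          // assumes IEEE-754 layout
  uint32_t exponent_word = static_cast<uint32_t>(bits >> 32);   // HeapNumber "exponent" half
  uint32_t mantissa_word = static_cast<uint32_t>(bits);         // HeapNumber "mantissa" half
  // After "cmp exponent_word, 1" the overflow flag is set iff the word is 0x80000000.
  bool would_overflow = (exponent_word == 0x80000000u);
  return would_overflow && mantissa_word == 0;
}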
@@ -3227,7 +3256,7 @@ void FullCodeGenerator::EmitArguments(CallRuntime* expr) {
// parameter count in eax.
VisitForAccumulatorValue(args->at(0));
__ mov(edx, eax);
- __ Set(eax, Immediate(Smi::FromInt(info_->scope()->num_parameters())));
+ __ Move(eax, Immediate(Smi::FromInt(info_->scope()->num_parameters())));
ArgumentsAccessStub stub(ArgumentsAccessStub::READ_ELEMENT);
__ CallStub(&stub);
context()->Plug(eax);
@@ -3239,7 +3268,7 @@ void FullCodeGenerator::EmitArgumentsLength(CallRuntime* expr) {
Label exit;
// Get the number of formal parameters.
- __ Set(eax, Immediate(Smi::FromInt(info_->scope()->num_parameters())));
+ __ Move(eax, Immediate(Smi::FromInt(info_->scope()->num_parameters())));
// Check if the calling frame is an arguments adaptor frame.
__ mov(ebx, Operand(ebp, StandardFrameConstants::kCallerFPOffset));
@@ -3331,7 +3360,7 @@ void FullCodeGenerator::EmitLog(CallRuntime* expr) {
if (CodeGenerator::ShouldGenerateLog(isolate(), args->at(0))) {
VisitForStackValue(args->at(1));
VisitForStackValue(args->at(2));
- __ CallRuntime(Runtime::kLog, 2);
+ __ CallRuntime(Runtime::kHiddenLog, 2);
}
// Finally, we're expected to leave a value on the top of the stack.
__ mov(eax, isolate()->factory()->undefined_value());
@@ -3424,7 +3453,7 @@ void FullCodeGenerator::EmitDateField(CallRuntime* expr) {
}
__ bind(&not_date_object);
- __ CallRuntime(Runtime::kThrowNotDateError, 0);
+ __ CallRuntime(Runtime::kHiddenThrowNotDateError, 0);
__ bind(&done);
context()->Plug(result);
}
@@ -3606,13 +3635,13 @@ void FullCodeGenerator::EmitStringCharCodeAt(CallRuntime* expr) {
__ bind(&index_out_of_range);
// When the index is out of range, the spec requires us to return
// NaN.
- __ Set(result, Immediate(isolate()->factory()->nan_value()));
+ __ Move(result, Immediate(isolate()->factory()->nan_value()));
__ jmp(&done);
__ bind(&need_conversion);
// Move the undefined value into the result register, which will
// trigger conversion.
- __ Set(result, Immediate(isolate()->factory()->undefined_value()));
+ __ Move(result, Immediate(isolate()->factory()->undefined_value()));
__ jmp(&done);
NopRuntimeCallHelper call_helper;
@@ -3654,13 +3683,13 @@ void FullCodeGenerator::EmitStringCharAt(CallRuntime* expr) {
__ bind(&index_out_of_range);
// When the index is out of range, the spec requires us to return
// the empty string.
- __ Set(result, Immediate(isolate()->factory()->empty_string()));
+ __ Move(result, Immediate(isolate()->factory()->empty_string()));
__ jmp(&done);
__ bind(&need_conversion);
// Move smi zero into the result register, which will trigger
// conversion.
- __ Set(result, Immediate(Smi::FromInt(0)));
+ __ Move(result, Immediate(Smi::FromInt(0)));
__ jmp(&done);
NopRuntimeCallHelper call_helper;
@@ -3806,7 +3835,7 @@ void FullCodeGenerator::EmitGetFromCache(CallRuntime* expr) {
// Call runtime to perform the lookup.
__ push(cache);
__ push(key);
- __ CallRuntime(Runtime::kGetFromCache, 2);
+ __ CallRuntime(Runtime::kHiddenGetFromCache, 2);
__ bind(&done);
context()->Plug(eax);
@@ -3911,8 +3940,8 @@ void FullCodeGenerator::EmitFastAsciiArrayJoin(CallRuntime* expr) {
// Check that all array elements are sequential ASCII strings, and
// accumulate the sum of their lengths, as a smi-encoded value.
- __ Set(index, Immediate(0));
- __ Set(string_length, Immediate(0));
+ __ Move(index, Immediate(0));
+ __ Move(string_length, Immediate(0));
// Loop condition: while (index < length).
// Live loop registers: index, array_length, string,
// scratch, string_length, elements.
@@ -4028,7 +4057,7 @@ void FullCodeGenerator::EmitFastAsciiArrayJoin(CallRuntime* expr) {
__ mov_b(scratch, FieldOperand(string, SeqOneByteString::kHeaderSize));
__ mov_b(separator_operand, scratch);
- __ Set(index, Immediate(0));
+ __ Move(index, Immediate(0));
// Jump into the loop after the code that copies the separator, so the first
// element is not preceded by a separator
__ jmp(&loop_2_entry);
@@ -4065,7 +4094,7 @@ void FullCodeGenerator::EmitFastAsciiArrayJoin(CallRuntime* expr) {
// Long separator case (separator is more than one character).
__ bind(&long_separator);
- __ Set(index, Immediate(0));
+ __ Move(index, Immediate(0));
// Jump into the loop after the code that copies the separator, so the first
// element is not preceded by a separator
__ jmp(&loop_3_entry);
@@ -4116,8 +4145,8 @@ void FullCodeGenerator::EmitFastAsciiArrayJoin(CallRuntime* expr) {
void FullCodeGenerator::VisitCallRuntime(CallRuntime* expr) {
- Handle<String> name = expr->name();
- if (name->length() > 0 && name->Get(0) == '_') {
+ if (expr->function() != NULL &&
+ expr->function()->intrinsic_type == Runtime::INLINE) {
Comment cmnt(masm_, "[ InlineRuntimeCall");
EmitInlineRuntimeCall(expr);
return;
@@ -4181,20 +4210,18 @@ void FullCodeGenerator::VisitUnaryOperation(UnaryOperation* expr) {
if (property != NULL) {
VisitForStackValue(property->obj());
VisitForStackValue(property->key());
- StrictModeFlag strict_mode_flag = (language_mode() == CLASSIC_MODE)
- ? kNonStrictMode : kStrictMode;
- __ push(Immediate(Smi::FromInt(strict_mode_flag)));
+ __ push(Immediate(Smi::FromInt(strict_mode())));
__ InvokeBuiltin(Builtins::DELETE, CALL_FUNCTION);
context()->Plug(eax);
} else if (proxy != NULL) {
Variable* var = proxy->var();
// Delete of an unqualified identifier is disallowed in strict mode
// but "delete this" is allowed.
- ASSERT(language_mode() == CLASSIC_MODE || var->is_this());
+ ASSERT(strict_mode() == SLOPPY || var->is_this());
if (var->IsUnallocated()) {
__ push(GlobalObjectOperand());
__ push(Immediate(var->name()));
- __ push(Immediate(Smi::FromInt(kNonStrictMode)));
+ __ push(Immediate(Smi::FromInt(SLOPPY)));
__ InvokeBuiltin(Builtins::DELETE, CALL_FUNCTION);
context()->Plug(eax);
} else if (var->IsStackAllocated() || var->IsContextSlot()) {
@@ -4207,7 +4234,7 @@ void FullCodeGenerator::VisitUnaryOperation(UnaryOperation* expr) {
// context where the variable was introduced.
__ push(context_register());
__ push(Immediate(var->name()));
- __ CallRuntime(Runtime::kDeleteContextSlot, 2);
+ __ CallRuntime(Runtime::kHiddenDeleteContextSlot, 2);
context()->Plug(eax);
}
} else {
@@ -4288,16 +4315,11 @@ void FullCodeGenerator::VisitUnaryOperation(UnaryOperation* expr) {
void FullCodeGenerator::VisitCountOperation(CountOperation* expr) {
+ ASSERT(expr->expression()->IsValidLeftHandSide());
+
Comment cmnt(masm_, "[ CountOperation");
SetSourcePosition(expr->position());
- // Invalid left-hand sides are rewritten to have a 'throw ReferenceError'
- // as the left-hand side.
- if (!expr->expression()->IsValidLeftHandSide()) {
- VisitForEffect(expr->expression());
- return;
- }
-
// Expression can only be a property, a global or a (parameter or local)
// slot.
enum LhsKind { VARIABLE, NAMED_PROPERTY, KEYED_PROPERTY };
@@ -4416,9 +4438,7 @@ void FullCodeGenerator::VisitCountOperation(CountOperation* expr) {
__ mov(edx, eax);
__ mov(eax, Immediate(Smi::FromInt(1)));
BinaryOpICStub stub(expr->binary_op(), NO_OVERWRITE);
- CallIC(stub.GetCode(isolate()),
- NOT_CONTEXTUAL,
- expr->CountBinOpFeedbackId());
+ CallIC(stub.GetCode(isolate()), expr->CountBinOpFeedbackId());
patch_site.EmitPatchInfo();
__ bind(&done);
@@ -4449,7 +4469,7 @@ void FullCodeGenerator::VisitCountOperation(CountOperation* expr) {
case NAMED_PROPERTY: {
__ mov(ecx, prop->key()->AsLiteral()->value());
__ pop(edx);
- CallStoreIC(NOT_CONTEXTUAL, expr->CountStoreFeedbackId());
+ CallStoreIC(expr->CountStoreFeedbackId());
PrepareForBailoutForId(expr->AssignmentId(), TOS_REG);
if (expr->is_postfix()) {
if (!context()->IsEffect()) {
@@ -4463,10 +4483,10 @@ void FullCodeGenerator::VisitCountOperation(CountOperation* expr) {
case KEYED_PROPERTY: {
__ pop(ecx);
__ pop(edx);
- Handle<Code> ic = is_classic_mode()
+ Handle<Code> ic = strict_mode() == SLOPPY
? isolate()->builtins()->KeyedStoreIC_Initialize()
: isolate()->builtins()->KeyedStoreIC_Initialize_Strict();
- CallIC(ic, NOT_CONTEXTUAL, expr->CountStoreFeedbackId());
+ CallIC(ic, expr->CountStoreFeedbackId());
PrepareForBailoutForId(expr->AssignmentId(), TOS_REG);
if (expr->is_postfix()) {
// Result is on the stack
@@ -4488,7 +4508,7 @@ void FullCodeGenerator::VisitForTypeofValue(Expression* expr) {
ASSERT(!context()->IsTest());
if (proxy != NULL && proxy->var()->IsUnallocated()) {
- Comment cmnt(masm_, "Global variable");
+ Comment cmnt(masm_, "[ Global variable");
__ mov(edx, GlobalObjectOperand());
__ mov(ecx, Immediate(proxy->name()));
// Use a regular load, not a contextual load, to avoid a reference
@@ -4497,6 +4517,7 @@ void FullCodeGenerator::VisitForTypeofValue(Expression* expr) {
PrepareForBailout(expr, TOS_REG);
context()->Plug(eax);
} else if (proxy != NULL && proxy->var()->IsLookupSlot()) {
+ Comment cmnt(masm_, "[ Lookup slot");
Label done, slow;
// Generate code for loading from variables potentially shadowed
@@ -4506,7 +4527,7 @@ void FullCodeGenerator::VisitForTypeofValue(Expression* expr) {
__ bind(&slow);
__ push(esi);
__ push(Immediate(proxy->name()));
- __ CallRuntime(Runtime::kLoadContextSlotNoReferenceError, 2);
+ __ CallRuntime(Runtime::kHiddenLoadContextSlotNoReferenceError, 2);
PrepareForBailout(expr, TOS_REG);
__ bind(&done);
@@ -4655,7 +4676,7 @@ void FullCodeGenerator::VisitCompareOperation(CompareOperation* expr) {
// Record position and call the compare IC.
SetSourcePosition(expr->position());
Handle<Code> ic = CompareIC::GetUninitialized(isolate(), op);
- CallIC(ic, NOT_CONTEXTUAL, expr->CompareOperationFeedbackId());
+ CallIC(ic, expr->CompareOperationFeedbackId());
patch_site.EmitPatchInfo();
PrepareForBailoutBeforeSplit(expr, true, if_true, if_false);
@@ -4691,7 +4712,7 @@ void FullCodeGenerator::EmitLiteralCompareNil(CompareOperation* expr,
Split(equal, if_true, if_false, fall_through);
} else {
Handle<Code> ic = CompareNilICStub::GetUninitialized(isolate(), nil);
- CallIC(ic, NOT_CONTEXTUAL, expr->CompareOperationFeedbackId());
+ CallIC(ic, expr->CompareOperationFeedbackId());
__ test(eax, eax);
Split(not_zero, if_true, if_false, fall_through);
}
@@ -4881,6 +4902,7 @@ void BackEdgeTable::PatchAt(Code* unoptimized_code,
}
Assembler::set_target_address_at(call_target_address,
+ unoptimized_code,
replacement_code->entry());
unoptimized_code->GetHeap()->incremental_marking()->RecordCodeTargetPatch(
unoptimized_code, call_target_address, replacement_code);
@@ -4898,20 +4920,22 @@ BackEdgeTable::BackEdgeState BackEdgeTable::GetBackEdgeState(
if (*jns_instr_address == kJnsInstruction) {
ASSERT_EQ(kJnsOffset, *(call_target_address - 2));
ASSERT_EQ(isolate->builtins()->InterruptCheck()->entry(),
- Assembler::target_address_at(call_target_address));
+ Assembler::target_address_at(call_target_address,
+ unoptimized_code));
return INTERRUPT;
}
ASSERT_EQ(kNopByteOne, *jns_instr_address);
ASSERT_EQ(kNopByteTwo, *(call_target_address - 2));
- if (Assembler::target_address_at(call_target_address) ==
+ if (Assembler::target_address_at(call_target_address, unoptimized_code) ==
isolate->builtins()->OnStackReplacement()->entry()) {
return ON_STACK_REPLACEMENT;
}
ASSERT_EQ(isolate->builtins()->OsrAfterStackCheck()->entry(),
- Assembler::target_address_at(call_target_address));
+ Assembler::target_address_at(call_target_address,
+ unoptimized_code));
return OSR_AFTER_STACK_CHECK;
}
diff --git a/deps/v8/src/ia32/ic-ia32.cc b/deps/v8/src/ia32/ic-ia32.cc
index bd6dcefe15..c2be7da1a4 100644
--- a/deps/v8/src/ia32/ic-ia32.cc
+++ b/deps/v8/src/ia32/ic-ia32.cc
@@ -351,7 +351,7 @@ static Operand GenerateMappedArgumentsLookup(MacroAssembler* masm,
__ j(not_zero, slow_case);
// Load the elements into scratch1 and check its map.
- Handle<Map> arguments_map(heap->non_strict_arguments_elements_map());
+ Handle<Map> arguments_map(heap->sloppy_arguments_elements_map());
__ mov(scratch1, FieldOperand(object, JSObject::kElementsOffset));
__ CheckMap(scratch1, arguments_map, slow_case, DONT_DO_SMI_CHECK);
@@ -657,7 +657,7 @@ void KeyedLoadIC::GenerateIndexedInterceptor(MacroAssembler* masm) {
}
-void KeyedLoadIC::GenerateNonStrictArguments(MacroAssembler* masm) {
+void KeyedLoadIC::GenerateSloppyArguments(MacroAssembler* masm) {
// ----------- S t a t e -------------
// -- ecx : key
// -- edx : receiver
@@ -682,7 +682,7 @@ void KeyedLoadIC::GenerateNonStrictArguments(MacroAssembler* masm) {
}
-void KeyedStoreIC::GenerateNonStrictArguments(MacroAssembler* masm) {
+void KeyedStoreIC::GenerateSloppyArguments(MacroAssembler* masm) {
// ----------- S t a t e -------------
// -- eax : value
// -- ecx : key
@@ -859,7 +859,7 @@ static void KeyedStoreGenerateGenericHelper(
void KeyedStoreIC::GenerateGeneric(MacroAssembler* masm,
- StrictModeFlag strict_mode) {
+ StrictMode strict_mode) {
// ----------- S t a t e -------------
// -- eax : value
// -- ecx : key
@@ -947,8 +947,7 @@ void KeyedStoreIC::GenerateGeneric(MacroAssembler* masm,
}
-void LoadIC::GenerateMegamorphic(MacroAssembler* masm,
- ExtraICState extra_state) {
+void LoadIC::GenerateMegamorphic(MacroAssembler* masm) {
// ----------- S t a t e -------------
// -- ecx : name
// -- edx : receiver
@@ -956,9 +955,7 @@ void LoadIC::GenerateMegamorphic(MacroAssembler* masm,
// -----------------------------------
// Probe the stub cache.
- Code::Flags flags = Code::ComputeFlags(
- Code::HANDLER, MONOMORPHIC, extra_state,
- Code::NORMAL, Code::LOAD_IC);
+ Code::Flags flags = Code::ComputeHandlerFlags(Code::LOAD_IC);
masm->isolate()->stub_cache()->GenerateProbe(
masm, flags, edx, ecx, ebx, eax);
@@ -1064,17 +1061,14 @@ void KeyedLoadIC::GenerateRuntimeGetProperty(MacroAssembler* masm) {
}
-void StoreIC::GenerateMegamorphic(MacroAssembler* masm,
- ExtraICState extra_ic_state) {
+void StoreIC::GenerateMegamorphic(MacroAssembler* masm) {
// ----------- S t a t e -------------
// -- eax : value
// -- ecx : name
// -- edx : receiver
// -- esp[0] : return address
// -----------------------------------
- Code::Flags flags = Code::ComputeFlags(
- Code::HANDLER, MONOMORPHIC, extra_ic_state,
- Code::NORMAL, Code::STORE_IC);
+ Code::Flags flags = Code::ComputeHandlerFlags(Code::STORE_IC);
masm->isolate()->stub_cache()->GenerateProbe(
masm, flags, edx, ecx, ebx, no_reg);
@@ -1136,7 +1130,7 @@ void StoreIC::GenerateNormal(MacroAssembler* masm) {
void StoreIC::GenerateRuntimeSetProperty(MacroAssembler* masm,
- StrictModeFlag strict_mode) {
+ StrictMode strict_mode) {
// ----------- S t a t e -------------
// -- eax : value
// -- ecx : name
@@ -1157,7 +1151,7 @@ void StoreIC::GenerateRuntimeSetProperty(MacroAssembler* masm,
void KeyedStoreIC::GenerateRuntimeSetProperty(MacroAssembler* masm,
- StrictModeFlag strict_mode) {
+ StrictMode strict_mode) {
// ----------- S t a t e -------------
// -- eax : value
// -- ecx : key
diff --git a/deps/v8/src/ia32/lithium-codegen-ia32.cc b/deps/v8/src/ia32/lithium-codegen-ia32.cc
index 5a12ca9690..0dbe3da13d 100644
--- a/deps/v8/src/ia32/lithium-codegen-ia32.cc
+++ b/deps/v8/src/ia32/lithium-codegen-ia32.cc
@@ -103,7 +103,7 @@ void LCodeGen::FinishCode(Handle<Code> code) {
ASSERT(is_done());
code->set_stack_slots(GetStackSlotCount());
code->set_safepoint_table_offset(safepoints_.GetCodeOffset());
- RegisterDependentCodeForEmbeddedMaps(code);
+ if (code->is_optimized_code()) RegisterWeakObjectsInOptimizedCode(code);
PopulateDeoptimizationData(code);
if (!info()->IsStub()) {
Deoptimizer::EnsureRelocSpaceForLazyDeoptimization(code);
@@ -175,11 +175,11 @@ bool LCodeGen::GeneratePrologue() {
}
#endif
- // Classic mode functions and builtins need to replace the receiver with the
+ // Sloppy mode functions and builtins need to replace the receiver with the
// global proxy when called as functions (without an explicit receiver
// object).
if (info_->this_has_uses() &&
- info_->is_classic_mode() &&
+ info_->strict_mode() == SLOPPY &&
!info_->is_native()) {
Label ok;
// +1 for return address.
@@ -199,7 +199,7 @@ bool LCodeGen::GeneratePrologue() {
if (support_aligned_spilled_doubles_ && dynamic_frame_alignment_) {
// Move state of dynamic frame alignment into edx.
- __ Set(edx, Immediate(kNoAlignmentPadding));
+ __ Move(edx, Immediate(kNoAlignmentPadding));
Label do_not_pad, align_loop;
STATIC_ASSERT(kDoubleSize == 2 * kPointerSize);
@@ -297,7 +297,7 @@ bool LCodeGen::GeneratePrologue() {
__ CallStub(&stub);
} else {
__ push(edi);
- __ CallRuntime(Runtime::kNewFunctionContext, 1);
+ __ CallRuntime(Runtime::kHiddenNewFunctionContext, 1);
}
RecordSafepoint(Safepoint::kNoLazyDeopt);
// Context is returned in eax. It replaces the context passed to us.
@@ -346,7 +346,7 @@ void LCodeGen::GenerateOsrPrologue() {
osr_pc_offset_ = masm()->pc_offset();
// Move state of dynamic frame alignment into edx.
- __ Set(edx, Immediate(kNoAlignmentPadding));
+ __ Move(edx, Immediate(kNoAlignmentPadding));
if (support_aligned_spilled_doubles_ && dynamic_frame_alignment_) {
Label do_not_pad, align_loop;
@@ -390,6 +390,9 @@ void LCodeGen::GenerateOsrPrologue() {
void LCodeGen::GenerateBodyInstructionPre(LInstruction* instr) {
+ if (instr->IsCall()) {
+ EnsureSpaceForLazyDeopt(Deoptimizer::patch_size());
+ }
if (!instr->IsLazyBailout() && !instr->IsGap()) {
safepoints_.BumpLastLazySafepointIndex();
}
@@ -479,7 +482,8 @@ bool LCodeGen::GenerateDeferredCode() {
HValue* value =
instructions_->at(code->instruction_index())->hydrogen_value();
- RecordAndWritePosition(value->position());
+ RecordAndWritePosition(
+ chunk()->graph()->SourcePositionToScriptPosition(value->position()));
Comment(";;; <@%d,#%d> "
"-------------------- Deferred %s --------------------",
@@ -954,10 +958,6 @@ void LCodeGen::AddToTranslation(LEnvironment* environment,
}
} else if (op->IsDoubleStackSlot()) {
translation->StoreDoubleStackSlot(op->index());
- } else if (op->IsArgument()) {
- ASSERT(is_tagged);
- int src_index = GetStackSlotCount() + op->index();
- translation->StoreStackSlot(src_index);
} else if (op->IsRegister()) {
Register reg = ToRegister(op);
if (is_tagged) {
@@ -1181,6 +1181,14 @@ void LCodeGen::PopulateDeoptimizationData(Handle<Code> code) {
translations_.CreateByteArray(isolate()->factory());
data->SetTranslationByteArray(*translations);
data->SetInlinedFunctionCount(Smi::FromInt(inlined_function_count_));
+ data->SetOptimizationId(Smi::FromInt(info_->optimization_id()));
+ if (info_->IsOptimizing()) {
+ // Reference to shared function info does not change between phases.
+ AllowDeferredHandleDereference allow_handle_dereference;
+ data->SetSharedFunctionInfo(*info_->shared_info());
+ } else {
+ data->SetSharedFunctionInfo(Smi::FromInt(0));
+ }
Handle<FixedArray> literals =
factory()->NewFixedArray(deoptimization_literals_.length(), TENURED);
@@ -1366,301 +1374,325 @@ void LCodeGen::DoUnknownOSRValue(LUnknownOSRValue* instr) {
}
-void LCodeGen::DoModI(LModI* instr) {
+void LCodeGen::DoModByPowerOf2I(LModByPowerOf2I* instr) {
+ Register dividend = ToRegister(instr->dividend());
+ int32_t divisor = instr->divisor();
+ ASSERT(dividend.is(ToRegister(instr->result())));
+
+ // Theoretically, a variation of the branch-free code for integer division by
+ // a power of 2 (calculating the remainder via an additional multiplication
+ // (which gets simplified to an 'and') and subtraction) should be faster, and
+ // this is exactly what GCC and clang emit. Nevertheless, benchmarks seem to
+ // indicate that positive dividends are heavily favored, so the branching
+ // version performs better.
HMod* hmod = instr->hydrogen();
- HValue* left = hmod->left();
- HValue* right = hmod->right();
- if (hmod->RightIsPowerOf2()) {
- // TODO(svenpanne) We should really do the strength reduction on the
- // Hydrogen level.
- Register left_reg = ToRegister(instr->left());
- ASSERT(left_reg.is(ToRegister(instr->result())));
-
- // Note: The code below even works when right contains kMinInt.
- int32_t divisor = Abs(right->GetInteger32Constant());
-
- Label left_is_not_negative, done;
- if (left->CanBeNegative()) {
- __ test(left_reg, Operand(left_reg));
- __ j(not_sign, &left_is_not_negative, Label::kNear);
- __ neg(left_reg);
- __ and_(left_reg, divisor - 1);
- __ neg(left_reg);
- if (hmod->CheckFlag(HValue::kBailoutOnMinusZero)) {
- DeoptimizeIf(zero, instr->environment());
- }
- __ jmp(&done, Label::kNear);
+ int32_t mask = divisor < 0 ? -(divisor + 1) : (divisor - 1);
+ Label dividend_is_not_negative, done;
+ if (hmod->CheckFlag(HValue::kLeftCanBeNegative)) {
+ __ test(dividend, dividend);
+ __ j(not_sign, &dividend_is_not_negative, Label::kNear);
+ // Note that this is correct even for kMinInt operands.
+ __ neg(dividend);
+ __ and_(dividend, mask);
+ __ neg(dividend);
+ if (hmod->CheckFlag(HValue::kBailoutOnMinusZero)) {
+ DeoptimizeIf(zero, instr->environment());
}
+ __ jmp(&done, Label::kNear);
+ }
- __ bind(&left_is_not_negative);
- __ and_(left_reg, divisor - 1);
- __ bind(&done);
- } else {
- Register left_reg = ToRegister(instr->left());
- ASSERT(left_reg.is(eax));
- Register right_reg = ToRegister(instr->right());
- ASSERT(!right_reg.is(eax));
- ASSERT(!right_reg.is(edx));
- Register result_reg = ToRegister(instr->result());
- ASSERT(result_reg.is(edx));
+ __ bind(&dividend_is_not_negative);
+ __ and_(dividend, mask);
+ __ bind(&done);
+}
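A plain-C++ model of the branching remainder-by-power-of-two above (an illustrative sketch, not the generated code): mask with |divisor| - 1, negating around the mask for negative dividends so the result keeps the dividend's sign as JavaScript's % requires; a zero result from a negative dividend is the -0 case that triggers the minus-zero deopt.

#include <cassert>
#include <cstdint>

// Assumes |d| is a power of two (d may be kMinInt, hence -(d + 1) below).
static int32_t ModByPowerOf2(int32_t n, int32_t d) {
  uint32_t mask = static_cast<uint32_t>(d < 0 ? -(d + 1) : d - 1);  // |d| - 1
  if (n < 0) {
    // Negate in unsigned arithmetic so n == kMinInt does not overflow.
    uint32_t m = (0u - static_cast<uint32_t>(n)) & mask;
    return -static_cast<int32_t>(m);   // remainder keeps the dividend's sign
  }
  return static_cast<int32_t>(static_cast<uint32_t>(n) & mask);
}

int main() {
  assert(ModByPowerOf2(-9, 8) == -1);   // like -9 % 8 in JS
  assert(ModByPowerOf2( 9, -8) == 1);
  assert(ModByPowerOf2(-8, 8) == 0);    // the "-0" case the code deopts on
}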
- Label done;
- // Check for x % 0, idiv would signal a divide error. We have to
- // deopt in this case because we can't return a NaN.
- if (right->CanBeZero()) {
- __ test(right_reg, Operand(right_reg));
- DeoptimizeIf(zero, instr->environment());
- }
- // Check for kMinInt % -1, idiv would signal a divide error. We
- // have to deopt if we care about -0, because we can't return that.
- if (left->RangeCanInclude(kMinInt) && right->RangeCanInclude(-1)) {
- Label no_overflow_possible;
- __ cmp(left_reg, kMinInt);
- __ j(not_equal, &no_overflow_possible, Label::kNear);
- __ cmp(right_reg, -1);
- if (hmod->CheckFlag(HValue::kBailoutOnMinusZero)) {
- DeoptimizeIf(equal, instr->environment());
- } else {
- __ j(not_equal, &no_overflow_possible, Label::kNear);
- __ Set(result_reg, Immediate(0));
- __ jmp(&done, Label::kNear);
- }
- __ bind(&no_overflow_possible);
- }
+void LCodeGen::DoModByConstI(LModByConstI* instr) {
+ Register dividend = ToRegister(instr->dividend());
+ int32_t divisor = instr->divisor();
+ ASSERT(ToRegister(instr->result()).is(eax));
- // Sign extend dividend in eax into edx:eax.
- __ cdq();
-
- // If we care about -0, test if the dividend is <0 and the result is 0.
- if (left->CanBeNegative() &&
- hmod->CanBeZero() &&
- hmod->CheckFlag(HValue::kBailoutOnMinusZero)) {
- Label positive_left;
- __ test(left_reg, Operand(left_reg));
- __ j(not_sign, &positive_left, Label::kNear);
- __ idiv(right_reg);
- __ test(result_reg, Operand(result_reg));
- DeoptimizeIf(zero, instr->environment());
+ if (divisor == 0) {
+ DeoptimizeIf(no_condition, instr->environment());
+ return;
+ }
+
+ __ TruncatingDiv(dividend, Abs(divisor));
+ __ imul(edx, edx, Abs(divisor));
+ __ mov(eax, dividend);
+ __ sub(eax, edx);
+
+ // Check for negative zero.
+ HMod* hmod = instr->hydrogen();
+ if (hmod->CheckFlag(HValue::kBailoutOnMinusZero)) {
+ Label remainder_not_zero;
+ __ j(not_zero, &remainder_not_zero, Label::kNear);
+ __ cmp(dividend, Immediate(0));
+ DeoptimizeIf(less, instr->environment());
+ __ bind(&remainder_not_zero);
+ }
+}
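DoModByConstI above computes the remainder as dividend - TruncatingDiv(dividend, |divisor|) * |divisor|. A behavioural sketch using an ordinary truncating / in place of the multiply-high helper (so the arithmetic, not the instruction sequence, is what is modelled):

#include <cassert>

// Assumes d != 0 and |d| is not a power of two (those take the path above).
static int ModByConst(int n, int d) {
  int abs_d = d < 0 ? -d : d;   // d == kMinInt is a power of two, so not hit here
  int q = n / abs_d;            // stands in for TruncatingDiv (rounds toward zero)
  return n - q * abs_d;         // remainder has the sign of the dividend
}

int main() {
  assert(ModByConst(-7, 3) == -1);   // JS: -7 % 3 === -1
  assert(ModByConst(7, -3) == 1);    // JS: 7 % -3 === 1
  assert(ModByConst(-6, 3) == 0);    // negative dividend, zero result => -0 deopt check
}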
+
+
+void LCodeGen::DoModI(LModI* instr) {
+ HMod* hmod = instr->hydrogen();
+
+ Register left_reg = ToRegister(instr->left());
+ ASSERT(left_reg.is(eax));
+ Register right_reg = ToRegister(instr->right());
+ ASSERT(!right_reg.is(eax));
+ ASSERT(!right_reg.is(edx));
+ Register result_reg = ToRegister(instr->result());
+ ASSERT(result_reg.is(edx));
+
+ Label done;
+ // Check for x % 0, idiv would signal a divide error. We have to
+ // deopt in this case because we can't return a NaN.
+ if (hmod->CheckFlag(HValue::kCanBeDivByZero)) {
+ __ test(right_reg, Operand(right_reg));
+ DeoptimizeIf(zero, instr->environment());
+ }
+
+ // Check for kMinInt % -1, idiv would signal a divide error. We
+ // have to deopt if we care about -0, because we can't return that.
+ if (hmod->CheckFlag(HValue::kCanOverflow)) {
+ Label no_overflow_possible;
+ __ cmp(left_reg, kMinInt);
+ __ j(not_equal, &no_overflow_possible, Label::kNear);
+ __ cmp(right_reg, -1);
+ if (hmod->CheckFlag(HValue::kBailoutOnMinusZero)) {
+ DeoptimizeIf(equal, instr->environment());
+ } else {
+ __ j(not_equal, &no_overflow_possible, Label::kNear);
+ __ Move(result_reg, Immediate(0));
__ jmp(&done, Label::kNear);
- __ bind(&positive_left);
}
+ __ bind(&no_overflow_possible);
+ }
+
+ // Sign extend dividend in eax into edx:eax.
+ __ cdq();
+
+ // If we care about -0, test if the dividend is <0 and the result is 0.
+ if (hmod->CheckFlag(HValue::kBailoutOnMinusZero)) {
+ Label positive_left;
+ __ test(left_reg, Operand(left_reg));
+ __ j(not_sign, &positive_left, Label::kNear);
__ idiv(right_reg);
- __ bind(&done);
+ __ test(result_reg, Operand(result_reg));
+ DeoptimizeIf(zero, instr->environment());
+ __ jmp(&done, Label::kNear);
+ __ bind(&positive_left);
}
+ __ idiv(right_reg);
+ __ bind(&done);
}
-void LCodeGen::DoDivI(LDivI* instr) {
- if (!instr->is_flooring() && instr->hydrogen()->RightIsPowerOf2()) {
- Register dividend = ToRegister(instr->left());
- int32_t divisor = instr->hydrogen()->right()->GetInteger32Constant();
- int32_t test_value = 0;
- int32_t power = 0;
-
- if (divisor > 0) {
- test_value = divisor - 1;
- power = WhichPowerOf2(divisor);
- } else {
- // Check for (0 / -x) that will produce negative zero.
- if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) {
- __ test(dividend, Operand(dividend));
- DeoptimizeIf(zero, instr->environment());
- }
- // Check for (kMinInt / -1).
- if (divisor == -1 && instr->hydrogen()->CheckFlag(HValue::kCanOverflow)) {
- __ cmp(dividend, kMinInt);
- DeoptimizeIf(zero, instr->environment());
- }
- test_value = - divisor - 1;
- power = WhichPowerOf2(-divisor);
- }
+void LCodeGen::DoDivByPowerOf2I(LDivByPowerOf2I* instr) {
+ Register dividend = ToRegister(instr->dividend());
+ int32_t divisor = instr->divisor();
+ Register result = ToRegister(instr->result());
+ ASSERT(divisor == kMinInt || (divisor != 0 && IsPowerOf2(Abs(divisor))));
+ ASSERT(!result.is(dividend));
+
+ // Check for (0 / -x) that will produce negative zero.
+ HDiv* hdiv = instr->hydrogen();
+ if (hdiv->CheckFlag(HValue::kBailoutOnMinusZero) && divisor < 0) {
+ __ test(dividend, dividend);
+ DeoptimizeIf(zero, instr->environment());
+ }
+ // Check for (kMinInt / -1).
+ if (hdiv->CheckFlag(HValue::kCanOverflow) && divisor == -1) {
+ __ cmp(dividend, kMinInt);
+ DeoptimizeIf(zero, instr->environment());
+ }
+ // Deoptimize if remainder will not be 0.
+ if (!hdiv->CheckFlag(HInstruction::kAllUsesTruncatingToInt32) &&
+ divisor != 1 && divisor != -1) {
+ int32_t mask = divisor < 0 ? -(divisor + 1) : (divisor - 1);
+ __ test(dividend, Immediate(mask));
+ DeoptimizeIf(not_zero, instr->environment());
+ }
+ __ Move(result, dividend);
+ int32_t shift = WhichPowerOf2Abs(divisor);
+ if (shift > 0) {
+ // The arithmetic shift is always OK, the 'if' is an optimization only.
+ if (shift > 1) __ sar(result, 31);
+ __ shr(result, 32 - shift);
+ __ add(result, dividend);
+ __ sar(result, shift);
+ }
+ if (divisor < 0) __ neg(result);
+}
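The shift sequence above implements truncating (round-toward-zero) division by adding a bias of |d| - 1 to negative dividends before the arithmetic shift. A plain-C++ sketch under the stated assumptions (arithmetic >> on negative values, |d| == 1 << k, and the kMinInt / -1 case already deoptimized); not the generated code itself.

#include <cassert>
#include <cstdint>

static int32_t DivByPowerOf2(int32_t n, int32_t d, int k) {  // |d| == 1 << k
  if (k == 0) return d < 0 ? -n : n;        // divisor is 1 or -1
  // Bias is 2^k - 1 for negative n, 0 otherwise (the sar/shr pair above).
  uint32_t bias = static_cast<uint32_t>(n >> 31) >> (32 - k);
  int32_t q = (n + static_cast<int32_t>(bias)) >> k;   // now rounds toward zero
  return d < 0 ? -q : q;
}

int main() {
  assert(DivByPowerOf2(-7, 4, 2) == -1);   // trunc(-7/4), not floor == -2
  assert(DivByPowerOf2( 7, -4, 2) == -1);
  assert(DivByPowerOf2(-8, 4, 2) == -2);
}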
- if (test_value != 0) {
- if (instr->hydrogen()->CheckFlag(
- HInstruction::kAllUsesTruncatingToInt32)) {
- Label done, negative;
- __ cmp(dividend, 0);
- __ j(less, &negative, Label::kNear);
- __ sar(dividend, power);
- if (divisor < 0) __ neg(dividend);
- __ jmp(&done, Label::kNear);
-
- __ bind(&negative);
- __ neg(dividend);
- __ sar(dividend, power);
- if (divisor > 0) __ neg(dividend);
- __ bind(&done);
- return; // Don't fall through to "__ neg" below.
- } else {
- // Deoptimize if remainder is not 0.
- __ test(dividend, Immediate(test_value));
- DeoptimizeIf(not_zero, instr->environment());
- __ sar(dividend, power);
- }
- }
- if (divisor < 0) __ neg(dividend);
+void LCodeGen::DoDivByConstI(LDivByConstI* instr) {
+ Register dividend = ToRegister(instr->dividend());
+ int32_t divisor = instr->divisor();
+ ASSERT(ToRegister(instr->result()).is(edx));
+ if (divisor == 0) {
+ DeoptimizeIf(no_condition, instr->environment());
return;
}
- LOperand* right = instr->right();
- ASSERT(ToRegister(instr->result()).is(eax));
- ASSERT(ToRegister(instr->left()).is(eax));
- ASSERT(!ToRegister(instr->right()).is(eax));
- ASSERT(!ToRegister(instr->right()).is(edx));
+ // Check for (0 / -x) that will produce negative zero.
+ HDiv* hdiv = instr->hydrogen();
+ if (hdiv->CheckFlag(HValue::kBailoutOnMinusZero) && divisor < 0) {
+ __ test(dividend, dividend);
+ DeoptimizeIf(zero, instr->environment());
+ }
+
+ __ TruncatingDiv(dividend, Abs(divisor));
+ if (divisor < 0) __ neg(edx);
+
+ if (!hdiv->CheckFlag(HInstruction::kAllUsesTruncatingToInt32)) {
+ __ mov(eax, edx);
+ __ imul(eax, eax, divisor);
+ __ sub(eax, dividend);
+ DeoptimizeIf(not_equal, instr->environment());
+ }
+}
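TruncatingDiv in the code above replaces the division by |divisor| with a multiply-high against a precomputed reciprocal constant; the quotient is then negated for negative divisors and, unless all uses truncate, the code deopts if multiplying the quotient back does not reproduce the dividend. A sketch of the multiply-high idea for one concrete divisor (3, with the classic Hacker's Delight constant 0x55555556); the real helper derives its own constant and shift per divisor, so this is illustrative only, and it assumes arithmetic >> on signed 64-bit values.

#include <cassert>
#include <cstdint>

// Truncating division by 3 via multiply-high.
static int32_t TruncatingDivBy3(int32_t n) {
  int64_t prod = static_cast<int64_t>(n) * 0x55555556LL;
  int32_t q = static_cast<int32_t>(prod >> 32);          // high 32 bits of the product
  q += static_cast<uint32_t>(n) >> 31;                   // +1 correction for negative n
  return q;
}

int main() {
  for (int32_t n : {7, -7, 9, -10, 0, 2147483647, -2147483647 - 1}) {
    assert(TruncatingDivBy3(n) == n / 3);                // C++ '/' also truncates toward zero
  }
}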
- Register left_reg = eax;
+
+void LCodeGen::DoDivI(LDivI* instr) {
+ HBinaryOperation* hdiv = instr->hydrogen();
+ Register dividend = ToRegister(instr->left());
+ Register divisor = ToRegister(instr->right());
+ Register remainder = ToRegister(instr->temp());
+ Register result = ToRegister(instr->result());
+ ASSERT(dividend.is(eax));
+ ASSERT(remainder.is(edx));
+ ASSERT(result.is(eax));
+ ASSERT(!divisor.is(eax));
+ ASSERT(!divisor.is(edx));
// Check for x / 0.
- Register right_reg = ToRegister(right);
- if (instr->hydrogen_value()->CheckFlag(HValue::kCanBeDivByZero)) {
- __ test(right_reg, ToOperand(right));
+ if (hdiv->CheckFlag(HValue::kCanBeDivByZero)) {
+ __ test(divisor, divisor);
DeoptimizeIf(zero, instr->environment());
}
// Check for (0 / -x) that will produce negative zero.
- if (instr->hydrogen_value()->CheckFlag(HValue::kBailoutOnMinusZero)) {
- Label left_not_zero;
- __ test(left_reg, Operand(left_reg));
- __ j(not_zero, &left_not_zero, Label::kNear);
- __ test(right_reg, ToOperand(right));
+ if (hdiv->CheckFlag(HValue::kBailoutOnMinusZero)) {
+ Label dividend_not_zero;
+ __ test(dividend, dividend);
+ __ j(not_zero, &dividend_not_zero, Label::kNear);
+ __ test(divisor, divisor);
DeoptimizeIf(sign, instr->environment());
- __ bind(&left_not_zero);
+ __ bind(&dividend_not_zero);
}
// Check for (kMinInt / -1).
- if (instr->hydrogen_value()->CheckFlag(HValue::kCanOverflow)) {
- Label left_not_min_int;
- __ cmp(left_reg, kMinInt);
- __ j(not_zero, &left_not_min_int, Label::kNear);
- __ cmp(right_reg, -1);
+ if (hdiv->CheckFlag(HValue::kCanOverflow)) {
+ Label dividend_not_min_int;
+ __ cmp(dividend, kMinInt);
+ __ j(not_zero, &dividend_not_min_int, Label::kNear);
+ __ cmp(divisor, -1);
DeoptimizeIf(zero, instr->environment());
- __ bind(&left_not_min_int);
+ __ bind(&dividend_not_min_int);
}
- // Sign extend to edx.
+ // Sign extend to edx (= remainder).
__ cdq();
- __ idiv(right_reg);
+ __ idiv(divisor);
- if (instr->is_flooring()) {
+ if (hdiv->IsMathFloorOfDiv()) {
Label done;
- __ test(edx, edx);
+ __ test(remainder, remainder);
__ j(zero, &done, Label::kNear);
- __ xor_(edx, right_reg);
- __ sar(edx, 31);
- __ add(eax, edx);
+ __ xor_(remainder, divisor);
+ __ sar(remainder, 31);
+ __ add(result, remainder);
__ bind(&done);
- } else if (!instr->hydrogen()->CheckFlag(
- HInstruction::kAllUsesTruncatingToInt32)) {
+ } else if (!hdiv->CheckFlag(HValue::kAllUsesTruncatingToInt32)) {
// Deoptimize if remainder is not 0.
- __ test(edx, Operand(edx));
+ __ test(remainder, remainder);
DeoptimizeIf(not_zero, instr->environment());
}
}
-void LCodeGen::DoMathFloorOfDiv(LMathFloorOfDiv* instr) {
- ASSERT(instr->right()->IsConstantOperand());
+void LCodeGen::DoFlooringDivByPowerOf2I(LFlooringDivByPowerOf2I* instr) {
+ Register dividend = ToRegister(instr->dividend());
+ int32_t divisor = instr->divisor();
+ ASSERT(dividend.is(ToRegister(instr->result())));
- Register dividend = ToRegister(instr->left());
- int32_t divisor = ToInteger32(LConstantOperand::cast(instr->right()));
- Register result = ToRegister(instr->result());
-
- switch (divisor) {
- case 0:
- DeoptimizeIf(no_condition, instr->environment());
- return;
-
- case 1:
- __ Move(result, dividend);
+ // If the divisor is positive, things are easy: There can be no deopts and we
+ // can simply do an arithmetic right shift.
+ if (divisor == 1) return;
+ int32_t shift = WhichPowerOf2Abs(divisor);
+ if (divisor > 1) {
+ __ sar(dividend, shift);
return;
+ }
- case -1:
- __ Move(result, dividend);
- __ neg(result);
- if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) {
- DeoptimizeIf(zero, instr->environment());
- }
- if (instr->hydrogen()->CheckFlag(HValue::kCanOverflow)) {
+ // If the divisor is negative, we have to negate and handle edge cases.
+ Label not_kmin_int, done;
+ __ neg(dividend);
+ if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) {
+ DeoptimizeIf(zero, instr->environment());
+ }
+ if (instr->hydrogen()->CheckFlag(HValue::kLeftCanBeMinInt)) {
+ // Note that we could emit branch-free code, but that would need one more
+ // register.
+ if (divisor == -1) {
DeoptimizeIf(overflow, instr->environment());
+ } else {
+ __ j(no_overflow, &not_kmin_int, Label::kNear);
+ __ mov(dividend, Immediate(kMinInt / divisor));
+ __ jmp(&done, Label::kNear);
}
+ }
+ __ bind(&not_kmin_int);
+ __ sar(dividend, shift);
+ __ bind(&done);
+}
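For a positive power-of-two divisor the function above needs no bias at all, because an arithmetic right shift already rounds toward negative infinity, i.e. it is the flooring division; only negative divisors need the negate-then-shift path with the -0 and kMinInt checks. A one-line sketch, assuming arithmetic >>:

#include <cassert>

static int FlooringDivByPow2(int n, int k) {  // divisor == 1 << k, k > 0
  return n >> k;                              // floor(n / 2^k) for arithmetic >>
}

int main() {
  assert(FlooringDivByPow2(-7, 2) == -2);     // floor(-7/4) == -2 (trunc would give -1)
  assert(FlooringDivByPow2( 7, 2) ==  1);
}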
+
+
+void LCodeGen::DoFlooringDivByConstI(LFlooringDivByConstI* instr) {
+ Register dividend = ToRegister(instr->dividend());
+ int32_t divisor = instr->divisor();
+ ASSERT(ToRegister(instr->result()).is(edx));
+
+ if (divisor == 0) {
+ DeoptimizeIf(no_condition, instr->environment());
return;
}
- uint32_t divisor_abs = abs(divisor);
- if (IsPowerOf2(divisor_abs)) {
- int32_t power = WhichPowerOf2(divisor_abs);
- if (divisor < 0) {
- // Input[dividend] is clobbered.
- // The sequence is tedious because neg(dividend) might overflow.
- __ mov(result, dividend);
- __ sar(dividend, 31);
- __ neg(result);
- if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) {
- DeoptimizeIf(zero, instr->environment());
- }
- __ shl(dividend, 32 - power);
- __ sar(result, power);
- __ not_(dividend);
- // Clear result.sign if dividend.sign is set.
- __ and_(result, dividend);
- } else {
- __ Move(result, dividend);
- __ sar(result, power);
- }
- } else {
- ASSERT(ToRegister(instr->left()).is(eax));
- ASSERT(ToRegister(instr->result()).is(edx));
- Register scratch = ToRegister(instr->temp());
-
- // Find b which: 2^b < divisor_abs < 2^(b+1).
- unsigned b = 31 - CompilerIntrinsics::CountLeadingZeros(divisor_abs);
- unsigned shift = 32 + b; // Precision +1bit (effectively).
- double multiplier_f =
- static_cast<double>(static_cast<uint64_t>(1) << shift) / divisor_abs;
- int64_t multiplier;
- if (multiplier_f - std::floor(multiplier_f) < 0.5) {
- multiplier = static_cast<int64_t>(std::floor(multiplier_f));
- } else {
- multiplier = static_cast<int64_t>(std::floor(multiplier_f)) + 1;
- }
- // The multiplier is a uint32.
- ASSERT(multiplier > 0 &&
- multiplier < (static_cast<int64_t>(1) << 32));
- __ mov(scratch, dividend);
- if (divisor < 0 &&
- instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) {
- __ test(dividend, dividend);
- DeoptimizeIf(zero, instr->environment());
- }
- __ mov(edx, static_cast<int32_t>(multiplier));
- __ imul(edx);
- if (static_cast<int32_t>(multiplier) < 0) {
- __ add(edx, scratch);
- }
- Register reg_lo = eax;
- Register reg_byte_scratch = scratch;
- if (!reg_byte_scratch.is_byte_register()) {
- __ xchg(reg_lo, reg_byte_scratch);
- reg_lo = scratch;
- reg_byte_scratch = eax;
- }
- if (divisor < 0) {
- __ xor_(reg_byte_scratch, reg_byte_scratch);
- __ cmp(reg_lo, 0x40000000);
- __ setcc(above, reg_byte_scratch);
- __ neg(edx);
- __ sub(edx, reg_byte_scratch);
- } else {
- __ xor_(reg_byte_scratch, reg_byte_scratch);
- __ cmp(reg_lo, 0xC0000000);
- __ setcc(above_equal, reg_byte_scratch);
- __ add(edx, reg_byte_scratch);
- }
- __ sar(edx, shift - 32);
+ // Check for (0 / -x) that will produce negative zero.
+ HMathFloorOfDiv* hdiv = instr->hydrogen();
+ if (hdiv->CheckFlag(HValue::kBailoutOnMinusZero) && divisor < 0) {
+ __ test(dividend, dividend);
+ DeoptimizeIf(zero, instr->environment());
}
+
+ // Easy case: We need no dynamic check for the dividend and the flooring
+ // division is the same as the truncating division.
+ if ((divisor > 0 && !hdiv->CheckFlag(HValue::kLeftCanBeNegative)) ||
+ (divisor < 0 && !hdiv->CheckFlag(HValue::kLeftCanBePositive))) {
+ __ TruncatingDiv(dividend, Abs(divisor));
+ if (divisor < 0) __ neg(edx);
+ return;
+ }
+
+ // In the general case we may need to adjust before and after the truncating
+ // division to get a flooring division.
+ Register temp = ToRegister(instr->temp3());
+ ASSERT(!temp.is(dividend) && !temp.is(eax) && !temp.is(edx));
+ Label needs_adjustment, done;
+ __ cmp(dividend, Immediate(0));
+ __ j(divisor > 0 ? less : greater, &needs_adjustment, Label::kNear);
+ __ TruncatingDiv(dividend, Abs(divisor));
+ if (divisor < 0) __ neg(edx);
+ __ jmp(&done, Label::kNear);
+ __ bind(&needs_adjustment);
+ __ lea(temp, Operand(dividend, divisor > 0 ? 1 : -1));
+ __ TruncatingDiv(temp, Abs(divisor));
+ if (divisor < 0) __ neg(edx);
+ __ dec(edx);
+ __ bind(&done);
}
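The needs_adjustment path above turns a truncating division into a flooring one by nudging the dividend one step toward zero, dividing, and subtracting 1; the adjustment is only needed when dividend and divisor have strictly opposite signs. A plain-C++ model of that adjustment (assuming divisor != 0 and the kMinInt edge cases excluded, as in the surrounding code):

#include <cassert>

static int FlooringDivByConst(int n, int d) {
  bool opposite_signs = (n < 0) != (d < 0) && n != 0;
  if (!opposite_signs) return n / d;          // trunc == floor when the signs agree
  int biased = n + (d > 0 ? 1 : -1);          // one step toward zero
  return biased / d - 1;                      // truncating divide, then subtract 1
}

int main() {
  assert(FlooringDivByConst(-7, 3) == -3);    // floor(-7/3)
  assert(FlooringDivByConst( 7, -3) == -3);   // floor(7/-3)
  assert(FlooringDivByConst(-6, 3) == -2);    // exact division: adjustment still lands right
}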
@@ -1894,12 +1926,12 @@ void LCodeGen::DoSubI(LSubI* instr) {
void LCodeGen::DoConstantI(LConstantI* instr) {
- __ Set(ToRegister(instr->result()), Immediate(instr->value()));
+ __ Move(ToRegister(instr->result()), Immediate(instr->value()));
}
void LCodeGen::DoConstantS(LConstantS* instr) {
- __ Set(ToRegister(instr->result()), Immediate(instr->value()));
+ __ Move(ToRegister(instr->result()), Immediate(instr->value()));
}
@@ -1926,22 +1958,22 @@ void LCodeGen::DoConstantD(LConstantD* instr) {
if (CpuFeatures::IsSupported(SSE4_1)) {
CpuFeatureScope scope2(masm(), SSE4_1);
if (lower != 0) {
- __ Set(temp, Immediate(lower));
+ __ Move(temp, Immediate(lower));
__ movd(res, Operand(temp));
- __ Set(temp, Immediate(upper));
+ __ Move(temp, Immediate(upper));
__ pinsrd(res, Operand(temp), 1);
} else {
__ xorps(res, res);
- __ Set(temp, Immediate(upper));
+ __ Move(temp, Immediate(upper));
__ pinsrd(res, Operand(temp), 1);
}
} else {
- __ Set(temp, Immediate(upper));
+ __ Move(temp, Immediate(upper));
__ movd(res, Operand(temp));
__ psllq(res, 32);
if (lower != 0) {
XMMRegister xmm_scratch = double_scratch0();
- __ Set(temp, Immediate(lower));
+ __ Move(temp, Immediate(lower));
__ movd(xmm_scratch, Operand(temp));
__ orps(res, xmm_scratch);
}
@@ -2622,8 +2654,8 @@ void LCodeGen::DoCompareMinusZeroAndBranch(LCompareMinusZeroAndBranch* instr) {
Handle<Map> map = masm()->isolate()->factory()->heap_number_map();
__ CheckMap(value, map, instr->FalseLabel(chunk()), DO_SMI_CHECK);
__ cmp(FieldOperand(value, HeapNumber::kExponentOffset),
- Immediate(0x80000000));
- EmitFalseBranch(instr, not_equal);
+ Immediate(0x1));
+ EmitFalseBranch(instr, no_overflow);
__ cmp(FieldOperand(value, HeapNumber::kMantissaOffset),
Immediate(0x00000000));
EmitBranch(instr, equal);
@@ -3409,7 +3441,7 @@ void LCodeGen::DoLoadKeyedExternalArray(LLoadKeyed* instr) {
case FAST_HOLEY_ELEMENTS:
case FAST_HOLEY_DOUBLE_ELEMENTS:
case DICTIONARY_ELEMENTS:
- case NON_STRICT_ARGUMENTS_ELEMENTS:
+ case SLOPPY_ARGUMENTS_ELEMENTS:
UNREACHABLE();
break;
}
@@ -3707,7 +3739,7 @@ void LCodeGen::DoDeclareGlobals(LDeclareGlobals* instr) {
__ push(esi); // The context is the first argument.
__ push(Immediate(instr->hydrogen()->pairs()));
__ push(Immediate(Smi::FromInt(instr->hydrogen()->flags())));
- CallRuntime(Runtime::kDeclareGlobals, 3, instr);
+ CallRuntime(Runtime::kHiddenDeclareGlobals, 3, instr);
}
@@ -3831,7 +3863,7 @@ void LCodeGen::DoDeferredMathAbsTaggedHeapNumber(LMathAbs* instr) {
// Slow case: Call the runtime system to do the number allocation.
__ bind(&slow);
- CallRuntimeFromDeferred(Runtime::kAllocateHeapNumber, 0,
+ CallRuntimeFromDeferred(Runtime::kHiddenAllocateHeapNumber, 0,
instr, instr->context());
// Set the pointer to the new heap number in tmp.
if (!tmp.is(eax)) __ mov(tmp, eax);
@@ -3923,8 +3955,8 @@ void LCodeGen::DoMathFloor(LMathFloor* instr) {
__ roundsd(xmm_scratch, input_reg, Assembler::kRoundDown);
__ cvttsd2si(output_reg, Operand(xmm_scratch));
// Overflow is signalled with minint.
- __ cmp(output_reg, 0x80000000u);
- DeoptimizeIf(equal, instr->environment());
+ __ cmp(output_reg, 0x1);
+ DeoptimizeIf(overflow, instr->environment());
} else {
Label negative_sign, done;
// Deoptimize on unordered.
@@ -3940,7 +3972,7 @@ void LCodeGen::DoMathFloor(LMathFloor* instr) {
__ movmskpd(output_reg, input_reg);
__ test(output_reg, Immediate(1));
DeoptimizeIf(not_zero, instr->environment());
- __ Set(output_reg, Immediate(0));
+ __ Move(output_reg, Immediate(0));
__ jmp(&done, Label::kNear);
__ bind(&positive_sign);
}
@@ -3948,8 +3980,8 @@ void LCodeGen::DoMathFloor(LMathFloor* instr) {
// Use truncating instruction (OK because input is positive).
__ cvttsd2si(output_reg, Operand(input_reg));
// Overflow is signalled with minint.
- __ cmp(output_reg, 0x80000000u);
- DeoptimizeIf(equal, instr->environment());
+ __ cmp(output_reg, 0x1);
+ DeoptimizeIf(overflow, instr->environment());
__ jmp(&done, Label::kNear);
// Non-zero negative reaches here.
@@ -3988,9 +4020,9 @@ void LCodeGen::DoMathRound(LMathRound* instr) {
__ addsd(xmm_scratch, input_reg);
__ cvttsd2si(output_reg, Operand(xmm_scratch));
// Overflow is signalled with minint.
- __ cmp(output_reg, 0x80000000u);
+ __ cmp(output_reg, 0x1);
__ RecordComment("D2I conversion overflow");
- DeoptimizeIf(equal, instr->environment());
+ DeoptimizeIf(overflow, instr->environment());
__ jmp(&done, dist);
__ bind(&below_one_half);
@@ -4004,9 +4036,9 @@ void LCodeGen::DoMathRound(LMathRound* instr) {
__ subsd(input_temp, xmm_scratch);
__ cvttsd2si(output_reg, Operand(input_temp));
// Catch minint due to overflow, and to prevent overflow when compensating.
- __ cmp(output_reg, 0x80000000u);
+ __ cmp(output_reg, 0x1);
__ RecordComment("D2I conversion overflow");
- DeoptimizeIf(equal, instr->environment());
+ DeoptimizeIf(overflow, instr->environment());
__ Cvtsi2sd(xmm_scratch, output_reg);
__ ucomisd(xmm_scratch, input_temp);
@@ -4025,7 +4057,7 @@ void LCodeGen::DoMathRound(LMathRound* instr) {
__ RecordComment("Minus zero");
DeoptimizeIf(not_zero, instr->environment());
}
- __ Set(output_reg, Immediate(0));
+ __ Move(output_reg, Immediate(0));
__ bind(&done);
}
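
Note on the repeated cmp(output_reg, 0x1) / DeoptimizeIf(overflow, ...) pattern above: it replaces the old equality test against 0x80000000u. cvttsd2si reports a failed conversion by producing kMinInt, and subtracting 1 from kMinInt is the only 32-bit case in which the comparison sets the overflow flag. A minimal C++ sketch of the flag logic (not of the emitted assembly):

    #include <cassert>
    #include <cstdint>
    #include <limits>

    // Overflow flag of the 32-bit subtraction a - b, which is what CMP computes.
    bool SubSetsOverflowFlag(int32_t a, int32_t b) {
      int64_t wide = static_cast<int64_t>(a) - static_cast<int64_t>(b);
      return wide < std::numeric_limits<int32_t>::min() ||
             wide > std::numeric_limits<int32_t>::max();
    }

    int main() {
      const int32_t kMinInt = std::numeric_limits<int32_t>::min();
      // cvttsd2si signals a failed conversion with kMinInt; cmp(reg, 1) sets
      // the overflow flag for that value and for no other input.
      assert(SubSetsOverflowFlag(kMinInt, 1));
      assert(!SubSetsOverflowFlag(0, 1));
      assert(!SubSetsOverflowFlag(-1, 1));
      assert(!SubSetsOverflowFlag(std::numeric_limits<int32_t>::max(), 1));
      return 0;
    }
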
@@ -4138,6 +4170,21 @@ void LCodeGen::DoMathLog(LMathLog* instr) {
}
+void LCodeGen::DoMathClz32(LMathClz32* instr) {
+ CpuFeatureScope scope(masm(), SSE2);
+ Register input = ToRegister(instr->value());
+ Register result = ToRegister(instr->result());
+ Label not_zero_input;
+ __ bsr(result, input);
+
+ __ j(not_zero, &not_zero_input);
+ __ Move(result, Immediate(63)); // 63^31 == 32
+
+ __ bind(&not_zero_input);
+ __ xor_(result, Immediate(31)); // for x in [0..31], 31^x == 31-x.
+}
+
+
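
The new DoMathClz32 rests on two bit tricks: for a non-zero input, bsr yields the index of the highest set bit, and xoring an index in [0, 31] with 31 gives 31 - index, which is the leading-zero count; for a zero input bsr leaves its destination undefined (only ZF is meaningful), so the code preloads 63, which the final xor turns into 32. A small stand-alone C++ model of that arithmetic, with the bsr behaviour taken from the comments above:

    #include <cassert>
    #include <cstdint>

    // For x != 0, "index" is what bsr computes; 31 ^ index == 31 - index.
    // For x == 0 the preloaded 63 makes the final xor produce 32.
    uint32_t Clz32(uint32_t x) {
      if (x == 0) return 63 ^ 31;  // == 32
      int index = 31;
      while (((x >> index) & 1u) == 0) --index;
      return static_cast<uint32_t>(31 ^ index);
    }

    int main() {
      assert(Clz32(0) == 32);
      assert(Clz32(1) == 31);
      assert(Clz32(0x80000000u) == 0);
      assert(Clz32(0x00010000u) == 15);
      return 0;
    }
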
void LCodeGen::DoMathExp(LMathExp* instr) {
CpuFeatureScope scope(masm(), SSE2);
XMMRegister input = ToDoubleRegister(instr->value());
@@ -4189,10 +4236,9 @@ void LCodeGen::DoCallNew(LCallNew* instr) {
ASSERT(ToRegister(instr->result()).is(eax));
// No cell in ebx for construct type feedback in optimized code
- Handle<Object> undefined_value(isolate()->factory()->undefined_value());
- __ mov(ebx, Immediate(undefined_value));
+ __ mov(ebx, isolate()->factory()->undefined_value());
CallConstructStub stub(NO_CALL_FUNCTION_FLAGS);
- __ Set(eax, Immediate(instr->arity()));
+ __ Move(eax, Immediate(instr->arity()));
CallCode(stub.GetCode(isolate()), RelocInfo::CONSTRUCT_CALL, instr);
}
@@ -4202,8 +4248,8 @@ void LCodeGen::DoCallNewArray(LCallNewArray* instr) {
ASSERT(ToRegister(instr->constructor()).is(edi));
ASSERT(ToRegister(instr->result()).is(eax));
- __ Set(eax, Immediate(instr->arity()));
- __ mov(ebx, factory()->undefined_value());
+ __ Move(eax, Immediate(instr->arity()));
+ __ mov(ebx, isolate()->factory()->undefined_value());
ElementsKind kind = instr->hydrogen()->elements_kind();
AllocationSiteOverrideMode override_mode =
(AllocationSite::GetMode(kind) == TRACK_ALLOCATION_SITE)
@@ -4291,18 +4337,17 @@ void LCodeGen::DoStoreNamedField(LStoreNamedField* instr) {
Register object = ToRegister(instr->object());
Handle<Map> transition = instr->transition();
+ SmiCheck check_needed =
+ instr->hydrogen()->value()->IsHeapObject()
+ ? OMIT_SMI_CHECK : INLINE_SMI_CHECK;
- if (FLAG_track_fields && representation.IsSmi()) {
- if (instr->value()->IsConstantOperand()) {
- LConstantOperand* operand_value = LConstantOperand::cast(instr->value());
- if (!IsSmi(operand_value)) {
- DeoptimizeIf(no_condition, instr->environment());
- }
- }
- } else if (FLAG_track_heap_object_fields && representation.IsHeapObject()) {
+ ASSERT(!(representation.IsSmi() &&
+ instr->value()->IsConstantOperand() &&
+ !IsSmi(LConstantOperand::cast(instr->value()))));
+ if (representation.IsHeapObject()) {
if (instr->value()->IsConstantOperand()) {
LConstantOperand* operand_value = LConstantOperand::cast(instr->value());
- if (IsInteger32(operand_value)) {
+ if (chunk_->LookupConstant(operand_value)->HasSmiValue()) {
DeoptimizeIf(no_condition, instr->environment());
}
} else {
@@ -4310,6 +4355,9 @@ void LCodeGen::DoStoreNamedField(LStoreNamedField* instr) {
Register value = ToRegister(instr->value());
__ test(value, Immediate(kSmiTagMask));
DeoptimizeIf(zero, instr->environment());
+
+ // We know now that value is not a smi, so we can omit the check below.
+ check_needed = OMIT_SMI_CHECK;
}
}
} else if (representation.IsDouble()) {
@@ -4347,10 +4395,6 @@ void LCodeGen::DoStoreNamedField(LStoreNamedField* instr) {
}
// Do the store.
- SmiCheck check_needed =
- instr->hydrogen()->value()->IsHeapObject()
- ? OMIT_SMI_CHECK : INLINE_SMI_CHECK;
-
Register write_register = object;
if (!access.IsInobject()) {
write_register = ToRegister(instr->temp());
@@ -4398,8 +4442,7 @@ void LCodeGen::DoStoreNamedGeneric(LStoreNamedGeneric* instr) {
ASSERT(ToRegister(instr->value()).is(eax));
__ mov(ecx, instr->name());
- Handle<Code> ic = StoreIC::initialize_stub(isolate(),
- instr->strict_mode_flag());
+ Handle<Code> ic = StoreIC::initialize_stub(isolate(), instr->strict_mode());
CallCode(ic, RelocInfo::CODE_TARGET, instr);
}
@@ -4504,7 +4547,7 @@ void LCodeGen::DoStoreKeyedExternalArray(LStoreKeyed* instr) {
case FAST_HOLEY_ELEMENTS:
case FAST_HOLEY_DOUBLE_ELEMENTS:
case DICTIONARY_ELEMENTS:
- case NON_STRICT_ARGUMENTS_ELEMENTS:
+ case SLOPPY_ARGUMENTS_ELEMENTS:
UNREACHABLE();
break;
}
@@ -4650,7 +4693,7 @@ void LCodeGen::DoStoreKeyedGeneric(LStoreKeyedGeneric* instr) {
ASSERT(ToRegister(instr->key()).is(ecx));
ASSERT(ToRegister(instr->value()).is(eax));
- Handle<Code> ic = (instr->strict_mode_flag() == kStrictMode)
+ Handle<Code> ic = instr->strict_mode() == STRICT
? isolate()->builtins()->KeyedStoreIC_Initialize_Strict()
: isolate()->builtins()->KeyedStoreIC_Initialize();
CallCode(ic, RelocInfo::CODE_TARGET, instr);
@@ -4743,7 +4786,7 @@ void LCodeGen::DoDeferredStringCharCodeAt(LStringCharCodeAt* instr) {
// TODO(3095996): Get rid of this. For now, we need to make the
// result register contain a valid pointer because it is already
// contained in the register pointer map.
- __ Set(result, Immediate(0));
+ __ Move(result, Immediate(0));
PushSafepointRegistersScope scope(this);
__ push(string);
@@ -4759,7 +4802,7 @@ void LCodeGen::DoDeferredStringCharCodeAt(LStringCharCodeAt* instr) {
__ SmiTag(index);
__ push(index);
}
- CallRuntimeFromDeferred(Runtime::kStringCharCodeAt, 2,
+ CallRuntimeFromDeferred(Runtime::kHiddenStringCharCodeAt, 2,
instr, instr->context());
__ AssertSmi(eax);
__ SmiUntag(eax);
@@ -4792,7 +4835,7 @@ void LCodeGen::DoStringCharFromCode(LStringCharFromCode* instr) {
__ cmp(char_code, String::kMaxOneByteCharCode);
__ j(above, deferred->entry());
- __ Set(result, Immediate(factory()->single_character_string_cache()));
+ __ Move(result, Immediate(factory()->single_character_string_cache()));
__ mov(result, FieldOperand(result,
char_code, times_pointer_size,
FixedArray::kHeaderSize));
@@ -4809,7 +4852,7 @@ void LCodeGen::DoDeferredStringCharFromCode(LStringCharFromCode* instr) {
// TODO(3095996): Get rid of this. For now, we need to make the
// result register contain a valid pointer because it is already
// contained in the register pointer map.
- __ Set(result, Immediate(0));
+ __ Move(result, Immediate(0));
PushSafepointRegistersScope scope(this);
__ SmiTag(char_code);
@@ -4848,16 +4891,6 @@ void LCodeGen::DoInteger32ToDouble(LInteger32ToDouble* instr) {
}
-void LCodeGen::DoInteger32ToSmi(LInteger32ToSmi* instr) {
- Register input = ToRegister(instr->value());
- __ SmiTag(input);
- if (!instr->hydrogen()->value()->HasRange() ||
- !instr->hydrogen()->value()->range()->IsInSmiRange()) {
- DeoptimizeIf(overflow, instr->environment());
- }
-}
-
-
void LCodeGen::DoUint32ToDouble(LUint32ToDouble* instr) {
LOperand* input = instr->value();
LOperand* output = instr->result();
@@ -4877,17 +4910,6 @@ void LCodeGen::DoUint32ToDouble(LUint32ToDouble* instr) {
}
-void LCodeGen::DoUint32ToSmi(LUint32ToSmi* instr) {
- Register input = ToRegister(instr->value());
- if (!instr->hydrogen()->value()->HasRange() ||
- !instr->hydrogen()->value()->range()->IsInSmiRange()) {
- __ test(input, Immediate(0xc0000000));
- DeoptimizeIf(not_zero, instr->environment());
- }
- __ SmiTag(input);
-}
-
-
void LCodeGen::DoNumberTagI(LNumberTagI* instr) {
class DeferredNumberTagI V8_FINAL : public LDeferredCode {
public:
@@ -4896,7 +4918,8 @@ void LCodeGen::DoNumberTagI(LNumberTagI* instr) {
const X87Stack& x87_stack)
: LDeferredCode(codegen, x87_stack), instr_(instr) { }
virtual void Generate() V8_OVERRIDE {
- codegen()->DoDeferredNumberTagI(instr_, instr_->value(), SIGNED_INT32);
+ codegen()->DoDeferredNumberTagIU(instr_, instr_->value(), instr_->temp(),
+ NULL, SIGNED_INT32);
}
virtual LInstruction* instr() V8_OVERRIDE { return instr_; }
private:
@@ -4923,7 +4946,8 @@ void LCodeGen::DoNumberTagU(LNumberTagU* instr) {
const X87Stack& x87_stack)
: LDeferredCode(codegen, x87_stack), instr_(instr) { }
virtual void Generate() V8_OVERRIDE {
- codegen()->DoDeferredNumberTagI(instr_, instr_->value(), UNSIGNED_INT32);
+ codegen()->DoDeferredNumberTagIU(instr_, instr_->value(), instr_->temp1(),
+ instr_->temp2(), UNSIGNED_INT32);
}
virtual LInstruction* instr() V8_OVERRIDE { return instr_; }
private:
@@ -4943,19 +4967,16 @@ void LCodeGen::DoNumberTagU(LNumberTagU* instr) {
}
-void LCodeGen::DoDeferredNumberTagI(LInstruction* instr,
- LOperand* value,
- IntegerSignedness signedness) {
- Label slow;
+void LCodeGen::DoDeferredNumberTagIU(LInstruction* instr,
+ LOperand* value,
+ LOperand* temp1,
+ LOperand* temp2,
+ IntegerSignedness signedness) {
+ Label done, slow;
Register reg = ToRegister(value);
- Register tmp = reg.is(eax) ? ecx : eax;
+ Register tmp = ToRegister(temp1);
XMMRegister xmm_scratch = double_scratch0();
- // Preserve the value of all registers.
- PushSafepointRegistersScope scope(this);
-
- Label done;
-
if (signedness == SIGNED_INT32) {
// There was overflow, so bits 30 and 31 of the original integer
// disagree. Try to allocate a heap number in new space and store
@@ -4973,8 +4994,7 @@ void LCodeGen::DoDeferredNumberTagI(LInstruction* instr,
} else {
if (CpuFeatures::IsSupported(SSE2)) {
CpuFeatureScope feature_scope(masm(), SSE2);
- __ LoadUint32(xmm_scratch, reg,
- ToDoubleRegister(LNumberTagU::cast(instr)->temp()));
+ __ LoadUint32(xmm_scratch, reg, ToDoubleRegister(temp2));
} else {
// There's no fild variant for unsigned values, so zero-extend to a 64-bit
// int manually.
@@ -4993,21 +5013,26 @@ void LCodeGen::DoDeferredNumberTagI(LInstruction* instr,
// Slow case: Call the runtime system to do the number allocation.
__ bind(&slow);
+ {
+ // TODO(3095996): Put a valid pointer value in the stack slot where the
+ // result register is stored, as this register is in the pointer map, but
+ // contains an integer value.
+ __ Move(reg, Immediate(0));
- // TODO(3095996): Put a valid pointer value in the stack slot where the result
- // register is stored, as this register is in the pointer map, but contains an
- // integer value.
- __ StoreToSafepointRegisterSlot(reg, Immediate(0));
- // NumberTagI and NumberTagD use the context from the frame, rather than
- // the environment's HContext or HInlinedContext value.
- // They only call Runtime::kAllocateHeapNumber.
- // The corresponding HChange instructions are added in a phase that does
- // not have easy access to the local context.
- __ mov(esi, Operand(ebp, StandardFrameConstants::kContextOffset));
- __ CallRuntimeSaveDoubles(Runtime::kAllocateHeapNumber);
- RecordSafepointWithRegisters(
- instr->pointer_map(), 0, Safepoint::kNoLazyDeopt);
- if (!reg.is(eax)) __ mov(reg, eax);
+ // Preserve the value of all registers.
+ PushSafepointRegistersScope scope(this);
+
+ // NumberTagI and NumberTagD use the context from the frame, rather than
+ // the environment's HContext or HInlinedContext value.
+ // They only call Runtime::kHiddenAllocateHeapNumber.
+ // The corresponding HChange instructions are added in a phase that does
+ // not have easy access to the local context.
+ __ mov(esi, Operand(ebp, StandardFrameConstants::kContextOffset));
+ __ CallRuntimeSaveDoubles(Runtime::kHiddenAllocateHeapNumber);
+ RecordSafepointWithRegisters(
+ instr->pointer_map(), 0, Safepoint::kNoLazyDeopt);
+ __ StoreToSafepointRegisterSlot(reg, eax);
+ }
// Done. Put the value in xmm_scratch into the value of the allocated heap
// number.
@@ -5018,7 +5043,6 @@ void LCodeGen::DoDeferredNumberTagI(LInstruction* instr,
} else {
__ fstp_d(FieldOperand(reg, HeapNumber::kValueOffset));
}
- __ StoreToSafepointRegisterSlot(reg, reg);
}
@@ -5070,16 +5094,16 @@ void LCodeGen::DoDeferredNumberTagD(LNumberTagD* instr) {
// result register contain a valid pointer because it is already
// contained in the register pointer map.
Register reg = ToRegister(instr->result());
- __ Set(reg, Immediate(0));
+ __ Move(reg, Immediate(0));
PushSafepointRegistersScope scope(this);
// NumberTagI and NumberTagD use the context from the frame, rather than
// the environment's HContext or HInlinedContext value.
- // They only call Runtime::kAllocateHeapNumber.
+ // They only call Runtime::kHiddenAllocateHeapNumber.
// The corresponding HChange instructions are added in a phase that does
// not have easy access to the local context.
__ mov(esi, Operand(ebp, StandardFrameConstants::kContextOffset));
- __ CallRuntimeSaveDoubles(Runtime::kAllocateHeapNumber);
+ __ CallRuntimeSaveDoubles(Runtime::kHiddenAllocateHeapNumber);
RecordSafepointWithRegisters(
instr->pointer_map(), 0, Safepoint::kNoLazyDeopt);
__ StoreToSafepointRegisterSlot(reg, eax);
@@ -5087,10 +5111,18 @@ void LCodeGen::DoDeferredNumberTagD(LNumberTagD* instr) {
void LCodeGen::DoSmiTag(LSmiTag* instr) {
- LOperand* input = instr->value();
- ASSERT(input->IsRegister() && input->Equals(instr->result()));
- ASSERT(!instr->hydrogen_value()->CheckFlag(HValue::kCanOverflow));
- __ SmiTag(ToRegister(input));
+ HChange* hchange = instr->hydrogen();
+ Register input = ToRegister(instr->value());
+ if (hchange->CheckFlag(HValue::kCanOverflow) &&
+ hchange->value()->CheckFlag(HValue::kUint32)) {
+ __ test(input, Immediate(0xc0000000));
+ DeoptimizeIf(not_zero, instr->environment());
+ }
+ __ SmiTag(input);
+ if (hchange->CheckFlag(HValue::kCanOverflow) &&
+ !hchange->value()->CheckFlag(HValue::kUint32)) {
+ DeoptimizeIf(overflow, instr->environment());
+ }
}
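
The rewritten DoSmiTag folds the old LInteger32ToSmi/LUint32ToSmi paths into one: uint32 inputs are range-checked by testing the two top bits (any value of 2^30 or more cannot become a 31-bit smi), while signed inputs are tagged first and deoptimize on shift overflow. A sketch of the two checks, assuming the 31-bit smi range [-2^30, 2^30 - 1] used on ia32:

    #include <cassert>
    #include <cstdint>

    // test(input, Immediate(0xc0000000)): either of the top two bits set
    // means the unsigned value is >= 2^30 and cannot be tagged.
    bool Uint32FitsInSmi(uint32_t v) {
      return (v & 0xc0000000u) == 0;
    }

    // SmiTag shifts left by one; the result must still fit in 32 bits,
    // i.e. v must lie in [-2^30, 2^30 - 1].
    bool Int32TagOverflows(int32_t v) {
      return v < -(1 << 30) || v > (1 << 30) - 1;
    }

    int main() {
      assert(Uint32FitsInSmi(0x3fffffffu));
      assert(!Uint32FitsInSmi(0x40000000u));
      assert(!Int32TagOverflows(0x3fffffff));
      assert(Int32TagOverflows(0x40000000));
      assert(!Int32TagOverflows(-0x40000000));
      assert(Int32TagOverflows(-0x40000001));
      return 0;
    }
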
@@ -5243,6 +5275,10 @@ void LCodeGen::EmitNumberUntagD(Register input_reg,
void LCodeGen::DoDeferredTaggedToI(LTaggedToI* instr, Label* done) {
Register input_reg = ToRegister(instr->value());
+ // The input was optimistically untagged; revert it.
+ STATIC_ASSERT(kSmiTagSize == 1);
+ __ lea(input_reg, Operand(input_reg, times_2, kHeapObjectTag));
+
if (instr->truncating()) {
Label no_heap_number, check_bools, check_false;
@@ -5258,21 +5294,20 @@ void LCodeGen::DoDeferredTaggedToI(LTaggedToI* instr, Label* done) {
// for truncating conversions.
__ cmp(input_reg, factory()->undefined_value());
__ j(not_equal, &check_bools, Label::kNear);
- __ Set(input_reg, Immediate(0));
+ __ Move(input_reg, Immediate(0));
__ jmp(done);
__ bind(&check_bools);
__ cmp(input_reg, factory()->true_value());
__ j(not_equal, &check_false, Label::kNear);
- __ Set(input_reg, Immediate(1));
+ __ Move(input_reg, Immediate(1));
__ jmp(done);
__ bind(&check_false);
__ cmp(input_reg, factory()->false_value());
__ RecordComment("Deferred TaggedToI: cannot truncate");
DeoptimizeIf(not_equal, instr->environment());
- __ Set(input_reg, Immediate(0));
- __ jmp(done);
+ __ Move(input_reg, Immediate(0));
} else {
Label bailout;
XMMRegister scratch = (instr->temp() != NULL)
@@ -5312,9 +5347,13 @@ void LCodeGen::DoTaggedToI(LTaggedToI* instr) {
} else {
DeferredTaggedToI* deferred =
new(zone()) DeferredTaggedToI(this, instr, x87_stack_);
-
- __ JumpIfNotSmi(input_reg, deferred->entry());
+ // Optimistically untag the input.
+ // If the input is a HeapObject, SmiUntag will set the carry flag.
+ STATIC_ASSERT(kSmiTagSize == 1 && kSmiTag == 0);
__ SmiUntag(input_reg);
+ // Branch to deferred code if the input was tagged.
+ // The deferred code will take care of restoring the tag.
+ __ j(carry, deferred->entry());
__ bind(deferred->exit());
}
}
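
The optimistic untagging in DoTaggedToI depends on the ia32 smi layout asserted above (kSmiTag == 0, kSmiTagSize == 1, kHeapObjectTag == 1): SmiUntag is an arithmetic right shift by one, so the bit shifted out (the tag) lands in the carry flag, and a set carry routes control to the deferred code, whose lea reconstructs the original tagged pointer. A schematic C++ model of tag, untag and revert:

    #include <cassert>
    #include <cstdint>

    struct UntagResult {
      int32_t value;
      bool carry;  // set => the input was a heap object pointer, not a smi
    };

    // Models "sar reg, 1": the old low bit goes into the carry flag.
    UntagResult OptimisticSmiUntag(int32_t tagged) {
      UntagResult r;
      r.carry = (tagged & 1) != 0;
      r.value = tagged >> 1;
      return r;
    }

    // Matches lea(input_reg, Operand(input_reg, times_2, kHeapObjectTag)).
    int32_t RevertUntag(int32_t shifted) {
      return shifted * 2 + 1;
    }

    int main() {
      UntagResult smi = OptimisticSmiUntag(42 * 2);  // tagged smi 42
      assert(!smi.carry && smi.value == 42);

      int32_t fake_heap_ptr = 0x1235;  // odd, i.e. tagged heap pointer
      UntagResult obj = OptimisticSmiUntag(fake_heap_ptr);
      assert(obj.carry);
      assert(RevertUntag(obj.value) == fake_heap_ptr);
      return 0;
    }
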
@@ -5746,6 +5785,45 @@ void LCodeGen::DoClampTToUint8NoSSE2(LClampTToUint8NoSSE2* instr) {
}
+void LCodeGen::DoDoubleBits(LDoubleBits* instr) {
+ CpuFeatureScope scope(masm(), SSE2);
+ XMMRegister value_reg = ToDoubleRegister(instr->value());
+ Register result_reg = ToRegister(instr->result());
+ if (instr->hydrogen()->bits() == HDoubleBits::HIGH) {
+ if (CpuFeatures::IsSupported(SSE4_1)) {
+ CpuFeatureScope scope2(masm(), SSE4_1);
+ __ pextrd(result_reg, value_reg, 1);
+ } else {
+ XMMRegister xmm_scratch = double_scratch0();
+ __ pshufd(xmm_scratch, value_reg, 1);
+ __ movd(result_reg, xmm_scratch);
+ }
+ } else {
+ __ movd(result_reg, value_reg);
+ }
+}
+
+
+void LCodeGen::DoConstructDouble(LConstructDouble* instr) {
+ Register hi_reg = ToRegister(instr->hi());
+ Register lo_reg = ToRegister(instr->lo());
+ XMMRegister result_reg = ToDoubleRegister(instr->result());
+ CpuFeatureScope scope(masm(), SSE2);
+
+ if (CpuFeatures::IsSupported(SSE4_1)) {
+ CpuFeatureScope scope2(masm(), SSE4_1);
+ __ movd(result_reg, lo_reg);
+ __ pinsrd(result_reg, hi_reg, 1);
+ } else {
+ XMMRegister xmm_scratch = double_scratch0();
+ __ movd(result_reg, hi_reg);
+ __ psllq(result_reg, 32);
+ __ movd(xmm_scratch, lo_reg);
+ __ orps(result_reg, xmm_scratch);
+ }
+}
+
+
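
DoDoubleBits and DoConstructDouble move between a double and its two 32-bit halves; the SSE2 fallback shifts the high word into bits 63..32 with psllq and ORs in the low word. The same transformation in portable C++, included only to pin down what the register shuffling computes:

    #include <cassert>
    #include <cstdint>
    #include <cstring>

    uint32_t HighBits(double d) {
      uint64_t bits;
      std::memcpy(&bits, &d, sizeof(bits));
      return static_cast<uint32_t>(bits >> 32);
    }

    uint32_t LowBits(double d) {
      uint64_t bits;
      std::memcpy(&bits, &d, sizeof(bits));
      return static_cast<uint32_t>(bits);
    }

    // Mirrors the psllq/orps fallback: place hi in bits 63..32, OR in lo.
    double ConstructDouble(uint32_t hi, uint32_t lo) {
      uint64_t bits = (static_cast<uint64_t>(hi) << 32) | lo;
      double d;
      std::memcpy(&d, &bits, sizeof(d));
      return d;
    }

    int main() {
      double minus_zero = -0.0;
      assert(HighBits(minus_zero) == 0x80000000u && LowBits(minus_zero) == 0);
      assert(ConstructDouble(HighBits(1.5), LowBits(1.5)) == 1.5);
      return 0;
    }
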
void LCodeGen::DoAllocate(LAllocate* instr) {
class DeferredAllocate V8_FINAL : public LDeferredCode {
public:
@@ -5820,7 +5898,7 @@ void LCodeGen::DoDeferredAllocate(LAllocate* instr) {
// TODO(3095996): Get rid of this. For now, we need to make the
// result register contain a valid pointer because it is already
// contained in the register pointer map.
- __ Set(result, Immediate(Smi::FromInt(0)));
+ __ Move(result, Immediate(Smi::FromInt(0)));
PushSafepointRegistersScope scope(this);
if (instr->size()->IsRegister()) {
@@ -5848,7 +5926,7 @@ void LCodeGen::DoDeferredAllocate(LAllocate* instr) {
__ push(Immediate(Smi::FromInt(flags)));
CallRuntimeFromDeferred(
- Runtime::kAllocateInTargetSpace, 2, instr, instr->context());
+ Runtime::kHiddenAllocateInTargetSpace, 2, instr, instr->context());
__ StoreToSafepointRegisterSlot(result, eax);
}
@@ -5881,7 +5959,7 @@ void LCodeGen::DoRegExpLiteral(LRegExpLiteral* instr) {
__ push(Immediate(Smi::FromInt(instr->hydrogen()->literal_index())));
__ push(Immediate(instr->hydrogen()->pattern()));
__ push(Immediate(instr->hydrogen()->flags()));
- CallRuntime(Runtime::kMaterializeRegExpLiteral, 4, instr);
+ CallRuntime(Runtime::kHiddenMaterializeRegExpLiteral, 4, instr);
__ mov(ebx, eax);
__ bind(&materialized);
@@ -5893,7 +5971,7 @@ void LCodeGen::DoRegExpLiteral(LRegExpLiteral* instr) {
__ bind(&runtime_allocate);
__ push(ebx);
__ push(Immediate(Smi::FromInt(size)));
- CallRuntime(Runtime::kAllocateInNewSpace, 1, instr);
+ CallRuntime(Runtime::kHiddenAllocateInNewSpace, 1, instr);
__ pop(ebx);
__ bind(&allocated);
@@ -5918,7 +5996,7 @@ void LCodeGen::DoFunctionLiteral(LFunctionLiteral* instr) {
// space for nested functions that don't need literals cloning.
bool pretenure = instr->hydrogen()->pretenure();
if (!pretenure && instr->hydrogen()->has_no_literals()) {
- FastNewClosureStub stub(instr->hydrogen()->language_mode(),
+ FastNewClosureStub stub(instr->hydrogen()->strict_mode(),
instr->hydrogen()->is_generator());
__ mov(ebx, Immediate(instr->hydrogen()->shared_info()));
CallCode(stub.GetCode(isolate()), RelocInfo::CODE_TARGET, instr);
@@ -5927,7 +6005,7 @@ void LCodeGen::DoFunctionLiteral(LFunctionLiteral* instr) {
__ push(Immediate(instr->hydrogen()->shared_info()));
__ push(Immediate(pretenure ? factory()->true_value()
: factory()->false_value()));
- CallRuntime(Runtime::kNewClosure, 3, instr);
+ CallRuntime(Runtime::kHiddenNewClosure, 3, instr);
}
}
@@ -6072,7 +6150,7 @@ void LCodeGen::EnsureSpaceForLazyDeopt(int space_needed) {
void LCodeGen::DoLazyBailout(LLazyBailout* instr) {
- EnsureSpaceForLazyDeopt(Deoptimizer::patch_size());
+ last_lazy_deopt_pc_ = masm()->pc_offset();
ASSERT(instr->HasEnvironment());
LEnvironment* env = instr->environment();
RegisterEnvironmentForDeoptimization(env, Safepoint::kLazyDeopt);
@@ -6107,7 +6185,7 @@ void LCodeGen::DoDummyUse(LDummyUse* instr) {
void LCodeGen::DoDeferredStackCheck(LStackCheck* instr) {
PushSafepointRegistersScope scope(this);
__ mov(esi, Operand(ebp, StandardFrameConstants::kContextOffset));
- __ CallRuntimeSaveDoubles(Runtime::kStackGuard);
+ __ CallRuntimeSaveDoubles(Runtime::kHiddenStackGuard);
RecordSafepointWithLazyDeopt(
instr, RECORD_SAFEPOINT_WITH_REGISTERS_AND_NO_ARGUMENTS);
ASSERT(instr->HasEnvironment());
@@ -6148,10 +6226,7 @@ void LCodeGen::DoStackCheck(LStackCheck* instr) {
CallCode(isolate()->builtins()->StackCheck(),
RelocInfo::CODE_TARGET,
instr);
- EnsureSpaceForLazyDeopt(Deoptimizer::patch_size());
__ bind(&done);
- RegisterEnvironmentForDeoptimization(env, Safepoint::kLazyDeopt);
- safepoints_.RecordLazyDeoptimizationIndex(env->deoptimization_index());
} else {
ASSERT(instr->hydrogen()->is_backwards_branch());
// Perform stack overflow check if this goto needs it before jumping.
diff --git a/deps/v8/src/ia32/lithium-codegen-ia32.h b/deps/v8/src/ia32/lithium-codegen-ia32.h
index fa5e88b033..079595cba5 100644
--- a/deps/v8/src/ia32/lithium-codegen-ia32.h
+++ b/deps/v8/src/ia32/lithium-codegen-ia32.h
@@ -148,9 +148,11 @@ class LCodeGen: public LCodeGenBase {
void DoDeferredNumberTagD(LNumberTagD* instr);
enum IntegerSignedness { SIGNED_INT32, UNSIGNED_INT32 };
- void DoDeferredNumberTagI(LInstruction* instr,
- LOperand* value,
- IntegerSignedness signedness);
+ void DoDeferredNumberTagIU(LInstruction* instr,
+ LOperand* value,
+ LOperand* temp1,
+ LOperand* temp2,
+ IntegerSignedness signedness);
void DoDeferredTaggedToI(LTaggedToI* instr, Label* done);
void DoDeferredMathAbsTaggedHeapNumber(LMathAbs* instr);
@@ -177,9 +179,7 @@ class LCodeGen: public LCodeGenBase {
#undef DECLARE_DO
private:
- StrictModeFlag strict_mode_flag() const {
- return info()->is_classic_mode() ? kNonStrictMode : kStrictMode;
- }
+ StrictMode strict_mode() const { return info()->strict_mode(); }
Scope* scope() const { return scope_; }
diff --git a/deps/v8/src/ia32/lithium-gap-resolver-ia32.cc b/deps/v8/src/ia32/lithium-gap-resolver-ia32.cc
index d621bd261d..01821d95fa 100644
--- a/deps/v8/src/ia32/lithium-gap-resolver-ia32.cc
+++ b/deps/v8/src/ia32/lithium-gap-resolver-ia32.cc
@@ -309,7 +309,7 @@ void LGapResolver::EmitMove(int index) {
Representation r = cgen_->IsSmi(constant_source)
? Representation::Smi() : Representation::Integer32();
if (cgen_->IsInteger32(constant_source)) {
- __ Set(dst, cgen_->ToImmediate(constant_source, r));
+ __ Move(dst, cgen_->ToImmediate(constant_source, r));
} else {
__ LoadObject(dst, cgen_->ToHandle(constant_source));
}
@@ -342,7 +342,7 @@ void LGapResolver::EmitMove(int index) {
Representation r = cgen_->IsSmi(constant_source)
? Representation::Smi() : Representation::Integer32();
if (cgen_->IsInteger32(constant_source)) {
- __ Set(dst, cgen_->ToImmediate(constant_source, r));
+ __ Move(dst, cgen_->ToImmediate(constant_source, r));
} else {
Register tmp = EnsureTempRegister();
__ LoadObject(tmp, cgen_->ToHandle(constant_source));
diff --git a/deps/v8/src/ia32/lithium-ia32.cc b/deps/v8/src/ia32/lithium-ia32.cc
index a9d49205ce..696c6be6e8 100644
--- a/deps/v8/src/ia32/lithium-ia32.cc
+++ b/deps/v8/src/ia32/lithium-ia32.cc
@@ -1005,30 +1005,22 @@ LInstruction* LChunkBuilder::DoBranch(HBranch* instr) {
LInstruction* goto_instr = CheckElideControlInstruction(instr);
if (goto_instr != NULL) return goto_instr;
- ToBooleanStub::Types expected = instr->expected_input_types();
-
- // Tagged values that are not known smis or booleans require a
- // deoptimization environment. If the instruction is generic no
- // environment is needed since all cases are handled.
HValue* value = instr->value();
- Representation rep = value->representation();
+ Representation r = value->representation();
HType type = value->type();
- if (!rep.IsTagged() || type.IsSmi() || type.IsBoolean()) {
- return new(zone()) LBranch(UseRegister(value), NULL);
- }
-
- bool needs_temp = expected.NeedsMap() || expected.IsEmpty();
- LOperand* temp = needs_temp ? TempRegister() : NULL;
+ ToBooleanStub::Types expected = instr->expected_input_types();
+ if (expected.IsEmpty()) expected = ToBooleanStub::Types::Generic();
- // The Generic stub does not have a deopt, so we need no environment.
- if (expected.IsGeneric()) {
- return new(zone()) LBranch(UseRegister(value), temp);
+ bool easy_case = !r.IsTagged() || type.IsBoolean() || type.IsSmi() ||
+ type.IsJSArray() || type.IsHeapNumber() || type.IsString();
+ LOperand* temp = !easy_case && expected.NeedsMap() ? TempRegister() : NULL;
+ LInstruction* branch = new(zone()) LBranch(UseRegister(value), temp);
+ if (!easy_case &&
+ ((!expected.Contains(ToBooleanStub::SMI) && expected.NeedsMap()) ||
+ !expected.IsGeneric())) {
+ branch = AssignEnvironment(branch);
}
-
- // We need a temporary register when we have to access the map *or* we have
- // no type info yet, in which case we handle all cases (including the ones
- // involving maps).
- return AssignEnvironment(new(zone()) LBranch(UseRegister(value), temp));
+ return branch;
}
@@ -1195,6 +1187,7 @@ LInstruction* LChunkBuilder::DoUnaryMathOperation(HUnaryMathOperation* instr) {
case kMathExp: return DoMathExp(instr);
case kMathSqrt: return DoMathSqrt(instr);
case kMathPowHalf: return DoMathPowHalf(instr);
+ case kMathClz32: return DoMathClz32(instr);
default:
UNREACHABLE();
return NULL;
@@ -1220,8 +1213,12 @@ LInstruction* LChunkBuilder::DoMathRound(HUnaryMathOperation* instr) {
LInstruction* LChunkBuilder::DoMathAbs(HUnaryMathOperation* instr) {
LOperand* context = UseAny(instr->context()); // Deferred use.
LOperand* input = UseRegisterAtStart(instr->value());
- LMathAbs* result = new(zone()) LMathAbs(context, input);
- return AssignEnvironment(AssignPointerMap(DefineSameAsFirst(result)));
+ LInstruction* result =
+ DefineSameAsFirst(new(zone()) LMathAbs(context, input));
+ Representation r = instr->value()->representation();
+ if (!r.IsDouble() && !r.IsSmiOrInteger32()) result = AssignPointerMap(result);
+ if (!r.IsDouble()) result = AssignEnvironment(result);
+ return result;
}
@@ -1233,6 +1230,13 @@ LInstruction* LChunkBuilder::DoMathLog(HUnaryMathOperation* instr) {
}
+LInstruction* LChunkBuilder::DoMathClz32(HUnaryMathOperation* instr) {
+ LOperand* input = UseRegisterAtStart(instr->value());
+ LMathClz32* result = new(zone()) LMathClz32(input);
+ return DefineAsRegister(result);
+}
+
+
LInstruction* LChunkBuilder::DoMathExp(HUnaryMathOperation* instr) {
ASSERT(instr->representation().IsDouble());
ASSERT(instr->value()->representation().IsDouble());
@@ -1324,24 +1328,72 @@ LInstruction* LChunkBuilder::DoBitwise(HBitwise* instr) {
}
+LInstruction* LChunkBuilder::DoDivByPowerOf2I(HDiv* instr) {
+ ASSERT(instr->representation().IsSmiOrInteger32());
+ ASSERT(instr->left()->representation().Equals(instr->representation()));
+ ASSERT(instr->right()->representation().Equals(instr->representation()));
+ LOperand* dividend = UseRegister(instr->left());
+ int32_t divisor = instr->right()->GetInteger32Constant();
+ LInstruction* result = DefineAsRegister(new(zone()) LDivByPowerOf2I(
+ dividend, divisor));
+ if ((instr->CheckFlag(HValue::kBailoutOnMinusZero) && divisor < 0) ||
+ (instr->CheckFlag(HValue::kCanOverflow) && divisor == -1) ||
+ (!instr->CheckFlag(HInstruction::kAllUsesTruncatingToInt32) &&
+ divisor != 1 && divisor != -1)) {
+ result = AssignEnvironment(result);
+ }
+ return result;
+}
+
+
+LInstruction* LChunkBuilder::DoDivByConstI(HDiv* instr) {
+ ASSERT(instr->representation().IsInteger32());
+ ASSERT(instr->left()->representation().Equals(instr->representation()));
+ ASSERT(instr->right()->representation().Equals(instr->representation()));
+ LOperand* dividend = UseRegister(instr->left());
+ int32_t divisor = instr->right()->GetInteger32Constant();
+ LOperand* temp1 = FixedTemp(eax);
+ LOperand* temp2 = FixedTemp(edx);
+ LInstruction* result = DefineFixed(new(zone()) LDivByConstI(
+ dividend, divisor, temp1, temp2), edx);
+ if (divisor == 0 ||
+ (instr->CheckFlag(HValue::kBailoutOnMinusZero) && divisor < 0) ||
+ !instr->CheckFlag(HInstruction::kAllUsesTruncatingToInt32)) {
+ result = AssignEnvironment(result);
+ }
+ return result;
+}
+
+
+LInstruction* LChunkBuilder::DoDivI(HBinaryOperation* instr) {
+ ASSERT(instr->representation().IsSmiOrInteger32());
+ ASSERT(instr->left()->representation().Equals(instr->representation()));
+ ASSERT(instr->right()->representation().Equals(instr->representation()));
+ LOperand* dividend = UseFixed(instr->left(), eax);
+ LOperand* divisor = UseRegister(instr->right());
+ LOperand* temp = FixedTemp(edx);
+ LInstruction* result = DefineFixed(new(zone()) LDivI(
+ dividend, divisor, temp), eax);
+ if (instr->CheckFlag(HValue::kCanBeDivByZero) ||
+ instr->CheckFlag(HValue::kBailoutOnMinusZero) ||
+ instr->CheckFlag(HValue::kCanOverflow) ||
+ (!instr->IsMathFloorOfDiv() &&
+ !instr->CheckFlag(HValue::kAllUsesTruncatingToInt32))) {
+ result = AssignEnvironment(result);
+ }
+ return result;
+}
+
+
LInstruction* LChunkBuilder::DoDiv(HDiv* instr) {
if (instr->representation().IsSmiOrInteger32()) {
- ASSERT(instr->left()->representation().Equals(instr->representation()));
- ASSERT(instr->right()->representation().Equals(instr->representation()));
if (instr->RightIsPowerOf2()) {
- ASSERT(!instr->CheckFlag(HValue::kCanBeDivByZero));
- LOperand* value = UseRegisterAtStart(instr->left());
- LDivI* div =
- new(zone()) LDivI(value, UseOrConstant(instr->right()), NULL);
- return AssignEnvironment(DefineSameAsFirst(div));
+ return DoDivByPowerOf2I(instr);
+ } else if (instr->right()->IsConstant()) {
+ return DoDivByConstI(instr);
+ } else {
+ return DoDivI(instr);
}
- // The temporary operand is necessary to ensure that right is not allocated
- // into edx.
- LOperand* temp = FixedTemp(edx);
- LOperand* dividend = UseFixed(instr->left(), eax);
- LOperand* divisor = UseRegister(instr->right());
- LDivI* result = new(zone()) LDivI(dividend, divisor, temp);
- return AssignEnvironment(DefineFixed(result, eax));
} else if (instr->representation().IsDouble()) {
return DoArithmeticD(Token::DIV, instr);
} else {
@@ -1350,78 +1402,114 @@ LInstruction* LChunkBuilder::DoDiv(HDiv* instr) {
}
+LInstruction* LChunkBuilder::DoFlooringDivByPowerOf2I(HMathFloorOfDiv* instr) {
+ LOperand* dividend = UseRegisterAtStart(instr->left());
+ int32_t divisor = instr->right()->GetInteger32Constant();
+ LInstruction* result = DefineSameAsFirst(new(zone()) LFlooringDivByPowerOf2I(
+ dividend, divisor));
+ if ((instr->CheckFlag(HValue::kBailoutOnMinusZero) && divisor < 0) ||
+ (instr->CheckFlag(HValue::kLeftCanBeMinInt) && divisor == -1)) {
+ result = AssignEnvironment(result);
+ }
+ return result;
+}
+
+
+LInstruction* LChunkBuilder::DoFlooringDivByConstI(HMathFloorOfDiv* instr) {
+ ASSERT(instr->representation().IsInteger32());
+ ASSERT(instr->left()->representation().Equals(instr->representation()));
+ ASSERT(instr->right()->representation().Equals(instr->representation()));
+ LOperand* dividend = UseRegister(instr->left());
+ int32_t divisor = instr->right()->GetInteger32Constant();
+ LOperand* temp1 = FixedTemp(eax);
+ LOperand* temp2 = FixedTemp(edx);
+ LOperand* temp3 =
+ ((divisor > 0 && !instr->CheckFlag(HValue::kLeftCanBeNegative)) ||
+ (divisor < 0 && !instr->CheckFlag(HValue::kLeftCanBePositive))) ?
+ NULL : TempRegister();
+ LInstruction* result =
+ DefineFixed(new(zone()) LFlooringDivByConstI(dividend,
+ divisor,
+ temp1,
+ temp2,
+ temp3),
+ edx);
+ if (divisor == 0 ||
+ (instr->CheckFlag(HValue::kBailoutOnMinusZero) && divisor < 0)) {
+ result = AssignEnvironment(result);
+ }
+ return result;
+}
+
+
LInstruction* LChunkBuilder::DoMathFloorOfDiv(HMathFloorOfDiv* instr) {
- HValue* right = instr->right();
- if (!right->IsConstant()) {
- ASSERT(right->representation().IsInteger32());
- // The temporary operand is necessary to ensure that right is not allocated
- // into edx.
- LOperand* temp = FixedTemp(edx);
- LOperand* dividend = UseFixed(instr->left(), eax);
- LOperand* divisor = UseRegister(instr->right());
- LDivI* flooring_div = new(zone()) LDivI(dividend, divisor, temp);
- return AssignEnvironment(DefineFixed(flooring_div, eax));
- }
-
- ASSERT(right->IsConstant() && HConstant::cast(right)->HasInteger32Value());
- LOperand* divisor = chunk_->DefineConstantOperand(HConstant::cast(right));
- int32_t divisor_si = HConstant::cast(right)->Integer32Value();
- if (divisor_si == 0) {
- LOperand* dividend = UseRegister(instr->left());
- return AssignEnvironment(DefineAsRegister(
- new(zone()) LMathFloorOfDiv(dividend, divisor, NULL)));
- } else if (IsPowerOf2(abs(divisor_si))) {
- // use dividend as temp if divisor < 0 && divisor != -1
- LOperand* dividend = divisor_si < -1 ? UseTempRegister(instr->left()) :
- UseRegisterAtStart(instr->left());
- LInstruction* result = DefineAsRegister(
- new(zone()) LMathFloorOfDiv(dividend, divisor, NULL));
- return divisor_si < 0 ? AssignEnvironment(result) : result;
+ if (instr->RightIsPowerOf2()) {
+ return DoFlooringDivByPowerOf2I(instr);
+ } else if (instr->right()->IsConstant()) {
+ return DoFlooringDivByConstI(instr);
} else {
- // needs edx:eax, plus a temp
- LOperand* dividend = UseFixed(instr->left(), eax);
- LOperand* temp = TempRegister();
- LInstruction* result = DefineFixed(
- new(zone()) LMathFloorOfDiv(dividend, divisor, temp), edx);
- return divisor_si < 0 ? AssignEnvironment(result) : result;
+ return DoDivI(instr);
}
}
+LInstruction* LChunkBuilder::DoModByPowerOf2I(HMod* instr) {
+ ASSERT(instr->representation().IsSmiOrInteger32());
+ ASSERT(instr->left()->representation().Equals(instr->representation()));
+ ASSERT(instr->right()->representation().Equals(instr->representation()));
+ LOperand* dividend = UseRegisterAtStart(instr->left());
+ int32_t divisor = instr->right()->GetInteger32Constant();
+ LInstruction* result = DefineSameAsFirst(new(zone()) LModByPowerOf2I(
+ dividend, divisor));
+ if (instr->CheckFlag(HValue::kBailoutOnMinusZero)) {
+ result = AssignEnvironment(result);
+ }
+ return result;
+}
+
+
+LInstruction* LChunkBuilder::DoModByConstI(HMod* instr) {
+ ASSERT(instr->representation().IsSmiOrInteger32());
+ ASSERT(instr->left()->representation().Equals(instr->representation()));
+ ASSERT(instr->right()->representation().Equals(instr->representation()));
+ LOperand* dividend = UseRegister(instr->left());
+ int32_t divisor = instr->right()->GetInteger32Constant();
+ LOperand* temp1 = FixedTemp(eax);
+ LOperand* temp2 = FixedTemp(edx);
+ LInstruction* result = DefineFixed(new(zone()) LModByConstI(
+ dividend, divisor, temp1, temp2), eax);
+ if (divisor == 0 || instr->CheckFlag(HValue::kBailoutOnMinusZero)) {
+ result = AssignEnvironment(result);
+ }
+ return result;
+}
+
+
+LInstruction* LChunkBuilder::DoModI(HMod* instr) {
+ ASSERT(instr->representation().IsSmiOrInteger32());
+ ASSERT(instr->left()->representation().Equals(instr->representation()));
+ ASSERT(instr->right()->representation().Equals(instr->representation()));
+ LOperand* dividend = UseFixed(instr->left(), eax);
+ LOperand* divisor = UseRegister(instr->right());
+ LOperand* temp = FixedTemp(edx);
+ LInstruction* result = DefineFixed(new(zone()) LModI(
+ dividend, divisor, temp), edx);
+ if (instr->CheckFlag(HValue::kCanBeDivByZero) ||
+ instr->CheckFlag(HValue::kBailoutOnMinusZero)) {
+ result = AssignEnvironment(result);
+ }
+ return result;
+}
+
+
LInstruction* LChunkBuilder::DoMod(HMod* instr) {
- HValue* left = instr->left();
- HValue* right = instr->right();
if (instr->representation().IsSmiOrInteger32()) {
- ASSERT(instr->left()->representation().Equals(instr->representation()));
- ASSERT(instr->right()->representation().Equals(instr->representation()));
-
if (instr->RightIsPowerOf2()) {
- ASSERT(!right->CanBeZero());
- LModI* mod = new(zone()) LModI(UseRegisterAtStart(left),
- UseOrConstant(right),
- NULL);
- LInstruction* result = DefineSameAsFirst(mod);
- return (left->CanBeNegative() &&
- instr->CheckFlag(HValue::kBailoutOnMinusZero))
- ? AssignEnvironment(result)
- : result;
- return AssignEnvironment(DefineSameAsFirst(mod));
+ return DoModByPowerOf2I(instr);
+ } else if (instr->right()->IsConstant()) {
+ return DoModByConstI(instr);
} else {
- // The temporary operand is necessary to ensure that right is not
- // allocated into edx.
- LModI* mod = new(zone()) LModI(UseFixed(left, eax),
- UseRegister(right),
- FixedTemp(edx));
- LInstruction* result = DefineFixed(mod, edx);
- return (right->CanBeZero() ||
- (left->RangeCanInclude(kMinInt) &&
- right->RangeCanInclude(-1) &&
- instr->CheckFlag(HValue::kBailoutOnMinusZero)) ||
- (left->CanBeNegative() &&
- instr->CanBeZero() &&
- instr->CheckFlag(HValue::kBailoutOnMinusZero)))
- ? AssignEnvironment(result)
- : result;
+ return DoModI(instr);
}
} else if (instr->representation().IsDouble()) {
return DoArithmeticD(Token::MOD, instr);
@@ -1809,8 +1897,12 @@ LInstruction* LChunkBuilder::DoChange(HChange* instr) {
LOperand* value = UseRegister(instr->value());
// Temp register only necessary for minus zero check.
LOperand* temp = TempRegister();
- LNumberUntagD* res = new(zone()) LNumberUntagD(value, temp);
- return AssignEnvironment(DefineAsRegister(res));
+ LInstruction* result = DefineAsRegister(
+ new(zone()) LNumberUntagD(value, temp));
+ if (!instr->value()->representation().IsSmi()) {
+ result = AssignEnvironment(result);
+ }
+ return result;
} else if (to.IsSmi()) {
HValue* val = instr->value();
LOperand* value = UseRegister(val);
@@ -1829,8 +1921,13 @@ LInstruction* LChunkBuilder::DoChange(HChange* instr) {
LOperand* xmm_temp =
(CpuFeatures::IsSafeForSnapshot(SSE2) && !truncating)
? FixedTemp(xmm1) : NULL;
- LTaggedToI* res = new(zone()) LTaggedToI(UseRegister(val), xmm_temp);
- return AssignEnvironment(DefineSameAsFirst(res));
+ LInstruction* result = DefineSameAsFirst(
+ new(zone()) LTaggedToI(UseRegister(val), xmm_temp));
+ if (!instr->value()->representation().IsSmi()) {
+ // Note: Only deopts in deferred code.
+ result = AssignEnvironment(result);
+ }
+ return result;
}
}
} else if (from.IsDouble()) {
@@ -1854,35 +1951,37 @@ LInstruction* LChunkBuilder::DoChange(HChange* instr) {
LOperand* value = needs_temp ?
UseTempRegister(instr->value()) : UseRegister(instr->value());
LOperand* temp = needs_temp ? TempRegister() : NULL;
- return AssignEnvironment(
- DefineAsRegister(new(zone()) LDoubleToI(value, temp)));
+ LInstruction* result =
+ DefineAsRegister(new(zone()) LDoubleToI(value, temp));
+ if (!truncating) result = AssignEnvironment(result);
+ return result;
}
} else if (from.IsInteger32()) {
info()->MarkAsDeferredCalling();
if (to.IsTagged()) {
HValue* val = instr->value();
LOperand* value = UseRegister(val);
- if (val->HasRange() && val->range()->IsInSmiRange()) {
+ if (!instr->CheckFlag(HValue::kCanOverflow)) {
return DefineSameAsFirst(new(zone()) LSmiTag(value));
} else if (val->CheckFlag(HInstruction::kUint32)) {
- LOperand* temp = CpuFeatures::IsSupported(SSE2) ? FixedTemp(xmm1)
- : NULL;
- LNumberTagU* result = new(zone()) LNumberTagU(value, temp);
- return AssignEnvironment(AssignPointerMap(DefineSameAsFirst(result)));
+ LOperand* temp1 = TempRegister();
+ LOperand* temp2 = CpuFeatures::IsSupported(SSE2) ? FixedTemp(xmm1)
+ : NULL;
+ LNumberTagU* result = new(zone()) LNumberTagU(value, temp1, temp2);
+ return AssignPointerMap(DefineSameAsFirst(result));
} else {
- LNumberTagI* result = new(zone()) LNumberTagI(value);
- return AssignEnvironment(AssignPointerMap(DefineSameAsFirst(result)));
+ LOperand* temp = TempRegister();
+ LNumberTagI* result = new(zone()) LNumberTagI(value, temp);
+ return AssignPointerMap(DefineSameAsFirst(result));
}
} else if (to.IsSmi()) {
HValue* val = instr->value();
LOperand* value = UseRegister(val);
- LInstruction* result = val->CheckFlag(HInstruction::kUint32)
- ? DefineSameAsFirst(new(zone()) LUint32ToSmi(value))
- : DefineSameAsFirst(new(zone()) LInteger32ToSmi(value));
- if (val->HasRange() && val->range()->IsInSmiRange()) {
- return result;
+ LInstruction* result = DefineSameAsFirst(new(zone()) LSmiTag(value));
+ if (instr->CheckFlag(HValue::kCanOverflow)) {
+ result = AssignEnvironment(result);
}
- return AssignEnvironment(result);
+ return result;
} else {
ASSERT(to.IsDouble());
if (instr->value()->CheckFlag(HInstruction::kUint32)) {
@@ -1939,6 +2038,7 @@ LInstruction* LChunkBuilder::DoCheckMaps(HCheckMaps* instr) {
}
LCheckMaps* result = new(zone()) LCheckMaps(value);
if (!instr->CanOmitMapChecks()) {
+ // Note: Only deopts in deferred code.
AssignEnvironment(result);
if (instr->has_migration_target()) return AssignPointerMap(result);
}
@@ -1975,6 +2075,20 @@ LInstruction* LChunkBuilder::DoClampToUint8(HClampToUint8* instr) {
}
+LInstruction* LChunkBuilder::DoDoubleBits(HDoubleBits* instr) {
+ HValue* value = instr->value();
+ ASSERT(value->representation().IsDouble());
+ return DefineAsRegister(new(zone()) LDoubleBits(UseRegister(value)));
+}
+
+
+LInstruction* LChunkBuilder::DoConstructDouble(HConstructDouble* instr) {
+ LOperand* lo = UseRegister(instr->lo());
+ LOperand* hi = UseRegister(instr->hi());
+ return DefineAsRegister(new(zone()) LConstructDouble(hi, lo));
+}
+
+
LInstruction* LChunkBuilder::DoReturn(HReturn* instr) {
LOperand* context = info()->IsStub() ? UseFixed(instr->context(), esi) : NULL;
LOperand* parameter_count = UseRegisterOrConstant(instr->parameter_count());
@@ -2033,7 +2147,10 @@ LInstruction* LChunkBuilder::DoLoadContextSlot(HLoadContextSlot* instr) {
LOperand* context = UseRegisterAtStart(instr->value());
LInstruction* result =
DefineAsRegister(new(zone()) LLoadContextSlot(context));
- return instr->RequiresHoleCheck() ? AssignEnvironment(result) : result;
+ if (instr->RequiresHoleCheck() && instr->DeoptimizesOnHole()) {
+ result = AssignEnvironment(result);
+ }
+ return result;
}
@@ -2049,7 +2166,10 @@ LInstruction* LChunkBuilder::DoStoreContextSlot(HStoreContextSlot* instr) {
temp = NULL;
}
LInstruction* result = new(zone()) LStoreContextSlot(context, value, temp);
- return instr->RequiresHoleCheck() ? AssignEnvironment(result) : result;
+ if (instr->RequiresHoleCheck() && instr->DeoptimizesOnHole()) {
+ result = AssignEnvironment(result);
+ }
+ return result;
}
@@ -2091,11 +2211,11 @@ LInstruction* LChunkBuilder::DoLoadKeyed(HLoadKeyed* instr) {
LOperand* key = clobbers_key
? UseTempRegister(instr->key())
: UseRegisterOrConstantAtStart(instr->key());
- LLoadKeyed* result = NULL;
+ LInstruction* result = NULL;
if (!instr->is_typed_elements()) {
LOperand* obj = UseRegisterAtStart(instr->elements());
- result = new(zone()) LLoadKeyed(obj, key);
+ result = DefineAsRegister(new(zone()) LLoadKeyed(obj, key));
} else {
ASSERT(
(instr->representation().IsInteger32() &&
@@ -2103,15 +2223,20 @@ LInstruction* LChunkBuilder::DoLoadKeyed(HLoadKeyed* instr) {
(instr->representation().IsDouble() &&
(IsDoubleOrFloatElementsKind(instr->elements_kind()))));
LOperand* backing_store = UseRegister(instr->elements());
- result = new(zone()) LLoadKeyed(backing_store, key);
+ result = DefineAsRegister(new(zone()) LLoadKeyed(backing_store, key));
}
- DefineAsRegister(result);
- bool can_deoptimize = instr->RequiresHoleCheck() ||
- (elements_kind == EXTERNAL_UINT32_ELEMENTS);
- // An unsigned int array load might overflow and cause a deopt, make sure it
- // has an environment.
- return can_deoptimize ? AssignEnvironment(result) : result;
+ if ((instr->is_external() || instr->is_fixed_typed_array()) ?
+ // see LCodeGen::DoLoadKeyedExternalArray
+ ((instr->elements_kind() == EXTERNAL_UINT32_ELEMENTS ||
+ instr->elements_kind() == UINT32_ELEMENTS) &&
+ !instr->CheckFlag(HInstruction::kUint32)) :
+ // see LCodeGen::DoLoadKeyedFixedDoubleArray and
+ // LCodeGen::DoLoadKeyedFixedArray
+ instr->RequiresHoleCheck()) {
+ result = AssignEnvironment(result);
+ }
+ return result;
}
@@ -2274,7 +2399,7 @@ LInstruction* LChunkBuilder::DoStoreNamedField(HStoreNamedField* instr) {
bool can_be_constant = instr->value()->IsConstant() &&
HConstant::cast(instr->value())->NotInNewSpace() &&
- !(FLAG_track_double_fields && instr->field_representation().IsDouble());
+ !instr->field_representation().IsDouble();
LOperand* val;
if (instr->field_representation().IsInteger8() ||
@@ -2286,10 +2411,9 @@ LInstruction* LChunkBuilder::DoStoreNamedField(HStoreNamedField* instr) {
val = UseTempRegister(instr->value());
} else if (can_be_constant) {
val = UseRegisterOrConstant(instr->value());
- } else if (FLAG_track_fields && instr->field_representation().IsSmi()) {
+ } else if (instr->field_representation().IsSmi()) {
val = UseTempRegister(instr->value());
- } else if (FLAG_track_double_fields &&
- instr->field_representation().IsDouble()) {
+ } else if (instr->field_representation().IsDouble()) {
val = UseRegisterAtStart(instr->value());
} else {
val = UseRegister(instr->value());
@@ -2303,13 +2427,14 @@ LInstruction* LChunkBuilder::DoStoreNamedField(HStoreNamedField* instr) {
// We need a temporary register for write barrier of the map field.
LOperand* temp_map = needs_write_barrier_for_map ? TempRegister() : NULL;
- LStoreNamedField* result =
+ LInstruction* result =
new(zone()) LStoreNamedField(obj, val, temp, temp_map);
- if (FLAG_track_heap_object_fields &&
- instr->field_representation().IsHeapObject()) {
- if (!instr->value()->type().IsHeapObject()) {
- return AssignEnvironment(result);
- }
+ if (!instr->access().IsExternalMemory() &&
+ instr->field_representation().IsHeapObject() &&
+ (val->IsConstantOperand()
+ ? HConstant::cast(instr->value())->HasSmiValue()
+ : !instr->value()->type().IsHeapObject())) {
+ result = AssignEnvironment(result);
}
return result;
}
@@ -2341,7 +2466,7 @@ LInstruction* LChunkBuilder::DoStringCharCodeAt(HStringCharCodeAt* instr) {
LOperand* context = UseAny(instr->context());
LStringCharCodeAt* result =
new(zone()) LStringCharCodeAt(context, string, index);
- return AssignEnvironment(AssignPointerMap(DefineAsRegister(result)));
+ return AssignPointerMap(DefineAsRegister(result));
}
diff --git a/deps/v8/src/ia32/lithium-ia32.h b/deps/v8/src/ia32/lithium-ia32.h
index a36cf413e3..7964b7f6ee 100644
--- a/deps/v8/src/ia32/lithium-ia32.h
+++ b/deps/v8/src/ia32/lithium-ia32.h
@@ -82,17 +82,23 @@ class LCodeGen;
V(ConstantI) \
V(ConstantS) \
V(ConstantT) \
+ V(ConstructDouble) \
V(Context) \
V(DateField) \
V(DebugBreak) \
V(DeclareGlobals) \
V(Deoptimize) \
+ V(DivByConstI) \
+ V(DivByPowerOf2I) \
V(DivI) \
+ V(DoubleBits) \
V(DoubleToI) \
V(DoubleToSmi) \
V(Drop) \
V(Dummy) \
V(DummyUse) \
+ V(FlooringDivByConstI) \
+ V(FlooringDivByPowerOf2I) \
V(ForInCacheArray) \
V(ForInPrepareMap) \
V(FunctionLiteral) \
@@ -105,7 +111,6 @@ class LCodeGen;
V(InstanceOfKnownGlobal) \
V(InstructionGap) \
V(Integer32ToDouble) \
- V(Integer32ToSmi) \
V(InvokeFunction) \
V(IsConstructCallAndBranch) \
V(IsObjectAndBranch) \
@@ -126,14 +131,16 @@ class LCodeGen;
V(LoadRoot) \
V(MapEnumLength) \
V(MathAbs) \
+ V(MathClz32) \
V(MathExp) \
V(MathFloor) \
- V(MathFloorOfDiv) \
V(MathLog) \
V(MathMinMax) \
V(MathPowHalf) \
V(MathRound) \
V(MathSqrt) \
+ V(ModByConstI) \
+ V(ModByPowerOf2I) \
V(ModI) \
V(MulI) \
V(NumberTagD) \
@@ -172,7 +179,6 @@ class LCodeGen;
V(Typeof) \
V(TypeofIsAndBranch) \
V(Uint32ToDouble) \
- V(Uint32ToSmi) \
V(UnknownOSRValue) \
V(WrapReceiver)
@@ -633,6 +639,49 @@ class LDebugBreak V8_FINAL : public LTemplateInstruction<0, 0, 0> {
};
+class LModByPowerOf2I V8_FINAL : public LTemplateInstruction<1, 1, 0> {
+ public:
+ LModByPowerOf2I(LOperand* dividend, int32_t divisor) {
+ inputs_[0] = dividend;
+ divisor_ = divisor;
+ }
+
+ LOperand* dividend() { return inputs_[0]; }
+ int32_t divisor() const { return divisor_; }
+
+ DECLARE_CONCRETE_INSTRUCTION(ModByPowerOf2I, "mod-by-power-of-2-i")
+ DECLARE_HYDROGEN_ACCESSOR(Mod)
+
+ private:
+ int32_t divisor_;
+};
+
+
+class LModByConstI V8_FINAL : public LTemplateInstruction<1, 1, 2> {
+ public:
+ LModByConstI(LOperand* dividend,
+ int32_t divisor,
+ LOperand* temp1,
+ LOperand* temp2) {
+ inputs_[0] = dividend;
+ divisor_ = divisor;
+ temps_[0] = temp1;
+ temps_[1] = temp2;
+ }
+
+ LOperand* dividend() { return inputs_[0]; }
+ int32_t divisor() const { return divisor_; }
+ LOperand* temp1() { return temps_[0]; }
+ LOperand* temp2() { return temps_[1]; }
+
+ DECLARE_CONCRETE_INSTRUCTION(ModByConstI, "mod-by-const-i")
+ DECLARE_HYDROGEN_ACCESSOR(Mod)
+
+ private:
+ int32_t divisor_;
+};
+
+
class LModI V8_FINAL : public LTemplateInstruction<1, 2, 1> {
public:
LModI(LOperand* left, LOperand* right, LOperand* temp) {
@@ -650,29 +699,52 @@ class LModI V8_FINAL : public LTemplateInstruction<1, 2, 1> {
};
-class LDivI V8_FINAL : public LTemplateInstruction<1, 2, 1> {
+class LDivByPowerOf2I V8_FINAL : public LTemplateInstruction<1, 1, 0> {
public:
- LDivI(LOperand* left, LOperand* right, LOperand* temp) {
- inputs_[0] = left;
- inputs_[1] = right;
- temps_[0] = temp;
+ LDivByPowerOf2I(LOperand* dividend, int32_t divisor) {
+ inputs_[0] = dividend;
+ divisor_ = divisor;
}
- LOperand* left() { return inputs_[0]; }
- LOperand* right() { return inputs_[1]; }
+ LOperand* dividend() { return inputs_[0]; }
+ int32_t divisor() const { return divisor_; }
- bool is_flooring() { return hydrogen_value()->IsMathFloorOfDiv(); }
+ DECLARE_CONCRETE_INSTRUCTION(DivByPowerOf2I, "div-by-power-of-2-i")
+ DECLARE_HYDROGEN_ACCESSOR(Div)
- DECLARE_CONCRETE_INSTRUCTION(DivI, "div-i")
+ private:
+ int32_t divisor_;
+};
+
+
+class LDivByConstI V8_FINAL : public LTemplateInstruction<1, 1, 2> {
+ public:
+ LDivByConstI(LOperand* dividend,
+ int32_t divisor,
+ LOperand* temp1,
+ LOperand* temp2) {
+ inputs_[0] = dividend;
+ divisor_ = divisor;
+ temps_[0] = temp1;
+ temps_[1] = temp2;
+ }
+
+ LOperand* dividend() { return inputs_[0]; }
+ int32_t divisor() const { return divisor_; }
+ LOperand* temp1() { return temps_[0]; }
+ LOperand* temp2() { return temps_[1]; }
+
+ DECLARE_CONCRETE_INSTRUCTION(DivByConstI, "div-by-const-i")
DECLARE_HYDROGEN_ACCESSOR(Div)
+
+ private:
+ int32_t divisor_;
};
-class LMathFloorOfDiv V8_FINAL : public LTemplateInstruction<1, 2, 1> {
+class LDivI V8_FINAL : public LTemplateInstruction<1, 2, 1> {
public:
- LMathFloorOfDiv(LOperand* left,
- LOperand* right,
- LOperand* temp = NULL) {
+ LDivI(LOperand* left, LOperand* right, LOperand* temp) {
inputs_[0] = left;
inputs_[1] = right;
temps_[0] = temp;
@@ -682,8 +754,55 @@ class LMathFloorOfDiv V8_FINAL : public LTemplateInstruction<1, 2, 1> {
LOperand* right() { return inputs_[1]; }
LOperand* temp() { return temps_[0]; }
- DECLARE_CONCRETE_INSTRUCTION(MathFloorOfDiv, "math-floor-of-div")
+ DECLARE_CONCRETE_INSTRUCTION(DivI, "div-i")
+ DECLARE_HYDROGEN_ACCESSOR(BinaryOperation)
+};
+
+
+class LFlooringDivByPowerOf2I V8_FINAL : public LTemplateInstruction<1, 1, 0> {
+ public:
+ LFlooringDivByPowerOf2I(LOperand* dividend, int32_t divisor) {
+ inputs_[0] = dividend;
+ divisor_ = divisor;
+ }
+
+ LOperand* dividend() { return inputs_[0]; }
+ int32_t divisor() const { return divisor_; }
+
+ DECLARE_CONCRETE_INSTRUCTION(FlooringDivByPowerOf2I,
+ "flooring-div-by-power-of-2-i")
DECLARE_HYDROGEN_ACCESSOR(MathFloorOfDiv)
+
+ private:
+ int32_t divisor_;
+};
+
+
+class LFlooringDivByConstI V8_FINAL : public LTemplateInstruction<1, 1, 3> {
+ public:
+ LFlooringDivByConstI(LOperand* dividend,
+ int32_t divisor,
+ LOperand* temp1,
+ LOperand* temp2,
+ LOperand* temp3) {
+ inputs_[0] = dividend;
+ divisor_ = divisor;
+ temps_[0] = temp1;
+ temps_[1] = temp2;
+ temps_[2] = temp3;
+ }
+
+ LOperand* dividend() { return inputs_[0]; }
+ int32_t divisor() const { return divisor_; }
+ LOperand* temp1() { return temps_[0]; }
+ LOperand* temp2() { return temps_[1]; }
+ LOperand* temp3() { return temps_[2]; }
+
+ DECLARE_CONCRETE_INSTRUCTION(FlooringDivByConstI, "flooring-div-by-const-i")
+ DECLARE_HYDROGEN_ACCESSOR(MathFloorOfDiv)
+
+ private:
+ int32_t divisor_;
};
@@ -782,6 +901,18 @@ class LMathLog V8_FINAL : public LTemplateInstruction<1, 1, 0> {
};
+class LMathClz32 V8_FINAL : public LTemplateInstruction<1, 1, 0> {
+ public:
+ explicit LMathClz32(LOperand* value) {
+ inputs_[0] = value;
+ }
+
+ LOperand* value() { return inputs_[0]; }
+
+ DECLARE_CONCRETE_INSTRUCTION(MathClz32, "math-clz32")
+};
+
+
class LMathExp V8_FINAL : public LTemplateInstruction<1, 1, 2> {
public:
LMathExp(LOperand* value,
@@ -1884,19 +2015,6 @@ class LInteger32ToDouble V8_FINAL : public LTemplateInstruction<1, 1, 0> {
};
-class LInteger32ToSmi V8_FINAL : public LTemplateInstruction<1, 1, 0> {
- public:
- explicit LInteger32ToSmi(LOperand* value) {
- inputs_[0] = value;
- }
-
- LOperand* value() { return inputs_[0]; }
-
- DECLARE_CONCRETE_INSTRUCTION(Integer32ToSmi, "int32-to-smi")
- DECLARE_HYDROGEN_ACCESSOR(Change)
-};
-
-
class LUint32ToDouble V8_FINAL : public LTemplateInstruction<1, 1, 1> {
public:
explicit LUint32ToDouble(LOperand* value, LOperand* temp) {
@@ -1911,40 +2029,31 @@ class LUint32ToDouble V8_FINAL : public LTemplateInstruction<1, 1, 1> {
};
-class LUint32ToSmi V8_FINAL : public LTemplateInstruction<1, 1, 0> {
+class LNumberTagI V8_FINAL : public LTemplateInstruction<1, 1, 1> {
public:
- explicit LUint32ToSmi(LOperand* value) {
- inputs_[0] = value;
- }
-
- LOperand* value() { return inputs_[0]; }
-
- DECLARE_CONCRETE_INSTRUCTION(Uint32ToSmi, "uint32-to-smi")
- DECLARE_HYDROGEN_ACCESSOR(Change)
-};
-
-
-class LNumberTagI V8_FINAL : public LTemplateInstruction<1, 1, 0> {
- public:
- explicit LNumberTagI(LOperand* value) {
+ LNumberTagI(LOperand* value, LOperand* temp) {
inputs_[0] = value;
+ temps_[0] = temp;
}
LOperand* value() { return inputs_[0]; }
+ LOperand* temp() { return temps_[0]; }
DECLARE_CONCRETE_INSTRUCTION(NumberTagI, "number-tag-i")
};
-class LNumberTagU V8_FINAL : public LTemplateInstruction<1, 1, 1> {
+class LNumberTagU V8_FINAL : public LTemplateInstruction<1, 1, 2> {
public:
- LNumberTagU(LOperand* value, LOperand* temp) {
+ LNumberTagU(LOperand* value, LOperand* temp1, LOperand* temp2) {
inputs_[0] = value;
- temps_[0] = temp;
+ temps_[0] = temp1;
+ temps_[1] = temp2;
}
LOperand* value() { return inputs_[0]; }
- LOperand* temp() { return temps_[0]; }
+ LOperand* temp1() { return temps_[0]; }
+ LOperand* temp2() { return temps_[1]; }
DECLARE_CONCRETE_INSTRUCTION(NumberTagU, "number-tag-u")
};
@@ -2023,6 +2132,7 @@ class LSmiTag V8_FINAL : public LTemplateInstruction<1, 1, 0> {
LOperand* value() { return inputs_[0]; }
DECLARE_CONCRETE_INSTRUCTION(SmiTag, "smi-tag")
+ DECLARE_HYDROGEN_ACCESSOR(Change)
};
@@ -2105,7 +2215,7 @@ class LStoreNamedGeneric V8_FINAL : public LTemplateInstruction<0, 3, 0> {
virtual void PrintDataTo(StringStream* stream) V8_OVERRIDE;
Handle<Object> name() const { return hydrogen()->name(); }
- StrictModeFlag strict_mode_flag() { return hydrogen()->strict_mode_flag(); }
+ StrictMode strict_mode() { return hydrogen()->strict_mode(); }
};
@@ -2162,7 +2272,7 @@ class LStoreKeyedGeneric V8_FINAL : public LTemplateInstruction<0, 4, 0> {
virtual void PrintDataTo(StringStream* stream) V8_OVERRIDE;
- StrictModeFlag strict_mode_flag() { return hydrogen()->strict_mode_flag(); }
+ StrictMode strict_mode() { return hydrogen()->strict_mode(); }
};
@@ -2391,6 +2501,33 @@ class LCheckNonSmi V8_FINAL : public LTemplateInstruction<0, 1, 0> {
};
+class LDoubleBits V8_FINAL : public LTemplateInstruction<1, 1, 0> {
+ public:
+ explicit LDoubleBits(LOperand* value) {
+ inputs_[0] = value;
+ }
+
+ LOperand* value() { return inputs_[0]; }
+
+ DECLARE_CONCRETE_INSTRUCTION(DoubleBits, "double-bits")
+ DECLARE_HYDROGEN_ACCESSOR(DoubleBits)
+};
+
+
+class LConstructDouble V8_FINAL : public LTemplateInstruction<1, 2, 0> {
+ public:
+ LConstructDouble(LOperand* hi, LOperand* lo) {
+ inputs_[0] = hi;
+ inputs_[1] = lo;
+ }
+
+ LOperand* hi() { return inputs_[0]; }
+ LOperand* lo() { return inputs_[1]; }
+
+ DECLARE_CONCRETE_INSTRUCTION(ConstructDouble, "construct-double")
+};
+
+
class LAllocate V8_FINAL : public LTemplateInstruction<1, 2, 1> {
public:
LAllocate(LOperand* context, LOperand* size, LOperand* temp) {
@@ -2610,6 +2747,15 @@ class LChunkBuilder V8_FINAL : public LChunkBuilderBase {
LInstruction* DoMathExp(HUnaryMathOperation* instr);
LInstruction* DoMathSqrt(HUnaryMathOperation* instr);
LInstruction* DoMathPowHalf(HUnaryMathOperation* instr);
+ LInstruction* DoMathClz32(HUnaryMathOperation* instr);
+ LInstruction* DoDivByPowerOf2I(HDiv* instr);
+ LInstruction* DoDivByConstI(HDiv* instr);
+ LInstruction* DoDivI(HBinaryOperation* instr);
+ LInstruction* DoModByPowerOf2I(HMod* instr);
+ LInstruction* DoModByConstI(HMod* instr);
+ LInstruction* DoModI(HMod* instr);
+ LInstruction* DoFlooringDivByPowerOf2I(HMathFloorOfDiv* instr);
+ LInstruction* DoFlooringDivByConstI(HMathFloorOfDiv* instr);
private:
enum Status {
diff --git a/deps/v8/src/ia32/macro-assembler-ia32.cc b/deps/v8/src/ia32/macro-assembler-ia32.cc
index faf768e11d..7847b3b398 100644
--- a/deps/v8/src/ia32/macro-assembler-ia32.cc
+++ b/deps/v8/src/ia32/macro-assembler-ia32.cc
@@ -214,22 +214,22 @@ void MacroAssembler::ClampDoubleToUint8(XMMRegister input_reg,
Register result_reg) {
Label done;
Label conv_failure;
- pxor(scratch_reg, scratch_reg);
+ xorps(scratch_reg, scratch_reg);
cvtsd2si(result_reg, input_reg);
test(result_reg, Immediate(0xFFFFFF00));
j(zero, &done, Label::kNear);
- cmp(result_reg, Immediate(0x80000000));
- j(equal, &conv_failure, Label::kNear);
+ cmp(result_reg, Immediate(0x1));
+ j(overflow, &conv_failure, Label::kNear);
mov(result_reg, Immediate(0));
- setcc(above, result_reg);
+ setcc(sign, result_reg);
sub(result_reg, Immediate(1));
and_(result_reg, Immediate(255));
jmp(&done, Label::kNear);
bind(&conv_failure);
- Set(result_reg, Immediate(0));
+ Move(result_reg, Immediate(0));
ucomisd(input_reg, scratch_reg);
j(below, &done, Label::kNear);
- Set(result_reg, Immediate(255));
+ Move(result_reg, Immediate(255));
bind(&done);
}
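The rewritten check above replaces an explicit comparison against 0x80000000 with cmp(result, 1) followed by a jump on the overflow flag: cvtsd2si reports an out-of-range or NaN input by producing kMinInt (Intel's "integer indefinite" value), and kMinInt - 1 is the only subtraction here that sets OF. A hedged host-side statement of what the new branch detects, with illustrative naming:

#include <cstdint>
#include <limits>

// True when an SSE double->int32 conversion signalled failure by returning
// 0x80000000 (kMinInt).
bool Sse2ConversionFailed(int32_t conversion_result) {
  // `cmp result, 1` computes result - 1; only kMinInt - 1 overflows, so
  // `j(overflow, ...)` is equivalent to this equality test.
  return conversion_result == std::numeric_limits<int32_t>::min();
}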
@@ -256,8 +256,8 @@ void MacroAssembler::TruncateDoubleToI(Register result_reg,
XMMRegister input_reg) {
Label done;
cvttsd2si(result_reg, Operand(input_reg));
- cmp(result_reg, 0x80000000u);
- j(not_equal, &done, Label::kNear);
+ cmp(result_reg, 0x1);
+ j(no_overflow, &done, Label::kNear);
sub(esp, Immediate(kDoubleSize));
movsd(MemOperand(esp, 0), input_reg);
@@ -374,8 +374,8 @@ void MacroAssembler::TruncateHeapNumberToI(Register result_reg,
CpuFeatureScope scope(this, SSE2);
movsd(xmm0, FieldOperand(input_reg, HeapNumber::kValueOffset));
cvttsd2si(result_reg, Operand(xmm0));
- cmp(result_reg, 0x80000000u);
- j(not_equal, &done, Label::kNear);
+ cmp(result_reg, 0x1);
+ j(no_overflow, &done, Label::kNear);
// Check if the input was 0x80000000 (kMinInt).
// If no, then we got an overflow and we deoptimize.
ExternalReference min_int = ExternalReference::address_of_min_int();
@@ -715,7 +715,7 @@ void MacroAssembler::RecordWrite(Register object,
#ifdef ENABLE_DEBUGGER_SUPPORT
void MacroAssembler::DebugBreak() {
- Set(eax, Immediate(0));
+ Move(eax, Immediate(0));
mov(ebx, Immediate(ExternalReference(Runtime::kDebugBreak, isolate())));
CEntryStub ces(1);
call(ces.GetCode(isolate()), RelocInfo::DEBUG_BREAK);
@@ -729,20 +729,6 @@ void MacroAssembler::Cvtsi2sd(XMMRegister dst, const Operand& src) {
}
-void MacroAssembler::Set(Register dst, const Immediate& x) {
- if (x.is_zero()) {
- xor_(dst, dst); // Shorter than mov.
- } else {
- mov(dst, x);
- }
-}
-
-
-void MacroAssembler::Set(const Operand& dst, const Immediate& x) {
- mov(dst, x);
-}
-
-
bool MacroAssembler::IsUnsafeImmediate(const Immediate& x) {
static const int kMaxImmediateBits = 17;
if (!RelocInfo::IsNone(x.rmode_)) return false;
@@ -750,12 +736,12 @@ bool MacroAssembler::IsUnsafeImmediate(const Immediate& x) {
}
-void MacroAssembler::SafeSet(Register dst, const Immediate& x) {
+void MacroAssembler::SafeMove(Register dst, const Immediate& x) {
if (IsUnsafeImmediate(x) && jit_cookie() != 0) {
- Set(dst, Immediate(x.x_ ^ jit_cookie()));
+ Move(dst, Immediate(x.x_ ^ jit_cookie()));
xor_(dst, jit_cookie());
} else {
- Set(dst, x);
+ Move(dst, x);
}
}
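SafeSet becomes SafeMove but keeps the same constant-splitting defence: an immediate that could be attacker-influenced is never embedded verbatim; it is emitted XOR-ed with the per-isolate JIT cookie and re-XOR-ed at run time. A minimal sketch of the idea, with illustrative names that are not the V8 API:

#include <cstdint>

// For SafeMove(dst, value) with an unsafe immediate and a non-zero cookie the
// generated code is effectively:
//   mov dst, value ^ cookie
//   xor dst, cookie        ; dst == value again at run time
uint32_t SplitImmediate(uint32_t value, uint32_t jit_cookie) {
  return value ^ jit_cookie;  // the only constant visible in the code stream
}

uint32_t RecombineAtRuntime(uint32_t stored, uint32_t jit_cookie) {
  return stored ^ jit_cookie;  // recovers the original value
}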
@@ -1037,6 +1023,20 @@ void MacroAssembler::AssertName(Register object) {
}
+void MacroAssembler::AssertUndefinedOrAllocationSite(Register object) {
+ if (emit_debug_code()) {
+ Label done_checking;
+ AssertNotSmi(object);
+ cmp(object, isolate()->factory()->undefined_value());
+ j(equal, &done_checking);
+ cmp(FieldOperand(object, 0),
+ Immediate(isolate()->factory()->allocation_site_map()));
+ Assert(equal, kExpectedUndefinedOrCell);
+ bind(&done_checking);
+ }
+}
+
+
void MacroAssembler::AssertNotSmi(Register object) {
if (emit_debug_code()) {
test(object, Immediate(kSmiTagMask));
@@ -2244,7 +2244,7 @@ void MacroAssembler::CallRuntime(const Runtime::Function* f,
// arguments passed in because it is constant. At some point we
// should remove this need and make the runtime routine entry code
// smarter.
- Set(eax, Immediate(num_arguments));
+ Move(eax, Immediate(num_arguments));
mov(ebx, Immediate(ExternalReference(f, isolate())));
CEntryStub ces(1, CpuFeatures::IsSupported(SSE2) ? save_doubles
: kDontSaveFPRegs);
@@ -2269,7 +2269,7 @@ void MacroAssembler::TailCallExternalReference(const ExternalReference& ext,
// arguments passed in because it is constant. At some point we
// should remove this need and make the runtime routine entry code
// smarter.
- Set(eax, Immediate(num_arguments));
+ Move(eax, Immediate(num_arguments));
JumpToExternalReference(ext);
}
@@ -2429,7 +2429,7 @@ void MacroAssembler::CallApiFunctionAndReturn(
bind(&promote_scheduled_exception);
{
FrameScope frame(this, StackFrame::INTERNAL);
- CallRuntime(Runtime::kPromoteScheduledException, 0);
+ CallRuntime(Runtime::kHiddenPromoteScheduledException, 0);
}
jmp(&exception_handled);
@@ -2689,41 +2689,6 @@ void MacroAssembler::LoadTransitionedArrayMapConditional(
}
-void MacroAssembler::LoadInitialArrayMap(
- Register function_in, Register scratch,
- Register map_out, bool can_have_holes) {
- ASSERT(!function_in.is(map_out));
- Label done;
- mov(map_out, FieldOperand(function_in,
- JSFunction::kPrototypeOrInitialMapOffset));
- if (!FLAG_smi_only_arrays) {
- ElementsKind kind = can_have_holes ? FAST_HOLEY_ELEMENTS : FAST_ELEMENTS;
- LoadTransitionedArrayMapConditional(FAST_SMI_ELEMENTS,
- kind,
- map_out,
- scratch,
- &done);
- } else if (can_have_holes) {
- LoadTransitionedArrayMapConditional(FAST_SMI_ELEMENTS,
- FAST_HOLEY_SMI_ELEMENTS,
- map_out,
- scratch,
- &done);
- }
- bind(&done);
-}
-
-
-void MacroAssembler::LoadGlobalContext(Register global_context) {
- // Load the global or builtins object from the current context.
- mov(global_context,
- Operand(esi, Context::SlotOffset(Context::GLOBAL_OBJECT_INDEX)));
- // Load the native context from the global or builtins object.
- mov(global_context,
- FieldOperand(global_context, GlobalObject::kNativeContextOffset));
-}
-
-
void MacroAssembler::LoadGlobalFunction(int index, Register function) {
// Load the global or builtins object from the current context.
mov(function,
@@ -2868,6 +2833,37 @@ void MacroAssembler::Move(Register dst, Register src) {
}
+void MacroAssembler::Move(Register dst, const Immediate& x) {
+ if (x.is_zero()) {
+ xor_(dst, dst); // Shorter than mov of 32-bit immediate 0.
+ } else {
+ mov(dst, x);
+ }
+}
+
+
+void MacroAssembler::Move(const Operand& dst, const Immediate& x) {
+ mov(dst, x);
+}
+
+
+void MacroAssembler::Move(XMMRegister dst, double val) {
+ // TODO(titzer): recognize double constants with ExternalReferences.
+ CpuFeatureScope scope(this, SSE2);
+ uint64_t int_val = BitCast<uint64_t, double>(val);
+ if (int_val == 0) {
+ xorps(dst, dst);
+ } else {
+ int32_t lower = static_cast<int32_t>(int_val);
+ int32_t upper = static_cast<int32_t>(int_val >> kBitsPerInt);
+ push(Immediate(upper));
+ push(Immediate(lower));
+ movsd(dst, Operand(esp, 0));
+ add(esp, Immediate(kDoubleSize));
+ }
+}
+
+
void MacroAssembler::SetCounter(StatsCounter* counter, int value) {
if (FLAG_native_code_counters && counter->Enabled()) {
mov(Operand::StaticVariable(ExternalReference(counter)), Immediate(value));
@@ -2980,16 +2976,8 @@ void MacroAssembler::CheckStackAlignment() {
void MacroAssembler::Abort(BailoutReason reason) {
- // We want to pass the msg string like a smi to avoid GC
- // problems, however msg is not guaranteed to be aligned
- // properly. Instead, we pass an aligned pointer that is
- // a proper v8 smi, but also pass the alignment difference
- // from the real pointer as a smi.
- const char* msg = GetBailoutReason(reason);
- intptr_t p1 = reinterpret_cast<intptr_t>(msg);
- intptr_t p0 = (p1 & ~kSmiTagMask) + kSmiTag;
- ASSERT(reinterpret_cast<Object*>(p0)->IsSmi());
#ifdef DEBUG
+ const char* msg = GetBailoutReason(reason);
if (msg != NULL) {
RecordComment("Abort message: ");
RecordComment(msg);
@@ -3002,16 +2990,15 @@ void MacroAssembler::Abort(BailoutReason reason) {
#endif
push(eax);
- push(Immediate(p0));
- push(Immediate(reinterpret_cast<intptr_t>(Smi::FromInt(p1 - p0))));
+ push(Immediate(reinterpret_cast<intptr_t>(Smi::FromInt(reason))));
// Disable stub call restrictions to always allow calls to abort.
if (!has_frame_) {
// We don't actually want to generate a pile of code for this, so just
// claim there is a stack frame, without generating one.
FrameScope scope(this, StackFrame::NONE);
- CallRuntime(Runtime::kAbort, 2);
+ CallRuntime(Runtime::kAbort, 1);
} else {
- CallRuntime(Runtime::kAbort, 2);
+ CallRuntime(Runtime::kAbort, 1);
}
// will not return here
int3();
@@ -3034,9 +3021,9 @@ void MacroAssembler::Throw(BailoutReason reason) {
// We don't actually want to generate a pile of code for this, so just
// claim there is a stack frame, without generating one.
FrameScope scope(this, StackFrame::NONE);
- CallRuntime(Runtime::kThrowMessage, 1);
+ CallRuntime(Runtime::kHiddenThrowMessage, 1);
} else {
- CallRuntime(Runtime::kThrowMessage, 1);
+ CallRuntime(Runtime::kHiddenThrowMessage, 1);
}
// will not return here
int3();
@@ -3647,6 +3634,22 @@ void MacroAssembler::JumpIfDictionaryInPrototypeChain(
j(not_equal, &loop_again);
}
+
+void MacroAssembler::TruncatingDiv(Register dividend, int32_t divisor) {
+ ASSERT(!dividend.is(eax));
+ ASSERT(!dividend.is(edx));
+ MultiplierAndShift ms(divisor);
+ mov(eax, Immediate(ms.multiplier()));
+ imul(dividend);
+ if (divisor > 0 && ms.multiplier() < 0) add(edx, dividend);
+ if (divisor < 0 && ms.multiplier() > 0) sub(edx, dividend);
+ if (ms.shift() > 0) sar(edx, ms.shift());
+ mov(eax, dividend);
+ shr(eax, 31);
+ add(edx, eax);
+}
+
+
} } // namespace v8::internal
#endif // V8_TARGET_ARCH_IA32
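The new TruncatingDiv helper emits the standard multiply-by-magic-number sequence for signed division by a compile-time constant. A host-side sketch of the arithmetic the emitted instructions perform; `multiplier` and `shift` stand in for whatever MultiplierAndShift(divisor) selects and are assumed to satisfy the usual signed-division identity:

#include <cstdint>

int32_t TruncatingDivSketch(int32_t dividend, int32_t divisor,
                            int32_t multiplier, int shift) {
  // imul dividend: keep the high 32 bits of the 64-bit signed product (edx).
  int64_t product = static_cast<int64_t>(dividend) * multiplier;
  int32_t high = static_cast<int32_t>(product >> 32);
  // Sign corrections, matching the conditional add/sub of the dividend.
  if (divisor > 0 && multiplier < 0) high += dividend;
  if (divisor < 0 && multiplier > 0) high -= dividend;
  // sar edx, shift.
  if (shift > 0) high >>= shift;
  // shr eax, 31; add edx, eax: rounds the quotient toward zero for negative
  // dividends.
  high += static_cast<uint32_t>(dividend) >> 31;
  return high;  // truncated dividend / divisor
}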
diff --git a/deps/v8/src/ia32/macro-assembler-ia32.h b/deps/v8/src/ia32/macro-assembler-ia32.h
index 6807d082d8..698c81fe83 100644
--- a/deps/v8/src/ia32/macro-assembler-ia32.h
+++ b/deps/v8/src/ia32/macro-assembler-ia32.h
@@ -262,14 +262,6 @@ class MacroAssembler: public Assembler {
Register scratch,
Label* no_map_match);
- // Load the initial map for new Arrays from a JSFunction.
- void LoadInitialArrayMap(Register function_in,
- Register scratch,
- Register map_out,
- bool can_have_holes);
-
- void LoadGlobalContext(Register global_context);
-
// Load the global function with the given index.
void LoadGlobalFunction(int index, Register function);
@@ -295,7 +287,7 @@ class MacroAssembler: public Assembler {
if (object->IsHeapObject()) {
LoadHeapObject(result, Handle<HeapObject>::cast(object));
} else {
- Set(result, Immediate(object));
+ Move(result, Immediate(object));
}
}
@@ -358,9 +350,6 @@ class MacroAssembler: public Assembler {
void GetBuiltinEntry(Register target, Builtins::JavaScript id);
// Expression support
- void Set(Register dst, const Immediate& x);
- void Set(const Operand& dst, const Immediate& x);
-
// cvtsi2sd instruction only writes to the low 64-bit of dst register, which
// hinders register renaming and makes dependence chains longer. So we use
// xorps to clear the dst register before cvtsi2sd to solve this issue.
@@ -369,7 +358,7 @@ class MacroAssembler: public Assembler {
// Support for constant splitting.
bool IsUnsafeImmediate(const Immediate& x);
- void SafeSet(Register dst, const Immediate& x);
+ void SafeMove(Register dst, const Immediate& x);
void SafePush(const Immediate& x);
// Compare object type for heap object.
@@ -557,6 +546,10 @@ class MacroAssembler: public Assembler {
// Abort execution if argument is not a name, enabled via --debug-code.
void AssertName(Register object);
+ // Abort execution if argument is not undefined or an AllocationSite, enabled
+ // via --debug-code.
+ void AssertUndefinedOrAllocationSite(Register object);
+
// ---------------------------------------------------------------------------
// Exception handling
@@ -851,6 +844,13 @@ class MacroAssembler: public Assembler {
// Move if the registers are not identical.
void Move(Register target, Register source);
+ // Move a constant into a destination using the most efficient encoding.
+ void Move(Register dst, const Immediate& x);
+ void Move(const Operand& dst, const Immediate& x);
+
+ // Move an immediate into an XMM register.
+ void Move(XMMRegister dst, double val);
+
// Push a handle value.
void Push(Handle<Object> handle) { push(Immediate(handle)); }
void Push(Smi* smi) { Push(Handle<Smi>(smi, isolate())); }
@@ -863,6 +863,10 @@ class MacroAssembler: public Assembler {
// Insert code to verify that the x87 stack has the specified depth (0-7)
void VerifyX87StackDepth(uint32_t depth);
+ // Emit code for a truncating division by a constant. The dividend register is
+ // unchanged, the result is in edx, and eax gets clobbered.
+ void TruncatingDiv(Register dividend, int32_t divisor);
+
// ---------------------------------------------------------------------------
// StatsCounter support
diff --git a/deps/v8/src/ia32/regexp-macro-assembler-ia32.cc b/deps/v8/src/ia32/regexp-macro-assembler-ia32.cc
index d371c456c1..255df3285e 100644
--- a/deps/v8/src/ia32/regexp-macro-assembler-ia32.cc
+++ b/deps/v8/src/ia32/regexp-macro-assembler-ia32.cc
@@ -632,7 +632,7 @@ bool RegExpMacroAssemblerIA32::CheckSpecialCharacterClass(uc16 type,
void RegExpMacroAssemblerIA32::Fail() {
STATIC_ASSERT(FAILURE == 0); // Return value for failure is zero.
if (!global()) {
- __ Set(eax, Immediate(FAILURE));
+ __ Move(eax, Immediate(FAILURE));
}
__ jmp(&exit_label_);
}
diff --git a/deps/v8/src/ia32/stub-cache-ia32.cc b/deps/v8/src/ia32/stub-cache-ia32.cc
index a5b93b9b22..1a745c7b7f 100644
--- a/deps/v8/src/ia32/stub-cache-ia32.cc
+++ b/deps/v8/src/ia32/stub-cache-ia32.cc
@@ -283,7 +283,7 @@ void StubCompiler::GenerateDirectLoadGlobalFunctionPrototype(
__ j(not_equal, miss);
// Load its initial map. The global functions all have initial maps.
- __ Set(prototype, Immediate(Handle<Map>(function->initial_map())));
+ __ Move(prototype, Immediate(Handle<Map>(function->initial_map())));
// Load the prototype from the initial map.
__ mov(prototype, FieldOperand(prototype, Map::kPrototypeOffset));
}
@@ -306,54 +306,6 @@ void StubCompiler::GenerateLoadArrayLength(MacroAssembler* masm,
}
-// Generate code to check if an object is a string. If the object is
-// a string, the map's instance type is left in the scratch register.
-static void GenerateStringCheck(MacroAssembler* masm,
- Register receiver,
- Register scratch,
- Label* smi,
- Label* non_string_object) {
- // Check that the object isn't a smi.
- __ JumpIfSmi(receiver, smi);
-
- // Check that the object is a string.
- __ mov(scratch, FieldOperand(receiver, HeapObject::kMapOffset));
- __ movzx_b(scratch, FieldOperand(scratch, Map::kInstanceTypeOffset));
- STATIC_ASSERT(kNotStringTag != 0);
- __ test(scratch, Immediate(kNotStringTag));
- __ j(not_zero, non_string_object);
-}
-
-
-void StubCompiler::GenerateLoadStringLength(MacroAssembler* masm,
- Register receiver,
- Register scratch1,
- Register scratch2,
- Label* miss) {
- Label check_wrapper;
-
- // Check if the object is a string leaving the instance type in the
- // scratch register.
- GenerateStringCheck(masm, receiver, scratch1, miss, &check_wrapper);
-
- // Load length from the string and convert to a smi.
- __ mov(eax, FieldOperand(receiver, String::kLengthOffset));
- __ ret(0);
-
- // Check if the object is a JSValue wrapper.
- __ bind(&check_wrapper);
- __ cmp(scratch1, JS_VALUE_TYPE);
- __ j(not_equal, miss);
-
- // Check if the wrapped value is a string and load the length
- // directly if it is.
- __ mov(scratch2, FieldOperand(receiver, JSValue::kValueOffset));
- GenerateStringCheck(masm, scratch2, scratch1, miss, miss);
- __ mov(eax, FieldOperand(scratch2, String::kLengthOffset));
- __ ret(0);
-}
-
-
void StubCompiler::GenerateLoadFunctionPrototype(MacroAssembler* masm,
Register receiver,
Register scratch1,
@@ -371,7 +323,7 @@ void StubCompiler::GenerateFastPropertyLoad(MacroAssembler* masm,
bool inobject,
int index,
Representation representation) {
- ASSERT(!FLAG_track_double_fields || !representation.IsDouble());
+ ASSERT(!representation.IsDouble());
int offset = index * kPointerSize;
if (!inobject) {
// Calculate the offset into the properties array.
@@ -422,13 +374,14 @@ static void CompileCallLoadPropertyWithInterceptor(
// This function uses push() to generate smaller, faster code than
// the version above. It is an optimization that will be removed
// when api call ICs are generated in hydrogen.
-static void GenerateFastApiCall(MacroAssembler* masm,
- const CallOptimization& optimization,
- Handle<Map> receiver_map,
- Register receiver,
- Register scratch_in,
- int argc,
- Register* values) {
+void StubCompiler::GenerateFastApiCall(MacroAssembler* masm,
+ const CallOptimization& optimization,
+ Handle<Map> receiver_map,
+ Register receiver,
+ Register scratch_in,
+ bool is_store,
+ int argc,
+ Register* values) {
// Copy return value.
__ pop(scratch_in);
// receiver
@@ -493,7 +446,7 @@ static void GenerateFastApiCall(MacroAssembler* masm,
__ mov(api_function_address, Immediate(function_address));
// Jump to stub.
- CallApiFunctionStub stub(true, call_data_undefined, argc);
+ CallApiFunctionStub stub(is_store, call_data_undefined, argc);
__ TailCallStub(&stub);
}
@@ -572,11 +525,11 @@ void StoreStubCompiler::GenerateStoreTransition(MacroAssembler* masm,
Handle<Object> constant(descriptors->GetValue(descriptor), masm->isolate());
__ CmpObject(value_reg, constant);
__ j(not_equal, miss_label);
- } else if (FLAG_track_fields && representation.IsSmi()) {
+ } else if (representation.IsSmi()) {
__ JumpIfNotSmi(value_reg, miss_label);
- } else if (FLAG_track_heap_object_fields && representation.IsHeapObject()) {
+ } else if (representation.IsHeapObject()) {
__ JumpIfSmi(value_reg, miss_label);
- } else if (FLAG_track_double_fields && representation.IsDouble()) {
+ } else if (representation.IsDouble()) {
Label do_store, heap_number;
__ AllocateHeapNumber(storage_reg, scratch1, scratch2, slow);
@@ -667,15 +620,15 @@ void StoreStubCompiler::GenerateStoreTransition(MacroAssembler* masm,
if (index < 0) {
// Set the property straight into the object.
int offset = object->map()->instance_size() + (index * kPointerSize);
- if (FLAG_track_double_fields && representation.IsDouble()) {
+ if (representation.IsDouble()) {
__ mov(FieldOperand(receiver_reg, offset), storage_reg);
} else {
__ mov(FieldOperand(receiver_reg, offset), value_reg);
}
- if (!FLAG_track_fields || !representation.IsSmi()) {
+ if (!representation.IsSmi()) {
// Update the write barrier for the array address.
- if (!FLAG_track_double_fields || !representation.IsDouble()) {
+ if (!representation.IsDouble()) {
__ mov(storage_reg, value_reg);
}
__ RecordWriteField(receiver_reg,
@@ -691,15 +644,15 @@ void StoreStubCompiler::GenerateStoreTransition(MacroAssembler* masm,
int offset = index * kPointerSize + FixedArray::kHeaderSize;
// Get the properties array (optimistically).
__ mov(scratch1, FieldOperand(receiver_reg, JSObject::kPropertiesOffset));
- if (FLAG_track_double_fields && representation.IsDouble()) {
+ if (representation.IsDouble()) {
__ mov(FieldOperand(scratch1, offset), storage_reg);
} else {
__ mov(FieldOperand(scratch1, offset), value_reg);
}
- if (!FLAG_track_fields || !representation.IsSmi()) {
+ if (!representation.IsSmi()) {
// Update the write barrier for the array address.
- if (!FLAG_track_double_fields || !representation.IsDouble()) {
+ if (!representation.IsDouble()) {
__ mov(storage_reg, value_reg);
}
__ RecordWriteField(scratch1,
@@ -742,11 +695,11 @@ void StoreStubCompiler::GenerateStoreField(MacroAssembler* masm,
Representation representation = lookup->representation();
ASSERT(!representation.IsNone());
- if (FLAG_track_fields && representation.IsSmi()) {
+ if (representation.IsSmi()) {
__ JumpIfNotSmi(value_reg, miss_label);
- } else if (FLAG_track_heap_object_fields && representation.IsHeapObject()) {
+ } else if (representation.IsHeapObject()) {
__ JumpIfSmi(value_reg, miss_label);
- } else if (FLAG_track_double_fields && representation.IsDouble()) {
+ } else if (representation.IsDouble()) {
// Load the double storage.
if (index < 0) {
int offset = object->map()->instance_size() + (index * kPointerSize);
@@ -793,7 +746,7 @@ void StoreStubCompiler::GenerateStoreField(MacroAssembler* masm,
return;
}
- ASSERT(!FLAG_track_double_fields || !representation.IsDouble());
+ ASSERT(!representation.IsDouble());
// TODO(verwaest): Share this code as a code stub.
SmiCheck smi_check = representation.IsTagged()
? INLINE_SMI_CHECK : OMIT_SMI_CHECK;
@@ -802,7 +755,7 @@ void StoreStubCompiler::GenerateStoreField(MacroAssembler* masm,
int offset = object->map()->instance_size() + (index * kPointerSize);
__ mov(FieldOperand(receiver_reg, offset), value_reg);
- if (!FLAG_track_fields || !representation.IsSmi()) {
+ if (!representation.IsSmi()) {
// Update the write barrier for the array address.
// Pass the value being stored in the now unused name_reg.
__ mov(name_reg, value_reg);
@@ -821,7 +774,7 @@ void StoreStubCompiler::GenerateStoreField(MacroAssembler* masm,
__ mov(scratch1, FieldOperand(receiver_reg, JSObject::kPropertiesOffset));
__ mov(FieldOperand(scratch1, offset), value_reg);
- if (!FLAG_track_fields || !representation.IsSmi()) {
+ if (!representation.IsSmi()) {
// Update the write barrier for the array address.
// Pass the value being stored in the now unused name_reg.
__ mov(name_reg, value_reg);
@@ -860,9 +813,6 @@ Register StubCompiler::CheckPrototypes(Handle<HeapType> type,
Label* miss,
PrototypeCheckType check) {
Handle<Map> receiver_map(IC::TypeToMap(*type, isolate()));
- // Make sure that the type feedback oracle harvests the receiver map.
- // TODO(svenpanne) Remove this hack when all ICs are reworked.
- __ mov(scratch1, receiver_map);
// Make sure there's no overlap between holder and object registers.
ASSERT(!scratch1.is(object_reg) && !scratch1.is(holder_reg));
@@ -1066,15 +1016,6 @@ void LoadStubCompiler::GenerateLoadField(Register reg,
void LoadStubCompiler::GenerateLoadCallback(
- const CallOptimization& call_optimization,
- Handle<Map> receiver_map) {
- GenerateFastApiCall(
- masm(), call_optimization, receiver_map,
- receiver(), scratch1(), 0, NULL);
-}
-
-
-void LoadStubCompiler::GenerateLoadCallback(
Register reg,
Handle<ExecutableAccessorInfo> callback) {
// Insert additional parameters into the stack frame above return address.
@@ -1263,24 +1204,6 @@ Handle<Code> StoreStubCompiler::CompileStoreCallback(
}
-Handle<Code> StoreStubCompiler::CompileStoreCallback(
- Handle<JSObject> object,
- Handle<JSObject> holder,
- Handle<Name> name,
- const CallOptimization& call_optimization) {
- HandlerFrontend(IC::CurrentTypeOf(object, isolate()),
- receiver(), holder, name);
-
- Register values[] = { value() };
- GenerateFastApiCall(
- masm(), call_optimization, handle(object->map()),
- receiver(), scratch1(), 1, values);
-
- // Return the generated code.
- return GetCode(kind(), Code::FAST, name);
-}
-
-
#undef __
#define __ ACCESS_MASM(masm)
@@ -1288,30 +1211,26 @@ Handle<Code> StoreStubCompiler::CompileStoreCallback(
void StoreStubCompiler::GenerateStoreViaSetter(
MacroAssembler* masm,
Handle<HeapType> type,
+ Register receiver,
Handle<JSFunction> setter) {
// ----------- S t a t e -------------
- // -- eax : value
- // -- ecx : name
- // -- edx : receiver
// -- esp[0] : return address
// -----------------------------------
{
FrameScope scope(masm, StackFrame::INTERNAL);
- Register receiver = edx;
- Register value = eax;
// Save value register, so we can restore it later.
- __ push(value);
+ __ push(value());
if (!setter.is_null()) {
// Call the JavaScript setter with receiver and value on the stack.
if (IC::TypeToMap(*type, masm->isolate())->IsJSGlobalObjectMap()) {
// Swap in the global receiver.
__ mov(receiver,
- FieldOperand(receiver, JSGlobalObject::kGlobalReceiverOffset));
+ FieldOperand(receiver, JSGlobalObject::kGlobalReceiverOffset));
}
__ push(receiver);
- __ push(value);
+ __ push(value());
ParameterCount actual(1);
ParameterCount expected(setter);
__ InvokeFunction(setter, expected, actual,
@@ -1355,6 +1274,20 @@ Handle<Code> StoreStubCompiler::CompileStoreInterceptor(
}
+void StoreStubCompiler::GenerateStoreArrayLength() {
+ // Prepare tail call to StoreIC_ArrayLength.
+ __ pop(scratch1()); // remove the return address
+ __ push(receiver());
+ __ push(value());
+ __ push(scratch1()); // restore return address
+
+ ExternalReference ref =
+ ExternalReference(IC_Utility(IC::kStoreIC_ArrayLength),
+ masm()->isolate());
+ __ TailCallExternalReference(ref, 2, 1);
+}
+
+
Handle<Code> KeyedStoreStubCompiler::CompileStorePolymorphic(
MapHandleList* receiver_maps,
CodeHandleList* handler_stubs,
@@ -1412,16 +1345,21 @@ Register* KeyedLoadStubCompiler::registers() {
}
+Register StoreStubCompiler::value() {
+ return eax;
+}
+
+
Register* StoreStubCompiler::registers() {
- // receiver, name, value, scratch1, scratch2, scratch3.
- static Register registers[] = { edx, ecx, eax, ebx, edi, no_reg };
+ // receiver, name, scratch1, scratch2, scratch3.
+ static Register registers[] = { edx, ecx, ebx, edi, no_reg };
return registers;
}
Register* KeyedStoreStubCompiler::registers() {
- // receiver, name, value, scratch1, scratch2, scratch3.
- static Register registers[] = { edx, ecx, eax, ebx, edi, no_reg };
+ // receiver, name, scratch1, scratch2, scratch3.
+ static Register registers[] = { edx, ecx, ebx, edi, no_reg };
return registers;
}
diff --git a/deps/v8/src/ic-inl.h b/deps/v8/src/ic-inl.h
index e0f807ce4b..ebe0fb9b35 100644
--- a/deps/v8/src/ic-inl.h
+++ b/deps/v8/src/ic-inl.h
@@ -50,12 +50,20 @@ Address IC::address() const {
// At least one break point is active; perform an additional test to ensure that
// break point locations are updated correctly.
- if (debug->IsDebugBreak(Assembler::target_address_at(result))) {
+ if (debug->IsDebugBreak(Assembler::target_address_at(result,
+ raw_constant_pool()))) {
// If the call site is a call to debug break then return the address in
// the original code instead of the address in the running code. This will
// cause the original code to be updated and keep the breakpoint active in
// the running code.
- return OriginalCodeAddress();
+ Code* code = GetCode();
+ Code* original_code = GetOriginalCode();
+ intptr_t delta =
+ original_code->instruction_start() - code->instruction_start();
+ // Return the address in the original code. This is the place where
+ // the call which has been overwritten by the DebugBreakXXX resides
+ // and the place where the inline cache system should look.
+ return result + delta;
} else {
// No break point here just return the address of the call.
return result;
@@ -66,9 +74,45 @@ Address IC::address() const {
}
-Code* IC::GetTargetAtAddress(Address address) {
+ConstantPoolArray* IC::constant_pool() const {
+ if (!FLAG_enable_ool_constant_pool) {
+ return NULL;
+ } else {
+ Handle<ConstantPoolArray> result = raw_constant_pool_;
+#ifdef ENABLE_DEBUGGER_SUPPORT
+ Debug* debug = isolate()->debug();
+ // First check if any break points are active if not just return the
+ // original constant pool.
+ if (!debug->has_break_points()) return *result;
+
+ // At least one break point is active; perform an additional test to ensure that
+ // break point locations are updated correctly.
+ Address target = Assembler::target_address_from_return_address(pc());
+ if (debug->IsDebugBreak(
+ Assembler::target_address_at(target, raw_constant_pool()))) {
+ // If the call site is a call to debug break then we want to return the
+ // constant pool for the original code instead of the breakpointed code.
+ return GetOriginalCode()->constant_pool();
+ }
+#endif
+ return *result;
+ }
+}
+
+
+ConstantPoolArray* IC::raw_constant_pool() const {
+ if (FLAG_enable_ool_constant_pool) {
+ return *raw_constant_pool_;
+ } else {
+ return NULL;
+ }
+}
+
+
+Code* IC::GetTargetAtAddress(Address address,
+ ConstantPoolArray* constant_pool) {
// Get the target address of the IC.
- Address target = Assembler::target_address_at(address);
+ Address target = Assembler::target_address_at(address, constant_pool);
// Convert target address to the code object. Code::GetCodeFromTargetAddress
// is safe for use during GC where the map might be marked.
Code* result = Code::GetCodeFromTargetAddress(target);
@@ -77,10 +121,12 @@ Code* IC::GetTargetAtAddress(Address address) {
}
-void IC::SetTargetAtAddress(Address address, Code* target) {
+void IC::SetTargetAtAddress(Address address,
+ Code* target,
+ ConstantPoolArray* constant_pool) {
ASSERT(target->is_inline_cache_stub() || target->is_compare_ic_stub());
Heap* heap = target->GetHeap();
- Code* old_target = GetTargetAtAddress(address);
+ Code* old_target = GetTargetAtAddress(address, constant_pool);
#ifdef DEBUG
// STORE_IC and KEYED_STORE_IC use Code::extra_ic_state() to mark
// ICs as strict mode. The strict-ness of the IC must be preserved.
@@ -90,7 +136,8 @@ void IC::SetTargetAtAddress(Address address, Code* target) {
StoreIC::GetStrictMode(target->extra_ic_state()));
}
#endif
- Assembler::set_target_address_at(address, target->instruction_start());
+ Assembler::set_target_address_at(
+ address, constant_pool, target->instruction_start());
if (heap->gc_state() == Heap::MARK_COMPACT) {
heap->mark_compact_collector()->RecordCodeTargetPatch(address, target);
} else {
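The extra ConstantPoolArray* parameter threaded through GetTargetAtAddress and SetTargetAtAddress exists because, with --enable_ool_constant_pool, a call site no longer encodes its target inline: the target is loaded from a slot in the code object's constant pool, so both reading and patching it need the pool base. A rough sketch of that mechanism under a hypothetical pool layout (the real Assembler::target_address_at decodes the actual load instruction; names here are illustrative):

#include <cstdint>
#include <cstring>

using Address = uint8_t*;

// Read the call target recorded in a constant pool slot.
Address TargetViaConstantPool(Address pool_base, int slot_offset) {
  Address target;
  std::memcpy(&target, pool_base + slot_offset, sizeof(target));
  return target;
}

// Retarget the call by rewriting the same slot.
void PatchTargetViaConstantPool(Address pool_base, int slot_offset,
                                Address new_target) {
  std::memcpy(pool_base + slot_offset, &new_target, sizeof(new_target));
}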
diff --git a/deps/v8/src/ic.cc b/deps/v8/src/ic.cc
index 1e7997a80d..a327173629 100644
--- a/deps/v8/src/ic.cc
+++ b/deps/v8/src/ic.cc
@@ -127,6 +127,11 @@ IC::IC(FrameDepth depth, Isolate* isolate)
// running DeltaBlue and a ~25% speedup of gbemu with the '--nouse-ic' flag.
const Address entry =
Isolate::c_entry_fp(isolate->thread_local_top());
+ Address constant_pool = NULL;
+ if (FLAG_enable_ool_constant_pool) {
+ constant_pool = Memory::Address_at(
+ entry + ExitFrameConstants::kConstantPoolOffset);
+ }
Address* pc_address =
reinterpret_cast<Address*>(entry + ExitFrameConstants::kCallerPCOffset);
Address fp = Memory::Address_at(entry + ExitFrameConstants::kCallerFPOffset);
@@ -134,6 +139,10 @@ IC::IC(FrameDepth depth, Isolate* isolate)
// StubFailureTrampoline, we need to look one frame further down the stack to
// find the frame pointer and the return address stack slot.
if (depth == EXTRA_CALL_FRAME) {
+ if (FLAG_enable_ool_constant_pool) {
+ constant_pool = Memory::Address_at(
+ fp + StandardFrameConstants::kConstantPoolOffset);
+ }
const int kCallerPCOffset = StandardFrameConstants::kCallerPCOffset;
pc_address = reinterpret_cast<Address*>(fp + kCallerPCOffset);
fp = Memory::Address_at(fp + StandardFrameConstants::kCallerFPOffset);
@@ -145,18 +154,20 @@ IC::IC(FrameDepth depth, Isolate* isolate)
ASSERT(fp == frame->fp() && pc_address == frame->pc_address());
#endif
fp_ = fp;
+ if (FLAG_enable_ool_constant_pool) {
+ raw_constant_pool_ = handle(
+ ConstantPoolArray::cast(reinterpret_cast<Object*>(constant_pool)),
+ isolate);
+ }
pc_address_ = StackFrame::ResolveReturnAddressLocation(pc_address);
target_ = handle(raw_target(), isolate);
state_ = target_->ic_state();
- extra_ic_state_ = target_->needs_extended_extra_ic_state(target_->kind())
- ? target_->extended_extra_ic_state()
- : target_->extra_ic_state();
+ extra_ic_state_ = target_->extra_ic_state();
}
#ifdef ENABLE_DEBUGGER_SUPPORT
-Address IC::OriginalCodeAddress() const {
- HandleScope scope(isolate());
+SharedFunctionInfo* IC::GetSharedFunctionInfo() const {
// Compute the JavaScript frame for the frame pointer of this IC
// structure. We need this to be able to find the function
// corresponding to the frame.
@@ -166,21 +177,25 @@ Address IC::OriginalCodeAddress() const {
// Find the function on the stack and both the active code for the
// function and the original code.
JSFunction* function = frame->function();
- Handle<SharedFunctionInfo> shared(function->shared(), isolate());
+ return function->shared();
+}
+
+
+Code* IC::GetCode() const {
+ HandleScope scope(isolate());
+ Handle<SharedFunctionInfo> shared(GetSharedFunctionInfo(), isolate());
Code* code = shared->code();
+ return code;
+}
+
+
+Code* IC::GetOriginalCode() const {
+ HandleScope scope(isolate());
+ Handle<SharedFunctionInfo> shared(GetSharedFunctionInfo(), isolate());
ASSERT(Debug::HasDebugInfo(shared));
Code* original_code = Debug::GetDebugInfo(shared)->original_code();
ASSERT(original_code->IsCode());
- // Get the address of the call site in the active code. This is the
- // place where the call to DebugBreakXXX is and where the IC
- // normally would be.
- Address addr = Assembler::target_address_from_return_address(pc());
- // Return the address in the original code. This is the place where
- // the call which has been overwritten by the DebugBreakXXX resides
- // and the place where the inline cache system should look.
- intptr_t delta =
- original_code->instruction_start() - code->instruction_start();
- return addr + delta;
+ return original_code;
}
#endif
@@ -411,21 +426,26 @@ void IC::PostPatching(Address address, Code* target, Code* old_target) {
}
-void IC::Clear(Isolate* isolate, Address address) {
- Code* target = GetTargetAtAddress(address);
+void IC::Clear(Isolate* isolate, Address address,
+ ConstantPoolArray* constant_pool) {
+ Code* target = GetTargetAtAddress(address, constant_pool);
// Don't clear debug break inline cache as it will remove the break point.
if (target->is_debug_stub()) return;
switch (target->kind()) {
- case Code::LOAD_IC: return LoadIC::Clear(isolate, address, target);
+ case Code::LOAD_IC:
+ return LoadIC::Clear(isolate, address, target, constant_pool);
case Code::KEYED_LOAD_IC:
- return KeyedLoadIC::Clear(isolate, address, target);
- case Code::STORE_IC: return StoreIC::Clear(isolate, address, target);
+ return KeyedLoadIC::Clear(isolate, address, target, constant_pool);
+ case Code::STORE_IC:
+ return StoreIC::Clear(isolate, address, target, constant_pool);
case Code::KEYED_STORE_IC:
- return KeyedStoreIC::Clear(isolate, address, target);
- case Code::COMPARE_IC: return CompareIC::Clear(isolate, address, target);
- case Code::COMPARE_NIL_IC: return CompareNilIC::Clear(address, target);
+ return KeyedStoreIC::Clear(isolate, address, target, constant_pool);
+ case Code::COMPARE_IC:
+ return CompareIC::Clear(isolate, address, target, constant_pool);
+ case Code::COMPARE_NIL_IC:
+ return CompareNilIC::Clear(address, target, constant_pool);
case Code::BINARY_OP_IC:
case Code::TO_BOOLEAN_IC:
// Clearing these is tricky and does not
@@ -436,40 +456,56 @@ void IC::Clear(Isolate* isolate, Address address) {
}
-void KeyedLoadIC::Clear(Isolate* isolate, Address address, Code* target) {
+void KeyedLoadIC::Clear(Isolate* isolate,
+ Address address,
+ Code* target,
+ ConstantPoolArray* constant_pool) {
if (IsCleared(target)) return;
// Make sure to also clear the map used in inline fast cases. If we
// do not clear these maps, cached code can keep objects alive
// through the embedded maps.
- SetTargetAtAddress(address, *pre_monomorphic_stub(isolate));
+ SetTargetAtAddress(address, *pre_monomorphic_stub(isolate), constant_pool);
}
-void LoadIC::Clear(Isolate* isolate, Address address, Code* target) {
+void LoadIC::Clear(Isolate* isolate,
+ Address address,
+ Code* target,
+ ConstantPoolArray* constant_pool) {
if (IsCleared(target)) return;
Code* code = target->GetIsolate()->stub_cache()->FindPreMonomorphicIC(
Code::LOAD_IC, target->extra_ic_state());
- SetTargetAtAddress(address, code);
+ SetTargetAtAddress(address, code, constant_pool);
}
-void StoreIC::Clear(Isolate* isolate, Address address, Code* target) {
+void StoreIC::Clear(Isolate* isolate,
+ Address address,
+ Code* target,
+ ConstantPoolArray* constant_pool) {
if (IsCleared(target)) return;
Code* code = target->GetIsolate()->stub_cache()->FindPreMonomorphicIC(
Code::STORE_IC, target->extra_ic_state());
- SetTargetAtAddress(address, code);
+ SetTargetAtAddress(address, code, constant_pool);
}
-void KeyedStoreIC::Clear(Isolate* isolate, Address address, Code* target) {
+void KeyedStoreIC::Clear(Isolate* isolate,
+ Address address,
+ Code* target,
+ ConstantPoolArray* constant_pool) {
if (IsCleared(target)) return;
SetTargetAtAddress(address,
*pre_monomorphic_stub(
- isolate, StoreIC::GetStrictMode(target->extra_ic_state())));
+ isolate, StoreIC::GetStrictMode(target->extra_ic_state())),
+ constant_pool);
}
-void CompareIC::Clear(Isolate* isolate, Address address, Code* target) {
+void CompareIC::Clear(Isolate* isolate,
+ Address address,
+ Code* target,
+ ConstantPoolArray* constant_pool) {
ASSERT(target->major_key() == CodeStub::CompareIC);
CompareIC::State handler_state;
Token::Value op;
@@ -477,7 +513,7 @@ void CompareIC::Clear(Isolate* isolate, Address address, Code* target) {
&handler_state, &op);
// Only clear CompareICs that can retain objects.
if (handler_state != KNOWN_OBJECT) return;
- SetTargetAtAddress(address, GetRawUninitialized(isolate, op));
+ SetTargetAtAddress(address, GetRawUninitialized(isolate, op), constant_pool);
PatchInlinedSmiCode(address, DISABLE_INLINED_SMI_CHECK);
}
@@ -500,31 +536,6 @@ MaybeObject* LoadIC::Load(Handle<Object> object,
}
if (FLAG_use_ic) {
- // Use specialized code for getting the length of strings and
- // string wrapper objects. The length property of string wrapper
- // objects is read-only and therefore always returns the length of
- // the underlying string value. See ECMA-262 15.5.5.1.
- if (object->IsStringWrapper() &&
- name->Equals(isolate()->heap()->length_string())) {
- Handle<Code> stub;
- if (state() == UNINITIALIZED) {
- stub = pre_monomorphic_stub();
- } else if (state() == PREMONOMORPHIC || state() == MONOMORPHIC) {
- StringLengthStub string_length_stub(kind());
- stub = string_length_stub.GetCode(isolate());
- } else if (state() != MEGAMORPHIC) {
- ASSERT(state() != GENERIC);
- stub = megamorphic_stub();
- }
- if (!stub.is_null()) {
- set_target(*stub);
- if (FLAG_trace_ic) PrintF("[LoadIC : +#length /stringwrapper]\n");
- }
- // Get the string if we have a string wrapper object.
- String* string = String::cast(JSValue::cast(*object)->value());
- return Smi::FromInt(string->length());
- }
-
// Use specialized code for getting prototype of functions.
if (object->IsJSFunction() &&
name->Equals(isolate()->heap()->prototype_string()) &&
@@ -553,7 +564,10 @@ MaybeObject* LoadIC::Load(Handle<Object> object,
if (kind() == Code::KEYED_LOAD_IC && name->AsArrayIndex(&index)) {
// Rewrite to the generic keyed load stub.
if (FLAG_use_ic) set_target(*generic_stub());
- return Runtime::GetElementOrCharAtOrFail(isolate(), object, index);
+ Handle<Object> result =
+ Runtime::GetElementOrCharAt(isolate(), object, index);
+ RETURN_IF_EMPTY_HANDLE(isolate(), result);
+ return *result;
}
bool use_ic = MigrateDeprecated(object) ? false : FLAG_use_ic;
@@ -610,28 +624,33 @@ bool IC::UpdatePolymorphicIC(Handle<HeapType> type,
TypeHandleList types;
CodeHandleList handlers;
- int number_of_valid_types;
- int handler_to_overwrite = -1;
-
target()->FindAllTypes(&types);
int number_of_types = types.length();
- number_of_valid_types = number_of_types;
+ int deprecated_types = 0;
+ int handler_to_overwrite = -1;
for (int i = 0; i < number_of_types; i++) {
Handle<HeapType> current_type = types.at(i);
- // Filter out deprecated maps to ensure their instances get migrated.
if (current_type->IsClass() && current_type->AsClass()->is_deprecated()) {
- number_of_valid_types--;
- // If the receiver type is already in the polymorphic IC, this indicates
- // there was a prototoype chain failure. In that case, just overwrite the
- // handler.
+ // Filter out deprecated maps to ensure their instances get migrated.
+ ++deprecated_types;
} else if (type->IsCurrently(current_type)) {
- ASSERT(handler_to_overwrite == -1);
- number_of_valid_types--;
+ // If the receiver type is already in the polymorphic IC, this indicates
+ // there was a prototype chain failure. In that case, just overwrite the
+ // handler.
+ handler_to_overwrite = i;
+ } else if (handler_to_overwrite == -1 &&
+ current_type->IsClass() &&
+ type->IsClass() &&
+ IsTransitionOfMonomorphicTarget(*current_type->AsClass(),
+ *type->AsClass())) {
handler_to_overwrite = i;
}
}
+ int number_of_valid_types =
+ number_of_types - deprecated_types - (handler_to_overwrite != -1);
+
if (number_of_valid_types >= 4) return false;
if (number_of_types == 0) return false;
if (!target()->FindHandlers(&handlers, types.length())) return false;
@@ -639,13 +658,16 @@ bool IC::UpdatePolymorphicIC(Handle<HeapType> type,
number_of_valid_types++;
if (handler_to_overwrite >= 0) {
handlers.Set(handler_to_overwrite, code);
+ if (!type->IsCurrently(types.at(handler_to_overwrite))) {
+ types.Set(handler_to_overwrite, type);
+ }
} else {
types.Add(type);
handlers.Add(code);
}
Handle<Code> ic = isolate()->stub_cache()->ComputePolymorphicIC(
- &types, &handlers, number_of_valid_types, name, extra_ic_state());
+ kind(), &types, &handlers, number_of_valid_types, name, extra_ic_state());
set_target(*ic);
return true;
}
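The rewritten bookkeeping above counts deprecated map types and an overwritten slot separately before checking the polymorphism cap. A small arithmetic sketch of the new count (illustrative only):

// number_of_valid_types as computed by the patched UpdatePolymorphicIC.
int ValidTypes(int cached_types, int deprecated_types, bool overwrites_slot) {
  return cached_types - deprecated_types - (overwrites_slot ? 1 : 0);
}

// Example: four cached types, one deprecated, and the incoming type replaces
// an existing slot: 4 - 1 - 1 = 2, which stays below the cap of 4 even after
// the new handler is added.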
@@ -697,7 +719,7 @@ void IC::UpdateMonomorphicIC(Handle<HeapType> type,
Handle<String> name) {
if (!handler->is_handler()) return set_target(*handler);
Handle<Code> ic = isolate()->stub_cache()->ComputeMonomorphicIC(
- name, type, handler, extra_ic_state());
+ kind(), name, type, handler, extra_ic_state());
set_target(*ic);
}
@@ -713,19 +735,18 @@ void IC::CopyICToMegamorphicCache(Handle<String> name) {
}
-bool IC::IsTransitionOfMonomorphicTarget(Handle<HeapType> type) {
- if (!type->IsClass()) return false;
- Map* receiver_map = *type->AsClass();
- Map* current_map = target()->FindFirstMap();
- ElementsKind receiver_elements_kind = receiver_map->elements_kind();
+bool IC::IsTransitionOfMonomorphicTarget(Map* source_map, Map* target_map) {
+ if (source_map == NULL) return true;
+ if (target_map == NULL) return false;
+ ElementsKind target_elements_kind = target_map->elements_kind();
bool more_general_transition =
IsMoreGeneralElementsKindTransition(
- current_map->elements_kind(), receiver_elements_kind);
+ source_map->elements_kind(), target_elements_kind);
Map* transitioned_map = more_general_transition
- ? current_map->LookupElementsTransitionMap(receiver_elements_kind)
+ ? source_map->LookupElementsTransitionMap(target_elements_kind)
: NULL;
- return transitioned_map == receiver_map;
+ return transitioned_map == target_map;
}
@@ -738,17 +759,7 @@ void IC::PatchCache(Handle<HeapType> type,
case MONOMORPHIC_PROTOTYPE_FAILURE:
UpdateMonomorphicIC(type, code, name);
break;
- case MONOMORPHIC: {
- // For now, call stubs are allowed to rewrite to the same stub. This
- // happens e.g., when the field does not contain a function.
- ASSERT(!target().is_identical_to(code));
- Code* old_handler = target()->FindFirstHandler();
- if (old_handler == *code && IsTransitionOfMonomorphicTarget(type)) {
- UpdateMonomorphicIC(type, code, name);
- break;
- }
- // Fall through.
- }
+ case MONOMORPHIC: // Fall through.
case POLYMORPHIC:
if (!target()->is_keyed_stub()) {
if (UpdatePolymorphicIC(type, name, code)) break;
@@ -847,8 +858,11 @@ Handle<Code> IC::ComputeHandler(LookupResult* lookup,
isolate(), *object, cache_holder));
Handle<Code> code = isolate()->stub_cache()->FindHandler(
- name, handle(stub_holder->map()), kind(), cache_holder);
- if (!code.is_null()) return code;
+ name, handle(stub_holder->map()), kind(), cache_holder,
+ lookup->holder()->HasFastProperties() ? Code::FAST : Code::NORMAL);
+ if (!code.is_null()) {
+ return code;
+ }
code = CompileHandler(lookup, object, name, value, cache_holder);
ASSERT(code->is_handler());
@@ -871,6 +885,17 @@ Handle<Code> LoadIC::CompileHandler(LookupResult* lookup,
return SimpleFieldLoad(length_index);
}
+ if (object->IsStringWrapper() &&
+ name->Equals(isolate()->heap()->length_string())) {
+ if (kind() == Code::LOAD_IC) {
+ StringLengthStub string_length_stub;
+ return string_length_stub.GetCode(isolate());
+ } else {
+ KeyedStringLengthStub string_length_stub;
+ return string_length_stub.GetCode(isolate());
+ }
+ }
+
Handle<HeapType> type = CurrentTypeOf(object, isolate());
Handle<JSObject> holder(lookup->holder());
LoadStubCompiler compiler(isolate(), kNoExtraICState, cache_holder, kind());
@@ -942,8 +967,8 @@ Handle<Code> LoadIC::CompileHandler(LookupResult* lookup,
Handle<JSFunction> function = Handle<JSFunction>::cast(getter);
if (!object->IsJSObject() &&
!function->IsBuiltin() &&
- function->shared()->is_classic_mode()) {
- // Calling non-strict non-builtins with a value as the receiver
+ function->shared()->strict_mode() == SLOPPY) {
+ // Calling sloppy non-builtins with a value as the receiver
// requires boxing.
break;
}
@@ -1063,26 +1088,25 @@ MaybeObject* KeyedLoadIC::Load(Handle<Object> object, Handle<Object> key) {
MaybeObject* maybe_object = NULL;
Handle<Code> stub = generic_stub();
- // Check for values that can be converted into an internalized string directly
- // or is representable as a smi.
+ // Check for non-string values that can be converted into an
+ // internalized string directly or are representable as a smi.
key = TryConvertKey(key, isolate());
if (key->IsInternalizedString()) {
maybe_object = LoadIC::Load(object, Handle<String>::cast(key));
if (maybe_object->IsFailure()) return maybe_object;
} else if (FLAG_use_ic && !object->IsAccessCheckNeeded()) {
- ASSERT(!object->IsJSGlobalProxy());
if (object->IsString() && key->IsNumber()) {
if (state() == UNINITIALIZED) stub = string_stub();
} else if (object->IsJSObject()) {
Handle<JSObject> receiver = Handle<JSObject>::cast(object);
if (receiver->elements()->map() ==
- isolate()->heap()->non_strict_arguments_elements_map()) {
- stub = non_strict_arguments_stub();
+ isolate()->heap()->sloppy_arguments_elements_map()) {
+ stub = sloppy_arguments_stub();
} else if (receiver->HasIndexedInterceptor()) {
stub = indexed_interceptor_stub();
} else if (!key->ToSmi()->IsFailure() &&
- (!target().is_identical_to(non_strict_arguments_stub()))) {
+ (!target().is_identical_to(sloppy_arguments_stub()))) {
stub = LoadElementStub(receiver);
}
}
@@ -1092,7 +1116,6 @@ MaybeObject* KeyedLoadIC::Load(Handle<Object> object, Handle<Object> key) {
if (*stub == *generic_stub()) {
TRACE_GENERIC_IC(isolate(), "KeyedLoadIC", "set generic");
}
- ASSERT(!stub.is_null());
set_target(*stub);
TRACE_IC("LoadIC", key);
}
@@ -1110,22 +1133,20 @@ static bool LookupForWrite(Handle<JSObject> receiver,
Handle<JSObject> holder = receiver;
receiver->Lookup(*name, lookup);
if (lookup->IsFound()) {
- if (lookup->IsReadOnly() || !lookup->IsCacheable()) return false;
-
- if (lookup->holder() == *receiver) {
- if (lookup->IsInterceptor() && !HasInterceptorSetter(*receiver)) {
- receiver->LocalLookupRealNamedProperty(*name, lookup);
- return lookup->IsFound() &&
- !lookup->IsReadOnly() &&
- lookup->CanHoldValue(value) &&
- lookup->IsCacheable();
- }
- return lookup->CanHoldValue(value);
+ if (lookup->IsInterceptor() && !HasInterceptorSetter(lookup->holder())) {
+ receiver->LocalLookupRealNamedProperty(*name, lookup);
+ if (!lookup->IsFound()) return false;
}
+ if (lookup->IsReadOnly() || !lookup->IsCacheable()) return false;
+ if (lookup->holder() == *receiver) return lookup->CanHoldValue(value);
if (lookup->IsPropertyCallbacks()) return true;
- // JSGlobalProxy always goes via the runtime, so it's safe to cache.
- if (receiver->IsJSGlobalProxy()) return true;
+ // JSGlobalProxy either stores on the global object in the prototype, or
+ // goes into the runtime if access checks are needed, so this is always
+ // safe.
+ if (receiver->IsJSGlobalProxy()) {
+ return lookup->holder() == receiver->GetPrototype();
+ }
// Currently normal holders in the prototype chain are not supported. They
// would require a runtime positive lookup and verification that the details
// have not changed.
@@ -1183,7 +1204,7 @@ MaybeObject* StoreIC::Store(Handle<Object> object,
}
// The length property of string values is read-only. Throw in strict mode.
- if (strict_mode() == kStrictMode && object->IsString() &&
+ if (strict_mode() == STRICT && object->IsString() &&
name->Equals(isolate()->heap()->length_string())) {
return TypeError("strict_read_only_property", object, name);
}
@@ -1204,27 +1225,7 @@ MaybeObject* StoreIC::Store(Handle<Object> object,
}
// Observed objects are always modified through the runtime.
- if (FLAG_harmony_observation && receiver->map()->is_observed()) {
- Handle<Object> result = JSReceiver::SetProperty(
- receiver, name, value, NONE, strict_mode(), store_mode);
- RETURN_IF_EMPTY_HANDLE(isolate(), result);
- return *result;
- }
-
- // Use specialized code for setting the length of arrays with fast
- // properties. Slow properties might indicate redefinition of the length
- // property. Note that when redefined using Object.freeze, it's possible
- // to have fast properties but a read-only length.
- if (FLAG_use_ic &&
- receiver->IsJSArray() &&
- name->Equals(isolate()->heap()->length_string()) &&
- Handle<JSArray>::cast(receiver)->AllowsSetElementsLength() &&
- receiver->HasFastProperties() &&
- !receiver->map()->is_frozen()) {
- Handle<Code> stub =
- StoreArrayLengthStub(kind(), strict_mode()).GetCode(isolate());
- set_target(*stub);
- TRACE_IC("StoreIC", name);
+ if (receiver->map()->is_observed()) {
Handle<Object> result = JSReceiver::SetProperty(
receiver, name, value, NONE, strict_mode(), store_mode);
RETURN_IF_EMPTY_HANDLE(isolate(), result);
@@ -1234,7 +1235,7 @@ MaybeObject* StoreIC::Store(Handle<Object> object,
LookupResult lookup(isolate());
bool can_store = LookupForWrite(receiver, name, value, &lookup, this);
if (!can_store &&
- strict_mode() == kStrictMode &&
+ strict_mode() == STRICT &&
!(lookup.IsProperty() && lookup.IsReadOnly()) &&
object->IsGlobalObject()) {
// Strict mode doesn't allow setting non-existent global property.
@@ -1264,7 +1265,7 @@ MaybeObject* StoreIC::Store(Handle<Object> object,
Handle<Code> StoreIC::initialize_stub(Isolate* isolate,
- StrictModeFlag strict_mode) {
+ StrictMode strict_mode) {
ExtraICState extra_state = ComputeExtraICState(strict_mode);
Handle<Code> ic = isolate->stub_cache()->ComputeStore(
UNINITIALIZED, extra_state);
@@ -1283,7 +1284,7 @@ Handle<Code> StoreIC::generic_stub() const {
Handle<Code> StoreIC::pre_monomorphic_stub(Isolate* isolate,
- StrictModeFlag strict_mode) {
+ StrictMode strict_mode) {
ExtraICState state = ComputeExtraICState(strict_mode);
return isolate->stub_cache()->ComputeStore(PREMONOMORPHIC, state);
}
@@ -1310,14 +1311,14 @@ Handle<Code> StoreIC::CompileHandler(LookupResult* lookup,
Handle<String> name,
Handle<Object> value,
InlineCacheHolderFlag cache_holder) {
- if (object->IsJSGlobalProxy()) return slow_stub();
+ if (object->IsAccessCheckNeeded()) return slow_stub();
ASSERT(cache_holder == OWN_MAP);
// This is currently guaranteed by checks in StoreIC::Store.
Handle<JSObject> receiver = Handle<JSObject>::cast(object);
Handle<JSObject> holder(lookup->holder());
// Handlers do not use strict mode.
- StoreStubCompiler compiler(isolate(), kNonStrictMode, kind());
+ StoreStubCompiler compiler(isolate(), SLOPPY, kind());
switch (lookup->type()) {
case FIELD:
return compiler.CompileStoreField(receiver, lookup, name);
@@ -1334,17 +1335,19 @@ Handle<Code> StoreIC::CompileHandler(LookupResult* lookup,
}
case NORMAL:
if (kind() == Code::KEYED_STORE_IC) break;
- if (receiver->IsGlobalObject()) {
+ if (receiver->IsJSGlobalProxy() || receiver->IsGlobalObject()) {
// The stub generated for the global object picks the value directly
// from the property cell. So the property must be directly on the
// global object.
- Handle<GlobalObject> global = Handle<GlobalObject>::cast(receiver);
+ Handle<GlobalObject> global = receiver->IsJSGlobalProxy()
+ ? handle(GlobalObject::cast(receiver->GetPrototype()))
+ : Handle<GlobalObject>::cast(receiver);
Handle<PropertyCell> cell(global->GetPropertyCell(lookup), isolate());
Handle<HeapType> union_type = PropertyCell::UpdatedType(cell, value);
- StoreGlobalStub stub(union_type->IsConstant());
-
+ StoreGlobalStub stub(
+ union_type->IsConstant(), receiver->IsJSGlobalProxy());
Handle<Code> code = stub.GetCodeCopyFromTemplate(
- isolate(), receiver->map(), *cell);
+ isolate(), global, cell);
// TODO(verwaest): Move caching of these NORMAL stubs outside as well.
HeapObject::UpdateMapCodeCache(receiver, name, code);
return code;
@@ -1352,7 +1355,6 @@ Handle<Code> StoreIC::CompileHandler(LookupResult* lookup,
ASSERT(holder.is_identical_to(receiver));
return isolate()->builtins()->StoreIC_Normal();
case CALLBACKS: {
- if (kind() == Code::KEYED_STORE_IC) break;
Handle<Object> callback(lookup->GetCallbackObject(), isolate());
if (callback->IsExecutableAccessorInfo()) {
Handle<ExecutableAccessorInfo> info =
@@ -1380,12 +1382,23 @@ Handle<Code> StoreIC::CompileHandler(LookupResult* lookup,
// TODO(dcarney): Handle correctly.
if (callback->IsDeclaredAccessorInfo()) break;
ASSERT(callback->IsForeign());
+
+ // Use specialized code for setting the length of arrays with fast
+ // properties. Slow properties might indicate redefinition of the length
+ // property.
+ if (receiver->IsJSArray() &&
+ name->Equals(isolate()->heap()->length_string()) &&
+ Handle<JSArray>::cast(receiver)->AllowsSetElementsLength() &&
+ receiver->HasFastProperties()) {
+ return compiler.CompileStoreArrayLength(receiver, lookup, name);
+ }
+
// No IC support for old-style native accessors.
break;
}
case INTERCEPTOR:
if (kind() == Code::KEYED_STORE_IC) break;
- ASSERT(HasInterceptorSetter(*receiver));
+ ASSERT(HasInterceptorSetter(*holder));
return compiler.CompileStoreInterceptor(receiver, name);
case CONSTANT:
break;
@@ -1439,9 +1452,10 @@ Handle<Code> KeyedStoreIC::StoreElementStub(Handle<JSObject> receiver,
if (IsTransitionStoreMode(store_mode)) {
transitioned_receiver_map = ComputeTransitionedMap(receiver, store_mode);
}
- if (receiver_map.is_identical_to(previous_receiver_map) ||
- IsTransitionOfMonomorphicTarget(
- MapToType<HeapType>(transitioned_receiver_map, isolate()))) {
+ if ((receiver_map.is_identical_to(previous_receiver_map) &&
+ IsTransitionStoreMode(store_mode)) ||
+ IsTransitionOfMonomorphicTarget(*previous_receiver_map,
+ *transitioned_receiver_map)) {
// If the "old" and "new" maps are in the same elements map family, or
// if they at least come from the same origin for a transitioning store,
// stay MONOMORPHIC and use the map for the most generic ElementsKind.
@@ -1575,7 +1589,10 @@ KeyedAccessStoreMode KeyedStoreIC::GetStoreMode(Handle<JSObject> receiver,
key->ToSmi()->To(&smi_key);
int index = smi_key->value();
bool oob_access = IsOutOfBoundsAccess(receiver, index);
- bool allow_growth = receiver->IsJSArray() && oob_access;
+ // Don't consider this a growing store if the store would send the receiver to
+ // dictionary mode.
+ bool allow_growth = receiver->IsJSArray() && oob_access &&
+ !receiver->WouldConvertToSlowElements(key);
if (allow_growth) {
// Handle growing array in stub if necessary.
if (receiver->HasFastSmiElements()) {
@@ -1655,8 +1672,8 @@ MaybeObject* KeyedStoreIC::Store(Handle<Object> object,
return *result;
}
- // Check for values that can be converted into an internalized string directly
- // or is representable as a smi.
+ // Check for non-string values that can be converted into an
+ // internalized string directly or are representable as a smi.
key = TryConvertKey(key, isolate());
MaybeObject* maybe_object = NULL;
@@ -1669,8 +1686,10 @@ MaybeObject* KeyedStoreIC::Store(Handle<Object> object,
JSReceiver::MAY_BE_STORE_FROM_KEYED);
if (maybe_object->IsFailure()) return maybe_object;
} else {
- bool use_ic = FLAG_use_ic && !object->IsAccessCheckNeeded() &&
- !(FLAG_harmony_observation && object->IsJSObject() &&
+ bool use_ic = FLAG_use_ic &&
+ !object->IsAccessCheckNeeded() &&
+ !object->IsJSGlobalProxy() &&
+ !(object->IsJSObject() &&
JSObject::cast(*object)->map()->is_observed());
if (use_ic && !object->IsSmi()) {
// Don't use ICs for maps of the objects in Array's prototype chain. We
@@ -1681,16 +1700,18 @@ MaybeObject* KeyedStoreIC::Store(Handle<Object> object,
}
if (use_ic) {
- ASSERT(!object->IsJSGlobalProxy());
+ ASSERT(!object->IsAccessCheckNeeded());
if (object->IsJSObject()) {
Handle<JSObject> receiver = Handle<JSObject>::cast(object);
bool key_is_smi_like = key->IsSmi() || !key->ToSmi()->IsFailure();
if (receiver->elements()->map() ==
- isolate()->heap()->non_strict_arguments_elements_map()) {
- stub = non_strict_arguments_stub();
+ isolate()->heap()->sloppy_arguments_elements_map()) {
+ if (strict_mode() == SLOPPY) {
+ stub = sloppy_arguments_stub();
+ }
} else if (key_is_smi_like &&
- !(target().is_identical_to(non_strict_arguments_stub()))) {
+ !(target().is_identical_to(sloppy_arguments_stub()))) {
// We should go generic if receiver isn't a dictionary, but our
// prototype chain does have dictionary elements. This ensures that
// other non-dictionary receivers in the polymorphic case benefit
@@ -1791,11 +1812,11 @@ RUNTIME_FUNCTION(MaybeObject*, StoreIC_MissFromStubFailure) {
RUNTIME_FUNCTION(MaybeObject*, StoreIC_ArrayLength) {
- SealHandleScope shs(isolate);
+ HandleScope scope(isolate);
ASSERT(args.length() == 2);
- JSArray* receiver = JSArray::cast(args[0]);
- Object* len = args[1];
+ Handle<JSArray> receiver = args.at<JSArray>(0);
+ Handle<Object> len = args.at<Object>(1);
// The generated code should filter out non-Smis before we get here.
ASSERT(len->IsSmi());
@@ -1807,11 +1828,9 @@ RUNTIME_FUNCTION(MaybeObject*, StoreIC_ArrayLength) {
ASSERT(debug_lookup.IsPropertyCallbacks() && !debug_lookup.IsReadOnly());
#endif
- Object* result;
- MaybeObject* maybe_result = receiver->SetElementsLength(len);
- if (!maybe_result->To(&result)) return maybe_result;
-
- return len;
+ RETURN_IF_EMPTY_HANDLE(isolate,
+ JSArray::SetElementsLength(receiver, len));
+ return *len;
}
@@ -1843,14 +1862,12 @@ RUNTIME_FUNCTION(MaybeObject*, SharedStoreIC_ExtendStorage) {
Object* to_store = value;
- if (FLAG_track_double_fields) {
- DescriptorArray* descriptors = transition->instance_descriptors();
- PropertyDetails details = descriptors->GetDetails(transition->LastAdded());
- if (details.representation().IsDouble()) {
- MaybeObject* maybe_storage =
- isolate->heap()->AllocateHeapNumber(value->Number());
- if (!maybe_storage->To(&to_store)) return maybe_storage;
- }
+ DescriptorArray* descriptors = transition->instance_descriptors();
+ PropertyDetails details = descriptors->GetDetails(transition->LastAdded());
+ if (details.representation().IsDouble()) {
+ MaybeObject* maybe_storage =
+ isolate->heap()->AllocateHeapNumber(value->Number());
+ if (!maybe_storage->To(&to_store)) return maybe_storage;
}
new_storage->set(old_storage->length(), to_store);
@@ -1894,7 +1911,7 @@ RUNTIME_FUNCTION(MaybeObject*, StoreIC_Slow) {
Handle<Object> object = args.at<Object>(0);
Handle<Object> key = args.at<Object>(1);
Handle<Object> value = args.at<Object>(2);
- StrictModeFlag strict_mode = ic.strict_mode();
+ StrictMode strict_mode = ic.strict_mode();
Handle<Object> result = Runtime::SetObjectProperty(isolate, object, key,
value,
NONE,
@@ -1911,7 +1928,7 @@ RUNTIME_FUNCTION(MaybeObject*, KeyedStoreIC_Slow) {
Handle<Object> object = args.at<Object>(0);
Handle<Object> key = args.at<Object>(1);
Handle<Object> value = args.at<Object>(2);
- StrictModeFlag strict_mode = ic.strict_mode();
+ StrictMode strict_mode = ic.strict_mode();
Handle<Object> result = Runtime::SetObjectProperty(isolate, object, key,
value,
NONE,
@@ -1929,7 +1946,7 @@ RUNTIME_FUNCTION(MaybeObject*, ElementsTransitionAndStoreIC_Miss) {
Handle<Map> map = args.at<Map>(1);
Handle<Object> key = args.at<Object>(2);
Handle<Object> object = args.at<Object>(3);
- StrictModeFlag strict_mode = ic.strict_mode();
+ StrictMode strict_mode = ic.strict_mode();
if (object->IsJSObject()) {
JSObject::TransitionElementsKind(Handle<JSObject>::cast(object),
map->elements_kind());
@@ -2352,7 +2369,7 @@ const char* BinaryOpIC::State::KindToString(Kind kind) {
Type* BinaryOpIC::State::KindToType(Kind kind, Zone* zone) {
switch (kind) {
case NONE: return Type::None(zone);
- case SMI: return Type::Smi(zone);
+ case SMI: return Type::SignedSmall(zone);
case INT32: return Type::Signed32(zone);
case NUMBER: return Type::Number(zone);
case STRING: return Type::String(zone);
@@ -2366,7 +2383,7 @@ Type* BinaryOpIC::State::KindToType(Kind kind, Zone* zone) {
MaybeObject* BinaryOpIC::Transition(Handle<AllocationSite> allocation_site,
Handle<Object> left,
Handle<Object> right) {
- State state(target()->extended_extra_ic_state());
+ State state(target()->extra_ic_state());
// Compute the actual result using the builtin for the binary operation.
Object* builtin = isolate()->js_builtins_object()->javascript_builtin(
@@ -2377,8 +2394,11 @@ MaybeObject* BinaryOpIC::Transition(Handle<AllocationSite> allocation_site,
isolate(), function, left, 1, &right, &caught_exception);
if (caught_exception) return Failure::Exception();
+ // Execution::Call can execute arbitrary JavaScript, hence potentially
+ // update the state of this very IC, so we must update the stored state.
+ UpdateTarget();
// Compute the new state.
- State old_state = state;
+ State old_state(target()->extra_ic_state());
state.Update(left, right, result);
// Check if we have a string operation here.
@@ -2495,7 +2515,7 @@ Type* CompareIC::StateToType(
Handle<Map> map) {
switch (state) {
case CompareIC::UNINITIALIZED: return Type::None(zone);
- case CompareIC::SMI: return Type::Smi(zone);
+ case CompareIC::SMI: return Type::SignedSmall(zone);
case CompareIC::NUMBER: return Type::Number(zone);
case CompareIC::STRING: return Type::String(zone);
case CompareIC::INTERNALIZED_STRING: return Type::InternalizedString(zone);
@@ -2680,9 +2700,11 @@ RUNTIME_FUNCTION(Code*, CompareIC_Miss) {
}
-void CompareNilIC::Clear(Address address, Code* target) {
+void CompareNilIC::Clear(Address address,
+ Code* target,
+ ConstantPoolArray* constant_pool) {
if (IsCleared(target)) return;
- ExtraICState state = target->extended_extra_ic_state();
+ ExtraICState state = target->extra_ic_state();
CompareNilICStub stub(state, HydrogenCodeStub::UNINITIALIZED);
stub.ClearState();
@@ -2690,7 +2712,7 @@ void CompareNilIC::Clear(Address address, Code* target) {
Code* code = NULL;
CHECK(stub.FindCodeInCache(&code, target->GetIsolate()));
- SetTargetAtAddress(address, code);
+ SetTargetAtAddress(address, code, constant_pool);
}
@@ -2704,7 +2726,7 @@ MaybeObject* CompareNilIC::DoCompareNilSlow(NilValue nil,
MaybeObject* CompareNilIC::CompareNil(Handle<Object> object) {
- ExtraICState extra_ic_state = target()->extended_extra_ic_state();
+ ExtraICState extra_ic_state = target()->extra_ic_state();
CompareNilICStub stub(extra_ic_state);
@@ -2788,7 +2810,7 @@ Builtins::JavaScript BinaryOpIC::TokenToJSBuiltin(Token::Value op) {
MaybeObject* ToBooleanIC::ToBoolean(Handle<Object> object) {
- ToBooleanStub stub(target()->extended_extra_ic_state());
+ ToBooleanStub stub(target()->extra_ic_state());
bool to_boolean_value = stub.UpdateStatus(object);
Handle<Code> code = stub.GetCode(isolate());
set_target(*code);
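
The BinaryOpIC::Transition hunk above re-reads the IC state only after Execution::Call returns, because the call can run arbitrary JavaScript that re-enters and retargets the very IC being transitioned. A minimal standalone sketch of that refresh-after-reentrant-call pattern follows; SmallCache, Transition and Reenter are invented for illustration and are not V8 APIs.

    #include <cstdio>

    // Invented toy type: a cache whose state can be changed re-entrantly by a
    // callback, mirroring how Execution::Call may update the IC's target.
    struct SmallCache {
      int state;
      SmallCache() : state(0) {}

      template <typename Callback>
      int Transition(Callback call_out) {
        call_out(*this);           // may mutate state re-entrantly
        int old_state = state;     // re-read *after* the call (cf. UpdateTarget())
        state = old_state + 1;     // derive the new state from fresh data
        return state;
      }
    };

    static void Reenter(SmallCache& cache) { cache.state = 41; }

    int main() {
      SmallCache cache;
      cache.Transition(Reenter);
      std::printf("%d\n", cache.state);   // 42: the re-entrant update is not lost
      return 0;
    }
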
diff --git a/deps/v8/src/ic.h b/deps/v8/src/ic.h
index 99309f4edf..e70cb82c96 100644
--- a/deps/v8/src/ic.h
+++ b/deps/v8/src/ic.h
@@ -101,7 +101,9 @@ class IC {
}
// Clear the inline cache to initial state.
- static void Clear(Isolate* isolate, Address address);
+ static void Clear(Isolate* isolate,
+ Address address,
+ ConstantPoolArray* constant_pool);
#ifdef DEBUG
bool IsLoadStub() const {
@@ -155,14 +157,17 @@ class IC {
Isolate* isolate() const { return isolate_; }
#ifdef ENABLE_DEBUGGER_SUPPORT
- // Computes the address in the original code when the code running is
- // containing break points (calls to DebugBreakXXX builtins).
- Address OriginalCodeAddress() const;
+ // Get the shared function info of the caller.
+ SharedFunctionInfo* GetSharedFunctionInfo() const;
+ // Get the code object of the caller.
+ Code* GetCode() const;
+ // Get the original (non-breakpointed) code object of the caller.
+ Code* GetOriginalCode() const;
#endif
// Set the call-site target.
void set_target(Code* code) {
- SetTargetAtAddress(address(), code);
+ SetTargetAtAddress(address(), code, constant_pool());
target_set_ = true;
}
@@ -180,8 +185,11 @@ class IC {
Failure* ReferenceError(const char* type, Handle<String> name);
// Access the target code for the given IC address.
- static inline Code* GetTargetAtAddress(Address address);
- static inline void SetTargetAtAddress(Address address, Code* target);
+ static inline Code* GetTargetAtAddress(Address address,
+ ConstantPoolArray* constant_pool);
+ static inline void SetTargetAtAddress(Address address,
+ Code* target,
+ ConstantPoolArray* constant_pool);
static void PostPatching(Address address, Code* target, Code* old_target);
// Compute the handler either by compiling or by retrieving a cached version.
@@ -209,7 +217,7 @@ class IC {
virtual void UpdateMegamorphicCache(HeapType* type, Name* name, Code* code);
void CopyICToMegamorphicCache(Handle<String> name);
- bool IsTransitionOfMonomorphicTarget(Handle<HeapType> type);
+ bool IsTransitionOfMonomorphicTarget(Map* source_map, Map* target_map);
void PatchCache(Handle<HeapType> type,
Handle<String> name,
Handle<Code> code);
@@ -239,8 +247,17 @@ class IC {
extra_ic_state_ = state;
}
+ protected:
+ void UpdateTarget() {
+ target_ = handle(raw_target(), isolate_);
+ }
+
private:
- Code* raw_target() const { return GetTargetAtAddress(address()); }
+ Code* raw_target() const {
+ return GetTargetAtAddress(address(), constant_pool());
+ }
+ inline ConstantPoolArray* constant_pool() const;
+ inline ConstantPoolArray* raw_constant_pool() const;
// Frame pointer for the frame that uses (calls) the IC.
Address fp_;
@@ -253,6 +270,10 @@ class IC {
Isolate* isolate_;
+ // The constant pool of the code which originally called the IC (which might
+ // be for the breakpointed copy of the original code).
+ Handle<ConstantPoolArray> raw_constant_pool_;
+
// The original code target that missed.
Handle<Code> target_;
State state_;
@@ -320,8 +341,7 @@ class LoadIC: public IC {
GenerateMiss(masm);
}
static void GenerateMiss(MacroAssembler* masm);
- static void GenerateMegamorphic(MacroAssembler* masm,
- ExtraICState extra_state);
+ static void GenerateMegamorphic(MacroAssembler* masm);
static void GenerateNormal(MacroAssembler* masm);
static void GenerateRuntimeGetProperty(MacroAssembler* masm);
@@ -374,7 +394,10 @@ class LoadIC: public IC {
Representation representation =
Representation::Tagged());
- static void Clear(Isolate* isolate, Address address, Code* target);
+ static void Clear(Isolate* isolate,
+ Address address,
+ Code* target,
+ ConstantPoolArray* constant_pool);
friend class IC;
};
@@ -400,7 +423,7 @@ class KeyedLoadIC: public LoadIC {
static void GenerateGeneric(MacroAssembler* masm);
static void GenerateString(MacroAssembler* masm);
static void GenerateIndexedInterceptor(MacroAssembler* masm);
- static void GenerateNonStrictArguments(MacroAssembler* masm);
+ static void GenerateSloppyArguments(MacroAssembler* masm);
// Bit mask to be tested against bit field for the cases when
// generic stub should go into slow case.
@@ -437,14 +460,17 @@ class KeyedLoadIC: public LoadIC {
Handle<Code> indexed_interceptor_stub() {
return isolate()->builtins()->KeyedLoadIC_IndexedInterceptor();
}
- Handle<Code> non_strict_arguments_stub() {
- return isolate()->builtins()->KeyedLoadIC_NonStrictArguments();
+ Handle<Code> sloppy_arguments_stub() {
+ return isolate()->builtins()->KeyedLoadIC_SloppyArguments();
}
Handle<Code> string_stub() {
return isolate()->builtins()->KeyedLoadIC_String();
}
- static void Clear(Isolate* isolate, Address address, Code* target);
+ static void Clear(Isolate* isolate,
+ Address address,
+ Code* target,
+ ConstantPoolArray* constant_pool);
friend class IC;
};
@@ -452,12 +478,11 @@ class KeyedLoadIC: public LoadIC {
class StoreIC: public IC {
public:
- class StrictModeState: public BitField<StrictModeFlag, 1, 1> {};
- static ExtraICState ComputeExtraICState(StrictModeFlag flag) {
+ class StrictModeState: public BitField<StrictMode, 1, 1> {};
+ static ExtraICState ComputeExtraICState(StrictMode flag) {
return StrictModeState::encode(flag);
}
-
- static StrictModeFlag GetStrictMode(ExtraICState state) {
+ static StrictMode GetStrictMode(ExtraICState state) {
return StrictModeState::decode(state);
}
@@ -471,7 +496,7 @@ class StoreIC: public IC {
ASSERT(IsStoreStub());
}
- StrictModeFlag strict_mode() const {
+ StrictMode strict_mode() const {
return StrictModeState::decode(extra_ic_state());
}
@@ -482,14 +507,13 @@ class StoreIC: public IC {
GenerateMiss(masm);
}
static void GenerateMiss(MacroAssembler* masm);
- static void GenerateMegamorphic(MacroAssembler* masm,
- ExtraICState extra_ic_state);
+ static void GenerateMegamorphic(MacroAssembler* masm);
static void GenerateNormal(MacroAssembler* masm);
static void GenerateRuntimeSetProperty(MacroAssembler* masm,
- StrictModeFlag strict_mode);
+ StrictMode strict_mode);
static Handle<Code> initialize_stub(Isolate* isolate,
- StrictModeFlag strict_mode);
+ StrictMode strict_mode);
MUST_USE_RESULT MaybeObject* Store(
Handle<Object> object,
@@ -514,7 +538,7 @@ class StoreIC: public IC {
}
static Handle<Code> pre_monomorphic_stub(Isolate* isolate,
- StrictModeFlag strict_mode);
+ StrictMode strict_mode);
// Update the inline cache and the global stub cache based on the
// lookup result.
@@ -536,7 +560,10 @@ class StoreIC: public IC {
IC::set_target(code);
}
- static void Clear(Isolate* isolate, Address address, Code* target);
+ static void Clear(Isolate* isolate,
+ Address address,
+ Code* target,
+ ConstantPoolArray* constant_pool);
friend class IC;
};
@@ -561,7 +588,7 @@ class KeyedStoreIC: public StoreIC {
class ExtraICStateKeyedAccessStoreMode:
public BitField<KeyedAccessStoreMode, 2, 4> {}; // NOLINT
- static ExtraICState ComputeExtraICState(StrictModeFlag flag,
+ static ExtraICState ComputeExtraICState(StrictMode flag,
KeyedAccessStoreMode mode) {
return StrictModeState::encode(flag) |
ExtraICStateKeyedAccessStoreMode::encode(mode);
@@ -589,9 +616,9 @@ class KeyedStoreIC: public StoreIC {
static void GenerateMiss(MacroAssembler* masm);
static void GenerateSlow(MacroAssembler* masm);
static void GenerateRuntimeSetProperty(MacroAssembler* masm,
- StrictModeFlag strict_mode);
- static void GenerateGeneric(MacroAssembler* masm, StrictModeFlag strict_mode);
- static void GenerateNonStrictArguments(MacroAssembler* masm);
+ StrictMode strict_mode);
+ static void GenerateGeneric(MacroAssembler* masm, StrictMode strict_mode);
+ static void GenerateSloppyArguments(MacroAssembler* masm);
protected:
virtual Code::Kind kind() const { return Code::KEYED_STORE_IC; }
@@ -602,8 +629,8 @@ class KeyedStoreIC: public StoreIC {
return pre_monomorphic_stub(isolate(), strict_mode());
}
static Handle<Code> pre_monomorphic_stub(Isolate* isolate,
- StrictModeFlag strict_mode) {
- if (strict_mode == kStrictMode) {
+ StrictMode strict_mode) {
+ if (strict_mode == STRICT) {
return isolate->builtins()->KeyedStoreIC_PreMonomorphic_Strict();
} else {
return isolate->builtins()->KeyedStoreIC_PreMonomorphic();
@@ -613,7 +640,7 @@ class KeyedStoreIC: public StoreIC {
return isolate()->builtins()->KeyedStoreIC_Slow();
}
virtual Handle<Code> megamorphic_stub() {
- if (strict_mode() == kStrictMode) {
+ if (strict_mode() == STRICT) {
return isolate()->builtins()->KeyedStoreIC_Generic_Strict();
} else {
return isolate()->builtins()->KeyedStoreIC_Generic();
@@ -632,18 +659,21 @@ class KeyedStoreIC: public StoreIC {
// Stub accessors.
virtual Handle<Code> generic_stub() const {
- if (strict_mode() == kStrictMode) {
+ if (strict_mode() == STRICT) {
return isolate()->builtins()->KeyedStoreIC_Generic_Strict();
} else {
return isolate()->builtins()->KeyedStoreIC_Generic();
}
}
- Handle<Code> non_strict_arguments_stub() {
- return isolate()->builtins()->KeyedStoreIC_NonStrictArguments();
+ Handle<Code> sloppy_arguments_stub() {
+ return isolate()->builtins()->KeyedStoreIC_SloppyArguments();
}
- static void Clear(Isolate* isolate, Address address, Code* target);
+ static void Clear(Isolate* isolate,
+ Address address,
+ Code* target,
+ ConstantPoolArray* constant_pool);
KeyedAccessStoreMode GetStoreMode(Handle<JSObject> receiver,
Handle<Object> key,
@@ -850,7 +880,10 @@ class CompareIC: public IC {
static Code* GetRawUninitialized(Isolate* isolate, Token::Value op);
- static void Clear(Isolate* isolate, Address address, Code* target);
+ static void Clear(Isolate* isolate,
+ Address address,
+ Code* target,
+ ConstantPoolArray* constant_pool);
Token::Value op_;
@@ -866,7 +899,9 @@ class CompareNilIC: public IC {
static Handle<Code> GetUninitialized();
- static void Clear(Address address, Code* target);
+ static void Clear(Address address,
+ Code* target,
+ ConstantPoolArray* constant_pool);
static MUST_USE_RESULT MaybeObject* DoCompareNilSlow(NilValue nil,
Handle<Object> object);
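
StoreIC now encodes a StrictMode value (replacing the old StrictModeFlag) into bit 1 of the ExtraICState via BitField, and KeyedStoreIC packs the KeyedAccessStoreMode into the bits that follow. The sketch below uses a simplified stand-in for V8's BitField template (the real one lives elsewhere in the tree and is not part of this diff) to show how encode/decode round-trip the flag.

    #include <cassert>
    #include <cstdint>

    enum StrictMode { SLOPPY = 0, STRICT = 1 };
    typedef uint32_t ExtraICState;

    // Simplified stand-in for V8's BitField<T, shift, size>: packs a value of
    // type T into `size` bits starting at bit `shift`.
    template <class T, int shift, int size>
    struct BitField {
      static const uint32_t kMask = ((1u << size) - 1) << shift;
      static ExtraICState encode(T value) {
        return static_cast<uint32_t>(value) << shift;
      }
      static T decode(ExtraICState state) {
        return static_cast<T>((state & kMask) >> shift);
      }
    };

    typedef BitField<StrictMode, 1, 1> StrictModeState;   // bit 1, as above

    int main() {
      ExtraICState state = StrictModeState::encode(STRICT);
      assert(StrictModeState::decode(state) == STRICT);
      assert(StrictModeState::decode(StrictModeState::encode(SLOPPY)) == SLOPPY);
      return 0;
    }
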
diff --git a/deps/v8/src/icu_util.cc b/deps/v8/src/icu_util.cc
index b9bd65edc6..1fff8170ff 100644
--- a/deps/v8/src/icu_util.cc
+++ b/deps/v8/src/icu_util.cc
@@ -27,12 +27,21 @@
#include "icu_util.h"
-#if defined(_WIN32) && defined(V8_I18N_SUPPORT)
+#if defined(_WIN32)
#include <windows.h>
+#endif
+
+#if defined(V8_I18N_SUPPORT)
+#include <stdio.h>
+#include <stdlib.h>
#include "unicode/putil.h"
#include "unicode/udata.h"
+#define ICU_UTIL_DATA_FILE 0
+#define ICU_UTIL_DATA_SHARED 1
+#define ICU_UTIL_DATA_STATIC 2
+
#define ICU_UTIL_DATA_SYMBOL "icudt" U_ICU_VERSION_SHORT "_dat"
#define ICU_UTIL_DATA_SHARED_MODULE_NAME "icudt.dll"
#endif
@@ -41,8 +50,22 @@ namespace v8 {
namespace internal {
-bool InitializeICU() {
-#if defined(_WIN32) && defined(V8_I18N_SUPPORT)
+#if defined(V8_I18N_SUPPORT) && (ICU_UTIL_DATA_IMPL == ICU_UTIL_DATA_FILE)
+namespace {
+char* g_icu_data_ptr = NULL;
+
+void free_icu_data_ptr() {
+ delete[] g_icu_data_ptr;
+}
+
+} // namespace
+#endif
+
+bool InitializeICU(const char* icu_data_file) {
+#if !defined(V8_I18N_SUPPORT)
+ return true;
+#else
+#if ICU_UTIL_DATA_IMPL == ICU_UTIL_DATA_SHARED
// We expect to find the ICU data module alongside the current module.
HMODULE module = LoadLibraryA(ICU_UTIL_DATA_SHARED_MODULE_NAME);
if (!module) return false;
@@ -53,9 +76,36 @@ bool InitializeICU() {
UErrorCode err = U_ZERO_ERROR;
udata_setCommonData(reinterpret_cast<void*>(addr), &err);
return err == U_ZERO_ERROR;
-#else
+#elif ICU_UTIL_DATA_IMPL == ICU_UTIL_DATA_STATIC
// Mac/Linux bundle the ICU data in.
return true;
+#elif ICU_UTIL_DATA_IMPL == ICU_UTIL_DATA_FILE
+ if (!icu_data_file) return false;
+
+ if (g_icu_data_ptr) return true;
+
+ FILE* inf = fopen(icu_data_file, "rb");
+ if (!inf) return false;
+
+ fseek(inf, 0, SEEK_END);
+ size_t size = ftell(inf);
+ rewind(inf);
+
+ g_icu_data_ptr = new char[size];
+ if (fread(g_icu_data_ptr, 1, size, inf) != size) {
+ delete[] g_icu_data_ptr;
+ g_icu_data_ptr = NULL;
+ fclose(inf);
+ return false;
+ }
+ fclose(inf);
+
+ atexit(free_icu_data_ptr);
+
+ UErrorCode err = U_ZERO_ERROR;
+ udata_setCommonData(reinterpret_cast<void*>(g_icu_data_ptr), &err);
+ return err == U_ZERO_ERROR;
+#endif
#endif
}
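
The new ICU_UTIL_DATA_FILE branch loads the ICU data table by reading the whole file into a heap buffer, registering an atexit handler to free it, and handing the buffer to udata_setCommonData. A self-contained sketch of the same file-loading pattern is below; it omits the ICU call so it compiles without ICU headers, and LoadDataFile/FreeData are invented names.

    #include <cstddef>
    #include <cstdio>
    #include <cstdlib>

    namespace {
    char* g_data = NULL;
    void FreeData() { delete[] g_data; }
    }  // namespace

    // Read the whole file into a heap buffer, mirroring the FILE branch above.
    // Returns true on success; the buffer stays alive until process exit.
    bool LoadDataFile(const char* path) {
      if (path == NULL) return false;
      if (g_data != NULL) return true;   // already loaded

      FILE* inf = std::fopen(path, "rb");
      if (inf == NULL) return false;

      std::fseek(inf, 0, SEEK_END);
      size_t size = static_cast<size_t>(std::ftell(inf));
      std::rewind(inf);

      g_data = new char[size];
      if (std::fread(g_data, 1, size, inf) != size) {
        delete[] g_data;
        g_data = NULL;
        std::fclose(inf);
        return false;
      }
      std::fclose(inf);

      std::atexit(FreeData);
      // The real code now hands g_data to udata_setCommonData(); omitted here
      // so the sketch builds without ICU.
      return true;
    }

    int main(int argc, char** argv) {
      return (argc > 1 && LoadDataFile(argv[1])) ? 0 : 1;
    }
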
diff --git a/deps/v8/src/icu_util.h b/deps/v8/src/icu_util.h
index 478abce508..6b50c185c5 100644
--- a/deps/v8/src/icu_util.h
+++ b/deps/v8/src/icu_util.h
@@ -35,7 +35,7 @@ namespace internal {
// Call this function to load ICU's data tables for the current process. This
// function should be called before ICU is used.
-bool InitializeICU();
+bool InitializeICU(const char* icu_data_file);
} } // namespace v8::internal
diff --git a/deps/v8/src/incremental-marking.cc b/deps/v8/src/incremental-marking.cc
index 1b9a28a5b7..bbe0c51a58 100644
--- a/deps/v8/src/incremental-marking.cc
+++ b/deps/v8/src/incremental-marking.cc
@@ -83,28 +83,6 @@ void IncrementalMarking::RecordWriteFromCode(HeapObject* obj,
Isolate* isolate) {
ASSERT(obj->IsHeapObject());
IncrementalMarking* marking = isolate->heap()->incremental_marking();
- ASSERT(!marking->is_compacting_);
-
- MemoryChunk* chunk = MemoryChunk::FromAddress(obj->address());
- int counter = chunk->write_barrier_counter();
- if (counter < (MemoryChunk::kWriteBarrierCounterGranularity / 2)) {
- marking->write_barriers_invoked_since_last_step_ +=
- MemoryChunk::kWriteBarrierCounterGranularity -
- chunk->write_barrier_counter();
- chunk->set_write_barrier_counter(
- MemoryChunk::kWriteBarrierCounterGranularity);
- }
-
- marking->RecordWrite(obj, slot, *slot);
-}
-
-
-void IncrementalMarking::RecordWriteForEvacuationFromCode(HeapObject* obj,
- Object** slot,
- Isolate* isolate) {
- ASSERT(obj->IsHeapObject());
- IncrementalMarking* marking = isolate->heap()->incremental_marking();
- ASSERT(marking->is_compacting_);
MemoryChunk* chunk = MemoryChunk::FromAddress(obj->address());
int counter = chunk->write_barrier_counter();
diff --git a/deps/v8/src/incremental-marking.h b/deps/v8/src/incremental-marking.h
index d47c300ef3..f4362ff5da 100644
--- a/deps/v8/src/incremental-marking.h
+++ b/deps/v8/src/incremental-marking.h
@@ -100,7 +100,7 @@ class IncrementalMarking {
// Do some marking every time this much memory has been allocated or that many
// heavy (color-checking) write barriers have been invoked.
static const intptr_t kAllocatedThreshold = 65536;
- static const intptr_t kWriteBarriersInvokedThreshold = 65536;
+ static const intptr_t kWriteBarriersInvokedThreshold = 32768;
// Start off by marking this many times more memory than has been allocated.
static const intptr_t kInitialMarkingSpeed = 1;
// But if we are promoting a lot of data we need to mark faster to keep up
@@ -129,10 +129,6 @@ class IncrementalMarking {
Object** slot,
Isolate* isolate);
- static void RecordWriteForEvacuationFromCode(HeapObject* obj,
- Object** slot,
- Isolate* isolate);
-
// Record a slot for compaction. Returns false for objects that are
// guaranteed to be rescanned or not guaranteed to survive.
//
diff --git a/deps/v8/src/interpreter-irregexp.cc b/deps/v8/src/interpreter-irregexp.cc
index 2fc9fd3025..de54d0c426 100644
--- a/deps/v8/src/interpreter-irregexp.cc
+++ b/deps/v8/src/interpreter-irregexp.cc
@@ -158,25 +158,12 @@ static int32_t Load16Aligned(const byte* pc) {
// matching terminates.
class BacktrackStack {
public:
- explicit BacktrackStack(Isolate* isolate) : isolate_(isolate) {
- if (isolate->irregexp_interpreter_backtrack_stack_cache() != NULL) {
- // If the cache is not empty reuse the previously allocated stack.
- data_ = isolate->irregexp_interpreter_backtrack_stack_cache();
- isolate->set_irregexp_interpreter_backtrack_stack_cache(NULL);
- } else {
- // Cache was empty. Allocate a new backtrack stack.
- data_ = NewArray<int>(kBacktrackStackSize);
- }
+ explicit BacktrackStack() {
+ data_ = NewArray<int>(kBacktrackStackSize);
}
~BacktrackStack() {
- if (isolate_->irregexp_interpreter_backtrack_stack_cache() == NULL) {
- // The cache is empty. Keep this backtrack stack around.
- isolate_->set_irregexp_interpreter_backtrack_stack_cache(data_);
- } else {
- // A backtrack stack was already cached, just release this one.
- DeleteArray(data_);
- }
+ DeleteArray(data_);
}
int* data() const { return data_; }
@@ -187,7 +174,6 @@ class BacktrackStack {
static const int kBacktrackStackSize = 10000;
int* data_;
- Isolate* isolate_;
DISALLOW_COPY_AND_ASSIGN(BacktrackStack);
};
@@ -204,7 +190,7 @@ static RegExpImpl::IrregexpResult RawMatch(Isolate* isolate,
// BacktrackStack ensures that the memory allocated for the backtracking stack
// is returned to the system when matching terminates.
- BacktrackStack backtrack_stack(isolate);
+ BacktrackStack backtrack_stack;
int* backtrack_stack_base = backtrack_stack.data();
int* backtrack_sp = backtrack_stack_base;
int backtrack_stack_space = backtrack_stack.max_size();
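
With the isolate-level cache gone, BacktrackStack reduces to a plain RAII owner of a fixed-size int buffer: allocate in the constructor, release in the destructor, no sharing between instances. A standalone equivalent (BacktrackBuffer is an invented name, not V8 code) looks like this:

    // Plain RAII buffer: allocate on construction, free on destruction,
    // never shared between instances (copying is disallowed).
    class BacktrackBuffer {
     public:
      BacktrackBuffer() : data_(new int[kSize]) {}
      ~BacktrackBuffer() { delete[] data_; }

      int* data() const { return data_; }
      int max_size() const { return kSize; }

     private:
      static const int kSize = 10000;
      int* data_;

      // Disallow copy and assign, like DISALLOW_COPY_AND_ASSIGN in V8.
      BacktrackBuffer(const BacktrackBuffer&);
      void operator=(const BacktrackBuffer&);
    };

    int main() {
      BacktrackBuffer stack;   // allocated here
      stack.data()[0] = 0;
      return 0;
    }                          // freed here; no isolate-level cache involved
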
diff --git a/deps/v8/src/isolate.cc b/deps/v8/src/isolate.cc
index 8a2f4219c7..7e06a2ed5f 100644
--- a/deps/v8/src/isolate.cc
+++ b/deps/v8/src/isolate.cc
@@ -80,10 +80,6 @@ int ThreadId::GetCurrentThreadId() {
ThreadLocalTop::ThreadLocalTop() {
InitializeInternal();
- // This flag may be set using v8::V8::IgnoreOutOfMemoryException()
- // before an isolate is initialized. The initialize methods below do
- // not touch it to preserve its value.
- ignore_out_of_memory_ = false;
}
@@ -453,10 +449,10 @@ Handle<JSArray> Isolate::CaptureSimpleStackTrace(Handle<JSObject> error_object,
// If the caller parameter is a function we skip frames until we're
// under it before starting to collect.
bool seen_caller = !caller->IsJSFunction();
- // First element is reserved to store the number of non-strict frames.
+ // First element is reserved to store the number of sloppy frames.
int cursor = 1;
int frames_seen = 0;
- int non_strict_frames = 0;
+ int sloppy_frames = 0;
bool encountered_strict_function = false;
for (StackFrameIterator iter(this);
!iter.done() && frames_seen < limit;
@@ -487,13 +483,13 @@ Handle<JSArray> Isolate::CaptureSimpleStackTrace(Handle<JSObject> error_object,
Handle<Smi> offset(Smi::FromInt(frames[i].offset()), this);
// The stack trace API should not expose receivers and function
// objects on frames deeper than the top-most one with a strict
- // mode function. The number of non-strict frames is stored as
+ // mode function. The number of sloppy frames is stored as
// first element in the result array.
if (!encountered_strict_function) {
- if (!fun->shared()->is_classic_mode()) {
+ if (fun->shared()->strict_mode() == STRICT) {
encountered_strict_function = true;
} else {
- non_strict_frames++;
+ sloppy_frames++;
}
}
elements->set(cursor++, *recv);
@@ -503,7 +499,7 @@ Handle<JSArray> Isolate::CaptureSimpleStackTrace(Handle<JSObject> error_object,
}
}
}
- elements->set(0, Smi::FromInt(non_strict_frames));
+ elements->set(0, Smi::FromInt(sloppy_frames));
Handle<JSArray> result = factory()->NewJSArrayWithElements(elements);
result->set_length(Smi::FromInt(cursor));
return result;
@@ -778,7 +774,7 @@ static MayAccessDecision MayAccessPreCheck(Isolate* isolate,
bool Isolate::MayNamedAccess(JSObject* receiver, Object* key,
v8::AccessType type) {
- ASSERT(receiver->IsAccessCheckNeeded());
+ ASSERT(receiver->IsJSGlobalProxy() || receiver->IsAccessCheckNeeded());
// The callers of this method are not expecting a GC.
DisallowHeapAllocation no_gc;
@@ -829,7 +825,7 @@ bool Isolate::MayNamedAccess(JSObject* receiver, Object* key,
bool Isolate::MayIndexedAccess(JSObject* receiver,
uint32_t index,
v8::AccessType type) {
- ASSERT(receiver->IsAccessCheckNeeded());
+ ASSERT(receiver->IsJSGlobalProxy() || receiver->IsAccessCheckNeeded());
// Check for compatibility between the security tokens in the
// current lexical context and the accessed object.
ASSERT(context());
@@ -946,10 +942,17 @@ Failure* Isolate::ReThrow(MaybeObject* exception) {
Failure* Isolate::ThrowIllegalOperation() {
+ if (FLAG_stack_trace_on_illegal) PrintStack(stdout);
return Throw(heap_.illegal_access_string());
}
+Failure* Isolate::ThrowInvalidStringLength() {
+ return Throw(*factory()->NewRangeError(
+ "invalid_string_length", HandleVector<Object>(NULL, 0)));
+}
+
+
void Isolate::ScheduleThrow(Object* exception) {
// When scheduling a throw we first throw the exception to get the
// error reporting if it is uncaught before rescheduling it.
@@ -1122,8 +1125,6 @@ void Isolate::DoThrow(Object* exception, MessageLocation* location) {
// while the bootstrapper is active since the infrastructure may not have
// been properly initialized.
if (!bootstrapping) {
- Handle<String> stack_trace;
- if (FLAG_trace_exception) stack_trace = StackTraceString();
Handle<JSArray> stack_trace_object;
if (capture_stack_trace_for_uncaught_exceptions_) {
if (IsErrorObject(exception_handle)) {
@@ -1163,7 +1164,6 @@ void Isolate::DoThrow(Object* exception, MessageLocation* location) {
"uncaught_exception",
location,
HandleVector<Object>(&exception_arg, 1),
- stack_trace,
stack_trace_object);
thread_local_top()->pending_message_obj_ = *message_obj;
if (location != NULL) {
@@ -1269,14 +1269,8 @@ void Isolate::ReportPendingMessages() {
ASSERT(has_pending_exception());
PropagatePendingExceptionToExternalTryCatch();
- // If the pending exception is OutOfMemoryException set out_of_memory in
- // the native context. Note: We have to mark the native context here
- // since the GenerateThrowOutOfMemory stub cannot make a RuntimeCall to
- // set it.
HandleScope scope(this);
- if (thread_local_top_.pending_exception_->IsOutOfMemory()) {
- context()->mark_out_of_memory();
- } else if (thread_local_top_.pending_exception_ ==
+ if (thread_local_top_.pending_exception_ ==
heap()->termination_exception()) {
// Do nothing: if needed, the exception has been already propagated to
// v8::TryCatch.
@@ -1307,8 +1301,7 @@ void Isolate::ReportPendingMessages() {
MessageLocation Isolate::GetMessageLocation() {
ASSERT(has_pending_exception());
- if (!thread_local_top_.pending_exception_->IsOutOfMemory() &&
- thread_local_top_.pending_exception_ != heap()->termination_exception() &&
+ if (thread_local_top_.pending_exception_ != heap()->termination_exception() &&
thread_local_top_.has_pending_message_ &&
!thread_local_top_.pending_message_obj_->IsTheHole() &&
!thread_local_top_.pending_message_obj_->IsTheHole()) {
@@ -1327,39 +1320,36 @@ bool Isolate::OptionalRescheduleException(bool is_bottom_call) {
ASSERT(has_pending_exception());
PropagatePendingExceptionToExternalTryCatch();
- // Always reschedule out of memory exceptions.
- if (!is_out_of_memory()) {
- bool is_termination_exception =
- pending_exception() == heap_.termination_exception();
+ bool is_termination_exception =
+ pending_exception() == heap_.termination_exception();
- // Do not reschedule the exception if this is the bottom call.
- bool clear_exception = is_bottom_call;
+ // Do not reschedule the exception if this is the bottom call.
+ bool clear_exception = is_bottom_call;
- if (is_termination_exception) {
- if (is_bottom_call) {
- thread_local_top()->external_caught_exception_ = false;
- clear_pending_exception();
- return false;
- }
- } else if (thread_local_top()->external_caught_exception_) {
- // If the exception is externally caught, clear it if there are no
- // JavaScript frames on the way to the C++ frame that has the
- // external handler.
- ASSERT(thread_local_top()->try_catch_handler_address() != NULL);
- Address external_handler_address =
- thread_local_top()->try_catch_handler_address();
- JavaScriptFrameIterator it(this);
- if (it.done() || (it.frame()->sp() > external_handler_address)) {
- clear_exception = true;
- }
- }
-
- // Clear the exception if needed.
- if (clear_exception) {
+ if (is_termination_exception) {
+ if (is_bottom_call) {
thread_local_top()->external_caught_exception_ = false;
clear_pending_exception();
return false;
}
+ } else if (thread_local_top()->external_caught_exception_) {
+ // If the exception is externally caught, clear it if there are no
+ // JavaScript frames on the way to the C++ frame that has the
+ // external handler.
+ ASSERT(thread_local_top()->try_catch_handler_address() != NULL);
+ Address external_handler_address =
+ thread_local_top()->try_catch_handler_address();
+ JavaScriptFrameIterator it(this);
+ if (it.done() || (it.frame()->sp() > external_handler_address)) {
+ clear_exception = true;
+ }
+ }
+
+ // Clear the exception if needed.
+ if (clear_exception) {
+ thread_local_top()->external_caught_exception_ = false;
+ clear_pending_exception();
+ return false;
}
// Reschedule the exception.
@@ -1379,23 +1369,6 @@ void Isolate::SetCaptureStackTraceForUncaughtExceptions(
}
-bool Isolate::is_out_of_memory() {
- if (has_pending_exception()) {
- MaybeObject* e = pending_exception();
- if (e->IsFailure() && Failure::cast(e)->IsOutOfMemoryException()) {
- return true;
- }
- }
- if (has_scheduled_exception()) {
- MaybeObject* e = scheduled_exception();
- if (e->IsFailure() && Failure::cast(e)->IsOutOfMemoryException()) {
- return true;
- }
- }
- return false;
-}
-
-
Handle<Context> Isolate::native_context() {
return Handle<Context>(context()->global_object()->native_context());
}
@@ -1465,6 +1438,13 @@ Isolate::ThreadDataTable::~ThreadDataTable() {
}
+Isolate::PerIsolateThreadData::~PerIsolateThreadData() {
+#if defined(USE_SIMULATOR)
+ delete simulator_;
+#endif
+}
+
+
Isolate::PerIsolateThreadData*
Isolate::ThreadDataTable::Lookup(Isolate* isolate,
ThreadId thread_id) {
@@ -1545,7 +1525,6 @@ Isolate::Isolate()
global_handles_(NULL),
eternal_handles_(NULL),
thread_manager_(NULL),
- fp_stubs_generated_(false),
has_installed_extensions_(false),
string_tracker_(NULL),
regexp_stack_(NULL),
@@ -1565,8 +1544,8 @@ Isolate::Isolate()
optimizing_compiler_thread_(NULL),
sweeper_thread_(NULL),
num_sweeper_threads_(0),
- max_available_threads_(0),
- stress_deopt_count_(0) {
+ stress_deopt_count_(0),
+ next_optimization_id_(0) {
id_ = NoBarrier_AtomicIncrement(&isolate_counter_, 1);
TRACE_ISOLATE(constructor);
@@ -1581,18 +1560,9 @@ Isolate::Isolate()
thread_manager_ = new ThreadManager();
thread_manager_->isolate_ = this;
-#if V8_TARGET_ARCH_ARM && !defined(__arm__) || \
- V8_TARGET_ARCH_MIPS && !defined(__mips__)
- simulator_initialized_ = false;
- simulator_i_cache_ = NULL;
- simulator_redirection_ = NULL;
-#endif
-
#ifdef DEBUG
// heap_histograms_ initializes itself.
memset(&js_spill_information_, 0, sizeof(js_spill_information_));
- memset(code_kind_statistics_, 0,
- sizeof(code_kind_statistics_[0]) * Code::NUMBER_OF_KINDS);
#endif
#ifdef ENABLE_DEBUGGER_SUPPORT
@@ -1672,6 +1642,10 @@ void Isolate::Deinit() {
delete[] sweeper_thread_;
sweeper_thread_ = NULL;
+ if (FLAG_job_based_sweeping &&
+ heap_.mark_compact_collector()->IsConcurrentSweepingInProgress()) {
+ heap_.mark_compact_collector()->WaitUntilSweepingCompleted();
+ }
if (FLAG_hydrogen_stats) GetHStatistics()->Print();
@@ -1846,9 +1820,7 @@ void Isolate::PropagatePendingExceptionToExternalTryCatch() {
if (!external_caught) return;
- if (thread_local_top_.pending_exception_->IsOutOfMemory()) {
- // Do not propagate OOM exception: we should kill VM asap.
- } else if (thread_local_top_.pending_exception_ ==
+ if (thread_local_top_.pending_exception_ ==
heap()->termination_exception()) {
try_catch_handler()->can_continue_ = false;
try_catch_handler()->has_terminated_ = true;
@@ -1919,7 +1891,7 @@ bool Isolate::Init(Deserializer* des) {
}
// The initialization process does not handle memory exhaustion.
- DisallowAllocationFailure disallow_allocation_failure;
+ DisallowAllocationFailure disallow_allocation_failure(this);
InitializeLoggingAndCounters();
@@ -1967,7 +1939,7 @@ bool Isolate::Init(Deserializer* des) {
// Initialize other runtime facilities
#if defined(USE_SIMULATOR)
-#if V8_TARGET_ARCH_ARM || V8_TARGET_ARCH_MIPS
+#if V8_TARGET_ARCH_ARM || V8_TARGET_ARCH_ARM64 || V8_TARGET_ARCH_MIPS
Simulator::Initialize(this);
#endif
#endif
@@ -2005,6 +1977,12 @@ bool Isolate::Init(Deserializer* des) {
bootstrapper_->Initialize(create_heap_objects);
builtins_.SetUp(this, create_heap_objects);
+ if (FLAG_log_internal_timer_events) {
+ set_event_logger(Logger::LogInternalEvents);
+ } else {
+ set_event_logger(Logger::EmptyLogInternalEvents);
+ }
+
// Set default value if not yet set.
// TODO(yangguo): move this to ResourceConstraints::ConfigureDefaults
// once ResourceConstraints becomes an argument to the Isolate constructor.
@@ -2013,7 +1991,10 @@ bool Isolate::Init(Deserializer* des) {
max_available_threads_ = Max(Min(CPU::NumberOfProcessorsOnline(), 4), 1);
}
- num_sweeper_threads_ = SweeperThread::NumberOfThreads(max_available_threads_);
+ if (!FLAG_job_based_sweeping) {
+ num_sweeper_threads_ =
+ SweeperThread::NumberOfThreads(max_available_threads_);
+ }
if (FLAG_trace_hydrogen || FLAG_trace_hydrogen_stubs) {
PrintF("Concurrent recompilation has been disabled for tracing.\n");
@@ -2099,17 +2080,14 @@ bool Isolate::Init(Deserializer* des) {
CodeStub::GenerateFPStubs(this);
StoreBufferOverflowStub::GenerateFixedRegStubsAheadOfTime(this);
StubFailureTrampolineStub::GenerateAheadOfTime(this);
- // TODO(mstarzinger): The following is an ugly hack to make sure the
- // interface descriptor is initialized even when stubs have been
- // deserialized out of the snapshot without the graph builder.
- FastCloneShallowArrayStub stub(FastCloneShallowArrayStub::CLONE_ELEMENTS,
- DONT_TRACK_ALLOCATION_SITE, 0);
- stub.InitializeInterfaceDescriptor(
- this, code_stub_interface_descriptor(CodeStub::FastCloneShallowArray));
+ // Ensure interface descriptors are initialized even when stubs have been
+ // deserialized out of the snapshot without using the graph builder.
+ FastCloneShallowArrayStub::InstallDescriptors(this);
BinaryOpICStub::InstallDescriptors(this);
BinaryOpWithAllocationSiteStub::InstallDescriptors(this);
- CompareNilICStub::InitializeForIsolate(this);
- ToBooleanStub::InitializeForIsolate(this);
+ CompareNilICStub::InstallDescriptors(this);
+ ToBooleanStub::InstallDescriptors(this);
+ ToNumberStub::InstallDescriptors(this);
ArrayConstructorStubBase::InstallDescriptors(this);
InternalArrayConstructorStubBase::InstallDescriptors(this);
FastNewClosureStub::InstallDescriptors(this);
@@ -2318,4 +2296,25 @@ ISOLATE_INIT_ARRAY_LIST(ISOLATE_FIELD_OFFSET)
#undef ISOLATE_FIELD_OFFSET
#endif
+
+Handle<JSObject> Isolate::GetSymbolRegistry() {
+ if (heap()->symbol_registry()->IsUndefined()) {
+ Handle<Map> map = factory()->NewMap(JS_OBJECT_TYPE, JSObject::kHeaderSize);
+ Handle<JSObject> registry = factory()->NewJSObjectFromMap(map);
+ heap()->set_symbol_registry(*registry);
+
+ static const char* nested[] = {
+ "for", "for_api", "for_intern", "keyFor", "private_api", "private_intern"
+ };
+ for (unsigned i = 0; i < ARRAY_SIZE(nested); ++i) {
+ Handle<String> name = factory()->InternalizeUtf8String(nested[i]);
+ Handle<JSObject> obj = factory()->NewJSObjectFromMap(map);
+ JSObject::NormalizeProperties(obj, KEEP_INOBJECT_PROPERTIES, 8);
+ JSObject::SetProperty(registry, name, obj, NONE, STRICT);
+ }
+ }
+ return Handle<JSObject>::cast(factory()->symbol_registry());
+}
+
+
} } // namespace v8::internal
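
GetSymbolRegistry() is a lazy initializer: on first use it creates the registry object plus one normalized sub-table per name in the nested[] list, stores the registry in the heap roots, and afterwards simply returns the cached object. A rough standalone sketch of that create-once-then-reuse shape, with std::map standing in for the JSObject tables (an invented simplification), follows:

    #include <map>
    #include <string>

    typedef std::map<std::string, int> SymbolTable;

    // Lazily build a registry of named sub-tables on first use, then reuse it.
    const std::map<std::string, SymbolTable>& GetRegistry() {
      static std::map<std::string, SymbolTable>* registry = NULL;
      if (registry == NULL) {
        registry = new std::map<std::string, SymbolTable>();
        static const char* nested[] = {
          "for", "for_api", "for_intern", "keyFor", "private_api",
          "private_intern"
        };
        for (size_t i = 0; i < sizeof(nested) / sizeof(nested[0]); ++i) {
          (*registry)[nested[i]] = SymbolTable();   // one empty table per name
        }
      }
      return *registry;   // cached on every later call
    }

    int main() {
      return GetRegistry().count("keyFor") == 1 ? 0 : 1;
    }
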
diff --git a/deps/v8/src/isolate.h b/deps/v8/src/isolate.h
index d93a862294..b4713786ab 100644
--- a/deps/v8/src/isolate.h
+++ b/deps/v8/src/isolate.h
@@ -102,6 +102,7 @@ class DebuggerAgent;
#endif
#if !defined(__arm__) && V8_TARGET_ARCH_ARM || \
+ !defined(__aarch64__) && V8_TARGET_ARCH_ARM64 || \
!defined(__mips__) && V8_TARGET_ARCH_MIPS
class Redirection;
class Simulator;
@@ -145,7 +146,6 @@ typedef ZoneList<Handle<Object> > ZoneObjectList;
do { \
ASSERT(!(isolate)->has_pending_exception()); \
CHECK(!(call).is_null()); \
- CHECK(!(isolate)->has_pending_exception()); \
} while (false)
#define RETURN_IF_EMPTY_HANDLE(isolate, call) \
@@ -207,6 +207,11 @@ class ThreadId {
};
+#define FIELD_ACCESSOR(type, name) \
+ inline void set_##name(type v) { name##_ = v; } \
+ inline type name() const { return name##_; }
+
+
class ThreadLocalTop BASE_EMBEDDED {
public:
// Does early low-level initialization that does not depend on the
@@ -233,14 +238,7 @@ class ThreadLocalTop BASE_EMBEDDED {
// stack, try_catch_handler_address returns a JS stack address that
// corresponds to the place on the JS stack where the C++ handler
// would have been if the stack were not separate.
- inline Address try_catch_handler_address() {
- return try_catch_handler_address_;
- }
-
- // Set the address of the top C++ try catch handler.
- inline void set_try_catch_handler_address(Address address) {
- try_catch_handler_address_ = address;
- }
+ FIELD_ACCESSOR(Address, try_catch_handler_address)
void Free() {
ASSERT(!has_pending_message_);
@@ -290,9 +288,6 @@ class ThreadLocalTop BASE_EMBEDDED {
// Head of the list of live LookupResults.
LookupResult* top_lookup_result_;
- // Whether out of memory exceptions should be ignored.
- bool ignore_out_of_memory_;
-
private:
void InitializeInternal();
@@ -310,11 +305,28 @@ class ThreadLocalTop BASE_EMBEDDED {
#endif
+
+#if V8_TARGET_ARCH_ARM && !defined(__arm__) || \
+ V8_TARGET_ARCH_ARM64 && !defined(__aarch64__) || \
+ V8_TARGET_ARCH_MIPS && !defined(__mips__)
+
+#define ISOLATE_INIT_SIMULATOR_LIST(V) \
+ V(bool, simulator_initialized, false) \
+ V(HashMap*, simulator_i_cache, NULL) \
+ V(Redirection*, simulator_redirection, NULL)
+#else
+
+#define ISOLATE_INIT_SIMULATOR_LIST(V)
+
+#endif
+
+
#ifdef DEBUG
#define ISOLATE_INIT_DEBUG_ARRAY_LIST(V) \
V(CommentStatistic, paged_space_comments_statistics, \
- CommentStatistic::kMaxComments + 1)
+ CommentStatistic::kMaxComments + 1) \
+ V(int, code_kind_statistics, Code::NUMBER_OF_KINDS)
#else
#define ISOLATE_INIT_DEBUG_ARRAY_LIST(V)
@@ -341,31 +353,39 @@ typedef List<HeapObject*> DebugObjectCache;
/* A previously allocated buffer of kMinimalBufferSize bytes, or NULL. */ \
V(byte*, assembler_spare_buffer, NULL) \
V(FatalErrorCallback, exception_behavior, NULL) \
+ V(LogEventCallback, event_logger, NULL) \
V(AllowCodeGenerationFromStringsCallback, allow_code_gen_callback, NULL) \
/* To distinguish the function templates, so that we can find them in the */ \
/* function cache of the native context. */ \
V(int, next_serial_number, 0) \
V(ExternalReferenceRedirectorPointer*, external_reference_redirector, NULL) \
- V(bool, always_allow_natives_syntax, false) \
/* Part of the state of liveedit. */ \
V(FunctionInfoListener*, active_function_info_listener, NULL) \
/* State for Relocatable. */ \
V(Relocatable*, relocatable_top, NULL) \
V(DebugObjectCache*, string_stream_debug_object_cache, NULL) \
V(Object*, string_stream_current_security_token, NULL) \
- /* TODO(isolates): Release this on destruction? */ \
- V(int*, irregexp_interpreter_backtrack_stack_cache, NULL) \
/* Serializer state. */ \
V(ExternalReferenceTable*, external_reference_table, NULL) \
/* AstNode state. */ \
V(int, ast_node_id, 0) \
V(unsigned, ast_node_count, 0) \
- V(bool, microtask_pending, false) \
+ V(bool, microtask_pending, false) \
+ V(bool, autorun_microtasks, true) \
V(HStatistics*, hstatistics, NULL) \
V(HTracer*, htracer, NULL) \
V(CodeTracer*, code_tracer, NULL) \
+ V(bool, fp_stubs_generated, false) \
+ V(int, max_available_threads, 0) \
+ V(uint32_t, per_isolate_assert_data, 0xFFFFFFFFu) \
+ ISOLATE_INIT_SIMULATOR_LIST(V) \
ISOLATE_DEBUGGER_INIT_LIST(V)
+#define THREAD_LOCAL_TOP_ACCESSOR(type, name) \
+ inline void set_##name(type v) { thread_local_top_.name##_ = v; } \
+ inline type name() const { return thread_local_top_.name##_; }
+
+
class Isolate {
// These forward declarations are required to make the friend declarations in
// PerIsolateThreadData work on some older versions of gcc.
@@ -385,24 +405,23 @@ class Isolate {
stack_limit_(0),
thread_state_(NULL),
#if !defined(__arm__) && V8_TARGET_ARCH_ARM || \
+ !defined(__aarch64__) && V8_TARGET_ARCH_ARM64 || \
!defined(__mips__) && V8_TARGET_ARCH_MIPS
simulator_(NULL),
#endif
next_(NULL),
prev_(NULL) { }
+ ~PerIsolateThreadData();
Isolate* isolate() const { return isolate_; }
ThreadId thread_id() const { return thread_id_; }
- void set_stack_limit(uintptr_t value) { stack_limit_ = value; }
- uintptr_t stack_limit() const { return stack_limit_; }
- ThreadState* thread_state() const { return thread_state_; }
- void set_thread_state(ThreadState* value) { thread_state_ = value; }
+
+ FIELD_ACCESSOR(uintptr_t, stack_limit)
+ FIELD_ACCESSOR(ThreadState*, thread_state)
#if !defined(__arm__) && V8_TARGET_ARCH_ARM || \
+ !defined(__aarch64__) && V8_TARGET_ARCH_ARM64 || \
!defined(__mips__) && V8_TARGET_ARCH_MIPS
- Simulator* simulator() const { return simulator_; }
- void set_simulator(Simulator* simulator) {
- simulator_ = simulator;
- }
+ FIELD_ACCESSOR(Simulator*, simulator)
#endif
bool Matches(Isolate* isolate, ThreadId thread_id) const {
@@ -416,6 +435,7 @@ class Isolate {
ThreadState* thread_state_;
#if !defined(__arm__) && V8_TARGET_ARCH_ARM || \
+ !defined(__aarch64__) && V8_TARGET_ARCH_ARM64 || \
!defined(__mips__) && V8_TARGET_ARCH_MIPS
Simulator* simulator_;
#endif
@@ -541,38 +561,35 @@ class Isolate {
}
Context** context_address() { return &thread_local_top_.context_; }
- SaveContext* save_context() { return thread_local_top_.save_context_; }
- void set_save_context(SaveContext* save) {
- thread_local_top_.save_context_ = save;
- }
+ THREAD_LOCAL_TOP_ACCESSOR(SaveContext*, save_context)
// Access to current thread id.
- ThreadId thread_id() { return thread_local_top_.thread_id_; }
- void set_thread_id(ThreadId id) { thread_local_top_.thread_id_ = id; }
+ THREAD_LOCAL_TOP_ACCESSOR(ThreadId, thread_id)
// Interface to pending exception.
MaybeObject* pending_exception() {
ASSERT(has_pending_exception());
return thread_local_top_.pending_exception_;
}
- bool external_caught_exception() {
- return thread_local_top_.external_caught_exception_;
- }
- void set_external_caught_exception(bool value) {
- thread_local_top_.external_caught_exception_ = value;
- }
+
void set_pending_exception(MaybeObject* exception) {
thread_local_top_.pending_exception_ = exception;
}
+
void clear_pending_exception() {
thread_local_top_.pending_exception_ = heap_.the_hole_value();
}
+
MaybeObject** pending_exception_address() {
return &thread_local_top_.pending_exception_;
}
+
bool has_pending_exception() {
return !thread_local_top_.pending_exception_->IsTheHole();
}
+
+ THREAD_LOCAL_TOP_ACCESSOR(bool, external_caught_exception)
+
void clear_pending_message() {
thread_local_top_.has_pending_message_ = false;
thread_local_top_.pending_message_obj_ = heap_.the_hole_value();
@@ -587,12 +604,8 @@ class Isolate {
bool* external_caught_exception_address() {
return &thread_local_top_.external_caught_exception_;
}
- v8::TryCatch* catcher() {
- return thread_local_top_.catcher_;
- }
- void set_catcher(v8::TryCatch* catcher) {
- thread_local_top_.catcher_ = catcher;
- }
+
+ THREAD_LOCAL_TOP_ACCESSOR(v8::TryCatch*, catcher)
MaybeObject** scheduled_exception_address() {
return &thread_local_top_.scheduled_exception_;
@@ -625,8 +638,7 @@ class Isolate {
bool IsExternallyCaught();
bool is_catchable_by_javascript(MaybeObject* exception) {
- return (!exception->IsOutOfMemory()) &&
- (exception != heap()->termination_exception());
+ return exception != heap()->termination_exception();
}
// Serializer.
@@ -705,16 +717,6 @@ class Isolate {
int frame_limit,
StackTrace::StackTraceOptions options);
- // Tells whether the current context has experienced an out of memory
- // exception.
- bool is_out_of_memory();
- bool ignore_out_of_memory() {
- return thread_local_top_.ignore_out_of_memory_;
- }
- void set_ignore_out_of_memory(bool value) {
- thread_local_top_.ignore_out_of_memory_ = value;
- }
-
void PrintCurrentStackTrace(FILE* out);
void PrintStack(StringStream* accumulator);
void PrintStack(FILE* out);
@@ -747,6 +749,10 @@ class Isolate {
v8::AccessType type) {
return MayIndexedAccess(*receiver, index, type);
}
+ void ReportFailedAccessCheckWrapper(Handle<JSObject> receiver,
+ v8::AccessType type) {
+ ReportFailedAccessCheck(*receiver, type);
+ }
bool MayNamedAccess(JSObject* receiver,
Object* key,
@@ -773,6 +779,7 @@ class Isolate {
// Return pending location if any or unfilled structure.
MessageLocation GetMessageLocation();
Failure* ThrowIllegalOperation();
+ Failure* ThrowInvalidStringLength();
// Promote a scheduled exception to pending. Asserts has_scheduled_exception.
Failure* PromoteScheduledException();
@@ -938,12 +945,6 @@ class Isolate {
RuntimeState* runtime_state() { return &runtime_state_; }
- void set_fp_stubs_generated(bool value) {
- fp_stubs_generated_ = value;
- }
-
- bool fp_stubs_generated() { return fp_stubs_generated_; }
-
Builtins* builtins() { return &builtins_; }
void NotifyExtensionInstalled() {
@@ -989,48 +990,15 @@ class Isolate {
JSObject::SpillInformation* js_spill_information() {
return &js_spill_information_;
}
-
- int* code_kind_statistics() { return code_kind_statistics_; }
-#endif
-
-#if V8_TARGET_ARCH_ARM && !defined(__arm__) || \
- V8_TARGET_ARCH_MIPS && !defined(__mips__)
- bool simulator_initialized() { return simulator_initialized_; }
- void set_simulator_initialized(bool initialized) {
- simulator_initialized_ = initialized;
- }
-
- HashMap* simulator_i_cache() { return simulator_i_cache_; }
- void set_simulator_i_cache(HashMap* hash_map) {
- simulator_i_cache_ = hash_map;
- }
-
- Redirection* simulator_redirection() {
- return simulator_redirection_;
- }
- void set_simulator_redirection(Redirection* redirection) {
- simulator_redirection_ = redirection;
- }
#endif
Factory* factory() { return reinterpret_cast<Factory*>(this); }
static const int kJSRegexpStaticOffsetsVectorSize = 128;
- ExternalCallbackScope* external_callback_scope() {
- return thread_local_top_.external_callback_scope_;
- }
- void set_external_callback_scope(ExternalCallbackScope* scope) {
- thread_local_top_.external_callback_scope_ = scope;
- }
+ THREAD_LOCAL_TOP_ACCESSOR(ExternalCallbackScope*, external_callback_scope)
- StateTag current_vm_state() {
- return thread_local_top_.current_vm_state_;
- }
-
- void set_current_vm_state(StateTag state) {
- thread_local_top_.current_vm_state_ = state;
- }
+ THREAD_LOCAL_TOP_ACCESSOR(StateTag, current_vm_state)
void SetData(uint32_t slot, void* data) {
ASSERT(slot < Internals::kNumIsolateDataSlots);
@@ -1041,12 +1009,7 @@ class Isolate {
return embedder_data_[slot];
}
- LookupResult* top_lookup_result() {
- return thread_local_top_.top_lookup_result_;
- }
- void SetTopLookupResult(LookupResult* top) {
- thread_local_top_.top_lookup_result_ = top;
- }
+ THREAD_LOCAL_TOP_ACCESSOR(LookupResult*, top_lookup_result)
bool IsDead() { return has_fatal_error_; }
void SignalFatalError() { has_fatal_error_ = true; }
@@ -1096,14 +1059,6 @@ class Isolate {
bool IsDeferredHandle(Object** location);
#endif // DEBUG
- int max_available_threads() const {
- return max_available_threads_;
- }
-
- void set_max_available_threads(int value) {
- max_available_threads_ = value;
- }
-
bool concurrent_recompilation_enabled() {
// Thread is only available with flag enabled.
ASSERT(optimizing_compiler_thread_ == NULL ||
@@ -1153,6 +1108,17 @@ class Isolate {
// Given an address occupied by a live code object, return that object.
Object* FindCodeObject(Address a);
+ int NextOptimizationId() {
+ int id = next_optimization_id_++;
+ if (!Smi::IsValid(next_optimization_id_)) {
+ next_optimization_id_ = 0;
+ }
+ return id;
+ }
+
+ // Get (and lazily initialize) the registry for per-isolate symbols.
+ Handle<JSObject> GetSymbolRegistry();
+
private:
Isolate();
@@ -1299,7 +1265,6 @@ class Isolate {
EternalHandles* eternal_handles_;
ThreadManager* thread_manager_;
RuntimeState runtime_state_;
- bool fp_stubs_generated_;
Builtins builtins_;
bool has_installed_extensions_;
StringTracker* string_tracker_;
@@ -1329,18 +1294,10 @@ class Isolate {
// Time stamp at initialization.
double time_millis_at_init_;
-#if V8_TARGET_ARCH_ARM && !defined(__arm__) || \
- V8_TARGET_ARCH_MIPS && !defined(__mips__)
- bool simulator_initialized_;
- HashMap* simulator_i_cache_;
- Redirection* simulator_redirection_;
-#endif
-
#ifdef DEBUG
// A static array of histogram info for each type.
HistogramInfo heap_histograms_[LAST_TYPE + 1];
JSObject::SpillInformation js_spill_information_;
- int code_kind_statistics_[Code::NUMBER_OF_KINDS];
#endif
#ifdef ENABLE_DEBUGGER_SUPPORT
@@ -1377,13 +1334,11 @@ class Isolate {
SweeperThread** sweeper_thread_;
int num_sweeper_threads_;
- // TODO(yangguo): This will become obsolete once ResourceConstraints
- // becomes an argument to Isolate constructor.
- int max_available_threads_;
-
// Counts deopt points if deopt_every_n_times is enabled.
unsigned int stress_deopt_count_;
+ int next_optimization_id_;
+
friend class ExecutionAccess;
friend class HandleScopeImplementer;
friend class IsolateInitializer;
@@ -1403,6 +1358,10 @@ class Isolate {
};
+#undef FIELD_ACCESSOR
+#undef THREAD_LOCAL_TOP_ACCESSOR
+
+
// If the GCC version is 4.1.x or 4.2.x an additional field is added to the
// class as a work around for a bug in the generated code found with these
// versions of GCC. See V8 issue 122 for details.
@@ -1509,17 +1468,6 @@ class PostponeInterruptsScope BASE_EMBEDDED {
};
-// Tells whether the native context is marked with out of memory.
-inline bool Context::has_out_of_memory() {
- return native_context()->out_of_memory()->IsTrue();
-}
-
-
-// Mark the native context with out of memory.
-inline void Context::mark_out_of_memory() {
- native_context()->set_out_of_memory(GetIsolate()->heap()->true_value());
-}
-
class CodeTracer V8_FINAL : public Malloced {
public:
explicit CodeTracer(int isolate_id)
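
FIELD_ACCESSOR and THREAD_LOCAL_TOP_ACCESSOR are token-pasting macros that stamp out a set_x()/x() pair per field and are #undef'd at the end of the header. Written out on a toy class (ToyThreadData is invented; the macro body is the one added above), the expansion is:

    #include <cassert>

    #define FIELD_ACCESSOR(type, name)                  \
      inline void set_##name(type v) { name##_ = v; }   \
      inline type name() const { return name##_; }

    class ToyThreadData {
     public:
      ToyThreadData() : stack_limit_(0) {}

      FIELD_ACCESSOR(int, stack_limit)
      // ...expands to:
      //   inline void set_stack_limit(int v) { stack_limit_ = v; }
      //   inline int stack_limit() const { return stack_limit_; }

     private:
      int stack_limit_;
    };

    #undef FIELD_ACCESSOR   // undefined after use, as isolate.h does

    int main() {
      ToyThreadData data;
      data.set_stack_limit(4096);
      assert(data.stack_limit() == 4096);
      return 0;
    }
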
diff --git a/deps/v8/src/json-parser.h b/deps/v8/src/json-parser.h
index 72c69100d1..4c2b479182 100644
--- a/deps/v8/src/json-parser.h
+++ b/deps/v8/src/json-parser.h
@@ -361,7 +361,7 @@ Handle<Object> JsonParser<seq_ascii>::ParseJsonObject() {
Handle<Object> value = ParseJsonValue();
if (value.is_null()) return ReportUnexpectedCharacter();
- JSObject::SetOwnElement(json_object, index, value, kNonStrictMode);
+ JSObject::SetOwnElement(json_object, index, value, SLOPPY);
continue;
}
// Not an index, fallback to the slow path.
@@ -414,9 +414,7 @@ Handle<Object> JsonParser<seq_ascii>::ParseJsonObject() {
if (value->FitsRepresentation(expected_representation)) {
// If the target representation is double and the value is already
// double, use the existing box.
- if (FLAG_track_double_fields &&
- value->IsSmi() &&
- expected_representation.IsDouble()) {
+ if (value->IsSmi() && expected_representation.IsDouble()) {
value = factory()->NewHeapNumber(
Handle<Smi>::cast(value)->value());
}
@@ -608,6 +606,7 @@ Handle<String> JsonParser<seq_ascii>::SlowScanJsonString(
int length = Min(max_length, Max(kInitialSpecialStringLength, 2 * count));
Handle<StringType> seq_string =
NewRawString<StringType>(factory(), length, pretenure_);
+ ASSERT(!seq_string.is_null());
// Copy prefix into seq_str.
SinkChar* dest = seq_string->GetChars();
String::WriteToFlat(*prefix, dest, start, end);
@@ -795,6 +794,7 @@ Handle<String> JsonParser<seq_ascii>::ScanJsonString() {
} while (c0_ != '"');
int length = position_ - beg_pos;
Handle<String> result = factory()->NewRawOneByteString(length, pretenure_);
+ ASSERT(!result.is_null());
uint8_t* dest = SeqOneByteString::cast(*result)->GetChars();
String::WriteToFlat(*source_, dest, beg_pos, position_);
diff --git a/deps/v8/src/json-stringifier.h b/deps/v8/src/json-stringifier.h
index 4510c4b45b..3926969f65 100644
--- a/deps/v8/src/json-stringifier.h
+++ b/deps/v8/src/json-stringifier.h
@@ -51,6 +51,8 @@ class BasicJsonStringifier BASE_EMBEDDED {
enum Result { UNCHANGED, SUCCESS, EXCEPTION, CIRCULAR, STACK_OVERFLOW };
+ void Accumulate();
+
void Extend();
void ChangeEncoding();
@@ -178,6 +180,7 @@ class BasicJsonStringifier BASE_EMBEDDED {
int current_index_;
int part_length_;
bool is_ascii_;
+ bool overflowed_;
static const int kJsonEscapeTableEntrySize = 8;
static const char* const JsonEscapeTable;
@@ -254,12 +257,16 @@ const char* const BasicJsonStringifier::JsonEscapeTable =
BasicJsonStringifier::BasicJsonStringifier(Isolate* isolate)
- : isolate_(isolate), current_index_(0), is_ascii_(true) {
+ : isolate_(isolate),
+ current_index_(0),
+ is_ascii_(true),
+ overflowed_(false) {
factory_ = isolate_->factory();
accumulator_store_ = Handle<JSValue>::cast(
factory_->ToObject(factory_->empty_string()));
part_length_ = kInitialPartLength;
current_part_ = factory_->NewRawOneByteString(part_length_);
+ ASSERT(!current_part_.is_null());
tojson_string_ = factory_->toJSON_string();
stack_ = factory_->NewJSArray(8);
}
@@ -269,9 +276,12 @@ MaybeObject* BasicJsonStringifier::Stringify(Handle<Object> object) {
switch (SerializeObject(object)) {
case UNCHANGED:
return isolate_->heap()->undefined_value();
- case SUCCESS:
+ case SUCCESS: {
ShrinkCurrentPart();
- return *factory_->NewConsString(accumulator(), current_part_);
+ Accumulate();
+ if (overflowed_) return isolate_->ThrowInvalidStringLength();
+ return *accumulator();
+ }
case CIRCULAR:
return isolate_->Throw(*factory_->NewTypeError(
"circular_structure", HandleVector<Object>(NULL, 0)));
@@ -300,6 +310,7 @@ MaybeObject* BasicJsonStringifier::StringifyString(Isolate* isolate,
if (object->IsOneByteRepresentationUnderneath()) {
Handle<String> result =
isolate->factory()->NewRawOneByteString(worst_case_length);
+ ASSERT(!result.is_null());
DisallowHeapAllocation no_gc;
return StringifyString_<SeqOneByteString>(
isolate,
@@ -308,6 +319,7 @@ MaybeObject* BasicJsonStringifier::StringifyString(Isolate* isolate,
} else {
Handle<String> result =
isolate->factory()->NewRawTwoByteString(worst_case_length);
+ ASSERT(!result.is_null());
DisallowHeapAllocation no_gc;
return StringifyString_<SeqTwoByteString>(
isolate,
@@ -381,13 +393,16 @@ BasicJsonStringifier::Result BasicJsonStringifier::StackPush(
if (check.HasOverflowed()) return STACK_OVERFLOW;
int length = Smi::cast(stack_->length())->value();
- FixedArray* elements = FixedArray::cast(stack_->elements());
- for (int i = 0; i < length; i++) {
- if (elements->get(i) == *object) {
- return CIRCULAR;
+ {
+ DisallowHeapAllocation no_allocation;
+ FixedArray* elements = FixedArray::cast(stack_->elements());
+ for (int i = 0; i < length; i++) {
+ if (elements->get(i) == *object) {
+ return CIRCULAR;
+ }
}
}
- stack_->EnsureSize(length + 1);
+ JSArray::EnsureSize(stack_, length + 1);
FixedArray::cast(stack_->elements())->set(length, *object);
stack_->set_length(Smi::FromInt(length + 1));
return SUCCESS;
@@ -486,7 +501,9 @@ BasicJsonStringifier::Result BasicJsonStringifier::SerializeGeneric(
part_length_ = kInitialPartLength; // Allocate conservatively.
Extend(); // Attach current part and allocate new part.
// Attach result string to the accumulator.
- set_accumulator(factory_->NewConsString(accumulator(), result_string));
+ Handle<String> cons = factory_->NewConsString(accumulator(), result_string);
+ RETURN_IF_EMPTY_HANDLE_VALUE(isolate_, cons, EXCEPTION);
+ set_accumulator(cons);
return SUCCESS;
}
@@ -655,7 +672,7 @@ BasicJsonStringifier::Result BasicJsonStringifier::SerializeJSObject(
isolate_);
} else {
property = GetProperty(isolate_, object, key);
- if (property.is_null()) return EXCEPTION;
+ RETURN_IF_EMPTY_HANDLE_VALUE(isolate_, property, EXCEPTION);
}
Result result = SerializeProperty(property, comma, key);
if (!comma && result == SUCCESS) comma = true;
@@ -687,7 +704,7 @@ BasicJsonStringifier::Result BasicJsonStringifier::SerializeJSObject(
property = GetProperty(isolate_, object, key_handle);
}
}
- if (property.is_null()) return EXCEPTION;
+ RETURN_IF_EMPTY_HANDLE_VALUE(isolate_, property, EXCEPTION);
Result result = SerializeProperty(property, comma, key_handle);
if (!comma && result == SUCCESS) comma = true;
if (result >= EXCEPTION) return result;
@@ -708,8 +725,19 @@ void BasicJsonStringifier::ShrinkCurrentPart() {
}
+void BasicJsonStringifier::Accumulate() {
+ if (accumulator()->length() + current_part_->length() > String::kMaxLength) {
+ // Screw it. Simply set the flag and carry on. Throw exception at the end.
+ set_accumulator(factory_->empty_string());
+ overflowed_ = true;
+ } else {
+ set_accumulator(factory_->NewConsString(accumulator(), current_part_));
+ }
+}
+
+
void BasicJsonStringifier::Extend() {
- set_accumulator(factory_->NewConsString(accumulator(), current_part_));
+ Accumulate();
if (part_length_ <= kMaxPartLength / kPartLengthGrowthFactor) {
part_length_ *= kPartLengthGrowthFactor;
}
@@ -718,14 +746,16 @@ void BasicJsonStringifier::Extend() {
} else {
current_part_ = factory_->NewRawTwoByteString(part_length_);
}
+ ASSERT(!current_part_.is_null());
current_index_ = 0;
}
void BasicJsonStringifier::ChangeEncoding() {
ShrinkCurrentPart();
- set_accumulator(factory_->NewConsString(accumulator(), current_part_));
+ Accumulate();
current_part_ = factory_->NewRawTwoByteString(part_length_);
+ ASSERT(!current_part_.is_null());
current_index_ = 0;
is_ascii_ = false;
}
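
The json.cc changes above stop building the result eagerly: Accumulate() records an over-long result in overflowed_ and serialization carries on, with ThrowInvalidStringLength raised only once at the end. A minimal sketch of that deferred-failure accumulation, assuming nothing beyond the standard library (PartAccumulator and its limit are invented for illustration):

#include <cstddef>
#include <stdexcept>
#include <string>

// Illustrative only: accumulate string parts, remember overflow instead of
// failing mid-serialization, and raise the error once at the end.
class PartAccumulator {
 public:
  explicit PartAccumulator(std::size_t max_length) : max_length_(max_length) {}

  void Accumulate(const std::string& part) {
    if (overflowed_) return;  // Already failed; Finish() will throw.
    if (accumulator_.size() + part.size() > max_length_) {
      accumulator_.clear();   // Drop the partial result.
      overflowed_ = true;     // Defer the error until Finish().
    } else {
      accumulator_ += part;
    }
  }

  const std::string& Finish() const {
    if (overflowed_) throw std::length_error("Invalid string length");
    return accumulator_;
  }

 private:
  std::size_t max_length_;
  std::string accumulator_;
  bool overflowed_ = false;
};

// Usage: PartAccumulator acc(1 << 28); acc.Accumulate(part); ...; acc.Finish();
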
diff --git a/deps/v8/src/json.js b/deps/v8/src/json.js
index c21e6351d4..fc4b58deca 100644
--- a/deps/v8/src/json.js
+++ b/deps/v8/src/json.js
@@ -210,6 +210,28 @@ function JSONStringify(value, replacer, space) {
} else {
gap = "";
}
+ if (IS_ARRAY(replacer)) {
+ // Deduplicate replacer array items.
+ var property_list = new InternalArray();
+ var seen_properties = { __proto__: null };
+ var seen_sentinel = {};
+ var length = replacer.length;
+ for (var i = 0; i < length; i++) {
+ var item = replacer[i];
+ if (IS_STRING_WRAPPER(item)) {
+ item = ToString(item);
+ } else {
+ if (IS_NUMBER_WRAPPER(item)) item = ToNumber(item);
+ if (IS_NUMBER(item)) item = %_NumberToString(item);
+ }
+ if (IS_STRING(item) && seen_properties[item] != seen_sentinel) {
+ property_list.push(item);
+ // We cannot use true here because __proto__ needs to be an object.
+ seen_properties[item] = seen_sentinel;
+ }
+ }
+ replacer = property_list;
+ }
return JSONSerialize('', {'': value}, replacer, new InternalArray(), "", gap);
}
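
The json.js hunk normalizes each replacer entry (String and Number wrappers included) to a plain string and keeps only its first occurrence, preserving order, via a sentinel map. The same first-wins, order-preserving filter in a standalone C++ sketch (std::unordered_set plays the role of seen_properties; the JS-specific __proto__ caveat does not carry over):

#include <string>
#include <unordered_set>
#include <vector>

// Keep the first occurrence of each key, preserving the original order,
// mirroring the replacer deduplication added in json.js.
std::vector<std::string> DeduplicateKeys(const std::vector<std::string>& keys) {
  std::vector<std::string> property_list;
  std::unordered_set<std::string> seen;
  for (const std::string& key : keys) {
    if (seen.insert(key).second) {  // insert() returns true only the first time
      property_list.push_back(key);
    }
  }
  return property_list;
}
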
diff --git a/deps/v8/src/jsregexp.cc b/deps/v8/src/jsregexp.cc
index edd2eacd3d..a30fc26ff0 100644
--- a/deps/v8/src/jsregexp.cc
+++ b/deps/v8/src/jsregexp.cc
@@ -49,6 +49,8 @@
#include "ia32/regexp-macro-assembler-ia32.h"
#elif V8_TARGET_ARCH_X64
#include "x64/regexp-macro-assembler-x64.h"
+#elif V8_TARGET_ARCH_ARM64
+#include "arm64/regexp-macro-assembler-arm64.h"
#elif V8_TARGET_ARCH_ARM
#include "arm/regexp-macro-assembler-arm.h"
#elif V8_TARGET_ARCH_MIPS
@@ -464,6 +466,7 @@ bool RegExpImpl::CompileIrregexp(Handle<JSRegExp> re,
// Unable to compile regexp.
Handle<String> error_message =
isolate->factory()->NewStringFromUtf8(CStrVector(result.error_message));
+ ASSERT(!error_message.is_null());
CreateRegExpErrorObjectAndThrow(re, is_ascii, error_message, isolate);
return false;
}
@@ -688,7 +691,8 @@ Handle<JSArray> RegExpImpl::SetLastMatchInfo(Handle<JSArray> last_match_info,
int32_t* match) {
ASSERT(last_match_info->HasFastObjectElements());
int capture_register_count = (capture_count + 1) * 2;
- last_match_info->EnsureSize(capture_register_count + kLastMatchOverhead);
+ JSArray::EnsureSize(last_match_info,
+ capture_register_count + kLastMatchOverhead);
DisallowHeapAllocation no_allocation;
FixedArray* array = FixedArray::cast(last_match_info->elements());
if (match != NULL) {
@@ -3597,9 +3601,12 @@ class AlternativeGenerationList {
// The '2' variant has inclusive from and exclusive to.
-static const int kSpaceRanges[] = { '\t', '\r' + 1, ' ', ' ' + 1, 0x00A0,
- 0x00A1, 0x1680, 0x1681, 0x180E, 0x180F, 0x2000, 0x200B, 0x2028, 0x202A,
- 0x202F, 0x2030, 0x205F, 0x2060, 0x3000, 0x3001, 0xFEFF, 0xFF00, 0x10000 };
+// This covers \s as defined in ECMA-262 5.1, 15.10.2.12,
+// which includes WhiteSpace (7.2) or LineTerminator (7.3) values.
+static const int kSpaceRanges[] = { '\t', '\r' + 1, ' ', ' ' + 1,
+ 0x00A0, 0x00A1, 0x1680, 0x1681, 0x180E, 0x180F, 0x2000, 0x200B,
+ 0x2028, 0x202A, 0x202F, 0x2030, 0x205F, 0x2060, 0x3000, 0x3001,
+ 0xFEFF, 0xFF00, 0x10000 };
static const int kSpaceRangeCount = ARRAY_SIZE(kSpaceRanges);
static const int kWordRanges[] = {
@@ -6085,9 +6092,14 @@ RegExpEngine::CompilationResult RegExpEngine::Compile(
#elif V8_TARGET_ARCH_ARM
RegExpMacroAssemblerARM macro_assembler(mode, (data->capture_count + 1) * 2,
zone);
+#elif V8_TARGET_ARCH_ARM64
+ RegExpMacroAssemblerARM64 macro_assembler(mode, (data->capture_count + 1) * 2,
+ zone);
#elif V8_TARGET_ARCH_MIPS
RegExpMacroAssemblerMIPS macro_assembler(mode, (data->capture_count + 1) * 2,
zone);
+#else
+#error "Unsupported architecture"
#endif
#else // V8_INTERPRETED_REGEXP
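
Both jsregexp.cc hunks extend the per-architecture dispatch: a new #elif branch for ARM64 and, in the native-code path, a trailing #error so an unlisted target fails at compile time rather than silently building without a macro assembler. The shape of that guard, reduced to a compilable sketch (the TARGET_* macros and backend names are placeholders, not V8's configuration flags):

// Placeholder default so the sketch compiles standalone; a real build system
// would define exactly one TARGET_* macro.
#if !defined(TARGET_X64) && !defined(TARGET_ARM) && !defined(TARGET_ARM64)
#define TARGET_X64 1
#endif

// Exactly one backend is selected; unknown targets are rejected up front.
#if defined(TARGET_X64)
static const char* const kBackend = "x64";
#elif defined(TARGET_ARM64)
static const char* const kBackend = "arm64";
#elif defined(TARGET_ARM)
static const char* const kBackend = "arm";
#else
#error "Unsupported architecture"
#endif

int main() { return kBackend[0] == '\0'; }
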
diff --git a/deps/v8/src/libplatform/default-platform.h b/deps/v8/src/libplatform/default-platform.h
index 877b3a63e7..5c48832344 100644
--- a/deps/v8/src/libplatform/default-platform.h
+++ b/deps/v8/src/libplatform/default-platform.h
@@ -50,6 +50,8 @@ class DefaultPlatform : public Platform {
void SetThreadPoolSize(int thread_pool_size);
+ void EnsureInitialized();
+
// v8::Platform implementation.
virtual void CallOnBackgroundThread(
Task *task, ExpectedRuntime expected_runtime) V8_OVERRIDE;
@@ -59,8 +61,6 @@ class DefaultPlatform : public Platform {
private:
static const int kMaxThreadPoolSize = 4;
- void EnsureInitialized();
-
Mutex lock_;
bool initialized_;
int thread_pool_size_;
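
The default-platform.h hunk moves EnsureInitialized() from the private section to the public one, presumably so callers outside the class can make sure the thread pool exists before work is posted. A minimal sketch of that kind of idempotent, mutex-guarded initialization (WorkerPool and its members are made-up names, not the v8::Platform API):

#include <mutex>
#include <thread>
#include <vector>

// Sketch: cheap to call from any site that needs the pool; only the first
// call under the lock actually creates the threads.
class WorkerPool {
 public:
  void EnsureInitialized() {
    std::lock_guard<std::mutex> guard(lock_);
    if (initialized_) return;
    initialized_ = true;
    for (int i = 0; i < thread_pool_size_; ++i) {
      threads_.emplace_back([] { /* worker loop would go here */ });
    }
  }

  ~WorkerPool() {
    for (std::thread& t : threads_) t.join();
  }

 private:
  std::mutex lock_;
  bool initialized_ = false;
  int thread_pool_size_ = 4;
  std::vector<std::thread> threads_;
};

// Usage: WorkerPool pool; pool.EnsureInitialized();  // later calls are no-ops
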
diff --git a/deps/v8/src/lithium-allocator-inl.h b/deps/v8/src/lithium-allocator-inl.h
index deee98877d..7c0cba7fba 100644
--- a/deps/v8/src/lithium-allocator-inl.h
+++ b/deps/v8/src/lithium-allocator-inl.h
@@ -34,6 +34,8 @@
#include "ia32/lithium-ia32.h"
#elif V8_TARGET_ARCH_X64
#include "x64/lithium-x64.h"
+#elif V8_TARGET_ARCH_ARM64
+#include "arm64/lithium-arm64.h"
#elif V8_TARGET_ARCH_ARM
#include "arm/lithium-arm.h"
#elif V8_TARGET_ARCH_MIPS
diff --git a/deps/v8/src/lithium-allocator.cc b/deps/v8/src/lithium-allocator.cc
index 48fa862c90..9987161d4d 100644
--- a/deps/v8/src/lithium-allocator.cc
+++ b/deps/v8/src/lithium-allocator.cc
@@ -35,6 +35,8 @@
#include "ia32/lithium-ia32.h"
#elif V8_TARGET_ARCH_X64
#include "x64/lithium-x64.h"
+#elif V8_TARGET_ARCH_ARM64
+#include "arm64/lithium-arm64.h"
#elif V8_TARGET_ARCH_ARM
#include "arm/lithium-arm.h"
#elif V8_TARGET_ARCH_MIPS
diff --git a/deps/v8/src/lithium-allocator.h b/deps/v8/src/lithium-allocator.h
index 9908ea823d..8a1476a04b 100644
--- a/deps/v8/src/lithium-allocator.h
+++ b/deps/v8/src/lithium-allocator.h
@@ -47,16 +47,12 @@ class HValue;
class BitVector;
class StringStream;
-class LArgument;
class LPlatformChunk;
class LOperand;
class LUnallocated;
-class LConstantOperand;
class LGap;
class LParallelMove;
class LPointerMap;
-class LStackSlot;
-class LRegister;
// This class represents a single point of a LOperand's lifetime.
diff --git a/deps/v8/src/lithium-codegen.cc b/deps/v8/src/lithium-codegen.cc
index 2d71d13c69..be0ff8371a 100644
--- a/deps/v8/src/lithium-codegen.cc
+++ b/deps/v8/src/lithium-codegen.cc
@@ -38,6 +38,9 @@
#elif V8_TARGET_ARCH_ARM
#include "arm/lithium-arm.h"
#include "arm/lithium-codegen-arm.h"
+#elif V8_TARGET_ARCH_ARM64
+#include "arm64/lithium-arm64.h"
+#include "arm64/lithium-codegen-arm64.h"
#elif V8_TARGET_ARCH_MIPS
#include "mips/lithium-mips.h"
#include "mips/lithium-codegen-mips.h"
@@ -104,11 +107,9 @@ bool LCodeGenBase::GenerateBody() {
GenerateBodyInstructionPre(instr);
HValue* value = instr->hydrogen_value();
- if (value->position() != RelocInfo::kNoPosition) {
- ASSERT(!graph()->info()->IsOptimizing() ||
- !FLAG_emit_opt_code_positions ||
- value->position() != RelocInfo::kNoPosition);
- RecordAndWritePosition(value->position());
+ if (!value->position().IsUnknown()) {
+ RecordAndWritePosition(
+ chunk()->graph()->SourcePositionToScriptPosition(value->position()));
}
instr->CompileToNative(codegen);
@@ -141,13 +142,15 @@ void LCodeGenBase::Comment(const char* format, ...) {
int LCodeGenBase::GetNextEmittedBlock() const {
for (int i = current_block_ + 1; i < graph()->blocks()->length(); ++i) {
+ if (!graph()->blocks()->at(i)->IsReachable()) continue;
if (!chunk_->GetLabel(i)->HasReplacement()) return i;
}
return -1;
}
-void LCodeGenBase::RegisterDependentCodeForEmbeddedMaps(Handle<Code> code) {
+void LCodeGenBase::RegisterWeakObjectsInOptimizedCode(Handle<Code> code) {
+ ASSERT(code->is_optimized_code());
ZoneList<Handle<Map> > maps(1, zone());
ZoneList<Handle<JSObject> > objects(1, zone());
ZoneList<Handle<Cell> > cells(1, zone());
@@ -156,11 +159,11 @@ void LCodeGenBase::RegisterDependentCodeForEmbeddedMaps(Handle<Code> code) {
for (RelocIterator it(*code, mode_mask); !it.done(); it.next()) {
RelocInfo::Mode mode = it.rinfo()->rmode();
if (mode == RelocInfo::CELL &&
- Code::IsWeakEmbeddedObject(code->kind(), it.rinfo()->target_cell())) {
+ code->IsWeakObjectInOptimizedCode(it.rinfo()->target_cell())) {
Handle<Cell> cell(it.rinfo()->target_cell());
cells.Add(cell, zone());
} else if (mode == RelocInfo::EMBEDDED_OBJECT &&
- Code::IsWeakEmbeddedObject(code->kind(), it.rinfo()->target_object())) {
+ code->IsWeakObjectInOptimizedCode(it.rinfo()->target_object())) {
if (it.rinfo()->target_object()->IsMap()) {
Handle<Map> map(Map::cast(it.rinfo()->target_object()));
maps.Add(map, zone());
diff --git a/deps/v8/src/lithium-codegen.h b/deps/v8/src/lithium-codegen.h
index f6806781de..3e8d471ea7 100644
--- a/deps/v8/src/lithium-codegen.h
+++ b/deps/v8/src/lithium-codegen.h
@@ -66,7 +66,7 @@ class LCodeGenBase BASE_EMBEDDED {
int GetNextEmittedBlock() const;
- void RegisterDependentCodeForEmbeddedMaps(Handle<Code> code);
+ void RegisterWeakObjectsInOptimizedCode(Handle<Code> code);
protected:
enum Status {
diff --git a/deps/v8/src/lithium.cc b/deps/v8/src/lithium.cc
index b4f96290c7..8753ff14aa 100644
--- a/deps/v8/src/lithium.cc
+++ b/deps/v8/src/lithium.cc
@@ -41,6 +41,9 @@
#elif V8_TARGET_ARCH_MIPS
#include "mips/lithium-mips.h"
#include "mips/lithium-codegen-mips.h"
+#elif V8_TARGET_ARCH_ARM64
+#include "arm64/lithium-arm64.h"
+#include "arm64/lithium-codegen-arm64.h"
#else
#error "Unknown architecture."
#endif
@@ -108,39 +111,40 @@ void LOperand::PrintTo(StringStream* stream) {
case DOUBLE_REGISTER:
stream->Add("[%s|R]", DoubleRegister::AllocationIndexToString(index()));
break;
- case ARGUMENT:
- stream->Add("[arg:%d]", index());
- break;
}
}
-#define DEFINE_OPERAND_CACHE(name, type) \
- L##name* L##name::cache = NULL; \
- \
- void L##name::SetUpCache() { \
- if (cache) return; \
- cache = new L##name[kNumCachedOperands]; \
- for (int i = 0; i < kNumCachedOperands; i++) { \
- cache[i].ConvertTo(type, i); \
- } \
- } \
- \
- void L##name::TearDownCache() { \
- delete[] cache; \
+
+template<LOperand::Kind kOperandKind, int kNumCachedOperands>
+LSubKindOperand<kOperandKind, kNumCachedOperands>*
+LSubKindOperand<kOperandKind, kNumCachedOperands>::cache = NULL;
+
+
+template<LOperand::Kind kOperandKind, int kNumCachedOperands>
+void LSubKindOperand<kOperandKind, kNumCachedOperands>::SetUpCache() {
+ if (cache) return;
+ cache = new LSubKindOperand[kNumCachedOperands];
+ for (int i = 0; i < kNumCachedOperands; i++) {
+ cache[i].ConvertTo(kOperandKind, i);
}
+}
+
+
+template<LOperand::Kind kOperandKind, int kNumCachedOperands>
+void LSubKindOperand<kOperandKind, kNumCachedOperands>::TearDownCache() {
+ delete[] cache;
+}
-LITHIUM_OPERAND_LIST(DEFINE_OPERAND_CACHE)
-#undef DEFINE_OPERAND_CACHE
void LOperand::SetUpCaches() {
-#define LITHIUM_OPERAND_SETUP(name, type) L##name::SetUpCache();
+#define LITHIUM_OPERAND_SETUP(name, type, number) L##name::SetUpCache();
LITHIUM_OPERAND_LIST(LITHIUM_OPERAND_SETUP)
#undef LITHIUM_OPERAND_SETUP
}
void LOperand::TearDownCaches() {
-#define LITHIUM_OPERAND_TEARDOWN(name, type) L##name::TearDownCache();
+#define LITHIUM_OPERAND_TEARDOWN(name, type, number) L##name::TearDownCache();
LITHIUM_OPERAND_LIST(LITHIUM_OPERAND_TEARDOWN)
#undef LITHIUM_OPERAND_TEARDOWN
}
@@ -442,6 +446,7 @@ Handle<Code> LChunk::Codegen() {
CodeGenerator::PrintCode(code, info());
return code;
}
+ assembler.AbortedCodeGeneration();
return Handle<Code>::null();
}
@@ -495,10 +500,9 @@ LEnvironment* LChunkBuilderBase::CreateEnvironment(
LOperand* op;
HValue* value = hydrogen_env->values()->at(i);
+ CHECK(!value->IsPushArgument()); // Do not deopt outgoing arguments
if (value->IsArgumentsObject() || value->IsCapturedObject()) {
op = LEnvironment::materialization_marker();
- } else if (value->IsPushArgument()) {
- op = new(zone()) LArgument(argument_index++);
} else {
op = UseAny(value);
}
diff --git a/deps/v8/src/lithium.h b/deps/v8/src/lithium.h
index 754f88da82..8ae5b879dc 100644
--- a/deps/v8/src/lithium.h
+++ b/deps/v8/src/lithium.h
@@ -35,12 +35,12 @@
namespace v8 {
namespace internal {
-#define LITHIUM_OPERAND_LIST(V) \
- V(ConstantOperand, CONSTANT_OPERAND) \
- V(StackSlot, STACK_SLOT) \
- V(DoubleStackSlot, DOUBLE_STACK_SLOT) \
- V(Register, REGISTER) \
- V(DoubleRegister, DOUBLE_REGISTER)
+#define LITHIUM_OPERAND_LIST(V) \
+ V(ConstantOperand, CONSTANT_OPERAND, 128) \
+ V(StackSlot, STACK_SLOT, 128) \
+ V(DoubleStackSlot, DOUBLE_STACK_SLOT, 128) \
+ V(Register, REGISTER, 16) \
+ V(DoubleRegister, DOUBLE_REGISTER, 16)
class LOperand : public ZoneObject {
@@ -52,20 +52,18 @@ class LOperand : public ZoneObject {
STACK_SLOT,
DOUBLE_STACK_SLOT,
REGISTER,
- DOUBLE_REGISTER,
- ARGUMENT
+ DOUBLE_REGISTER
};
LOperand() : value_(KindField::encode(INVALID)) { }
Kind kind() const { return KindField::decode(value_); }
int index() const { return static_cast<int>(value_) >> kKindFieldWidth; }
-#define LITHIUM_OPERAND_PREDICATE(name, type) \
+#define LITHIUM_OPERAND_PREDICATE(name, type, number) \
bool Is##name() const { return kind() == type; }
LITHIUM_OPERAND_LIST(LITHIUM_OPERAND_PREDICATE)
- LITHIUM_OPERAND_PREDICATE(Argument, ARGUMENT)
- LITHIUM_OPERAND_PREDICATE(Unallocated, UNALLOCATED)
- LITHIUM_OPERAND_PREDICATE(Ignored, INVALID)
+ LITHIUM_OPERAND_PREDICATE(Unallocated, UNALLOCATED, 0)
+ LITHIUM_OPERAND_PREDICATE(Ignored, INVALID, 0)
#undef LITHIUM_OPERAND_PREDICATE
bool Equals(LOperand* other) const { return value_ == other->value_; }
@@ -317,140 +315,35 @@ class LMoveOperands V8_FINAL BASE_EMBEDDED {
};
-class LConstantOperand V8_FINAL : public LOperand {
+template<LOperand::Kind kOperandKind, int kNumCachedOperands>
+class LSubKindOperand V8_FINAL : public LOperand {
public:
- static LConstantOperand* Create(int index, Zone* zone) {
+ static LSubKindOperand* Create(int index, Zone* zone) {
ASSERT(index >= 0);
if (index < kNumCachedOperands) return &cache[index];
- return new(zone) LConstantOperand(index);
+ return new(zone) LSubKindOperand(index);
}
- static LConstantOperand* cast(LOperand* op) {
- ASSERT(op->IsConstantOperand());
- return reinterpret_cast<LConstantOperand*>(op);
+ static LSubKindOperand* cast(LOperand* op) {
+ ASSERT(op->kind() == kOperandKind);
+ return reinterpret_cast<LSubKindOperand*>(op);
}
static void SetUpCache();
static void TearDownCache();
private:
- static const int kNumCachedOperands = 128;
- static LConstantOperand* cache;
+ static LSubKindOperand* cache;
- LConstantOperand() : LOperand() { }
- explicit LConstantOperand(int index) : LOperand(CONSTANT_OPERAND, index) { }
+ LSubKindOperand() : LOperand() { }
+ explicit LSubKindOperand(int index) : LOperand(kOperandKind, index) { }
};
-class LArgument V8_FINAL : public LOperand {
- public:
- explicit LArgument(int index) : LOperand(ARGUMENT, index) { }
-
- static LArgument* cast(LOperand* op) {
- ASSERT(op->IsArgument());
- return reinterpret_cast<LArgument*>(op);
- }
-};
-
-
-class LStackSlot V8_FINAL : public LOperand {
- public:
- static LStackSlot* Create(int index, Zone* zone) {
- ASSERT(index >= 0);
- if (index < kNumCachedOperands) return &cache[index];
- return new(zone) LStackSlot(index);
- }
-
- static LStackSlot* cast(LOperand* op) {
- ASSERT(op->IsStackSlot());
- return reinterpret_cast<LStackSlot*>(op);
- }
-
- static void SetUpCache();
- static void TearDownCache();
-
- private:
- static const int kNumCachedOperands = 128;
- static LStackSlot* cache;
-
- LStackSlot() : LOperand() { }
- explicit LStackSlot(int index) : LOperand(STACK_SLOT, index) { }
-};
-
-
-class LDoubleStackSlot V8_FINAL : public LOperand {
- public:
- static LDoubleStackSlot* Create(int index, Zone* zone) {
- ASSERT(index >= 0);
- if (index < kNumCachedOperands) return &cache[index];
- return new(zone) LDoubleStackSlot(index);
- }
-
- static LDoubleStackSlot* cast(LOperand* op) {
- ASSERT(op->IsStackSlot());
- return reinterpret_cast<LDoubleStackSlot*>(op);
- }
-
- static void SetUpCache();
- static void TearDownCache();
-
- private:
- static const int kNumCachedOperands = 128;
- static LDoubleStackSlot* cache;
-
- LDoubleStackSlot() : LOperand() { }
- explicit LDoubleStackSlot(int index) : LOperand(DOUBLE_STACK_SLOT, index) { }
-};
-
-
-class LRegister V8_FINAL : public LOperand {
- public:
- static LRegister* Create(int index, Zone* zone) {
- ASSERT(index >= 0);
- if (index < kNumCachedOperands) return &cache[index];
- return new(zone) LRegister(index);
- }
-
- static LRegister* cast(LOperand* op) {
- ASSERT(op->IsRegister());
- return reinterpret_cast<LRegister*>(op);
- }
-
- static void SetUpCache();
- static void TearDownCache();
-
- private:
- static const int kNumCachedOperands = 16;
- static LRegister* cache;
-
- LRegister() : LOperand() { }
- explicit LRegister(int index) : LOperand(REGISTER, index) { }
-};
-
-
-class LDoubleRegister V8_FINAL : public LOperand {
- public:
- static LDoubleRegister* Create(int index, Zone* zone) {
- ASSERT(index >= 0);
- if (index < kNumCachedOperands) return &cache[index];
- return new(zone) LDoubleRegister(index);
- }
-
- static LDoubleRegister* cast(LOperand* op) {
- ASSERT(op->IsDoubleRegister());
- return reinterpret_cast<LDoubleRegister*>(op);
- }
-
- static void SetUpCache();
- static void TearDownCache();
-
- private:
- static const int kNumCachedOperands = 16;
- static LDoubleRegister* cache;
-
- LDoubleRegister() : LOperand() { }
- explicit LDoubleRegister(int index) : LOperand(DOUBLE_REGISTER, index) { }
-};
+#define LITHIUM_TYPEDEF_SUBKIND_OPERAND_CLASS(name, type, number) \
+typedef LSubKindOperand<LOperand::type, number> L##name;
+LITHIUM_OPERAND_LIST(LITHIUM_TYPEDEF_SUBKIND_OPERAND_CLASS)
+#undef LITHIUM_TYPEDEF_SUBKIND_OPERAND_CLASS
class LParallelMove V8_FINAL : public ZoneObject {
@@ -679,7 +572,7 @@ class ShallowIterator V8_FINAL BASE_EMBEDDED {
private:
bool ShouldSkip(LOperand* op) {
- return op == NULL || op->IsConstantOperand() || op->IsArgument();
+ return op == NULL || op->IsConstantOperand();
}
// Skip until something interesting, beginning with and including current_.
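
The lithium.h/lithium.cc rewrite collapses LConstantOperand, LStackSlot, LDoubleStackSlot, LRegister and LDoubleRegister into a single LSubKindOperand template parameterized by kind and cache size, then regenerates the old names as typedefs from the operand list macro. The same shape in a compilable toy (Kind, CachedValue and VALUE_LIST are stand-ins, not the Lithium types):

// Toy version of the pattern: one template plus an X-macro list replaces a
// family of nearly identical classes that differ only in a tag and a cache size.
#define VALUE_LIST(V)   \
  V(Small, kSmall, 16)  \
  V(Large, kLarge, 128)

enum class Kind { kSmall, kLarge };

template <Kind kKind, int kNumCached>
class CachedValue {
 public:
  static CachedValue* Create(int index) {
    static CachedValue cache[kNumCached];  // one cache per instantiation
    if (index < kNumCached) {
      cache[index].index_ = index;
      return &cache[index];
    }
    return new CachedValue(index);  // leaked in this toy, zone-allocated in V8
  }
  Kind kind() const { return kKind; }
  int index() const { return index_; }

 private:
  CachedValue() : index_(0) {}
  explicit CachedValue(int index) : index_(index) {}
  int index_;
};

// Regenerate the old class names as typedefs, one per list entry.
#define DECLARE_TYPEDEF(name, kind, number) \
  typedef CachedValue<Kind::kind, number> name##Value;
VALUE_LIST(DECLARE_TYPEDEF)
#undef DECLARE_TYPEDEF

// Usage: SmallValue* v = SmallValue::Create(3);
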
diff --git a/deps/v8/src/liveedit.cc b/deps/v8/src/liveedit.cc
index 002e062436..5eae1073a4 100644
--- a/deps/v8/src/liveedit.cc
+++ b/deps/v8/src/liveedit.cc
@@ -49,14 +49,14 @@ namespace internal {
#ifdef ENABLE_DEBUGGER_SUPPORT
-void SetElementNonStrict(Handle<JSObject> object,
- uint32_t index,
- Handle<Object> value) {
+void SetElementSloppy(Handle<JSObject> object,
+ uint32_t index,
+ Handle<Object> value) {
// Ignore return value from SetElement. It can only be a failure if there
// are element setters causing exceptions and the debugger context has none
// of these.
Handle<Object> no_failure =
- JSObject::SetElement(object, index, value, NONE, kNonStrictMode);
+ JSObject::SetElement(object, index, value, NONE, SLOPPY);
ASSERT(!no_failure.is_null());
USE(no_failure);
}
@@ -359,17 +359,17 @@ class CompareOutputArrayWriter {
void WriteChunk(int char_pos1, int char_pos2, int char_len1, int char_len2) {
Isolate* isolate = array_->GetIsolate();
- SetElementNonStrict(array_,
- current_size_,
- Handle<Object>(Smi::FromInt(char_pos1), isolate));
- SetElementNonStrict(array_,
- current_size_ + 1,
- Handle<Object>(Smi::FromInt(char_pos1 + char_len1),
- isolate));
- SetElementNonStrict(array_,
- current_size_ + 2,
- Handle<Object>(Smi::FromInt(char_pos2 + char_len2),
- isolate));
+ SetElementSloppy(array_,
+ current_size_,
+ Handle<Object>(Smi::FromInt(char_pos1), isolate));
+ SetElementSloppy(array_,
+ current_size_ + 1,
+ Handle<Object>(Smi::FromInt(char_pos1 + char_len1),
+ isolate));
+ SetElementSloppy(array_,
+ current_size_ + 2,
+ Handle<Object>(Smi::FromInt(char_pos2 + char_len2),
+ isolate));
current_size_ += 3;
}
@@ -662,20 +662,20 @@ class JSArrayBasedStruct {
protected:
void SetField(int field_position, Handle<Object> value) {
- SetElementNonStrict(array_, field_position, value);
+ SetElementSloppy(array_, field_position, value);
}
void SetSmiValueField(int field_position, int value) {
- SetElementNonStrict(array_,
- field_position,
- Handle<Smi>(Smi::FromInt(value), isolate()));
+ SetElementSloppy(array_,
+ field_position,
+ Handle<Smi>(Smi::FromInt(value), isolate()));
}
- Object* GetField(int field_position) {
- return array_->GetElementNoExceptionThrown(isolate(), field_position);
+ Handle<Object> GetField(int field_position) {
+ return Object::GetElementNoExceptionThrown(
+ isolate(), array_, field_position);
}
int GetSmiValueField(int field_position) {
- Object* res = GetField(field_position);
- CHECK(res->IsSmi());
- return Smi::cast(res)->value();
+ Handle<Object> res = GetField(field_position);
+ return Handle<Smi>::cast(res)->value();
}
private:
@@ -724,17 +724,15 @@ class FunctionInfoWrapper : public JSArrayBasedStruct<FunctionInfoWrapper> {
return this->GetSmiValueField(kParentIndexOffset_);
}
Handle<Code> GetFunctionCode() {
- Object* element = this->GetField(kCodeOffset_);
- CHECK(element->IsJSValue());
- Handle<JSValue> value_wrapper(JSValue::cast(element));
+ Handle<Object> element = this->GetField(kCodeOffset_);
+ Handle<JSValue> value_wrapper = Handle<JSValue>::cast(element);
Handle<Object> raw_result = UnwrapJSValue(value_wrapper);
CHECK(raw_result->IsCode());
return Handle<Code>::cast(raw_result);
}
Handle<Object> GetCodeScopeInfo() {
- Object* element = this->GetField(kCodeScopeInfoOffset_);
- CHECK(element->IsJSValue());
- return UnwrapJSValue(Handle<JSValue>(JSValue::cast(element)));
+ Handle<Object> element = this->GetField(kCodeScopeInfoOffset_);
+ return UnwrapJSValue(Handle<JSValue>::cast(element));
}
int GetStartPosition() {
return this->GetSmiValueField(kStartPositionOffset_);
@@ -767,8 +765,8 @@ class SharedInfoWrapper : public JSArrayBasedStruct<SharedInfoWrapper> {
public:
static bool IsInstance(Handle<JSArray> array) {
return array->length() == Smi::FromInt(kSize_) &&
- array->GetElementNoExceptionThrown(
- array->GetIsolate(), kSharedInfoOffset_)->IsJSValue();
+ Object::GetElementNoExceptionThrown(
+ array->GetIsolate(), array, kSharedInfoOffset_)->IsJSValue();
}
explicit SharedInfoWrapper(Handle<JSArray> array)
@@ -785,9 +783,8 @@ class SharedInfoWrapper : public JSArrayBasedStruct<SharedInfoWrapper> {
this->SetSmiValueField(kEndPositionOffset_, end_position);
}
Handle<SharedFunctionInfo> GetInfo() {
- Object* element = this->GetField(kSharedInfoOffset_);
- CHECK(element->IsJSValue());
- Handle<JSValue> value_wrapper(JSValue::cast(element));
+ Handle<Object> element = this->GetField(kSharedInfoOffset_);
+ Handle<JSValue> value_wrapper = Handle<JSValue>::cast(element);
return UnwrapSharedFunctionInfoFromJSValue(value_wrapper);
}
@@ -818,7 +815,7 @@ class FunctionInfoListener {
fun->materialized_literal_count(),
current_parent_index_);
current_parent_index_ = len_;
- SetElementNonStrict(result_, len_, info.GetJSArray());
+ SetElementSloppy(result_, len_, info.GetJSArray());
len_++;
}
@@ -826,8 +823,8 @@ class FunctionInfoListener {
HandleScope scope(isolate());
FunctionInfoWrapper info =
FunctionInfoWrapper::cast(
- result_->GetElementNoExceptionThrown(
- isolate(), current_parent_index_));
+ *Object::GetElementNoExceptionThrown(
+ isolate(), result_, current_parent_index_));
current_parent_index_ = info.GetParentIndex();
}
@@ -836,8 +833,8 @@ class FunctionInfoListener {
void FunctionCode(Handle<Code> function_code) {
FunctionInfoWrapper info =
FunctionInfoWrapper::cast(
- result_->GetElementNoExceptionThrown(
- isolate(), current_parent_index_));
+ *Object::GetElementNoExceptionThrown(
+ isolate(), result_, current_parent_index_));
info.SetFunctionCode(function_code,
Handle<HeapObject>(isolate()->heap()->null_value()));
}
@@ -851,8 +848,8 @@ class FunctionInfoListener {
}
FunctionInfoWrapper info =
FunctionInfoWrapper::cast(
- result_->GetElementNoExceptionThrown(
- isolate(), current_parent_index_));
+ *Object::GetElementNoExceptionThrown(
+ isolate(), result_, current_parent_index_));
info.SetFunctionCode(Handle<Code>(shared->code()),
Handle<HeapObject>(shared->scope_info()));
info.SetSharedFunctionInfo(shared);
@@ -885,20 +882,20 @@ class FunctionInfoListener {
context_list.Sort(&Variable::CompareIndex);
for (int i = 0; i < context_list.length(); i++) {
- SetElementNonStrict(scope_info_list,
- scope_info_length,
- context_list[i]->name());
+ SetElementSloppy(scope_info_list,
+ scope_info_length,
+ context_list[i]->name());
scope_info_length++;
- SetElementNonStrict(
+ SetElementSloppy(
scope_info_list,
scope_info_length,
Handle<Smi>(Smi::FromInt(context_list[i]->index()), isolate()));
scope_info_length++;
}
- SetElementNonStrict(scope_info_list,
- scope_info_length,
- Handle<Object>(isolate()->heap()->null_value(),
- isolate()));
+ SetElementSloppy(scope_info_list,
+ scope_info_length,
+ Handle<Object>(isolate()->heap()->null_value(),
+ isolate()));
scope_info_length++;
current_scope = current_scope->outer_scope();
@@ -959,11 +956,11 @@ JSArray* LiveEdit::GatherCompileInfo(Handle<Script> script,
Handle<Smi> end_pos(Smi::FromInt(message_location.end_pos()), isolate);
Handle<JSValue> script_obj = GetScriptWrapper(message_location.script());
JSReceiver::SetProperty(
- rethrow_exception, start_pos_key, start_pos, NONE, kNonStrictMode);
+ rethrow_exception, start_pos_key, start_pos, NONE, SLOPPY);
JSReceiver::SetProperty(
- rethrow_exception, end_pos_key, end_pos, NONE, kNonStrictMode);
+ rethrow_exception, end_pos_key, end_pos, NONE, SLOPPY);
JSReceiver::SetProperty(
- rethrow_exception, script_obj_key, script_obj, NONE, kNonStrictMode);
+ rethrow_exception, script_obj_key, script_obj, NONE, SLOPPY);
}
}
@@ -987,12 +984,12 @@ void LiveEdit::WrapSharedFunctionInfos(Handle<JSArray> array) {
for (int i = 0; i < len; i++) {
Handle<SharedFunctionInfo> info(
SharedFunctionInfo::cast(
- array->GetElementNoExceptionThrown(isolate, i)));
+ *Object::GetElementNoExceptionThrown(isolate, array, i)));
SharedInfoWrapper info_wrapper = SharedInfoWrapper::Create(isolate);
Handle<String> name_handle(String::cast(info->name()));
info_wrapper.SetProperties(name_handle, info->start_position(),
info->end_position(), info);
- SetElementNonStrict(array, i, info_wrapper.GetJSArray());
+ SetElementSloppy(array, i, info_wrapper.GetJSArray());
}
}
@@ -1361,23 +1358,24 @@ static int TranslatePosition(int original_position,
Isolate* isolate = position_change_array->GetIsolate();
// TODO(635): binary search may be used here
for (int i = 0; i < array_len; i += 3) {
- Object* element =
- position_change_array->GetElementNoExceptionThrown(isolate, i);
+ HandleScope scope(isolate);
+ Handle<Object> element = Object::GetElementNoExceptionThrown(
+ isolate, position_change_array, i);
CHECK(element->IsSmi());
- int chunk_start = Smi::cast(element)->value();
+ int chunk_start = Handle<Smi>::cast(element)->value();
if (original_position < chunk_start) {
break;
}
- element = position_change_array->GetElementNoExceptionThrown(isolate,
- i + 1);
+ element = Object::GetElementNoExceptionThrown(
+ isolate, position_change_array, i + 1);
CHECK(element->IsSmi());
- int chunk_end = Smi::cast(element)->value();
+ int chunk_end = Handle<Smi>::cast(element)->value();
// Position mustn't be inside a chunk.
ASSERT(original_position >= chunk_end);
- element = position_change_array->GetElementNoExceptionThrown(isolate,
- i + 2);
+ element = Object::GetElementNoExceptionThrown(
+ isolate, position_change_array, i + 2);
CHECK(element->IsSmi());
- int chunk_changed_end = Smi::cast(element)->value();
+ int chunk_changed_end = Handle<Smi>::cast(element)->value();
position_diff = chunk_changed_end - chunk_end;
}
@@ -1472,7 +1470,6 @@ static Handle<Code> PatchPositionsInCode(
code->instruction_start());
{
- DisallowHeapAllocation no_allocation;
for (RelocIterator it(*code); !it.done(); it.next()) {
RelocInfo* rinfo = it.rinfo();
if (RelocInfo::IsPosition(rinfo->rmode())) {
@@ -1557,7 +1554,6 @@ static Handle<Script> CreateScriptCopy(Handle<Script> original) {
copy->set_name(original->name());
copy->set_line_offset(original->line_offset());
copy->set_column_offset(original->column_offset());
- copy->set_data(original->data());
copy->set_type(original->type());
copy->set_context_data(original->context_data());
copy->set_eval_from_shared(original->eval_from_shared());
@@ -1632,16 +1628,15 @@ static bool CheckActivation(Handle<JSArray> shared_info_array,
Isolate* isolate = shared_info_array->GetIsolate();
int len = GetArrayLength(shared_info_array);
for (int i = 0; i < len; i++) {
- Object* element =
- shared_info_array->GetElementNoExceptionThrown(isolate, i);
- CHECK(element->IsJSValue());
- Handle<JSValue> jsvalue(JSValue::cast(element));
+ HandleScope scope(isolate);
+ Handle<Object> element =
+ Object::GetElementNoExceptionThrown(isolate, shared_info_array, i);
+ Handle<JSValue> jsvalue = Handle<JSValue>::cast(element);
Handle<SharedFunctionInfo> shared =
UnwrapSharedFunctionInfoFromJSValue(jsvalue);
if (function->shared() == *shared || IsInlined(*function, *shared)) {
- SetElementNonStrict(result, i, Handle<Smi>(Smi::FromInt(status),
- isolate));
+ SetElementSloppy(result, i, Handle<Smi>(Smi::FromInt(status), isolate));
return true;
}
}
@@ -1951,11 +1946,12 @@ static const char* DropActivationsInActiveThread(
// Replace "blocked on active" with "replaced on active" status.
for (int i = 0; i < array_len; i++) {
- if (result->GetElement(result->GetIsolate(), i) ==
- Smi::FromInt(LiveEdit::FUNCTION_BLOCKED_ON_ACTIVE_STACK)) {
+ Handle<Object> obj =
+ Object::GetElementNoExceptionThrown(isolate, result, i);
+ if (*obj == Smi::FromInt(LiveEdit::FUNCTION_BLOCKED_ON_ACTIVE_STACK)) {
Handle<Object> replaced(
Smi::FromInt(LiveEdit::FUNCTION_REPLACED_ON_ACTIVE_STACK), isolate);
- SetElementNonStrict(result, i, replaced);
+ SetElementSloppy(result, i, replaced);
}
}
return NULL;
@@ -1996,7 +1992,7 @@ Handle<JSArray> LiveEdit::CheckAndDropActivations(
// Fill the default values.
for (int i = 0; i < len; i++) {
- SetElementNonStrict(
+ SetElementSloppy(
result,
i,
Handle<Smi>(Smi::FromInt(FUNCTION_AVAILABLE_FOR_PATCH), isolate));
@@ -2017,9 +2013,9 @@ Handle<JSArray> LiveEdit::CheckAndDropActivations(
DropActivationsInActiveThread(shared_info_array, result, do_drop);
if (error_message != NULL) {
// Add error message as an array extra element.
- Vector<const char> vector_message(error_message, StrLength(error_message));
- Handle<String> str = isolate->factory()->NewStringFromAscii(vector_message);
- SetElementNonStrict(result, len, str);
+ Handle<String> str = isolate->factory()->NewStringFromAscii(
+ CStrVector(error_message));
+ SetElementSloppy(result, len, str);
}
return result;
}
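
Throughout liveedit.cc, raw Object* element reads are replaced with Handle<Object> plus per-iteration HandleScopes, because element access can now allocate and the collector may move the underlying objects. By analogy only (std::vector reallocation standing in for a moving GC; nothing below is V8 API), the hazard looks like this:

#include <cstddef>
#include <string>
#include <vector>

int main() {
  std::vector<std::string> elements;
  elements.push_back("first");

  std::string* raw = &elements[0];  // raw pointer into the backing store
  std::size_t index = 0;            // "handle": re-resolved on each use

  for (int i = 0; i < 1000; ++i) elements.push_back("filler");  // reallocates

  // `raw` may now dangle after the reallocation; `elements[index]` does not.
  (void)raw;
  return elements[index].empty() ? 1 : 0;
}
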
diff --git a/deps/v8/src/log.cc b/deps/v8/src/log.cc
index 1c332d1736..942170c283 100644
--- a/deps/v8/src/log.cc
+++ b/deps/v8/src/log.cc
@@ -1124,8 +1124,14 @@ void Logger::LeaveExternal(Isolate* isolate) {
}
+void Logger::LogInternalEvents(const char* name, int se) {
+ Isolate* isolate = Isolate::Current();
+ LOG(isolate, TimerEvent(static_cast<StartEnd>(se), name));
+}
+
+
void Logger::TimerEventScope::LogTimerEvent(StartEnd se) {
- LOG(isolate_, TimerEvent(se, name_));
+ isolate_->event_logger()(name_, se);
}
@@ -1192,37 +1198,33 @@ void Logger::RegExpCompileEvent(Handle<JSRegExp> regexp, bool in_cache) {
void Logger::LogRuntime(Vector<const char> format,
- JSArray* args) {
+ Handle<JSArray> args) {
if (!log_->IsEnabled() || !FLAG_log_runtime) return;
- HandleScope scope(isolate_);
Log::MessageBuilder msg(log_);
for (int i = 0; i < format.length(); i++) {
char c = format[i];
if (c == '%' && i <= format.length() - 2) {
i++;
ASSERT('0' <= format[i] && format[i] <= '9');
- MaybeObject* maybe = args->GetElement(isolate_, format[i] - '0');
- Object* obj;
- if (!maybe->ToObject(&obj)) {
- msg.Append("<exception>");
- continue;
- }
+ // No exception expected when getting an element from an array literal.
+ Handle<Object> obj =
+ Object::GetElementNoExceptionThrown(isolate_, args, format[i] - '0');
i++;
switch (format[i]) {
case 's':
- msg.AppendDetailed(String::cast(obj), false);
+ msg.AppendDetailed(String::cast(*obj), false);
break;
case 'S':
- msg.AppendDetailed(String::cast(obj), true);
+ msg.AppendDetailed(String::cast(*obj), true);
break;
case 'r':
- Logger::LogRegExpSource(Handle<JSRegExp>(JSRegExp::cast(obj)));
+ Logger::LogRegExpSource(Handle<JSRegExp>::cast(obj));
break;
case 'x':
- msg.Append("0x%x", Smi::cast(obj)->value());
+ msg.Append("0x%x", Smi::cast(*obj)->value());
break;
case 'i':
- msg.Append("%i", Smi::cast(obj)->value());
+ msg.Append("%i", Smi::cast(*obj)->value());
break;
default:
UNREACHABLE();
diff --git a/deps/v8/src/log.h b/deps/v8/src/log.h
index d4dc76a21c..c01aca273a 100644
--- a/deps/v8/src/log.h
+++ b/deps/v8/src/log.h
@@ -316,15 +316,18 @@ class Logger {
static void EnterExternal(Isolate* isolate);
static void LeaveExternal(Isolate* isolate);
+ static void EmptyLogInternalEvents(const char* name, int se) { }
+ static void LogInternalEvents(const char* name, int se);
+
class TimerEventScope {
public:
TimerEventScope(Isolate* isolate, const char* name)
: isolate_(isolate), name_(name) {
- if (FLAG_log_internal_timer_events) LogTimerEvent(START);
+ LogTimerEvent(START);
}
~TimerEventScope() {
- if (FLAG_log_internal_timer_events) LogTimerEvent(END);
+ LogTimerEvent(END);
}
void LogTimerEvent(StartEnd se);
@@ -346,7 +349,7 @@ class Logger {
void RegExpCompileEvent(Handle<JSRegExp> regexp, bool in_cache);
// Log an event reported from generated code
- void LogRuntime(Vector<const char> format, JSArray* args);
+ void LogRuntime(Vector<const char> format, Handle<JSArray> args);
bool is_logging() {
return is_logging_;
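
Taken together, the log.cc and log.h changes route timer events through an isolate-level callback: log.h adds a no-op EmptyLogInternalEvents alongside LogInternalEvents, and TimerEventScope now invokes whatever event_logger() the isolate has installed instead of checking FLAG_log_internal_timer_events at each site. A hedged sketch of that indirection with plain function pointers (EventLogger, TimerScope and the two handlers below are illustrative names only):

#include <cstdio>

// Sketch: the scope always fires; the cost of disabled logging is just an
// indirect call to a no-op handler.
enum StartEnd { START, END };
typedef void (*EventLogger)(const char* name, int se);

void EmptyLogEvents(const char* /*name*/, int /*se*/) {}  // default: no-op

void LogEvents(const char* name, int se) {
  std::printf("%s %s\n", name, se == START ? "start" : "end");
}

struct TimerScope {
  TimerScope(EventLogger logger, const char* name)
      : logger_(logger), name_(name) {
    logger_(name_, START);
  }
  ~TimerScope() { logger_(name_, END); }
  EventLogger logger_;
  const char* name_;
};

int main() {
  EventLogger logger = LogEvents;  // swap in EmptyLogEvents to disable
  { TimerScope scope(logger, "V8.Example"); }
  return 0;
}
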
diff --git a/deps/v8/src/macro-assembler.h b/deps/v8/src/macro-assembler.h
index 9fdf2ee7d8..b05868c01b 100644
--- a/deps/v8/src/macro-assembler.h
+++ b/deps/v8/src/macro-assembler.h
@@ -72,6 +72,14 @@ const int kInvalidProtoDepth = -1;
#include "x64/assembler-x64-inl.h"
#include "code.h" // must be after assembler_*.h
#include "x64/macro-assembler-x64.h"
+#elif V8_TARGET_ARCH_ARM64
+#include "arm64/constants-arm64.h"
+#include "assembler.h"
+#include "arm64/assembler-arm64.h"
+#include "arm64/assembler-arm64-inl.h"
+#include "code.h" // must be after assembler_*.h
+#include "arm64/macro-assembler-arm64.h"
+#include "arm64/macro-assembler-arm64-inl.h"
#elif V8_TARGET_ARCH_ARM
#include "arm/constants-arm.h"
#include "assembler.h"
@@ -116,6 +124,7 @@ class FrameScope {
// scope, the MacroAssembler is still marked as being in a frame scope, and
// the code will be generated again when it goes out of scope.
void GenerateLeaveFrame() {
+ ASSERT(type_ != StackFrame::MANUAL && type_ != StackFrame::NONE);
masm_->LeaveFrame(type_);
}
diff --git a/deps/v8/src/macros.py b/deps/v8/src/macros.py
index 1722c6c7de..0b69e6b80e 100644
--- a/deps/v8/src/macros.py
+++ b/deps/v8/src/macros.py
@@ -162,6 +162,7 @@ macro TO_OBJECT_INLINE(arg) = (IS_SPEC_OBJECT(%IS_VAR(arg)) ? arg : ToObject(arg
macro JSON_NUMBER_TO_STRING(arg) = ((%_IsSmi(%IS_VAR(arg)) || arg - arg == 0) ? %_NumberToString(arg) : "null");
# Private names.
+macro GLOBAL_PRIVATE(name) = (%CreateGlobalPrivateSymbol(name));
macro NEW_PRIVATE(name) = (%CreatePrivateSymbol(name));
macro IS_PRIVATE(sym) = (%SymbolIsPrivate(sym));
macro HAS_PRIVATE(obj, sym) = (sym in obj);
diff --git a/deps/v8/src/mark-compact-inl.h b/deps/v8/src/mark-compact-inl.h
index 321309c60e..a42e0f7f12 100644
--- a/deps/v8/src/mark-compact-inl.h
+++ b/deps/v8/src/mark-compact-inl.h
@@ -81,14 +81,15 @@ bool MarkCompactCollector::IsMarked(Object* obj) {
void MarkCompactCollector::RecordSlot(Object** anchor_slot,
Object** slot,
- Object* object) {
+ Object* object,
+ SlotsBuffer::AdditionMode mode) {
Page* object_page = Page::FromAddress(reinterpret_cast<Address>(object));
if (object_page->IsEvacuationCandidate() &&
!ShouldSkipEvacuationSlotRecording(anchor_slot)) {
if (!SlotsBuffer::AddTo(&slots_buffer_allocator_,
object_page->slots_buffer_address(),
slot,
- SlotsBuffer::FAIL_ON_OVERFLOW)) {
+ mode)) {
EvictEvacuationCandidate(object_page);
}
}
diff --git a/deps/v8/src/mark-compact.cc b/deps/v8/src/mark-compact.cc
index f38fa5ef1f..f04a8bcb9a 100644
--- a/deps/v8/src/mark-compact.cc
+++ b/deps/v8/src/mark-compact.cc
@@ -67,6 +67,7 @@ MarkCompactCollector::MarkCompactCollector(Heap* heap) : // NOLINT
compacting_(false),
was_marked_incrementally_(false),
sweeping_pending_(false),
+ pending_sweeper_jobs_semaphore_(0),
sequential_sweeping_(false),
tracer_(NULL),
migration_slots_buffer_(NULL),
@@ -91,8 +92,7 @@ class VerifyMarkingVisitor: public ObjectVisitor {
void VisitEmbeddedPointer(RelocInfo* rinfo) {
ASSERT(rinfo->rmode() == RelocInfo::EMBEDDED_OBJECT);
- if (!Code::IsWeakEmbeddedObject(rinfo->host()->kind(),
- rinfo->target_object())) {
+ if (!rinfo->host()->IsWeakObject(rinfo->target_object())) {
Object* p = rinfo->target_object();
VisitPointer(&p);
}
@@ -101,7 +101,7 @@ class VerifyMarkingVisitor: public ObjectVisitor {
void VisitCell(RelocInfo* rinfo) {
Code* code = rinfo->host();
ASSERT(rinfo->rmode() == RelocInfo::CELL);
- if (!Code::IsWeakEmbeddedObject(code->kind(), rinfo->target_cell())) {
+ if (!code->IsWeakObject(rinfo->target_cell())) {
ObjectVisitor::VisitCell(rinfo);
}
}
@@ -227,6 +227,10 @@ static void VerifyEvacuation(NewSpace* space) {
static void VerifyEvacuation(PagedSpace* space) {
+ // TODO(hpayer): Bring back VerifyEvacuation for pages swept in parallel
+ // or concurrently.
+ if ((FLAG_concurrent_sweeping || FLAG_parallel_sweeping) &&
+ space->was_swept_conservatively()) return;
PageIterator it(space);
while (it.has_next()) {
@@ -569,6 +573,27 @@ void MarkCompactCollector::ClearMarkbits() {
}
+class MarkCompactCollector::SweeperTask : public v8::Task {
+ public:
+ SweeperTask(Heap* heap, PagedSpace* space)
+ : heap_(heap), space_(space) {}
+
+ virtual ~SweeperTask() {}
+
+ private:
+ // v8::Task overrides.
+ virtual void Run() V8_OVERRIDE {
+ heap_->mark_compact_collector()->SweepInParallel(space_);
+ heap_->mark_compact_collector()->pending_sweeper_jobs_semaphore_.Signal();
+ }
+
+ Heap* heap_;
+ PagedSpace* space_;
+
+ DISALLOW_COPY_AND_ASSIGN(SweeperTask);
+};
+
+
void MarkCompactCollector::StartSweeperThreads() {
// TODO(hpayer): This check is just used for debugging purposes and
// should be removed or turned into an assert after investigating the
@@ -579,6 +604,14 @@ void MarkCompactCollector::StartSweeperThreads() {
for (int i = 0; i < isolate()->num_sweeper_threads(); i++) {
isolate()->sweeper_threads()[i]->StartSweeping();
}
+ if (FLAG_job_based_sweeping) {
+ V8::GetCurrentPlatform()->CallOnBackgroundThread(
+ new SweeperTask(heap(), heap()->old_data_space()),
+ v8::Platform::kShortRunningTask);
+ V8::GetCurrentPlatform()->CallOnBackgroundThread(
+ new SweeperTask(heap(), heap()->old_pointer_space()),
+ v8::Platform::kShortRunningTask);
+ }
}
@@ -587,6 +620,12 @@ void MarkCompactCollector::WaitUntilSweepingCompleted() {
for (int i = 0; i < isolate()->num_sweeper_threads(); i++) {
isolate()->sweeper_threads()[i]->WaitForSweeperThread();
}
+ if (FLAG_job_based_sweeping) {
+ // Wait twice for both jobs.
+ pending_sweeper_jobs_semaphore_.Wait();
+ pending_sweeper_jobs_semaphore_.Wait();
+ }
+ ParallelSweepSpacesComplete();
sweeping_pending_ = false;
RefillFreeLists(heap()->paged_space(OLD_DATA_SPACE));
RefillFreeLists(heap()->paged_space(OLD_POINTER_SPACE));
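
The two hunks above post one SweeperTask per old space to the platform's background thread pool, each task signalling pending_sweeper_jobs_semaphore_ when it finishes, and WaitUntilSweepingCompleted then waits on the semaphore once per posted job. The same fork/join-by-counting pattern in portable C++ (std::thread and a condition-variable "semaphore" stand in for v8::Platform and V8's Semaphore):

#include <condition_variable>
#include <mutex>
#include <thread>
#include <vector>

// Minimal counting semaphore: Signal() once per finished job,
// Wait() once per job that was started.
class Semaphore {
 public:
  void Signal() {
    std::lock_guard<std::mutex> guard(mutex_);
    ++count_;
    cv_.notify_one();
  }
  void Wait() {
    std::unique_lock<std::mutex> lock(mutex_);
    cv_.wait(lock, [this] { return count_ > 0; });
    --count_;
  }

 private:
  std::mutex mutex_;
  std::condition_variable cv_;
  int count_ = 0;
};

void SweepSpace(int space_id, Semaphore* done) {
  // ... sweeping work for this space would happen here ...
  (void)space_id;
  done->Signal();  // exactly one Signal per job
}

int main() {
  Semaphore done;
  std::vector<std::thread> jobs;
  jobs.emplace_back(SweepSpace, /*old_data_space=*/0, &done);
  jobs.emplace_back(SweepSpace, /*old_pointer_space=*/1, &done);

  done.Wait();  // wait twice: one Wait per posted job
  done.Wait();

  for (std::thread& t : jobs) t.join();
  return 0;
}
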
@@ -616,7 +655,7 @@ intptr_t MarkCompactCollector::RefillFreeLists(PagedSpace* space) {
bool MarkCompactCollector::AreSweeperThreadsActivated() {
- return isolate()->sweeper_threads() != NULL;
+ return isolate()->sweeper_threads() != NULL || FLAG_job_based_sweeping;
}
@@ -625,15 +664,17 @@ bool MarkCompactCollector::IsConcurrentSweepingInProgress() {
}
-bool Marking::TransferMark(Address old_start, Address new_start) {
+void Marking::TransferMark(Address old_start, Address new_start) {
// This is only used when resizing an object.
ASSERT(MemoryChunk::FromAddress(old_start) ==
MemoryChunk::FromAddress(new_start));
+ if (!heap_->incremental_marking()->IsMarking()) return;
+
// If the mark doesn't move, we don't check the color of the object.
// It doesn't matter whether the object is black, since it hasn't changed
// size, so the adjustment to the live data count will be zero anyway.
- if (old_start == new_start) return false;
+ if (old_start == new_start) return;
MarkBit new_mark_bit = MarkBitFrom(new_start);
MarkBit old_mark_bit = MarkBitFrom(old_start);
@@ -646,9 +687,8 @@ bool Marking::TransferMark(Address old_start, Address new_start) {
old_mark_bit.Clear();
ASSERT(IsWhite(old_mark_bit));
Marking::MarkBlack(new_mark_bit);
- return true;
+ return;
} else if (Marking::IsGrey(old_mark_bit)) {
- ASSERT(heap_->incremental_marking()->IsMarking());
old_mark_bit.Clear();
old_mark_bit.Next().Clear();
ASSERT(IsWhite(old_mark_bit));
@@ -661,8 +701,6 @@ bool Marking::TransferMark(Address old_start, Address new_start) {
ObjectColor new_color = Color(new_mark_bit);
ASSERT(new_color == old_color);
#endif
-
- return false;
}
@@ -1825,6 +1863,10 @@ class RootMarkingVisitor : public ObjectVisitor {
for (Object** p = start; p < end; p++) MarkObjectByPointer(p);
}
+ // Skip the weak next code link in a code object, which is visited in
+ // ProcessTopOptimizedFrame.
+ void VisitNextCodeLink(Object** p) { }
+
private:
void MarkObjectByPointer(Object** p) {
if (!(*p)->IsHeapObject()) return;
@@ -2018,7 +2060,7 @@ int MarkCompactCollector::DiscoverAndPromoteBlackObjectsOnPage(
int size = object->Size();
survivors_size += size;
- Heap::UpdateAllocationSiteFeedback(object);
+ Heap::UpdateAllocationSiteFeedback(object, Heap::RECORD_SCRATCHPAD_SLOT);
offset++;
current_cell >>= 1;
@@ -2041,8 +2083,8 @@ int MarkCompactCollector::DiscoverAndPromoteBlackObjectsOnPage(
}
Object* target = allocation->ToObjectUnchecked();
- MigrateObject(HeapObject::cast(target)->address(),
- object->address(),
+ MigrateObject(HeapObject::cast(target),
+ object,
size,
NEW_SPACE);
}
@@ -2784,19 +2826,21 @@ void MarkCompactCollector::ClearWeakCollections() {
// pointer iteration. This is an issue if the store buffer overflows and we
// have to scan the entire old space, including dead objects, looking for
// pointers to new space.
-void MarkCompactCollector::MigrateObject(Address dst,
- Address src,
+void MarkCompactCollector::MigrateObject(HeapObject* dst,
+ HeapObject* src,
int size,
AllocationSpace dest) {
+ Address dst_addr = dst->address();
+ Address src_addr = src->address();
HeapProfiler* heap_profiler = heap()->isolate()->heap_profiler();
if (heap_profiler->is_tracking_object_moves()) {
- heap_profiler->ObjectMoveEvent(src, dst, size);
+ heap_profiler->ObjectMoveEvent(src_addr, dst_addr, size);
}
- ASSERT(heap()->AllowedToBeMigrated(HeapObject::FromAddress(src), dest));
+ ASSERT(heap()->AllowedToBeMigrated(src, dest));
ASSERT(dest != LO_SPACE && size <= Page::kMaxRegularHeapObjectSize);
if (dest == OLD_POINTER_SPACE) {
- Address src_slot = src;
- Address dst_slot = dst;
+ Address src_slot = src_addr;
+ Address dst_slot = dst_addr;
ASSERT(IsAligned(size, kPointerSize));
for (int remaining = size / kPointerSize; remaining > 0; remaining--) {
@@ -2817,8 +2861,8 @@ void MarkCompactCollector::MigrateObject(Address dst,
dst_slot += kPointerSize;
}
- if (compacting_ && HeapObject::FromAddress(dst)->IsJSFunction()) {
- Address code_entry_slot = dst + JSFunction::kCodeEntryOffset;
+ if (compacting_ && dst->IsJSFunction()) {
+ Address code_entry_slot = dst_addr + JSFunction::kCodeEntryOffset;
Address code_entry = Memory::Address_at(code_entry_slot);
if (Page::FromAddress(code_entry)->IsEvacuationCandidate()) {
@@ -2828,21 +2872,36 @@ void MarkCompactCollector::MigrateObject(Address dst,
code_entry_slot,
SlotsBuffer::IGNORE_OVERFLOW);
}
+ } else if (compacting_ && dst->IsConstantPoolArray()) {
+ ConstantPoolArray* constant_pool = ConstantPoolArray::cast(dst);
+ for (int i = 0; i < constant_pool->count_of_code_ptr_entries(); i++) {
+ Address code_entry_slot =
+ dst_addr + constant_pool->OffsetOfElementAt(i);
+ Address code_entry = Memory::Address_at(code_entry_slot);
+
+ if (Page::FromAddress(code_entry)->IsEvacuationCandidate()) {
+ SlotsBuffer::AddTo(&slots_buffer_allocator_,
+ &migration_slots_buffer_,
+ SlotsBuffer::CODE_ENTRY_SLOT,
+ code_entry_slot,
+ SlotsBuffer::IGNORE_OVERFLOW);
+ }
+ }
}
} else if (dest == CODE_SPACE) {
- PROFILE(isolate(), CodeMoveEvent(src, dst));
- heap()->MoveBlock(dst, src, size);
+ PROFILE(isolate(), CodeMoveEvent(src_addr, dst_addr));
+ heap()->MoveBlock(dst_addr, src_addr, size);
SlotsBuffer::AddTo(&slots_buffer_allocator_,
&migration_slots_buffer_,
SlotsBuffer::RELOCATED_CODE_OBJECT,
- dst,
+ dst_addr,
SlotsBuffer::IGNORE_OVERFLOW);
- Code::cast(HeapObject::FromAddress(dst))->Relocate(dst - src);
+ Code::cast(dst)->Relocate(dst_addr - src_addr);
} else {
ASSERT(dest == OLD_DATA_SPACE || dest == NEW_SPACE);
- heap()->MoveBlock(dst, src, size);
+ heap()->MoveBlock(dst_addr, src_addr, size);
}
- Memory::Address_at(src) = dst;
+ Memory::Address_at(src_addr) = dst_addr;
}
@@ -2977,8 +3036,8 @@ bool MarkCompactCollector::TryPromoteObject(HeapObject* object,
MaybeObject* maybe_result = target_space->AllocateRaw(object_size);
if (maybe_result->ToObject(&result)) {
HeapObject* target = HeapObject::cast(result);
- MigrateObject(target->address(),
- object->address(),
+ MigrateObject(target,
+ object,
object_size,
target_space->identity());
heap()->mark_compact_collector()->tracer()->
@@ -2994,7 +3053,7 @@ void MarkCompactCollector::EvacuateNewSpace() {
// There are soft limits in the allocation code, designed to trigger a mark
// sweep collection by failing allocations. But since we are already in
// a mark-sweep allocation, there is no sense in trying to trigger one.
- AlwaysAllocateScope scope;
+ AlwaysAllocateScope scope(isolate());
heap()->CheckNewSpaceExpansionCriteria();
NewSpace* new_space = heap()->new_space();
@@ -3026,7 +3085,7 @@ void MarkCompactCollector::EvacuateNewSpace() {
void MarkCompactCollector::EvacuateLiveObjectsFromPage(Page* p) {
- AlwaysAllocateScope always_allocate;
+ AlwaysAllocateScope always_allocate(isolate());
PagedSpace* space = static_cast<PagedSpace*>(p->owner());
ASSERT(p->IsEvacuationCandidate() && !p->WasSwept());
p->MarkSweptPrecisely();
@@ -3056,8 +3115,8 @@ void MarkCompactCollector::EvacuateLiveObjectsFromPage(Page* p) {
Object* target_object = target->ToObjectUnchecked();
- MigrateObject(HeapObject::cast(target_object)->address(),
- object_addr,
+ MigrateObject(HeapObject::cast(target_object),
+ object,
size,
space->identity());
ASSERT(object->map_word().IsForwardingAddress());
@@ -3170,13 +3229,21 @@ enum SkipListRebuildingMode {
};
+enum FreeSpaceTreatmentMode {
+ IGNORE_FREE_SPACE,
+ ZAP_FREE_SPACE
+};
+
+
// Sweep a space precisely. After this has been done the space can
// be iterated precisely, hitting only the live objects. Code space
// is always swept precisely because we want to be able to iterate
// over it. Map space is swept precisely, because it is not compacted.
// Slots in live objects pointing into evacuation candidates are updated
// if requested.
-template<SweepingMode sweeping_mode, SkipListRebuildingMode skip_list_mode>
+template<SweepingMode sweeping_mode,
+ SkipListRebuildingMode skip_list_mode,
+ FreeSpaceTreatmentMode free_space_mode>
static void SweepPrecisely(PagedSpace* space,
Page* p,
ObjectVisitor* v) {
@@ -3210,6 +3277,9 @@ static void SweepPrecisely(PagedSpace* space,
for ( ; live_objects != 0; live_objects--) {
Address free_end = cell_base + offsets[live_index++] * kPointerSize;
if (free_end != free_start) {
+ if (free_space_mode == ZAP_FREE_SPACE) {
+ memset(free_start, 0xcc, static_cast<int>(free_end - free_start));
+ }
space->Free(free_start, static_cast<int>(free_end - free_start));
#ifdef ENABLE_GDB_JIT_INTERFACE
if (FLAG_gdbjit && space->identity() == CODE_SPACE) {
@@ -3241,6 +3311,9 @@ static void SweepPrecisely(PagedSpace* space,
*cell = 0;
}
if (free_start != p->area_end()) {
+ if (free_space_mode == ZAP_FREE_SPACE) {
+ memset(free_start, 0xcc, static_cast<int>(p->area_end() - free_start));
+ }
space->Free(free_start, static_cast<int>(p->area_end() - free_start));
#ifdef ENABLE_GDB_JIT_INTERFACE
if (FLAG_gdbjit && space->identity() == CODE_SPACE) {
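
SweepPrecisely gains a FreeSpaceTreatmentMode and, under ZAP_FREE_SPACE, memsets each reclaimed range with 0xcc so stale references into freed code space are easy to catch (0xcc is the x86 int3 breakpoint opcode). Reduced to a standalone sketch (ZapRange is a made-up helper, not a V8 function):

#include <cstdint>
#include <cstring>
#include <vector>

// Fill a reclaimed range with 0xcc so accidental execution or reuse of the
// freed bytes is easy to spot.
void ZapRange(uint8_t* free_start, uint8_t* free_end) {
  if (free_end > free_start) {
    std::memset(free_start, 0xcc, static_cast<size_t>(free_end - free_start));
  }
}

int main() {
  std::vector<uint8_t> page(64, 0);
  ZapRange(page.data() + 16, page.data() + 32);  // "free" the middle chunk
  return 0;
}
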
@@ -3386,13 +3459,6 @@ void MarkCompactCollector::EvacuateNewSpaceAndCandidates() {
EvacuateNewSpace();
}
- // We have to traverse our allocation sites scratchpad which contains raw
- // pointers before we move objects. During new space evacuation we
- // gathered pretenuring statistics. The found allocation sites may not be
- // valid after compacting old space.
- heap()->ProcessPretenuringFeedback();
-
-
{ GCTracer::Scope gc_scope(tracer_, GCTracer::Scope::MC_EVACUATE_PAGES);
EvacuatePages();
}
@@ -3493,12 +3559,23 @@ void MarkCompactCollector::EvacuateNewSpaceAndCandidates() {
SweepConservatively<SWEEP_SEQUENTIALLY>(space, NULL, p);
break;
case OLD_POINTER_SPACE:
- SweepPrecisely<SWEEP_AND_VISIT_LIVE_OBJECTS, IGNORE_SKIP_LIST>(
+ SweepPrecisely<SWEEP_AND_VISIT_LIVE_OBJECTS,
+ IGNORE_SKIP_LIST,
+ IGNORE_FREE_SPACE>(
space, p, &updating_visitor);
break;
case CODE_SPACE:
- SweepPrecisely<SWEEP_AND_VISIT_LIVE_OBJECTS, REBUILD_SKIP_LIST>(
- space, p, &updating_visitor);
+ if (FLAG_zap_code_space) {
+ SweepPrecisely<SWEEP_AND_VISIT_LIVE_OBJECTS,
+ REBUILD_SKIP_LIST,
+ ZAP_FREE_SPACE>(
+ space, p, &updating_visitor);
+ } else {
+ SweepPrecisely<SWEEP_AND_VISIT_LIVE_OBJECTS,
+ REBUILD_SKIP_LIST,
+ IGNORE_FREE_SPACE>(
+ space, p, &updating_visitor);
+ }
break;
default:
UNREACHABLE();
@@ -3919,7 +3996,11 @@ intptr_t MarkCompactCollector::SweepConservatively(PagedSpace* space,
(mode == MarkCompactCollector::SWEEP_SEQUENTIALLY &&
free_list == NULL));
- p->MarkSweptConservatively();
+ // When parallel sweeping is active, the page will be marked after
+ // sweeping by the main thread.
+ if (mode != MarkCompactCollector::SWEEP_IN_PARALLEL) {
+ p->MarkSweptConservatively();
+ }
intptr_t freed_bytes = 0;
size_t size = 0;
@@ -4009,6 +4090,7 @@ void MarkCompactCollector::SweepInParallel(PagedSpace* space) {
if (p->TryParallelSweeping()) {
SweepConservatively<SWEEP_IN_PARALLEL>(space, &private_free_list, p);
free_list->Concatenate(&private_free_list);
+ p->set_parallel_sweeping(MemoryChunk::PARALLEL_SWEEPING_FINALIZE);
}
}
}
@@ -4031,7 +4113,7 @@ void MarkCompactCollector::SweepSpace(PagedSpace* space, SweeperType sweeper) {
while (it.has_next()) {
Page* p = it.next();
- ASSERT(p->parallel_sweeping() == 0);
+ ASSERT(p->parallel_sweeping() == MemoryChunk::PARALLEL_SWEEPING_DONE);
ASSERT(!p->IsEvacuationCandidate());
// Clear sweeping flags indicating that marking bits are still intact.
@@ -4104,7 +4186,7 @@ void MarkCompactCollector::SweepSpace(PagedSpace* space, SweeperType sweeper) {
PrintF("Sweeping 0x%" V8PRIxPTR " conservatively in parallel.\n",
reinterpret_cast<intptr_t>(p));
}
- p->set_parallel_sweeping(1);
+ p->set_parallel_sweeping(MemoryChunk::PARALLEL_SWEEPING_PENDING);
space->IncreaseUnsweptFreeBytes(p);
}
break;
@@ -4114,10 +4196,15 @@ void MarkCompactCollector::SweepSpace(PagedSpace* space, SweeperType sweeper) {
PrintF("Sweeping 0x%" V8PRIxPTR " precisely.\n",
reinterpret_cast<intptr_t>(p));
}
- if (space->identity() == CODE_SPACE) {
- SweepPrecisely<SWEEP_ONLY, REBUILD_SKIP_LIST>(space, p, NULL);
+ if (space->identity() == CODE_SPACE && FLAG_zap_code_space) {
+ SweepPrecisely<SWEEP_ONLY, REBUILD_SKIP_LIST, ZAP_FREE_SPACE>(
+ space, p, NULL);
+ } else if (space->identity() == CODE_SPACE) {
+ SweepPrecisely<SWEEP_ONLY, REBUILD_SKIP_LIST, IGNORE_FREE_SPACE>(
+ space, p, NULL);
} else {
- SweepPrecisely<SWEEP_ONLY, IGNORE_SKIP_LIST>(space, p, NULL);
+ SweepPrecisely<SWEEP_ONLY, IGNORE_SKIP_LIST, IGNORE_FREE_SPACE>(
+ space, p, NULL);
}
pages_swept++;
break;
@@ -4146,7 +4233,7 @@ void MarkCompactCollector::SweepSpaces() {
#endif
SweeperType how_to_sweep =
FLAG_lazy_sweeping ? LAZY_CONSERVATIVE : CONSERVATIVE;
- if (isolate()->num_sweeper_threads() > 0) {
+ if (AreSweeperThreadsActivated()) {
if (FLAG_parallel_sweeping) how_to_sweep = PARALLEL_CONSERVATIVE;
if (FLAG_concurrent_sweeping) how_to_sweep = CONCURRENT_CONSERVATIVE;
}
@@ -4161,20 +4248,22 @@ void MarkCompactCollector::SweepSpaces() {
// the map space last because freeing non-live maps overwrites them and
// the other spaces rely on possibly non-live maps to get the sizes for
// non-live objects.
- SequentialSweepingScope scope(this);
- SweepSpace(heap()->old_pointer_space(), how_to_sweep);
- SweepSpace(heap()->old_data_space(), how_to_sweep);
+ { GCTracer::Scope sweep_scope(tracer_, GCTracer::Scope::MC_SWEEP_OLDSPACE);
+ { SequentialSweepingScope scope(this);
+ SweepSpace(heap()->old_pointer_space(), how_to_sweep);
+ SweepSpace(heap()->old_data_space(), how_to_sweep);
+ }
- if (how_to_sweep == PARALLEL_CONSERVATIVE ||
- how_to_sweep == CONCURRENT_CONSERVATIVE) {
- // TODO(hpayer): fix race with concurrent sweeper
- StartSweeperThreads();
- }
+ if (how_to_sweep == PARALLEL_CONSERVATIVE ||
+ how_to_sweep == CONCURRENT_CONSERVATIVE) {
+ // TODO(hpayer): fix race with concurrent sweeper
+ StartSweeperThreads();
+ }
- if (how_to_sweep == PARALLEL_CONSERVATIVE) {
- WaitUntilSweepingCompleted();
+ if (how_to_sweep == PARALLEL_CONSERVATIVE) {
+ WaitUntilSweepingCompleted();
+ }
}
-
RemoveDeadInvalidatedCode();
SweepSpace(heap()->code_space(), PRECISE);
@@ -4196,6 +4285,25 @@ void MarkCompactCollector::SweepSpaces() {
}
+void MarkCompactCollector::ParallelSweepSpaceComplete(PagedSpace* space) {
+ PageIterator it(space);
+ while (it.has_next()) {
+ Page* p = it.next();
+ if (p->parallel_sweeping() == MemoryChunk::PARALLEL_SWEEPING_FINALIZE) {
+ p->set_parallel_sweeping(MemoryChunk::PARALLEL_SWEEPING_DONE);
+ p->MarkSweptConservatively();
+ }
+ ASSERT(p->parallel_sweeping() == MemoryChunk::PARALLEL_SWEEPING_DONE);
+ }
+}
+
+
+void MarkCompactCollector::ParallelSweepSpacesComplete() {
+ ParallelSweepSpaceComplete(heap()->old_pointer_space());
+ ParallelSweepSpaceComplete(heap()->old_data_space());
+}
+
+
void MarkCompactCollector::EnableCodeFlushing(bool enable) {
#ifdef ENABLE_DEBUGGER_SUPPORT
if (isolate()->debug()->IsLoaded() ||
@@ -4290,14 +4398,33 @@ static inline SlotsBuffer::SlotType SlotTypeForRMode(RelocInfo::Mode rmode) {
void MarkCompactCollector::RecordRelocSlot(RelocInfo* rinfo, Object* target) {
Page* target_page = Page::FromAddress(reinterpret_cast<Address>(target));
+ RelocInfo::Mode rmode = rinfo->rmode();
if (target_page->IsEvacuationCandidate() &&
(rinfo->host() == NULL ||
!ShouldSkipEvacuationSlotRecording(rinfo->host()))) {
- if (!SlotsBuffer::AddTo(&slots_buffer_allocator_,
- target_page->slots_buffer_address(),
- SlotTypeForRMode(rinfo->rmode()),
- rinfo->pc(),
- SlotsBuffer::FAIL_ON_OVERFLOW)) {
+ bool success;
+ if (RelocInfo::IsEmbeddedObject(rmode) && rinfo->IsInConstantPool()) {
+ // This doesn't need to be typed since it is just a normal heap pointer.
+ Object** target_pointer =
+ reinterpret_cast<Object**>(rinfo->constant_pool_entry_address());
+ success = SlotsBuffer::AddTo(&slots_buffer_allocator_,
+ target_page->slots_buffer_address(),
+ target_pointer,
+ SlotsBuffer::FAIL_ON_OVERFLOW);
+ } else if (RelocInfo::IsCodeTarget(rmode) && rinfo->IsInConstantPool()) {
+ success = SlotsBuffer::AddTo(&slots_buffer_allocator_,
+ target_page->slots_buffer_address(),
+ SlotsBuffer::CODE_ENTRY_SLOT,
+ rinfo->constant_pool_entry_address(),
+ SlotsBuffer::FAIL_ON_OVERFLOW);
+ } else {
+ success = SlotsBuffer::AddTo(&slots_buffer_allocator_,
+ target_page->slots_buffer_address(),
+ SlotTypeForRMode(rmode),
+ rinfo->pc(),
+ SlotsBuffer::FAIL_ON_OVERFLOW);
+ }
+ if (!success) {
EvictEvacuationCandidate(target_page);
}
}
diff --git a/deps/v8/src/mark-compact.h b/deps/v8/src/mark-compact.h
index 0773d02666..0ebe8a0f74 100644
--- a/deps/v8/src/mark-compact.h
+++ b/deps/v8/src/mark-compact.h
@@ -110,8 +110,7 @@ class Marking {
markbit.Next().Set();
}
- // Returns true if the object whose mark is transferred is marked black.
- bool TransferMark(Address old_start, Address new_start);
+ void TransferMark(Address old_start, Address new_start);
#ifdef DEBUG
enum ObjectColor {
@@ -690,10 +689,14 @@ class MarkCompactCollector {
void RecordCodeEntrySlot(Address slot, Code* target);
void RecordCodeTargetPatch(Address pc, Code* target);
- INLINE(void RecordSlot(Object** anchor_slot, Object** slot, Object* object));
+ INLINE(void RecordSlot(Object** anchor_slot,
+ Object** slot,
+ Object* object,
+ SlotsBuffer::AdditionMode mode =
+ SlotsBuffer::FAIL_ON_OVERFLOW));
- void MigrateObject(Address dst,
- Address src,
+ void MigrateObject(HeapObject* dst,
+ HeapObject* src,
int size,
AllocationSpace to_old_space);
@@ -744,6 +747,8 @@ class MarkCompactCollector {
void MarkAllocationSite(AllocationSite* site);
private:
+ class SweeperTask;
+
explicit MarkCompactCollector(Heap* heap);
~MarkCompactCollector();
@@ -791,6 +796,8 @@ class MarkCompactCollector {
// True if concurrent or parallel sweeping is currently in progress.
bool sweeping_pending_;
+ Semaphore pending_sweeper_jobs_semaphore_;
+
bool sequential_sweeping_;
// A pointer to the current stack-allocated GC tracer object during a full
@@ -940,6 +947,12 @@ class MarkCompactCollector {
void SweepSpace(PagedSpace* space, SweeperType sweeper);
+ // Finalizes the parallel sweeping phase. Marks all the pages that were
+ // swept in parallel.
+ void ParallelSweepSpacesComplete();
+
+ void ParallelSweepSpaceComplete(PagedSpace* space);
+
#ifdef DEBUG
friend class MarkObjectVisitor;
static void VisitObject(HeapObject* obj);
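ParallelSweepSpaceComplete exists because concurrent sweeper tasks can finish a page without finalizing it; the main thread then promotes any page still in PARALLEL_SWEEPING_FINALIZE to PARALLEL_SWEEPING_DONE and marks it conservatively swept. A reduced model of that per-page transition (Page, SweepState and the helper below are illustrative placeholders, not V8 types):

    #include <cassert>
    #include <vector>

    // Hypothetical stand-ins for MemoryChunk's parallel-sweeping states.
    enum class SweepState { kFinalize, kDone };

    struct Page {
      SweepState state;
      bool swept_conservatively;
    };

    // Any page the concurrent sweeper left in the FINALIZE state is promoted
    // to DONE and recorded as conservatively swept on the main thread;
    // afterwards every page in the space must be DONE.
    void FinalizeParallelSweep(std::vector<Page>& space) {
      for (Page& p : space) {
        if (p.state == SweepState::kFinalize) {
          p.state = SweepState::kDone;
          p.swept_conservatively = true;
        }
        assert(p.state == SweepState::kDone);
      }
    }

    int main() {
      std::vector<Page> space = {{SweepState::kFinalize, false},
                                 {SweepState::kDone, true}};
      FinalizeParallelSweep(space);
      return 0;
    }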
diff --git a/deps/v8/src/messages.cc b/deps/v8/src/messages.cc
index 3f4484a098..0077d0309f 100644
--- a/deps/v8/src/messages.cc
+++ b/deps/v8/src/messages.cc
@@ -61,7 +61,6 @@ Handle<JSMessageObject> MessageHandler::MakeMessageObject(
const char* type,
MessageLocation* loc,
Vector< Handle<Object> > args,
- Handle<String> stack_trace,
Handle<JSArray> stack_frames) {
Factory* factory = isolate->factory();
Handle<String> type_handle = factory->InternalizeUtf8String(type);
@@ -82,10 +81,6 @@ Handle<JSMessageObject> MessageHandler::MakeMessageObject(
script_handle = GetScriptWrapper(loc->script());
}
- Handle<Object> stack_trace_handle = stack_trace.is_null()
- ? Handle<Object>::cast(factory->undefined_value())
- : Handle<Object>::cast(stack_trace);
-
Handle<Object> stack_frames_handle = stack_frames.is_null()
? Handle<Object>::cast(factory->undefined_value())
: Handle<Object>::cast(stack_frames);
@@ -96,7 +91,6 @@ Handle<JSMessageObject> MessageHandler::MakeMessageObject(
start,
end,
script_handle,
- stack_trace_handle,
stack_frames_handle);
return message;
diff --git a/deps/v8/src/messages.h b/deps/v8/src/messages.h
index 5d84e46caa..2f4be518b2 100644
--- a/deps/v8/src/messages.h
+++ b/deps/v8/src/messages.h
@@ -95,7 +95,6 @@ class MessageHandler {
const char* type,
MessageLocation* loc,
Vector< Handle<Object> > args,
- Handle<String> stack_trace,
Handle<JSArray> stack_frames);
// Report a formatted message (needs JS allocation).
diff --git a/deps/v8/src/messages.js b/deps/v8/src/messages.js
index e9f1ae46c2..a389bb8fe5 100644
--- a/deps/v8/src/messages.js
+++ b/deps/v8/src/messages.js
@@ -45,10 +45,6 @@ var kMessages = {
unterminated_regexp: ["Invalid regular expression: missing /"],
regexp_flags: ["Cannot supply flags when constructing one RegExp from another"],
incompatible_method_receiver: ["Method ", "%0", " called on incompatible receiver ", "%1"],
- invalid_lhs_in_assignment: ["Invalid left-hand side in assignment"],
- invalid_lhs_in_for_in: ["Invalid left-hand side in for-in"],
- invalid_lhs_in_postfix_op: ["Invalid left-hand side expression in postfix operation"],
- invalid_lhs_in_prefix_op: ["Invalid left-hand side expression in prefix operation"],
multiple_defaults_in_switch: ["More than one default clause in switch statement"],
newline_after_throw: ["Illegal newline after throw"],
redeclaration: ["%0", " '", "%1", "' has already been declared"],
@@ -64,7 +60,6 @@ var kMessages = {
not_defined: ["%0", " is not defined"],
non_object_property_load: ["Cannot read property '", "%0", "' of ", "%1"],
non_object_property_store: ["Cannot set property '", "%0", "' of ", "%1"],
- non_object_property_call: ["Cannot call method '", "%0", "' of ", "%1"],
with_expression: ["%0", " has no properties"],
illegal_invocation: ["Illegal invocation"],
no_setter_in_callback: ["Cannot set property ", "%0", " of ", "%1", " which has only a getter"],
@@ -108,6 +103,7 @@ var kMessages = {
invalid_argument: ["invalid_argument"],
data_view_not_array_buffer: ["First argument to DataView constructor must be an ArrayBuffer"],
constructor_not_function: ["Constructor ", "%0", " requires 'new'"],
+ not_a_symbol: ["%0", " is not a symbol"],
not_a_promise: ["%0", " is not a promise"],
resolver_not_a_function: ["Promise resolver ", "%0", " is not a function"],
promise_cyclic: ["Chaining cycle detected for promise ", "%0"],
@@ -120,7 +116,7 @@ var kMessages = {
invalid_string_length: ["Invalid string length"],
invalid_typed_array_offset: ["Start offset is too large:"],
invalid_typed_array_length: ["Invalid typed array length"],
- invalid_typed_array_alignment: ["%0", "of", "%1", "should be a multiple of", "%3"],
+ invalid_typed_array_alignment: ["%0", " of ", "%1", " should be a multiple of ", "%2"],
typed_array_set_source_too_large:
["Source is too large"],
typed_array_set_negative_offset:
@@ -133,6 +129,11 @@ var kMessages = {
stack_overflow: ["Maximum call stack size exceeded"],
invalid_time_value: ["Invalid time value"],
invalid_count_value: ["Invalid count value"],
+ // ReferenceError
+ invalid_lhs_in_assignment: ["Invalid left-hand side in assignment"],
+ invalid_lhs_in_for: ["Invalid left-hand side in for-loop"],
+ invalid_lhs_in_postfix_op: ["Invalid left-hand side expression in postfix operation"],
+ invalid_lhs_in_prefix_op: ["Invalid left-hand side expression in prefix operation"],
// SyntaxError
paren_in_arg_string: ["Function arg string contains parenthesis"],
not_isvar: ["builtin %IS_VAR: not a variable"],
@@ -155,9 +156,9 @@ var kMessages = {
invalid_preparser_data: ["Invalid preparser data for function ", "%0"],
strict_mode_with: ["Strict mode code may not include a with statement"],
strict_eval_arguments: ["Unexpected eval or arguments in strict mode"],
- too_many_arguments: ["Too many arguments in function call (only 32766 allowed)"],
- too_many_parameters: ["Too many parameters in function definition (only 32766 allowed)"],
- too_many_variables: ["Too many variables declared (only 131071 allowed)"],
+ too_many_arguments: ["Too many arguments in function call (only 65535 allowed)"],
+ too_many_parameters: ["Too many parameters in function definition (only 65535 allowed)"],
+ too_many_variables: ["Too many variables declared (only 4194303 allowed)"],
strict_param_dupe: ["Strict mode function may not have duplicate parameter names"],
strict_octal_literal: ["Octal literals are not allowed in strict mode."],
strict_duplicate_property: ["Duplicate data property in object literal not allowed in strict mode"],
@@ -176,7 +177,8 @@ var kMessages = {
cant_prevent_ext_external_array_elements: ["Cannot prevent extension of an object with external array elements"],
redef_external_array_element: ["Cannot redefine a property of an object with external array elements"],
harmony_const_assign: ["Assignment to constant variable."],
- symbol_to_string: ["Conversion from symbol to string"],
+ symbol_to_string: ["Cannot convert a Symbol value to a string"],
+ symbol_to_primitive: ["Cannot convert a Symbol wrapper object to a primitive value"],
invalid_module_path: ["Module does not export '", "%0", "', or export is not itself a module"],
module_type_error: ["Module '", "%0", "' used improperly"],
module_export_undefined: ["Export '", "%0", "' is not defined in module"]
@@ -786,11 +788,10 @@ function GetStackTraceLine(recv, fun, pos, isGlobal) {
// ----------------------------------------------------------------------------
// Error implementation
-//TODO(rossberg)
-var CallSiteReceiverKey = NEW_PRIVATE("receiver");
-var CallSiteFunctionKey = NEW_PRIVATE("function");
-var CallSitePositionKey = NEW_PRIVATE("position");
-var CallSiteStrictModeKey = NEW_PRIVATE("strict mode");
+var CallSiteReceiverKey = NEW_PRIVATE("CallSite#receiver");
+var CallSiteFunctionKey = NEW_PRIVATE("CallSite#function");
+var CallSitePositionKey = NEW_PRIVATE("CallSite#position");
+var CallSiteStrictModeKey = NEW_PRIVATE("CallSite#strict_mode");
function CallSite(receiver, fun, pos, strict_mode) {
SET_PRIVATE(this, CallSiteReceiverKey, receiver);
@@ -939,14 +940,10 @@ function CallSiteToString() {
if (this.isNative()) {
fileLocation = "native";
} else {
- if (this.isEval()) {
- fileName = this.getScriptNameOrSourceURL();
- if (!fileName) {
- fileLocation = this.getEvalOrigin();
- fileLocation += ", "; // Expecting source position to follow.
- }
- } else {
- fileName = this.getFileName();
+ fileName = this.getScriptNameOrSourceURL();
+ if (!fileName && this.isEval()) {
+ fileLocation = this.getEvalOrigin();
+ fileLocation += ", "; // Expecting source position to follow.
}
if (fileName) {
@@ -1077,15 +1074,15 @@ function FormatErrorString(error) {
function GetStackFrames(raw_stack) {
var frames = new InternalArray();
- var non_strict_frames = raw_stack[0];
+ var sloppy_frames = raw_stack[0];
for (var i = 1; i < raw_stack.length; i += 4) {
var recv = raw_stack[i];
var fun = raw_stack[i + 1];
var code = raw_stack[i + 2];
var pc = raw_stack[i + 3];
var pos = %FunctionGetPositionForOffset(code, pc);
- non_strict_frames--;
- frames.push(new CallSite(recv, fun, pos, (non_strict_frames < 0)));
+ sloppy_frames--;
+ frames.push(new CallSite(recv, fun, pos, (sloppy_frames < 0)));
}
return frames;
}
diff --git a/deps/v8/src/mips/assembler-mips-inl.h b/deps/v8/src/mips/assembler-mips-inl.h
index 514b3aaa4f..f7f4354137 100644
--- a/deps/v8/src/mips/assembler-mips-inl.h
+++ b/deps/v8/src/mips/assembler-mips-inl.h
@@ -128,7 +128,7 @@ void RelocInfo::apply(intptr_t delta) {
Address RelocInfo::target_address() {
ASSERT(IsCodeTarget(rmode_) || IsRuntimeEntry(rmode_));
- return Assembler::target_address_at(pc_);
+ return Assembler::target_address_at(pc_, host_);
}
@@ -156,6 +156,12 @@ Address RelocInfo::target_address_address() {
}
+Address RelocInfo::constant_pool_entry_address() {
+ UNREACHABLE();
+ return NULL;
+}
+
+
int RelocInfo::target_address_size() {
return Assembler::kSpecialTargetSize;
}
@@ -163,7 +169,7 @@ int RelocInfo::target_address_size() {
void RelocInfo::set_target_address(Address target, WriteBarrierMode mode) {
ASSERT(IsCodeTarget(rmode_) || IsRuntimeEntry(rmode_));
- Assembler::set_target_address_at(pc_, target);
+ Assembler::set_target_address_at(pc_, host_, target);
if (mode == UPDATE_WRITE_BARRIER && host() != NULL && IsCodeTarget(rmode_)) {
Object* target_code = Code::GetCodeFromTargetAddress(target);
host()->GetHeap()->incremental_marking()->RecordWriteIntoCode(
@@ -179,21 +185,22 @@ Address Assembler::target_address_from_return_address(Address pc) {
Object* RelocInfo::target_object() {
ASSERT(IsCodeTarget(rmode_) || rmode_ == EMBEDDED_OBJECT);
- return reinterpret_cast<Object*>(Assembler::target_address_at(pc_));
+ return reinterpret_cast<Object*>(Assembler::target_address_at(pc_, host_));
}
Handle<Object> RelocInfo::target_object_handle(Assembler* origin) {
ASSERT(IsCodeTarget(rmode_) || rmode_ == EMBEDDED_OBJECT);
return Handle<Object>(reinterpret_cast<Object**>(
- Assembler::target_address_at(pc_)));
+ Assembler::target_address_at(pc_, host_)));
}
void RelocInfo::set_target_object(Object* target, WriteBarrierMode mode) {
ASSERT(IsCodeTarget(rmode_) || rmode_ == EMBEDDED_OBJECT);
ASSERT(!target->IsConsString());
- Assembler::set_target_address_at(pc_, reinterpret_cast<Address>(target));
+ Assembler::set_target_address_at(pc_, host_,
+ reinterpret_cast<Address>(target));
if (mode == UPDATE_WRITE_BARRIER &&
host() != NULL &&
target->IsHeapObject()) {
@@ -205,7 +212,7 @@ void RelocInfo::set_target_object(Object* target, WriteBarrierMode mode) {
Address RelocInfo::target_reference() {
ASSERT(rmode_ == EXTERNAL_REFERENCE);
- return Assembler::target_address_at(pc_);
+ return Assembler::target_address_at(pc_, host_);
}
@@ -260,13 +267,14 @@ Handle<Object> RelocInfo::code_age_stub_handle(Assembler* origin) {
Code* RelocInfo::code_age_stub() {
ASSERT(rmode_ == RelocInfo::CODE_AGE_SEQUENCE);
return Code::GetCodeFromTargetAddress(
- Assembler::target_address_at(pc_ + Assembler::kInstrSize));
+ Assembler::target_address_at(pc_ + Assembler::kInstrSize, host_));
}
void RelocInfo::set_code_age_stub(Code* stub) {
ASSERT(rmode_ == RelocInfo::CODE_AGE_SEQUENCE);
Assembler::set_target_address_at(pc_ + Assembler::kInstrSize,
+ host_,
stub->instruction_start());
}
@@ -277,7 +285,7 @@ Address RelocInfo::call_address() {
// The pc_ offset of 0 assumes mips patched return sequence per
// debug-mips.cc BreakLocationIterator::SetDebugBreakAtReturn(), or
// debug break slot per BreakLocationIterator::SetDebugBreakAtSlot().
- return Assembler::target_address_at(pc_);
+ return Assembler::target_address_at(pc_, host_);
}
@@ -287,7 +295,7 @@ void RelocInfo::set_call_address(Address target) {
// The pc_ offset of 0 assumes mips patched return sequence per
// debug-mips.cc BreakLocationIterator::SetDebugBreakAtReturn(), or
// debug break slot per BreakLocationIterator::SetDebugBreakAtSlot().
- Assembler::set_target_address_at(pc_, target);
+ Assembler::set_target_address_at(pc_, host_, target);
if (host() != NULL) {
Object* target_code = Code::GetCodeFromTargetAddress(target);
host()->GetHeap()->incremental_marking()->RecordWriteIntoCode(
@@ -318,7 +326,7 @@ void RelocInfo::WipeOut() {
IsCodeTarget(rmode_) ||
IsRuntimeEntry(rmode_) ||
IsExternalReference(rmode_));
- Assembler::set_target_address_at(pc_, NULL);
+ Assembler::set_target_address_at(pc_, host_, NULL);
}
diff --git a/deps/v8/src/mips/assembler-mips.cc b/deps/v8/src/mips/assembler-mips.cc
index f551dd5e10..b659559fee 100644
--- a/deps/v8/src/mips/assembler-mips.cc
+++ b/deps/v8/src/mips/assembler-mips.cc
@@ -213,6 +213,11 @@ bool RelocInfo::IsCodedSpecially() {
}
+bool RelocInfo::IsInConstantPool() {
+ return false;
+}
+
+
// Patch the code at the current address with the supplied instructions.
void RelocInfo::PatchCode(byte* instructions, int instruction_count) {
Instr* pc = reinterpret_cast<Instr*>(pc_);
@@ -313,11 +318,12 @@ Assembler::Assembler(Isolate* isolate, void* buffer, int buffer_size)
trampoline_pool_blocked_nesting_ = 0;
// We leave space (16 * kTrampolineSlotsSize)
// for BlockTrampolinePoolScope buffer.
- next_buffer_check_ = kMaxBranchOffset - kTrampolineSlotsSize * 16;
+ next_buffer_check_ = FLAG_force_long_branches
+ ? kMaxInt : kMaxBranchOffset - kTrampolineSlotsSize * 16;
internal_trampoline_exception_ = false;
last_bound_pos_ = 0;
- trampoline_emitted_ = false;
+ trampoline_emitted_ = FLAG_force_long_branches;
unbound_labels_count_ = 0;
block_buffer_growth_ = false;
@@ -2321,6 +2327,20 @@ void Assembler::JumpLabelToJumpRegister(Address pc) {
}
}
+
+MaybeObject* Assembler::AllocateConstantPool(Heap* heap) {
+ // No out-of-line constant pool support.
+ UNREACHABLE();
+ return NULL;
+}
+
+
+void Assembler::PopulateConstantPool(ConstantPoolArray* constant_pool) {
+ // No out-of-line constant pool support.
+ UNREACHABLE();
+}
+
+
} } // namespace v8::internal
#endif // V8_TARGET_ARCH_MIPS
diff --git a/deps/v8/src/mips/assembler-mips.h b/deps/v8/src/mips/assembler-mips.h
index 70f77eaeda..ea956e1355 100644
--- a/deps/v8/src/mips/assembler-mips.h
+++ b/deps/v8/src/mips/assembler-mips.h
@@ -37,6 +37,7 @@
#define V8_MIPS_ASSEMBLER_MIPS_H_
#include <stdio.h>
+
#include "assembler.h"
#include "constants-mips.h"
#include "serialize.h"
@@ -526,6 +527,26 @@ class Assembler : public AssemblerBase {
// Read/Modify the code target address in the branch/call instruction at pc.
static Address target_address_at(Address pc);
static void set_target_address_at(Address pc, Address target);
+ // On MIPS there is no Constant Pool so we skip that parameter.
+ INLINE(static Address target_address_at(Address pc,
+ ConstantPoolArray* constant_pool)) {
+ return target_address_at(pc);
+ }
+ INLINE(static void set_target_address_at(Address pc,
+ ConstantPoolArray* constant_pool,
+ Address target)) {
+ set_target_address_at(pc, target);
+ }
+ INLINE(static Address target_address_at(Address pc, Code* code)) {
+ ConstantPoolArray* constant_pool = code ? code->constant_pool() : NULL;
+ return target_address_at(pc, constant_pool);
+ }
+ INLINE(static void set_target_address_at(Address pc,
+ Code* code,
+ Address target)) {
+ ConstantPoolArray* constant_pool = code ? code->constant_pool() : NULL;
+ set_target_address_at(pc, constant_pool, target);
+ }
// Return the code target address at a call site from the return address
// of that call in the instruction stream.
@@ -539,9 +560,10 @@ class Assembler : public AssemblerBase {
// This is for calls and branches within generated code. The serializer
// has already deserialized the lui/ori instructions etc.
inline static void deserialization_set_special_target_at(
- Address instruction_payload, Address target) {
+ Address instruction_payload, Code* code, Address target) {
set_target_address_at(
instruction_payload - kInstructionsFor32BitConstant * kInstrSize,
+ code,
target);
}
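The overloads added to assembler-mips.h exist only so MIPS presents the same target_address_at interface as architectures with out-of-line constant pools; the pool argument is accepted and ignored, and the Code* variants look up the (always absent) pool and delegate. A stripped-down sketch of that forwarding pattern, with placeholder Assembler, Code and ConstantPoolArray types standing in for the real ones:

    #include <cstdint>

    using Address = uint8_t*;

    // Hypothetical placeholders: on a target without an out-of-line constant
    // pool, the pool-taking overloads ignore the extra argument and forward
    // to the pc-only implementations.
    struct ConstantPoolArray {};
    struct Code { ConstantPoolArray* constant_pool() { return nullptr; } };

    struct Assembler {
      static Address target_address_at(Address pc) { return pc; /* decode here */ }
      static void set_target_address_at(Address pc, Address target) {
        (void)pc; (void)target;  // patch the instruction stream here
      }

      // Interface-compatible overloads; here they discard the pool and delegate.
      static Address target_address_at(Address pc, ConstantPoolArray*) {
        return target_address_at(pc);
      }
      static void set_target_address_at(Address pc, ConstantPoolArray*,
                                        Address target) {
        set_target_address_at(pc, target);
      }
      static Address target_address_at(Address pc, Code* code) {
        return target_address_at(pc, code ? code->constant_pool() : nullptr);
      }
      static void set_target_address_at(Address pc, Code* code, Address target) {
        set_target_address_at(pc, code ? code->constant_pool() : nullptr, target);
      }
    };

    int main() {
      uint8_t insn = 0;
      Assembler::set_target_address_at(&insn, static_cast<Code*>(nullptr), &insn);
      return Assembler::target_address_at(&insn, static_cast<Code*>(nullptr)) == &insn
                 ? 0 : 1;
    }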
@@ -984,6 +1006,12 @@ class Assembler : public AssemblerBase {
void CheckTrampolinePool();
+ // Allocate a constant pool of the correct size for the generated code.
+ MaybeObject* AllocateConstantPool(Heap* heap);
+
+ // Generate the constant pool for the generated code.
+ void PopulateConstantPool(ConstantPoolArray* constant_pool);
+
protected:
// Relocation for a type-recording IC has the AST id added to it. This
// member variable is a way to pass the information from the call site to
diff --git a/deps/v8/src/mips/builtins-mips.cc b/deps/v8/src/mips/builtins-mips.cc
index 7a097a35a5..03d6cc80d6 100644
--- a/deps/v8/src/mips/builtins-mips.cc
+++ b/deps/v8/src/mips/builtins-mips.cc
@@ -163,10 +163,7 @@ void Builtins::Generate_ArrayCode(MacroAssembler* masm) {
// Run the native code for the Array function called as a normal function.
// Tail call a stub.
- Handle<Object> undefined_sentinel(
- masm->isolate()->heap()->undefined_value(),
- masm->isolate());
- __ li(a2, Operand(undefined_sentinel));
+ __ LoadRoot(a2, Heap::kUndefinedValueRootIndex);
ArrayConstructorStub stub(masm->isolate());
__ TailCallStub(&stub);
}
@@ -335,7 +332,7 @@ void Builtins::Generate_InOptimizationQueue(MacroAssembler* masm) {
__ LoadRoot(t0, Heap::kStackLimitRootIndex);
__ Branch(&ok, hs, sp, Operand(t0));
- CallRuntimePassFunction(masm, Runtime::kTryInstallOptimizedCode);
+ CallRuntimePassFunction(masm, Runtime::kHiddenTryInstallOptimizedCode);
GenerateTailCallToReturnedCode(masm);
__ bind(&ok);
@@ -345,10 +342,12 @@ void Builtins::Generate_InOptimizationQueue(MacroAssembler* masm) {
static void Generate_JSConstructStubHelper(MacroAssembler* masm,
bool is_api_function,
- bool count_constructions) {
+ bool count_constructions,
+ bool create_memento) {
// ----------- S t a t e -------------
// -- a0 : number of arguments
// -- a1 : constructor function
+ // -- a2 : allocation site or undefined
// -- ra : return address
// -- sp[...]: constructor arguments
// -----------------------------------
@@ -356,6 +355,12 @@ static void Generate_JSConstructStubHelper(MacroAssembler* masm,
// Should never count constructions for api objects.
ASSERT(!is_api_function || !count_constructions);
+ // Should never create mementos for api functions.
+ ASSERT(!is_api_function || !create_memento);
+
+ // Should never create mementos before slack tracking is finished.
+ ASSERT(!count_constructions || !create_memento);
+
Isolate* isolate = masm->isolate();
// ----------- S t a t e -------------
@@ -369,6 +374,11 @@ static void Generate_JSConstructStubHelper(MacroAssembler* masm,
{
FrameScope scope(masm, StackFrame::CONSTRUCT);
+ if (create_memento) {
+ __ AssertUndefinedOrAllocationSite(a2, a3);
+ __ push(a2);
+ }
+
// Preserve the two incoming parameters on the stack.
__ sll(a0, a0, kSmiTagSize); // Tag arguments count.
__ MultiPushReversed(a0.bit() | a1.bit());
@@ -417,7 +427,7 @@ static void Generate_JSConstructStubHelper(MacroAssembler* masm,
__ Push(a1, a2, a1); // a1 = Constructor.
// The call will replace the stub, so the countdown is only done once.
- __ CallRuntime(Runtime::kFinalizeInstanceSize, 1);
+ __ CallRuntime(Runtime::kHiddenFinalizeInstanceSize, 1);
__ Pop(a1, a2);
@@ -428,13 +438,17 @@ static void Generate_JSConstructStubHelper(MacroAssembler* masm,
// a1: constructor function
// a2: initial map
__ lbu(a3, FieldMemOperand(a2, Map::kInstanceSizeOffset));
+ if (create_memento) {
+ __ Addu(a3, a3, Operand(AllocationMemento::kSize / kPointerSize));
+ }
+
__ Allocate(a3, t4, t5, t6, &rt_call, SIZE_IN_WORDS);
// Allocated the JSObject, now initialize the fields. Map is set to
// initial map and properties and elements are set to empty fixed array.
// a1: constructor function
// a2: initial map
- // a3: object size
+ // a3: object size (not including memento if create_memento)
// t4: JSObject (not tagged)
__ LoadRoot(t6, Heap::kEmptyFixedArrayRootIndex);
__ mov(t5, t4);
@@ -449,19 +463,20 @@ static void Generate_JSConstructStubHelper(MacroAssembler* masm,
// Fill all the in-object properties with appropriate filler.
// a1: constructor function
// a2: initial map
- // a3: object size (in words)
+ // a3: object size (in words, including memento if create_memento)
// t4: JSObject (not tagged)
// t5: First in-object property of JSObject (not tagged)
- __ sll(t0, a3, kPointerSizeLog2);
- __ addu(t6, t4, t0); // End of object.
ASSERT_EQ(3 * kPointerSize, JSObject::kHeaderSize);
- __ LoadRoot(t7, Heap::kUndefinedValueRootIndex);
+
if (count_constructions) {
+ __ LoadRoot(t7, Heap::kUndefinedValueRootIndex);
__ lw(a0, FieldMemOperand(a2, Map::kInstanceSizesOffset));
__ Ext(a0, a0, Map::kPreAllocatedPropertyFieldsByte * kBitsPerByte,
kBitsPerByte);
- __ sll(t0, a0, kPointerSizeLog2);
- __ addu(a0, t5, t0);
+ __ sll(at, a0, kPointerSizeLog2);
+ __ addu(a0, t5, at);
+ __ sll(at, a3, kPointerSizeLog2);
+ __ Addu(t6, t4, Operand(at)); // End of object.
// a0: offset of first field after pre-allocated fields
if (FLAG_debug_code) {
__ Assert(le, kUnexpectedNumberOfPreAllocatedPropertyFields,
@@ -470,8 +485,31 @@ static void Generate_JSConstructStubHelper(MacroAssembler* masm,
__ InitializeFieldsWithFiller(t5, a0, t7);
// To allow for truncation.
__ LoadRoot(t7, Heap::kOnePointerFillerMapRootIndex);
+ __ InitializeFieldsWithFiller(t5, t6, t7);
+ } else if (create_memento) {
+ __ Subu(t7, a3, Operand(AllocationMemento::kSize / kPointerSize));
+ __ sll(at, t7, kPointerSizeLog2);
+ __ Addu(a0, t4, Operand(at)); // End of object.
+ __ LoadRoot(t7, Heap::kUndefinedValueRootIndex);
+ __ InitializeFieldsWithFiller(t5, a0, t7);
+
+ // Fill in memento fields.
+ // t5: points to the allocated but uninitialized memento.
+ __ LoadRoot(t7, Heap::kAllocationMementoMapRootIndex);
+ ASSERT_EQ(0 * kPointerSize, AllocationMemento::kMapOffset);
+ __ sw(t7, MemOperand(t5));
+ __ Addu(t5, t5, kPointerSize);
+ // Load the AllocationSite.
+ __ lw(t7, MemOperand(sp, 2 * kPointerSize));
+ ASSERT_EQ(1 * kPointerSize, AllocationMemento::kAllocationSiteOffset);
+ __ sw(t7, MemOperand(t5));
+ __ Addu(t5, t5, kPointerSize);
+ } else {
+ __ LoadRoot(t7, Heap::kUndefinedValueRootIndex);
+ __ sll(at, a3, kPointerSizeLog2);
+ __ Addu(a0, t4, Operand(at)); // End of object.
+ __ InitializeFieldsWithFiller(t5, a0, t7);
}
- __ InitializeFieldsWithFiller(t5, t6, t7);
// Add the object tag to make the JSObject real, so that we can continue
// and jump into the continuation code at any time from now on. Any
@@ -575,15 +613,48 @@ static void Generate_JSConstructStubHelper(MacroAssembler* masm,
__ UndoAllocationInNewSpace(t4, t5);
}
- __ bind(&rt_call);
// Allocate the new receiver object using the runtime call.
// a1: constructor function
+ __ bind(&rt_call);
+ if (create_memento) {
+ // Get the cell or allocation site.
+ __ lw(a2, MemOperand(sp, 2 * kPointerSize));
+ __ push(a2);
+ }
+
__ push(a1); // Argument for Runtime_NewObject.
- __ CallRuntime(Runtime::kNewObject, 1);
+ if (create_memento) {
+ __ CallRuntime(Runtime::kHiddenNewObjectWithAllocationSite, 2);
+ } else {
+ __ CallRuntime(Runtime::kHiddenNewObject, 1);
+ }
__ mov(t4, v0);
+ // If we ended up using the runtime, and we want a memento, then the
+ // runtime call made it for us, and we shouldn't do create count
+ // increment.
+ Label count_incremented;
+ if (create_memento) {
+ __ jmp(&count_incremented);
+ }
+
// Receiver for constructor call allocated.
// t4: JSObject
+
+ if (create_memento) {
+ __ lw(a2, MemOperand(sp, kPointerSize * 2));
+ __ LoadRoot(t5, Heap::kUndefinedValueRootIndex);
+ __ Branch(&count_incremented, eq, a2, Operand(t5));
+ // a2 is an AllocationSite. We are creating a memento from it, so we
+ // need to increment the memento create count.
+ __ lw(a3, FieldMemOperand(a2,
+ AllocationSite::kPretenureCreateCountOffset));
+ __ Addu(a3, a3, Operand(Smi::FromInt(1)));
+ __ sw(a3, FieldMemOperand(a2,
+ AllocationSite::kPretenureCreateCountOffset));
+ __ bind(&count_incremented);
+ }
+
__ bind(&allocated);
__ Push(t4, t4);
@@ -685,17 +756,17 @@ static void Generate_JSConstructStubHelper(MacroAssembler* masm,
void Builtins::Generate_JSConstructStubCountdown(MacroAssembler* masm) {
- Generate_JSConstructStubHelper(masm, false, true);
+ Generate_JSConstructStubHelper(masm, false, true, false);
}
void Builtins::Generate_JSConstructStubGeneric(MacroAssembler* masm) {
- Generate_JSConstructStubHelper(masm, false, false);
+ Generate_JSConstructStubHelper(masm, false, false, FLAG_pretenuring_call_new);
}
void Builtins::Generate_JSConstructStubApi(MacroAssembler* masm) {
- Generate_JSConstructStubHelper(masm, true, false);
+ Generate_JSConstructStubHelper(masm, true, false, false);
}
@@ -757,9 +828,7 @@ static void Generate_JSEntryTrampolineHelper(MacroAssembler* masm,
__ mov(a0, a3);
if (is_construct) {
// No type feedback cell is available
- Handle<Object> undefined_sentinel(
- masm->isolate()->heap()->undefined_value(), masm->isolate());
- __ li(a2, Operand(undefined_sentinel));
+ __ LoadRoot(a2, Heap::kUndefinedValueRootIndex);
CallConstructStub stub(NO_CALL_FUNCTION_FLAGS);
__ CallStub(&stub);
} else {
@@ -785,7 +854,7 @@ void Builtins::Generate_JSConstructEntryTrampoline(MacroAssembler* masm) {
void Builtins::Generate_CompileUnoptimized(MacroAssembler* masm) {
- CallRuntimePassFunction(masm, Runtime::kCompileUnoptimized);
+ CallRuntimePassFunction(masm, Runtime::kHiddenCompileUnoptimized);
GenerateTailCallToReturnedCode(masm);
}
@@ -798,7 +867,7 @@ static void CallCompileOptimized(MacroAssembler* masm, bool concurrent) {
// Whether to compile in a background thread.
__ Push(masm->isolate()->factory()->ToBoolean(concurrent));
- __ CallRuntime(Runtime::kCompileOptimized, 2);
+ __ CallRuntime(Runtime::kHiddenCompileOptimized, 2);
// Restore receiver.
__ Pop(a1);
}
@@ -907,7 +976,7 @@ static void Generate_NotifyStubFailureHelper(MacroAssembler* masm,
// registers.
__ MultiPush(kJSCallerSaved | kCalleeSaved);
// Pass the function and deoptimization type to the runtime system.
- __ CallRuntime(Runtime::kNotifyStubFailure, 0, save_doubles);
+ __ CallRuntime(Runtime::kHiddenNotifyStubFailure, 0, save_doubles);
__ MultiPop(kJSCallerSaved | kCalleeSaved);
}
@@ -933,7 +1002,7 @@ static void Generate_NotifyDeoptimizedHelper(MacroAssembler* masm,
// Pass the function and deoptimization type to the runtime system.
__ li(a0, Operand(Smi::FromInt(static_cast<int>(type))));
__ push(a0);
- __ CallRuntime(Runtime::kNotifyDeoptimized, 1);
+ __ CallRuntime(Runtime::kHiddenNotifyDeoptimized, 1);
}
// Get the full codegen state from the stack and untag it -> t2.
@@ -1015,7 +1084,7 @@ void Builtins::Generate_OsrAfterStackCheck(MacroAssembler* masm) {
__ Branch(&ok, hs, sp, Operand(at));
{
FrameScope scope(masm, StackFrame::INTERNAL);
- __ CallRuntime(Runtime::kStackGuard, 0);
+ __ CallRuntime(Runtime::kHiddenStackGuard, 0);
}
__ Jump(masm->isolate()->builtins()->OnStackReplacement(),
RelocInfo::CODE_TARGET);
@@ -1067,7 +1136,7 @@ void Builtins::Generate_FunctionCall(MacroAssembler* masm) {
__ And(t3, a3, Operand(1 << (SharedFunctionInfo::kNative + kSmiTagSize)));
__ Branch(&shift_arguments, ne, t3, Operand(zero_reg));
- // Compute the receiver in non-strict mode.
+ // Compute the receiver in sloppy mode.
// Load first argument in a2. a2 = -kPointerSize(sp + n_args << 2).
__ sll(at, a0, kPointerSizeLog2);
__ addu(a2, sp, at);
@@ -1270,7 +1339,7 @@ void Builtins::Generate_FunctionApply(MacroAssembler* masm) {
__ And(t3, a2, Operand(1 << (SharedFunctionInfo::kNative + kSmiTagSize)));
__ Branch(&push_receiver, ne, t3, Operand(zero_reg));
- // Compute the receiver in non-strict mode.
+ // Compute the receiver in sloppy mode.
__ JumpIfSmi(a0, &call_to_object);
__ LoadRoot(a1, Heap::kNullValueRootIndex);
__ Branch(&use_global_receiver, eq, a0, Operand(a1));
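When create_memento is set, the construct stub reserves AllocationMemento::kSize extra words, fills the object proper with undefined, and then writes the memento map followed by the AllocationSite pointer directly after the object's last in-object field. A simplified host-side model of that layout, using plain words in place of tagged heap values (all names here are hypothetical):

    #include <cstddef>
    #include <cstdint>
    #include <vector>

    // Hypothetical word-sized stand-ins for tagged heap values.
    using Word = uintptr_t;
    constexpr Word kUndefined = 0;
    constexpr Word kMementoMap = 1;

    // Lays out |object_words| of undefined filler followed by a two-word
    // memento (map, allocation site), mirroring the fast-path layout.
    std::vector<Word> AllocateWithMemento(size_t object_words, Word allocation_site) {
      std::vector<Word> memory(object_words + 2);   // object + memento
      for (size_t i = 0; i < object_words; ++i) {
        memory[i] = kUndefined;                     // in-object properties
      }
      memory[object_words] = kMementoMap;           // AllocationMemento map
      memory[object_words + 1] = allocation_site;   // back-pointer to the site
      return memory;
    }

    int main() {
      std::vector<Word> obj = AllocateWithMemento(3, /*allocation_site=*/42);
      return (obj[3] == kMementoMap && obj[4] == 42) ? 0 : 1;
    }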
diff --git a/deps/v8/src/mips/code-stubs-mips.cc b/deps/v8/src/mips/code-stubs-mips.cc
index e38f181911..332ed4b6ab 100644
--- a/deps/v8/src/mips/code-stubs-mips.cc
+++ b/deps/v8/src/mips/code-stubs-mips.cc
@@ -46,7 +46,7 @@ void FastNewClosureStub::InitializeInterfaceDescriptor(
descriptor->register_param_count_ = 1;
descriptor->register_params_ = registers;
descriptor->deoptimization_handler_ =
- Runtime::FunctionForId(Runtime::kNewClosureFromStubFailure)->entry;
+ Runtime::FunctionForId(Runtime::kHiddenNewClosureFromStubFailure)->entry;
}
@@ -77,7 +77,7 @@ void NumberToStringStub::InitializeInterfaceDescriptor(
descriptor->register_param_count_ = 1;
descriptor->register_params_ = registers;
descriptor->deoptimization_handler_ =
- Runtime::FunctionForId(Runtime::kNumberToString)->entry;
+ Runtime::FunctionForId(Runtime::kHiddenNumberToString)->entry;
}
@@ -88,7 +88,8 @@ void FastCloneShallowArrayStub::InitializeInterfaceDescriptor(
descriptor->register_param_count_ = 3;
descriptor->register_params_ = registers;
descriptor->deoptimization_handler_ =
- Runtime::FunctionForId(Runtime::kCreateArrayLiteralStubBailout)->entry;
+ Runtime::FunctionForId(
+ Runtime::kHiddenCreateArrayLiteralStubBailout)->entry;
}
@@ -99,15 +100,15 @@ void FastCloneShallowObjectStub::InitializeInterfaceDescriptor(
descriptor->register_param_count_ = 4;
descriptor->register_params_ = registers;
descriptor->deoptimization_handler_ =
- Runtime::FunctionForId(Runtime::kCreateObjectLiteral)->entry;
+ Runtime::FunctionForId(Runtime::kHiddenCreateObjectLiteral)->entry;
}
void CreateAllocationSiteStub::InitializeInterfaceDescriptor(
Isolate* isolate,
CodeStubInterfaceDescriptor* descriptor) {
- static Register registers[] = { a2 };
- descriptor->register_param_count_ = 1;
+ static Register registers[] = { a2, a3 };
+ descriptor->register_param_count_ = 2;
descriptor->register_params_ = registers;
descriptor->deoptimization_handler_ = NULL;
}
@@ -142,7 +143,7 @@ void RegExpConstructResultStub::InitializeInterfaceDescriptor(
descriptor->register_param_count_ = 3;
descriptor->register_params_ = registers;
descriptor->deoptimization_handler_ =
- Runtime::FunctionForId(Runtime::kRegExpConstructResult)->entry;
+ Runtime::FunctionForId(Runtime::kHiddenRegExpConstructResult)->entry;
}
@@ -166,6 +167,26 @@ void KeyedLoadFieldStub::InitializeInterfaceDescriptor(
}
+void StringLengthStub::InitializeInterfaceDescriptor(
+ Isolate* isolate,
+ CodeStubInterfaceDescriptor* descriptor) {
+ static Register registers[] = { a0, a2 };
+ descriptor->register_param_count_ = 2;
+ descriptor->register_params_ = registers;
+ descriptor->deoptimization_handler_ = NULL;
+}
+
+
+void KeyedStringLengthStub::InitializeInterfaceDescriptor(
+ Isolate* isolate,
+ CodeStubInterfaceDescriptor* descriptor) {
+ static Register registers[] = { a1, a0 };
+ descriptor->register_param_count_ = 2;
+ descriptor->register_params_ = registers;
+ descriptor->deoptimization_handler_ = NULL;
+}
+
+
void KeyedStoreFastElementStub::InitializeInterfaceDescriptor(
Isolate* isolate,
CodeStubInterfaceDescriptor* descriptor) {
@@ -227,7 +248,7 @@ static void InitializeArrayConstructorDescriptor(
descriptor->hint_stack_parameter_count_ = constant_stack_parameter_count;
descriptor->function_mode_ = JS_FUNCTION_STUB_MODE;
descriptor->deoptimization_handler_ =
- Runtime::FunctionForId(Runtime::kArrayConstructor)->entry;
+ Runtime::FunctionForId(Runtime::kHiddenArrayConstructor)->entry;
}
@@ -255,7 +276,7 @@ static void InitializeInternalArrayConstructorDescriptor(
descriptor->hint_stack_parameter_count_ = constant_stack_parameter_count;
descriptor->function_mode_ = JS_FUNCTION_STUB_MODE;
descriptor->deoptimization_handler_ =
- Runtime::FunctionForId(Runtime::kInternalArrayConstructor)->entry;
+ Runtime::FunctionForId(Runtime::kHiddenInternalArrayConstructor)->entry;
}
@@ -366,7 +387,7 @@ void StringAddStub::InitializeInterfaceDescriptor(
descriptor->register_param_count_ = 2;
descriptor->register_params_ = registers;
descriptor->deoptimization_handler_ =
- Runtime::FunctionForId(Runtime::kStringAdd)->entry;
+ Runtime::FunctionForId(Runtime::kHiddenStringAdd)->entry;
}
@@ -1586,21 +1607,9 @@ void CEntryStub::GenerateAheadOfTime(Isolate* isolate) {
}
-static void JumpIfOOM(MacroAssembler* masm,
- Register value,
- Register scratch,
- Label* oom_label) {
- STATIC_ASSERT(Failure::OUT_OF_MEMORY_EXCEPTION == 3);
- STATIC_ASSERT(kFailureTag == 3);
- __ andi(scratch, value, 0xf);
- __ Branch(oom_label, eq, scratch, Operand(0xf));
-}
-
-
void CEntryStub::GenerateCore(MacroAssembler* masm,
Label* throw_normal_exception,
Label* throw_termination_exception,
- Label* throw_out_of_memory_exception,
bool do_gc,
bool always_allocate) {
// v0: result parameter for PerformGC, if any
@@ -1703,17 +1712,11 @@ void CEntryStub::GenerateCore(MacroAssembler* masm,
__ andi(t0, v0, ((1 << kFailureTypeTagSize) - 1) << kFailureTagSize);
__ Branch(&retry, eq, t0, Operand(zero_reg));
- // Special handling of out of memory exceptions.
- JumpIfOOM(masm, v0, t0, throw_out_of_memory_exception);
-
// Retrieve the pending exception.
__ li(t0, Operand(ExternalReference(Isolate::kPendingExceptionAddress,
isolate)));
__ lw(v0, MemOperand(t0));
- // See if we just retrieved an OOM exception.
- JumpIfOOM(masm, v0, t0, throw_out_of_memory_exception);
-
// Clear the pending exception.
__ li(a3, Operand(isolate->factory()->the_hole_value()));
__ li(t0, Operand(ExternalReference(Isolate::kPendingExceptionAddress,
@@ -1767,13 +1770,11 @@ void CEntryStub::Generate(MacroAssembler* masm) {
Label throw_normal_exception;
Label throw_termination_exception;
- Label throw_out_of_memory_exception;
// Call into the runtime system.
GenerateCore(masm,
&throw_normal_exception,
&throw_termination_exception,
- &throw_out_of_memory_exception,
false,
false);
@@ -1781,7 +1782,6 @@ void CEntryStub::Generate(MacroAssembler* masm) {
GenerateCore(masm,
&throw_normal_exception,
&throw_termination_exception,
- &throw_out_of_memory_exception,
true,
false);
@@ -1791,29 +1791,14 @@ void CEntryStub::Generate(MacroAssembler* masm) {
GenerateCore(masm,
&throw_normal_exception,
&throw_termination_exception,
- &throw_out_of_memory_exception,
true,
true);
- __ bind(&throw_out_of_memory_exception);
- // Set external caught exception to false.
- Isolate* isolate = masm->isolate();
- ExternalReference external_caught(Isolate::kExternalCaughtExceptionAddress,
- isolate);
- __ li(a0, Operand(false, RelocInfo::NONE32));
- __ li(a2, Operand(external_caught));
- __ sw(a0, MemOperand(a2));
-
- // Set pending exception and v0 to out of memory exception.
- Label already_have_failure;
- JumpIfOOM(masm, v0, t0, &already_have_failure);
- Failure* out_of_memory = Failure::OutOfMemoryException(0x1);
- __ li(v0, Operand(reinterpret_cast<int32_t>(out_of_memory)));
- __ bind(&already_have_failure);
- __ li(a2, Operand(ExternalReference(Isolate::kPendingExceptionAddress,
- isolate)));
- __ sw(v0, MemOperand(a2));
- // Fall through to the next label.
+ { FrameScope scope(masm, StackFrame::MANUAL);
+ __ PrepareCallCFunction(0, v0);
+ __ CallCFunction(
+ ExternalReference::out_of_memory_function(masm->isolate()), 0);
+ }
__ bind(&throw_termination_exception);
__ ThrowUncatchable(v0);
@@ -2204,108 +2189,6 @@ void FunctionPrototypeStub::Generate(MacroAssembler* masm) {
}
-void StringLengthStub::Generate(MacroAssembler* masm) {
- Label miss;
- Register receiver;
- if (kind() == Code::KEYED_LOAD_IC) {
- // ----------- S t a t e -------------
- // -- ra : return address
- // -- a0 : key
- // -- a1 : receiver
- // -----------------------------------
- __ Branch(&miss, ne, a0,
- Operand(masm->isolate()->factory()->length_string()));
- receiver = a1;
- } else {
- ASSERT(kind() == Code::LOAD_IC);
- // ----------- S t a t e -------------
- // -- a2 : name
- // -- ra : return address
- // -- a0 : receiver
- // -- sp[0] : receiver
- // -----------------------------------
- receiver = a0;
- }
-
- StubCompiler::GenerateLoadStringLength(masm, receiver, a3, t0, &miss);
-
- __ bind(&miss);
- StubCompiler::TailCallBuiltin(
- masm, BaseLoadStoreStubCompiler::MissBuiltin(kind()));
-}
-
-
-void StoreArrayLengthStub::Generate(MacroAssembler* masm) {
- // This accepts as a receiver anything JSArray::SetElementsLength accepts
- // (currently anything except for external arrays which means anything with
- // elements of FixedArray type). Value must be a number, but only smis are
- // accepted as the most common case.
- Label miss;
-
- Register receiver;
- Register value;
- if (kind() == Code::KEYED_STORE_IC) {
- // ----------- S t a t e -------------
- // -- ra : return address
- // -- a0 : value
- // -- a1 : key
- // -- a2 : receiver
- // -----------------------------------
- __ Branch(&miss, ne, a1,
- Operand(masm->isolate()->factory()->length_string()));
- receiver = a2;
- value = a0;
- } else {
- ASSERT(kind() == Code::STORE_IC);
- // ----------- S t a t e -------------
- // -- ra : return address
- // -- a0 : value
- // -- a1 : receiver
- // -- a2 : key
- // -----------------------------------
- receiver = a1;
- value = a0;
- }
- Register scratch = a3;
-
- // Check that the receiver isn't a smi.
- __ JumpIfSmi(receiver, &miss);
-
- // Check that the object is a JS array.
- __ GetObjectType(receiver, scratch, scratch);
- __ Branch(&miss, ne, scratch, Operand(JS_ARRAY_TYPE));
-
- // Check that elements are FixedArray.
- // We rely on StoreIC_ArrayLength below to deal with all types of
- // fast elements (including COW).
- __ lw(scratch, FieldMemOperand(receiver, JSArray::kElementsOffset));
- __ GetObjectType(scratch, scratch, scratch);
- __ Branch(&miss, ne, scratch, Operand(FIXED_ARRAY_TYPE));
-
- // Check that the array has fast properties, otherwise the length
- // property might have been redefined.
- __ lw(scratch, FieldMemOperand(receiver, JSArray::kPropertiesOffset));
- __ lw(scratch, FieldMemOperand(scratch, FixedArray::kMapOffset));
- __ LoadRoot(at, Heap::kHashTableMapRootIndex);
- __ Branch(&miss, eq, scratch, Operand(at));
-
- // Check that value is a smi.
- __ JumpIfNotSmi(value, &miss);
-
- // Prepare tail call to StoreIC_ArrayLength.
- __ Push(receiver, value);
-
- ExternalReference ref =
- ExternalReference(IC_Utility(IC::kStoreIC_ArrayLength), masm->isolate());
- __ TailCallExternalReference(ref, 2, 1);
-
- __ bind(&miss);
-
- StubCompiler::TailCallBuiltin(
- masm, BaseLoadStoreStubCompiler::MissBuiltin(kind()));
-}
-
-
Register InstanceofStub::left() { return a0; }
@@ -2365,7 +2248,7 @@ void ArgumentsAccessStub::GenerateReadElement(MacroAssembler* masm) {
}
-void ArgumentsAccessStub::GenerateNewNonStrictSlow(MacroAssembler* masm) {
+void ArgumentsAccessStub::GenerateNewSloppySlow(MacroAssembler* masm) {
// sp[0] : number of parameters
// sp[4] : receiver displacement
// sp[8] : function
@@ -2387,11 +2270,11 @@ void ArgumentsAccessStub::GenerateNewNonStrictSlow(MacroAssembler* masm) {
__ sw(a3, MemOperand(sp, 1 * kPointerSize));
__ bind(&runtime);
- __ TailCallRuntime(Runtime::kNewArgumentsFast, 3, 1);
+ __ TailCallRuntime(Runtime::kHiddenNewArgumentsFast, 3, 1);
}
-void ArgumentsAccessStub::GenerateNewNonStrictFast(MacroAssembler* masm) {
+void ArgumentsAccessStub::GenerateNewSloppyFast(MacroAssembler* masm) {
// Stack layout:
// sp[0] : number of parameters (tagged)
// sp[4] : address of receiver argument
@@ -2455,7 +2338,7 @@ void ArgumentsAccessStub::GenerateNewNonStrictFast(MacroAssembler* masm) {
__ Addu(t5, t5, Operand(FixedArray::kHeaderSize));
// 3. Arguments object.
- __ Addu(t5, t5, Operand(Heap::kArgumentsObjectSize));
+ __ Addu(t5, t5, Operand(Heap::kSloppyArgumentsObjectSize));
// Do the allocation of all three objects in one go.
__ Allocate(t5, v0, a3, t0, &runtime, TAG_OBJECT);
@@ -2464,7 +2347,7 @@ void ArgumentsAccessStub::GenerateNewNonStrictFast(MacroAssembler* masm) {
// a2 = argument count (tagged)
// Get the arguments boilerplate from the current native context into t0.
const int kNormalOffset =
- Context::SlotOffset(Context::ARGUMENTS_BOILERPLATE_INDEX);
+ Context::SlotOffset(Context::SLOPPY_ARGUMENTS_BOILERPLATE_INDEX);
const int kAliasedOffset =
Context::SlotOffset(Context::ALIASED_ARGUMENTS_BOILERPLATE_INDEX);
@@ -2505,7 +2388,7 @@ void ArgumentsAccessStub::GenerateNewNonStrictFast(MacroAssembler* masm) {
// Set up the elements pointer in the allocated arguments object.
// If we allocated a parameter map, t0 will point there, otherwise
// it will point to the backing store.
- __ Addu(t0, v0, Operand(Heap::kArgumentsObjectSize));
+ __ Addu(t0, v0, Operand(Heap::kSloppyArgumentsObjectSize));
__ sw(t0, FieldMemOperand(v0, JSObject::kElementsOffset));
// v0 = address of new object (tagged)
@@ -2523,7 +2406,7 @@ void ArgumentsAccessStub::GenerateNewNonStrictFast(MacroAssembler* masm) {
__ Branch(&skip_parameter_map, eq, a1, Operand(Smi::FromInt(0)));
- __ LoadRoot(t2, Heap::kNonStrictArgumentsElementsMapRootIndex);
+ __ LoadRoot(t2, Heap::kSloppyArgumentsElementsMapRootIndex);
__ sw(t2, FieldMemOperand(t0, FixedArray::kMapOffset));
__ Addu(t2, a1, Operand(Smi::FromInt(2)));
__ sw(t2, FieldMemOperand(t0, FixedArray::kLengthOffset));
@@ -2606,7 +2489,7 @@ void ArgumentsAccessStub::GenerateNewNonStrictFast(MacroAssembler* masm) {
// a2 = argument count (tagged)
__ bind(&runtime);
__ sw(a2, MemOperand(sp, 0 * kPointerSize)); // Patch argument count.
- __ TailCallRuntime(Runtime::kNewArgumentsFast, 3, 1);
+ __ TailCallRuntime(Runtime::kHiddenNewArgumentsFast, 3, 1);
}
@@ -2646,7 +2529,7 @@ void ArgumentsAccessStub::GenerateNewStrict(MacroAssembler* masm) {
__ Addu(a1, a1, Operand(FixedArray::kHeaderSize / kPointerSize));
__ bind(&add_arguments_object);
- __ Addu(a1, a1, Operand(Heap::kArgumentsObjectSizeStrict / kPointerSize));
+ __ Addu(a1, a1, Operand(Heap::kStrictArgumentsObjectSize / kPointerSize));
// Do the allocation of both objects in one go.
__ Allocate(a1, v0, a2, a3, &runtime,
@@ -2656,7 +2539,7 @@ void ArgumentsAccessStub::GenerateNewStrict(MacroAssembler* masm) {
__ lw(t0, MemOperand(cp, Context::SlotOffset(Context::GLOBAL_OBJECT_INDEX)));
__ lw(t0, FieldMemOperand(t0, GlobalObject::kNativeContextOffset));
__ lw(t0, MemOperand(t0, Context::SlotOffset(
- Context::STRICT_MODE_ARGUMENTS_BOILERPLATE_INDEX)));
+ Context::STRICT_ARGUMENTS_BOILERPLATE_INDEX)));
// Copy the JS object part.
__ CopyFields(v0, t0, a3.bit(), JSObject::kHeaderSize / kPointerSize);
@@ -2675,7 +2558,7 @@ void ArgumentsAccessStub::GenerateNewStrict(MacroAssembler* masm) {
// Set up the elements pointer in the allocated arguments object and
// initialize the header in the elements fixed array.
- __ Addu(t0, v0, Operand(Heap::kArgumentsObjectSizeStrict));
+ __ Addu(t0, v0, Operand(Heap::kStrictArgumentsObjectSize));
__ sw(t0, FieldMemOperand(v0, JSObject::kElementsOffset));
__ LoadRoot(a3, Heap::kFixedArrayMapRootIndex);
__ sw(a3, FieldMemOperand(t0, FixedArray::kMapOffset));
@@ -2704,7 +2587,7 @@ void ArgumentsAccessStub::GenerateNewStrict(MacroAssembler* masm) {
// Do the runtime call to allocate the arguments object.
__ bind(&runtime);
- __ TailCallRuntime(Runtime::kNewStrictArgumentsFast, 3, 1);
+ __ TailCallRuntime(Runtime::kHiddenNewStrictArgumentsFast, 3, 1);
}
@@ -2713,7 +2596,7 @@ void RegExpExecStub::Generate(MacroAssembler* masm) {
// time or if regexp entry in generated code is turned off runtime switch or
// at compilation.
#ifdef V8_INTERPRETED_REGEXP
- __ TailCallRuntime(Runtime::kRegExpExec, 4, 1);
+ __ TailCallRuntime(Runtime::kHiddenRegExpExec, 4, 1);
#else // V8_INTERPRETED_REGEXP
// Stack frame on entry.
@@ -3105,7 +2988,7 @@ void RegExpExecStub::Generate(MacroAssembler* masm) {
// Do the runtime call to execute the regexp.
__ bind(&runtime);
- __ TailCallRuntime(Runtime::kRegExpExec, 4, 1);
+ __ TailCallRuntime(Runtime::kHiddenRegExpExec, 4, 1);
// Deferred code for string handling.
// (6) Not a long external string? If yes, go to (8).
@@ -3152,83 +3035,101 @@ void RegExpExecStub::Generate(MacroAssembler* masm) {
static void GenerateRecordCallTarget(MacroAssembler* masm) {
- // Cache the called function in a global property cell. Cache states
+ // Cache the called function in a feedback vector slot. Cache states
// are uninitialized, monomorphic (indicated by a JSFunction), and
// megamorphic.
// a0 : number of arguments to the construct function
// a1 : the function to call
- // a2 : cache cell for call target
+ // a2 : Feedback vector
+ // a3 : slot in feedback vector (Smi)
Label initialize, done, miss, megamorphic, not_array_function;
- ASSERT_EQ(*TypeFeedbackCells::MegamorphicSentinel(masm->isolate()),
- masm->isolate()->heap()->undefined_value());
- ASSERT_EQ(*TypeFeedbackCells::UninitializedSentinel(masm->isolate()),
- masm->isolate()->heap()->the_hole_value());
+ ASSERT_EQ(*TypeFeedbackInfo::MegamorphicSentinel(masm->isolate()),
+ masm->isolate()->heap()->megamorphic_symbol());
+ ASSERT_EQ(*TypeFeedbackInfo::UninitializedSentinel(masm->isolate()),
+ masm->isolate()->heap()->uninitialized_symbol());
- // Load the cache state into a3.
- __ lw(a3, FieldMemOperand(a2, Cell::kValueOffset));
+ // Load the cache state into t0.
+ __ sll(t0, a3, kPointerSizeLog2 - kSmiTagSize);
+ __ Addu(t0, a2, Operand(t0));
+ __ lw(t0, FieldMemOperand(t0, FixedArray::kHeaderSize));
// A monomorphic cache hit or an already megamorphic state: invoke the
// function without changing the state.
- __ Branch(&done, eq, a3, Operand(a1));
-
- // If we came here, we need to see if we are the array function.
- // If we didn't have a matching function, and we didn't find the megamorph
- // sentinel, then we have in the cell either some other function or an
- // AllocationSite. Do a map check on the object in a3.
- __ lw(t1, FieldMemOperand(a3, 0));
- __ LoadRoot(at, Heap::kAllocationSiteMapRootIndex);
- __ Branch(&miss, ne, t1, Operand(at));
-
- // Make sure the function is the Array() function
- __ LoadArrayFunction(a3);
- __ Branch(&megamorphic, ne, a1, Operand(a3));
- __ jmp(&done);
+ __ Branch(&done, eq, t0, Operand(a1));
+
+ if (!FLAG_pretenuring_call_new) {
+ // If we came here, we need to see if we are the array function.
+ // If we didn't have a matching function, and we didn't find the megamorph
+ // sentinel, then we have in the slot either some other function or an
+ // AllocationSite. Do a map check on the object in a3.
+ __ lw(t1, FieldMemOperand(t0, 0));
+ __ LoadRoot(at, Heap::kAllocationSiteMapRootIndex);
+ __ Branch(&miss, ne, t1, Operand(at));
+
+ // Make sure the function is the Array() function
+ __ LoadGlobalFunction(Context::ARRAY_FUNCTION_INDEX, t0);
+ __ Branch(&megamorphic, ne, a1, Operand(t0));
+ __ jmp(&done);
+ }
__ bind(&miss);
// A monomorphic miss (i.e, here the cache is not uninitialized) goes
// megamorphic.
- __ LoadRoot(at, Heap::kTheHoleValueRootIndex);
- __ Branch(&initialize, eq, a3, Operand(at));
+ __ LoadRoot(at, Heap::kUninitializedSymbolRootIndex);
+ __ Branch(&initialize, eq, t0, Operand(at));
// MegamorphicSentinel is an immortal immovable object (undefined) so no
// write-barrier is needed.
__ bind(&megamorphic);
- __ LoadRoot(at, Heap::kUndefinedValueRootIndex);
- __ sw(at, FieldMemOperand(a2, Cell::kValueOffset));
+ __ sll(t0, a3, kPointerSizeLog2 - kSmiTagSize);
+ __ Addu(t0, a2, Operand(t0));
+ __ LoadRoot(at, Heap::kMegamorphicSymbolRootIndex);
+ __ sw(at, FieldMemOperand(t0, FixedArray::kHeaderSize));
__ jmp(&done);
- // An uninitialized cache is patched with the function or sentinel to
- // indicate the ElementsKind if function is the Array constructor.
+ // An uninitialized cache is patched with the function.
__ bind(&initialize);
- // Make sure the function is the Array() function
- __ LoadArrayFunction(a3);
- __ Branch(&not_array_function, ne, a1, Operand(a3));
+ if (!FLAG_pretenuring_call_new) {
+ // Make sure the function is the Array() function.
+ __ LoadGlobalFunction(Context::ARRAY_FUNCTION_INDEX, t0);
+ __ Branch(&not_array_function, ne, a1, Operand(t0));
+
+ // The target function is the Array constructor,
+ // Create an AllocationSite if we don't already have it, store it in the
+ // slot.
+ {
+ FrameScope scope(masm, StackFrame::INTERNAL);
+ const RegList kSavedRegs =
+ 1 << 4 | // a0
+ 1 << 5 | // a1
+ 1 << 6 | // a2
+ 1 << 7; // a3
- // The target function is the Array constructor.
- // Create an AllocationSite if we don't already have it, store it in the cell.
- {
- FrameScope scope(masm, StackFrame::INTERNAL);
- const RegList kSavedRegs =
- 1 << 4 | // a0
- 1 << 5 | // a1
- 1 << 6; // a2
+ // Arguments register must be smi-tagged to call out.
+ __ SmiTag(a0);
+ __ MultiPush(kSavedRegs);
- // Arguments register must be smi-tagged to call out.
- __ SmiTag(a0);
- __ MultiPush(kSavedRegs);
+ CreateAllocationSiteStub create_stub;
+ __ CallStub(&create_stub);
- CreateAllocationSiteStub create_stub;
- __ CallStub(&create_stub);
+ __ MultiPop(kSavedRegs);
+ __ SmiUntag(a0);
+ }
+ __ Branch(&done);
- __ MultiPop(kSavedRegs);
- __ SmiUntag(a0);
+ __ bind(&not_array_function);
}
- __ Branch(&done);
- __ bind(&not_array_function);
- __ sw(a1, FieldMemOperand(a2, Cell::kValueOffset));
- // No need for a write barrier here - cells are rescanned.
+ __ sll(t0, a3, kPointerSizeLog2 - kSmiTagSize);
+ __ Addu(t0, a2, Operand(t0));
+ __ Addu(t0, t0, Operand(FixedArray::kHeaderSize - kHeapObjectTag));
+ __ sw(a1, MemOperand(t0, 0));
+
+ __ Push(t0, a2, a1);
+ __ RecordWrite(a2, t0, a1, kRAHasNotBeenSaved, kDontSaveFPRegs,
+ EMIT_REMEMBERED_SET, OMIT_SMI_CHECK);
+ __ Pop(t0, a2, a1);
__ bind(&done);
}
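GenerateRecordCallTarget now caches the call target in a feedback-vector slot instead of a property cell: an uninitialized slot is patched with the function on first call, a mismatching target degrades the slot to the megamorphic sentinel, and a megamorphic slot is left untouched (the Array/AllocationSite special case is elided here). A compact model of those transitions, with placeholder sentinels rather than V8's real symbols:

    #include <cstdint>

    // Hypothetical sentinels; V8 uses dedicated symbols in the feedback vector.
    enum class Sentinel : intptr_t { kUninitialized = -1, kMegamorphic = -2 };

    struct Slot {
      intptr_t value = static_cast<intptr_t>(Sentinel::kUninitialized);
    };

    // First observation caches the target (monomorphic), a different target
    // degrades the slot to megamorphic, and a megamorphic slot stays put.
    void RecordCallTarget(Slot& slot, intptr_t target) {
      if (slot.value == target ||
          slot.value == static_cast<intptr_t>(Sentinel::kMegamorphic)) {
        return;  // cache hit or already megamorphic: leave the slot alone
      }
      if (slot.value == static_cast<intptr_t>(Sentinel::kUninitialized)) {
        slot.value = target;  // go monomorphic
      } else {
        slot.value = static_cast<intptr_t>(Sentinel::kMegamorphic);
      }
    }

    int main() {
      Slot slot;
      RecordCallTarget(slot, 0x100);  // monomorphic
      RecordCallTarget(slot, 0x100);  // stays monomorphic
      RecordCallTarget(slot, 0x200);  // degrades to megamorphic
      return slot.value == static_cast<intptr_t>(Sentinel::kMegamorphic) ? 0 : 1;
    }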
@@ -3236,7 +3137,9 @@ static void GenerateRecordCallTarget(MacroAssembler* masm) {
void CallFunctionStub::Generate(MacroAssembler* masm) {
// a1 : the function to call
- // a2 : cache cell for call target
+ // a2 : feedback vector
+ // a3 : (only if a2 is not the megamorphic symbol) slot in feedback
+ // vector (Smi)
Label slow, non_function, wrap, cont;
if (NeedsChecks()) {
@@ -3245,11 +3148,15 @@ void CallFunctionStub::Generate(MacroAssembler* masm) {
__ JumpIfSmi(a1, &non_function);
// Goto slow case if we do not have a function.
- __ GetObjectType(a1, a3, a3);
- __ Branch(&slow, ne, a3, Operand(JS_FUNCTION_TYPE));
+ __ GetObjectType(a1, t0, t0);
+ __ Branch(&slow, ne, t0, Operand(JS_FUNCTION_TYPE));
if (RecordCallTarget()) {
GenerateRecordCallTarget(masm);
+ // Type information was updated. Because we may call Array, which
+ // expects either undefined or an AllocationSite in a2 we need
+ // to set a2 to undefined.
+ __ LoadRoot(a2, Heap::kUndefinedValueRootIndex);
}
}
@@ -3269,7 +3176,7 @@ void CallFunctionStub::Generate(MacroAssembler* masm) {
__ Branch(&cont, ne, at, Operand(zero_reg));
}
- // Compute the receiver in non-strict mode.
+ // Compute the receiver in sloppy mode.
__ lw(a3, MemOperand(sp, argc_ * kPointerSize));
if (NeedsChecks()) {
@@ -3290,14 +3197,16 @@ void CallFunctionStub::Generate(MacroAssembler* masm) {
if (RecordCallTarget()) {
// If there is a call target cache, mark it megamorphic in the
// non-function case. MegamorphicSentinel is an immortal immovable
- // object (undefined) so no write barrier is needed.
- ASSERT_EQ(*TypeFeedbackCells::MegamorphicSentinel(masm->isolate()),
- masm->isolate()->heap()->undefined_value());
- __ LoadRoot(at, Heap::kUndefinedValueRootIndex);
- __ sw(at, FieldMemOperand(a2, Cell::kValueOffset));
+ // object (megamorphic symbol) so no write barrier is needed.
+ ASSERT_EQ(*TypeFeedbackInfo::MegamorphicSentinel(masm->isolate()),
+ masm->isolate()->heap()->megamorphic_symbol());
+ __ sll(t1, a3, kPointerSizeLog2 - kSmiTagSize);
+ __ Addu(t1, a2, Operand(t1));
+ __ LoadRoot(at, Heap::kMegamorphicSymbolRootIndex);
+ __ sw(at, FieldMemOperand(t1, FixedArray::kHeaderSize));
}
// Check for function proxy.
- __ Branch(&non_function, ne, a3, Operand(JS_FUNCTION_PROXY_TYPE));
+ __ Branch(&non_function, ne, t0, Operand(JS_FUNCTION_PROXY_TYPE));
__ push(a1); // Put proxy as additional argument.
__ li(a0, Operand(argc_ + 1, RelocInfo::NONE32));
__ li(a2, Operand(0, RelocInfo::NONE32));
@@ -3337,21 +3246,42 @@ void CallFunctionStub::Generate(MacroAssembler* masm) {
void CallConstructStub::Generate(MacroAssembler* masm) {
// a0 : number of arguments
// a1 : the function to call
- // a2 : cache cell for call target
+ // a2 : feedback vector
+ // a3 : (only if a2 is not undefined) slot in feedback vector (Smi)
Label slow, non_function_call;
// Check that the function is not a smi.
__ JumpIfSmi(a1, &non_function_call);
// Check that the function is a JSFunction.
- __ GetObjectType(a1, a3, a3);
- __ Branch(&slow, ne, a3, Operand(JS_FUNCTION_TYPE));
+ __ GetObjectType(a1, t0, t0);
+ __ Branch(&slow, ne, t0, Operand(JS_FUNCTION_TYPE));
if (RecordCallTarget()) {
GenerateRecordCallTarget(masm);
+
+ __ sll(at, a3, kPointerSizeLog2 - kSmiTagSize);
+ __ Addu(t1, a2, at);
+ if (FLAG_pretenuring_call_new) {
+ // Put the AllocationSite from the feedback vector into a2.
+ // By adding kPointerSize we encode that we know the AllocationSite
+ // entry is at the feedback vector slot given by a3 + 1.
+ __ lw(a2, FieldMemOperand(t1, FixedArray::kHeaderSize + kPointerSize));
+ } else {
+ Label feedback_register_initialized;
+ // Put the AllocationSite from the feedback vector into a2, or undefined.
+ __ lw(a2, FieldMemOperand(t1, FixedArray::kHeaderSize));
+ __ lw(t1, FieldMemOperand(a2, AllocationSite::kMapOffset));
+ __ LoadRoot(at, Heap::kAllocationSiteMapRootIndex);
+ __ Branch(&feedback_register_initialized, eq, t1, Operand(at));
+ __ LoadRoot(a2, Heap::kUndefinedValueRootIndex);
+ __ bind(&feedback_register_initialized);
+ }
+
+ __ AssertUndefinedOrAllocationSite(a2, t1);
}
// Jump to the function-specific construct stub.
- Register jmp_reg = a3;
+ Register jmp_reg = t0;
__ lw(jmp_reg, FieldMemOperand(a1, JSFunction::kSharedFunctionInfoOffset));
__ lw(jmp_reg, FieldMemOperand(jmp_reg,
SharedFunctionInfo::kConstructStubOffset));
@@ -3360,10 +3290,10 @@ void CallConstructStub::Generate(MacroAssembler* masm) {
// a0: number of arguments
// a1: called object
- // a3: object type
+ // t0: object type
Label do_call;
__ bind(&slow);
- __ Branch(&non_function_call, ne, a3, Operand(JS_FUNCTION_PROXY_TYPE));
+ __ Branch(&non_function_call, ne, t0, Operand(JS_FUNCTION_PROXY_TYPE));
__ GetBuiltinFunction(a1, Builtins::CALL_FUNCTION_PROXY_AS_CONSTRUCTOR);
__ jmp(&do_call);
@@ -3441,7 +3371,7 @@ void StringCharCodeAtGenerator::GenerateSlow(
} else {
ASSERT(index_flags_ == STRING_INDEX_IS_ARRAY_INDEX);
// NumberToSmi discards numbers that are not exact integers.
- __ CallRuntime(Runtime::kNumberToSmi, 1);
+ __ CallRuntime(Runtime::kHiddenNumberToSmi, 1);
}
// Save the conversion result before the pop instructions below
@@ -3465,7 +3395,7 @@ void StringCharCodeAtGenerator::GenerateSlow(
call_helper.BeforeCall(masm);
__ sll(index_, index_, kSmiTagSize);
__ Push(object_, index_);
- __ CallRuntime(Runtime::kStringCharCodeAt, 2);
+ __ CallRuntime(Runtime::kHiddenStringCharCodeAt, 2);
__ Move(result_, v0);
@@ -3900,7 +3830,7 @@ void SubStringStub::Generate(MacroAssembler* masm) {
// Just jump to runtime to create the sub string.
__ bind(&runtime);
- __ TailCallRuntime(Runtime::kSubString, 3, 1);
+ __ TailCallRuntime(Runtime::kHiddenSubString, 3, 1);
__ bind(&single_char);
// v0: original string
@@ -4065,7 +3995,7 @@ void StringCompareStub::Generate(MacroAssembler* masm) {
GenerateCompareFlatAsciiStrings(masm, a1, a0, a2, a3, t0, t1);
__ bind(&runtime);
- __ TailCallRuntime(Runtime::kStringCompare, 2, 1);
+ __ TailCallRuntime(Runtime::kHiddenStringCompare, 2, 1);
}
@@ -4576,7 +4506,7 @@ void ICCompareStub::GenerateStrings(MacroAssembler* masm) {
if (equality) {
__ TailCallRuntime(Runtime::kStringEquals, 2, 1);
} else {
- __ TailCallRuntime(Runtime::kStringCompare, 2, 1);
+ __ TailCallRuntime(Runtime::kHiddenStringCompare, 2, 1);
}
__ bind(&miss);
@@ -5008,7 +4938,7 @@ void RecordWriteStub::GenerateIncremental(MacroAssembler* masm, Mode mode) {
// remembered set.
CheckNeedsToInformIncrementalMarker(
masm, kUpdateRememberedSetOnNoNeedToInformIncrementalMarker, mode);
- InformIncrementalMarker(masm, mode);
+ InformIncrementalMarker(masm);
regs_.Restore(masm);
__ RememberedSetHelper(object_,
address_,
@@ -5021,13 +4951,13 @@ void RecordWriteStub::GenerateIncremental(MacroAssembler* masm, Mode mode) {
CheckNeedsToInformIncrementalMarker(
masm, kReturnOnNoNeedToInformIncrementalMarker, mode);
- InformIncrementalMarker(masm, mode);
+ InformIncrementalMarker(masm);
regs_.Restore(masm);
__ Ret();
}
-void RecordWriteStub::InformIncrementalMarker(MacroAssembler* masm, Mode mode) {
+void RecordWriteStub::InformIncrementalMarker(MacroAssembler* masm) {
regs_.SaveCallerSaveRegisters(masm, save_fp_regs_mode_);
int argument_count = 3;
__ PrepareCallCFunction(argument_count, regs_.scratch0());
@@ -5041,18 +4971,10 @@ void RecordWriteStub::InformIncrementalMarker(MacroAssembler* masm, Mode mode) {
__ li(a2, Operand(ExternalReference::isolate_address(masm->isolate())));
AllowExternalCallThatCantCauseGC scope(masm);
- if (mode == INCREMENTAL_COMPACTION) {
- __ CallCFunction(
- ExternalReference::incremental_evacuation_record_write_function(
- masm->isolate()),
- argument_count);
- } else {
- ASSERT(mode == INCREMENTAL);
- __ CallCFunction(
- ExternalReference::incremental_marking_record_write_function(
- masm->isolate()),
- argument_count);
- }
+ __ CallCFunction(
+ ExternalReference::incremental_marking_record_write_function(
+ masm->isolate()),
+ argument_count);
regs_.RestoreCallerSaveRegisters(masm, save_fp_regs_mode_);
}
@@ -5361,7 +5283,7 @@ static void CreateArrayDispatchOneArgument(MacroAssembler* masm,
__ TailCallStub(&stub);
} else if (mode == DONT_OVERRIDE) {
// We are going to create a holey array, but our kind is non-holey.
- // Fix kind and retry (only if we have an allocation site in the cell).
+ // Fix kind and retry (only if we have an allocation site in the slot).
__ Addu(a3, a3, Operand(1));
if (FLAG_debug_code) {
@@ -5468,46 +5390,33 @@ void ArrayConstructorStub::Generate(MacroAssembler* masm) {
// ----------- S t a t e -------------
// -- a0 : argc (only if argument_count_ == ANY)
// -- a1 : constructor
- // -- a2 : type info cell
+ // -- a2 : AllocationSite or undefined
// -- sp[0] : return address
// -- sp[4] : last argument
// -----------------------------------
+
if (FLAG_debug_code) {
// The array construct code is only set for the global and natives
// builtin Array functions which always have maps.
// Initial map for the builtin Array function should be a map.
- __ lw(a3, FieldMemOperand(a1, JSFunction::kPrototypeOrInitialMapOffset));
+ __ lw(t0, FieldMemOperand(a1, JSFunction::kPrototypeOrInitialMapOffset));
// Will both indicate a NULL and a Smi.
- __ SmiTst(a3, at);
+ __ SmiTst(t0, at);
__ Assert(ne, kUnexpectedInitialMapForArrayFunction,
at, Operand(zero_reg));
- __ GetObjectType(a3, a3, t0);
+ __ GetObjectType(t0, t0, t1);
__ Assert(eq, kUnexpectedInitialMapForArrayFunction,
- t0, Operand(MAP_TYPE));
+ t1, Operand(MAP_TYPE));
- // We should either have undefined in a2 or a valid cell.
- Label okay_here;
- Handle<Map> cell_map = masm->isolate()->factory()->cell_map();
- __ LoadRoot(at, Heap::kUndefinedValueRootIndex);
- __ Branch(&okay_here, eq, a2, Operand(at));
- __ lw(a3, FieldMemOperand(a2, 0));
- __ Assert(eq, kExpectedPropertyCellInRegisterA2,
- a3, Operand(cell_map));
- __ bind(&okay_here);
+ // We should either have undefined in a2 or a valid AllocationSite
+ __ AssertUndefinedOrAllocationSite(a2, t0);
}
Label no_info;
// Get the elements kind and case on that.
__ LoadRoot(at, Heap::kUndefinedValueRootIndex);
__ Branch(&no_info, eq, a2, Operand(at));
- __ lw(a2, FieldMemOperand(a2, Cell::kValueOffset));
-
- // If the type cell is undefined, or contains anything other than an
- // AllocationSite, call an array constructor that doesn't use AllocationSites.
- __ lw(t0, FieldMemOperand(a2, 0));
- __ LoadRoot(at, Heap::kAllocationSiteMapRootIndex);
- __ Branch(&no_info, ne, t0, Operand(at));
__ lw(a3, FieldMemOperand(a2, AllocationSite::kTransitionInfoOffset));
__ SmiUntag(a3);
@@ -5615,7 +5524,7 @@ void CallApiFunctionStub::Generate(MacroAssembler* masm) {
Register context = cp;
int argc = ArgumentBits::decode(bit_field_);
- bool restore_context = RestoreContextBits::decode(bit_field_);
+ bool is_store = IsStoreBits::decode(bit_field_);
bool call_data_undefined = CallDataUndefinedBits::decode(bit_field_);
typedef FunctionCallbackArguments FCA;
@@ -5682,15 +5591,20 @@ void CallApiFunctionStub::Generate(MacroAssembler* masm) {
AllowExternalCallThatCantCauseGC scope(masm);
MemOperand context_restore_operand(
fp, (2 + FCA::kContextSaveIndex) * kPointerSize);
- MemOperand return_value_operand(fp,
- (2 + FCA::kReturnValueOffset) * kPointerSize);
+ // Stores return the first js argument.
+ int return_value_offset = 0;
+ if (is_store) {
+ return_value_offset = 2 + FCA::kArgsLength;
+ } else {
+ return_value_offset = 2 + FCA::kReturnValueOffset;
+ }
+ MemOperand return_value_operand(fp, return_value_offset * kPointerSize);
__ CallApiFunctionAndReturn(api_function_address,
thunk_ref,
kStackUnwindSpace,
return_value_operand,
- restore_context ?
- &context_restore_operand : NULL);
+ &context_restore_operand);
}
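
The is_store branch above picks which stack slot holds the callback's return value: store callbacks hand back the value that was written (the first JS argument, just above the FunctionCallbackArguments block), while other callbacks read the dedicated return-value slot. A small sketch of that offset computation; the FCA constant values below are assumed placeholders, and the leading 2 simply reproduces the two frame words between fp and the FCA block shown in the hunk.

// Assumed placeholder values; the real constants come from
// FunctionCallbackArguments (FCA) in the V8 sources.
constexpr int kArgsLength = 7;           // number of FCA slots (assumption)
constexpr int kReturnValueOffset = 3;    // index of the return-value slot (assumption)

// Mirrors the is_store branch in CallApiFunctionStub::Generate above.
constexpr int ReturnValueOffset(bool is_store) {
  return is_store ? 2 + kArgsLength         // first JS argument (the stored value)
                  : 2 + kReturnValueOffset; // regular return-value slot
}

static_assert(ReturnValueOffset(true) == 2 + kArgsLength,
              "store callbacks return the written value");
static_assert(ReturnValueOffset(false) == 2 + kReturnValueOffset,
              "other callbacks read the return-value slot");
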
diff --git a/deps/v8/src/mips/code-stubs-mips.h b/deps/v8/src/mips/code-stubs-mips.h
index 8d65d5b055..e71c30583e 100644
--- a/deps/v8/src/mips/code-stubs-mips.h
+++ b/deps/v8/src/mips/code-stubs-mips.h
@@ -367,7 +367,7 @@ class RecordWriteStub: public PlatformCodeStub {
MacroAssembler* masm,
OnNoNeedToInformIncrementalMarker on_no_need,
Mode mode);
- void InformIncrementalMarker(MacroAssembler* masm, Mode mode);
+ void InformIncrementalMarker(MacroAssembler* masm);
Major MajorKey() { return RecordWrite; }
diff --git a/deps/v8/src/mips/debug-mips.cc b/deps/v8/src/mips/debug-mips.cc
index 1535231dd8..b9bf69db42 100644
--- a/deps/v8/src/mips/debug-mips.cc
+++ b/deps/v8/src/mips/debug-mips.cc
@@ -274,9 +274,10 @@ void Debug::GenerateCallFunctionStubRecordDebugBreak(MacroAssembler* masm) {
// Register state for CallFunctionStub (from code-stubs-mips.cc).
// ----------- S t a t e -------------
// -- a1 : function
- // -- a2 : cache cell for call target
+ // -- a2 : feedback array
+ // -- a3 : slot in feedback array
// -----------------------------------
- Generate_DebugBreakCallHelper(masm, a1.bit() | a2.bit(), 0);
+ Generate_DebugBreakCallHelper(masm, a1.bit() | a2.bit() | a3.bit(), 0);
}
@@ -295,9 +296,10 @@ void Debug::GenerateCallConstructStubRecordDebugBreak(MacroAssembler* masm) {
// ----------- S t a t e -------------
// -- a0 : number of arguments (not smi)
// -- a1 : constructor function
- // -- a2 : cache cell for call target
+ // -- a2 : feedback array
+ // -- a3 : feedback slot (smi)
// -----------------------------------
- Generate_DebugBreakCallHelper(masm, a1.bit() | a2.bit(), a0.bit());
+ Generate_DebugBreakCallHelper(masm, a1.bit() | a2.bit() | a3.bit(), a0.bit());
}
diff --git a/deps/v8/src/mips/deoptimizer-mips.cc b/deps/v8/src/mips/deoptimizer-mips.cc
index 6bd9ba7b7f..0cd5e2ccd2 100644
--- a/deps/v8/src/mips/deoptimizer-mips.cc
+++ b/deps/v8/src/mips/deoptimizer-mips.cc
@@ -49,13 +49,36 @@ void Deoptimizer::PatchCodeForDeoptimization(Isolate* isolate, Code* code) {
// code patching below, and is not needed any more.
code->InvalidateRelocation();
- // For each LLazyBailout instruction insert a call to the corresponding
- // deoptimization entry.
+ if (FLAG_zap_code_space) {
+ // Fail hard and early if we enter this code object again.
+ byte* pointer = code->FindCodeAgeSequence();
+ if (pointer != NULL) {
+ pointer += kNoCodeAgeSequenceLength * Assembler::kInstrSize;
+ } else {
+ pointer = code->instruction_start();
+ }
+ CodePatcher patcher(pointer, 1);
+ patcher.masm()->break_(0xCC);
+
+ DeoptimizationInputData* data =
+ DeoptimizationInputData::cast(code->deoptimization_data());
+ int osr_offset = data->OsrPcOffset()->value();
+ if (osr_offset > 0) {
+ CodePatcher osr_patcher(code->instruction_start() + osr_offset, 1);
+ osr_patcher.masm()->break_(0xCC);
+ }
+ }
+
DeoptimizationInputData* deopt_data =
DeoptimizationInputData::cast(code->deoptimization_data());
+ SharedFunctionInfo* shared =
+ SharedFunctionInfo::cast(deopt_data->SharedFunctionInfo());
+ shared->EvictFromOptimizedCodeMap(code, "deoptimized code");
#ifdef DEBUG
Address prev_call_address = NULL;
#endif
+ // For each LLazyBailout instruction insert a call to the corresponding
+ // deoptimization entry.
for (int i = 0; i < deopt_data->DeoptCount(); i++) {
if (deopt_data->Pc(i)->value() == -1) continue;
Address call_address = code_start_address + deopt_data->Pc(i)->value();
@@ -371,6 +394,12 @@ void FrameDescription::SetCallerFp(unsigned offset, intptr_t value) {
}
+void FrameDescription::SetCallerConstantPool(unsigned offset, intptr_t value) {
+ // No out-of-line constant pool support.
+ UNREACHABLE();
+}
+
+
#undef __
diff --git a/deps/v8/src/mips/frames-mips.h b/deps/v8/src/mips/frames-mips.h
index d9c0c798a3..0ec2cbb864 100644
--- a/deps/v8/src/mips/frames-mips.h
+++ b/deps/v8/src/mips/frames-mips.h
@@ -176,6 +176,8 @@ class ExitFrameConstants : public AllStatic {
// FP-relative displacement of the caller's SP.
static const int kCallerSPDisplacement = +2 * kPointerSize;
+
+ static const int kConstantPoolOffset = 0; // Not used.
};
diff --git a/deps/v8/src/mips/full-codegen-mips.cc b/deps/v8/src/mips/full-codegen-mips.cc
index 18ee02dc5c..87c0764b60 100644
--- a/deps/v8/src/mips/full-codegen-mips.cc
+++ b/deps/v8/src/mips/full-codegen-mips.cc
@@ -120,6 +120,24 @@ class JumpPatchSite BASE_EMBEDDED {
};
+static void EmitStackCheck(MacroAssembler* masm_,
+ Register stack_limit_scratch,
+ int pointers = 0,
+ Register scratch = sp) {
+ Isolate* isolate = masm_->isolate();
+ Label ok;
+ ASSERT(scratch.is(sp) == (pointers == 0));
+ if (pointers != 0) {
+ __ Subu(scratch, sp, Operand(pointers * kPointerSize));
+ }
+ __ LoadRoot(stack_limit_scratch, Heap::kStackLimitRootIndex);
+ __ Branch(&ok, hs, scratch, Operand(stack_limit_scratch));
+ PredictableCodeSizeScope predictable(masm_, 4 * Assembler::kInstrSize);
+ __ Call(isolate->builtins()->StackCheck(), RelocInfo::CODE_TARGET);
+ __ bind(&ok);
+}
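+
+// The helper above compares either sp itself or sp minus the space about to be
+// reserved against the stack-limit root, and calls the StackCheck builtin when
+// the limit would be crossed. A hedged, architecture-neutral sketch of the same
+// guard (not part of the patch), with the stack modeled as plain addresses:
+//
+//   #include <cstdint>
+//   #include <cstddef>
+//
+//   constexpr std::size_t kPointerSize = 4;  // MIPS32 word size
+//
+//   // True when the builtin stack-check call must be taken, i.e. when
+//   // reserving `pointers` more words would drop below the stack limit.
+//   // pointers == 0 reproduces the plain sp-vs-limit comparison.
+//   bool NeedsStackCheck(std::uintptr_t sp, std::uintptr_t stack_limit,
+//                        std::size_t pointers = 0) {
+//     std::uintptr_t probe = sp - pointers * kPointerSize;  // Subu(scratch, sp, ...)
+//     return probe < stack_limit;  // the hs branch to &ok is not taken
+//   }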
+
+
// Generate code for a JS function. On entry to the function the receiver
// and arguments have been pushed on the stack left to right. The actual
// argument count matches the formal parameter count expected by the
@@ -138,6 +156,9 @@ void FullCodeGenerator::Generate() {
CompilationInfo* info = info_;
handler_table_ =
isolate()->factory()->NewFixedArray(function()->handler_count(), TENURED);
+
+ InitializeFeedbackVector();
+
profiling_counter_ = isolate()->factory()->NewCell(
Handle<Smi>(Smi::FromInt(FLAG_interrupt_budget), isolate()));
SetFunctionPosition(function());
@@ -152,10 +173,10 @@ void FullCodeGenerator::Generate() {
}
#endif
- // Classic mode functions and builtins need to replace the receiver with the
+ // Sloppy mode functions and builtins need to replace the receiver with the
// global proxy when called as functions (without an explicit receiver
// object).
- if (info->is_classic_mode() && !info->is_native()) {
+ if (info->strict_mode() == SLOPPY && !info->is_native()) {
Label ok;
int receiver_offset = info->scope()->num_parameters() * kPointerSize;
__ lw(at, MemOperand(sp, receiver_offset));
@@ -184,22 +205,30 @@ void FullCodeGenerator::Generate() {
// Generators allocate locals, if any, in context slots.
ASSERT(!info->function()->is_generator() || locals_count == 0);
if (locals_count > 0) {
- // Emit a loop to initialize stack cells for locals when optimizing for
- // size. Otherwise, unroll the loop for maximum performance.
+ if (locals_count >= 128) {
+ EmitStackCheck(masm_, a2, locals_count, t5);
+ }
__ LoadRoot(t5, Heap::kUndefinedValueRootIndex);
- if ((FLAG_optimize_for_size && locals_count > 4) ||
- !is_int16(locals_count)) {
- Label loop;
- __ Subu(a2, sp, Operand(locals_count * kPointerSize));
- __ bind(&loop);
- __ Subu(sp, sp, Operand(kPointerSize));
- __ Branch(&loop, gt, sp, Operand(a2), USE_DELAY_SLOT);
- __ sw(t5, MemOperand(sp, 0)); // Push in the delay slot.
- } else {
- __ Subu(sp, sp, Operand(locals_count * kPointerSize));
- for (int i = 0; i < locals_count; i++) {
+ int kMaxPushes = FLAG_optimize_for_size ? 4 : 32;
+ if (locals_count >= kMaxPushes) {
+ int loop_iterations = locals_count / kMaxPushes;
+ __ li(a2, Operand(loop_iterations));
+ Label loop_header;
+ __ bind(&loop_header);
+ // Do pushes.
+ __ Subu(sp, sp, Operand(kMaxPushes * kPointerSize));
+ for (int i = 0; i < kMaxPushes; i++) {
__ sw(t5, MemOperand(sp, i * kPointerSize));
}
+ // Continue loop if not done.
+ __ Subu(a2, a2, Operand(1));
+ __ Branch(&loop_header, ne, a2, Operand(zero_reg));
+ }
+ int remaining = locals_count % kMaxPushes;
+ // Emit the remaining pushes.
+ __ Subu(sp, sp, Operand(remaining * kPointerSize));
+ for (int i = 0; i < remaining; i++) {
+ __ sw(t5, MemOperand(sp, i * kPointerSize));
}
}
}
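
The rewritten prologue above reserves stack space for locals in fixed-size batches (kMaxPushes words per loop iteration) and handles the remainder with straight-line stores. A standalone sketch of the batching arithmetic, assuming the same 4/32 batch sizes and modeling the stack as a vector:

#include <vector>
#include <cstdint>

// Fills `locals_count` slots with an undefined sentinel in batches of
// kMaxPushes, mirroring the unrolled loop plus remainder emitted above.
std::vector<std::intptr_t> InitializeLocals(int locals_count,
                                            bool optimize_for_size,
                                            std::intptr_t undefined_sentinel) {
  const int kMaxPushes = optimize_for_size ? 4 : 32;
  std::vector<std::intptr_t> stack;
  int loop_iterations = locals_count / kMaxPushes;   // full batches
  for (int it = 0; it < loop_iterations; ++it) {
    for (int i = 0; i < kMaxPushes; ++i) stack.push_back(undefined_sentinel);
  }
  int remaining = locals_count % kMaxPushes;         // straight-line tail
  for (int i = 0; i < remaining; ++i) stack.push_back(undefined_sentinel);
  return stack;  // stack.size() == locals_count
}
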
@@ -214,13 +243,13 @@ void FullCodeGenerator::Generate() {
if (FLAG_harmony_scoping && info->scope()->is_global_scope()) {
__ push(a1);
__ Push(info->scope()->GetScopeInfo());
- __ CallRuntime(Runtime::kNewGlobalContext, 2);
+ __ CallRuntime(Runtime::kHiddenNewGlobalContext, 2);
} else if (heap_slots <= FastNewContextStub::kMaximumSlots) {
FastNewContextStub stub(heap_slots);
__ CallStub(&stub);
} else {
__ push(a1);
- __ CallRuntime(Runtime::kNewFunctionContext, 1);
+ __ CallRuntime(Runtime::kHiddenNewFunctionContext, 1);
}
function_in_register = false;
// Context is returned in v0. It replaces the context passed to us.
@@ -270,12 +299,12 @@ void FullCodeGenerator::Generate() {
     // The stub will rewrite receiver and parameter count if the previous
// stack frame was an arguments adapter frame.
ArgumentsAccessStub::Type type;
- if (!is_classic_mode()) {
+ if (strict_mode() == STRICT) {
type = ArgumentsAccessStub::NEW_STRICT;
} else if (function()->has_duplicate_parameters()) {
- type = ArgumentsAccessStub::NEW_NON_STRICT_SLOW;
+ type = ArgumentsAccessStub::NEW_SLOPPY_SLOW;
} else {
- type = ArgumentsAccessStub::NEW_NON_STRICT_FAST;
+ type = ArgumentsAccessStub::NEW_SLOPPY_FAST;
}
ArgumentsAccessStub stub(type);
__ CallStub(&stub);
@@ -301,7 +330,7 @@ void FullCodeGenerator::Generate() {
if (scope()->is_function_scope() && scope()->function() != NULL) {
VariableDeclaration* function = scope()->function();
ASSERT(function->proxy()->var()->mode() == CONST ||
- function->proxy()->var()->mode() == CONST_HARMONY);
+ function->proxy()->var()->mode() == CONST_LEGACY);
ASSERT(function->proxy()->var()->location() != Variable::UNALLOCATED);
VisitVariableDeclaration(function);
}
@@ -310,11 +339,7 @@ void FullCodeGenerator::Generate() {
{ Comment cmnt(masm_, "[ Stack check");
PrepareForBailoutForId(BailoutId::Declarations(), NO_REGISTERS);
- Label ok;
- __ LoadRoot(t0, Heap::kStackLimitRootIndex);
- __ Branch(&ok, hs, sp, Operand(t0));
- __ Call(isolate()->builtins()->StackCheck(), RelocInfo::CODE_TARGET);
- __ bind(&ok);
+ EmitStackCheck(masm_, at);
}
{ Comment cmnt(masm_, "[ Body");
@@ -679,7 +704,7 @@ void FullCodeGenerator::DoTest(Expression* condition,
Label* fall_through) {
__ mov(a0, result_register());
Handle<Code> ic = ToBooleanStub::GetUninitialized(isolate());
- CallIC(ic, NOT_CONTEXTUAL, condition->test_id());
+ CallIC(ic, condition->test_id());
__ mov(at, zero_reg);
Split(ne, v0, Operand(at), if_true, if_false, fall_through);
}
@@ -802,7 +827,7 @@ void FullCodeGenerator::VisitVariableDeclaration(
VariableProxy* proxy = declaration->proxy();
VariableMode mode = declaration->mode();
Variable* variable = proxy->var();
- bool hole_init = mode == CONST || mode == CONST_HARMONY || mode == LET;
+ bool hole_init = mode == LET || mode == CONST || mode == CONST_LEGACY;
switch (variable->location()) {
case Variable::UNALLOCATED:
globals_->Add(variable->name(), zone());
@@ -852,7 +877,7 @@ void FullCodeGenerator::VisitVariableDeclaration(
__ mov(a0, zero_reg); // Smi::FromInt(0) indicates no initial value.
__ Push(cp, a2, a1, a0);
}
- __ CallRuntime(Runtime::kDeclareContextSlot, 4);
+ __ CallRuntime(Runtime::kHiddenDeclareContextSlot, 4);
break;
}
}
@@ -908,7 +933,7 @@ void FullCodeGenerator::VisitFunctionDeclaration(
__ Push(cp, a2, a1);
// Push initial value for function declaration.
VisitForStackValue(declaration->fun());
- __ CallRuntime(Runtime::kDeclareContextSlot, 4);
+ __ CallRuntime(Runtime::kHiddenDeclareContextSlot, 4);
break;
}
}
@@ -980,7 +1005,7 @@ void FullCodeGenerator::DeclareGlobals(Handle<FixedArray> pairs) {
__ li(a1, Operand(pairs));
__ li(a0, Operand(Smi::FromInt(DeclareGlobalsFlags())));
__ Push(cp, a1, a0);
- __ CallRuntime(Runtime::kDeclareGlobals, 3);
+ __ CallRuntime(Runtime::kHiddenDeclareGlobals, 3);
// Return value is ignored.
}
@@ -988,7 +1013,7 @@ void FullCodeGenerator::DeclareGlobals(Handle<FixedArray> pairs) {
void FullCodeGenerator::DeclareModules(Handle<FixedArray> descriptions) {
// Call the runtime to declare the modules.
__ Push(descriptions);
- __ CallRuntime(Runtime::kDeclareModules, 1);
+ __ CallRuntime(Runtime::kHiddenDeclareModules, 1);
// Return value is ignored.
}
@@ -1044,7 +1069,7 @@ void FullCodeGenerator::VisitSwitchStatement(SwitchStatement* stmt) {
// Record position before stub call for type feedback.
SetSourcePosition(clause->position());
Handle<Code> ic = CompareIC::GetUninitialized(isolate(), Token::EQ_STRICT);
- CallIC(ic, NOT_CONTEXTUAL, clause->CompareId());
+ CallIC(ic, clause->CompareId());
patch_site.EmitPatchInfo();
Label skip;
@@ -1087,6 +1112,7 @@ void FullCodeGenerator::VisitSwitchStatement(SwitchStatement* stmt) {
void FullCodeGenerator::VisitForInStatement(ForInStatement* stmt) {
Comment cmnt(masm_, "[ ForInStatement");
+ int slot = stmt->ForInFeedbackSlot();
SetStatementPosition(stmt);
Label loop, exit;
@@ -1172,13 +1198,13 @@ void FullCodeGenerator::VisitForInStatement(ForInStatement* stmt) {
Label non_proxy;
__ bind(&fixed_array);
- Handle<Cell> cell = isolate()->factory()->NewCell(
- Handle<Object>(Smi::FromInt(TypeFeedbackCells::kForInFastCaseMarker),
- isolate()));
- RecordTypeFeedbackCell(stmt->ForInFeedbackId(), cell);
- __ li(a1, cell);
- __ li(a2, Operand(Smi::FromInt(TypeFeedbackCells::kForInSlowCaseMarker)));
- __ sw(a2, FieldMemOperand(a1, Cell::kValueOffset));
+ Handle<Object> feedback = Handle<Object>(
+ Smi::FromInt(TypeFeedbackInfo::kForInFastCaseMarker),
+ isolate());
+ StoreFeedbackVectorSlot(slot, feedback);
+ __ li(a1, FeedbackVector());
+ __ li(a2, Operand(Smi::FromInt(TypeFeedbackInfo::kForInSlowCaseMarker)));
+ __ sw(a2, FieldMemOperand(a1, FixedArray::OffsetOfElementAt(slot)));
__ li(a1, Operand(Smi::FromInt(1))); // Smi indicates slow check
__ lw(a2, MemOperand(sp, 0 * kPointerSize)); // Get enumerated object
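
Instead of allocating a fresh Cell per for-in site, the rewritten code writes the fast/slow-case marker straight into the function's feedback vector, a FixedArray indexed by the statement's feedback slot. A hedged sketch of that addressing; the header size below is an assumption (map plus length word), not taken from the patch.

#include <cstddef>

constexpr std::size_t kPointerSize = 4;                          // MIPS32
constexpr std::size_t kFixedArrayHeaderSize = 2 * kPointerSize;  // map + length (assumption)

// Byte offset of element `slot` inside a FixedArray-backed feedback vector,
// i.e. what FixedArray::OffsetOfElementAt(slot) resolves to in the sw above.
constexpr std::size_t FeedbackSlotOffset(int slot) {
  return kFixedArrayHeaderSize + static_cast<std::size_t>(slot) * kPointerSize;
}

static_assert(FeedbackSlotOffset(0) == kFixedArrayHeaderSize,
              "slot 0 sits right after the header");
static_assert(FeedbackSlotOffset(3) == kFixedArrayHeaderSize + 3 * kPointerSize,
              "slots are pointer-sized");
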
@@ -1338,7 +1364,7 @@ void FullCodeGenerator::EmitNewClosure(Handle<SharedFunctionInfo> info,
!pretenure &&
scope()->is_function_scope() &&
info->num_literals() == 0) {
- FastNewClosureStub stub(info->language_mode(), info->is_generator());
+ FastNewClosureStub stub(info->strict_mode(), info->is_generator());
__ li(a2, Operand(info));
__ CallStub(&stub);
} else {
@@ -1346,7 +1372,7 @@ void FullCodeGenerator::EmitNewClosure(Handle<SharedFunctionInfo> info,
__ LoadRoot(a1, pretenure ? Heap::kTrueValueRootIndex
: Heap::kFalseValueRootIndex);
__ Push(cp, a0, a1);
- __ CallRuntime(Runtime::kNewClosure, 3);
+ __ CallRuntime(Runtime::kHiddenNewClosure, 3);
}
context()->Plug(v0);
}
@@ -1368,7 +1394,7 @@ void FullCodeGenerator::EmitLoadGlobalCheckExtensions(Variable* var,
Scope* s = scope();
while (s != NULL) {
if (s->num_heap_slots() > 0) {
- if (s->calls_non_strict_eval()) {
+ if (s->calls_sloppy_eval()) {
// Check that extension is NULL.
__ lw(temp, ContextOperand(current, Context::EXTENSION_INDEX));
__ Branch(slow, ne, temp, Operand(zero_reg));
@@ -1380,7 +1406,7 @@ void FullCodeGenerator::EmitLoadGlobalCheckExtensions(Variable* var,
}
// If no outer scope calls eval, we do not need to check more
// context extensions.
- if (!s->outer_scope_calls_non_strict_eval() || s->is_eval_scope()) break;
+ if (!s->outer_scope_calls_sloppy_eval() || s->is_eval_scope()) break;
s = s->outer_scope();
}
@@ -1421,7 +1447,7 @@ MemOperand FullCodeGenerator::ContextSlotOperandCheckExtensions(Variable* var,
for (Scope* s = scope(); s != var->scope(); s = s->outer_scope()) {
if (s->num_heap_slots() > 0) {
- if (s->calls_non_strict_eval()) {
+ if (s->calls_sloppy_eval()) {
// Check that extension is NULL.
__ lw(temp, ContextOperand(context, Context::EXTENSION_INDEX));
__ Branch(slow, ne, temp, Operand(zero_reg));
@@ -1457,19 +1483,18 @@ void FullCodeGenerator::EmitDynamicLookupFastCase(Variable* var,
} else if (var->mode() == DYNAMIC_LOCAL) {
Variable* local = var->local_if_not_shadowed();
__ lw(v0, ContextSlotOperandCheckExtensions(local, slow));
- if (local->mode() == LET ||
- local->mode() == CONST ||
- local->mode() == CONST_HARMONY) {
+ if (local->mode() == LET || local->mode() == CONST ||
+ local->mode() == CONST_LEGACY) {
__ LoadRoot(at, Heap::kTheHoleValueRootIndex);
__ subu(at, v0, at); // Sub as compare: at == 0 on eq.
- if (local->mode() == CONST) {
+ if (local->mode() == CONST_LEGACY) {
__ LoadRoot(a0, Heap::kUndefinedValueRootIndex);
__ Movz(v0, a0, at); // Conditional move: return Undefined if TheHole.
- } else { // LET || CONST_HARMONY
+ } else { // LET || CONST
__ Branch(done, ne, at, Operand(zero_reg));
__ li(a0, Operand(var->name()));
__ push(a0);
- __ CallRuntime(Runtime::kThrowReferenceError, 1);
+ __ CallRuntime(Runtime::kHiddenThrowReferenceError, 1);
}
}
__ Branch(done);
@@ -1486,7 +1511,7 @@ void FullCodeGenerator::EmitVariableLoad(VariableProxy* proxy) {
// variables.
switch (var->location()) {
case Variable::UNALLOCATED: {
- Comment cmnt(masm_, "Global variable");
+ Comment cmnt(masm_, "[ Global variable");
// Use inline caching. Variable name is passed in a2 and the global
// object (receiver) in a0.
__ lw(a0, GlobalObjectOperand());
@@ -1499,9 +1524,8 @@ void FullCodeGenerator::EmitVariableLoad(VariableProxy* proxy) {
case Variable::PARAMETER:
case Variable::LOCAL:
case Variable::CONTEXT: {
- Comment cmnt(masm_, var->IsContextSlot()
- ? "Context variable"
- : "Stack variable");
+ Comment cmnt(masm_, var->IsContextSlot() ? "[ Context variable"
+ : "[ Stack variable");
if (var->binding_needs_init()) {
// var->scope() may be NULL when the proxy is located in eval code and
// refers to a potential outside binding. Currently those bindings are
@@ -1533,7 +1557,7 @@ void FullCodeGenerator::EmitVariableLoad(VariableProxy* proxy) {
// Check that we always have valid source position.
ASSERT(var->initializer_position() != RelocInfo::kNoPosition);
ASSERT(proxy->position() != RelocInfo::kNoPosition);
- skip_init_check = var->mode() != CONST &&
+ skip_init_check = var->mode() != CONST_LEGACY &&
var->initializer_position() < proxy->position();
}
@@ -1542,18 +1566,18 @@ void FullCodeGenerator::EmitVariableLoad(VariableProxy* proxy) {
GetVar(v0, var);
__ LoadRoot(at, Heap::kTheHoleValueRootIndex);
__ subu(at, v0, at); // Sub as compare: at == 0 on eq.
- if (var->mode() == LET || var->mode() == CONST_HARMONY) {
+ if (var->mode() == LET || var->mode() == CONST) {
// Throw a reference error when using an uninitialized let/const
// binding in harmony mode.
Label done;
__ Branch(&done, ne, at, Operand(zero_reg));
__ li(a0, Operand(var->name()));
__ push(a0);
- __ CallRuntime(Runtime::kThrowReferenceError, 1);
+ __ CallRuntime(Runtime::kHiddenThrowReferenceError, 1);
__ bind(&done);
} else {
         // Uninitialized const bindings outside of harmony mode are unholed.
- ASSERT(var->mode() == CONST);
+ ASSERT(var->mode() == CONST_LEGACY);
__ LoadRoot(a0, Heap::kUndefinedValueRootIndex);
__ Movz(v0, a0, at); // Conditional move: Undefined if TheHole.
}
@@ -1566,15 +1590,15 @@ void FullCodeGenerator::EmitVariableLoad(VariableProxy* proxy) {
}
case Variable::LOOKUP: {
+ Comment cmnt(masm_, "[ Lookup variable");
Label done, slow;
// Generate code for loading from variables potentially shadowed
// by eval-introduced variables.
EmitDynamicLookupFastCase(var, NOT_INSIDE_TYPEOF, &slow, &done);
__ bind(&slow);
- Comment cmnt(masm_, "Lookup variable");
__ li(a1, Operand(var->name()));
__ Push(cp, a1); // Context and name.
- __ CallRuntime(Runtime::kLoadContextSlot, 2);
+ __ CallRuntime(Runtime::kHiddenLoadContextSlot, 2);
__ bind(&done);
context()->Plug(v0);
}
@@ -1606,7 +1630,7 @@ void FullCodeGenerator::VisitRegExpLiteral(RegExpLiteral* expr) {
__ li(a2, Operand(expr->pattern()));
__ li(a1, Operand(expr->flags()));
__ Push(t0, a3, a2, a1);
- __ CallRuntime(Runtime::kMaterializeRegExpLiteral, 4);
+ __ CallRuntime(Runtime::kHiddenMaterializeRegExpLiteral, 4);
__ mov(t1, v0);
__ bind(&materialized);
@@ -1618,7 +1642,7 @@ void FullCodeGenerator::VisitRegExpLiteral(RegExpLiteral* expr) {
__ bind(&runtime_allocate);
__ li(a0, Operand(Smi::FromInt(size)));
__ Push(t1, a0);
- __ CallRuntime(Runtime::kAllocateInNewSpace, 1);
+ __ CallRuntime(Runtime::kHiddenAllocateInNewSpace, 1);
__ pop(t1);
__ bind(&allocated);
@@ -1659,12 +1683,11 @@ void FullCodeGenerator::VisitObjectLiteral(ObjectLiteral* expr) {
: ObjectLiteral::kNoFlags;
__ li(a0, Operand(Smi::FromInt(flags)));
int properties_count = constant_properties->length() / 2;
- if ((FLAG_track_double_fields && expr->may_store_doubles()) ||
- expr->depth() > 1 || Serializer::enabled() ||
+ if (expr->may_store_doubles() || expr->depth() > 1 || Serializer::enabled() ||
flags != ObjectLiteral::kFastElements ||
properties_count > FastCloneShallowObjectStub::kMaximumClonedProperties) {
__ Push(a3, a2, a1, a0);
- __ CallRuntime(Runtime::kCreateObjectLiteral, 4);
+ __ CallRuntime(Runtime::kHiddenCreateObjectLiteral, 4);
} else {
FastCloneShallowObjectStub stub(properties_count);
__ CallStub(&stub);
@@ -1703,7 +1726,7 @@ void FullCodeGenerator::VisitObjectLiteral(ObjectLiteral* expr) {
__ mov(a0, result_register());
__ li(a2, Operand(key->value()));
__ lw(a1, MemOperand(sp));
- CallStoreIC(NOT_CONTEXTUAL, key->LiteralFeedbackId());
+ CallStoreIC(key->LiteralFeedbackId());
PrepareForBailoutForId(key->id(), NO_REGISTERS);
} else {
VisitForEffect(value);
@@ -1818,7 +1841,7 @@ void FullCodeGenerator::VisitArrayLiteral(ArrayLiteral* expr) {
length > FastCloneShallowArrayStub::kMaximumClonedLength) {
__ li(a0, Operand(Smi::FromInt(flags)));
__ Push(a3, a2, a1, a0);
- __ CallRuntime(Runtime::kCreateArrayLiteral, 4);
+ __ CallRuntime(Runtime::kHiddenCreateArrayLiteral, 4);
} else {
ASSERT(IsFastSmiOrObjectElementsKind(constant_elements_kind) ||
FLAG_smi_only_arrays);
@@ -1879,13 +1902,9 @@ void FullCodeGenerator::VisitArrayLiteral(ArrayLiteral* expr) {
void FullCodeGenerator::VisitAssignment(Assignment* expr) {
+ ASSERT(expr->target()->IsValidLeftHandSide());
+
Comment cmnt(masm_, "[ Assignment");
- // Invalid left-hand sides are rewritten to have a 'throw ReferenceError'
- // on the left-hand side.
- if (!expr->target()->IsValidLeftHandSide()) {
- VisitForEffect(expr->target());
- return;
- }
// Left-hand side can only be a property, a global or a (parameter or local)
// slot.
@@ -2024,7 +2043,7 @@ void FullCodeGenerator::VisitYield(Yield* expr) {
__ Addu(a1, fp, Operand(StandardFrameConstants::kExpressionsOffset));
__ Branch(&post_runtime, eq, sp, Operand(a1));
__ push(v0); // generator object
- __ CallRuntime(Runtime::kSuspendJSGeneratorObject, 1);
+ __ CallRuntime(Runtime::kHiddenSuspendJSGeneratorObject, 1);
__ lw(cp, MemOperand(fp, StandardFrameConstants::kContextOffset));
__ bind(&post_runtime);
__ pop(result_register());
@@ -2092,7 +2111,7 @@ void FullCodeGenerator::VisitYield(Yield* expr) {
__ mov(a1, cp);
__ RecordWriteField(a0, JSGeneratorObject::kContextOffset, a1, a2,
kRAHasBeenSaved, kDontSaveFPRegs);
- __ CallRuntime(Runtime::kSuspendJSGeneratorObject, 1);
+ __ CallRuntime(Runtime::kHiddenSuspendJSGeneratorObject, 1);
__ lw(cp, MemOperand(fp, StandardFrameConstants::kContextOffset));
__ pop(v0); // result
EmitReturnSequence();
@@ -2111,7 +2130,7 @@ void FullCodeGenerator::VisitYield(Yield* expr) {
__ lw(a1, MemOperand(sp, kPointerSize));
__ lw(a0, MemOperand(sp, 2 * kPointerSize));
Handle<Code> ic = isolate()->builtins()->KeyedLoadIC_Initialize();
- CallIC(ic, NOT_CONTEXTUAL, TypeFeedbackId::None());
+ CallIC(ic, TypeFeedbackId::None());
__ mov(a0, v0);
__ mov(a1, a0);
__ sw(a1, MemOperand(sp, 2 * kPointerSize));
@@ -2147,7 +2166,7 @@ void FullCodeGenerator::EmitGeneratorResume(Expression *generator,
Expression *value,
JSGeneratorObject::ResumeMode resume_mode) {
// The value stays in a0, and is ultimately read by the resumed generator, as
- // if the CallRuntime(Runtime::kSuspendJSGeneratorObject) returned it. Or it
+ // if CallRuntime(Runtime::kHiddenSuspendJSGeneratorObject) returned it. Or it
// is read to throw the value when the resumed generator is already closed.
// a1 will hold the generator object until the activation has been resumed.
VisitForStackValue(generator);
@@ -2229,7 +2248,7 @@ void FullCodeGenerator::EmitGeneratorResume(Expression *generator,
ASSERT(!result_register().is(a1));
__ Push(a1, result_register());
__ Push(Smi::FromInt(resume_mode));
- __ CallRuntime(Runtime::kResumeJSGeneratorObject, 3);
+ __ CallRuntime(Runtime::kHiddenResumeJSGeneratorObject, 3);
// Not reached: the runtime call returns elsewhere.
__ stop("not-reached");
@@ -2244,14 +2263,14 @@ void FullCodeGenerator::EmitGeneratorResume(Expression *generator,
} else {
// Throw the provided value.
__ push(a0);
- __ CallRuntime(Runtime::kThrow, 1);
+ __ CallRuntime(Runtime::kHiddenThrow, 1);
}
__ jmp(&done);
// Throw error if we attempt to operate on a running generator.
__ bind(&wrong_state);
__ push(a1);
- __ CallRuntime(Runtime::kThrowGeneratorStateError, 1);
+ __ CallRuntime(Runtime::kHiddenThrowGeneratorStateError, 1);
__ bind(&done);
context()->Plug(result_register());
@@ -2269,7 +2288,7 @@ void FullCodeGenerator::EmitCreateIteratorResult(bool done) {
__ bind(&gc_required);
__ Push(Smi::FromInt(map->instance_size()));
- __ CallRuntime(Runtime::kAllocateInNewSpace, 1);
+ __ CallRuntime(Runtime::kHiddenAllocateInNewSpace, 1);
__ lw(context_register(),
MemOperand(fp, StandardFrameConstants::kContextOffset));
@@ -2309,7 +2328,7 @@ void FullCodeGenerator::EmitKeyedPropertyLoad(Property* prop) {
__ mov(a0, result_register());
// Call keyed load IC. It has arguments key and receiver in a0 and a1.
Handle<Code> ic = isolate()->builtins()->KeyedLoadIC_Initialize();
- CallIC(ic, NOT_CONTEXTUAL, prop->PropertyFeedbackId());
+ CallIC(ic, prop->PropertyFeedbackId());
}
@@ -2337,8 +2356,7 @@ void FullCodeGenerator::EmitInlineSmiBinaryOp(BinaryOperation* expr,
__ bind(&stub_call);
BinaryOpICStub stub(op, mode);
- CallIC(stub.GetCode(isolate()), NOT_CONTEXTUAL,
- expr->BinaryOperationFeedbackId());
+ CallIC(stub.GetCode(isolate()), expr->BinaryOperationFeedbackId());
patch_site.EmitPatchInfo();
__ jmp(&done);
@@ -2417,20 +2435,14 @@ void FullCodeGenerator::EmitBinaryOp(BinaryOperation* expr,
__ pop(a1);
BinaryOpICStub stub(op, mode);
JumpPatchSite patch_site(masm_); // unbound, signals no inlined smi code.
- CallIC(stub.GetCode(isolate()), NOT_CONTEXTUAL,
- expr->BinaryOperationFeedbackId());
+ CallIC(stub.GetCode(isolate()), expr->BinaryOperationFeedbackId());
patch_site.EmitPatchInfo();
context()->Plug(v0);
}
void FullCodeGenerator::EmitAssignment(Expression* expr) {
- // Invalid left-hand sides are rewritten by the parser to have a 'throw
- // ReferenceError' on the left-hand side.
- if (!expr->IsValidLeftHandSide()) {
- VisitForEffect(expr);
- return;
- }
+ ASSERT(expr->IsValidLeftHandSide());
// Left-hand side can only be a property, a global or a (parameter or local)
// slot.
@@ -2456,7 +2468,7 @@ void FullCodeGenerator::EmitAssignment(Expression* expr) {
__ mov(a1, result_register());
__ pop(a0); // Restore value.
__ li(a2, Operand(prop->key()->AsLiteral()->value()));
- CallStoreIC(NOT_CONTEXTUAL);
+ CallStoreIC();
break;
}
case KEYED_PROPERTY: {
@@ -2465,7 +2477,7 @@ void FullCodeGenerator::EmitAssignment(Expression* expr) {
VisitForAccumulatorValue(prop->key());
__ mov(a1, result_register());
__ Pop(a0, a2); // a0 = restored value.
- Handle<Code> ic = is_classic_mode()
+ Handle<Code> ic = strict_mode() == SLOPPY
? isolate()->builtins()->KeyedStoreIC_Initialize()
: isolate()->builtins()->KeyedStoreIC_Initialize_Strict();
CallIC(ic);
@@ -2476,43 +2488,58 @@ void FullCodeGenerator::EmitAssignment(Expression* expr) {
}
-void FullCodeGenerator::EmitVariableAssignment(Variable* var,
- Token::Value op) {
+void FullCodeGenerator::EmitStoreToStackLocalOrContextSlot(
+ Variable* var, MemOperand location) {
+ __ sw(result_register(), location);
+ if (var->IsContextSlot()) {
+ // RecordWrite may destroy all its register arguments.
+ __ Move(a3, result_register());
+ int offset = Context::SlotOffset(var->index());
+ __ RecordWriteContextSlot(
+ a1, offset, a3, a2, kRAHasBeenSaved, kDontSaveFPRegs);
+ }
+}
+
+
+void FullCodeGenerator::EmitCallStoreContextSlot(
+ Handle<String> name, StrictMode strict_mode) {
+ __ li(a1, Operand(name));
+ __ li(a0, Operand(Smi::FromInt(strict_mode)));
+ __ Push(v0, cp, a1, a0); // Value, context, name, strict mode.
+ __ CallRuntime(Runtime::kHiddenStoreContextSlot, 4);
+}
+
+
+void FullCodeGenerator::EmitVariableAssignment(Variable* var, Token::Value op) {
if (var->IsUnallocated()) {
// Global var, const, or let.
__ mov(a0, result_register());
__ li(a2, Operand(var->name()));
__ lw(a1, GlobalObjectOperand());
- CallStoreIC(CONTEXTUAL);
- } else if (op == Token::INIT_CONST) {
+ CallStoreIC();
+
+ } else if (op == Token::INIT_CONST_LEGACY) {
// Const initializers need a write barrier.
ASSERT(!var->IsParameter()); // No const parameters.
- if (var->IsStackLocal()) {
- Label skip;
- __ lw(a1, StackOperand(var));
- __ LoadRoot(t0, Heap::kTheHoleValueRootIndex);
- __ Branch(&skip, ne, a1, Operand(t0));
- __ sw(result_register(), StackOperand(var));
- __ bind(&skip);
- } else {
- ASSERT(var->IsContextSlot() || var->IsLookupSlot());
- // Like var declarations, const declarations are hoisted to function
- // scope. However, unlike var initializers, const initializers are
- // able to drill a hole to that function context, even from inside a
- // 'with' context. We thus bypass the normal static scope lookup for
- // var->IsContextSlot().
+ if (var->IsLookupSlot()) {
__ li(a0, Operand(var->name()));
__ Push(v0, cp, a0); // Context and name.
- __ CallRuntime(Runtime::kInitializeConstContextSlot, 3);
+ __ CallRuntime(Runtime::kHiddenInitializeConstContextSlot, 3);
+ } else {
+ ASSERT(var->IsStackAllocated() || var->IsContextSlot());
+ Label skip;
+ MemOperand location = VarOperand(var, a1);
+ __ lw(a2, location);
+ __ LoadRoot(at, Heap::kTheHoleValueRootIndex);
+ __ Branch(&skip, ne, a2, Operand(at));
+ EmitStoreToStackLocalOrContextSlot(var, location);
+ __ bind(&skip);
}
} else if (var->mode() == LET && op != Token::INIT_LET) {
// Non-initializing assignment to let variable needs a write barrier.
if (var->IsLookupSlot()) {
- __ li(a1, Operand(var->name()));
- __ li(a0, Operand(Smi::FromInt(language_mode())));
- __ Push(v0, cp, a1, a0); // Value, context, name, strict mode.
- __ CallRuntime(Runtime::kStoreContextSlot, 4);
+ EmitCallStoreContextSlot(var->name(), strict_mode());
} else {
ASSERT(var->IsStackAllocated() || var->IsContextSlot());
Label assign;
@@ -2522,23 +2549,19 @@ void FullCodeGenerator::EmitVariableAssignment(Variable* var,
__ Branch(&assign, ne, a3, Operand(t0));
__ li(a3, Operand(var->name()));
__ push(a3);
- __ CallRuntime(Runtime::kThrowReferenceError, 1);
+ __ CallRuntime(Runtime::kHiddenThrowReferenceError, 1);
// Perform the assignment.
__ bind(&assign);
- __ sw(result_register(), location);
- if (var->IsContextSlot()) {
- // RecordWrite may destroy all its register arguments.
- __ mov(a3, result_register());
- int offset = Context::SlotOffset(var->index());
- __ RecordWriteContextSlot(
- a1, offset, a3, a2, kRAHasBeenSaved, kDontSaveFPRegs);
- }
+ EmitStoreToStackLocalOrContextSlot(var, location);
}
- } else if (!var->is_const_mode() || op == Token::INIT_CONST_HARMONY) {
+ } else if (!var->is_const_mode() || op == Token::INIT_CONST) {
// Assignment to var or initializing assignment to let/const
// in harmony mode.
- if (var->IsStackAllocated() || var->IsContextSlot()) {
+ if (var->IsLookupSlot()) {
+ EmitCallStoreContextSlot(var->name(), strict_mode());
+ } else {
+ ASSERT((var->IsStackAllocated() || var->IsContextSlot()));
MemOperand location = VarOperand(var, a1);
if (generate_debug_code_ && op == Token::INIT_LET) {
// Check for an uninitialized let binding.
@@ -2546,23 +2569,10 @@ void FullCodeGenerator::EmitVariableAssignment(Variable* var,
__ LoadRoot(t0, Heap::kTheHoleValueRootIndex);
__ Check(eq, kLetBindingReInitialization, a2, Operand(t0));
}
- // Perform the assignment.
- __ sw(v0, location);
- if (var->IsContextSlot()) {
- __ mov(a3, v0);
- int offset = Context::SlotOffset(var->index());
- __ RecordWriteContextSlot(
- a1, offset, a3, a2, kRAHasBeenSaved, kDontSaveFPRegs);
- }
- } else {
- ASSERT(var->IsLookupSlot());
- __ li(a1, Operand(var->name()));
- __ li(a0, Operand(Smi::FromInt(language_mode())));
- __ Push(v0, cp, a1, a0); // Value, context, name, strict mode.
- __ CallRuntime(Runtime::kStoreContextSlot, 4);
+ EmitStoreToStackLocalOrContextSlot(var, location);
}
}
- // Non-initializing assignments to consts are ignored.
+ // Non-initializing assignments to consts are ignored.
}
@@ -2578,7 +2588,7 @@ void FullCodeGenerator::EmitNamedPropertyAssignment(Assignment* expr) {
__ li(a2, Operand(prop->key()->AsLiteral()->value()));
__ pop(a1);
- CallStoreIC(NOT_CONTEXTUAL, expr->AssignmentFeedbackId());
+ CallStoreIC(expr->AssignmentFeedbackId());
PrepareForBailoutForId(expr->AssignmentId(), TOS_REG);
context()->Plug(v0);
@@ -2598,10 +2608,10 @@ void FullCodeGenerator::EmitKeyedPropertyAssignment(Assignment* expr) {
__ mov(a0, result_register());
__ Pop(a2, a1); // a1 = key.
- Handle<Code> ic = is_classic_mode()
+ Handle<Code> ic = strict_mode() == SLOPPY
? isolate()->builtins()->KeyedStoreIC_Initialize()
: isolate()->builtins()->KeyedStoreIC_Initialize_Strict();
- CallIC(ic, NOT_CONTEXTUAL, expr->AssignmentFeedbackId());
+ CallIC(ic, expr->AssignmentFeedbackId());
PrepareForBailoutForId(expr->AssignmentId(), TOS_REG);
context()->Plug(v0);
@@ -2628,10 +2638,8 @@ void FullCodeGenerator::VisitProperty(Property* expr) {
void FullCodeGenerator::CallIC(Handle<Code> code,
- ContextualMode mode,
TypeFeedbackId id) {
ic_total_count_++;
- ASSERT(mode != CONTEXTUAL || id.IsNone());
__ Call(code, RelocInfo::CODE_TARGET, id);
}
@@ -2650,7 +2658,7 @@ void FullCodeGenerator::EmitCallWithIC(Call* expr) {
PrepareForBailout(callee, NO_REGISTERS);
}
// Push undefined as receiver. This is patched in the method prologue if it
- // is a classic mode method.
+ // is a sloppy mode method.
__ Push(isolate()->factory()->undefined_value());
flags = NO_CALL_FUNCTION_FLAGS;
} else {
@@ -2741,15 +2749,15 @@ void FullCodeGenerator::EmitCallWithStub(Call* expr) {
SetSourcePosition(expr->position());
Handle<Object> uninitialized =
- TypeFeedbackCells::UninitializedSentinel(isolate());
- Handle<Cell> cell = isolate()->factory()->NewCell(uninitialized);
- RecordTypeFeedbackCell(expr->CallFeedbackId(), cell);
- __ li(a2, Operand(cell));
+ TypeFeedbackInfo::UninitializedSentinel(isolate());
+ StoreFeedbackVectorSlot(expr->CallFeedbackSlot(), uninitialized);
+ __ li(a2, FeedbackVector());
+ __ li(a3, Operand(Smi::FromInt(expr->CallFeedbackSlot())));
// Record call targets in unoptimized code.
CallFunctionStub stub(arg_count, RECORD_CALL_TARGET);
__ lw(a1, MemOperand(sp, (arg_count + 1) * kPointerSize));
- __ CallStub(&stub, expr->CallFeedbackId());
+ __ CallStub(&stub);
RecordJSReturnSite(expr);
// Restore context register.
__ lw(cp, MemOperand(fp, StandardFrameConstants::kContextOffset));
@@ -2769,15 +2777,15 @@ void FullCodeGenerator::EmitResolvePossiblyDirectEval(int arg_count) {
int receiver_offset = 2 + info_->scope()->num_parameters();
__ lw(t1, MemOperand(fp, receiver_offset * kPointerSize));
- // t0: the language mode.
- __ li(t0, Operand(Smi::FromInt(language_mode())));
+ // t0: the strict mode.
+ __ li(t0, Operand(Smi::FromInt(strict_mode())));
// a1: the start position of the scope the calls resides in.
__ li(a1, Operand(Smi::FromInt(scope()->start_position())));
// Do the runtime call.
__ Push(t2, t1, t0, a1);
- __ CallRuntime(Runtime::kResolvePossiblyDirectEval, 5);
+ __ CallRuntime(Runtime::kHiddenResolvePossiblyDirectEval, 5);
}
@@ -2793,8 +2801,8 @@ void FullCodeGenerator::VisitCall(Call* expr) {
Call::CallType call_type = expr->GetCallType(isolate());
if (call_type == Call::POSSIBLY_EVAL_CALL) {
- // In a call to eval, we first call %ResolvePossiblyDirectEval to
- // resolve the function we need to call and the receiver of the
+ // In a call to eval, we first call RuntimeHidden_ResolvePossiblyDirectEval
+ // to resolve the function we need to call and the receiver of the
// call. Then we call the resolved function using the given
// arguments.
ZoneList<Expression*>* args = expr->arguments();
@@ -2849,7 +2857,7 @@ void FullCodeGenerator::VisitCall(Call* expr) {
ASSERT(!context_register().is(a2));
__ li(a2, Operand(proxy->name()));
__ Push(context_register(), a2);
- __ CallRuntime(Runtime::kLoadContextSlot, 2);
+ __ CallRuntime(Runtime::kHiddenLoadContextSlot, 2);
__ Push(v0, v1); // Function, receiver.
// If fast case code has been generated, emit code to push the
@@ -2928,10 +2936,17 @@ void FullCodeGenerator::VisitCallNew(CallNew* expr) {
// Record call targets in unoptimized code.
Handle<Object> uninitialized =
- TypeFeedbackCells::UninitializedSentinel(isolate());
- Handle<Cell> cell = isolate()->factory()->NewCell(uninitialized);
- RecordTypeFeedbackCell(expr->CallNewFeedbackId(), cell);
- __ li(a2, Operand(cell));
+ TypeFeedbackInfo::UninitializedSentinel(isolate());
+ StoreFeedbackVectorSlot(expr->CallNewFeedbackSlot(), uninitialized);
+ if (FLAG_pretenuring_call_new) {
+ StoreFeedbackVectorSlot(expr->AllocationSiteFeedbackSlot(),
+ isolate()->factory()->NewAllocationSite());
+ ASSERT(expr->AllocationSiteFeedbackSlot() ==
+ expr->CallNewFeedbackSlot() + 1);
+ }
+
+ __ li(a2, FeedbackVector());
+ __ li(a3, Operand(Smi::FromInt(expr->CallNewFeedbackSlot())));
CallConstructStub stub(RECORD_CALL_TARGET);
__ Call(stub.GetCode(isolate()), RelocInfo::CONSTRUCT_CALL);
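
With FLAG_pretenuring_call_new the constructor call reserves two adjacent feedback-vector slots: one for the call-target feedback and the next for a pre-created AllocationSite, which is exactly what the ASSERT about CallNewFeedbackSlot() + 1 pins down. A small sketch of that adjacency invariant, using a hypothetical slot allocator in place of the real AST numbering:

#include <cassert>

// Hypothetical allocator standing in for the AST's feedback-slot numbering;
// only the adjacency invariant matters here.
struct FeedbackSlotAllocator {
  int next_slot = 0;
  int Allocate() { return next_slot++; }
};

struct CallNewSlots {
  int call_new_slot;
  int allocation_site_slot;  // only meaningful when pretenuring is on
};

CallNewSlots AllocateCallNewSlots(FeedbackSlotAllocator* alloc,
                                  bool pretenuring_call_new) {
  CallNewSlots slots;
  slots.call_new_slot = alloc->Allocate();
  slots.allocation_site_slot = pretenuring_call_new ? alloc->Allocate() : -1;
  if (pretenuring_call_new) {
    // Mirrors: ASSERT(AllocationSiteFeedbackSlot() == CallNewFeedbackSlot() + 1)
    assert(slots.allocation_site_slot == slots.call_new_slot + 1);
  }
  return slots;
}
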
@@ -3409,7 +3424,7 @@ void FullCodeGenerator::EmitLog(CallRuntime* expr) {
if (CodeGenerator::ShouldGenerateLog(isolate(), args->at(0))) {
VisitForStackValue(args->at(1));
VisitForStackValue(args->at(2));
- __ CallRuntime(Runtime::kLog, 2);
+ __ CallRuntime(Runtime::kHiddenLog, 2);
}
// Finally, we're expected to leave a value on the top of the stack.
@@ -3506,7 +3521,7 @@ void FullCodeGenerator::EmitDateField(CallRuntime* expr) {
}
__ bind(&not_date_object);
- __ CallRuntime(Runtime::kThrowNotDateError, 0);
+ __ CallRuntime(Runtime::kHiddenThrowNotDateError, 0);
__ bind(&done);
context()->Plug(v0);
}
@@ -3897,7 +3912,7 @@ void FullCodeGenerator::EmitGetFromCache(CallRuntime* expr) {
__ bind(&not_found);
// Call runtime to perform the lookup.
__ Push(cache, key);
- __ CallRuntime(Runtime::kGetFromCache, 2);
+ __ CallRuntime(Runtime::kHiddenGetFromCache, 2);
__ bind(&done);
context()->Plug(v0);
@@ -4178,8 +4193,8 @@ void FullCodeGenerator::EmitFastAsciiArrayJoin(CallRuntime* expr) {
void FullCodeGenerator::VisitCallRuntime(CallRuntime* expr) {
- Handle<String> name = expr->name();
- if (name->length() > 0 && name->Get(0) == '_') {
+ if (expr->function() != NULL &&
+ expr->function()->intrinsic_type == Runtime::INLINE) {
Comment cmnt(masm_, "[ InlineRuntimeCall");
EmitInlineRuntimeCall(expr);
return;
@@ -4242,9 +4257,7 @@ void FullCodeGenerator::VisitUnaryOperation(UnaryOperation* expr) {
if (property != NULL) {
VisitForStackValue(property->obj());
VisitForStackValue(property->key());
- StrictModeFlag strict_mode_flag = (language_mode() == CLASSIC_MODE)
- ? kNonStrictMode : kStrictMode;
- __ li(a1, Operand(Smi::FromInt(strict_mode_flag)));
+ __ li(a1, Operand(Smi::FromInt(strict_mode())));
__ push(a1);
__ InvokeBuiltin(Builtins::DELETE, CALL_FUNCTION);
context()->Plug(v0);
@@ -4252,11 +4265,11 @@ void FullCodeGenerator::VisitUnaryOperation(UnaryOperation* expr) {
Variable* var = proxy->var();
// Delete of an unqualified identifier is disallowed in strict mode
// but "delete this" is allowed.
- ASSERT(language_mode() == CLASSIC_MODE || var->is_this());
+ ASSERT(strict_mode() == SLOPPY || var->is_this());
if (var->IsUnallocated()) {
__ lw(a2, GlobalObjectOperand());
__ li(a1, Operand(var->name()));
- __ li(a0, Operand(Smi::FromInt(kNonStrictMode)));
+ __ li(a0, Operand(Smi::FromInt(SLOPPY)));
__ Push(a2, a1, a0);
__ InvokeBuiltin(Builtins::DELETE, CALL_FUNCTION);
context()->Plug(v0);
@@ -4270,7 +4283,7 @@ void FullCodeGenerator::VisitUnaryOperation(UnaryOperation* expr) {
ASSERT(!context_register().is(a2));
__ li(a2, Operand(var->name()));
__ Push(context_register(), a2);
- __ CallRuntime(Runtime::kDeleteContextSlot, 2);
+ __ CallRuntime(Runtime::kHiddenDeleteContextSlot, 2);
context()->Plug(v0);
}
} else {
@@ -4345,16 +4358,11 @@ void FullCodeGenerator::VisitUnaryOperation(UnaryOperation* expr) {
void FullCodeGenerator::VisitCountOperation(CountOperation* expr) {
+ ASSERT(expr->expression()->IsValidLeftHandSide());
+
Comment cmnt(masm_, "[ CountOperation");
SetSourcePosition(expr->position());
- // Invalid left-hand sides are rewritten to have a 'throw ReferenceError'
- // as the left-hand side.
- if (!expr->expression()->IsValidLeftHandSide()) {
- VisitForEffect(expr->expression());
- return;
- }
-
// Expression can only be a property, a global or a (parameter or local)
// slot.
enum LhsKind { VARIABLE, NAMED_PROPERTY, KEYED_PROPERTY };
@@ -4471,9 +4479,7 @@ void FullCodeGenerator::VisitCountOperation(CountOperation* expr) {
SetSourcePosition(expr->position());
BinaryOpICStub stub(Token::ADD, NO_OVERWRITE);
- CallIC(stub.GetCode(isolate()),
- NOT_CONTEXTUAL,
- expr->CountBinOpFeedbackId());
+ CallIC(stub.GetCode(isolate()), expr->CountBinOpFeedbackId());
patch_site.EmitPatchInfo();
__ bind(&done);
@@ -4503,7 +4509,7 @@ void FullCodeGenerator::VisitCountOperation(CountOperation* expr) {
__ mov(a0, result_register()); // Value.
__ li(a2, Operand(prop->key()->AsLiteral()->value())); // Name.
__ pop(a1); // Receiver.
- CallStoreIC(NOT_CONTEXTUAL, expr->CountStoreFeedbackId());
+ CallStoreIC(expr->CountStoreFeedbackId());
PrepareForBailoutForId(expr->AssignmentId(), TOS_REG);
if (expr->is_postfix()) {
if (!context()->IsEffect()) {
@@ -4517,10 +4523,10 @@ void FullCodeGenerator::VisitCountOperation(CountOperation* expr) {
case KEYED_PROPERTY: {
__ mov(a0, result_register()); // Value.
__ Pop(a2, a1); // a1 = key, a2 = receiver.
- Handle<Code> ic = is_classic_mode()
+ Handle<Code> ic = strict_mode() == SLOPPY
? isolate()->builtins()->KeyedStoreIC_Initialize()
: isolate()->builtins()->KeyedStoreIC_Initialize_Strict();
- CallIC(ic, NOT_CONTEXTUAL, expr->CountStoreFeedbackId());
+ CallIC(ic, expr->CountStoreFeedbackId());
PrepareForBailoutForId(expr->AssignmentId(), TOS_REG);
if (expr->is_postfix()) {
if (!context()->IsEffect()) {
@@ -4540,7 +4546,7 @@ void FullCodeGenerator::VisitForTypeofValue(Expression* expr) {
ASSERT(!context()->IsTest());
VariableProxy* proxy = expr->AsVariableProxy();
if (proxy != NULL && proxy->var()->IsUnallocated()) {
- Comment cmnt(masm_, "Global variable");
+ Comment cmnt(masm_, "[ Global variable");
__ lw(a0, GlobalObjectOperand());
__ li(a2, Operand(proxy->name()));
// Use a regular load, not a contextual load, to avoid a reference
@@ -4549,6 +4555,7 @@ void FullCodeGenerator::VisitForTypeofValue(Expression* expr) {
PrepareForBailout(expr, TOS_REG);
context()->Plug(v0);
} else if (proxy != NULL && proxy->var()->IsLookupSlot()) {
+ Comment cmnt(masm_, "[ Lookup slot");
Label done, slow;
// Generate code for loading from variables potentially shadowed
@@ -4558,7 +4565,7 @@ void FullCodeGenerator::VisitForTypeofValue(Expression* expr) {
__ bind(&slow);
__ li(a0, Operand(proxy->name()));
__ Push(cp, a0);
- __ CallRuntime(Runtime::kLoadContextSlotNoReferenceError, 2);
+ __ CallRuntime(Runtime::kHiddenLoadContextSlotNoReferenceError, 2);
PrepareForBailout(expr, TOS_REG);
__ bind(&done);
@@ -4705,7 +4712,7 @@ void FullCodeGenerator::VisitCompareOperation(CompareOperation* expr) {
// Record position and call the compare IC.
SetSourcePosition(expr->position());
Handle<Code> ic = CompareIC::GetUninitialized(isolate(), op);
- CallIC(ic, NOT_CONTEXTUAL, expr->CompareOperationFeedbackId());
+ CallIC(ic, expr->CompareOperationFeedbackId());
patch_site.EmitPatchInfo();
PrepareForBailoutBeforeSplit(expr, true, if_true, if_false);
Split(cc, v0, Operand(zero_reg), if_true, if_false, fall_through);
@@ -4739,7 +4746,7 @@ void FullCodeGenerator::EmitLiteralCompareNil(CompareOperation* expr,
Split(eq, a0, Operand(a1), if_true, if_false, fall_through);
} else {
Handle<Code> ic = CompareNilICStub::GetUninitialized(isolate(), nil);
- CallIC(ic, NOT_CONTEXTUAL, expr->CompareOperationFeedbackId());
+ CallIC(ic, expr->CompareOperationFeedbackId());
Split(ne, v0, Operand(zero_reg), if_true, if_false, fall_through);
}
context()->Plug(if_true, if_false);
diff --git a/deps/v8/src/mips/ic-mips.cc b/deps/v8/src/mips/ic-mips.cc
index 14d1cd6827..09ffe95c02 100644
--- a/deps/v8/src/mips/ic-mips.cc
+++ b/deps/v8/src/mips/ic-mips.cc
@@ -229,7 +229,8 @@ static void GenerateKeyedLoadReceiverCheck(MacroAssembler* masm,
__ lw(map, FieldMemOperand(receiver, HeapObject::kMapOffset));
// Check bit field.
__ lbu(scratch, FieldMemOperand(map, Map::kBitFieldOffset));
- __ And(at, scratch, Operand(KeyedLoadIC::kSlowCaseBitFieldMask));
+ __ And(at, scratch,
+ Operand((1 << Map::kIsAccessCheckNeeded) | (1 << interceptor_bit)));
__ Branch(slow, ne, at, Operand(zero_reg));
// Check that the object is some kind of JS object EXCEPT JS Value type.
// In the case that the object is a value-wrapper object,
@@ -338,8 +339,7 @@ static void GenerateKeyNameCheck(MacroAssembler* masm,
}
-void LoadIC::GenerateMegamorphic(MacroAssembler* masm,
- ExtraICState extra_state) {
+void LoadIC::GenerateMegamorphic(MacroAssembler* masm) {
// ----------- S t a t e -------------
// -- a2 : name
// -- ra : return address
@@ -347,9 +347,7 @@ void LoadIC::GenerateMegamorphic(MacroAssembler* masm,
// -----------------------------------
// Probe the stub cache.
- Code::Flags flags = Code::ComputeFlags(
- Code::HANDLER, MONOMORPHIC, extra_state,
- Code::NORMAL, Code::LOAD_IC);
+ Code::Flags flags = Code::ComputeHandlerFlags(Code::LOAD_IC);
masm->isolate()->stub_cache()->GenerateProbe(
masm, flags, a0, a2, a3, t0, t1, t2);
@@ -419,6 +417,8 @@ static MemOperand GenerateMappedArgumentsLookup(MacroAssembler* masm,
Register scratch3,
Label* unmapped_case,
Label* slow_case) {
+ Heap* heap = masm->isolate()->heap();
+
// Check that the receiver is a JSObject. Because of the map check
// later, we do not need to check for interceptors or whether it
// requires access checks.
@@ -432,10 +432,11 @@ static MemOperand GenerateMappedArgumentsLookup(MacroAssembler* masm,
__ Branch(slow_case, ne, scratch1, Operand(zero_reg));
// Load the elements into scratch1 and check its map.
+ Handle<Map> arguments_map(heap->sloppy_arguments_elements_map());
__ lw(scratch1, FieldMemOperand(object, JSObject::kElementsOffset));
__ CheckMap(scratch1,
scratch2,
- Heap::kNonStrictArgumentsElementsMapRootIndex,
+ arguments_map,
slow_case,
DONT_DO_SMI_CHECK);
// Check if element is in the range of mapped arguments. If not, jump
@@ -498,7 +499,7 @@ static MemOperand GenerateUnmappedArgumentsLookup(MacroAssembler* masm,
}
-void KeyedLoadIC::GenerateNonStrictArguments(MacroAssembler* masm) {
+void KeyedLoadIC::GenerateSloppyArguments(MacroAssembler* masm) {
// ---------- S t a t e --------------
// -- lr : return address
// -- a0 : key
@@ -523,7 +524,7 @@ void KeyedLoadIC::GenerateNonStrictArguments(MacroAssembler* masm) {
}
-void KeyedStoreIC::GenerateNonStrictArguments(MacroAssembler* masm) {
+void KeyedStoreIC::GenerateSloppyArguments(MacroAssembler* masm) {
// ---------- S t a t e --------------
// -- a0 : value
// -- a1 : key
@@ -649,7 +650,7 @@ void KeyedLoadIC::GenerateGeneric(MacroAssembler* masm) {
GenerateKeyNameCheck(masm, key, a2, a3, &index_name, &slow);
GenerateKeyedLoadReceiverCheck(
- masm, receiver, a2, a3, Map::kHasIndexedInterceptor, &slow);
+ masm, receiver, a2, a3, Map::kHasNamedInterceptor, &slow);
// If the receiver is a fast-case object, check the keyed lookup
@@ -802,7 +803,7 @@ void KeyedLoadIC::GenerateString(MacroAssembler* masm) {
void KeyedStoreIC::GenerateRuntimeSetProperty(MacroAssembler* masm,
- StrictModeFlag strict_mode) {
+ StrictMode strict_mode) {
// ---------- S t a t e --------------
// -- a0 : value
// -- a1 : key
@@ -994,7 +995,7 @@ static void KeyedStoreGenerateGenericHelper(
void KeyedStoreIC::GenerateGeneric(MacroAssembler* masm,
- StrictModeFlag strict_mode) {
+ StrictMode strict_mode) {
// ---------- S t a t e --------------
// -- a0 : value
// -- a1 : key
@@ -1180,8 +1181,7 @@ void KeyedStoreIC::GenerateSlow(MacroAssembler* masm) {
}
-void StoreIC::GenerateMegamorphic(MacroAssembler* masm,
- ExtraICState extra_ic_state) {
+void StoreIC::GenerateMegamorphic(MacroAssembler* masm) {
// ----------- S t a t e -------------
// -- a0 : value
// -- a1 : receiver
@@ -1190,9 +1190,7 @@ void StoreIC::GenerateMegamorphic(MacroAssembler* masm,
// -----------------------------------
// Get the receiver from the stack and probe the stub cache.
- Code::Flags flags = Code::ComputeFlags(
- Code::HANDLER, MONOMORPHIC, extra_ic_state,
- Code::NORMAL, Code::STORE_IC);
+ Code::Flags flags = Code::ComputeHandlerFlags(Code::STORE_IC);
masm->isolate()->stub_cache()->GenerateProbe(
masm, flags, a1, a2, a3, t0, t1, t2);
@@ -1240,7 +1238,7 @@ void StoreIC::GenerateNormal(MacroAssembler* masm) {
void StoreIC::GenerateRuntimeSetProperty(MacroAssembler* masm,
- StrictModeFlag strict_mode) {
+ StrictMode strict_mode) {
// ----------- S t a t e -------------
// -- a0 : value
// -- a1 : receiver
diff --git a/deps/v8/src/mips/lithium-codegen-mips.cc b/deps/v8/src/mips/lithium-codegen-mips.cc
index f033f6d348..970a1bfc25 100644
--- a/deps/v8/src/mips/lithium-codegen-mips.cc
+++ b/deps/v8/src/mips/lithium-codegen-mips.cc
@@ -84,7 +84,7 @@ void LCodeGen::FinishCode(Handle<Code> code) {
ASSERT(is_done());
code->set_stack_slots(GetStackSlotCount());
code->set_safepoint_table_offset(safepoints_.GetCodeOffset());
- RegisterDependentCodeForEmbeddedMaps(code);
+ if (code->is_optimized_code()) RegisterWeakObjectsInOptimizedCode(code);
PopulateDeoptimizationData(code);
info()->CommitDependencies(code);
}
@@ -146,11 +146,11 @@ bool LCodeGen::GeneratePrologue() {
// fp: Caller's frame pointer.
// lr: Caller's pc.
- // Classic mode functions and builtins need to replace the receiver with the
+ // Sloppy mode functions and builtins need to replace the receiver with the
// global proxy when called as functions (without an explicit receiver
// object).
if (info_->this_has_uses() &&
- info_->is_classic_mode() &&
+ info_->strict_mode() == SLOPPY &&
!info_->is_native()) {
Label ok;
int receiver_offset = info_->scope()->num_parameters() * kPointerSize;
@@ -207,7 +207,7 @@ bool LCodeGen::GeneratePrologue() {
__ CallStub(&stub);
} else {
__ push(a1);
- __ CallRuntime(Runtime::kNewFunctionContext, 1);
+ __ CallRuntime(Runtime::kHiddenNewFunctionContext, 1);
}
RecordSafepoint(Safepoint::kNoLazyDeopt);
// Context is returned in both v0. It replaces the context passed to us.
@@ -260,6 +260,9 @@ void LCodeGen::GenerateOsrPrologue() {
void LCodeGen::GenerateBodyInstructionPre(LInstruction* instr) {
+ if (instr->IsCall()) {
+ EnsureSpaceForLazyDeopt(Deoptimizer::patch_size());
+ }
if (!instr->IsLazyBailout() && !instr->IsGap()) {
safepoints_.BumpLastLazySafepointIndex();
}
@@ -274,7 +277,8 @@ bool LCodeGen::GenerateDeferredCode() {
HValue* value =
instructions_->at(code->instruction_index())->hydrogen_value();
- RecordAndWritePosition(value->position());
+ RecordAndWritePosition(
+ chunk()->graph()->SourcePositionToScriptPosition(value->position()));
Comment(";;; <@%d,#%d> "
"-------------------- Deferred %s --------------------",
@@ -407,7 +411,7 @@ Register LCodeGen::EmitLoadRegister(LOperand* op, Register scratch) {
__ li(scratch, literal);
}
return scratch;
- } else if (op->IsStackSlot() || op->IsArgument()) {
+ } else if (op->IsStackSlot()) {
__ lw(scratch, ToMemOperand(op));
return scratch;
}
@@ -443,7 +447,7 @@ DoubleRegister LCodeGen::EmitLoadDoubleRegister(LOperand* op,
} else if (r.IsTagged()) {
Abort(kUnsupportedTaggedImmediate);
}
- } else if (op->IsStackSlot() || op->IsArgument()) {
+ } else if (op->IsStackSlot()) {
MemOperand mem_op = ToMemOperand(op);
__ ldc1(dbl_scratch, mem_op);
return dbl_scratch;
@@ -661,10 +665,6 @@ void LCodeGen::AddToTranslation(LEnvironment* environment,
}
} else if (op->IsDoubleStackSlot()) {
translation->StoreDoubleStackSlot(op->index());
- } else if (op->IsArgument()) {
- ASSERT(is_tagged);
- int src_index = GetStackSlotCount() + op->index();
- translation->StoreStackSlot(src_index);
} else if (op->IsRegister()) {
Register reg = ToRegister(op);
if (is_tagged) {
@@ -866,6 +866,14 @@ void LCodeGen::PopulateDeoptimizationData(Handle<Code> code) {
translations_.CreateByteArray(isolate()->factory());
data->SetTranslationByteArray(*translations);
data->SetInlinedFunctionCount(Smi::FromInt(inlined_function_count_));
+ data->SetOptimizationId(Smi::FromInt(info_->optimization_id()));
+ if (info_->IsOptimizing()) {
+ // Reference to shared function info does not change between phases.
+ AllowDeferredHandleDereference allow_handle_dereference;
+ data->SetSharedFunctionInfo(*info_->shared_info());
+ } else {
+ data->SetSharedFunctionInfo(Smi::FromInt(0));
+ }
Handle<FixedArray> literals =
factory()->NewFixedArray(deoptimization_literals_.length(), TENURED);
@@ -1062,174 +1070,180 @@ void LCodeGen::DoUnknownOSRValue(LUnknownOSRValue* instr) {
}
-void LCodeGen::DoModI(LModI* instr) {
+void LCodeGen::DoModByPowerOf2I(LModByPowerOf2I* instr) {
+ Register dividend = ToRegister(instr->dividend());
+ int32_t divisor = instr->divisor();
+ ASSERT(dividend.is(ToRegister(instr->result())));
+
+ // Theoretically, a variation of the branch-free code for integer division by
+ // a power of 2 (calculating the remainder via an additional multiplication
+ // (which gets simplified to an 'and') and subtraction) should be faster, and
+ // this is exactly what GCC and clang emit. Nevertheless, benchmarks seem to
+ // indicate that positive dividends are heavily favored, so the branching
+ // version performs better.
HMod* hmod = instr->hydrogen();
- HValue* left = hmod->left();
- HValue* right = hmod->right();
- if (hmod->RightIsPowerOf2()) {
- const Register left_reg = ToRegister(instr->left());
- const Register result_reg = ToRegister(instr->result());
+ int32_t mask = divisor < 0 ? -(divisor + 1) : (divisor - 1);
+ Label dividend_is_not_negative, done;
+ if (hmod->CheckFlag(HValue::kLeftCanBeNegative)) {
+ __ Branch(&dividend_is_not_negative, ge, dividend, Operand(zero_reg));
// Note: The code below even works when right contains kMinInt.
- int32_t divisor = Abs(right->GetInteger32Constant());
-
- Label left_is_not_negative, done;
- if (left->CanBeNegative()) {
- __ Branch(left_reg.is(result_reg) ? PROTECT : USE_DELAY_SLOT,
- &left_is_not_negative, ge, left_reg, Operand(zero_reg));
- __ subu(result_reg, zero_reg, left_reg);
- __ And(result_reg, result_reg, divisor - 1);
- if (hmod->CheckFlag(HValue::kBailoutOnMinusZero)) {
- DeoptimizeIf(eq, instr->environment(), result_reg, Operand(zero_reg));
- }
- __ Branch(USE_DELAY_SLOT, &done);
- __ subu(result_reg, zero_reg, result_reg);
+ __ subu(dividend, zero_reg, dividend);
+ __ And(dividend, dividend, Operand(mask));
+ if (hmod->CheckFlag(HValue::kBailoutOnMinusZero)) {
+ DeoptimizeIf(eq, instr->environment(), dividend, Operand(zero_reg));
}
+ __ Branch(USE_DELAY_SLOT, &done);
+ __ subu(dividend, zero_reg, dividend);
+ }
- __ bind(&left_is_not_negative);
- __ And(result_reg, left_reg, divisor - 1);
- __ bind(&done);
- } else {
- const Register scratch = scratch0();
- const Register left_reg = ToRegister(instr->left());
- const Register result_reg = ToRegister(instr->result());
+ __ bind(&dividend_is_not_negative);
+ __ And(dividend, dividend, Operand(mask));
+ __ bind(&done);
+}
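
For reference, a minimal standalone C++ sketch of the branching remainder-by-power-of-two scheme that DoModByPowerOf2I emits, illustrative only and not part of the patch (the helper name is made up):

#include <cassert>
#include <cstdint>

// Remainder for a divisor whose magnitude is a power of two, truncating
// toward zero like C++ '%' and like the generated MIPS code: mask
// non-negative dividends directly, negate-mask-negate negative ones.
int32_t ModByPowerOf2(int32_t dividend, int32_t divisor) {
  uint32_t abs_divisor = divisor < 0 ? 0u - static_cast<uint32_t>(divisor)
                                     : static_cast<uint32_t>(divisor);
  assert(abs_divisor != 0 && (abs_divisor & (abs_divisor - 1)) == 0);
  // Same mask as the lithium instruction: |divisor| - 1, written so that it
  // also works when divisor is INT32_MIN.
  int32_t mask = divisor < 0 ? -(divisor + 1) : (divisor - 1);
  if (dividend >= 0) return dividend & mask;
  // Negative dividend: unsigned negation avoids signed-overflow UB for
  // dividend == INT32_MIN.  A zero result on this path is the -0 case that
  // the generated code deoptimizes on under kBailoutOnMinusZero.
  uint32_t negated = 0u - static_cast<uint32_t>(dividend);
  return -static_cast<int32_t>(negated & static_cast<uint32_t>(mask));
}
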
- // div runs in the background while we check for special cases.
- Register right_reg = EmitLoadRegister(instr->right(), scratch);
- __ div(left_reg, right_reg);
- Label done;
- // Check for x % 0, we have to deopt in this case because we can't return a
- // NaN.
- if (right->CanBeZero()) {
- DeoptimizeIf(eq, instr->environment(), right_reg, Operand(zero_reg));
- }
+void LCodeGen::DoModByConstI(LModByConstI* instr) {
+ Register dividend = ToRegister(instr->dividend());
+ int32_t divisor = instr->divisor();
+ Register result = ToRegister(instr->result());
+ ASSERT(!dividend.is(result));
- // Check for kMinInt % -1, we have to deopt if we care about -0, because we
- // can't return that.
- if (left->RangeCanInclude(kMinInt) && right->RangeCanInclude(-1)) {
- Label left_not_min_int;
- __ Branch(&left_not_min_int, ne, left_reg, Operand(kMinInt));
- // TODO(svenpanne) Don't deopt when we don't care about -0.
- DeoptimizeIf(eq, instr->environment(), right_reg, Operand(-1));
- __ bind(&left_not_min_int);
- }
+ if (divisor == 0) {
+ DeoptimizeIf(al, instr->environment());
+ return;
+ }
+
+ __ TruncatingDiv(result, dividend, Abs(divisor));
+ __ Mul(result, result, Operand(Abs(divisor)));
+ __ Subu(result, dividend, Operand(result));
+
+ // Check for negative zero.
+ HMod* hmod = instr->hydrogen();
+ if (hmod->CheckFlag(HValue::kBailoutOnMinusZero)) {
+ Label remainder_not_zero;
+ __ Branch(&remainder_not_zero, ne, result, Operand(zero_reg));
+ DeoptimizeIf(lt, instr->environment(), dividend, Operand(zero_reg));
+ __ bind(&remainder_not_zero);
+ }
+}
+
+
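
For reference, the reconstruction that DoModByConstI performs, as a short C++ sketch; illustrative only, not part of the patch, with plain '/' standing in for the TruncatingDiv multiply-high sequence and a made-up helper name:

#include <cassert>
#include <cstdint>

// remainder = dividend - trunc(dividend / |divisor|) * |divisor|; the sign
// of the divisor never affects the remainder.  INT32_MIN divisors are
// excluded here only to keep the sketch free of signed-overflow corners.
int32_t ModByConst(int32_t dividend, int32_t divisor) {
  assert(divisor != 0);                        // a zero divisor deoptimizes
  assert(divisor != INT32_MIN);
  int32_t abs_divisor = divisor < 0 ? -divisor : divisor;
  int32_t quotient = dividend / abs_divisor;   // TruncatingDiv in the patch
  int32_t remainder = dividend - quotient * abs_divisor;
  // kBailoutOnMinusZero: a zero remainder is only a problem when the
  // dividend is negative (the mathematical result would be -0); the
  // generated code deoptimizes there, this sketch simply returns 0.
  return remainder;
}
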
+void LCodeGen::DoModI(LModI* instr) {
+ HMod* hmod = instr->hydrogen();
+ const Register left_reg = ToRegister(instr->left());
+ const Register right_reg = ToRegister(instr->right());
+ const Register result_reg = ToRegister(instr->result());
+
+ // div runs in the background while we check for special cases.
+ __ div(left_reg, right_reg);
- // TODO(svenpanne) Only emit the test/deopt if we have to.
- __ Branch(USE_DELAY_SLOT, &done, ge, left_reg, Operand(zero_reg));
- __ mfhi(result_reg);
+ Label done;
+ // Check for x % 0; we have to deopt in this case because we can't return a
+ // NaN.
+ if (hmod->CheckFlag(HValue::kCanBeDivByZero)) {
+ DeoptimizeIf(eq, instr->environment(), right_reg, Operand(zero_reg));
+ }
+ // Check for kMinInt % -1: div will return kMinInt, which is not what we
+ // want. We have to deopt if we care about -0, because we can't return that.
+ if (hmod->CheckFlag(HValue::kCanOverflow)) {
+ Label no_overflow_possible;
+ __ Branch(&no_overflow_possible, ne, left_reg, Operand(kMinInt));
if (hmod->CheckFlag(HValue::kBailoutOnMinusZero)) {
- DeoptimizeIf(eq, instr->environment(), result_reg, Operand(zero_reg));
+ DeoptimizeIf(eq, instr->environment(), right_reg, Operand(-1));
+ } else {
+ __ Branch(&no_overflow_possible, ne, right_reg, Operand(-1));
+ __ Branch(USE_DELAY_SLOT, &done);
+ __ mov(result_reg, zero_reg);
}
- __ bind(&done);
+ __ bind(&no_overflow_possible);
+ }
+
+ // If we care about -0, test if the dividend is <0 and the result is 0.
+ __ Branch(USE_DELAY_SLOT, &done, ge, left_reg, Operand(zero_reg));
+ __ mfhi(result_reg);
+ if (hmod->CheckFlag(HValue::kBailoutOnMinusZero)) {
+ DeoptimizeIf(eq, instr->environment(), result_reg, Operand(zero_reg));
}
+ __ bind(&done);
}
-void LCodeGen::EmitSignedIntegerDivisionByConstant(
- Register result,
- Register dividend,
- int32_t divisor,
- Register remainder,
- Register scratch,
- LEnvironment* environment) {
- ASSERT(!AreAliased(dividend, scratch, at, no_reg));
+void LCodeGen::DoDivByPowerOf2I(LDivByPowerOf2I* instr) {
+ Register dividend = ToRegister(instr->dividend());
+ int32_t divisor = instr->divisor();
+ Register result = ToRegister(instr->result());
+ ASSERT(divisor == kMinInt || (divisor != 0 && IsPowerOf2(Abs(divisor))));
+ ASSERT(!result.is(dividend));
+
+ // Check for (0 / -x) that will produce negative zero.
+ HDiv* hdiv = instr->hydrogen();
+ if (hdiv->CheckFlag(HValue::kBailoutOnMinusZero) && divisor < 0) {
+ DeoptimizeIf(eq, instr->environment(), dividend, Operand(zero_reg));
+ }
+ // Check for (kMinInt / -1).
+ if (hdiv->CheckFlag(HValue::kCanOverflow) && divisor == -1) {
+ DeoptimizeIf(eq, instr->environment(), dividend, Operand(kMinInt));
+ }
+ // Deoptimize if remainder will not be 0.
+ if (!hdiv->CheckFlag(HInstruction::kAllUsesTruncatingToInt32) &&
+ divisor != 1 && divisor != -1) {
+ int32_t mask = divisor < 0 ? -(divisor + 1) : (divisor - 1);
+ __ And(at, dividend, Operand(mask));
+ DeoptimizeIf(ne, instr->environment(), at, Operand(zero_reg));
+ }
+
+ if (divisor == -1) { // Nice shortcut, not needed for correctness.
+ __ Subu(result, zero_reg, dividend);
+ return;
+ }
+ uint16_t shift = WhichPowerOf2Abs(divisor);
+ if (shift == 0) {
+ __ Move(result, dividend);
+ } else if (shift == 1) {
+ __ srl(result, dividend, 31);
+ __ Addu(result, dividend, Operand(result));
+ } else {
+ __ sra(result, dividend, 31);
+ __ srl(result, result, 32 - shift);
+ __ Addu(result, dividend, Operand(result));
+ }
+ if (shift > 0) __ sra(result, result, shift);
+ if (divisor < 0) __ Subu(result, zero_reg, result);
+}
+
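
A compact C++ rendering of the shift-with-bias sequence that DoDivByPowerOf2I emits: build a bias of |divisor| - 1 from the sign bit (the sra/srl pair), add it, then shift arithmetically. Illustrative only; the helper name is made up, and two's-complement narrowing plus arithmetic right shifts of negative values are assumed, as on the MIPS targets this code serves (and as C++20 guarantees):

#include <cassert>
#include <cstdint>

// Truncating (round-toward-zero) division by a power-of-two divisor: the
// plain arithmetic shift rounds toward -infinity, so negative dividends get
// a bias of 2^shift - 1 first; negative divisors negate the result.
int32_t DivByPowerOf2(int32_t dividend, int32_t divisor) {
  uint32_t abs_divisor = divisor < 0 ? 0u - static_cast<uint32_t>(divisor)
                                     : static_cast<uint32_t>(divisor);
  assert(abs_divisor != 0 && (abs_divisor & (abs_divisor - 1)) == 0);
  if (divisor == 1) return dividend;
  if (divisor == -1) {
    assert(dividend != INT32_MIN);  // the generated code deoptimizes here
    return -dividend;
  }
  int shift = 0;
  while ((abs_divisor >> shift) != 1u) ++shift;
  // Bias is 0 for non-negative dividends and 2^shift - 1 for negative ones;
  // for shift == 1 the generated code shortens this to a single srl by 31,
  // which computes the same value.
  uint32_t sign_mask = static_cast<uint32_t>(dividend >> 31);  // 0 or ~0
  uint32_t bias = sign_mask >> (32 - shift);
  int32_t result =
      static_cast<int32_t>(static_cast<uint32_t>(dividend) + bias) >> shift;
  return divisor < 0 ? -result : result;
}
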
- uint32_t divisor_abs = abs(divisor);
+void LCodeGen::DoDivByConstI(LDivByConstI* instr) {
+ Register dividend = ToRegister(instr->dividend());
+ int32_t divisor = instr->divisor();
+ Register result = ToRegister(instr->result());
+ ASSERT(!dividend.is(result));
- int32_t power_of_2_factor =
- CompilerIntrinsics::CountTrailingZeros(divisor_abs);
+ if (divisor == 0) {
+ DeoptimizeIf(al, instr->environment());
+ return;
+ }
- switch (divisor_abs) {
- case 0:
- DeoptimizeIf(al, environment);
- return;
+ // Check for (0 / -x) that will produce negative zero.
+ HDiv* hdiv = instr->hydrogen();
+ if (hdiv->CheckFlag(HValue::kBailoutOnMinusZero) && divisor < 0) {
+ DeoptimizeIf(eq, instr->environment(), dividend, Operand(zero_reg));
+ }
- case 1:
- if (divisor > 0) {
- __ Move(result, dividend);
- } else {
- __ SubuAndCheckForOverflow(result, zero_reg, dividend, scratch);
- DeoptimizeIf(lt, environment, scratch, Operand(zero_reg));
- }
- // Compute the remainder.
- __ Move(remainder, zero_reg);
- return;
+ __ TruncatingDiv(result, dividend, Abs(divisor));
+ if (divisor < 0) __ Subu(result, zero_reg, result);
- default:
- if (IsPowerOf2(divisor_abs)) {
- // Branch and condition free code for integer division by a power
- // of two.
- int32_t power = WhichPowerOf2(divisor_abs);
- if (power > 1) {
- __ sra(scratch, dividend, power - 1);
- }
- __ srl(scratch, scratch, 32 - power);
- __ Addu(scratch, dividend, Operand(scratch));
- __ sra(result, scratch, power);
- // Negate if necessary.
- // We don't need to check for overflow because the case '-1' is
- // handled separately.
- if (divisor < 0) {
- ASSERT(divisor != -1);
- __ Subu(result, zero_reg, Operand(result));
- }
- // Compute the remainder.
- if (divisor > 0) {
- __ sll(scratch, result, power);
- __ Subu(remainder, dividend, Operand(scratch));
- } else {
- __ sll(scratch, result, power);
- __ Addu(remainder, dividend, Operand(scratch));
- }
- return;
- } else if (LChunkBuilder::HasMagicNumberForDivisor(divisor)) {
- // Use magic numbers for a few specific divisors.
- // Details and proofs can be found in:
- // - Hacker's Delight, Henry S. Warren, Jr.
- // - The PowerPC Compiler Writer's Guide
- // and probably many others.
- //
- // We handle
- // <divisor with magic numbers> * <power of 2>
- // but not
- // <divisor with magic numbers> * <other divisor with magic numbers>
- DivMagicNumbers magic_numbers =
- DivMagicNumberFor(divisor_abs >> power_of_2_factor);
- // Branch and condition free code for integer division by a power
- // of two.
- const int32_t M = magic_numbers.M;
- const int32_t s = magic_numbers.s + power_of_2_factor;
-
- __ li(scratch, Operand(M));
- __ mult(dividend, scratch);
- __ mfhi(scratch);
- if (M < 0) {
- __ Addu(scratch, scratch, Operand(dividend));
- }
- if (s > 0) {
- __ sra(scratch, scratch, s);
- __ mov(scratch, scratch);
- }
- __ srl(at, dividend, 31);
- __ Addu(result, scratch, Operand(at));
- if (divisor < 0) __ Subu(result, zero_reg, Operand(result));
- // Compute the remainder.
- __ li(scratch, Operand(divisor));
- __ Mul(scratch, result, Operand(scratch));
- __ Subu(remainder, dividend, Operand(scratch));
- } else {
- __ li(scratch, Operand(divisor));
- __ div(dividend, scratch);
- __ mfhi(remainder);
- __ mflo(result);
- }
+ if (!hdiv->CheckFlag(HInstruction::kAllUsesTruncatingToInt32)) {
+ __ Mul(scratch0(), result, Operand(divisor));
+ __ Subu(scratch0(), scratch0(), dividend);
+ DeoptimizeIf(ne, instr->environment(), scratch0(), Operand(zero_reg));
}
}
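
The TruncatingDiv calls above use the classic multiply-high technique that the deleted magic-number code referenced (Hacker's Delight; The PowerPC Compiler Writer's Guide). A self-contained C++ sketch follows; it is illustrative only: the names are made up, the magic-number routine is a transcription of the textbook algorithm rather than v8's own helper, and arithmetic right shift of negative values is assumed. A caller mirrors DoDivByConstI by passing Abs(divisor) and negating the quotient when the original divisor was negative.

#include <cassert>
#include <cstdint>

// Magic multiplier and shift for signed division by a constant d >= 2
// (Hacker's Delight, 2nd ed., Figure 10-1, specialized to positive d).
struct SignedMagic { int32_t multiplier; int shift; };

SignedMagic MagicFor(int32_t d) {
  assert(d >= 2);  // 0 deoptimizes, +/-1 take the power-of-two path
  const uint32_t two31 = 0x80000000u;
  uint32_t ad = static_cast<uint32_t>(d);
  uint32_t anc = two31 - 1 - (two31 % ad);   // |nc| from the derivation
  int p = 31;
  uint32_t q1 = two31 / anc, r1 = two31 - q1 * anc;
  uint32_t q2 = two31 / ad,  r2 = two31 - q2 * ad;
  uint32_t delta = 0;
  do {
    ++p;
    q1 = 2 * q1; r1 = 2 * r1;
    if (r1 >= anc) { ++q1; r1 -= anc; }
    q2 = 2 * q2; r2 = 2 * r2;
    if (r2 >= ad) { ++q2; r2 -= ad; }
    delta = ad - r2;
  } while (q1 < delta || (q1 == delta && r1 == 0));
  int64_t m = static_cast<int64_t>(q2) + 1;
  if (m > INT32_MAX) m -= int64_t{1} << 32;  // reinterpret as signed 32-bit
  return SignedMagic{static_cast<int32_t>(m), p - 32};
}

// Truncating division by a positive constant: multiply-high, an optional
// add of the dividend when the multiplier is negative, an arithmetic shift,
// then add the dividend's sign bit to round toward zero.
int32_t TruncatingDivByConst(int32_t dividend, int32_t divisor) {
  SignedMagic mag = MagicFor(divisor);
  int32_t hi = static_cast<int32_t>(
      (static_cast<int64_t>(dividend) * mag.multiplier) >> 32);
  if (mag.multiplier < 0) hi += dividend;
  if (mag.shift > 0) hi >>= mag.shift;
  return hi + static_cast<int32_t>(static_cast<uint32_t>(dividend) >> 31);
}
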
void LCodeGen::DoDivI(LDivI* instr) {
+ HBinaryOperation* hdiv = instr->hydrogen();
const Register left = ToRegister(instr->left());
const Register right = ToRegister(instr->right());
const Register result = ToRegister(instr->result());
@@ -1239,12 +1253,12 @@ void LCodeGen::DoDivI(LDivI* instr) {
__ div(left, right);
// Check for x / 0.
- if (instr->hydrogen()->CheckFlag(HValue::kCanBeDivByZero)) {
+ if (hdiv->CheckFlag(HValue::kCanBeDivByZero)) {
DeoptimizeIf(eq, instr->environment(), right, Operand(zero_reg));
}
// Check for (0 / -x) that will produce negative zero.
- if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) {
+ if (hdiv->CheckFlag(HValue::kBailoutOnMinusZero)) {
Label left_not_zero;
__ Branch(&left_not_zero, ne, left, Operand(zero_reg));
DeoptimizeIf(lt, instr->environment(), right, Operand(zero_reg));
@@ -1252,18 +1266,32 @@ void LCodeGen::DoDivI(LDivI* instr) {
}
// Check for (kMinInt / -1).
- if (instr->hydrogen()->CheckFlag(HValue::kCanOverflow)) {
+ if (hdiv->CheckFlag(HValue::kCanOverflow) &&
+ !hdiv->CheckFlag(HValue::kAllUsesTruncatingToInt32)) {
Label left_not_min_int;
__ Branch(&left_not_min_int, ne, left, Operand(kMinInt));
DeoptimizeIf(eq, instr->environment(), right, Operand(-1));
__ bind(&left_not_min_int);
}
- if (!instr->hydrogen()->CheckFlag(HInstruction::kAllUsesTruncatingToInt32)) {
+ if (hdiv->IsMathFloorOfDiv()) {
+ // We performed a truncating division. Correct the result if necessary.
+ Label done;
+ Register remainder = scratch0();
+ __ mfhi(remainder);
+ __ mflo(result);
+ __ Branch(&done, eq, remainder, Operand(zero_reg), USE_DELAY_SLOT);
+ __ Xor(remainder, remainder, Operand(right));
+ __ Branch(&done, ge, remainder, Operand(zero_reg));
+ __ Subu(result, result, Operand(1));
+ __ bind(&done);
+ } else if (!hdiv->CheckFlag(HValue::kAllUsesTruncatingToInt32)) {
__ mfhi(result);
DeoptimizeIf(ne, instr->environment(), result, Operand(zero_reg));
+ __ mflo(result);
+ } else {
+ __ mflo(result);
}
- __ mflo(result);
}
@@ -1279,67 +1307,94 @@ void LCodeGen::DoMultiplyAddD(LMultiplyAddD* instr) {
}
-void LCodeGen::DoMathFloorOfDiv(LMathFloorOfDiv* instr) {
- const Register result = ToRegister(instr->result());
- const Register left = ToRegister(instr->left());
- const Register remainder = ToRegister(instr->temp());
- const Register scratch = scratch0();
+void LCodeGen::DoFlooringDivByPowerOf2I(LFlooringDivByPowerOf2I* instr) {
+ Register dividend = ToRegister(instr->dividend());
+ Register result = ToRegister(instr->result());
+ int32_t divisor = instr->divisor();
+ Register scratch = scratch0();
+ ASSERT(!scratch.is(dividend));
+
+ // If the divisor is positive, things are easy: There can be no deopts and we
+ // can simply do an arithmetic right shift.
+ if (divisor == 1) return;
+ uint16_t shift = WhichPowerOf2Abs(divisor);
+ if (divisor > 1) {
+ __ sra(result, dividend, shift);
+ return;
+ }
- if (instr->right()->IsConstantOperand()) {
- Label done;
- int32_t divisor = ToInteger32(LConstantOperand::cast(instr->right()));
- if (divisor < 0) {
- DeoptimizeIf(eq, instr->environment(), left, Operand(zero_reg));
+ // If the divisor is negative, we have to negate and handle edge cases.
+ if (instr->hydrogen()->CheckFlag(HValue::kLeftCanBeMinInt)) {
+ __ Move(scratch, dividend);
+ }
+ __ Subu(result, zero_reg, dividend);
+ if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) {
+ DeoptimizeIf(eq, instr->environment(), result, Operand(zero_reg));
+ }
+ if (instr->hydrogen()->CheckFlag(HValue::kLeftCanBeMinInt)) {
+ // Note that we could emit branch-free code, but that would need one more
+ // register.
+ __ Xor(at, scratch, result);
+ if (divisor == -1) {
+ DeoptimizeIf(ge, instr->environment(), at, Operand(zero_reg));
+ __ sra(result, dividend, shift);
+ } else {
+ Label no_overflow, done;
+ __ Branch(&no_overflow, lt, at, Operand(zero_reg));
+ __ li(result, Operand(kMinInt / divisor));
+ __ Branch(&done);
+ __ bind(&no_overflow);
+ __ sra(result, dividend, shift);
+ __ bind(&done);
}
- EmitSignedIntegerDivisionByConstant(result,
- left,
- divisor,
- remainder,
- scratch,
- instr->environment());
- // We performed a truncating division. Correct the result if necessary.
- __ Branch(&done, eq, remainder, Operand(zero_reg), USE_DELAY_SLOT);
- __ Xor(scratch , remainder, Operand(divisor));
- __ Branch(&done, ge, scratch, Operand(zero_reg));
- __ Subu(result, result, Operand(1));
- __ bind(&done);
} else {
- Label done;
- const Register right = ToRegister(instr->right());
-
- // On MIPS div is asynchronous - it will run in the background while we
- // check for special cases.
- __ div(left, right);
+ __ sra(result, dividend, shift);
+ }
+}
- // Check for x / 0.
- DeoptimizeIf(eq, instr->environment(), right, Operand(zero_reg));
- // Check for (0 / -x) that will produce negative zero.
- if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) {
- Label left_not_zero;
- __ Branch(&left_not_zero, ne, left, Operand(zero_reg));
- DeoptimizeIf(lt, instr->environment(), right, Operand(zero_reg));
- __ bind(&left_not_zero);
- }
+void LCodeGen::DoFlooringDivByConstI(LFlooringDivByConstI* instr) {
+ Register dividend = ToRegister(instr->dividend());
+ int32_t divisor = instr->divisor();
+ Register result = ToRegister(instr->result());
+ ASSERT(!dividend.is(result));
- // Check for (kMinInt / -1).
- if (instr->hydrogen()->CheckFlag(HValue::kCanOverflow)) {
- Label left_not_min_int;
- __ Branch(&left_not_min_int, ne, left, Operand(kMinInt));
- DeoptimizeIf(eq, instr->environment(), right, Operand(-1));
- __ bind(&left_not_min_int);
- }
+ if (divisor == 0) {
+ DeoptimizeIf(al, instr->environment());
+ return;
+ }
- __ mfhi(remainder);
- __ mflo(result);
+ // Check for (0 / -x) that will produce negative zero.
+ HMathFloorOfDiv* hdiv = instr->hydrogen();
+ if (hdiv->CheckFlag(HValue::kBailoutOnMinusZero) && divisor < 0) {
+ DeoptimizeIf(eq, instr->environment(), dividend, Operand(zero_reg));
+ }
- // We performed a truncating division. Correct the result if necessary.
- __ Branch(&done, eq, remainder, Operand(zero_reg), USE_DELAY_SLOT);
- __ Xor(scratch , remainder, Operand(right));
- __ Branch(&done, ge, scratch, Operand(zero_reg));
- __ Subu(result, result, Operand(1));
- __ bind(&done);
+ // Easy case: We need no dynamic check for the dividend and the flooring
+ // division is the same as the truncating division.
+ if ((divisor > 0 && !hdiv->CheckFlag(HValue::kLeftCanBeNegative)) ||
+ (divisor < 0 && !hdiv->CheckFlag(HValue::kLeftCanBePositive))) {
+ __ TruncatingDiv(result, dividend, Abs(divisor));
+ if (divisor < 0) __ Subu(result, zero_reg, result);
+ return;
}
+
+ // In the general case we may need to adjust before and after the truncating
+ // division to get a flooring division.
+ Register temp = ToRegister(instr->temp());
+ ASSERT(!temp.is(dividend) && !temp.is(result));
+ Label needs_adjustment, done;
+ __ Branch(&needs_adjustment, divisor > 0 ? lt : gt,
+ dividend, Operand(zero_reg));
+ __ TruncatingDiv(result, dividend, Abs(divisor));
+ if (divisor < 0) __ Subu(result, zero_reg, result);
+ __ jmp(&done);
+ __ bind(&needs_adjustment);
+ __ Addu(temp, dividend, Operand(divisor > 0 ? 1 : -1));
+ __ TruncatingDiv(result, temp, Abs(divisor));
+ if (divisor < 0) __ Subu(result, zero_reg, result);
+ __ Subu(result, result, Operand(1));
+ __ bind(&done);
}
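
The adjust-then-truncate trick in the general case above condenses to a few lines of C++; illustrative only, with a made-up helper name and plain '/' standing in for the TruncatingDiv sequence:

#include <cassert>
#include <cstdint>

// Flooring division via a truncating division: when the dividend and the
// divisor have opposite signs (and the dividend is nonzero), move the
// dividend one unit toward zero, truncate, then subtract 1.
int32_t FlooringDivByConst(int32_t dividend, int32_t divisor) {
  assert(divisor != 0);
  // kMinInt / -1 would overflow; in the patch a -1 divisor takes the
  // power-of-two path instead, so it is simply excluded here.
  assert(!(dividend == INT32_MIN && divisor == -1));
  bool needs_adjustment = divisor > 0 ? dividend < 0 : dividend > 0;
  if (!needs_adjustment) {
    return dividend / divisor;  // truncating and flooring agree here
  }
  int32_t temp = dividend + (divisor > 0 ? 1 : -1);
  return temp / divisor - 1;
}
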
@@ -1465,7 +1520,7 @@ void LCodeGen::DoBitI(LBitI* instr) {
Register result = ToRegister(instr->result());
Operand right(no_reg);
- if (right_op->IsStackSlot() || right_op->IsArgument()) {
+ if (right_op->IsStackSlot()) {
right = Operand(EmitLoadRegister(right_op, at));
} else {
ASSERT(right_op->IsRegister() || right_op->IsConstantOperand());
@@ -1587,7 +1642,7 @@ void LCodeGen::DoSubI(LSubI* instr) {
bool can_overflow = instr->hydrogen()->CheckFlag(HValue::kCanOverflow);
if (!can_overflow) {
- if (right->IsStackSlot() || right->IsArgument()) {
+ if (right->IsStackSlot()) {
Register right_reg = EmitLoadRegister(right, at);
__ Subu(ToRegister(result), ToRegister(left), Operand(right_reg));
} else {
@@ -1597,9 +1652,7 @@ void LCodeGen::DoSubI(LSubI* instr) {
} else { // can_overflow.
Register overflow = scratch0();
Register scratch = scratch1();
- if (right->IsStackSlot() ||
- right->IsArgument() ||
- right->IsConstantOperand()) {
+ if (right->IsStackSlot() || right->IsConstantOperand()) {
Register right_reg = EmitLoadRegister(right, scratch);
__ SubuAndCheckForOverflow(ToRegister(result),
ToRegister(left),
@@ -1779,7 +1832,7 @@ void LCodeGen::DoAddI(LAddI* instr) {
bool can_overflow = instr->hydrogen()->CheckFlag(HValue::kCanOverflow);
if (!can_overflow) {
- if (right->IsStackSlot() || right->IsArgument()) {
+ if (right->IsStackSlot()) {
Register right_reg = EmitLoadRegister(right, at);
__ Addu(ToRegister(result), ToRegister(left), Operand(right_reg));
} else {
@@ -1790,7 +1843,6 @@ void LCodeGen::DoAddI(LAddI* instr) {
Register overflow = scratch0();
Register scratch = scratch1();
if (right->IsStackSlot() ||
- right->IsArgument() ||
right->IsConstantOperand()) {
Register right_reg = EmitLoadRegister(right, scratch);
__ AdduAndCheckForOverflow(ToRegister(result),
@@ -3088,7 +3140,7 @@ void LCodeGen::DoLoadKeyedExternalArray(LLoadKeyed* instr) {
case FAST_HOLEY_ELEMENTS:
case FAST_HOLEY_SMI_ELEMENTS:
case DICTIONARY_ELEMENTS:
- case NON_STRICT_ARGUMENTS_ELEMENTS:
+ case SLOPPY_ARGUMENTS_ELEMENTS:
UNREACHABLE();
break;
}
@@ -3447,7 +3499,7 @@ void LCodeGen::DoDeclareGlobals(LDeclareGlobals* instr) {
__ li(scratch1(), Operand(Smi::FromInt(instr->hydrogen()->flags())));
// The context is the first argument.
__ Push(cp, scratch0(), scratch1());
- CallRuntime(Runtime::kDeclareGlobals, 3, instr);
+ CallRuntime(Runtime::kHiddenDeclareGlobals, 3, instr);
}
@@ -3536,7 +3588,7 @@ void LCodeGen::DoDeferredMathAbsTaggedHeapNumber(LMathAbs* instr) {
// Slow case: Call the runtime system to do the number allocation.
__ bind(&slow);
- CallRuntimeFromDeferred(Runtime::kAllocateHeapNumber, 0, instr,
+ CallRuntimeFromDeferred(Runtime::kHiddenAllocateHeapNumber, 0, instr,
instr->context());
// Set the pointer to the new heap number in tmp.
if (!tmp1.is(v0))
@@ -3801,6 +3853,13 @@ void LCodeGen::DoMathLog(LMathLog* instr) {
}
+void LCodeGen::DoMathClz32(LMathClz32* instr) {
+ Register input = ToRegister(instr->value());
+ Register result = ToRegister(instr->result());
+ __ Clz(result, input);
+}
+
+
void LCodeGen::DoInvokeFunction(LInvokeFunction* instr) {
ASSERT(ToRegister(instr->context()).is(cp));
ASSERT(ToRegister(instr->function()).is(a1));
@@ -3881,8 +3940,7 @@ void LCodeGen::DoCallNew(LCallNew* instr) {
__ li(a0, Operand(instr->arity()));
// No cell in a2 for construct type feedback in optimized code
- Handle<Object> undefined_value(isolate()->factory()->undefined_value());
- __ li(a2, Operand(undefined_value));
+ __ LoadRoot(a2, Heap::kUndefinedValueRootIndex);
CallConstructStub stub(NO_CALL_FUNCTION_FLAGS);
CallCode(stub.GetCode(isolate()), RelocInfo::CONSTRUCT_CALL, instr);
}
@@ -3894,7 +3952,7 @@ void LCodeGen::DoCallNewArray(LCallNewArray* instr) {
ASSERT(ToRegister(instr->result()).is(v0));
__ li(a0, Operand(instr->arity()));
- __ li(a2, Operand(factory()->undefined_value()));
+ __ LoadRoot(a2, Heap::kUndefinedValueRootIndex);
ElementsKind kind = instr->hydrogen()->elements_kind();
AllocationSiteOverrideMode override_mode =
(AllocationSite::GetMode(kind) == TRACK_ALLOCATION_SITE)
@@ -3974,12 +4032,21 @@ void LCodeGen::DoStoreNamedField(LStoreNamedField* instr) {
}
Handle<Map> transition = instr->transition();
+ SmiCheck check_needed =
+ instr->hydrogen()->value()->IsHeapObject()
+ ? OMIT_SMI_CHECK : INLINE_SMI_CHECK;
- if (FLAG_track_heap_object_fields && representation.IsHeapObject()) {
+ ASSERT(!(representation.IsSmi() &&
+ instr->value()->IsConstantOperand() &&
+ !IsSmi(LConstantOperand::cast(instr->value()))));
+ if (representation.IsHeapObject()) {
Register value = ToRegister(instr->value());
if (!instr->hydrogen()->value()->type().IsHeapObject()) {
__ SmiTst(value, scratch);
DeoptimizeIf(eq, instr->environment(), scratch, Operand(zero_reg));
+
+ // We know that value is a smi now, so we can omit the check below.
+ check_needed = OMIT_SMI_CHECK;
}
} else if (representation.IsDouble()) {
ASSERT(transition.is_null());
@@ -4009,9 +4076,6 @@ void LCodeGen::DoStoreNamedField(LStoreNamedField* instr) {
// Do the store.
Register value = ToRegister(instr->value());
- SmiCheck check_needed =
- instr->hydrogen()->value()->IsHeapObject()
- ? OMIT_SMI_CHECK : INLINE_SMI_CHECK;
if (access.IsInobject()) {
MemOperand operand = FieldMemOperand(object, offset);
__ Store(value, operand, representation);
@@ -4053,8 +4117,7 @@ void LCodeGen::DoStoreNamedGeneric(LStoreNamedGeneric* instr) {
// Name is always in a2.
__ li(a2, Operand(instr->name()));
- Handle<Code> ic = StoreIC::initialize_stub(isolate(),
- instr->strict_mode_flag());
+ Handle<Code> ic = StoreIC::initialize_stub(isolate(), instr->strict_mode());
CallCode(ic, RelocInfo::CODE_TARGET, instr);
}
@@ -4185,7 +4248,7 @@ void LCodeGen::DoStoreKeyedExternalArray(LStoreKeyed* instr) {
case FAST_HOLEY_ELEMENTS:
case FAST_HOLEY_SMI_ELEMENTS:
case DICTIONARY_ELEMENTS:
- case NON_STRICT_ARGUMENTS_ELEMENTS:
+ case SLOPPY_ARGUMENTS_ELEMENTS:
UNREACHABLE();
break;
}
@@ -4310,7 +4373,7 @@ void LCodeGen::DoStoreKeyedGeneric(LStoreKeyedGeneric* instr) {
ASSERT(ToRegister(instr->key()).is(a1));
ASSERT(ToRegister(instr->value()).is(a0));
- Handle<Code> ic = (instr->strict_mode_flag() == kStrictMode)
+ Handle<Code> ic = (instr->strict_mode() == STRICT)
? isolate()->builtins()->KeyedStoreIC_Initialize_Strict()
: isolate()->builtins()->KeyedStoreIC_Initialize();
CallCode(ic, RelocInfo::CODE_TARGET, instr);
@@ -4421,7 +4484,7 @@ void LCodeGen::DoDeferredStringCharCodeAt(LStringCharCodeAt* instr) {
__ SmiTag(index);
__ push(index);
}
- CallRuntimeFromDeferred(Runtime::kStringCharCodeAt, 2, instr,
+ CallRuntimeFromDeferred(Runtime::kHiddenStringCharCodeAt, 2, instr,
instr->context());
__ AssertSmi(v0);
__ SmiUntag(v0);
@@ -4497,22 +4560,6 @@ void LCodeGen::DoInteger32ToDouble(LInteger32ToDouble* instr) {
}
-void LCodeGen::DoInteger32ToSmi(LInteger32ToSmi* instr) {
- LOperand* input = instr->value();
- LOperand* output = instr->result();
- Register scratch = scratch0();
-
- ASSERT(output->IsRegister());
- if (!instr->hydrogen()->value()->HasRange() ||
- !instr->hydrogen()->value()->range()->IsInSmiRange()) {
- __ SmiTagCheckOverflow(ToRegister(output), ToRegister(input), scratch);
- DeoptimizeIf(lt, instr->environment(), scratch, Operand(zero_reg));
- } else {
- __ SmiTag(ToRegister(output), ToRegister(input));
- }
-}
-
-
void LCodeGen::DoUint32ToDouble(LUint32ToDouble* instr) {
LOperand* input = instr->value();
LOperand* output = instr->result();
@@ -4523,28 +4570,17 @@ void LCodeGen::DoUint32ToDouble(LUint32ToDouble* instr) {
}
-void LCodeGen::DoUint32ToSmi(LUint32ToSmi* instr) {
- LOperand* input = instr->value();
- LOperand* output = instr->result();
- if (!instr->hydrogen()->value()->HasRange() ||
- !instr->hydrogen()->value()->range()->IsInSmiRange()) {
- Register scratch = scratch0();
- __ And(scratch, ToRegister(input), Operand(0xc0000000));
- DeoptimizeIf(ne, instr->environment(), scratch, Operand(zero_reg));
- }
- __ SmiTag(ToRegister(output), ToRegister(input));
-}
-
-
void LCodeGen::DoNumberTagI(LNumberTagI* instr) {
class DeferredNumberTagI V8_FINAL : public LDeferredCode {
public:
DeferredNumberTagI(LCodeGen* codegen, LNumberTagI* instr)
: LDeferredCode(codegen), instr_(instr) { }
virtual void Generate() V8_OVERRIDE {
- codegen()->DoDeferredNumberTagI(instr_,
- instr_->value(),
- SIGNED_INT32);
+ codegen()->DoDeferredNumberTagIU(instr_,
+ instr_->value(),
+ instr_->temp1(),
+ instr_->temp2(),
+ SIGNED_INT32);
}
virtual LInstruction* instr() V8_OVERRIDE { return instr_; }
private:
@@ -4568,9 +4604,11 @@ void LCodeGen::DoNumberTagU(LNumberTagU* instr) {
DeferredNumberTagU(LCodeGen* codegen, LNumberTagU* instr)
: LDeferredCode(codegen), instr_(instr) { }
virtual void Generate() V8_OVERRIDE {
- codegen()->DoDeferredNumberTagI(instr_,
- instr_->value(),
- UNSIGNED_INT32);
+ codegen()->DoDeferredNumberTagIU(instr_,
+ instr_->value(),
+ instr_->temp1(),
+ instr_->temp2(),
+ UNSIGNED_INT32);
}
virtual LInstruction* instr() V8_OVERRIDE { return instr_; }
private:
@@ -4587,18 +4625,19 @@ void LCodeGen::DoNumberTagU(LNumberTagU* instr) {
}
-void LCodeGen::DoDeferredNumberTagI(LInstruction* instr,
- LOperand* value,
- IntegerSignedness signedness) {
- Label slow;
+void LCodeGen::DoDeferredNumberTagIU(LInstruction* instr,
+ LOperand* value,
+ LOperand* temp1,
+ LOperand* temp2,
+ IntegerSignedness signedness) {
+ Label done, slow;
Register src = ToRegister(value);
Register dst = ToRegister(instr->result());
+ Register tmp1 = scratch0();
+ Register tmp2 = ToRegister(temp1);
+ Register tmp3 = ToRegister(temp2);
DoubleRegister dbl_scratch = double_scratch0();
- // Preserve the value of all registers.
- PushSafepointRegistersScope scope(this, Safepoint::kWithRegisters);
-
- Label done;
if (signedness == SIGNED_INT32) {
// There was overflow, so bits 30 and 31 of the original integer
// disagree. Try to allocate a heap number in new space and store
@@ -4615,37 +4654,41 @@ void LCodeGen::DoDeferredNumberTagI(LInstruction* instr,
}
if (FLAG_inline_new) {
- __ LoadRoot(scratch0(), Heap::kHeapNumberMapRootIndex);
- __ AllocateHeapNumber(t1, a3, t0, scratch0(), &slow, DONT_TAG_RESULT);
- __ Move(dst, t1);
+ __ LoadRoot(tmp3, Heap::kHeapNumberMapRootIndex);
+ __ AllocateHeapNumber(dst, tmp1, tmp2, tmp3, &slow, DONT_TAG_RESULT);
__ Branch(&done);
}
// Slow case: Call the runtime system to do the number allocation.
__ bind(&slow);
+ {
+ // TODO(3095996): Put a valid pointer value in the stack slot where the
+ // result register is stored, as this register is in the pointer map, but
+ // contains an integer value.
+ __ mov(dst, zero_reg);
+
+ // Preserve the value of all registers.
+ PushSafepointRegistersScope scope(this, Safepoint::kWithRegisters);
+
+ // NumberTagI and NumberTagD use the context from the frame, rather than
+ // the environment's HContext or HInlinedContext value.
+ // They only call Runtime::kHiddenAllocateHeapNumber.
+ // The corresponding HChange instructions are added in a phase that does
+ // not have easy access to the local context.
+ __ lw(cp, MemOperand(fp, StandardFrameConstants::kContextOffset));
+ __ CallRuntimeSaveDoubles(Runtime::kHiddenAllocateHeapNumber);
+ RecordSafepointWithRegisters(
+ instr->pointer_map(), 0, Safepoint::kNoLazyDeopt);
+ __ Subu(v0, v0, kHeapObjectTag);
+ __ StoreToSafepointRegisterSlot(v0, dst);
+ }
- // TODO(3095996): Put a valid pointer value in the stack slot where the result
- // register is stored, as this register is in the pointer map, but contains an
- // integer value.
- __ StoreToSafepointRegisterSlot(zero_reg, dst);
- // NumberTagI and NumberTagD use the context from the frame, rather than
- // the environment's HContext or HInlinedContext value.
- // They only call Runtime::kAllocateHeapNumber.
- // The corresponding HChange instructions are added in a phase that does
- // not have easy access to the local context.
- __ lw(cp, MemOperand(fp, StandardFrameConstants::kContextOffset));
- __ CallRuntimeSaveDoubles(Runtime::kAllocateHeapNumber);
- RecordSafepointWithRegisters(
- instr->pointer_map(), 0, Safepoint::kNoLazyDeopt);
- __ Move(dst, v0);
- __ Subu(dst, dst, kHeapObjectTag);
// Done. Store the value in dbl_scratch into the allocated heap
// number.
__ bind(&done);
__ sdc1(dbl_scratch, MemOperand(dst, HeapNumber::kValueOffset));
__ Addu(dst, dst, kHeapObjectTag);
- __ StoreToSafepointRegisterSlot(dst, dst);
}
@@ -4694,11 +4737,11 @@ void LCodeGen::DoDeferredNumberTagD(LNumberTagD* instr) {
PushSafepointRegistersScope scope(this, Safepoint::kWithRegisters);
// NumberTagI and NumberTagD use the context from the frame, rather than
// the environment's HContext or HInlinedContext value.
- // They only call Runtime::kAllocateHeapNumber.
+ // They only call Runtime::kHiddenAllocateHeapNumber.
// The corresponding HChange instructions are added in a phase that does
// not have easy access to the local context.
__ lw(cp, MemOperand(fp, StandardFrameConstants::kContextOffset));
- __ CallRuntimeSaveDoubles(Runtime::kAllocateHeapNumber);
+ __ CallRuntimeSaveDoubles(Runtime::kHiddenAllocateHeapNumber);
RecordSafepointWithRegisters(
instr->pointer_map(), 0, Safepoint::kNoLazyDeopt);
__ Subu(v0, v0, kHeapObjectTag);
@@ -4707,8 +4750,21 @@ void LCodeGen::DoDeferredNumberTagD(LNumberTagD* instr) {
void LCodeGen::DoSmiTag(LSmiTag* instr) {
- ASSERT(!instr->hydrogen_value()->CheckFlag(HValue::kCanOverflow));
- __ SmiTag(ToRegister(instr->result()), ToRegister(instr->value()));
+ HChange* hchange = instr->hydrogen();
+ Register input = ToRegister(instr->value());
+ Register output = ToRegister(instr->result());
+ if (hchange->CheckFlag(HValue::kCanOverflow) &&
+ hchange->value()->CheckFlag(HValue::kUint32)) {
+ __ And(at, input, Operand(0xc0000000));
+ DeoptimizeIf(ne, instr->environment(), at, Operand(zero_reg));
+ }
+ if (hchange->CheckFlag(HValue::kCanOverflow) &&
+ !hchange->value()->CheckFlag(HValue::kUint32)) {
+ __ SmiTagCheckOverflow(output, input, at);
+ DeoptimizeIf(lt, instr->environment(), at, Operand(zero_reg));
+ } else {
+ __ SmiTag(output, input);
+ }
}
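
As background for the two guards now folded into DoSmiTag: on 32-bit targets a smi is a 31-bit payload shifted left by the one-bit tag, so uint32 inputs must stay below 2^30 and int32 inputs must lie in [-2^30, 2^30 - 1]. A minimal sketch of the checks, not taken from the v8 sources:

#include <cassert>
#include <cstdint>

// '__ And(at, input, Operand(0xc0000000))': deopt unless bits 30 and 31 are
// clear, i.e. unless the unsigned value is below 2^30.
bool Uint32FitsInSmi(uint32_t value) {
  return (value & 0xc0000000u) == 0;
}

// SmiTagCheckOverflow: the tag shift must not change the sign, i.e. the
// value must lie in [-2^30, 2^30 - 1].
bool Int32FitsInSmi(int32_t value) {
  return value >= -(1 << 30) && value <= (1 << 30) - 1;
}

int32_t SmiTag(int32_t value) {
  assert(Int32FitsInSmi(value));
  // Shift in unsigned arithmetic to stay clear of signed-overflow UB.
  return static_cast<int32_t>(static_cast<uint32_t>(value) << 1);
}
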
@@ -5169,6 +5225,25 @@ void LCodeGen::DoClampTToUint8(LClampTToUint8* instr) {
}
+void LCodeGen::DoDoubleBits(LDoubleBits* instr) {
+ DoubleRegister value_reg = ToDoubleRegister(instr->value());
+ Register result_reg = ToRegister(instr->result());
+ if (instr->hydrogen()->bits() == HDoubleBits::HIGH) {
+ __ FmoveHigh(result_reg, value_reg);
+ } else {
+ __ FmoveLow(result_reg, value_reg);
+ }
+}
+
+
+void LCodeGen::DoConstructDouble(LConstructDouble* instr) {
+ Register hi_reg = ToRegister(instr->hi());
+ Register lo_reg = ToRegister(instr->lo());
+ DoubleRegister result_reg = ToDoubleRegister(instr->result());
+ __ Move(result_reg, lo_reg, hi_reg);
+}
+
+
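
DoDoubleBits and DoConstructDouble just move the raw 32-bit halves of an IEEE double (bit positions 63..32 and 31..0) between floating-point and integer registers. A portable C++ equivalent, for reference only (names made up):

#include <cstdint>
#include <cstring>

// Split a double into its raw high and low words, mirroring
// FmoveHigh/FmoveLow, and rebuild one from two words, mirroring the
// Move(result_reg, lo_reg, hi_reg) in DoConstructDouble.
void DoubleBits(double value, uint32_t* hi, uint32_t* lo) {
  uint64_t bits;
  std::memcpy(&bits, &value, sizeof bits);
  *hi = static_cast<uint32_t>(bits >> 32);
  *lo = static_cast<uint32_t>(bits);
}

double ConstructDouble(uint32_t hi, uint32_t lo) {
  uint64_t bits = (static_cast<uint64_t>(hi) << 32) | lo;
  double value;
  std::memcpy(&value, &bits, sizeof value);
  return value;
}
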
void LCodeGen::DoAllocate(LAllocate* instr) {
class DeferredAllocate V8_FINAL : public LDeferredCode {
public:
@@ -5276,7 +5351,7 @@ void LCodeGen::DoDeferredAllocate(LAllocate* instr) {
__ Push(Smi::FromInt(flags));
CallRuntimeFromDeferred(
- Runtime::kAllocateInTargetSpace, 2, instr, instr->context());
+ Runtime::kHiddenAllocateInTargetSpace, 2, instr, instr->context());
__ StoreToSafepointRegisterSlot(v0, result);
}
@@ -5310,7 +5385,7 @@ void LCodeGen::DoRegExpLiteral(LRegExpLiteral* instr) {
__ li(t1, Operand(instr->hydrogen()->pattern()));
__ li(t0, Operand(instr->hydrogen()->flags()));
__ Push(t3, t2, t1, t0);
- CallRuntime(Runtime::kMaterializeRegExpLiteral, 4, instr);
+ CallRuntime(Runtime::kHiddenMaterializeRegExpLiteral, 4, instr);
__ mov(a1, v0);
__ bind(&materialized);
@@ -5323,7 +5398,7 @@ void LCodeGen::DoRegExpLiteral(LRegExpLiteral* instr) {
__ bind(&runtime_allocate);
__ li(a0, Operand(Smi::FromInt(size)));
__ Push(a1, a0);
- CallRuntime(Runtime::kAllocateInNewSpace, 1, instr);
+ CallRuntime(Runtime::kHiddenAllocateInNewSpace, 1, instr);
__ pop(a1);
__ bind(&allocated);
@@ -5348,7 +5423,7 @@ void LCodeGen::DoFunctionLiteral(LFunctionLiteral* instr) {
// space for nested functions that don't need literals cloning.
bool pretenure = instr->hydrogen()->pretenure();
if (!pretenure && instr->hydrogen()->has_no_literals()) {
- FastNewClosureStub stub(instr->hydrogen()->language_mode(),
+ FastNewClosureStub stub(instr->hydrogen()->strict_mode(),
instr->hydrogen()->is_generator());
__ li(a2, Operand(instr->hydrogen()->shared_info()));
CallCode(stub.GetCode(isolate()), RelocInfo::CODE_TARGET, instr);
@@ -5357,7 +5432,7 @@ void LCodeGen::DoFunctionLiteral(LFunctionLiteral* instr) {
__ li(a1, Operand(pretenure ? factory()->true_value()
: factory()->false_value()));
__ Push(cp, a2, a1);
- CallRuntime(Runtime::kNewClosure, 3, instr);
+ CallRuntime(Runtime::kHiddenNewClosure, 3, instr);
}
}
@@ -5545,7 +5620,7 @@ void LCodeGen::EnsureSpaceForLazyDeopt(int space_needed) {
void LCodeGen::DoLazyBailout(LLazyBailout* instr) {
- EnsureSpaceForLazyDeopt(Deoptimizer::patch_size());
+ last_lazy_deopt_pc_ = masm()->pc_offset();
ASSERT(instr->HasEnvironment());
LEnvironment* env = instr->environment();
RegisterEnvironmentForDeoptimization(env, Safepoint::kLazyDeopt);
@@ -5581,7 +5656,7 @@ void LCodeGen::DoDummyUse(LDummyUse* instr) {
void LCodeGen::DoDeferredStackCheck(LStackCheck* instr) {
PushSafepointRegistersScope scope(this, Safepoint::kWithRegisters);
LoadContextFromDeferred(instr->context());
- __ CallRuntimeSaveDoubles(Runtime::kStackGuard);
+ __ CallRuntimeSaveDoubles(Runtime::kHiddenStackGuard);
RecordSafepointWithLazyDeopt(
instr, RECORD_SAFEPOINT_WITH_REGISTERS_AND_NO_ARGUMENTS);
ASSERT(instr->HasEnvironment());
@@ -5617,10 +5692,7 @@ void LCodeGen::DoStackCheck(LStackCheck* instr) {
CallCode(isolate()->builtins()->StackCheck(),
RelocInfo::CODE_TARGET,
instr);
- EnsureSpaceForLazyDeopt(Deoptimizer::patch_size());
__ bind(&done);
- RegisterEnvironmentForDeoptimization(env, Safepoint::kLazyDeopt);
- safepoints_.RecordLazyDeoptimizationIndex(env->deoptimization_index());
} else {
ASSERT(instr->hydrogen()->is_backwards_branch());
// Perform stack overflow check if this goto needs it before jumping.
diff --git a/deps/v8/src/mips/lithium-codegen-mips.h b/deps/v8/src/mips/lithium-codegen-mips.h
index 1e572bc95f..63f0661ae5 100644
--- a/deps/v8/src/mips/lithium-codegen-mips.h
+++ b/deps/v8/src/mips/lithium-codegen-mips.h
@@ -124,9 +124,11 @@ class LCodeGen: public LCodeGenBase {
void DoDeferredNumberTagD(LNumberTagD* instr);
enum IntegerSignedness { SIGNED_INT32, UNSIGNED_INT32 };
- void DoDeferredNumberTagI(LInstruction* instr,
- LOperand* value,
- IntegerSignedness signedness);
+ void DoDeferredNumberTagIU(LInstruction* instr,
+ LOperand* value,
+ LOperand* temp1,
+ LOperand* temp2,
+ IntegerSignedness signedness);
void DoDeferredTaggedToI(LTaggedToI* instr);
void DoDeferredMathAbsTaggedHeapNumber(LMathAbs* instr);
@@ -161,9 +163,7 @@ class LCodeGen: public LCodeGenBase {
#undef DECLARE_DO
private:
- StrictModeFlag strict_mode_flag() const {
- return info()->is_classic_mode() ? kNonStrictMode : kStrictMode;
- }
+ StrictMode strict_mode() const { return info()->strict_mode(); }
Scope* scope() const { return scope_; }
diff --git a/deps/v8/src/mips/lithium-mips.cc b/deps/v8/src/mips/lithium-mips.cc
index d423040a0d..752f67673d 100644
--- a/deps/v8/src/mips/lithium-mips.cc
+++ b/deps/v8/src/mips/lithium-mips.cc
@@ -839,7 +839,6 @@ void LChunkBuilder::DoBasicBlock(HBasicBlock* block, HBasicBlock* next_block) {
void LChunkBuilder::VisitInstruction(HInstruction* current) {
HInstruction* old_current = current_instruction_;
current_instruction_ = current;
- if (current->has_position()) position_ = current->position();
LInstruction* instr = NULL;
if (current->CanReplaceWithDummyUses()) {
@@ -1113,6 +1112,7 @@ LInstruction* LChunkBuilder::DoUnaryMathOperation(HUnaryMathOperation* instr) {
case kMathExp: return DoMathExp(instr);
case kMathSqrt: return DoMathSqrt(instr);
case kMathPowHalf: return DoMathPowHalf(instr);
+ case kMathClz32: return DoMathClz32(instr);
default:
UNREACHABLE();
return NULL;
@@ -1128,6 +1128,13 @@ LInstruction* LChunkBuilder::DoMathLog(HUnaryMathOperation* instr) {
}
+LInstruction* LChunkBuilder::DoMathClz32(HUnaryMathOperation* instr) {
+ LOperand* input = UseRegisterAtStart(instr->value());
+ LMathClz32* result = new(zone()) LMathClz32(input);
+ return DefineAsRegister(result);
+}
+
+
LInstruction* LChunkBuilder::DoMathExp(HUnaryMathOperation* instr) {
ASSERT(instr->representation().IsDouble());
ASSERT(instr->value()->representation().IsDouble());
@@ -1248,14 +1255,61 @@ LInstruction* LChunkBuilder::DoBitwise(HBitwise* instr) {
}
+LInstruction* LChunkBuilder::DoDivByPowerOf2I(HDiv* instr) {
+ ASSERT(instr->representation().IsSmiOrInteger32());
+ ASSERT(instr->left()->representation().Equals(instr->representation()));
+ ASSERT(instr->right()->representation().Equals(instr->representation()));
+ LOperand* dividend = UseRegister(instr->left());
+ int32_t divisor = instr->right()->GetInteger32Constant();
+ LInstruction* result = DefineAsRegister(new(zone()) LDivByPowerOf2I(
+ dividend, divisor));
+ if ((instr->CheckFlag(HValue::kBailoutOnMinusZero) && divisor < 0) ||
+ (instr->CheckFlag(HValue::kCanOverflow) && divisor == -1) ||
+ (!instr->CheckFlag(HInstruction::kAllUsesTruncatingToInt32) &&
+ divisor != 1 && divisor != -1)) {
+ result = AssignEnvironment(result);
+ }
+ return result;
+}
+
+
+LInstruction* LChunkBuilder::DoDivByConstI(HDiv* instr) {
+ ASSERT(instr->representation().IsInteger32());
+ ASSERT(instr->left()->representation().Equals(instr->representation()));
+ ASSERT(instr->right()->representation().Equals(instr->representation()));
+ LOperand* dividend = UseRegister(instr->left());
+ int32_t divisor = instr->right()->GetInteger32Constant();
+ LInstruction* result = DefineAsRegister(new(zone()) LDivByConstI(
+ dividend, divisor));
+ if (divisor == 0 ||
+ (instr->CheckFlag(HValue::kBailoutOnMinusZero) && divisor < 0) ||
+ !instr->CheckFlag(HInstruction::kAllUsesTruncatingToInt32)) {
+ result = AssignEnvironment(result);
+ }
+ return result;
+}
+
+
+LInstruction* LChunkBuilder::DoDivI(HBinaryOperation* instr) {
+ ASSERT(instr->representation().IsSmiOrInteger32());
+ ASSERT(instr->left()->representation().Equals(instr->representation()));
+ ASSERT(instr->right()->representation().Equals(instr->representation()));
+ LOperand* dividend = UseRegister(instr->left());
+ LOperand* divisor = UseRegister(instr->right());
+ LDivI* div = new(zone()) LDivI(dividend, divisor);
+ return AssignEnvironment(DefineAsRegister(div));
+}
+
+
LInstruction* LChunkBuilder::DoDiv(HDiv* instr) {
if (instr->representation().IsSmiOrInteger32()) {
- ASSERT(instr->left()->representation().Equals(instr->representation()));
- ASSERT(instr->right()->representation().Equals(instr->representation()));
- LOperand* dividend = UseRegister(instr->left());
- LOperand* divisor = UseRegister(instr->right());
- LDivI* div = new(zone()) LDivI(dividend, divisor);
- return AssignEnvironment(DefineAsRegister(div));
+ if (instr->RightIsPowerOf2()) {
+ return DoDivByPowerOf2I(instr);
+ } else if (instr->right()->IsConstant()) {
+ return DoDivByConstI(instr);
+ } else {
+ return DoDivI(instr);
+ }
} else if (instr->representation().IsDouble()) {
return DoArithmeticD(Token::DIV, instr);
} else {
@@ -1264,72 +1318,99 @@ LInstruction* LChunkBuilder::DoDiv(HDiv* instr) {
}
-bool LChunkBuilder::HasMagicNumberForDivisor(int32_t divisor) {
- uint32_t divisor_abs = abs(divisor);
- // Dividing by 0, 1, and powers of 2 is easy.
- // Note that IsPowerOf2(0) returns true;
- ASSERT(IsPowerOf2(0) == true);
- if (IsPowerOf2(divisor_abs)) return true;
+LInstruction* LChunkBuilder::DoFlooringDivByPowerOf2I(HMathFloorOfDiv* instr) {
+ LOperand* dividend = UseRegisterAtStart(instr->left());
+ int32_t divisor = instr->right()->GetInteger32Constant();
+ LInstruction* result = DefineAsRegister(new(zone()) LFlooringDivByPowerOf2I(
+ dividend, divisor));
+ if ((instr->CheckFlag(HValue::kBailoutOnMinusZero) && divisor < 0) ||
+ (instr->CheckFlag(HValue::kLeftCanBeMinInt) && divisor == -1)) {
+ result = AssignEnvironment(result);
+ }
+ return result;
+}
- // We have magic numbers for a few specific divisors.
- // Details and proofs can be found in:
- // - Hacker's Delight, Henry S. Warren, Jr.
- // - The PowerPC Compiler Writer's Guide
- // and probably many others.
- //
- // We handle
- // <divisor with magic numbers> * <power of 2>
- // but not
- // <divisor with magic numbers> * <other divisor with magic numbers>
- int32_t power_of_2_factor =
- CompilerIntrinsics::CountTrailingZeros(divisor_abs);
- DivMagicNumbers magic_numbers =
- DivMagicNumberFor(divisor_abs >> power_of_2_factor);
- if (magic_numbers.M != InvalidDivMagicNumber.M) return true;
- return false;
+LInstruction* LChunkBuilder::DoFlooringDivByConstI(HMathFloorOfDiv* instr) {
+ ASSERT(instr->representation().IsInteger32());
+ ASSERT(instr->left()->representation().Equals(instr->representation()));
+ ASSERT(instr->right()->representation().Equals(instr->representation()));
+ LOperand* dividend = UseRegister(instr->left());
+ int32_t divisor = instr->right()->GetInteger32Constant();
+ LOperand* temp =
+ ((divisor > 0 && !instr->CheckFlag(HValue::kLeftCanBeNegative)) ||
+ (divisor < 0 && !instr->CheckFlag(HValue::kLeftCanBePositive))) ?
+ NULL : TempRegister();
+ LInstruction* result = DefineAsRegister(
+ new(zone()) LFlooringDivByConstI(dividend, divisor, temp));
+ if (divisor == 0 ||
+ (instr->CheckFlag(HValue::kBailoutOnMinusZero) && divisor < 0)) {
+ result = AssignEnvironment(result);
+ }
+ return result;
}
LInstruction* LChunkBuilder::DoMathFloorOfDiv(HMathFloorOfDiv* instr) {
- HValue* right = instr->right();
- LOperand* dividend = UseRegister(instr->left());
- LOperand* divisor = UseRegisterOrConstant(right);
- LOperand* remainder = TempRegister();
- return AssignEnvironment(DefineAsRegister(
- new(zone()) LMathFloorOfDiv(dividend, divisor, remainder)));
+ if (instr->RightIsPowerOf2()) {
+ return DoFlooringDivByPowerOf2I(instr);
+ } else if (instr->right()->IsConstant()) {
+ return DoFlooringDivByConstI(instr);
+ } else {
+ return DoDivI(instr);
+ }
+}
+
+
+LInstruction* LChunkBuilder::DoModByPowerOf2I(HMod* instr) {
+ ASSERT(instr->representation().IsSmiOrInteger32());
+ ASSERT(instr->left()->representation().Equals(instr->representation()));
+ ASSERT(instr->right()->representation().Equals(instr->representation()));
+ LOperand* dividend = UseRegisterAtStart(instr->left());
+ int32_t divisor = instr->right()->GetInteger32Constant();
+ LInstruction* result = DefineSameAsFirst(new(zone()) LModByPowerOf2I(
+ dividend, divisor));
+ if (instr->CheckFlag(HValue::kBailoutOnMinusZero)) {
+ result = AssignEnvironment(result);
+ }
+ return result;
+}
+
+
+LInstruction* LChunkBuilder::DoModByConstI(HMod* instr) {
+ ASSERT(instr->representation().IsSmiOrInteger32());
+ ASSERT(instr->left()->representation().Equals(instr->representation()));
+ ASSERT(instr->right()->representation().Equals(instr->representation()));
+ LOperand* dividend = UseRegister(instr->left());
+ int32_t divisor = instr->right()->GetInteger32Constant();
+ LInstruction* result = DefineAsRegister(new(zone()) LModByConstI(
+ dividend, divisor));
+ if (divisor == 0 || instr->CheckFlag(HValue::kBailoutOnMinusZero)) {
+ result = AssignEnvironment(result);
+ }
+ return result;
+}
+
+
+LInstruction* LChunkBuilder::DoModI(HMod* instr) {
+ ASSERT(instr->representation().IsSmiOrInteger32());
+ ASSERT(instr->left()->representation().Equals(instr->representation()));
+ ASSERT(instr->right()->representation().Equals(instr->representation()));
+ LOperand* dividend = UseRegister(instr->left());
+ LOperand* divisor = UseRegister(instr->right());
+ LInstruction* result = DefineAsRegister(new(zone()) LModI(
+ dividend, divisor));
+ if (instr->CheckFlag(HValue::kCanBeDivByZero) ||
+ instr->CheckFlag(HValue::kBailoutOnMinusZero)) {
+ result = AssignEnvironment(result);
+ }
+ return result;
}
LInstruction* LChunkBuilder::DoMod(HMod* instr) {
- HValue* left = instr->left();
- HValue* right = instr->right();
if (instr->representation().IsSmiOrInteger32()) {
- ASSERT(instr->left()->representation().Equals(instr->representation()));
- ASSERT(instr->right()->representation().Equals(instr->representation()));
- if (instr->RightIsPowerOf2()) {
- ASSERT(!right->CanBeZero());
- LModI* mod = new(zone()) LModI(UseRegisterAtStart(left),
- UseConstant(right));
- LInstruction* result = DefineAsRegister(mod);
- return (left->CanBeNegative() &&
- instr->CheckFlag(HValue::kBailoutOnMinusZero))
- ? AssignEnvironment(result)
- : result;
- } else {
- LModI* mod = new(zone()) LModI(UseRegister(left),
- UseRegister(right),
- TempRegister(),
- FixedTemp(f20),
- FixedTemp(f22));
- LInstruction* result = DefineAsRegister(mod);
- return (right->CanBeZero() ||
- (left->RangeCanInclude(kMinInt) &&
- right->RangeCanInclude(-1)) ||
- instr->CheckFlag(HValue::kBailoutOnMinusZero))
- ? AssignEnvironment(result)
- : result;
- }
+ return instr->RightIsPowerOf2() ? DoModByPowerOf2I(instr) : DoModI(instr);
} else if (instr->representation().IsDouble()) {
return DoArithmeticD(Token::MOD, instr);
} else {
@@ -1774,25 +1855,27 @@ LInstruction* LChunkBuilder::DoChange(HChange* instr) {
if (to.IsTagged()) {
HValue* val = instr->value();
LOperand* value = UseRegisterAtStart(val);
- if (val->CheckFlag(HInstruction::kUint32)) {
- LNumberTagU* result = new(zone()) LNumberTagU(value);
- return AssignEnvironment(AssignPointerMap(DefineAsRegister(result)));
- } else if (val->HasRange() && val->range()->IsInSmiRange()) {
+ if (!instr->CheckFlag(HValue::kCanOverflow)) {
return DefineAsRegister(new(zone()) LSmiTag(value));
+ } else if (val->CheckFlag(HInstruction::kUint32)) {
+ LOperand* temp1 = TempRegister();
+ LOperand* temp2 = TempRegister();
+ LNumberTagU* result = new(zone()) LNumberTagU(value, temp1, temp2);
+ return AssignEnvironment(AssignPointerMap(DefineAsRegister(result)));
} else {
- LNumberTagI* result = new(zone()) LNumberTagI(value);
+ LOperand* temp1 = TempRegister();
+ LOperand* temp2 = TempRegister();
+ LNumberTagI* result = new(zone()) LNumberTagI(value, temp1, temp2);
return AssignEnvironment(AssignPointerMap(DefineAsRegister(result)));
}
} else if (to.IsSmi()) {
HValue* val = instr->value();
LOperand* value = UseRegister(val);
- LInstruction* result = val->CheckFlag(HInstruction::kUint32)
- ? DefineAsRegister(new(zone()) LUint32ToSmi(value))
- : DefineAsRegister(new(zone()) LInteger32ToSmi(value));
- if (val->HasRange() && val->range()->IsInSmiRange()) {
- return result;
+ LInstruction* result = DefineAsRegister(new(zone()) LSmiTag(value));
+ if (instr->CheckFlag(HValue::kCanOverflow)) {
+ result = AssignEnvironment(result);
}
- return AssignEnvironment(result);
+ return result;
} else {
ASSERT(to.IsDouble());
if (instr->value()->CheckFlag(HInstruction::kUint32)) {
@@ -1868,6 +1951,20 @@ LInstruction* LChunkBuilder::DoClampToUint8(HClampToUint8* instr) {
}
+LInstruction* LChunkBuilder::DoDoubleBits(HDoubleBits* instr) {
+ HValue* value = instr->value();
+ ASSERT(value->representation().IsDouble());
+ return DefineAsRegister(new(zone()) LDoubleBits(UseRegister(value)));
+}
+
+
+LInstruction* LChunkBuilder::DoConstructDouble(HConstructDouble* instr) {
+ LOperand* lo = UseRegister(instr->lo());
+ LOperand* hi = UseRegister(instr->hi());
+ return DefineAsRegister(new(zone()) LConstructDouble(hi, lo));
+}
+
+
LInstruction* LChunkBuilder::DoReturn(HReturn* instr) {
LOperand* context = info()->IsStub()
? UseFixed(instr->context(), cp)
@@ -2124,11 +2221,9 @@ LInstruction* LChunkBuilder::DoStoreNamedField(HStoreNamedField* instr) {
}
LOperand* val;
- if (needs_write_barrier ||
- (FLAG_track_fields && instr->field_representation().IsSmi())) {
+ if (needs_write_barrier || instr->field_representation().IsSmi()) {
val = UseTempRegister(instr->value());
- } else if (FLAG_track_double_fields &&
- instr->field_representation().IsDouble()) {
+ } else if (instr->field_representation().IsDouble()) {
val = UseRegisterAtStart(instr->value());
} else {
val = UseRegister(instr->value());
@@ -2138,8 +2233,7 @@ LInstruction* LChunkBuilder::DoStoreNamedField(HStoreNamedField* instr) {
LOperand* temp = needs_write_barrier_for_map ? TempRegister() : NULL;
LStoreNamedField* result = new(zone()) LStoreNamedField(obj, val, temp);
- if (FLAG_track_heap_object_fields &&
- instr->field_representation().IsHeapObject()) {
+ if (instr->field_representation().IsHeapObject()) {
if (!instr->value()->type().IsHeapObject()) {
return AssignEnvironment(result);
}
diff --git a/deps/v8/src/mips/lithium-mips.h b/deps/v8/src/mips/lithium-mips.h
index 39e2691849..ae59e57f27 100644
--- a/deps/v8/src/mips/lithium-mips.h
+++ b/deps/v8/src/mips/lithium-mips.h
@@ -80,17 +80,23 @@ class LCodeGen;
V(ConstantI) \
V(ConstantS) \
V(ConstantT) \
+ V(ConstructDouble) \
V(Context) \
V(DateField) \
V(DebugBreak) \
V(DeclareGlobals) \
V(Deoptimize) \
+ V(DivByConstI) \
+ V(DivByPowerOf2I) \
V(DivI) \
V(DoubleToI) \
+ V(DoubleBits) \
V(DoubleToSmi) \
V(Drop) \
V(Dummy) \
V(DummyUse) \
+ V(FlooringDivByConstI) \
+ V(FlooringDivByPowerOf2I) \
V(ForInCacheArray) \
V(ForInPrepareMap) \
V(FunctionLiteral) \
@@ -103,7 +109,6 @@ class LCodeGen;
V(InstanceOfKnownGlobal) \
V(InstructionGap) \
V(Integer32ToDouble) \
- V(Integer32ToSmi) \
V(InvokeFunction) \
V(IsConstructCallAndBranch) \
V(IsObjectAndBranch) \
@@ -125,13 +130,15 @@ class LCodeGen;
V(MapEnumLength) \
V(MathAbs) \
V(MathExp) \
+ V(MathClz32) \
V(MathFloor) \
- V(MathFloorOfDiv) \
V(MathLog) \
V(MathMinMax) \
V(MathPowHalf) \
V(MathRound) \
V(MathSqrt) \
+ V(ModByConstI) \
+ V(ModByPowerOf2I) \
V(ModI) \
V(MulI) \
V(MultiplyAddD) \
@@ -171,7 +178,6 @@ class LCodeGen;
V(Typeof) \
V(TypeofIsAndBranch) \
V(Uint32ToDouble) \
- V(Uint32ToSmi) \
V(UnknownOSRValue) \
V(WrapReceiver)
@@ -613,42 +619,94 @@ class LArgumentsElements V8_FINAL : public LTemplateInstruction<1, 0, 0> {
};
-class LModI V8_FINAL : public LTemplateInstruction<1, 2, 3> {
+class LModByPowerOf2I V8_FINAL : public LTemplateInstruction<1, 1, 0> {
public:
- // Used when the right hand is a constant power of 2.
- LModI(LOperand* left,
- LOperand* right) {
- inputs_[0] = left;
- inputs_[1] = right;
- temps_[0] = NULL;
- temps_[1] = NULL;
- temps_[2] = NULL;
+ LModByPowerOf2I(LOperand* dividend, int32_t divisor) {
+ inputs_[0] = dividend;
+ divisor_ = divisor;
+ }
+
+ LOperand* dividend() { return inputs_[0]; }
+ int32_t divisor() const { return divisor_; }
+
+ DECLARE_CONCRETE_INSTRUCTION(ModByPowerOf2I, "mod-by-power-of-2-i")
+ DECLARE_HYDROGEN_ACCESSOR(Mod)
+
+ private:
+ int32_t divisor_;
+};
+
+
+class LModByConstI V8_FINAL : public LTemplateInstruction<1, 1, 0> {
+ public:
+ LModByConstI(LOperand* dividend, int32_t divisor) {
+ inputs_[0] = dividend;
+ divisor_ = divisor;
}
- // Used for the standard case.
+ LOperand* dividend() { return inputs_[0]; }
+ int32_t divisor() const { return divisor_; }
+
+ DECLARE_CONCRETE_INSTRUCTION(ModByConstI, "mod-by-const-i")
+ DECLARE_HYDROGEN_ACCESSOR(Mod)
+
+ private:
+ int32_t divisor_;
+};
+
+
+class LModI V8_FINAL : public LTemplateInstruction<1, 2, 3> {
+ public:
LModI(LOperand* left,
- LOperand* right,
- LOperand* temp,
- LOperand* temp2,
- LOperand* temp3) {
+ LOperand* right) {
inputs_[0] = left;
inputs_[1] = right;
- temps_[0] = temp;
- temps_[1] = temp2;
- temps_[2] = temp3;
}
LOperand* left() { return inputs_[0]; }
LOperand* right() { return inputs_[1]; }
- LOperand* temp() { return temps_[0]; }
- LOperand* temp2() { return temps_[1]; }
- LOperand* temp3() { return temps_[2]; }
DECLARE_CONCRETE_INSTRUCTION(ModI, "mod-i")
DECLARE_HYDROGEN_ACCESSOR(Mod)
};
+class LDivByPowerOf2I V8_FINAL : public LTemplateInstruction<1, 1, 0> {
+ public:
+ LDivByPowerOf2I(LOperand* dividend, int32_t divisor) {
+ inputs_[0] = dividend;
+ divisor_ = divisor;
+ }
+
+ LOperand* dividend() { return inputs_[0]; }
+ int32_t divisor() const { return divisor_; }
+
+ DECLARE_CONCRETE_INSTRUCTION(DivByPowerOf2I, "div-by-power-of-2-i")
+ DECLARE_HYDROGEN_ACCESSOR(Div)
+
+ private:
+ int32_t divisor_;
+};
+
+
+class LDivByConstI V8_FINAL : public LTemplateInstruction<1, 1, 0> {
+ public:
+ LDivByConstI(LOperand* dividend, int32_t divisor) {
+ inputs_[0] = dividend;
+ divisor_ = divisor;
+ }
+
+ LOperand* dividend() { return inputs_[0]; }
+ int32_t divisor() const { return divisor_; }
+
+ DECLARE_CONCRETE_INSTRUCTION(DivByConstI, "div-by-const-i")
+ DECLARE_HYDROGEN_ACCESSOR(Div)
+
+ private:
+ int32_t divisor_;
+};
+
+
class LDivI V8_FINAL : public LTemplateInstruction<1, 2, 0> {
public:
LDivI(LOperand* left, LOperand* right) {
@@ -660,26 +718,46 @@ class LDivI V8_FINAL : public LTemplateInstruction<1, 2, 0> {
LOperand* right() { return inputs_[1]; }
DECLARE_CONCRETE_INSTRUCTION(DivI, "div-i")
- DECLARE_HYDROGEN_ACCESSOR(Div)
+ DECLARE_HYDROGEN_ACCESSOR(BinaryOperation)
};
-class LMathFloorOfDiv V8_FINAL : public LTemplateInstruction<1, 2, 1> {
+class LFlooringDivByPowerOf2I V8_FINAL : public LTemplateInstruction<1, 1, 0> {
public:
- LMathFloorOfDiv(LOperand* left,
- LOperand* right,
- LOperand* temp = NULL) {
- inputs_[0] = left;
- inputs_[1] = right;
+ LFlooringDivByPowerOf2I(LOperand* dividend, int32_t divisor) {
+ inputs_[0] = dividend;
+ divisor_ = divisor;
+ }
+
+ LOperand* dividend() { return inputs_[0]; }
+ int32_t divisor() { return divisor_; }
+
+ DECLARE_CONCRETE_INSTRUCTION(FlooringDivByPowerOf2I,
+ "flooring-div-by-power-of-2-i")
+ DECLARE_HYDROGEN_ACCESSOR(MathFloorOfDiv)
+
+ private:
+ int32_t divisor_;
+};
+
+
+class LFlooringDivByConstI V8_FINAL : public LTemplateInstruction<1, 1, 2> {
+ public:
+ LFlooringDivByConstI(LOperand* dividend, int32_t divisor, LOperand* temp) {
+ inputs_[0] = dividend;
+ divisor_ = divisor;
temps_[0] = temp;
}
- LOperand* left() { return inputs_[0]; }
- LOperand* right() { return inputs_[1]; }
+ LOperand* dividend() { return inputs_[0]; }
+ int32_t divisor() const { return divisor_; }
LOperand* temp() { return temps_[0]; }
- DECLARE_CONCRETE_INSTRUCTION(MathFloorOfDiv, "math-floor-of-div")
+ DECLARE_CONCRETE_INSTRUCTION(FlooringDivByConstI, "flooring-div-by-const-i")
DECLARE_HYDROGEN_ACCESSOR(MathFloorOfDiv)
+
+ private:
+ int32_t divisor_;
};
@@ -802,6 +880,18 @@ class LMathLog V8_FINAL : public LTemplateInstruction<1, 1, 0> {
};
+class LMathClz32 V8_FINAL : public LTemplateInstruction<1, 1, 0> {
+ public:
+ explicit LMathClz32(LOperand* value) {
+ inputs_[0] = value;
+ }
+
+ LOperand* value() { return inputs_[0]; }
+
+ DECLARE_CONCRETE_INSTRUCTION(MathClz32, "math-clz32")
+};
+
+
class LMathExp V8_FINAL : public LTemplateInstruction<1, 1, 3> {
public:
LMathExp(LOperand* value,
@@ -1863,19 +1953,6 @@ class LInteger32ToDouble V8_FINAL : public LTemplateInstruction<1, 1, 0> {
};
-class LInteger32ToSmi V8_FINAL : public LTemplateInstruction<1, 1, 0> {
- public:
- explicit LInteger32ToSmi(LOperand* value) {
- inputs_[0] = value;
- }
-
- LOperand* value() { return inputs_[0]; }
-
- DECLARE_CONCRETE_INSTRUCTION(Integer32ToSmi, "int32-to-smi")
- DECLARE_HYDROGEN_ACCESSOR(Change)
-};
-
-
class LUint32ToDouble V8_FINAL : public LTemplateInstruction<1, 1, 0> {
public:
explicit LUint32ToDouble(LOperand* value) {
@@ -1888,38 +1965,33 @@ class LUint32ToDouble V8_FINAL : public LTemplateInstruction<1, 1, 0> {
};
-class LUint32ToSmi V8_FINAL : public LTemplateInstruction<1, 1, 0> {
- public:
- explicit LUint32ToSmi(LOperand* value) {
- inputs_[0] = value;
- }
-
- LOperand* value() { return inputs_[0]; }
-
- DECLARE_CONCRETE_INSTRUCTION(Uint32ToSmi, "uint32-to-smi")
- DECLARE_HYDROGEN_ACCESSOR(Change)
-};
-
-
-class LNumberTagI V8_FINAL : public LTemplateInstruction<1, 1, 0> {
+class LNumberTagI V8_FINAL : public LTemplateInstruction<1, 1, 2> {
public:
- explicit LNumberTagI(LOperand* value) {
+ LNumberTagI(LOperand* value, LOperand* temp1, LOperand* temp2) {
inputs_[0] = value;
+ temps_[0] = temp1;
+ temps_[1] = temp2;
}
LOperand* value() { return inputs_[0]; }
+ LOperand* temp1() { return temps_[0]; }
+ LOperand* temp2() { return temps_[1]; }
DECLARE_CONCRETE_INSTRUCTION(NumberTagI, "number-tag-i")
};
-class LNumberTagU V8_FINAL : public LTemplateInstruction<1, 1, 0> {
+class LNumberTagU V8_FINAL : public LTemplateInstruction<1, 1, 2> {
public:
- explicit LNumberTagU(LOperand* value) {
+ LNumberTagU(LOperand* value, LOperand* temp1, LOperand* temp2) {
inputs_[0] = value;
+ temps_[0] = temp1;
+ temps_[1] = temp2;
}
LOperand* value() { return inputs_[0]; }
+ LOperand* temp1() { return temps_[0]; }
+ LOperand* temp2() { return temps_[1]; }
DECLARE_CONCRETE_INSTRUCTION(NumberTagU, "number-tag-u")
};
@@ -2004,6 +2076,7 @@ class LSmiTag V8_FINAL : public LTemplateInstruction<1, 1, 0> {
LOperand* value() { return inputs_[0]; }
DECLARE_CONCRETE_INSTRUCTION(SmiTag, "smi-tag")
+ DECLARE_HYDROGEN_ACCESSOR(Change)
};
@@ -2079,7 +2152,7 @@ class LStoreNamedGeneric V8_FINAL : public LTemplateInstruction<0, 3, 0> {
virtual void PrintDataTo(StringStream* stream) V8_OVERRIDE;
Handle<Object> name() const { return hydrogen()->name(); }
- StrictModeFlag strict_mode_flag() { return hydrogen()->strict_mode_flag(); }
+ StrictMode strict_mode() { return hydrogen()->strict_mode(); }
};
@@ -2136,7 +2209,7 @@ class LStoreKeyedGeneric V8_FINAL : public LTemplateInstruction<0, 4, 0> {
virtual void PrintDataTo(StringStream* stream) V8_OVERRIDE;
- StrictModeFlag strict_mode_flag() { return hydrogen()->strict_mode_flag(); }
+ StrictMode strict_mode() { return hydrogen()->strict_mode(); }
};
@@ -2339,6 +2412,33 @@ class LClampTToUint8 V8_FINAL : public LTemplateInstruction<1, 1, 1> {
};
+class LDoubleBits V8_FINAL : public LTemplateInstruction<1, 1, 0> {
+ public:
+ explicit LDoubleBits(LOperand* value) {
+ inputs_[0] = value;
+ }
+
+ LOperand* value() { return inputs_[0]; }
+
+ DECLARE_CONCRETE_INSTRUCTION(DoubleBits, "double-bits")
+ DECLARE_HYDROGEN_ACCESSOR(DoubleBits)
+};
+
+
+class LConstructDouble V8_FINAL : public LTemplateInstruction<1, 2, 0> {
+ public:
+ LConstructDouble(LOperand* hi, LOperand* lo) {
+ inputs_[0] = hi;
+ inputs_[1] = lo;
+ }
+
+ LOperand* hi() { return inputs_[0]; }
+ LOperand* lo() { return inputs_[1]; }
+
+ DECLARE_CONCRETE_INSTRUCTION(ConstructDouble, "construct-double")
+};
+
+
class LAllocate V8_FINAL : public LTemplateInstruction<1, 2, 2> {
public:
LAllocate(LOperand* context,
@@ -2553,8 +2653,7 @@ class LChunkBuilder V8_FINAL : public LChunkBuilderBase {
current_instruction_(NULL),
current_block_(NULL),
next_block_(NULL),
- allocator_(allocator),
- position_(RelocInfo::kNoPosition) { }
+ allocator_(allocator) { }
// Build the sequence for the graph.
LPlatformChunk* Build();
@@ -2577,6 +2676,15 @@ class LChunkBuilder V8_FINAL : public LChunkBuilderBase {
LInstruction* DoMathExp(HUnaryMathOperation* instr);
LInstruction* DoMathSqrt(HUnaryMathOperation* instr);
LInstruction* DoMathPowHalf(HUnaryMathOperation* instr);
+ LInstruction* DoMathClz32(HUnaryMathOperation* instr);
+ LInstruction* DoDivByPowerOf2I(HDiv* instr);
+ LInstruction* DoDivByConstI(HDiv* instr);
+ LInstruction* DoDivI(HBinaryOperation* instr);
+ LInstruction* DoModByPowerOf2I(HMod* instr);
+ LInstruction* DoModByConstI(HMod* instr);
+ LInstruction* DoModI(HMod* instr);
+ LInstruction* DoFlooringDivByPowerOf2I(HMathFloorOfDiv* instr);
+ LInstruction* DoFlooringDivByConstI(HMathFloorOfDiv* instr);
private:
enum Status {
@@ -2688,7 +2796,6 @@ class LChunkBuilder V8_FINAL : public LChunkBuilderBase {
HBasicBlock* current_block_;
HBasicBlock* next_block_;
LAllocator* allocator_;
- int position_;
DISALLOW_COPY_AND_ASSIGN(LChunkBuilder);
};
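
A small sketch, not part of the patch, contrasting the two rounding conventions the new by-constant instructions distinguish: DivByPowerOf2I-style lowering needs C-style truncation toward zero, while FlooringDivByPowerOf2I (backing HMathFloorOfDiv) rounds toward negative infinity, which a bare arithmetic shift already provides. The helper names are illustrative only, and the snippet assumes arithmetic right shift of negative values, which holds on the toolchains V8 targets.

#include <cassert>

// Truncating division by 2^shift: bias negative dividends so the shift
// rounds toward zero instead of toward negative infinity.
static int TruncatingDivPow2(int dividend, int shift) {
  int bias = dividend < 0 ? (1 << shift) - 1 : 0;
  return (dividend + bias) >> shift;
}

// Flooring division by 2^shift: the arithmetic shift alone already floors.
static int FlooringDivPow2(int dividend, int shift) {
  return dividend >> shift;
}

int main() {
  assert(TruncatingDivPow2(-7, 1) == -3);  // Like C's -7 / 2.
  assert(FlooringDivPow2(-7, 1) == -4);    // Like Math.floor(-7 / 2).
  assert(TruncatingDivPow2(6, 1) == 3 && FlooringDivPow2(6, 1) == 3);
  return 0;
}
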
diff --git a/deps/v8/src/mips/macro-assembler-mips.cc b/deps/v8/src/mips/macro-assembler-mips.cc
index 69a2a3dc4b..77c02e7342 100644
--- a/deps/v8/src/mips/macro-assembler-mips.cc
+++ b/deps/v8/src/mips/macro-assembler-mips.cc
@@ -3440,8 +3440,8 @@ void MacroAssembler::StoreNumberToDoubleElements(Register value_reg,
bind(&is_nan);
// Load canonical NaN for storing into the double array.
LoadRoot(at, Heap::kNanValueRootIndex);
- lw(mantissa_reg, FieldMemOperand(at, HeapNumber::kValueOffset));
- lw(exponent_reg, FieldMemOperand(at, HeapNumber::kValueOffset + 4));
+ lw(mantissa_reg, FieldMemOperand(at, HeapNumber::kMantissaOffset));
+ lw(exponent_reg, FieldMemOperand(at, HeapNumber::kExponentOffset));
jmp(&have_double_value);
bind(&smi_value);
@@ -3986,7 +3986,7 @@ void MacroAssembler::CallApiFunctionAndReturn(
{
FrameScope frame(this, StackFrame::INTERNAL);
CallExternalReference(
- ExternalReference(Runtime::kPromoteScheduledException, isolate()),
+ ExternalReference(Runtime::kHiddenPromoteScheduledException, isolate()),
0);
}
jmp(&exception_handled);
@@ -4346,16 +4346,8 @@ void MacroAssembler::Check(Condition cc, BailoutReason reason,
void MacroAssembler::Abort(BailoutReason reason) {
Label abort_start;
bind(&abort_start);
- // We want to pass the msg string like a smi to avoid GC
- // problems, however msg is not guaranteed to be aligned
- // properly. Instead, we pass an aligned pointer that is
- // a proper v8 smi, but also pass the alignment difference
- // from the real pointer as a smi.
- const char* msg = GetBailoutReason(reason);
- intptr_t p1 = reinterpret_cast<intptr_t>(msg);
- intptr_t p0 = (p1 & ~kSmiTagMask) + kSmiTag;
- ASSERT(reinterpret_cast<Object*>(p0)->IsSmi());
#ifdef DEBUG
+ const char* msg = GetBailoutReason(reason);
if (msg != NULL) {
RecordComment("Abort message: ");
RecordComment(msg);
@@ -4367,18 +4359,16 @@ void MacroAssembler::Abort(BailoutReason reason) {
}
#endif
- li(a0, Operand(p0));
- push(a0);
- li(a0, Operand(Smi::FromInt(p1 - p0)));
+ li(a0, Operand(Smi::FromInt(reason)));
push(a0);
// Disable stub call restrictions to always allow calls to abort.
if (!has_frame_) {
// We don't actually want to generate a pile of code for this, so just
// claim there is a stack frame, without generating one.
FrameScope scope(this, StackFrame::NONE);
- CallRuntime(Runtime::kAbort, 2);
+ CallRuntime(Runtime::kAbort, 1);
} else {
- CallRuntime(Runtime::kAbort, 2);
+ CallRuntime(Runtime::kAbort, 1);
}
// Will not return here.
if (is_trampoline_pool_blocked()) {
@@ -4386,8 +4376,8 @@ void MacroAssembler::Abort(BailoutReason reason) {
// instructions generated, we insert padding here to keep the size
// of the Abort macro constant.
// Currently in debug mode with debug_code enabled the number of
- // generated instructions is 14, so we use this as a maximum value.
- static const int kExpectedAbortInstructions = 14;
+ // generated instructions is 10, so we use this as a maximum value.
+ static const int kExpectedAbortInstructions = 10;
int abort_instructions = InstructionsGeneratedSince(&abort_start);
ASSERT(abort_instructions <= kExpectedAbortInstructions);
while (abort_instructions++ < kExpectedAbortInstructions) {
@@ -4440,31 +4430,6 @@ void MacroAssembler::LoadTransitionedArrayMapConditional(
}
-void MacroAssembler::LoadInitialArrayMap(
- Register function_in, Register scratch,
- Register map_out, bool can_have_holes) {
- ASSERT(!function_in.is(map_out));
- Label done;
- lw(map_out, FieldMemOperand(function_in,
- JSFunction::kPrototypeOrInitialMapOffset));
- if (!FLAG_smi_only_arrays) {
- ElementsKind kind = can_have_holes ? FAST_HOLEY_ELEMENTS : FAST_ELEMENTS;
- LoadTransitionedArrayMapConditional(FAST_SMI_ELEMENTS,
- kind,
- map_out,
- scratch,
- &done);
- } else if (can_have_holes) {
- LoadTransitionedArrayMapConditional(FAST_SMI_ELEMENTS,
- FAST_HOLEY_SMI_ELEMENTS,
- map_out,
- scratch,
- &done);
- }
- bind(&done);
-}
-
-
void MacroAssembler::LoadGlobalFunction(int index, Register function) {
// Load the global or builtins object from the current context.
lw(function,
@@ -4477,19 +4442,6 @@ void MacroAssembler::LoadGlobalFunction(int index, Register function) {
}
-void MacroAssembler::LoadArrayFunction(Register function) {
- // Load the global or builtins object from the current context.
- lw(function,
- MemOperand(cp, Context::SlotOffset(Context::GLOBAL_OBJECT_INDEX)));
- // Load the global context from the global or builtins object.
- lw(function,
- FieldMemOperand(function, GlobalObject::kGlobalContextOffset));
- // Load the array function from the native context.
- lw(function,
- MemOperand(function, Context::SlotOffset(Context::ARRAY_FUNCTION_INDEX)));
-}
-
-
void MacroAssembler::LoadGlobalFunctionInitialMap(Register function,
Register map,
Register scratch) {
@@ -4865,6 +4817,23 @@ void MacroAssembler::AssertName(Register object) {
}
+void MacroAssembler::AssertUndefinedOrAllocationSite(Register object,
+ Register scratch) {
+ if (emit_debug_code()) {
+ Label done_checking;
+ AssertNotSmi(object);
+ LoadRoot(scratch, Heap::kUndefinedValueRootIndex);
+ Branch(&done_checking, eq, object, Operand(scratch));
+ push(object);
+ lw(object, FieldMemOperand(object, HeapObject::kMapOffset));
+ LoadRoot(scratch, Heap::kAllocationSiteMapRootIndex);
+ Assert(eq, kExpectedUndefinedOrCell, object, Operand(scratch));
+ pop(object);
+ bind(&done_checking);
+ }
+}
+
+
void MacroAssembler::AssertIsRoot(Register reg, Heap::RootListIndex index) {
if (emit_debug_code()) {
ASSERT(!reg.is(at));
@@ -5482,9 +5451,9 @@ void MacroAssembler::Throw(BailoutReason reason) {
// We don't actually want to generate a pile of code for this, so just
// claim there is a stack frame, without generating one.
FrameScope scope(this, StackFrame::NONE);
- CallRuntime(Runtime::kThrowMessage, 1);
+ CallRuntime(Runtime::kHiddenThrowMessage, 1);
} else {
- CallRuntime(Runtime::kThrowMessage, 1);
+ CallRuntime(Runtime::kHiddenThrowMessage, 1);
}
// will not return here
if (is_trampoline_pool_blocked()) {
@@ -5754,6 +5723,28 @@ void CodePatcher::ChangeBranchCondition(Condition cond) {
}
+void MacroAssembler::TruncatingDiv(Register result,
+ Register dividend,
+ int32_t divisor) {
+ ASSERT(!dividend.is(result));
+ ASSERT(!dividend.is(at));
+ ASSERT(!result.is(at));
+ MultiplierAndShift ms(divisor);
+ li(at, Operand(ms.multiplier()));
+ Mult(dividend, Operand(at));
+ mfhi(result);
+ if (divisor > 0 && ms.multiplier() < 0) {
+ Addu(result, result, Operand(dividend));
+ }
+ if (divisor < 0 && ms.multiplier() > 0) {
+ Subu(result, result, Operand(dividend));
+ }
+ if (ms.shift() > 0) sra(result, result, ms.shift());
+ srl(at, dividend, 31);
+ Addu(result, result, Operand(at));
+}
+
+
} } // namespace v8::internal
#endif // V8_TARGET_ARCH_MIPS
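
A minimal host-side sketch, not part of the patch, of what the TruncatingDiv sequence above computes for divisor == 3, whose multiplier-and-shift pair works out to (0x55555556, 0) by the usual Hacker's Delight construction: take the high 32 bits of the 64-bit product (mfhi), skip the sign corrections and the sra because multiplier and divisor are positive and the shift is zero, then add the dividend's sign bit (srl + Addu) to round toward zero. The helper name and the hard-coded constant are illustrative; the code assumes arithmetic right shift of negative 64-bit values.

#include <cassert>
#include <cstdint>

static int32_t TruncatingDivBy3(int32_t dividend) {
  const int32_t kMultiplier = 0x55555556;  // Magic multiplier for divisor 3.
  // High 32 bits of the 64-bit product, i.e. what mfhi yields after Mult.
  int32_t hi = static_cast<int32_t>(
      (static_cast<int64_t>(kMultiplier) * dividend) >> 32);
  // srl(at, dividend, 31); Addu(result, result, at): add the sign bit.
  return hi + static_cast<int32_t>(static_cast<uint32_t>(dividend) >> 31);
}

int main() {
  assert(TruncatingDivBy3(9) == 3);
  assert(TruncatingDivBy3(7) == 2);
  assert(TruncatingDivBy3(-7) == -2);  // Truncates toward zero, like C.
  return 0;
}
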
diff --git a/deps/v8/src/mips/macro-assembler-mips.h b/deps/v8/src/mips/macro-assembler-mips.h
index 85347c9e51..db9f1a2c76 100644
--- a/deps/v8/src/mips/macro-assembler-mips.h
+++ b/deps/v8/src/mips/macro-assembler-mips.h
@@ -871,14 +871,7 @@ class MacroAssembler: public Assembler {
Register scratch,
Label* no_map_match);
- // Load the initial map for new Arrays from a JSFunction.
- void LoadInitialArrayMap(Register function_in,
- Register scratch,
- Register map_out,
- bool can_have_holes);
-
void LoadGlobalFunction(int index, Register function);
- void LoadArrayFunction(Register function);
// Load the initial map from the global function. The registers
// function and map can be the same, function is then overwritten.
@@ -1311,6 +1304,10 @@ const Operand& rt = Operand(zero_reg), BranchDelaySlot bd = PROTECT
return code_object_;
}
+ // Emit code for a truncating division by a constant. The dividend register is
+ // unchanged and at gets clobbered. Dividend and result must be different.
+ void TruncatingDiv(Register result, Register dividend, int32_t divisor);
+
// -------------------------------------------------------------------------
// StatsCounter support.
@@ -1435,6 +1432,10 @@ const Operand& rt = Operand(zero_reg), BranchDelaySlot bd = PROTECT
// Abort execution if argument is not a name, enabled via --debug-code.
void AssertName(Register object);
+ // Abort execution if argument is not undefined or an AllocationSite, enabled
+ // via --debug-code.
+ void AssertUndefinedOrAllocationSite(Register object, Register scratch);
+
// Abort execution if reg is not the root value with the given index,
// enabled via --debug-code.
void AssertIsRoot(Register reg, Heap::RootListIndex index);
diff --git a/deps/v8/src/mips/simulator-mips.cc b/deps/v8/src/mips/simulator-mips.cc
index 10417d573c..d26499bbc5 100644
--- a/deps/v8/src/mips/simulator-mips.cc
+++ b/deps/v8/src/mips/simulator-mips.cc
@@ -925,6 +925,10 @@ Simulator::Simulator(Isolate* isolate) : isolate_(isolate) {
}
+Simulator::~Simulator() {
+}
+
+
// When the generated code calls an external reference we need to catch that in
// the simulator. The external reference will be a function compiled for the
// host architecture. We need to call that function instead of trying to
@@ -1926,7 +1930,11 @@ void Simulator::ConfigureTypeRegister(Instruction* instr,
alu_out = rs_u * rt_u; // Only the lower 32 bits are kept.
break;
case CLZ:
- alu_out = __builtin_clz(rs_u);
+ // MIPS32 spec: If no bits were set in GPR rs, the result written to
+ // GPR rd is 32.
+ // GCC __builtin_clz: If input is 0, the result is undefined.
+ alu_out =
+ rs_u == 0 ? 32 : CompilerIntrinsics::CountLeadingZeros(rs_u);
break;
default:
UNREACHABLE();
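
The CLZ fix above guards the GCC intrinsic, whose result is undefined for a zero input, so the simulator matches the MIPS32 definition clz(0) == 32. A standalone sketch of the same guard, assuming a GCC/Clang toolchain for __builtin_clz:

#include <cassert>
#include <cstdint>

static uint32_t CountLeadingZeros32(uint32_t x) {
  if (x == 0) return 32;  // MIPS32 CLZ: an all-zero input yields 32.
  return static_cast<uint32_t>(__builtin_clz(x));  // Well-defined: x != 0.
}

int main() {
  assert(CountLeadingZeros32(0) == 32);
  assert(CountLeadingZeros32(1) == 31);
  assert(CountLeadingZeros32(0x80000000u) == 0);
  return 0;
}
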
diff --git a/deps/v8/src/mips/simulator-mips.h b/deps/v8/src/mips/simulator-mips.h
index d9fd10f245..92a0a87d24 100644
--- a/deps/v8/src/mips/simulator-mips.h
+++ b/deps/v8/src/mips/simulator-mips.h
@@ -203,6 +203,10 @@ class Simulator {
void set_pc(int32_t value);
int32_t get_pc() const;
+ Address get_sp() {
+ return reinterpret_cast<Address>(static_cast<intptr_t>(get_register(sp)));
+ }
+
// Accessor to the internal simulator stack area.
uintptr_t StackLimit() const;
diff --git a/deps/v8/src/mips/stub-cache-mips.cc b/deps/v8/src/mips/stub-cache-mips.cc
index d1b428a345..153a816820 100644
--- a/deps/v8/src/mips/stub-cache-mips.cc
+++ b/deps/v8/src/mips/stub-cache-mips.cc
@@ -313,7 +313,7 @@ void StubCompiler::GenerateFastPropertyLoad(MacroAssembler* masm,
bool inobject,
int index,
Representation representation) {
- ASSERT(!FLAG_track_double_fields || !representation.IsDouble());
+ ASSERT(!representation.IsDouble());
int offset = index * kPointerSize;
if (!inobject) {
// Calculate the offset into the properties array.
@@ -342,61 +342,6 @@ void StubCompiler::GenerateLoadArrayLength(MacroAssembler* masm,
}
-// Generate code to check if an object is a string. If the object is a
-// heap object, its map's instance type is left in the scratch1 register.
-// If this is not needed, scratch1 and scratch2 may be the same register.
-static void GenerateStringCheck(MacroAssembler* masm,
- Register receiver,
- Register scratch1,
- Register scratch2,
- Label* smi,
- Label* non_string_object) {
- // Check that the receiver isn't a smi.
- __ JumpIfSmi(receiver, smi, t0);
-
- // Check that the object is a string.
- __ lw(scratch1, FieldMemOperand(receiver, HeapObject::kMapOffset));
- __ lbu(scratch1, FieldMemOperand(scratch1, Map::kInstanceTypeOffset));
- __ And(scratch2, scratch1, Operand(kIsNotStringMask));
- // The cast is to resolve the overload for the argument of 0x0.
- __ Branch(non_string_object,
- ne,
- scratch2,
- Operand(static_cast<int32_t>(kStringTag)));
-}
-
-
-// Generate code to load the length from a string object and return the length.
-// If the receiver object is not a string or a wrapped string object the
-// execution continues at the miss label. The register containing the
-// receiver is potentially clobbered.
-void StubCompiler::GenerateLoadStringLength(MacroAssembler* masm,
- Register receiver,
- Register scratch1,
- Register scratch2,
- Label* miss) {
- Label check_wrapper;
-
- // Check if the object is a string leaving the instance type in the
- // scratch1 register.
- GenerateStringCheck(masm, receiver, scratch1, scratch2, miss, &check_wrapper);
-
- // Load length directly from the string.
- __ Ret(USE_DELAY_SLOT);
- __ lw(v0, FieldMemOperand(receiver, String::kLengthOffset));
-
- // Check if the object is a JSValue wrapper.
- __ bind(&check_wrapper);
- __ Branch(miss, ne, scratch1, Operand(JS_VALUE_TYPE));
-
- // Unwrap the value and check if the wrapped value is a string.
- __ lw(scratch1, FieldMemOperand(receiver, JSValue::kValueOffset));
- GenerateStringCheck(masm, scratch1, scratch2, scratch2, miss, miss);
- __ Ret(USE_DELAY_SLOT);
- __ lw(v0, FieldMemOperand(scratch1, String::kLengthOffset));
-}
-
-
void StubCompiler::GenerateLoadFunctionPrototype(MacroAssembler* masm,
Register receiver,
Register scratch1,
@@ -467,11 +412,11 @@ void StoreStubCompiler::GenerateStoreTransition(MacroAssembler* masm,
Handle<Object> constant(descriptors->GetValue(descriptor), masm->isolate());
__ li(scratch1, constant);
__ Branch(miss_label, ne, value_reg, Operand(scratch1));
- } else if (FLAG_track_fields && representation.IsSmi()) {
+ } else if (representation.IsSmi()) {
__ JumpIfNotSmi(value_reg, miss_label);
- } else if (FLAG_track_heap_object_fields && representation.IsHeapObject()) {
+ } else if (representation.IsHeapObject()) {
__ JumpIfSmi(value_reg, miss_label);
- } else if (FLAG_track_double_fields && representation.IsDouble()) {
+ } else if (representation.IsDouble()) {
Label do_store, heap_number;
__ LoadRoot(scratch3, Heap::kHeapNumberMapRootIndex);
__ AllocateHeapNumber(storage_reg, scratch1, scratch2, scratch3, slow);
@@ -545,15 +490,15 @@ void StoreStubCompiler::GenerateStoreTransition(MacroAssembler* masm,
if (index < 0) {
// Set the property straight into the object.
int offset = object->map()->instance_size() + (index * kPointerSize);
- if (FLAG_track_double_fields && representation.IsDouble()) {
+ if (representation.IsDouble()) {
__ sw(storage_reg, FieldMemOperand(receiver_reg, offset));
} else {
__ sw(value_reg, FieldMemOperand(receiver_reg, offset));
}
- if (!FLAG_track_fields || !representation.IsSmi()) {
+ if (!representation.IsSmi()) {
// Update the write barrier for the array address.
- if (!FLAG_track_double_fields || !representation.IsDouble()) {
+ if (!representation.IsDouble()) {
__ mov(storage_reg, value_reg);
}
__ RecordWriteField(receiver_reg,
@@ -571,15 +516,15 @@ void StoreStubCompiler::GenerateStoreTransition(MacroAssembler* masm,
// Get the properties array
__ lw(scratch1,
FieldMemOperand(receiver_reg, JSObject::kPropertiesOffset));
- if (FLAG_track_double_fields && representation.IsDouble()) {
+ if (representation.IsDouble()) {
__ sw(storage_reg, FieldMemOperand(scratch1, offset));
} else {
__ sw(value_reg, FieldMemOperand(scratch1, offset));
}
- if (!FLAG_track_fields || !representation.IsSmi()) {
+ if (!representation.IsSmi()) {
// Update the write barrier for the array address.
- if (!FLAG_track_double_fields || !representation.IsDouble()) {
+ if (!representation.IsDouble()) {
__ mov(storage_reg, value_reg);
}
__ RecordWriteField(scratch1,
@@ -630,11 +575,11 @@ void StoreStubCompiler::GenerateStoreField(MacroAssembler* masm,
Representation representation = lookup->representation();
ASSERT(!representation.IsNone());
- if (FLAG_track_fields && representation.IsSmi()) {
+ if (representation.IsSmi()) {
__ JumpIfNotSmi(value_reg, miss_label);
- } else if (FLAG_track_heap_object_fields && representation.IsHeapObject()) {
+ } else if (representation.IsHeapObject()) {
__ JumpIfSmi(value_reg, miss_label);
- } else if (FLAG_track_double_fields && representation.IsDouble()) {
+ } else if (representation.IsDouble()) {
// Load the double storage.
if (index < 0) {
int offset = object->map()->instance_size() + (index * kPointerSize);
@@ -676,7 +621,7 @@ void StoreStubCompiler::GenerateStoreField(MacroAssembler* masm,
int offset = object->map()->instance_size() + (index * kPointerSize);
__ sw(value_reg, FieldMemOperand(receiver_reg, offset));
- if (!FLAG_track_fields || !representation.IsSmi()) {
+ if (!representation.IsSmi()) {
// Skip updating write barrier if storing a smi.
__ JumpIfSmi(value_reg, &exit);
@@ -700,7 +645,7 @@ void StoreStubCompiler::GenerateStoreField(MacroAssembler* masm,
FieldMemOperand(receiver_reg, JSObject::kPropertiesOffset));
__ sw(value_reg, FieldMemOperand(scratch1, offset));
- if (!FLAG_track_fields || !representation.IsSmi()) {
+ if (!representation.IsSmi()) {
// Skip updating write barrier if storing a smi.
__ JumpIfSmi(value_reg, &exit);
@@ -770,13 +715,14 @@ static void CompileCallLoadPropertyWithInterceptor(
// Generate call to api function.
-static void GenerateFastApiCall(MacroAssembler* masm,
- const CallOptimization& optimization,
- Handle<Map> receiver_map,
- Register receiver,
- Register scratch_in,
- int argc,
- Register* values) {
+void StubCompiler::GenerateFastApiCall(MacroAssembler* masm,
+ const CallOptimization& optimization,
+ Handle<Map> receiver_map,
+ Register receiver,
+ Register scratch_in,
+ bool is_store,
+ int argc,
+ Register* values) {
ASSERT(!receiver.is(scratch_in));
// Preparing to push, adjust sp.
__ Subu(sp, sp, Operand((argc + 1) * kPointerSize));
@@ -843,7 +789,7 @@ static void GenerateFastApiCall(MacroAssembler* masm,
__ li(api_function_address, Operand(ref));
// Jump to stub.
- CallApiFunctionStub stub(true, call_data_undefined, argc);
+ CallApiFunctionStub stub(is_store, call_data_undefined, argc);
__ TailCallStub(&stub);
}
@@ -867,9 +813,6 @@ Register StubCompiler::CheckPrototypes(Handle<HeapType> type,
Label* miss,
PrototypeCheckType check) {
Handle<Map> receiver_map(IC::TypeToMap(*type, isolate()));
- // Make sure that the type feedback oracle harvests the receiver map.
- // TODO(svenpanne) Remove this hack when all ICs are reworked.
- __ li(scratch1, Operand(receiver_map));
// Make sure there's no overlap between holder and object registers.
ASSERT(!scratch1.is(object_reg) && !scratch1.is(holder_reg));
@@ -1064,15 +1007,6 @@ void LoadStubCompiler::GenerateLoadConstant(Handle<Object> value) {
void LoadStubCompiler::GenerateLoadCallback(
- const CallOptimization& call_optimization,
- Handle<Map> receiver_map) {
- GenerateFastApiCall(
- masm(), call_optimization, receiver_map,
- receiver(), scratch3(), 0, NULL);
-}
-
-
-void LoadStubCompiler::GenerateLoadCallback(
Register reg,
Handle<ExecutableAccessorInfo> callback) {
// Build AccessorInfo::args_ list on the stack and push property name below
@@ -1246,24 +1180,6 @@ Handle<Code> StoreStubCompiler::CompileStoreCallback(
}
-Handle<Code> StoreStubCompiler::CompileStoreCallback(
- Handle<JSObject> object,
- Handle<JSObject> holder,
- Handle<Name> name,
- const CallOptimization& call_optimization) {
- HandlerFrontend(IC::CurrentTypeOf(object, isolate()),
- receiver(), holder, name);
-
- Register values[] = { value() };
- GenerateFastApiCall(
- masm(), call_optimization, handle(object->map()),
- receiver(), scratch3(), 1, values);
-
- // Return the generated code.
- return GetCode(kind(), Code::FAST, name);
-}
-
-
#undef __
#define __ ACCESS_MASM(masm)
@@ -1271,20 +1187,16 @@ Handle<Code> StoreStubCompiler::CompileStoreCallback(
void StoreStubCompiler::GenerateStoreViaSetter(
MacroAssembler* masm,
Handle<HeapType> type,
+ Register receiver,
Handle<JSFunction> setter) {
// ----------- S t a t e -------------
- // -- a0 : value
- // -- a1 : receiver
- // -- a2 : name
// -- ra : return address
// -----------------------------------
{
FrameScope scope(masm, StackFrame::INTERNAL);
- Register receiver = a1;
- Register value = a0;
// Save value register, so we can restore it later.
- __ push(value);
+ __ push(value());
if (!setter.is_null()) {
// Call the JavaScript setter with receiver and value on the stack.
@@ -1294,7 +1206,7 @@ void StoreStubCompiler::GenerateStoreViaSetter(
FieldMemOperand(
receiver, JSGlobalObject::kGlobalReceiverOffset));
}
- __ Push(receiver, value);
+ __ Push(receiver, value());
ParameterCount actual(1);
ParameterCount expected(setter);
__ InvokeFunction(setter, expected, actual,
@@ -1322,21 +1234,6 @@ void StoreStubCompiler::GenerateStoreViaSetter(
Handle<Code> StoreStubCompiler::CompileStoreInterceptor(
Handle<JSObject> object,
Handle<Name> name) {
- Label miss;
-
- // Check that the map of the object hasn't changed.
- __ CheckMap(receiver(), scratch1(), Handle<Map>(object->map()), &miss,
- DO_SMI_CHECK);
-
- // Perform global security token check if needed.
- if (object->IsJSGlobalProxy()) {
- __ CheckAccessGlobalProxy(receiver(), scratch1(), &miss);
- }
-
- // Stub is never generated for non-global objects that require access
- // checks.
- ASSERT(object->IsJSGlobalProxy() || !object->IsAccessCheckNeeded());
-
__ Push(receiver(), this->name(), value());
// Do tail-call to the runtime system.
@@ -1344,10 +1241,6 @@ Handle<Code> StoreStubCompiler::CompileStoreInterceptor(
ExternalReference(IC_Utility(IC::kStoreInterceptorProperty), isolate());
__ TailCallExternalReference(store_ic_property, 3, 1);
- // Handle store cache miss.
- __ bind(&miss);
- TailCallBuiltin(masm(), MissBuiltin(kind()));
-
// Return the generated code.
return GetCode(kind(), Code::FAST, name);
}
@@ -1381,16 +1274,21 @@ Register* KeyedLoadStubCompiler::registers() {
}
+Register StoreStubCompiler::value() {
+ return a0;
+}
+
+
Register* StoreStubCompiler::registers() {
- // receiver, name, value, scratch1, scratch2, scratch3.
- static Register registers[] = { a1, a2, a0, a3, t0, t1 };
+ // receiver, name, scratch1, scratch2, scratch3.
+ static Register registers[] = { a1, a2, a3, t0, t1 };
return registers;
}
Register* KeyedStoreStubCompiler::registers() {
- // receiver, name, value, scratch1, scratch2, scratch3.
- static Register registers[] = { a2, a1, a0, a3, t0, t1 };
+ // receiver, name, scratch1, scratch2, scratch3.
+ static Register registers[] = { a2, a1, a3, t0, t1 };
return registers;
}
@@ -1524,6 +1422,17 @@ Handle<Code> BaseLoadStoreStubCompiler::CompilePolymorphicIC(
}
+void StoreStubCompiler::GenerateStoreArrayLength() {
+ // Prepare tail call to StoreIC_ArrayLength.
+ __ Push(receiver(), value());
+
+ ExternalReference ref =
+ ExternalReference(IC_Utility(IC::kStoreIC_ArrayLength),
+ masm()->isolate());
+ __ TailCallExternalReference(ref, 2, 1);
+}
+
+
Handle<Code> KeyedStoreStubCompiler::CompileStorePolymorphic(
MapHandleList* receiver_maps,
CodeHandleList* handler_stubs,
diff --git a/deps/v8/src/mirror-debugger.js b/deps/v8/src/mirror-debugger.js
index 212bb0b9ca..d413b090b7 100644
--- a/deps/v8/src/mirror-debugger.js
+++ b/deps/v8/src/mirror-debugger.js
@@ -538,7 +538,7 @@ inherits(NumberMirror, ValueMirror);
NumberMirror.prototype.toText = function() {
- return %NumberToString(this.value_);
+ return %_NumberToString(this.value_);
};
@@ -889,9 +889,12 @@ FunctionMirror.prototype.script = function() {
// Return script if function is resolved. Otherwise just fall through
// to return undefined.
if (this.resolved()) {
+ if (this.script_) {
+ return this.script_;
+ }
var script = %FunctionGetScript(this.value_);
if (script) {
- return MakeMirror(script);
+ return this.script_ = MakeMirror(script);
}
}
};
@@ -917,9 +920,11 @@ FunctionMirror.prototype.sourcePosition_ = function() {
* @return {Location or undefined} in-script location for the function begin
*/
FunctionMirror.prototype.sourceLocation = function() {
- if (this.resolved() && this.script()) {
- return this.script().locationFromPosition(this.sourcePosition_(),
- true);
+ if (this.resolved()) {
+ var script = this.script();
+ if (script) {
+ return script.locationFromPosition(this.sourcePosition_(), true);
+ }
}
};
@@ -949,7 +954,10 @@ FunctionMirror.prototype.constructedBy = function(opt_max_instances) {
FunctionMirror.prototype.scopeCount = function() {
if (this.resolved()) {
- return %GetFunctionScopeCount(this.value());
+ if (IS_UNDEFINED(this.scopeCount_)) {
+ this.scopeCount_ = %GetFunctionScopeCount(this.value());
+ }
+ return this.scopeCount_;
} else {
return 0;
}
@@ -1506,7 +1514,10 @@ FrameDetails.prototype.returnValue = function() {
FrameDetails.prototype.scopeCount = function() {
- return %GetScopeCount(this.break_id_, this.frameId());
+ if (IS_UNDEFINED(this.scopeCount_)) {
+ this.scopeCount_ = %GetScopeCount(this.break_id_, this.frameId());
+ }
+ return this.scopeCount_;
};
@@ -1532,12 +1543,21 @@ function FrameMirror(break_id, index) {
inherits(FrameMirror, Mirror);
+FrameMirror.prototype.details = function() {
+ return this.details_;
+};
+
+
FrameMirror.prototype.index = function() {
return this.index_;
};
FrameMirror.prototype.func = function() {
+ if (this.func_) {
+ return this.func_;
+ }
+
// Get the function for this frame from the VM.
var f = this.details_.func();
@@ -1545,7 +1565,7 @@ FrameMirror.prototype.func = function() {
// value returned from the VM might be a string if the function for the
// frame is unresolved.
if (IS_FUNCTION(f)) {
- return MakeMirror(f);
+ return this.func_ = MakeMirror(f);
} else {
return new UnresolvedFunctionMirror(f);
}
@@ -1628,39 +1648,36 @@ FrameMirror.prototype.sourcePosition = function() {
FrameMirror.prototype.sourceLocation = function() {
- if (this.func().resolved() && this.func().script()) {
- return this.func().script().locationFromPosition(this.sourcePosition(),
- true);
+ var func = this.func();
+ if (func.resolved()) {
+ var script = func.script();
+ if (script) {
+ return script.locationFromPosition(this.sourcePosition(), true);
+ }
}
};
FrameMirror.prototype.sourceLine = function() {
- if (this.func().resolved()) {
- var location = this.sourceLocation();
- if (location) {
- return location.line;
- }
+ var location = this.sourceLocation();
+ if (location) {
+ return location.line;
}
};
FrameMirror.prototype.sourceColumn = function() {
- if (this.func().resolved()) {
- var location = this.sourceLocation();
- if (location) {
- return location.column;
- }
+ var location = this.sourceLocation();
+ if (location) {
+ return location.column;
}
};
FrameMirror.prototype.sourceLineText = function() {
- if (this.func().resolved()) {
- var location = this.sourceLocation();
- if (location) {
- return location.sourceText();
- }
+ var location = this.sourceLocation();
+ if (location) {
+ return location.sourceText();
}
};
@@ -1675,6 +1692,19 @@ FrameMirror.prototype.scope = function(index) {
};
+FrameMirror.prototype.allScopes = function(opt_ignore_nested_scopes) {
+ var scopeDetails = %GetAllScopesDetails(this.break_id_,
+ this.details_.frameId(),
+ this.details_.inlinedFrameIndex(),
+ !!opt_ignore_nested_scopes);
+ var result = [];
+ for (var i = 0; i < scopeDetails.length; ++i) {
+ result.push(new ScopeMirror(this, UNDEFINED, i, scopeDetails[i]));
+ }
+ return result;
+};
+
+
FrameMirror.prototype.stepInPositions = function() {
var script = this.func().script();
var funcOffset = this.func().sourcePosition_();
@@ -1793,9 +1823,10 @@ FrameMirror.prototype.sourceAndPositionText = function() {
var result = '';
var func = this.func();
if (func.resolved()) {
- if (func.script()) {
- if (func.script().name()) {
- result += func.script().name();
+ var script = func.script();
+ if (script) {
+ if (script.name()) {
+ result += script.name();
} else {
result += '[unnamed]';
}
@@ -1865,17 +1896,18 @@ FrameMirror.prototype.toText = function(opt_locals) {
var kScopeDetailsTypeIndex = 0;
var kScopeDetailsObjectIndex = 1;
-function ScopeDetails(frame, fun, index) {
+function ScopeDetails(frame, fun, index, opt_details) {
if (frame) {
this.break_id_ = frame.break_id_;
- this.details_ = %GetScopeDetails(frame.break_id_,
+ this.details_ = opt_details ||
+ %GetScopeDetails(frame.break_id_,
frame.details_.frameId(),
frame.details_.inlinedFrameIndex(),
index);
this.frame_id_ = frame.details_.frameId();
this.inlined_frame_id_ = frame.details_.inlinedFrameIndex();
} else {
- this.details_ = %GetFunctionScopeDetails(fun.value(), index);
+ this.details_ = opt_details || %GetFunctionScopeDetails(fun.value(), index);
this.fun_value_ = fun.value();
this.break_id_ = undefined;
}
@@ -1921,10 +1953,11 @@ ScopeDetails.prototype.setVariableValueImpl = function(name, new_value) {
* @param {FrameMirror} frame The frame this scope is a part of
* @param {FunctionMirror} function The function this scope is a part of
* @param {number} index The scope index in the frame
+ * @param {Array=} opt_details Raw scope details data
* @constructor
* @extends Mirror
*/
-function ScopeMirror(frame, function, index) {
+function ScopeMirror(frame, function, index, opt_details) {
%_CallFunction(this, SCOPE_TYPE, Mirror);
if (frame) {
this.frame_index_ = frame.index_;
@@ -1932,11 +1965,16 @@ function ScopeMirror(frame, function, index) {
this.frame_index_ = undefined;
}
this.scope_index_ = index;
- this.details_ = new ScopeDetails(frame, function, index);
+ this.details_ = new ScopeDetails(frame, function, index, opt_details);
}
inherits(ScopeMirror, Mirror);
+ScopeMirror.prototype.details = function() {
+ return this.details_;
+};
+
+
ScopeMirror.prototype.frameIndex = function() {
return this.frame_index_;
};
@@ -2575,8 +2613,9 @@ JSONProtocolSerializer.prototype.serializeFrame_ = function(mirror, content) {
content.receiver = this.serializeReference(mirror.receiver());
var func = mirror.func();
content.func = this.serializeReference(func);
- if (func.script()) {
- content.script = this.serializeReference(func.script());
+ var script = func.script();
+ if (script) {
+ content.script = this.serializeReference(script);
}
content.constructCall = mirror.isConstructCall();
content.atReturn = mirror.isAtReturn();
diff --git a/deps/v8/src/object-observe.js b/deps/v8/src/object-observe.js
index 499b27eca1..e822f0bd49 100644
--- a/deps/v8/src/object-observe.js
+++ b/deps/v8/src/object-observe.js
@@ -56,40 +56,86 @@
// implementation of (1) and (2) have "optimized" states which represent
// common cases which can be handled more efficiently.
-var observationState = %GetObservationState();
-if (IS_UNDEFINED(observationState.callbackInfoMap)) {
- observationState.callbackInfoMap = %ObservationWeakMapCreate();
- observationState.objectInfoMap = %ObservationWeakMapCreate();
- observationState.notifierObjectInfoMap = %ObservationWeakMapCreate();
- observationState.pendingObservers = null;
- observationState.nextCallbackPriority = 0;
-}
-
-function ObservationWeakMap(map) {
- this.map_ = map;
-}
-
-ObservationWeakMap.prototype = {
- get: function(key) {
- key = %UnwrapGlobalProxy(key);
- if (!IS_SPEC_OBJECT(key)) return UNDEFINED;
- return %WeakCollectionGet(this.map_, key);
- },
- set: function(key, value) {
- key = %UnwrapGlobalProxy(key);
- if (!IS_SPEC_OBJECT(key)) return UNDEFINED;
- %WeakCollectionSet(this.map_, key, value);
- },
- has: function(key) {
- return !IS_UNDEFINED(this.get(key));
+var observationState;
+
+function GetObservationState() {
+ if (IS_UNDEFINED(observationState))
+ observationState = %GetObservationState();
+
+ if (IS_UNDEFINED(observationState.callbackInfoMap)) {
+ observationState.callbackInfoMap = %ObservationWeakMapCreate();
+ observationState.objectInfoMap = %ObservationWeakMapCreate();
+ observationState.notifierObjectInfoMap = %ObservationWeakMapCreate();
+ observationState.pendingObservers = null;
+ observationState.nextCallbackPriority = 0;
}
-};
-var callbackInfoMap =
- new ObservationWeakMap(observationState.callbackInfoMap);
-var objectInfoMap = new ObservationWeakMap(observationState.objectInfoMap);
-var notifierObjectInfoMap =
- new ObservationWeakMap(observationState.notifierObjectInfoMap);
+ return observationState;
+}
+
+function GetWeakMapWrapper() {
+ function MapWrapper(map) {
+ this.map_ = map;
+ };
+
+ MapWrapper.prototype = {
+ get: function(key) {
+ key = %UnwrapGlobalProxy(key);
+ if (!IS_SPEC_OBJECT(key)) return UNDEFINED;
+ return %WeakCollectionGet(this.map_, key);
+ },
+ set: function(key, value) {
+ key = %UnwrapGlobalProxy(key);
+ if (!IS_SPEC_OBJECT(key)) return UNDEFINED;
+ %WeakCollectionSet(this.map_, key, value);
+ },
+ has: function(key) {
+ return !IS_UNDEFINED(this.get(key));
+ }
+ };
+
+ return MapWrapper;
+}
+
+var contextMaps;
+
+function GetContextMaps() {
+ if (IS_UNDEFINED(contextMaps)) {
+ var map = GetWeakMapWrapper();
+ var observationState = GetObservationState();
+ contextMaps = {
+ callbackInfoMap: new map(observationState.callbackInfoMap),
+ objectInfoMap: new map(observationState.objectInfoMap),
+ notifierObjectInfoMap: new map(observationState.notifierObjectInfoMap)
+ };
+ }
+
+ return contextMaps;
+}
+
+function GetCallbackInfoMap() {
+ return GetContextMaps().callbackInfoMap;
+}
+
+function GetObjectInfoMap() {
+ return GetContextMaps().objectInfoMap;
+}
+
+function GetNotifierObjectInfoMap() {
+ return GetContextMaps().notifierObjectInfoMap;
+}
+
+function GetPendingObservers() {
+ return GetObservationState().pendingObservers;
+}
+
+function SetPendingObservers(pendingObservers) {
+ GetObservationState().pendingObservers = pendingObservers;
+}
+
+function GetNextCallbackPriority() {
+ return GetObservationState().nextCallbackPriority++;
+}
function nullProtoObject() {
return { __proto__: null };
@@ -180,23 +226,23 @@ function ObjectInfoGetOrCreate(object) {
performing: null,
performingCount: 0,
};
- objectInfoMap.set(object, objectInfo);
+ GetObjectInfoMap().set(object, objectInfo);
}
return objectInfo;
}
function ObjectInfoGet(object) {
- return objectInfoMap.get(object);
+ return GetObjectInfoMap().get(object);
}
function ObjectInfoGetFromNotifier(notifier) {
- return notifierObjectInfoMap.get(notifier);
+ return GetNotifierObjectInfoMap().get(notifier);
}
function ObjectInfoGetNotifier(objectInfo) {
if (IS_NULL(objectInfo.notifier)) {
objectInfo.notifier = { __proto__: notifierPrototype };
- notifierObjectInfoMap.set(objectInfo.notifier, objectInfo);
+ GetNotifierObjectInfoMap().set(objectInfo.notifier, objectInfo);
}
return objectInfo.notifier;
@@ -302,16 +348,16 @@ function AcceptArgIsValid(arg) {
// priority. When a change record must be enqueued for the callback, it
// normalizes. When delivery clears any pending change records, it re-optimizes.
function CallbackInfoGet(callback) {
- return callbackInfoMap.get(callback);
+ return GetCallbackInfoMap().get(callback);
}
function CallbackInfoGetOrCreate(callback) {
- var callbackInfo = callbackInfoMap.get(callback);
+ var callbackInfo = GetCallbackInfoMap().get(callback);
if (!IS_UNDEFINED(callbackInfo))
return callbackInfo;
- var priority = observationState.nextCallbackPriority++
- callbackInfoMap.set(callback, priority);
+ var priority = GetNextCallbackPriority();
+ GetCallbackInfoMap().set(callback, priority);
return priority;
}
@@ -323,12 +369,12 @@ function CallbackInfoGetPriority(callbackInfo) {
}
function CallbackInfoNormalize(callback) {
- var callbackInfo = callbackInfoMap.get(callback);
+ var callbackInfo = GetCallbackInfoMap().get(callback);
if (IS_NUMBER(callbackInfo)) {
var priority = callbackInfo;
callbackInfo = new InternalArray;
callbackInfo.priority = priority;
- callbackInfoMap.set(callback, callbackInfo);
+ GetCallbackInfoMap().set(callback, callbackInfo);
}
return callbackInfo;
}
@@ -390,11 +436,13 @@ function ObserverEnqueueIfActive(observer, objectInfo, changeRecord,
}
var callbackInfo = CallbackInfoNormalize(callback);
- if (!observationState.pendingObservers)
- observationState.pendingObservers = nullProtoObject();
- observationState.pendingObservers[callbackInfo.priority] = callback;
+ if (IS_NULL(GetPendingObservers())) {
+ SetPendingObservers(nullProtoObject())
+ GetMicrotaskQueue().push(ObserveMicrotaskRunner);
+ %SetMicrotaskPending(true);
+ }
+ GetPendingObservers()[callbackInfo.priority] = callback;
callbackInfo.push(changeRecord);
- %SetMicrotaskPending(true);
}
function ObjectInfoEnqueueExternalChangeRecord(objectInfo, changeRecord, type) {
@@ -546,17 +594,17 @@ function ObjectGetNotifier(object) {
}
function CallbackDeliverPending(callback) {
- var callbackInfo = callbackInfoMap.get(callback);
+ var callbackInfo = GetCallbackInfoMap().get(callback);
if (IS_UNDEFINED(callbackInfo) || IS_NUMBER(callbackInfo))
return false;
// Clear the pending change records from callback and return it to its
// "optimized" state.
var priority = callbackInfo.priority;
- callbackInfoMap.set(callback, priority);
+ GetCallbackInfoMap().set(callback, priority);
- if (observationState.pendingObservers)
- delete observationState.pendingObservers[priority];
+ if (GetPendingObservers())
+ delete GetPendingObservers()[priority];
var delivered = [];
%MoveArrayContents(callbackInfo, delivered);
@@ -575,15 +623,14 @@ function ObjectDeliverChangeRecords(callback) {
}
function ObserveMicrotaskRunner() {
- var pendingObservers = observationState.pendingObservers;
+ var pendingObservers = GetPendingObservers();
if (pendingObservers) {
- observationState.pendingObservers = null;
+ SetPendingObservers(null);
for (var i in pendingObservers) {
CallbackDeliverPending(pendingObservers[i]);
}
}
}
-RunMicrotasks.runners.push(ObserveMicrotaskRunner);
function SetupObjectObserve() {
%CheckIsBootstrapping();
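
The object-observe.js rewrite above swaps eager, script-load-time initialization of the shared observation state for getters that build it on first use. A rough C++ analogue of that lazy-initialization shape (types and names are mine, not V8's):

#include <cassert>

struct ObservationState {
  int next_callback_priority = 0;
};

// Built on the first call only, mirroring the IS_UNDEFINED guards in
// GetObservationState()/GetContextMaps() above.
static ObservationState& GetState() {
  static ObservationState state;
  return state;
}

static int GetNextCallbackPriority() {
  return GetState().next_callback_priority++;
}

int main() {
  assert(GetNextCallbackPriority() == 0);  // State created here, on demand.
  assert(GetNextCallbackPriority() == 1);
  return 0;
}
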
diff --git a/deps/v8/src/objects-debug.cc b/deps/v8/src/objects-debug.cc
index e33b46be79..ca025e6cf6 100644
--- a/deps/v8/src/objects-debug.cc
+++ b/deps/v8/src/objects-debug.cc
@@ -264,8 +264,9 @@ void FixedTypedArray<Traits>::FixedTypedArrayVerify() {
bool JSObject::ElementsAreSafeToExamine() {
- return (FLAG_use_gvn && FLAG_use_allocation_folding) ||
- reinterpret_cast<Map*>(elements()) !=
+ // If a GC was caused while constructing this object, the elements
+ // pointer may point to a one pointer filler map.
+ return reinterpret_cast<Map*>(elements()) !=
GetHeap()->one_pointer_filler_map();
}
@@ -274,7 +275,7 @@ void JSObject::JSObjectVerify() {
VerifyHeapPointer(properties());
VerifyHeapPointer(elements());
- if (GetElementsKind() == NON_STRICT_ARGUMENTS_ELEMENTS) {
+ if (GetElementsKind() == SLOPPY_ARGUMENTS_ELEMENTS) {
CHECK(this->elements()->IsFixedArray());
CHECK_GE(this->elements()->length(), 2);
}
@@ -367,7 +368,7 @@ void PolymorphicCodeCache::PolymorphicCodeCacheVerify() {
void TypeFeedbackInfo::TypeFeedbackInfoVerify() {
VerifyObjectField(kStorage1Offset);
VerifyObjectField(kStorage2Offset);
- VerifyHeapPointer(type_feedback_cells());
+ VerifyHeapPointer(feedback_vector());
}
@@ -403,6 +404,13 @@ void FixedDoubleArray::FixedDoubleArrayVerify() {
void ConstantPoolArray::ConstantPoolArrayVerify() {
CHECK(IsConstantPoolArray());
+ for (int i = 0; i < count_of_code_ptr_entries(); i++) {
+ Address code_entry = get_code_ptr_entry(first_code_ptr_index() + i);
+ VerifyPointer(Code::GetCodeFromTargetAddress(code_entry));
+ }
+ for (int i = 0; i < count_of_heap_ptr_entries(); i++) {
+ VerifyObjectField(OffsetOfElementAt(first_heap_ptr_index() + i));
+ }
}
@@ -490,7 +498,6 @@ void JSMessageObject::JSMessageObjectVerify() {
VerifyObjectField(kEndPositionOffset);
VerifyObjectField(kArgumentsOffset);
VerifyObjectField(kScriptOffset);
- VerifyObjectField(kStackTraceOffset);
VerifyObjectField(kStackFramesOffset);
}
@@ -636,7 +643,7 @@ void Code::VerifyEmbeddedObjectsDependency() {
int mode_mask = RelocInfo::ModeMask(RelocInfo::EMBEDDED_OBJECT);
for (RelocIterator it(this, mode_mask); !it.done(); it.next()) {
Object* obj = it.rinfo()->target_object();
- if (IsWeakEmbeddedObject(kind(), obj)) {
+ if (IsWeakObject(obj)) {
if (obj->IsMap()) {
Map* map = Map::cast(obj);
CHECK(map->dependent_code()->Contains(
@@ -767,7 +774,8 @@ void JSArrayBufferView::JSArrayBufferViewVerify() {
CHECK(IsJSArrayBufferView());
JSObjectVerify();
VerifyPointer(buffer());
- CHECK(buffer()->IsJSArrayBuffer() || buffer()->IsUndefined());
+ CHECK(buffer()->IsJSArrayBuffer() || buffer()->IsUndefined()
+ || buffer() == Smi::FromInt(0));
VerifyPointer(byte_offset());
CHECK(byte_offset()->IsSmi() || byte_offset()->IsHeapNumber()
@@ -931,7 +939,6 @@ void Script::ScriptVerify() {
VerifyPointer(name());
line_offset()->SmiVerify();
column_offset()->SmiVerify();
- VerifyPointer(data());
VerifyPointer(wrapper());
type()->SmiVerify();
VerifyPointer(line_ends());
@@ -1054,7 +1061,7 @@ void JSObject::IncrementSpillStatistics(SpillInformation* info) {
dict->Capacity() - dict->NumberOfElements();
break;
}
- case NON_STRICT_ARGUMENTS_ELEMENTS:
+ case SLOPPY_ARGUMENTS_ELEMENTS:
break;
}
}
diff --git a/deps/v8/src/objects-inl.h b/deps/v8/src/objects-inl.h
index 65c46f0af3..9d550374e0 100644
--- a/deps/v8/src/objects-inl.h
+++ b/deps/v8/src/objects-inl.h
@@ -59,7 +59,7 @@ PropertyDetails::PropertyDetails(Smi* smi) {
}
-Smi* PropertyDetails::AsSmi() {
+Smi* PropertyDetails::AsSmi() const {
// Ensure the upper 2 bits have the same value by sign extending it. This is
// necessary to be able to use the 31st bit of the property details.
int value = value_ << 1;
@@ -67,7 +67,7 @@ Smi* PropertyDetails::AsSmi() {
}
-PropertyDetails PropertyDetails::AsDeleted() {
+PropertyDetails PropertyDetails::AsDeleted() const {
Smi* smi = Smi::FromInt(value_ | DeletedField::encode(1));
return PropertyDetails(smi);
}
@@ -278,10 +278,9 @@ bool Object::HasValidElements() {
MaybeObject* Object::AllocateNewStorageFor(Heap* heap,
Representation representation) {
- if (FLAG_track_fields && representation.IsSmi() && IsUninitialized()) {
+ if (representation.IsSmi() && IsUninitialized()) {
return Smi::FromInt(0);
}
- if (!FLAG_track_double_fields) return this;
if (!representation.IsDouble()) return this;
if (IsUninitialized()) {
return heap->AllocateHeapNumber(0);
@@ -650,12 +649,6 @@ bool MaybeObject::IsRetryAfterGC() {
}
-bool MaybeObject::IsOutOfMemory() {
- return HAS_FAILURE_TAG(this)
- && Failure::cast(this)->IsOutOfMemoryException();
-}
-
-
bool MaybeObject::IsException() {
return this == Failure::Exception();
}
@@ -760,16 +753,6 @@ bool Object::IsDependentCode() {
}
-bool Object::IsTypeFeedbackCells() {
- if (!IsFixedArray()) return false;
- // There's actually no way to see the difference between a fixed array and
- // a cache cells array. Since this is used for asserts we can check that
- // the length is plausible though.
- if (FixedArray::cast(this)->length() % 2 != 0) return false;
- return true;
-}
-
-
bool Object::IsContext() {
if (!Object::IsHeapObject()) return false;
Map* map = HeapObject::cast(this)->map();
@@ -937,7 +920,8 @@ bool Object::IsJSGlobalProxy() {
bool result = IsHeapObject() &&
(HeapObject::cast(this)->map()->instance_type() ==
JS_GLOBAL_PROXY_TYPE);
- ASSERT(!result || IsAccessCheckNeeded());
+ ASSERT(!result ||
+ HeapObject::cast(this)->map()->is_access_check_needed());
return result;
}
@@ -962,8 +946,14 @@ bool Object::IsUndetectableObject() {
bool Object::IsAccessCheckNeeded() {
- return IsHeapObject()
- && HeapObject::cast(this)->map()->is_access_check_needed();
+ if (!IsHeapObject()) return false;
+ if (IsJSGlobalProxy()) {
+ JSGlobalProxy* proxy = JSGlobalProxy::cast(this);
+ GlobalObject* global =
+ proxy->GetIsolate()->context()->global_object();
+ return proxy->IsDetachedFrom(global);
+ }
+ return HeapObject::cast(this)->map()->is_access_check_needed();
}
@@ -1035,6 +1025,20 @@ bool Object::IsNaN() {
}
+Handle<Object> Object::ToSmi(Isolate* isolate, Handle<Object> object) {
+ if (object->IsSmi()) return object;
+ if (object->IsHeapNumber()) {
+ double value = Handle<HeapNumber>::cast(object)->value();
+ int int_value = FastD2I(value);
+ if (value == FastI2D(int_value) && Smi::IsValid(int_value)) {
+ return handle(Smi::FromInt(int_value), isolate);
+ }
+ }
+ return Handle<Object>();
+}
+
+
+// TODO(ishell): Use handlified version instead.
MaybeObject* Object::ToSmi() {
if (IsSmi()) return this;
if (IsHeapNumber()) {
@@ -1053,20 +1057,23 @@ bool Object::HasSpecificClassOf(String* name) {
}
-MaybeObject* Object::GetElement(Isolate* isolate, uint32_t index) {
+Handle<Object> Object::GetElement(Isolate* isolate,
+ Handle<Object> object,
+ uint32_t index) {
// GetElement can trigger a getter which can cause allocation.
// This was not always the case. This ASSERT is here to catch
// leftover incorrect uses.
ASSERT(AllowHeapAllocation::IsAllowed());
- return GetElementWithReceiver(isolate, this, index);
+ return Object::GetElementWithReceiver(isolate, object, object, index);
}
-Object* Object::GetElementNoExceptionThrown(Isolate* isolate, uint32_t index) {
- MaybeObject* maybe = GetElementWithReceiver(isolate, this, index);
- ASSERT(!maybe->IsFailure());
- Object* result = NULL; // Initialization to please compiler.
- maybe->ToObject(&result);
+Handle<Object> Object::GetElementNoExceptionThrown(Isolate* isolate,
+ Handle<Object> object,
+ uint32_t index) {
+ Handle<Object> result =
+ Object::GetElementWithReceiver(isolate, object, object, index);
+ CHECK_NOT_EMPTY_HANDLE(isolate, result);
return result;
}
@@ -1222,11 +1229,6 @@ bool Failure::IsInternalError() const {
}
-bool Failure::IsOutOfMemoryException() const {
- return type() == OUT_OF_MEMORY_EXCEPTION;
-}
-
-
AllocationSpace Failure::allocation_space() const {
ASSERT_EQ(RETRY_AFTER_GC, type());
return static_cast<AllocationSpace>((value() >> kFailureTypeTagSize)
@@ -1244,11 +1246,6 @@ Failure* Failure::Exception() {
}
-Failure* Failure::OutOfMemoryException(intptr_t value) {
- return Construct(OUT_OF_MEMORY_EXCEPTION, value);
-}
-
-
intptr_t Failure::value() const {
return static_cast<intptr_t>(
reinterpret_cast<uintptr_t>(this) >> kFailureTagSize);
@@ -1396,6 +1393,11 @@ void HeapObject::IteratePointer(ObjectVisitor* v, int offset) {
}
+void HeapObject::IterateNextCodeLink(ObjectVisitor* v, int offset) {
+ v->VisitNextCodeLink(reinterpret_cast<Object**>(FIELD_ADDR(this, offset)));
+}
+
+
double HeapNumber::value() {
return READ_DOUBLE_FIELD(this, kValueOffset);
}
@@ -1474,7 +1476,8 @@ void AllocationSite::MarkZombie() {
// elements kind is the initial elements kind.
AllocationSiteMode AllocationSite::GetMode(
ElementsKind boilerplate_elements_kind) {
- if (IsFastSmiElementsKind(boilerplate_elements_kind)) {
+ if (FLAG_pretenuring_call_new ||
+ IsFastSmiElementsKind(boilerplate_elements_kind)) {
return TRACK_ALLOCATION_SITE;
}
@@ -1484,8 +1487,9 @@ AllocationSiteMode AllocationSite::GetMode(
AllocationSiteMode AllocationSite::GetMode(ElementsKind from,
ElementsKind to) {
- if (IsFastSmiElementsKind(from) &&
- IsMoreGeneralElementsKindTransition(from, to)) {
+ if (FLAG_pretenuring_call_new ||
+ (IsFastSmiElementsKind(from) &&
+ IsMoreGeneralElementsKindTransition(from, to))) {
return TRACK_ALLOCATION_SITE;
}
@@ -1564,9 +1568,7 @@ inline bool AllocationSite::DigestPretenuringFeedback() {
set_pretenure_decision(result);
if (current_mode != GetPretenureMode()) {
decision_changed = true;
- dependent_code()->MarkCodeForDeoptimization(
- GetIsolate(),
- DependentCode::kAllocationSiteTenuringChangedGroup);
+ set_deopt_dependent_code(true);
}
}
@@ -1598,73 +1600,79 @@ void JSObject::EnsureCanContainHeapObjectElements(Handle<JSObject> object) {
}
-MaybeObject* JSObject::EnsureCanContainElements(Object** objects,
- uint32_t count,
- EnsureElementsMode mode) {
- ElementsKind current_kind = map()->elements_kind();
+void JSObject::EnsureCanContainElements(Handle<JSObject> object,
+ Object** objects,
+ uint32_t count,
+ EnsureElementsMode mode) {
+ ElementsKind current_kind = object->map()->elements_kind();
ElementsKind target_kind = current_kind;
- ASSERT(mode != ALLOW_COPIED_DOUBLE_ELEMENTS);
- bool is_holey = IsFastHoleyElementsKind(current_kind);
- if (current_kind == FAST_HOLEY_ELEMENTS) return this;
- Heap* heap = GetHeap();
- Object* the_hole = heap->the_hole_value();
- for (uint32_t i = 0; i < count; ++i) {
- Object* current = *objects++;
- if (current == the_hole) {
- is_holey = true;
- target_kind = GetHoleyElementsKind(target_kind);
- } else if (!current->IsSmi()) {
- if (mode == ALLOW_CONVERTED_DOUBLE_ELEMENTS && current->IsNumber()) {
- if (IsFastSmiElementsKind(target_kind)) {
- if (is_holey) {
- target_kind = FAST_HOLEY_DOUBLE_ELEMENTS;
- } else {
- target_kind = FAST_DOUBLE_ELEMENTS;
+ {
+ DisallowHeapAllocation no_allocation;
+ ASSERT(mode != ALLOW_COPIED_DOUBLE_ELEMENTS);
+ bool is_holey = IsFastHoleyElementsKind(current_kind);
+ if (current_kind == FAST_HOLEY_ELEMENTS) return;
+ Heap* heap = object->GetHeap();
+ Object* the_hole = heap->the_hole_value();
+ for (uint32_t i = 0; i < count; ++i) {
+ Object* current = *objects++;
+ if (current == the_hole) {
+ is_holey = true;
+ target_kind = GetHoleyElementsKind(target_kind);
+ } else if (!current->IsSmi()) {
+ if (mode == ALLOW_CONVERTED_DOUBLE_ELEMENTS && current->IsNumber()) {
+ if (IsFastSmiElementsKind(target_kind)) {
+ if (is_holey) {
+ target_kind = FAST_HOLEY_DOUBLE_ELEMENTS;
+ } else {
+ target_kind = FAST_DOUBLE_ELEMENTS;
+ }
}
+ } else if (is_holey) {
+ target_kind = FAST_HOLEY_ELEMENTS;
+ break;
+ } else {
+ target_kind = FAST_ELEMENTS;
}
- } else if (is_holey) {
- target_kind = FAST_HOLEY_ELEMENTS;
- break;
- } else {
- target_kind = FAST_ELEMENTS;
}
}
}
-
if (target_kind != current_kind) {
- return TransitionElementsKind(target_kind);
+ TransitionElementsKind(object, target_kind);
}
- return this;
}
-MaybeObject* JSObject::EnsureCanContainElements(FixedArrayBase* elements,
- uint32_t length,
- EnsureElementsMode mode) {
- if (elements->map() != GetHeap()->fixed_double_array_map()) {
- ASSERT(elements->map() == GetHeap()->fixed_array_map() ||
- elements->map() == GetHeap()->fixed_cow_array_map());
+void JSObject::EnsureCanContainElements(Handle<JSObject> object,
+ Handle<FixedArrayBase> elements,
+ uint32_t length,
+ EnsureElementsMode mode) {
+ Heap* heap = object->GetHeap();
+ if (elements->map() != heap->fixed_double_array_map()) {
+ ASSERT(elements->map() == heap->fixed_array_map() ||
+ elements->map() == heap->fixed_cow_array_map());
if (mode == ALLOW_COPIED_DOUBLE_ELEMENTS) {
mode = DONT_ALLOW_DOUBLE_ELEMENTS;
}
- Object** objects = FixedArray::cast(elements)->GetFirstElementAddress();
- return EnsureCanContainElements(objects, length, mode);
+ Object** objects =
+ Handle<FixedArray>::cast(elements)->GetFirstElementAddress();
+ EnsureCanContainElements(object, objects, length, mode);
+ return;
}
ASSERT(mode == ALLOW_COPIED_DOUBLE_ELEMENTS);
- if (GetElementsKind() == FAST_HOLEY_SMI_ELEMENTS) {
- return TransitionElementsKind(FAST_HOLEY_DOUBLE_ELEMENTS);
- } else if (GetElementsKind() == FAST_SMI_ELEMENTS) {
- FixedDoubleArray* double_array = FixedDoubleArray::cast(elements);
+ if (object->GetElementsKind() == FAST_HOLEY_SMI_ELEMENTS) {
+ TransitionElementsKind(object, FAST_HOLEY_DOUBLE_ELEMENTS);
+ } else if (object->GetElementsKind() == FAST_SMI_ELEMENTS) {
+ Handle<FixedDoubleArray> double_array =
+ Handle<FixedDoubleArray>::cast(elements);
for (uint32_t i = 0; i < length; ++i) {
if (double_array->is_the_hole(i)) {
- return TransitionElementsKind(FAST_HOLEY_DOUBLE_ELEMENTS);
+ TransitionElementsKind(object, FAST_HOLEY_DOUBLE_ELEMENTS);
+ return;
}
}
- return TransitionElementsKind(FAST_DOUBLE_ELEMENTS);
+ TransitionElementsKind(object, FAST_DOUBLE_ELEMENTS);
}
-
- return this;
}
@@ -1733,6 +1741,11 @@ void JSObject::initialize_elements() {
ExternalArray* empty_array = GetHeap()->EmptyExternalArrayForMap(map());
ASSERT(!GetHeap()->InNewSpace(empty_array));
WRITE_FIELD(this, kElementsOffset, empty_array);
+ } else if (map()->has_fixed_typed_array_elements()) {
+ FixedTypedArrayBase* empty_array =
+ GetHeap()->EmptyFixedTypedArrayForMap(map());
+ ASSERT(!GetHeap()->InNewSpace(empty_array));
+ WRITE_FIELD(this, kElementsOffset, empty_array);
} else {
UNREACHABLE();
}
@@ -1745,7 +1758,7 @@ MaybeObject* JSObject::ResetElements() {
SeededNumberDictionary* dictionary;
MaybeObject* maybe = SeededNumberDictionary::Allocate(GetHeap(), 0);
if (!maybe->To(&dictionary)) return maybe;
- if (map() == GetHeap()->non_strict_arguments_elements_map()) {
+ if (map() == GetHeap()->sloppy_arguments_elements_map()) {
FixedArray::cast(elements())->set(1, dictionary);
} else {
set_elements(dictionary);
@@ -2088,11 +2101,11 @@ bool Object::IsStringObjectWithCharacterAt(uint32_t index) {
}
-
void Object::VerifyApiCallResultType() {
#if ENABLE_EXTRA_CHECKS
if (!(IsSmi() ||
IsString() ||
+ IsSymbol() ||
IsSpecObject() ||
IsHeapNumber() ||
IsUndefined() ||
@@ -2182,6 +2195,15 @@ MaybeObject* FixedDoubleArray::get(int index) {
}
+Handle<Object> FixedDoubleArray::get_as_handle(int index) {
+ if (is_the_hole(index)) {
+ return GetIsolate()->factory()->the_hole_value();
+ } else {
+ return GetIsolate()->factory()->NewNumber(get_scalar(index));
+ }
+}
+
+
void FixedDoubleArray::set(int index, double value) {
ASSERT(map() != GetHeap()->fixed_cow_array_map() &&
map() != GetHeap()->fixed_array_map());
@@ -2205,8 +2227,12 @@ bool FixedDoubleArray::is_the_hole(int index) {
}
-SMI_ACCESSORS(ConstantPoolArray, first_ptr_index, kFirstPointerIndexOffset)
-SMI_ACCESSORS(ConstantPoolArray, first_int32_index, kFirstInt32IndexOffset)
+SMI_ACCESSORS(
+ ConstantPoolArray, first_code_ptr_index, kFirstCodePointerIndexOffset)
+SMI_ACCESSORS(
+ ConstantPoolArray, first_heap_ptr_index, kFirstHeapPointerIndexOffset)
+SMI_ACCESSORS(
+ ConstantPoolArray, first_int32_index, kFirstInt32IndexOffset)
int ConstantPoolArray::first_int64_index() {
@@ -2215,12 +2241,17 @@ int ConstantPoolArray::first_int64_index() {
int ConstantPoolArray::count_of_int64_entries() {
- return first_ptr_index();
+ return first_code_ptr_index();
+}
+
+
+int ConstantPoolArray::count_of_code_ptr_entries() {
+ return first_heap_ptr_index() - first_code_ptr_index();
}
-int ConstantPoolArray::count_of_ptr_entries() {
- return first_int32_index() - first_ptr_index();
+int ConstantPoolArray::count_of_heap_ptr_entries() {
+ return first_int32_index() - first_heap_ptr_index();
}
@@ -2230,32 +2261,44 @@ int ConstantPoolArray::count_of_int32_entries() {
void ConstantPoolArray::SetEntryCounts(int number_of_int64_entries,
- int number_of_ptr_entries,
+ int number_of_code_ptr_entries,
+ int number_of_heap_ptr_entries,
int number_of_int32_entries) {
- set_first_ptr_index(number_of_int64_entries);
- set_first_int32_index(number_of_int64_entries + number_of_ptr_entries);
- set_length(number_of_int64_entries + number_of_ptr_entries +
- number_of_int32_entries);
+ int current_index = number_of_int64_entries;
+ set_first_code_ptr_index(current_index);
+ current_index += number_of_code_ptr_entries;
+ set_first_heap_ptr_index(current_index);
+ current_index += number_of_heap_ptr_entries;
+ set_first_int32_index(current_index);
+ current_index += number_of_int32_entries;
+ set_length(current_index);
}
int64_t ConstantPoolArray::get_int64_entry(int index) {
ASSERT(map() == GetHeap()->constant_pool_array_map());
- ASSERT(index >= 0 && index < first_ptr_index());
+ ASSERT(index >= 0 && index < first_code_ptr_index());
return READ_INT64_FIELD(this, OffsetOfElementAt(index));
}
double ConstantPoolArray::get_int64_entry_as_double(int index) {
STATIC_ASSERT(kDoubleSize == kInt64Size);
ASSERT(map() == GetHeap()->constant_pool_array_map());
- ASSERT(index >= 0 && index < first_ptr_index());
+ ASSERT(index >= 0 && index < first_code_ptr_index());
return READ_DOUBLE_FIELD(this, OffsetOfElementAt(index));
}
-Object* ConstantPoolArray::get_ptr_entry(int index) {
+Address ConstantPoolArray::get_code_ptr_entry(int index) {
+ ASSERT(map() == GetHeap()->constant_pool_array_map());
+ ASSERT(index >= first_code_ptr_index() && index < first_heap_ptr_index());
+ return reinterpret_cast<Address>(READ_FIELD(this, OffsetOfElementAt(index)));
+}
+
+
+Object* ConstantPoolArray::get_heap_ptr_entry(int index) {
ASSERT(map() == GetHeap()->constant_pool_array_map());
- ASSERT(index >= first_ptr_index() && index < first_int32_index());
+ ASSERT(index >= first_heap_ptr_index() && index < first_int32_index());
return READ_FIELD(this, OffsetOfElementAt(index));
}
@@ -2267,9 +2310,16 @@ int32_t ConstantPoolArray::get_int32_entry(int index) {
}
+void ConstantPoolArray::set(int index, Address value) {
+ ASSERT(map() == GetHeap()->constant_pool_array_map());
+ ASSERT(index >= first_code_ptr_index() && index < first_heap_ptr_index());
+ WRITE_FIELD(this, OffsetOfElementAt(index), reinterpret_cast<Object*>(value));
+}
+
+
void ConstantPoolArray::set(int index, Object* value) {
ASSERT(map() == GetHeap()->constant_pool_array_map());
- ASSERT(index >= first_ptr_index() && index < first_int32_index());
+ ASSERT(index >= first_code_ptr_index() && index < first_int32_index());
WRITE_FIELD(this, OffsetOfElementAt(index), value);
WRITE_BARRIER(GetHeap(), this, OffsetOfElementAt(index), value);
}
@@ -2277,7 +2327,7 @@ void ConstantPoolArray::set(int index, Object* value) {
void ConstantPoolArray::set(int index, int64_t value) {
ASSERT(map() == GetHeap()->constant_pool_array_map());
- ASSERT(index >= first_int64_index() && index < first_ptr_index());
+ ASSERT(index >= first_int64_index() && index < first_code_ptr_index());
WRITE_INT64_FIELD(this, OffsetOfElementAt(index), value);
}
@@ -2285,7 +2335,7 @@ void ConstantPoolArray::set(int index, int64_t value) {
void ConstantPoolArray::set(int index, double value) {
STATIC_ASSERT(kDoubleSize == kInt64Size);
ASSERT(map() == GetHeap()->constant_pool_array_map());
- ASSERT(index >= first_int64_index() && index < first_ptr_index());
+ ASSERT(index >= first_int64_index() && index < first_code_ptr_index());
WRITE_DOUBLE_FIELD(this, OffsetOfElementAt(index), value);
}
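
The ConstantPoolArray changes above split the old pointer region in two, so entries are now laid out as [int64 | code pointer | heap pointer | int32]. A standalone sketch (plain ints in place of the Smi-backed accessors) of how SetEntryCounts derives the first_* indices and how the count_of_* values follow from them:

#include <cassert>

// Simplified stand-in for the new ConstantPoolArray layout:
// [ int64 | code ptr | heap ptr | int32 ] regions, back to back.
struct ConstantPoolLayout {
  int first_code_ptr_index;
  int first_heap_ptr_index;
  int first_int32_index;
  int length;

  void SetEntryCounts(int int64_entries, int code_ptr_entries,
                      int heap_ptr_entries, int int32_entries) {
    int current_index = int64_entries;
    first_code_ptr_index = current_index;
    current_index += code_ptr_entries;
    first_heap_ptr_index = current_index;
    current_index += heap_ptr_entries;
    first_int32_index = current_index;
    current_index += int32_entries;
    length = current_index;
  }

  int count_of_int64_entries() const { return first_code_ptr_index; }
  int count_of_code_ptr_entries() const {
    return first_heap_ptr_index - first_code_ptr_index;
  }
  int count_of_heap_ptr_entries() const {
    return first_int32_index - first_heap_ptr_index;
  }
  int count_of_int32_entries() const { return length - first_int32_index; }
};

int main() {
  ConstantPoolLayout pool;
  pool.SetEntryCounts(2, 3, 4, 5);
  assert(pool.count_of_int64_entries() == 2);
  assert(pool.count_of_code_ptr_entries() == 3);
  assert(pool.count_of_heap_ptr_entries() == 4);
  assert(pool.count_of_int32_entries() == 5);
  assert(pool.length == 14);
  return 0;
}
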
@@ -2719,7 +2769,8 @@ void DescriptorArray::SwapSortedKeys(int first, int second) {
DescriptorArray::WhitenessWitness::WhitenessWitness(FixedArray* array)
: marking_(array->GetHeap()->incremental_marking()) {
marking_->EnterNoMarkingScope();
- ASSERT(Marking::Color(array) == Marking::WHITE_OBJECT);
+ ASSERT(!marking_->IsMarking() ||
+ Marking::Color(array) == Marking::WHITE_OBJECT);
}
@@ -2797,7 +2848,6 @@ CAST_ACCESSOR(DescriptorArray)
CAST_ACCESSOR(DeoptimizationInputData)
CAST_ACCESSOR(DeoptimizationOutputData)
CAST_ACCESSOR(DependentCode)
-CAST_ACCESSOR(TypeFeedbackCells)
CAST_ACCESSOR(StringTable)
CAST_ACCESSOR(JSFunctionResultCache)
CAST_ACCESSOR(NormalizedMapCache)
@@ -3645,35 +3695,64 @@ void ExternalFloat64Array::set(int index, double value) {
}
-int FixedTypedArrayBase::size() {
+void* FixedTypedArrayBase::DataPtr() {
+ return FIELD_ADDR(this, kDataOffset);
+}
+
+
+int FixedTypedArrayBase::DataSize() {
InstanceType instance_type = map()->instance_type();
int element_size;
switch (instance_type) {
- case FIXED_UINT8_ARRAY_TYPE:
- case FIXED_INT8_ARRAY_TYPE:
- case FIXED_UINT8_CLAMPED_ARRAY_TYPE:
- element_size = 1;
- break;
- case FIXED_UINT16_ARRAY_TYPE:
- case FIXED_INT16_ARRAY_TYPE:
- element_size = 2;
- break;
- case FIXED_UINT32_ARRAY_TYPE:
- case FIXED_INT32_ARRAY_TYPE:
- case FIXED_FLOAT32_ARRAY_TYPE:
- element_size = 4;
- break;
- case FIXED_FLOAT64_ARRAY_TYPE:
- element_size = 8;
+#define TYPED_ARRAY_CASE(Type, type, TYPE, ctype, size) \
+ case FIXED_##TYPE##_ARRAY_TYPE: \
+ element_size = size; \
break;
+
+ TYPED_ARRAYS(TYPED_ARRAY_CASE)
+#undef TYPED_ARRAY_CASE
default:
UNREACHABLE();
return 0;
}
- return OBJECT_POINTER_ALIGN(kDataOffset + length() * element_size);
+ return length() * element_size;
}
+int FixedTypedArrayBase::size() {
+ return OBJECT_POINTER_ALIGN(kDataOffset + DataSize());
+}
+
+
+uint8_t Uint8ArrayTraits::defaultValue() { return 0; }
+
+
+uint8_t Uint8ClampedArrayTraits::defaultValue() { return 0; }
+
+
+int8_t Int8ArrayTraits::defaultValue() { return 0; }
+
+
+uint16_t Uint16ArrayTraits::defaultValue() { return 0; }
+
+
+int16_t Int16ArrayTraits::defaultValue() { return 0; }
+
+
+uint32_t Uint32ArrayTraits::defaultValue() { return 0; }
+
+
+int32_t Int32ArrayTraits::defaultValue() { return 0; }
+
+
+float Float32ArrayTraits::defaultValue() {
+ return static_cast<float>(OS::nan_value());
+}
+
+
+double Float64ArrayTraits::defaultValue() { return OS::nan_value(); }
+
+
template <class Traits>
typename Traits::ElementType FixedTypedArray<Traits>::get_scalar(int index) {
ASSERT((index >= 0) && (index < this->length()));
@@ -3709,6 +3788,47 @@ void FixedTypedArray<Float64ArrayTraits>::set(
template <class Traits>
+typename Traits::ElementType FixedTypedArray<Traits>::from_int(int value) {
+ return static_cast<ElementType>(value);
+}
+
+
+template <> inline
+uint8_t FixedTypedArray<Uint8ClampedArrayTraits>::from_int(int value) {
+ if (value < 0) return 0;
+ if (value > 0xFF) return 0xFF;
+ return static_cast<uint8_t>(value);
+}
+
+
+template <class Traits>
+typename Traits::ElementType FixedTypedArray<Traits>::from_double(
+ double value) {
+ return static_cast<ElementType>(DoubleToInt32(value));
+}
+
+
+template<> inline
+uint8_t FixedTypedArray<Uint8ClampedArrayTraits>::from_double(double value) {
+ if (value < 0) return 0;
+ if (value > 0xFF) return 0xFF;
+ return static_cast<uint8_t>(lrint(value));
+}
+
+
+template<> inline
+float FixedTypedArray<Float32ArrayTraits>::from_double(double value) {
+ return static_cast<float>(value);
+}
+
+
+template<> inline
+double FixedTypedArray<Float64ArrayTraits>::from_double(double value) {
+ return value;
+}
+
+
+template <class Traits>
MaybeObject* FixedTypedArray<Traits>::get(int index) {
return Traits::ToObject(GetHeap(), get_scalar(index));
}
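
FixedTypedArrayBase::size() above is now kDataOffset plus DataSize(), rounded up to pointer alignment. A small standalone arithmetic sketch; kDataOffset and the alignment here are assumed values for illustration, not the real header layout:

#include <cassert>

// Assumed constants for illustration only, not the real kDataOffset /
// kPointerSize values.
const int kDataOffset = 8;
const int kPointerSize = 8;

int ObjectPointerAlign(int value) {
  return (value + kPointerSize - 1) & ~(kPointerSize - 1);
}

// Mirrors size() = OBJECT_POINTER_ALIGN(kDataOffset + length * element_size).
int FixedTypedArraySize(int length, int element_size) {
  return ObjectPointerAlign(kDataOffset + length * element_size);
}

int main() {
  // 10 uint8 elements: 8 + 10 = 18 bytes, rounded up to 24.
  assert(FixedTypedArraySize(10, 1) == 24);
  // 3 float64 elements: 8 + 24 = 32 bytes, already aligned.
  assert(FixedTypedArraySize(3, 8) == 32);
  return 0;
}
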
@@ -3719,10 +3839,10 @@ MaybeObject* FixedTypedArray<Traits>::SetValue(uint32_t index, Object* value) {
if (index < static_cast<uint32_t>(length())) {
if (value->IsSmi()) {
int int_value = Smi::cast(value)->value();
- cast_value = static_cast<ElementType>(int_value);
+ cast_value = from_int(int_value);
} else if (value->IsHeapNumber()) {
double double_value = HeapNumber::cast(value)->value();
- cast_value = static_cast<ElementType>(DoubleToInt32(double_value));
+ cast_value = from_double(double_value);
} else {
// Clamp undefined to the default value. All other types have been
// converted to a number type further up in the call chain.
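
SetValue now funnels Smi input through from_int and HeapNumber input through from_double, and the Uint8ClampedArray specializations above clamp to [0, 255], with from_double additionally rounding via lrint. A standalone sketch of that clamping behaviour using plain functions rather than the real templates:

#include <cassert>
#include <cmath>
#include <cstdint>

// Clamp-to-[0,255] conversions, mirroring the Uint8ClampedArray
// specializations of from_int / from_double above.
uint8_t ClampedFromInt(int value) {
  if (value < 0) return 0;
  if (value > 0xFF) return 0xFF;
  return static_cast<uint8_t>(value);
}

uint8_t ClampedFromDouble(double value) {
  if (value < 0) return 0;
  if (value > 0xFF) return 0xFF;
  // lrint rounds to nearest under the default FP mode (ties to even).
  return static_cast<uint8_t>(lrint(value));
}

int main() {
  assert(ClampedFromInt(-5) == 0);
  assert(ClampedFromInt(300) == 255);
  assert(ClampedFromDouble(254.6) == 255);
  assert(ClampedFromDouble(2.5) == 2);   // ties round to even
  assert(ClampedFromDouble(1e9) == 255);
  return 0;
}
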
@@ -3854,7 +3974,8 @@ int HeapObject::SizeFromMap(Map* map) {
if (instance_type == CONSTANT_POOL_ARRAY_TYPE) {
return ConstantPoolArray::SizeFor(
reinterpret_cast<ConstantPoolArray*>(this)->count_of_int64_entries(),
- reinterpret_cast<ConstantPoolArray*>(this)->count_of_ptr_entries(),
+ reinterpret_cast<ConstantPoolArray*>(this)->count_of_code_ptr_entries(),
+ reinterpret_cast<ConstantPoolArray*>(this)->count_of_heap_ptr_entries(),
reinterpret_cast<ConstantPoolArray*>(this)->count_of_int32_entries());
}
if (instance_type >= FIRST_FIXED_TYPED_ARRAY_TYPE &&
@@ -3998,8 +4119,7 @@ void Map::set_is_shared(bool value) {
bool Map::is_shared() {
- return IsShared::decode(bit_field3());
-}
+ return IsShared::decode(bit_field3()); }
void Map::set_dictionary_map(bool value) {
@@ -4045,7 +4165,6 @@ void Map::deprecate() {
bool Map::is_deprecated() {
- if (!FLAG_track_fields) return false;
return Deprecated::decode(bit_field3());
}
@@ -4056,7 +4175,6 @@ void Map::set_migration_target(bool value) {
bool Map::is_migration_target() {
- if (!FLAG_track_fields) return false;
return IsMigrationTarget::decode(bit_field3());
}
@@ -4090,22 +4208,11 @@ bool Map::CanBeDeprecated() {
int descriptor = LastAdded();
for (int i = 0; i <= descriptor; i++) {
PropertyDetails details = instance_descriptors()->GetDetails(i);
- if (FLAG_track_fields && details.representation().IsNone()) {
- return true;
- }
- if (FLAG_track_fields && details.representation().IsSmi()) {
- return true;
- }
- if (FLAG_track_double_fields && details.representation().IsDouble()) {
- return true;
- }
- if (FLAG_track_heap_object_fields &&
- details.representation().IsHeapObject()) {
- return true;
- }
- if (FLAG_track_fields && details.type() == CONSTANT) {
- return true;
- }
+ if (details.representation().IsNone()) return true;
+ if (details.representation().IsSmi()) return true;
+ if (details.representation().IsDouble()) return true;
+ if (details.representation().IsHeapObject()) return true;
+ if (details.type() == CONSTANT) return true;
}
return false;
}
@@ -4211,16 +4318,8 @@ InlineCacheState Code::ic_state() {
ExtraICState Code::extra_ic_state() {
- ASSERT((is_inline_cache_stub() && !needs_extended_extra_ic_state(kind()))
- || ic_state() == DEBUG_STUB);
- return ExtractExtraICStateFromFlags(flags());
-}
-
-
-ExtraICState Code::extended_extra_ic_state() {
ASSERT(is_inline_cache_stub() || ic_state() == DEBUG_STUB);
- ASSERT(needs_extended_extra_ic_state(kind()));
- return ExtractExtendedExtraICStateFromFlags(flags());
+ return ExtractExtraICStateFromFlags(flags());
}
@@ -4229,12 +4328,6 @@ Code::StubType Code::type() {
}
-int Code::arguments_count() {
- ASSERT(kind() == STUB || is_handler());
- return ExtractArgumentsCountFromFlags(flags());
-}
-
-
// For initialization.
void Code::set_raw_kind_specific_flags1(int value) {
WRITE_INT_FIELD(this, kKindSpecificFlags1Offset, value);
@@ -4438,7 +4531,7 @@ void Code::set_back_edges_patched_for_osr(bool value) {
byte Code::to_boolean_state() {
- return extended_extra_ic_state();
+ return extra_ic_state();
}
@@ -4509,18 +4602,13 @@ Code::Flags Code::ComputeFlags(Kind kind,
InlineCacheState ic_state,
ExtraICState extra_ic_state,
StubType type,
- int argc,
InlineCacheHolderFlag holder) {
- ASSERT(argc <= Code::kMaxArguments);
// Compute the bit mask.
unsigned int bits = KindField::encode(kind)
| ICStateField::encode(ic_state)
| TypeField::encode(type)
- | ExtendedExtraICStateField::encode(extra_ic_state)
+ | ExtraICStateField::encode(extra_ic_state)
| CacheHolderField::encode(holder);
- if (!Code::needs_extended_extra_ic_state(kind)) {
- bits |= (argc << kArgumentsCountShift);
- }
return static_cast<Flags>(bits);
}
@@ -4528,9 +4616,15 @@ Code::Flags Code::ComputeFlags(Kind kind,
Code::Flags Code::ComputeMonomorphicFlags(Kind kind,
ExtraICState extra_ic_state,
InlineCacheHolderFlag holder,
- StubType type,
- int argc) {
- return ComputeFlags(kind, MONOMORPHIC, extra_ic_state, type, argc, holder);
+ StubType type) {
+ return ComputeFlags(kind, MONOMORPHIC, extra_ic_state, type, holder);
+}
+
+
+Code::Flags Code::ComputeHandlerFlags(Kind handler_kind,
+ StubType type,
+ InlineCacheHolderFlag holder) {
+ return ComputeFlags(Code::HANDLER, MONOMORPHIC, handler_kind, type, holder);
}
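
With the argc field gone, Code flags are just a bit-packed combination of kind, IC state, type, extra IC state and cache holder. A minimal standalone sketch of the encode/decode bit-field pattern; the field widths and positions here are made up for illustration and are not the real layout:

#include <cassert>
#include <cstdint>

// Minimal BitField in the spirit of v8's utils, with illustrative widths.
template <typename T, int shift, int size>
struct BitField {
  static uint32_t encode(T value) {
    return static_cast<uint32_t>(value) << shift;
  }
  static T decode(uint32_t flags) {
    const uint32_t mask = ((1u << size) - 1) << shift;
    return static_cast<T>((flags & mask) >> shift);
  }
};

typedef BitField<int, 0, 4>  KindField;
typedef BitField<int, 4, 3>  ICStateField;
typedef BitField<int, 7, 2>  TypeField;
typedef BitField<int, 9, 10> ExtraICStateField;
typedef BitField<int, 19, 1> CacheHolderField;

uint32_t ComputeFlags(int kind, int ic_state, int extra, int type, int holder) {
  return KindField::encode(kind) | ICStateField::encode(ic_state) |
         TypeField::encode(type) | ExtraICStateField::encode(extra) |
         CacheHolderField::encode(holder);
}

int main() {
  uint32_t flags = ComputeFlags(/*kind*/ 5, /*ic_state*/ 1, /*extra*/ 42,
                                /*type*/ 2, /*holder*/ 1);
  assert(KindField::decode(flags) == 5);
  assert(ExtraICStateField::decode(flags) == 42);
  assert(CacheHolderField::decode(flags) == 1);
  return 0;
}
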
@@ -4549,22 +4643,11 @@ ExtraICState Code::ExtractExtraICStateFromFlags(Flags flags) {
}
-ExtraICState Code::ExtractExtendedExtraICStateFromFlags(
- Flags flags) {
- return ExtendedExtraICStateField::decode(flags);
-}
-
-
Code::StubType Code::ExtractTypeFromFlags(Flags flags) {
return TypeField::decode(flags);
}
-int Code::ExtractArgumentsCountFromFlags(Flags flags) {
- return (flags & kArgumentsCountMask) >> kArgumentsCountShift;
-}
-
-
InlineCacheHolderFlag Code::ExtractCacheHolderFromFlags(Flags flags) {
return CacheHolderField::decode(flags);
}
@@ -4593,6 +4676,39 @@ Object* Code::GetObjectFromEntryAddress(Address location_of_address) {
}
+bool Code::IsWeakObjectInOptimizedCode(Object* object) {
+ ASSERT(is_optimized_code());
+ if (object->IsMap()) {
+ return Map::cast(object)->CanTransition() &&
+ FLAG_collect_maps &&
+ FLAG_weak_embedded_maps_in_optimized_code;
+ }
+ if (object->IsJSObject() ||
+ (object->IsCell() && Cell::cast(object)->value()->IsJSObject())) {
+ return FLAG_weak_embedded_objects_in_optimized_code;
+ }
+ return false;
+}
+
+
+class Code::FindAndReplacePattern {
+ public:
+ FindAndReplacePattern() : count_(0) { }
+ void Add(Handle<Map> map_to_find, Handle<Object> obj_to_replace) {
+ ASSERT(count_ < kMaxCount);
+ find_[count_] = map_to_find;
+ replace_[count_] = obj_to_replace;
+ ++count_;
+ }
+ private:
+ static const int kMaxCount = 4;
+ int count_;
+ Handle<Map> find_[kMaxCount];
+ Handle<Object> replace_[kMaxCount];
+ friend class Code;
+};
+
+
Object* Map::prototype() {
return READ_FIELD(this, kPrototypeOffset);
}
@@ -4938,7 +5054,6 @@ ACCESSORS(Script, name, Object, kNameOffset)
ACCESSORS(Script, id, Smi, kIdOffset)
ACCESSORS_TO_SMI(Script, line_offset, kLineOffsetOffset)
ACCESSORS_TO_SMI(Script, column_offset, kColumnOffsetOffset)
-ACCESSORS(Script, data, Object, kDataOffset)
ACCESSORS(Script, context_data, Object, kContextOffset)
ACCESSORS(Script, wrapper, Foreign, kWrapperOffset)
ACCESSORS_TO_SMI(Script, type, kTypeOffset)
@@ -5147,39 +5262,21 @@ int SharedFunctionInfo::profiler_ticks() {
}
-LanguageMode SharedFunctionInfo::language_mode() {
- int hints = compiler_hints();
- if (BooleanBit::get(hints, kExtendedModeFunction)) {
- ASSERT(BooleanBit::get(hints, kStrictModeFunction));
- return EXTENDED_MODE;
- }
- return BooleanBit::get(hints, kStrictModeFunction)
- ? STRICT_MODE : CLASSIC_MODE;
+StrictMode SharedFunctionInfo::strict_mode() {
+ return BooleanBit::get(compiler_hints(), kStrictModeFunction)
+ ? STRICT : SLOPPY;
}
-void SharedFunctionInfo::set_language_mode(LanguageMode language_mode) {
- // We only allow language mode transitions that go set the same language mode
- // again or go up in the chain:
- // CLASSIC_MODE -> STRICT_MODE -> EXTENDED_MODE.
- ASSERT(this->language_mode() == CLASSIC_MODE ||
- this->language_mode() == language_mode ||
- language_mode == EXTENDED_MODE);
+void SharedFunctionInfo::set_strict_mode(StrictMode strict_mode) {
+ // We only allow mode transitions from sloppy to strict.
+ ASSERT(this->strict_mode() == SLOPPY || this->strict_mode() == strict_mode);
int hints = compiler_hints();
- hints = BooleanBit::set(
- hints, kStrictModeFunction, language_mode != CLASSIC_MODE);
- hints = BooleanBit::set(
- hints, kExtendedModeFunction, language_mode == EXTENDED_MODE);
+ hints = BooleanBit::set(hints, kStrictModeFunction, strict_mode == STRICT);
set_compiler_hints(hints);
}
-bool SharedFunctionInfo::is_classic_mode() {
- return !BooleanBit::get(compiler_hints(), kStrictModeFunction);
-}
-
-BOOL_GETTER(SharedFunctionInfo, compiler_hints, is_extended_mode,
- kExtendedModeFunction)
BOOL_ACCESSORS(SharedFunctionInfo, compiler_hints, native, kNative)
BOOL_ACCESSORS(SharedFunctionInfo, compiler_hints, inline_builtin,
kInlineBuiltin)
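
The language-mode accessors above collapse to a two-valued StrictMode, and set_strict_mode only ever moves a function from sloppy to strict. A tiny standalone sketch of that one-way transition on a compiler-hints bitmask (the bit position is illustrative):

#include <cassert>

enum StrictMode { SLOPPY, STRICT };

const int kStrictModeFunctionBit = 1 << 0;  // illustrative bit position

struct SharedInfo {
  int compiler_hints = 0;

  StrictMode strict_mode() const {
    return (compiler_hints & kStrictModeFunctionBit) ? STRICT : SLOPPY;
  }

  void set_strict_mode(StrictMode mode) {
    // Only sloppy -> strict (or a no-op) is allowed, never strict -> sloppy.
    assert(strict_mode() == SLOPPY || strict_mode() == mode);
    if (mode == STRICT) compiler_hints |= kStrictModeFunctionBit;
  }
};

int main() {
  SharedInfo info;
  assert(info.strict_mode() == SLOPPY);
  info.set_strict_mode(STRICT);
  assert(info.strict_mode() == STRICT);
  info.set_strict_mode(STRICT);  // idempotent
  // info.set_strict_mode(SLOPPY) would trip the assert above.
  return 0;
}
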
@@ -5450,8 +5547,8 @@ void JSFunction::ReplaceCode(Code* code) {
bool is_optimized = code->kind() == Code::OPTIMIZED_FUNCTION;
if (was_optimized && is_optimized) {
- shared()->EvictFromOptimizedCodeMap(
- this->code(), "Replacing with another optimized code");
+ shared()->EvictFromOptimizedCodeMap(this->code(),
+ "Replacing with another optimized code");
}
set_code(code);
@@ -5686,7 +5783,6 @@ JSDate* JSDate::cast(Object* obj) {
ACCESSORS(JSMessageObject, type, String, kTypeOffset)
ACCESSORS(JSMessageObject, arguments, JSArray, kArgumentsOffset)
ACCESSORS(JSMessageObject, script, Object, kScriptOffset)
-ACCESSORS(JSMessageObject, stack_trace, Object, kStackTraceOffset)
ACCESSORS(JSMessageObject, stack_frames, Object, kStackFramesOffset)
SMI_ACCESSORS(JSMessageObject, start_position, kStartPositionOffset)
SMI_ACCESSORS(JSMessageObject, end_position, kEndPositionOffset)
@@ -5705,12 +5801,14 @@ ACCESSORS(Code, relocation_info, ByteArray, kRelocationInfoOffset)
ACCESSORS(Code, handler_table, FixedArray, kHandlerTableOffset)
ACCESSORS(Code, deoptimization_data, FixedArray, kDeoptimizationDataOffset)
ACCESSORS(Code, raw_type_feedback_info, Object, kTypeFeedbackInfoOffset)
+ACCESSORS(Code, next_code_link, Object, kNextCodeLinkOffset)
void Code::WipeOutHeader() {
WRITE_FIELD(this, kRelocationInfoOffset, NULL);
WRITE_FIELD(this, kHandlerTableOffset, NULL);
WRITE_FIELD(this, kDeoptimizationDataOffset, NULL);
+ WRITE_FIELD(this, kConstantPoolOffset, NULL);
// Do not wipe out e.g. a minor key.
if (!READ_FIELD(this, kTypeFeedbackInfoOffset)->IsSmi()) {
WRITE_FIELD(this, kTypeFeedbackInfoOffset, NULL);
@@ -5732,20 +5830,6 @@ void Code::set_type_feedback_info(Object* value, WriteBarrierMode mode) {
}
-Object* Code::next_code_link() {
- CHECK(kind() == OPTIMIZED_FUNCTION);
- return raw_type_feedback_info();
-}
-
-
-void Code::set_next_code_link(Object* value, WriteBarrierMode mode) {
- CHECK(kind() == OPTIMIZED_FUNCTION);
- set_raw_type_feedback_info(value);
- CONDITIONAL_WRITE_BARRIER(GetHeap(), this, kTypeFeedbackInfoOffset,
- value, mode);
-}
-
-
int Code::stub_info() {
ASSERT(kind() == COMPARE_IC || kind() == COMPARE_NIL_IC ||
kind() == BINARY_OP_IC || kind() == LOAD_IC);
@@ -5932,7 +6016,7 @@ ElementsKind JSObject::GetElementsKind() {
fixed_array->IsFixedArray() &&
fixed_array->IsDictionary()) ||
(kind > DICTIONARY_ELEMENTS));
- ASSERT((kind != NON_STRICT_ARGUMENTS_ELEMENTS) ||
+ ASSERT((kind != SLOPPY_ARGUMENTS_ELEMENTS) ||
(elements()->IsFixedArray() && elements()->length() >= 2));
}
#endif
@@ -5980,8 +6064,8 @@ bool JSObject::HasDictionaryElements() {
}
-bool JSObject::HasNonStrictArgumentsElements() {
- return GetElementsKind() == NON_STRICT_ARGUMENTS_ELEMENTS;
+bool JSObject::HasSloppyArgumentsElements() {
+ return GetElementsKind() == SLOPPY_ARGUMENTS_ELEMENTS;
}
@@ -6013,6 +6097,20 @@ bool JSObject::HasFixedTypedArrayElements() {
}
+#define FIXED_TYPED_ELEMENTS_CHECK(Type, type, TYPE, ctype, size) \
+bool JSObject::HasFixed##Type##Elements() { \
+ HeapObject* array = elements(); \
+ ASSERT(array != NULL); \
+ if (!array->IsHeapObject()) \
+ return false; \
+ return array->map()->instance_type() == FIXED_##TYPE##_ARRAY_TYPE; \
+}
+
+TYPED_ARRAYS(FIXED_TYPED_ELEMENTS_CHECK)
+
+#undef FIXED_TYPED_ELEMENTS_CHECK
+
+
bool JSObject::HasNamedInterceptor() {
return map()->has_named_interceptor();
}
@@ -6196,7 +6294,7 @@ bool JSReceiver::HasProperty(Handle<JSReceiver> object,
Handle<JSProxy> proxy = Handle<JSProxy>::cast(object);
return JSProxy::HasPropertyWithHandler(proxy, name);
}
- return object->GetPropertyAttribute(*name) != ABSENT;
+ return GetPropertyAttribute(object, name) != ABSENT;
}
@@ -6206,25 +6304,28 @@ bool JSReceiver::HasLocalProperty(Handle<JSReceiver> object,
Handle<JSProxy> proxy = Handle<JSProxy>::cast(object);
return JSProxy::HasPropertyWithHandler(proxy, name);
}
- return object->GetLocalPropertyAttribute(*name) != ABSENT;
+ return GetLocalPropertyAttribute(object, name) != ABSENT;
}
-PropertyAttributes JSReceiver::GetPropertyAttribute(Name* key) {
+PropertyAttributes JSReceiver::GetPropertyAttribute(Handle<JSReceiver> object,
+ Handle<Name> key) {
uint32_t index;
- if (IsJSObject() && key->AsArrayIndex(&index)) {
- return GetElementAttribute(index);
+ if (object->IsJSObject() && key->AsArrayIndex(&index)) {
+ return GetElementAttribute(object, index);
}
- return GetPropertyAttributeWithReceiver(this, key);
+ return GetPropertyAttributeWithReceiver(object, object, key);
}
-PropertyAttributes JSReceiver::GetElementAttribute(uint32_t index) {
- if (IsJSProxy()) {
- return JSProxy::cast(this)->GetElementAttributeWithHandler(this, index);
+PropertyAttributes JSReceiver::GetElementAttribute(Handle<JSReceiver> object,
+ uint32_t index) {
+ if (object->IsJSProxy()) {
+ return JSProxy::GetElementAttributeWithHandler(
+ Handle<JSProxy>::cast(object), object, index);
}
- return JSObject::cast(this)->GetElementAttributeWithReceiver(
- this, index, true);
+ return JSObject::GetElementAttributeWithReceiver(
+ Handle<JSObject>::cast(object), object, index, true);
}
@@ -6257,8 +6358,8 @@ bool JSReceiver::HasElement(Handle<JSReceiver> object, uint32_t index) {
Handle<JSProxy> proxy = Handle<JSProxy>::cast(object);
return JSProxy::HasElementWithHandler(proxy, index);
}
- return Handle<JSObject>::cast(object)->GetElementAttributeWithReceiver(
- *object, index, true) != ABSENT;
+ return JSObject::GetElementAttributeWithReceiver(
+ Handle<JSObject>::cast(object), object, index, true) != ABSENT;
}
@@ -6267,17 +6368,19 @@ bool JSReceiver::HasLocalElement(Handle<JSReceiver> object, uint32_t index) {
Handle<JSProxy> proxy = Handle<JSProxy>::cast(object);
return JSProxy::HasElementWithHandler(proxy, index);
}
- return Handle<JSObject>::cast(object)->GetElementAttributeWithReceiver(
- *object, index, false) != ABSENT;
+ return JSObject::GetElementAttributeWithReceiver(
+ Handle<JSObject>::cast(object), object, index, false) != ABSENT;
}
-PropertyAttributes JSReceiver::GetLocalElementAttribute(uint32_t index) {
- if (IsJSProxy()) {
- return JSProxy::cast(this)->GetElementAttributeWithHandler(this, index);
+PropertyAttributes JSReceiver::GetLocalElementAttribute(
+ Handle<JSReceiver> object, uint32_t index) {
+ if (object->IsJSProxy()) {
+ return JSProxy::GetElementAttributeWithHandler(
+ Handle<JSProxy>::cast(object), object, index);
}
- return JSObject::cast(this)->GetElementAttributeWithReceiver(
- this, index, false);
+ return JSObject::GetElementAttributeWithReceiver(
+ Handle<JSObject>::cast(object), object, index, false);
}
@@ -6504,20 +6607,20 @@ void Map::ClearCodeCache(Heap* heap) {
}
-void JSArray::EnsureSize(int required_size) {
- ASSERT(HasFastSmiOrObjectElements());
- FixedArray* elts = FixedArray::cast(elements());
+void JSArray::EnsureSize(Handle<JSArray> array, int required_size) {
+ ASSERT(array->HasFastSmiOrObjectElements());
+ Handle<FixedArray> elts = handle(FixedArray::cast(array->elements()));
const int kArraySizeThatFitsComfortablyInNewSpace = 128;
if (elts->length() < required_size) {
// Doubling in size would be overkill, but leave some slack to avoid
// constantly growing.
- Expand(required_size + (required_size >> 3));
+ Expand(array, required_size + (required_size >> 3));
// It's a performance benefit to keep a frequently used array in new-space.
- } else if (!GetHeap()->new_space()->Contains(elts) &&
+ } else if (!array->GetHeap()->new_space()->Contains(*elts) &&
required_size < kArraySizeThatFitsComfortablyInNewSpace) {
// Expand will allocate a new backing store in new space even if the size
// we asked for isn't larger than what we had before.
- Expand(required_size);
+ Expand(array, required_size);
}
}
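
EnsureSize grows the backing store to the required size plus one eighth of it (about 12.5% slack) instead of doubling, and re-expands arrays below 128 elements so they stay in new space. A quick standalone check of the slack arithmetic:

#include <cassert>

// Mirrors the slack computation in JSArray::EnsureSize:
// grow to required_size plus ~12.5%, not double.
int GrownCapacity(int required_size) {
  return required_size + (required_size >> 3);
}

int main() {
  assert(GrownCapacity(16) == 18);
  assert(GrownCapacity(100) == 112);
  assert(GrownCapacity(1024) == 1152);
  return 0;
}
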
@@ -6535,19 +6638,19 @@ bool JSArray::AllowsSetElementsLength() {
}
-MaybeObject* JSArray::SetContent(FixedArrayBase* storage) {
- MaybeObject* maybe_result = EnsureCanContainElements(
- storage, storage->length(), ALLOW_COPIED_DOUBLE_ELEMENTS);
- if (maybe_result->IsFailure()) return maybe_result;
- ASSERT((storage->map() == GetHeap()->fixed_double_array_map() &&
- IsFastDoubleElementsKind(GetElementsKind())) ||
- ((storage->map() != GetHeap()->fixed_double_array_map()) &&
- (IsFastObjectElementsKind(GetElementsKind()) ||
- (IsFastSmiElementsKind(GetElementsKind()) &&
- FixedArray::cast(storage)->ContainsOnlySmisOrHoles()))));
- set_elements(storage);
- set_length(Smi::FromInt(storage->length()));
- return this;
+void JSArray::SetContent(Handle<JSArray> array,
+ Handle<FixedArrayBase> storage) {
+ EnsureCanContainElements(array, storage, storage->length(),
+ ALLOW_COPIED_DOUBLE_ELEMENTS);
+
+ ASSERT((storage->map() == array->GetHeap()->fixed_double_array_map() &&
+ IsFastDoubleElementsKind(array->GetElementsKind())) ||
+ ((storage->map() != array->GetHeap()->fixed_double_array_map()) &&
+ (IsFastObjectElementsKind(array->GetElementsKind()) ||
+ (IsFastSmiElementsKind(array->GetElementsKind()) &&
+ Handle<FixedArray>::cast(storage)->ContainsOnlySmisOrHoles()))));
+ array->set_elements(*storage);
+ array->set_length(Smi::FromInt(storage->length()));
}
@@ -6569,44 +6672,24 @@ MaybeObject* ConstantPoolArray::Copy() {
}
-void TypeFeedbackCells::SetAstId(int index, TypeFeedbackId id) {
- set(1 + index * 2, Smi::FromInt(id.ToInt()));
-}
-
-
-TypeFeedbackId TypeFeedbackCells::AstId(int index) {
- return TypeFeedbackId(Smi::cast(get(1 + index * 2))->value());
-}
-
-
-void TypeFeedbackCells::SetCell(int index, Cell* cell) {
- set(index * 2, cell);
-}
-
-
-Cell* TypeFeedbackCells::GetCell(int index) {
- return Cell::cast(get(index * 2));
-}
-
-
-Handle<Object> TypeFeedbackCells::UninitializedSentinel(Isolate* isolate) {
- return isolate->factory()->the_hole_value();
+Handle<Object> TypeFeedbackInfo::UninitializedSentinel(Isolate* isolate) {
+ return isolate->factory()->uninitialized_symbol();
}
-Handle<Object> TypeFeedbackCells::MegamorphicSentinel(Isolate* isolate) {
- return isolate->factory()->undefined_value();
+Handle<Object> TypeFeedbackInfo::MegamorphicSentinel(Isolate* isolate) {
+ return isolate->factory()->megamorphic_symbol();
}
-Handle<Object> TypeFeedbackCells::MonomorphicArraySentinel(Isolate* isolate,
+Handle<Object> TypeFeedbackInfo::MonomorphicArraySentinel(Isolate* isolate,
ElementsKind elements_kind) {
return Handle<Object>(Smi::FromInt(static_cast<int>(elements_kind)), isolate);
}
-Object* TypeFeedbackCells::RawUninitializedSentinel(Heap* heap) {
- return heap->the_hole_value();
+Object* TypeFeedbackInfo::RawUninitializedSentinel(Heap* heap) {
+ return heap->uninitialized_symbol();
}
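
The uninitialized and megamorphic sentinels move from the-hole/undefined on TypeFeedbackCells to dedicated symbols on TypeFeedbackInfo, which keeps them from colliding with ordinary values that can legitimately appear in a feedback slot. A rough standalone sketch of classifying a slot by sentinel identity; the sentinel objects here are stand-ins, not the real symbols:

#include <cassert>

// Stand-ins for the uninitialized and megamorphic symbols; real feedback
// slots are compared against unique heap symbols in the same way.
struct Sentinel {};
Sentinel kUninitializedSentinel;
Sentinel kMegamorphicSentinel;

enum SlotState { UNINITIALIZED, MONOMORPHIC, MEGAMORPHIC };

SlotState Classify(const void* slot_value) {
  if (slot_value == &kUninitializedSentinel) return UNINITIALIZED;
  if (slot_value == &kMegamorphicSentinel) return MEGAMORPHIC;
  return MONOMORPHIC;  // anything else is concrete feedback, e.g. a map
}

int main() {
  int some_map = 0;  // stand-in for a concrete feedback object
  assert(Classify(&kUninitializedSentinel) == UNINITIALIZED);
  assert(Classify(&kMegamorphicSentinel) == MEGAMORPHIC);
  assert(Classify(&some_map) == MONOMORPHIC);
  return 0;
}
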
@@ -6688,8 +6771,8 @@ bool TypeFeedbackInfo::matches_inlined_type_change_checksum(int checksum) {
}
-ACCESSORS(TypeFeedbackInfo, type_feedback_cells, TypeFeedbackCells,
- kTypeFeedbackCellsOffset)
+ACCESSORS(TypeFeedbackInfo, feedback_vector, FixedArray,
+ kFeedbackVectorOffset)
SMI_ACCESSORS(AliasedArgumentsEntry, aliased_context_slot, kAliasedContextSlot)
diff --git a/deps/v8/src/objects-printer.cc b/deps/v8/src/objects-printer.cc
index 909d8f7421..518167cc51 100644
--- a/deps/v8/src/objects-printer.cc
+++ b/deps/v8/src/objects-printer.cc
@@ -378,7 +378,7 @@ void JSObject::PrintElements(FILE* out) {
case DICTIONARY_ELEMENTS:
elements()->Print(out);
break;
- case NON_STRICT_ARGUMENTS_ELEMENTS: {
+ case SLOPPY_ARGUMENTS_ELEMENTS: {
FixedArray* p = FixedArray::cast(elements());
PrintF(out, " parameter map:");
for (int i = 2; i < p->length(); i++) {
@@ -400,28 +400,39 @@ void JSObject::PrintTransitions(FILE* out) {
if (!map()->HasTransitionArray()) return;
TransitionArray* transitions = map()->transitions();
for (int i = 0; i < transitions->number_of_transitions(); i++) {
+ Name* key = transitions->GetKey(i);
PrintF(out, " ");
- transitions->GetKey(i)->NamePrint(out);
+ key->NamePrint(out);
PrintF(out, ": ");
- switch (transitions->GetTargetDetails(i).type()) {
- case FIELD: {
- PrintF(out, " (transition to field)\n");
- break;
+ if (key == GetHeap()->frozen_symbol()) {
+ PrintF(out, " (transition to frozen)\n");
+ } else if (key == GetHeap()->elements_transition_symbol()) {
+ PrintF(out, " (transition to ");
+ PrintElementsKind(out, transitions->GetTarget(i)->elements_kind());
+ PrintF(out, ")\n");
+ } else if (key == GetHeap()->observed_symbol()) {
+ PrintF(out, " (transition to Object.observe)\n");
+ } else {
+ switch (transitions->GetTargetDetails(i).type()) {
+ case FIELD: {
+ PrintF(out, " (transition to field)\n");
+ break;
+ }
+ case CONSTANT:
+ PrintF(out, " (transition to constant)\n");
+ break;
+ case CALLBACKS:
+ PrintF(out, " (transition to callback)\n");
+ break;
+ // Values below are never in the target descriptor array.
+ case NORMAL:
+ case HANDLER:
+ case INTERCEPTOR:
+ case TRANSITION:
+ case NONEXISTENT:
+ UNREACHABLE();
+ break;
}
- case CONSTANT:
- PrintF(out, " (transition to constant)\n");
- break;
- case CALLBACKS:
- PrintF(out, " (transition to callback)\n");
- break;
- // Values below are never in the target descriptor array.
- case NORMAL:
- case HANDLER:
- case INTERCEPTOR:
- case TRANSITION:
- case NONEXISTENT:
- UNREACHABLE();
- break;
}
}
}
@@ -555,8 +566,8 @@ void TypeFeedbackInfo::TypeFeedbackInfoPrint(FILE* out) {
HeapObject::PrintHeader(out, "TypeFeedbackInfo");
PrintF(out, " - ic_total_count: %d, ic_with_type_info_count: %d\n",
ic_total_count(), ic_with_type_info_count());
- PrintF(out, " - type_feedback_cells: ");
- type_feedback_cells()->FixedArrayPrint(out);
+ PrintF(out, " - feedback_vector: ");
+ feedback_vector()->FixedArrayPrint(out);
}
@@ -595,11 +606,14 @@ void ConstantPoolArray::ConstantPoolArrayPrint(FILE* out) {
HeapObject::PrintHeader(out, "ConstantPoolArray");
PrintF(out, " - length: %d", length());
for (int i = 0; i < length(); i++) {
- if (i < first_ptr_index()) {
+ if (i < first_code_ptr_index()) {
PrintF(out, "\n [%d]: double: %g", i, get_int64_entry_as_double(i));
+ } else if (i < first_heap_ptr_index()) {
+ PrintF(out, "\n [%d]: code target pointer: %p", i,
+ reinterpret_cast<void*>(get_code_ptr_entry(i)));
} else if (i < first_int32_index()) {
- PrintF(out, "\n [%d]: pointer: %p", i,
- reinterpret_cast<void*>(get_ptr_entry(i)));
+ PrintF(out, "\n [%d]: heap pointer: %p", i,
+ reinterpret_cast<void*>(get_heap_ptr_entry(i)));
} else {
PrintF(out, "\n [%d]: int32: %d", i, get_int32_entry(i));
}
@@ -624,8 +638,6 @@ void JSMessageObject::JSMessageObjectPrint(FILE* out) {
PrintF(out, "\n - end_position: %d", end_position());
PrintF(out, "\n - script: ");
script()->ShortPrint(out);
- PrintF(out, "\n - stack_trace: ");
- stack_trace()->ShortPrint(out);
PrintF(out, "\n - stack_frames: ");
stack_frames()->ShortPrint(out);
PrintF(out, "\n");
@@ -1138,8 +1150,6 @@ void Script::ScriptPrint(FILE* out) {
type()->ShortPrint(out);
PrintF(out, "\n - id: ");
id()->ShortPrint(out);
- PrintF(out, "\n - data: ");
- data()->ShortPrint(out);
PrintF(out, "\n - context data: ");
context_data()->ShortPrint(out);
PrintF(out, "\n - wrapper: ");
diff --git a/deps/v8/src/objects-visiting-inl.h b/deps/v8/src/objects-visiting-inl.h
index 5201a7b318..31117bb945 100644
--- a/deps/v8/src/objects-visiting-inl.h
+++ b/deps/v8/src/objects-visiting-inl.h
@@ -270,7 +270,7 @@ void StaticMarkingVisitor<StaticVisitor>::VisitEmbeddedPointer(
// TODO(ulan): It could be better to record slots only for strongly embedded
// objects here and record slots for weakly embedded object during clearing
// of non-live references in mark-compact.
- if (!Code::IsWeakEmbeddedObject(rinfo->host()->kind(), object)) {
+ if (!rinfo->host()->IsWeakObject(object)) {
StaticVisitor::MarkObject(heap, object);
}
}
@@ -282,7 +282,7 @@ void StaticMarkingVisitor<StaticVisitor>::VisitCell(
ASSERT(rinfo->rmode() == RelocInfo::CELL);
Cell* cell = rinfo->target_cell();
// No need to record slots because the cell space is not compacted during GC.
- if (!Code::IsWeakEmbeddedObject(rinfo->host()->kind(), cell)) {
+ if (!rinfo->host()->IsWeakObject(cell)) {
StaticVisitor::MarkObject(heap, cell);
}
}
@@ -313,7 +313,8 @@ void StaticMarkingVisitor<StaticVisitor>::VisitCodeTarget(
&& (target->ic_state() == MEGAMORPHIC || target->ic_state() == GENERIC ||
target->ic_state() == POLYMORPHIC || heap->flush_monomorphic_ics() ||
Serializer::enabled() || target->ic_age() != heap->global_ic_age())) {
- IC::Clear(target->GetIsolate(), rinfo->pc());
+ IC::Clear(target->GetIsolate(), rinfo->pc(),
+ rinfo->host()->constant_pool());
target = Code::GetCodeFromTargetAddress(rinfo->target_address());
}
heap->mark_compact_collector()->RecordRelocSlot(rinfo, target);
@@ -427,7 +428,7 @@ void StaticMarkingVisitor<StaticVisitor>::VisitCode(
Heap* heap = map->GetHeap();
Code* code = Code::cast(object);
if (FLAG_cleanup_code_caches_at_gc) {
- code->ClearTypeFeedbackCells(heap);
+ code->ClearTypeFeedbackInfo(heap);
}
if (FLAG_age_code && !Serializer::enabled()) {
code->MakeOlder(heap->mark_compact_collector()->marking_parity());
@@ -489,16 +490,16 @@ void StaticMarkingVisitor<StaticVisitor>::VisitConstantPoolArray(
Map* map, HeapObject* object) {
Heap* heap = map->GetHeap();
ConstantPoolArray* constant_pool = ConstantPoolArray::cast(object);
- if (constant_pool->count_of_ptr_entries() > 0) {
- int first_ptr_offset = constant_pool->OffsetOfElementAt(
- constant_pool->first_ptr_index());
- int last_ptr_offset = constant_pool->OffsetOfElementAt(
- constant_pool->first_ptr_index() +
- constant_pool->count_of_ptr_entries() - 1);
- StaticVisitor::VisitPointers(
- heap,
- HeapObject::RawField(object, first_ptr_offset),
- HeapObject::RawField(object, last_ptr_offset));
+ for (int i = 0; i < constant_pool->count_of_code_ptr_entries(); i++) {
+ int index = constant_pool->first_code_ptr_index() + i;
+ Address code_entry =
+ reinterpret_cast<Address>(constant_pool->RawFieldOfElementAt(index));
+ StaticVisitor::VisitCodeEntry(heap, code_entry);
+ }
+ for (int i = 0; i < constant_pool->count_of_heap_ptr_entries(); i++) {
+ int index = constant_pool->first_heap_ptr_index() + i;
+ StaticVisitor::VisitPointer(heap,
+ constant_pool->RawFieldOfElementAt(index));
}
}
@@ -898,6 +899,7 @@ void Code::CodeIterateBody(ObjectVisitor* v) {
IteratePointer(v, kHandlerTableOffset);
IteratePointer(v, kDeoptimizationDataOffset);
IteratePointer(v, kTypeFeedbackInfoOffset);
+ IterateNextCodeLink(v, kNextCodeLinkOffset);
IteratePointer(v, kConstantPoolOffset);
RelocIterator it(this, mode_mask);
@@ -932,6 +934,9 @@ void Code::CodeIterateBody(Heap* heap) {
StaticVisitor::VisitPointer(
heap,
reinterpret_cast<Object**>(this->address() + kTypeFeedbackInfoOffset));
+ StaticVisitor::VisitNextCodeLink(
+ heap,
+ reinterpret_cast<Object**>(this->address() + kNextCodeLinkOffset));
StaticVisitor::VisitPointer(
heap,
reinterpret_cast<Object**>(this->address() + kConstantPoolOffset));
diff --git a/deps/v8/src/objects-visiting.h b/deps/v8/src/objects-visiting.h
index 41e5fd6fd3..de8ca6d055 100644
--- a/deps/v8/src/objects-visiting.h
+++ b/deps/v8/src/objects-visiting.h
@@ -414,6 +414,8 @@ class StaticMarkingVisitor : public StaticVisitorBase {
INLINE(static void VisitCodeAgeSequence(Heap* heap, RelocInfo* rinfo));
INLINE(static void VisitExternalReference(RelocInfo* rinfo)) { }
INLINE(static void VisitRuntimeEntry(RelocInfo* rinfo)) { }
+ // Skip the weak next code link in a code object.
+ INLINE(static void VisitNextCodeLink(Heap* heap, Object** slot)) { }
// TODO(mstarzinger): This should be made protected once refactoring is done.
// Mark non-optimize code for functions inlined into the given optimized
diff --git a/deps/v8/src/objects.cc b/deps/v8/src/objects.cc
index 15c12db4e0..45220ee291 100644
--- a/deps/v8/src/objects.cc
+++ b/deps/v8/src/objects.cc
@@ -80,6 +80,8 @@ MaybeObject* Object::ToObject(Context* native_context) {
return CreateJSValue(native_context->boolean_function(), this);
} else if (IsString()) {
return CreateJSValue(native_context->string_function(), this);
+ } else if (IsSymbol()) {
+ return CreateJSValue(native_context->symbol_function(), this);
}
ASSERT(IsJSObject());
return this;
@@ -491,19 +493,11 @@ Handle<Object> Object::GetProperty(Handle<Object> object,
// method (or somewhere else entirely). Needs more global clean-up.
uint32_t index;
Isolate* isolate = name->GetIsolate();
- if (name->AsArrayIndex(&index))
- return GetElement(isolate, object, index);
+ if (name->AsArrayIndex(&index)) return GetElement(isolate, object, index);
CALL_HEAP_FUNCTION(isolate, object->GetProperty(*name), Object);
}
-Handle<Object> Object::GetElement(Isolate* isolate,
- Handle<Object> object,
- uint32_t index) {
- CALL_HEAP_FUNCTION(isolate, object->GetElement(isolate, index), Object);
-}
-
-
MaybeObject* JSProxy::GetElementWithHandler(Object* receiver,
uint32_t index) {
String* name;
@@ -517,7 +511,7 @@ Handle<Object> JSProxy::SetElementWithHandler(Handle<JSProxy> proxy,
Handle<JSReceiver> receiver,
uint32_t index,
Handle<Object> value,
- StrictModeFlag strict_mode) {
+ StrictMode strict_mode) {
Isolate* isolate = proxy->GetIsolate();
Handle<String> name = isolate->factory()->Uint32ToString(index);
return SetPropertyWithHandler(
@@ -613,29 +607,29 @@ Handle<Object> JSObject::GetPropertyWithFailedAccessCheck(
// No accessible property found.
*attributes = ABSENT;
- isolate->ReportFailedAccessCheck(*object, v8::ACCESS_GET);
+ isolate->ReportFailedAccessCheckWrapper(object, v8::ACCESS_GET);
RETURN_HANDLE_IF_SCHEDULED_EXCEPTION(isolate, Object);
return isolate->factory()->undefined_value();
}
PropertyAttributes JSObject::GetPropertyAttributeWithFailedAccessCheck(
- Object* receiver,
+ Handle<JSObject> object,
LookupResult* result,
- Name* name,
+ Handle<Name> name,
bool continue_search) {
if (result->IsProperty()) {
switch (result->type()) {
case CALLBACKS: {
// Only allow API accessors.
- Object* obj = result->GetCallbackObject();
+ Handle<Object> obj(result->GetCallbackObject(), object->GetIsolate());
if (obj->IsAccessorInfo()) {
- AccessorInfo* info = AccessorInfo::cast(obj);
+ Handle<AccessorInfo> info = Handle<AccessorInfo>::cast(obj);
if (info->all_can_read()) {
return result->GetAttributes();
}
} else if (obj->IsAccessorPair()) {
- AccessorPair* pair = AccessorPair::cast(obj);
+ Handle<AccessorPair> pair = Handle<AccessorPair>::cast(obj);
if (pair->all_can_read()) {
return result->GetAttributes();
}
@@ -648,13 +642,11 @@ PropertyAttributes JSObject::GetPropertyAttributeWithFailedAccessCheck(
case CONSTANT: {
if (!continue_search) break;
// Search ALL_CAN_READ accessors in prototype chain.
- LookupResult r(GetIsolate());
- result->holder()->LookupRealNamedPropertyInPrototypes(name, &r);
+ LookupResult r(object->GetIsolate());
+ result->holder()->LookupRealNamedPropertyInPrototypes(*name, &r);
if (r.IsProperty()) {
- return GetPropertyAttributeWithFailedAccessCheck(receiver,
- &r,
- name,
- continue_search);
+ return GetPropertyAttributeWithFailedAccessCheck(
+ object, &r, name, continue_search);
}
break;
}
@@ -662,17 +654,15 @@ PropertyAttributes JSObject::GetPropertyAttributeWithFailedAccessCheck(
case INTERCEPTOR: {
// If the object has an interceptor, try real named properties.
// No access check in GetPropertyAttributeWithInterceptor.
- LookupResult r(GetIsolate());
+ LookupResult r(object->GetIsolate());
if (continue_search) {
- result->holder()->LookupRealNamedProperty(name, &r);
+ result->holder()->LookupRealNamedProperty(*name, &r);
} else {
- result->holder()->LocalLookupRealNamedProperty(name, &r);
+ result->holder()->LocalLookupRealNamedProperty(*name, &r);
}
if (!r.IsFound()) break;
- return GetPropertyAttributeWithFailedAccessCheck(receiver,
- &r,
- name,
- continue_search);
+ return GetPropertyAttributeWithFailedAccessCheck(
+ object, &r, name, continue_search);
}
case HANDLER:
@@ -682,12 +672,12 @@ PropertyAttributes JSObject::GetPropertyAttributeWithFailedAccessCheck(
}
}
- GetIsolate()->ReportFailedAccessCheck(this, v8::ACCESS_HAS);
+ object->GetIsolate()->ReportFailedAccessCheckWrapper(object, v8::ACCESS_HAS);
return ABSENT;
}
-Object* JSObject::GetNormalizedProperty(LookupResult* result) {
+Object* JSObject::GetNormalizedProperty(const LookupResult* result) {
ASSERT(!HasFastProperties());
Object* value = property_dictionary()->ValueAt(result->GetDictionaryEntry());
if (IsGlobalObject()) {
@@ -699,7 +689,7 @@ Object* JSObject::GetNormalizedProperty(LookupResult* result) {
void JSObject::SetNormalizedProperty(Handle<JSObject> object,
- LookupResult* result,
+ const LookupResult* result,
Handle<Object> value) {
ASSERT(!object->HasFastProperties());
NameDictionary* property_dictionary = object->property_dictionary();
@@ -732,7 +722,7 @@ void JSObject::SetNormalizedProperty(Handle<JSObject> object,
Handle<NameDictionary> property_dictionary(object->property_dictionary());
if (!name->IsUniqueName()) {
- name = object->GetIsolate()->factory()->InternalizedStringFromString(
+ name = object->GetIsolate()->factory()->InternalizeString(
Handle<String>::cast(name));
}
@@ -972,63 +962,70 @@ MaybeObject* Object::GetProperty(Object* receiver,
}
-MaybeObject* Object::GetElementWithReceiver(Isolate* isolate,
- Object* receiver,
- uint32_t index) {
- Heap* heap = isolate->heap();
- Object* holder = this;
+Handle<Object> Object::GetElementWithReceiver(Isolate* isolate,
+ Handle<Object> object,
+ Handle<Object> receiver,
+ uint32_t index) {
+ Handle<Object> holder;
// Iterate up the prototype chain until an element is found or the null
// prototype is encountered.
- for (holder = this;
- holder != heap->null_value();
- holder = holder->GetPrototype(isolate)) {
+ for (holder = object;
+ !holder->IsNull();
+ holder = Handle<Object>(holder->GetPrototype(isolate), isolate)) {
if (!holder->IsJSObject()) {
Context* native_context = isolate->context()->native_context();
if (holder->IsNumber()) {
- holder = native_context->number_function()->instance_prototype();
+ holder = Handle<Object>(
+ native_context->number_function()->instance_prototype(), isolate);
} else if (holder->IsString()) {
- holder = native_context->string_function()->instance_prototype();
+ holder = Handle<Object>(
+ native_context->string_function()->instance_prototype(), isolate);
} else if (holder->IsSymbol()) {
- holder = native_context->symbol_function()->instance_prototype();
+ holder = Handle<Object>(
+ native_context->symbol_function()->instance_prototype(), isolate);
} else if (holder->IsBoolean()) {
- holder = native_context->boolean_function()->instance_prototype();
+ holder = Handle<Object>(
+ native_context->boolean_function()->instance_prototype(), isolate);
} else if (holder->IsJSProxy()) {
- return JSProxy::cast(holder)->GetElementWithHandler(receiver, index);
+ CALL_HEAP_FUNCTION(isolate,
+ Handle<JSProxy>::cast(holder)->GetElementWithHandler(
+ *receiver, index),
+ Object);
} else {
// Undefined and null have no indexed properties.
ASSERT(holder->IsUndefined() || holder->IsNull());
- return heap->undefined_value();
+ return isolate->factory()->undefined_value();
}
}
// Inline the case for JSObjects. Doing so significantly improves the
// performance of fetching elements where checking the prototype chain is
// necessary.
- JSObject* js_object = JSObject::cast(holder);
+ Handle<JSObject> js_object = Handle<JSObject>::cast(holder);
// Check access rights if needed.
if (js_object->IsAccessCheckNeeded()) {
- Isolate* isolate = heap->isolate();
- if (!isolate->MayIndexedAccess(js_object, index, v8::ACCESS_GET)) {
- isolate->ReportFailedAccessCheck(js_object, v8::ACCESS_GET);
- RETURN_IF_SCHEDULED_EXCEPTION(isolate);
- return heap->undefined_value();
+ if (!isolate->MayIndexedAccessWrapper(js_object, index, v8::ACCESS_GET)) {
+ isolate->ReportFailedAccessCheckWrapper(js_object, v8::ACCESS_GET);
+ RETURN_HANDLE_IF_SCHEDULED_EXCEPTION(isolate, Object);
+ return isolate->factory()->undefined_value();
}
}
if (js_object->HasIndexedInterceptor()) {
- return js_object->GetElementWithInterceptor(receiver, index);
+ return JSObject::GetElementWithInterceptor(js_object, receiver, index);
}
- if (js_object->elements() != heap->empty_fixed_array()) {
- MaybeObject* result = js_object->GetElementsAccessor()->Get(
+ if (js_object->elements() != isolate->heap()->empty_fixed_array()) {
+ Handle<Object> result = js_object->GetElementsAccessor()->Get(
receiver, js_object, index);
- if (result != heap->the_hole_value()) return result;
+ RETURN_IF_EMPTY_HANDLE_VALUE(isolate, result, Handle<Object>());
+ if (!result->IsTheHole()) return result;
}
}
- return heap->undefined_value();
+ return isolate->factory()->undefined_value();
}
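
GetElementWithReceiver, now fully handlified, still walks the prototype chain: primitives are redirected to their instance prototypes, proxies go through the handler, and the loop stops at the first holder that actually has the element (or at null, which yields undefined). A simplified standalone model of that walk, without handles, proxies or access checks:

#include <cassert>
#include <map>

// Simplified model: each "object" has numbered elements and a prototype.
struct Obj {
  std::map<unsigned, int> elements;
  const Obj* prototype = nullptr;  // nullptr plays the role of null
};

// Walk the chain until an element is found or the chain ends, mirroring the
// loop structure of Object::GetElementWithReceiver (undefined -> -1 here).
int GetElement(const Obj* holder, unsigned index) {
  for (; holder != nullptr; holder = holder->prototype) {
    std::map<unsigned, int>::const_iterator it = holder->elements.find(index);
    if (it != holder->elements.end()) return it->second;
  }
  return -1;  // stand-in for undefined
}

int main() {
  Obj proto, obj;
  proto.elements[7] = 42;
  obj.prototype = &proto;
  obj.elements[0] = 1;
  assert(GetElement(&obj, 0) == 1);   // found on the receiver itself
  assert(GetElement(&obj, 7) == 42);  // found on the prototype
  assert(GetElement(&obj, 9) == -1);  // falls off the chain -> undefined
  return 0;
}
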
@@ -1278,14 +1275,13 @@ bool String::MakeExternal(v8::String::ExternalStringResource* resource) {
// - the space the existing string occupies is too small for a regular
// external string.
// - the existing string is in old pointer space and the backing store of
- // the external string is not aligned. The GC cannot deal with fields
- // containing an unaligned address that points to outside of V8's heap.
+ // the external string is not aligned. The GC cannot deal with a field
+  //   containing a possibly unaligned address that points outside of V8's heap.
// In either case we resort to a short external string instead, omitting
// the field caching the address of the backing store. When we encounter
// short external strings in generated code, we need to bailout to runtime.
if (size < ExternalString::kSize ||
- (!IsAligned(reinterpret_cast<intptr_t>(resource->data()), kPointerSize) &&
- heap->old_pointer_space()->Contains(this))) {
+ heap->old_pointer_space()->Contains(this)) {
this->set_map_no_write_barrier(
is_internalized
? (is_ascii
@@ -1312,10 +1308,7 @@ bool String::MakeExternal(v8::String::ExternalStringResource* resource) {
// Fill the remainder of the string with dead wood.
int new_size = this->Size(); // Byte size of the external String object.
heap->CreateFillerObjectAt(this->address() + new_size, size - new_size);
- if (Marking::IsBlack(Marking::MarkBitFrom(this))) {
- MemoryChunk::IncrementLiveBytesFromMutator(this->address(),
- new_size - size);
- }
+ heap->AdjustLiveBytes(this->address(), new_size - size, Heap::FROM_MUTATOR);
return true;
}
@@ -1349,14 +1342,13 @@ bool String::MakeExternal(v8::String::ExternalAsciiStringResource* resource) {
// - the space the existing string occupies is too small for a regular
// external string.
// - the existing string is in old pointer space and the backing store of
- // the external string is not aligned. The GC cannot deal with fields
- // containing an unaligned address that points to outside of V8's heap.
+ // the external string is not aligned. The GC cannot deal with a field
+  //   containing a possibly unaligned address that points outside of V8's heap.
// In either case we resort to a short external string instead, omitting
// the field caching the address of the backing store. When we encounter
// short external strings in generated code, we need to bailout to runtime.
if (size < ExternalString::kSize ||
- (!IsAligned(reinterpret_cast<intptr_t>(resource->data()), kPointerSize) &&
- heap->old_pointer_space()->Contains(this))) {
+ heap->old_pointer_space()->Contains(this)) {
this->set_map_no_write_barrier(
is_internalized ? heap->short_external_ascii_internalized_string_map()
: heap->short_external_ascii_string_map());
@@ -1372,10 +1364,7 @@ bool String::MakeExternal(v8::String::ExternalAsciiStringResource* resource) {
// Fill the remainder of the string with dead wood.
int new_size = this->Size(); // Byte size of the external String object.
heap->CreateFillerObjectAt(this->address() + new_size, size - new_size);
- if (Marking::IsBlack(Marking::MarkBitFrom(this))) {
- MemoryChunk::IncrementLiveBytesFromMutator(this->address(),
- new_size - size);
- }
+ heap->AdjustLiveBytes(this->address(), new_size - size, Heap::FROM_MUTATOR);
return true;
}
@@ -1543,17 +1532,18 @@ void JSObject::JSObjectShortPrint(StringStream* accumulator) {
void JSObject::PrintElementsTransition(
- FILE* file, ElementsKind from_kind, FixedArrayBase* from_elements,
- ElementsKind to_kind, FixedArrayBase* to_elements) {
+ FILE* file, Handle<JSObject> object,
+ ElementsKind from_kind, Handle<FixedArrayBase> from_elements,
+ ElementsKind to_kind, Handle<FixedArrayBase> to_elements) {
if (from_kind != to_kind) {
PrintF(file, "elements transition [");
PrintElementsKind(file, from_kind);
PrintF(file, " -> ");
PrintElementsKind(file, to_kind);
PrintF(file, "] in ");
- JavaScriptFrame::PrintTop(GetIsolate(), file, false, true);
+ JavaScriptFrame::PrintTop(object->GetIsolate(), file, false, true);
PrintF(file, " for ");
- ShortPrint(file);
+ object->ShortPrint(file);
PrintF(file, " from ");
from_elements->ShortPrint(file);
PrintF(file, " to ");
@@ -1574,7 +1564,12 @@ void Map::PrintGeneralization(FILE* file,
PrintF(file, "[generalizing ");
constructor_name()->PrintOn(file);
PrintF(file, "] ");
- String::cast(instance_descriptors()->GetKey(modify_index))->PrintOn(file);
+ Name* name = instance_descriptors()->GetKey(modify_index);
+ if (name->IsString()) {
+ String::cast(name)->PrintOn(file);
+ } else {
+ PrintF(file, "{symbol %p}", static_cast<void*>(name));
+ }
if (constant_to_field) {
PrintF(file, ":c->f");
} else {
@@ -1614,7 +1609,7 @@ void JSObject::PrintInstanceMigration(FILE* file,
if (name->IsString()) {
String::cast(name)->PrintOn(file);
} else {
- PrintF(file, "???");
+ PrintF(file, "{symbol %p}", static_cast<void*>(name));
}
PrintF(file, " ");
}
@@ -1970,31 +1965,6 @@ static Handle<Object> NewStorageFor(Isolate* isolate,
}
-void JSObject::AddFastPropertyUsingMap(Handle<JSObject> object,
- Handle<Map> new_map,
- Handle<Name> name,
- Handle<Object> value,
- int field_index,
- Representation representation) {
- Isolate* isolate = object->GetIsolate();
-
- // This method is used to transition to a field. If we are transitioning to a
- // double field, allocate new storage.
- Handle<Object> storage = NewStorageFor(isolate, value, representation);
-
- if (object->map()->unused_property_fields() == 0) {
- int new_unused = new_map->unused_property_fields();
- Handle<FixedArray> properties(object->properties());
- Handle<FixedArray> values = isolate->factory()->CopySizeFixedArray(
- properties, properties->length() + new_unused + 1);
- object->set_properties(*values);
- }
-
- object->set_map(*new_map);
- object->FastPropertyAtPut(field_index, *storage);
-}
-
-
static MaybeObject* CopyAddFieldDescriptor(Map* map,
Name* name,
int index,
@@ -2059,7 +2029,16 @@ void JSObject::AddFastProperty(Handle<JSObject> object,
Handle<Map> new_map = CopyAddFieldDescriptor(
handle(object->map()), name, index, attributes, representation, flag);
- AddFastPropertyUsingMap(object, new_map, name, value, index, representation);
+ JSObject::MigrateToMap(object, new_map);
+
+ if (representation.IsDouble()) {
+ // Nothing more to be done.
+ if (value->IsUninitialized()) return;
+ HeapNumber* box = HeapNumber::cast(object->RawFastPropertyAt(index));
+ box->set_value(value->Number());
+ } else {
+ object->FastPropertyAtPut(index, *value);
+ }
}
@@ -2103,7 +2082,7 @@ void JSObject::AddConstantProperty(Handle<JSObject> object,
Handle<Map> new_map = CopyAddConstantDescriptor(
handle(object->map()), name, constant, attributes, flag);
- object->set_map(*new_map);
+ JSObject::MigrateToMap(object, new_map);
}
@@ -2142,7 +2121,7 @@ Handle<Object> JSObject::AddProperty(Handle<JSObject> object,
Handle<Name> name,
Handle<Object> value,
PropertyAttributes attributes,
- StrictModeFlag strict_mode,
+ StrictMode strict_mode,
JSReceiver::StoreFromKeyed store_mode,
ExtensibilityCheck extensibility_check,
ValueType value_type,
@@ -2152,13 +2131,13 @@ Handle<Object> JSObject::AddProperty(Handle<JSObject> object,
Isolate* isolate = object->GetIsolate();
if (!name->IsUniqueName()) {
- name = isolate->factory()->InternalizedStringFromString(
+ name = isolate->factory()->InternalizeString(
Handle<String>::cast(name));
}
if (extensibility_check == PERFORM_EXTENSIBILITY_CHECK &&
!object->map()->is_extensible()) {
- if (strict_mode == kNonStrictMode) {
+ if (strict_mode == SLOPPY) {
return value;
} else {
Handle<Object> args[1] = { name };
@@ -2192,8 +2171,7 @@ Handle<Object> JSObject::AddProperty(Handle<JSObject> object,
AddSlowProperty(object, name, value, attributes);
}
- if (FLAG_harmony_observation &&
- object->map()->is_observed() &&
+ if (object->map()->is_observed() &&
*name != isolate->heap()->hidden_string()) {
Handle<Object> old_value = isolate->factory()->the_hole_value();
EnqueueChangeRecord(object, "add", name, old_value);
@@ -2231,7 +2209,7 @@ Handle<Object> JSObject::SetPropertyPostInterceptor(
Handle<Name> name,
Handle<Object> value,
PropertyAttributes attributes,
- StrictModeFlag strict_mode) {
+ StrictMode strict_mode) {
// Check local property, ignore interceptor.
LookupResult result(object->GetIsolate());
object->LocalLookupRealNamedProperty(*name, &result);
@@ -2286,9 +2264,6 @@ const char* Representation::Mnemonic() const {
}
-enum RightTrimMode { FROM_GC, FROM_MUTATOR };
-
-
static void ZapEndOfFixedArray(Address new_end, int to_trim) {
// If we are doing a big trim in old space then we zap the space.
Object** zap = reinterpret_cast<Object**>(new_end);
@@ -2299,7 +2274,7 @@ static void ZapEndOfFixedArray(Address new_end, int to_trim) {
}
-template<RightTrimMode trim_mode>
+template<Heap::InvocationMode mode>
static void RightTrimFixedArray(Heap* heap, FixedArray* elms, int to_trim) {
ASSERT(elms->map() != heap->fixed_cow_array_map());
// For now this trick is only applied to fixed arrays in new and paged space.
@@ -2311,7 +2286,7 @@ static void RightTrimFixedArray(Heap* heap, FixedArray* elms, int to_trim) {
Address new_end = elms->address() + FixedArray::SizeFor(len - to_trim);
- if (trim_mode != FROM_GC || Heap::ShouldZapGarbage()) {
+ if (mode != Heap::FROM_GC || Heap::ShouldZapGarbage()) {
ZapEndOfFixedArray(new_end, to_trim);
}
@@ -2324,14 +2299,7 @@ static void RightTrimFixedArray(Heap* heap, FixedArray* elms, int to_trim) {
elms->set_length(len - to_trim);
- // Maintain marking consistency for IncrementalMarking.
- if (Marking::IsBlack(Marking::MarkBitFrom(elms))) {
- if (trim_mode == FROM_GC) {
- MemoryChunk::IncrementLiveBytesFromGC(elms->address(), -size_delta);
- } else {
- MemoryChunk::IncrementLiveBytesFromMutator(elms->address(), -size_delta);
- }
- }
+ heap->AdjustLiveBytes(elms->address(), -size_delta, mode);
// The array may not be moved during GC,
// and size has to be adjusted nevertheless.
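
RightTrimFixedArray shrinks the array in place: it zaps or fills the freed tail, fixes the length, and reports the freed bytes to the heap through AdjustLiveBytes. A small standalone sketch of the size bookkeeping; the header and element sizes are assumed, not the real FixedArray::SizeFor layout:

#include <cassert>

// Assumed layout constants for illustration: an 8-byte header plus
// 8 bytes per element (not the real FixedArray::SizeFor()).
const int kHeaderSize = 8;
const int kElementSize = 8;

int SizeFor(int length) { return kHeaderSize + length * kElementSize; }

// Right-trimming `to_trim` elements frees a tail of exactly this many bytes;
// the same delta (negated) is what gets passed to AdjustLiveBytes().
int TrimmedBytes(int old_length, int to_trim) {
  return SizeFor(old_length) - SizeFor(old_length - to_trim);
}

int main() {
  assert(TrimmedBytes(10, 3) == 3 * kElementSize);
  assert(SizeFor(10) - TrimmedBytes(10, 3) == SizeFor(7));
  return 0;
}
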
@@ -2351,16 +2319,14 @@ bool Map::InstancesNeedRewriting(Map* target,
ASSERT(target_number_of_fields >= number_of_fields);
if (target_number_of_fields != number_of_fields) return true;
- if (FLAG_track_double_fields) {
- // If smi descriptors were replaced by double descriptors, rewrite.
- DescriptorArray* old_desc = instance_descriptors();
- DescriptorArray* new_desc = target->instance_descriptors();
- int limit = NumberOfOwnDescriptors();
- for (int i = 0; i < limit; i++) {
- if (new_desc->GetDetails(i).representation().IsDouble() &&
- !old_desc->GetDetails(i).representation().IsDouble()) {
- return true;
- }
+ // If smi descriptors were replaced by double descriptors, rewrite.
+ DescriptorArray* old_desc = instance_descriptors();
+ DescriptorArray* new_desc = target->instance_descriptors();
+ int limit = NumberOfOwnDescriptors();
+ for (int i = 0; i < limit; i++) {
+ if (new_desc->GetDetails(i).representation().IsDouble() &&
+ !old_desc->GetDetails(i).representation().IsDouble()) {
+ return true;
}
}
@@ -2416,9 +2382,14 @@ void JSObject::MigrateToMap(Handle<JSObject> object, Handle<Map> new_map) {
Handle<DescriptorArray> old_descriptors(old_map->instance_descriptors());
Handle<DescriptorArray> new_descriptors(new_map->instance_descriptors());
- int descriptors = new_map->NumberOfOwnDescriptors();
+ int old_nof = old_map->NumberOfOwnDescriptors();
+ int new_nof = new_map->NumberOfOwnDescriptors();
+
+ // This method only supports generalizing instances to at least the same
+ // number of properties.
+ ASSERT(old_nof <= new_nof);
- for (int i = 0; i < descriptors; i++) {
+ for (int i = 0; i < old_nof; i++) {
PropertyDetails details = new_descriptors->GetDetails(i);
if (details.type() != FIELD) continue;
PropertyDetails old_details = old_descriptors->GetDetails(i);
@@ -2432,22 +2403,30 @@ void JSObject::MigrateToMap(Handle<JSObject> object, Handle<Map> new_map) {
? old_descriptors->GetValue(i)
: object->RawFastPropertyAt(old_descriptors->GetFieldIndex(i));
Handle<Object> value(raw_value, isolate);
- if (FLAG_track_double_fields &&
- !old_details.representation().IsDouble() &&
+ if (!old_details.representation().IsDouble() &&
details.representation().IsDouble()) {
if (old_details.representation().IsNone()) {
value = handle(Smi::FromInt(0), isolate);
}
value = NewStorageFor(isolate, value, details.representation());
}
- ASSERT(!(FLAG_track_double_fields &&
- details.representation().IsDouble() &&
- value->IsSmi()));
+ ASSERT(!(details.representation().IsDouble() && value->IsSmi()));
int target_index = new_descriptors->GetFieldIndex(i) - inobject;
if (target_index < 0) target_index += total_size;
array->set(target_index, *value);
}
+ for (int i = old_nof; i < new_nof; i++) {
+ PropertyDetails details = new_descriptors->GetDetails(i);
+ if (details.type() != FIELD) continue;
+ if (details.representation().IsDouble()) {
+ int target_index = new_descriptors->GetFieldIndex(i) - inobject;
+ if (target_index < 0) target_index += total_size;
+ Handle<Object> box = isolate->factory()->NewHeapNumber(0);
+ array->set(target_index, *box);
+ }
+ }
+
// From here on we cannot fail and we shouldn't GC anymore.
DisallowHeapAllocation no_allocation;
@@ -2468,7 +2447,7 @@ void JSObject::MigrateToMap(Handle<JSObject> object, Handle<Map> new_map) {
// If there are properties in the new backing store, trim it to the correct
// size and install the backing store into the object.
if (external > 0) {
- RightTrimFixedArray<FROM_MUTATOR>(isolate->heap(), *array, inobject);
+ RightTrimFixedArray<Heap::FROM_MUTATOR>(isolate->heap(), *array, inobject);
object->set_properties(*array);
}
@@ -2545,7 +2524,6 @@ Handle<Map> Map::CopyGeneralizeAllRepresentations(Handle<Map> map,
void Map::DeprecateTransitionTree() {
- if (!FLAG_track_fields) return;
if (is_deprecated()) return;
if (HasTransitionArray()) {
TransitionArray* transitions = this->transitions();
@@ -2577,6 +2555,7 @@ void Map::DeprecateTarget(Name* key, DescriptorArray* new_descriptors) {
DescriptorArray* to_replace = instance_descriptors();
Map* current = this;
+ GetHeap()->incremental_marking()->RecordWrites(to_replace);
while (current->instance_descriptors() == to_replace) {
current->SetEnumLength(kInvalidEnumCacheSentinel);
current->set_instance_descriptors(new_descriptors);
@@ -2625,6 +2604,8 @@ Map* Map::FindUpdatedMap(int verbatim,
current->instance_descriptors()->GetValue(i)) {
return NULL;
}
+ } else if (target_details.type() == CALLBACKS) {
+ return NULL;
}
}
@@ -2846,7 +2827,7 @@ Handle<Object> JSObject::SetPropertyWithInterceptor(
Handle<Name> name,
Handle<Object> value,
PropertyAttributes attributes,
- StrictModeFlag strict_mode) {
+ StrictMode strict_mode) {
// TODO(rossberg): Support symbols in the API.
if (name->IsSymbol()) return value;
Isolate* isolate = object->GetIsolate();
@@ -2878,7 +2859,7 @@ Handle<Object> JSReceiver::SetProperty(Handle<JSReceiver> object,
Handle<Name> name,
Handle<Object> value,
PropertyAttributes attributes,
- StrictModeFlag strict_mode,
+ StrictMode strict_mode,
StoreFromKeyed store_mode) {
LookupResult result(object->GetIsolate());
object->LocalLookup(*name, &result, true);
@@ -2895,7 +2876,7 @@ Handle<Object> JSObject::SetPropertyWithCallback(Handle<JSObject> object,
Handle<Name> name,
Handle<Object> value,
Handle<JSObject> holder,
- StrictModeFlag strict_mode) {
+ StrictMode strict_mode) {
Isolate* isolate = object->GetIsolate();
// We should never get here to initialize a const with the hole
@@ -2954,9 +2935,7 @@ Handle<Object> JSObject::SetPropertyWithCallback(Handle<JSObject> object,
return SetPropertyWithDefinedSetter(
object, Handle<JSReceiver>::cast(setter), value);
} else {
- if (strict_mode == kNonStrictMode) {
- return value;
- }
+ if (strict_mode == SLOPPY) return value;
Handle<Object> args[2] = { name, holder };
Handle<Object> error =
isolate->factory()->NewTypeError("no_setter_in_callback",
@@ -3007,7 +2986,7 @@ Handle<Object> JSObject::SetElementWithCallbackSetterInPrototypes(
uint32_t index,
Handle<Object> value,
bool* found,
- StrictModeFlag strict_mode) {
+ StrictMode strict_mode) {
Isolate* isolate = object->GetIsolate();
for (Handle<Object> proto = handle(object->GetPrototype(), isolate);
!proto->IsNull();
@@ -3047,7 +3026,7 @@ Handle<Object> JSObject::SetPropertyViaPrototypes(Handle<JSObject> object,
Handle<Name> name,
Handle<Object> value,
PropertyAttributes attributes,
- StrictModeFlag strict_mode,
+ StrictMode strict_mode,
bool* done) {
Isolate* isolate = object->GetIsolate();
@@ -3065,14 +3044,12 @@ Handle<Object> JSObject::SetPropertyViaPrototypes(Handle<JSObject> object,
*done = result.IsReadOnly();
break;
case INTERCEPTOR: {
- PropertyAttributes attr =
- result.holder()->GetPropertyAttributeWithInterceptor(
- *object, *name, true);
+ PropertyAttributes attr = GetPropertyAttributeWithInterceptor(
+ handle(result.holder()), object, name, true);
*done = !!(attr & READ_ONLY);
break;
}
case CALLBACKS: {
- if (!FLAG_es5_readonly && result.IsReadOnly()) break;
*done = true;
Handle<Object> callback_object(result.GetCallbackObject(), isolate);
return SetPropertyWithCallback(object, callback_object, name, value,
@@ -3091,9 +3068,8 @@ Handle<Object> JSObject::SetPropertyViaPrototypes(Handle<JSObject> object,
}
// If we get here with *done true, we have encountered a read-only property.
- if (!FLAG_es5_readonly) *done = false;
if (*done) {
- if (strict_mode == kNonStrictMode) return value;
+ if (strict_mode == SLOPPY) return value;
Handle<Object> args[] = { name, object };
Handle<Object> error = isolate->factory()->NewTypeError(
"strict_read_only_property", HandleVector(args, ARRAY_SIZE(args)));
@@ -3135,7 +3111,7 @@ static int AppendUniqueCallbacks(NeanderArray* callbacks,
Handle<AccessorInfo> entry(AccessorInfo::cast(callbacks->get(i)));
if (entry->name()->IsUniqueName()) continue;
Handle<String> key =
- isolate->factory()->InternalizedStringFromString(
+ isolate->factory()->InternalizeString(
Handle<String>(String::cast(entry->name())));
entry->set_name(*key);
}
@@ -3257,24 +3233,31 @@ Handle<Map> Map::FindTransitionedMap(MapHandleList* candidates) {
static Map* FindClosestElementsTransition(Map* map, ElementsKind to_kind) {
Map* current_map = map;
- int index = GetSequenceIndexFromFastElementsKind(map->elements_kind());
- int to_index = IsFastElementsKind(to_kind)
- ? GetSequenceIndexFromFastElementsKind(to_kind)
- : GetSequenceIndexFromFastElementsKind(TERMINAL_FAST_ELEMENTS_KIND);
+ int target_kind =
+ IsFastElementsKind(to_kind) || IsExternalArrayElementsKind(to_kind)
+ ? to_kind
+ : TERMINAL_FAST_ELEMENTS_KIND;
- ASSERT(index <= to_index);
+ // Support for legacy API.
+ if (IsExternalArrayElementsKind(to_kind) &&
+ !IsFixedTypedArrayElementsKind(map->elements_kind())) {
+ return map;
+ }
- for (; index < to_index; ++index) {
+ ElementsKind kind = map->elements_kind();
+ while (kind != target_kind) {
+ kind = GetNextTransitionElementsKind(kind);
if (!current_map->HasElementsTransition()) return current_map;
current_map = current_map->elements_transition_map();
}
- if (!IsFastElementsKind(to_kind) && current_map->HasElementsTransition()) {
+
+ if (to_kind != kind && current_map->HasElementsTransition()) {
+ ASSERT(to_kind == DICTIONARY_ELEMENTS);
Map* next_map = current_map->elements_transition_map();
if (next_map->elements_kind() == to_kind) return next_map;
}
- ASSERT(IsFastElementsKind(to_kind)
- ? current_map->elements_kind() == to_kind
- : current_map->elements_kind() == TERMINAL_FAST_ELEMENTS_KIND);
+
+ ASSERT(current_map->elements_kind() == target_kind);
return current_map;
}
@@ -3302,26 +3285,21 @@ bool Map::IsMapInArrayPrototypeChain() {
static MaybeObject* AddMissingElementsTransitions(Map* map,
ElementsKind to_kind) {
- ASSERT(IsFastElementsKind(map->elements_kind()));
- int index = GetSequenceIndexFromFastElementsKind(map->elements_kind());
- int to_index = IsFastElementsKind(to_kind)
- ? GetSequenceIndexFromFastElementsKind(to_kind)
- : GetSequenceIndexFromFastElementsKind(TERMINAL_FAST_ELEMENTS_KIND);
-
- ASSERT(index <= to_index);
+ ASSERT(IsTransitionElementsKind(map->elements_kind()));
Map* current_map = map;
- for (; index < to_index; ++index) {
- ElementsKind next_kind = GetFastElementsKindFromSequenceIndex(index + 1);
+ ElementsKind kind = map->elements_kind();
+ while (kind != to_kind && !IsTerminalElementsKind(kind)) {
+ kind = GetNextTransitionElementsKind(kind);
MaybeObject* maybe_next_map =
- current_map->CopyAsElementsKind(next_kind, INSERT_TRANSITION);
+ current_map->CopyAsElementsKind(kind, INSERT_TRANSITION);
if (!maybe_next_map->To(&current_map)) return maybe_next_map;
}
// In case we are exiting the fast elements kind system, just add the map at
// the end.
- if (!IsFastElementsKind(to_kind)) {
+ if (kind != to_kind) {
MaybeObject* maybe_next_map =
current_map->CopyAsElementsKind(to_kind, INSERT_TRANSITION);
if (!maybe_next_map->To(&current_map)) return maybe_next_map;
@@ -3353,7 +3331,7 @@ MaybeObject* JSObject::GetElementsTransitionMapSlow(ElementsKind to_kind) {
// Only remember the map transition if there is not an already existing
// non-matching element transition.
!start_map->IsUndefined() && !start_map->is_shared() &&
- IsFastElementsKind(from_kind);
+ IsTransitionElementsKind(from_kind);
// Only store fast element maps in ascending generality.
if (IsFastElementsKind(to_kind)) {
@@ -3370,6 +3348,15 @@ MaybeObject* JSObject::GetElementsTransitionMapSlow(ElementsKind to_kind) {
}
+// TODO(ishell): Temporary wrapper until handlified.
+// static
+Handle<Map> Map::AsElementsKind(Handle<Map> map, ElementsKind kind) {
+ CALL_HEAP_FUNCTION(map->GetIsolate(),
+ map->AsElementsKind(kind),
+ Map);
+}
+
+
MaybeObject* Map::AsElementsKind(ElementsKind kind) {
Map* closest_map = FindClosestElementsTransition(this, kind);
@@ -3382,6 +3369,7 @@ MaybeObject* Map::AsElementsKind(ElementsKind kind) {
void JSObject::LocalLookupRealNamedProperty(Name* name, LookupResult* result) {
+ DisallowHeapAllocation no_gc;
if (IsJSGlobalProxy()) {
Object* proto = GetPrototype();
if (proto->IsNull()) return result->NotFound();
@@ -3461,7 +3449,7 @@ Handle<Object> JSObject::SetPropertyWithFailedAccessCheck(
Handle<Name> name,
Handle<Object> value,
bool check_prototype,
- StrictModeFlag strict_mode) {
+ StrictMode strict_mode) {
if (check_prototype && !result->IsProperty()) {
object->LookupRealNamedPropertyInPrototypes(*name, result);
}
@@ -3517,7 +3505,7 @@ Handle<Object> JSObject::SetPropertyWithFailedAccessCheck(
}
Isolate* isolate = object->GetIsolate();
- isolate->ReportFailedAccessCheck(*object, v8::ACCESS_SET);
+ isolate->ReportFailedAccessCheckWrapper(object, v8::ACCESS_SET);
RETURN_HANDLE_IF_SCHEDULED_EXCEPTION(isolate, Object);
return value;
}
@@ -3528,7 +3516,7 @@ Handle<Object> JSReceiver::SetProperty(Handle<JSReceiver> object,
Handle<Name> key,
Handle<Object> value,
PropertyAttributes attributes,
- StrictModeFlag strict_mode,
+ StrictMode strict_mode,
StoreFromKeyed store_mode) {
if (result->IsHandler()) {
return JSProxy::SetPropertyWithHandler(handle(result->proxy()),
@@ -3560,7 +3548,7 @@ Handle<Object> JSProxy::SetPropertyWithHandler(Handle<JSProxy> proxy,
Handle<Name> name,
Handle<Object> value,
PropertyAttributes attributes,
- StrictModeFlag strict_mode) {
+ StrictMode strict_mode) {
Isolate* isolate = proxy->GetIsolate();
// TODO(rossberg): adjust once there is a story for symbols vs proxies.
@@ -3580,7 +3568,7 @@ Handle<Object> JSProxy::SetPropertyViaPrototypesWithHandler(
Handle<Name> name,
Handle<Object> value,
PropertyAttributes attributes,
- StrictModeFlag strict_mode,
+ StrictMode strict_mode,
bool* done) {
Isolate* isolate = proxy->GetIsolate();
Handle<Object> handler(proxy->handler(), isolate); // Trap might morph proxy.
@@ -3648,7 +3636,7 @@ Handle<Object> JSProxy::SetPropertyViaPrototypesWithHandler(
ASSERT(writable->IsTrue() || writable->IsFalse());
*done = writable->IsFalse();
if (!*done) return isolate->factory()->the_hole_value();
- if (strict_mode == kNonStrictMode) return value;
+ if (strict_mode == SLOPPY) return value;
Handle<Object> args[] = { name, receiver };
Handle<Object> error = isolate->factory()->NewTypeError(
"strict_read_only_property", HandleVector(args, ARRAY_SIZE(args)));
@@ -3667,7 +3655,7 @@ Handle<Object> JSProxy::SetPropertyViaPrototypesWithHandler(
receiver, Handle<JSReceiver>::cast(setter), value);
}
- if (strict_mode == kNonStrictMode) return value;
+ if (strict_mode == SLOPPY) return value;
Handle<Object> args2[] = { name, proxy };
Handle<Object> error = isolate->factory()->NewTypeError(
"no_setter_in_callback", HandleVector(args2, ARRAY_SIZE(args2)));
@@ -3711,21 +3699,18 @@ Handle<Object> JSProxy::DeleteElementWithHandler(
}
-MUST_USE_RESULT PropertyAttributes JSProxy::GetPropertyAttributeWithHandler(
- JSReceiver* receiver_raw,
- Name* name_raw) {
- Isolate* isolate = GetIsolate();
+PropertyAttributes JSProxy::GetPropertyAttributeWithHandler(
+ Handle<JSProxy> proxy,
+ Handle<JSReceiver> receiver,
+ Handle<Name> name) {
+ Isolate* isolate = proxy->GetIsolate();
HandleScope scope(isolate);
- Handle<JSProxy> proxy(this);
- Handle<Object> handler(this->handler(), isolate); // Trap might morph proxy.
- Handle<JSReceiver> receiver(receiver_raw);
- Handle<Object> name(name_raw, isolate);
// TODO(rossberg): adjust once there is a story for symbols vs proxies.
if (name->IsSymbol()) return ABSENT;
Handle<Object> args[] = { name };
- Handle<Object> result = CallTrap(
+ Handle<Object> result = proxy->CallTrap(
"getPropertyDescriptor", Handle<Object>(), ARRAY_SIZE(args), args);
if (isolate->has_pending_exception()) return NONE;
@@ -3760,6 +3745,7 @@ MUST_USE_RESULT PropertyAttributes JSProxy::GetPropertyAttributeWithHandler(
}
if (configurable->IsFalse()) {
+ Handle<Object> handler(proxy->handler(), isolate);
Handle<String> trap = isolate->factory()->InternalizeOneByteString(
STATIC_ASCII_VECTOR("getPropertyDescriptor"));
Handle<Object> args[] = { handler, trap, name };
@@ -3777,15 +3763,13 @@ MUST_USE_RESULT PropertyAttributes JSProxy::GetPropertyAttributeWithHandler(
}
-MUST_USE_RESULT PropertyAttributes JSProxy::GetElementAttributeWithHandler(
- JSReceiver* receiver_raw,
+PropertyAttributes JSProxy::GetElementAttributeWithHandler(
+ Handle<JSProxy> proxy,
+ Handle<JSReceiver> receiver,
uint32_t index) {
- Isolate* isolate = GetIsolate();
- HandleScope scope(isolate);
- Handle<JSProxy> proxy(this);
- Handle<JSReceiver> receiver(receiver_raw);
+ Isolate* isolate = proxy->GetIsolate();
Handle<String> name = isolate->factory()->Uint32ToString(index);
- return proxy->GetPropertyAttributeWithHandler(*receiver, *name);
+ return GetPropertyAttributeWithHandler(proxy, receiver, name);
}
@@ -3861,16 +3845,7 @@ void JSObject::AllocateStorageForMap(Handle<JSObject> object, Handle<Map> map) {
}
map = MapAsElementsKind(map, to_kind);
}
- int total_size =
- map->NumberOfOwnDescriptors() + map->unused_property_fields();
- int out_of_object = total_size - map->inobject_properties();
- if (out_of_object != object->properties()->length()) {
- Isolate* isolate = object->GetIsolate();
- Handle<FixedArray> new_properties = isolate->factory()->CopySizeFixedArray(
- handle(object->properties()), out_of_object);
- object->set_properties(*new_properties);
- }
- object->set_map(*map);
+ JSObject::MigrateToMap(object, map);
}
@@ -3917,7 +3892,7 @@ Handle<Object> JSObject::SetPropertyUsingTransition(
// of the map. If we get a fast copy of the map, all field representations
// will be tagged since the transition is omitted.
return JSObject::AddProperty(
- object, name, value, attributes, kNonStrictMode,
+ object, name, value, attributes, SLOPPY,
JSReceiver::CERTAINLY_NOT_STORE_FROM_KEYED,
JSReceiver::OMIT_EXTENSIBILITY_CHECK,
JSObject::FORCE_TAGGED, FORCE_FIELD, OMIT_TRANSITION);
@@ -3926,29 +3901,31 @@ Handle<Object> JSObject::SetPropertyUsingTransition(
// Keep the target CONSTANT if the same value is stored.
// TODO(verwaest): Also support keeping the placeholder
// (value->IsUninitialized) as constant.
- if (details.type() == CONSTANT &&
- descriptors->GetValue(descriptor) == *value) {
- object->set_map(*transition_map);
- return value;
- }
-
- Representation representation = details.representation();
-
- if (!value->FitsRepresentation(representation) ||
- details.type() == CONSTANT) {
+ if (!value->FitsRepresentation(details.representation()) ||
+ (details.type() == CONSTANT &&
+ descriptors->GetValue(descriptor) != *value)) {
transition_map = Map::GeneralizeRepresentation(transition_map,
descriptor, value->OptimalRepresentation(), FORCE_FIELD);
- Object* back = transition_map->GetBackPointer();
- if (back->IsMap()) {
- MigrateToMap(object, handle(Map::cast(back)));
- }
- descriptors = transition_map->instance_descriptors();
- representation = descriptors->GetDetails(descriptor).representation();
}
+ JSObject::MigrateToMap(object, transition_map);
+
+ // Reload.
+ descriptors = transition_map->instance_descriptors();
+ details = descriptors->GetDetails(descriptor);
+
+ if (details.type() != FIELD) return value;
+
int field_index = descriptors->GetFieldIndex(descriptor);
- AddFastPropertyUsingMap(
- object, transition_map, name, value, field_index, representation);
+ if (details.representation().IsDouble()) {
+ // Nothing more to be done.
+ if (value->IsUninitialized()) return value;
+ HeapNumber* box = HeapNumber::cast(object->RawFastPropertyAt(field_index));
+ box->set_value(value->Number());
+ } else {
+ object->FastPropertyAtPut(field_index, *value);
+ }
+
return value;
}
@@ -3968,7 +3945,7 @@ static void SetPropertyToField(LookupResult* lookup,
representation = desc->GetDetails(descriptor).representation();
}
- if (FLAG_track_double_fields && representation.IsDouble()) {
+ if (representation.IsDouble()) {
HeapNumber* storage = HeapNumber::cast(lookup->holder()->RawFastPropertyAt(
lookup->GetFieldIndex().field_index()));
storage->set_value(value->Number());
@@ -4029,7 +4006,7 @@ Handle<Object> JSObject::SetPropertyForResult(Handle<JSObject> object,
Handle<Name> name,
Handle<Object> value,
PropertyAttributes attributes,
- StrictModeFlag strict_mode,
+ StrictMode strict_mode,
StoreFromKeyed store_mode) {
Isolate* isolate = object->GetIsolate();
@@ -4047,7 +4024,7 @@ Handle<Object> JSObject::SetPropertyForResult(Handle<JSObject> object,
// Check access rights if needed.
if (object->IsAccessCheckNeeded()) {
- if (!isolate->MayNamedAccess(*object, *name, v8::ACCESS_SET)) {
+ if (!isolate->MayNamedAccessWrapper(object, name, v8::ACCESS_SET)) {
return SetPropertyWithFailedAccessCheck(object, lookup, name, value,
true, strict_mode);
}
@@ -4078,7 +4055,7 @@ Handle<Object> JSObject::SetPropertyForResult(Handle<JSObject> object,
}
if (lookup->IsProperty() && lookup->IsReadOnly()) {
- if (strict_mode == kStrictMode) {
+ if (strict_mode == STRICT) {
Handle<Object> args[] = { name, object };
Handle<Object> error = isolate->factory()->NewTypeError(
"strict_read_only_property", HandleVector(args, ARRAY_SIZE(args)));
@@ -4090,11 +4067,11 @@ Handle<Object> JSObject::SetPropertyForResult(Handle<JSObject> object,
}
Handle<Object> old_value = isolate->factory()->the_hole_value();
- bool is_observed = FLAG_harmony_observation &&
- object->map()->is_observed() &&
+ bool is_observed = object->map()->is_observed() &&
*name != isolate->heap()->hidden_string();
if (is_observed && lookup->IsDataProperty()) {
old_value = Object::GetProperty(object, name);
+ CHECK_NOT_EMPTY_HANDLE(isolate, old_value);
}
// This is a real property that is not read-only, or it is a
@@ -4140,6 +4117,7 @@ Handle<Object> JSObject::SetPropertyForResult(Handle<JSObject> object,
object->LocalLookup(*name, &new_lookup, true);
if (new_lookup.IsDataProperty()) {
Handle<Object> new_value = Object::GetProperty(object, name);
+ CHECK_NOT_EMPTY_HANDLE(isolate, new_value);
if (!new_value->SameValue(*old_value)) {
EnqueueChangeRecord(object, "update", name, old_value);
}
@@ -4182,9 +4160,9 @@ Handle<Object> JSObject::SetLocalPropertyIgnoreAttributes(
// Check access rights if needed.
if (object->IsAccessCheckNeeded()) {
- if (!isolate->MayNamedAccess(*object, *name, v8::ACCESS_SET)) {
+ if (!isolate->MayNamedAccessWrapper(object, name, v8::ACCESS_SET)) {
return SetPropertyWithFailedAccessCheck(object, &lookup, name, value,
- false, kNonStrictMode);
+ false, SLOPPY);
}
}
@@ -4207,18 +4185,19 @@ Handle<Object> JSObject::SetLocalPropertyIgnoreAttributes(
TransitionFlag flag = lookup.IsFound()
? OMIT_TRANSITION : INSERT_TRANSITION;
// Neither properties nor transitions found.
- return AddProperty(object, name, value, attributes, kNonStrictMode,
+ return AddProperty(object, name, value, attributes, SLOPPY,
MAY_BE_STORE_FROM_KEYED, extensibility_check, value_type, mode, flag);
}
Handle<Object> old_value = isolate->factory()->the_hole_value();
PropertyAttributes old_attributes = ABSENT;
- bool is_observed = FLAG_harmony_observation &&
- object->map()->is_observed() &&
+ bool is_observed = object->map()->is_observed() &&
*name != isolate->heap()->hidden_string();
if (is_observed && lookup.IsProperty()) {
- if (lookup.IsDataProperty()) old_value =
- Object::GetProperty(object, name);
+ if (lookup.IsDataProperty()) {
+ old_value = Object::GetProperty(object, name);
+ CHECK_NOT_EMPTY_HANDLE(isolate, old_value);
+ }
old_attributes = lookup.GetAttributes();
}
@@ -4263,6 +4242,7 @@ Handle<Object> JSObject::SetLocalPropertyIgnoreAttributes(
bool value_changed = false;
if (new_lookup.IsDataProperty()) {
Handle<Object> new_value = Object::GetProperty(object, name);
+ CHECK_NOT_EMPTY_HANDLE(isolate, new_value);
value_changed = !old_value->SameValue(*new_value);
}
if (new_lookup.GetAttributes() != old_attributes) {
@@ -4279,20 +4259,22 @@ Handle<Object> JSObject::SetLocalPropertyIgnoreAttributes(
PropertyAttributes JSObject::GetPropertyAttributePostInterceptor(
- JSObject* receiver,
- Name* name,
- bool continue_search) {
+ Handle<JSObject> object,
+ Handle<JSObject> receiver,
+ Handle<Name> name,
+ bool continue_search) {
// Check local property, ignore interceptor.
- LookupResult result(GetIsolate());
- LocalLookupRealNamedProperty(name, &result);
+ Isolate* isolate = object->GetIsolate();
+ LookupResult result(isolate);
+ object->LocalLookupRealNamedProperty(*name, &result);
if (result.IsFound()) return result.GetAttributes();
if (continue_search) {
// Continue searching via the prototype chain.
- Object* pt = GetPrototype();
- if (!pt->IsNull()) {
- return JSObject::cast(pt)->
- GetPropertyAttributeWithReceiver(receiver, name);
+ Handle<Object> proto(object->GetPrototype(), isolate);
+ if (!proto->IsNull()) {
+ return JSReceiver::GetPropertyAttributeWithReceiver(
+ Handle<JSObject>::cast(proto), receiver, name);
}
}
return ABSENT;
@@ -4300,31 +4282,30 @@ PropertyAttributes JSObject::GetPropertyAttributePostInterceptor(
PropertyAttributes JSObject::GetPropertyAttributeWithInterceptor(
- JSObject* receiver,
- Name* name,
- bool continue_search) {
+ Handle<JSObject> object,
+ Handle<JSObject> receiver,
+ Handle<Name> name,
+ bool continue_search) {
// TODO(rossberg): Support symbols in the API.
if (name->IsSymbol()) return ABSENT;
- Isolate* isolate = GetIsolate();
+ Isolate* isolate = object->GetIsolate();
HandleScope scope(isolate);
// Make sure that the top context does not change when doing
// callbacks or interceptor calls.
AssertNoContextChange ncc(isolate);
- Handle<InterceptorInfo> interceptor(GetNamedInterceptor());
- Handle<JSObject> receiver_handle(receiver);
- Handle<JSObject> holder_handle(this);
- Handle<String> name_handle(String::cast(name));
- PropertyCallbackArguments args(isolate, interceptor->data(), receiver, this);
+ Handle<InterceptorInfo> interceptor(object->GetNamedInterceptor());
+ PropertyCallbackArguments args(
+ isolate, interceptor->data(), *receiver, *object);
if (!interceptor->query()->IsUndefined()) {
v8::NamedPropertyQueryCallback query =
v8::ToCData<v8::NamedPropertyQueryCallback>(interceptor->query());
LOG(isolate,
- ApiNamedPropertyAccess("interceptor-named-has", *holder_handle, name));
+ ApiNamedPropertyAccess("interceptor-named-has", *object, *name));
v8::Handle<v8::Integer> result =
- args.Call(query, v8::Utils::ToLocal(name_handle));
+ args.Call(query, v8::Utils::ToLocal(Handle<String>::cast(name)));
if (!result.IsEmpty()) {
ASSERT(result->IsInt32());
return static_cast<PropertyAttributes>(result->Int32Value());
@@ -4333,44 +4314,45 @@ PropertyAttributes JSObject::GetPropertyAttributeWithInterceptor(
v8::NamedPropertyGetterCallback getter =
v8::ToCData<v8::NamedPropertyGetterCallback>(interceptor->getter());
LOG(isolate,
- ApiNamedPropertyAccess("interceptor-named-get-has", this, name));
+ ApiNamedPropertyAccess("interceptor-named-get-has", *object, *name));
v8::Handle<v8::Value> result =
- args.Call(getter, v8::Utils::ToLocal(name_handle));
+ args.Call(getter, v8::Utils::ToLocal(Handle<String>::cast(name)));
if (!result.IsEmpty()) return DONT_ENUM;
}
- return holder_handle->GetPropertyAttributePostInterceptor(*receiver_handle,
- *name_handle,
- continue_search);
+ return GetPropertyAttributePostInterceptor(
+ object, receiver, name, continue_search);
}
PropertyAttributes JSReceiver::GetPropertyAttributeWithReceiver(
- JSReceiver* receiver,
- Name* key) {
+ Handle<JSReceiver> object,
+ Handle<JSReceiver> receiver,
+ Handle<Name> key) {
uint32_t index = 0;
- if (IsJSObject() && key->AsArrayIndex(&index)) {
- return JSObject::cast(this)->GetElementAttributeWithReceiver(
- receiver, index, true);
+ if (object->IsJSObject() && key->AsArrayIndex(&index)) {
+ return JSObject::GetElementAttributeWithReceiver(
+ Handle<JSObject>::cast(object), receiver, index, true);
}
// Named property.
- LookupResult lookup(GetIsolate());
- Lookup(key, &lookup);
- return GetPropertyAttributeForResult(receiver, &lookup, key, true);
+ LookupResult lookup(object->GetIsolate());
+ object->Lookup(*key, &lookup);
+ return GetPropertyAttributeForResult(object, receiver, &lookup, key, true);
}
PropertyAttributes JSReceiver::GetPropertyAttributeForResult(
- JSReceiver* receiver,
+ Handle<JSReceiver> object,
+ Handle<JSReceiver> receiver,
LookupResult* lookup,
- Name* name,
+ Handle<Name> name,
bool continue_search) {
// Check access rights if needed.
- if (IsAccessCheckNeeded()) {
- JSObject* this_obj = JSObject::cast(this);
- Heap* heap = GetHeap();
- if (!heap->isolate()->MayNamedAccess(this_obj, name, v8::ACCESS_HAS)) {
- return this_obj->GetPropertyAttributeWithFailedAccessCheck(
- receiver, lookup, name, continue_search);
+ if (object->IsAccessCheckNeeded()) {
+ Heap* heap = object->GetHeap();
+ Handle<JSObject> obj = Handle<JSObject>::cast(object);
+ if (!heap->isolate()->MayNamedAccessWrapper(obj, name, v8::ACCESS_HAS)) {
+ return JSObject::GetPropertyAttributeWithFailedAccessCheck(
+ obj, lookup, name, continue_search);
}
}
if (lookup->IsFound()) {
@@ -4381,12 +4363,15 @@ PropertyAttributes JSReceiver::GetPropertyAttributeForResult(
case CALLBACKS:
return lookup->GetAttributes();
case HANDLER: {
- return JSProxy::cast(lookup->proxy())->GetPropertyAttributeWithHandler(
- receiver, name);
+ return JSProxy::GetPropertyAttributeWithHandler(
+ handle(lookup->proxy()), receiver, name);
}
case INTERCEPTOR:
- return lookup->holder()->GetPropertyAttributeWithInterceptor(
- JSObject::cast(receiver), name, continue_search);
+ return JSObject::GetPropertyAttributeWithInterceptor(
+ handle(lookup->holder()),
+ Handle<JSObject>::cast(receiver),
+ name,
+ continue_search);
case TRANSITION:
case NONEXISTENT:
UNREACHABLE();
@@ -4396,67 +4381,74 @@ PropertyAttributes JSReceiver::GetPropertyAttributeForResult(
}
-PropertyAttributes JSReceiver::GetLocalPropertyAttribute(Name* name) {
+PropertyAttributes JSReceiver::GetLocalPropertyAttribute(
+ Handle<JSReceiver> object, Handle<Name> name) {
// Check whether the name is an array index.
uint32_t index = 0;
- if (IsJSObject() && name->AsArrayIndex(&index)) {
- return GetLocalElementAttribute(index);
+ if (object->IsJSObject() && name->AsArrayIndex(&index)) {
+ return GetLocalElementAttribute(object, index);
}
// Named property.
- LookupResult lookup(GetIsolate());
- LocalLookup(name, &lookup, true);
- return GetPropertyAttributeForResult(this, &lookup, name, false);
+ LookupResult lookup(object->GetIsolate());
+ object->LocalLookup(*name, &lookup, true);
+ return GetPropertyAttributeForResult(object, object, &lookup, name, false);
}
PropertyAttributes JSObject::GetElementAttributeWithReceiver(
- JSReceiver* receiver, uint32_t index, bool continue_search) {
- Isolate* isolate = GetIsolate();
+ Handle<JSObject> object,
+ Handle<JSReceiver> receiver,
+ uint32_t index,
+ bool continue_search) {
+ Isolate* isolate = object->GetIsolate();
// Check access rights if needed.
- if (IsAccessCheckNeeded()) {
- if (!isolate->MayIndexedAccess(this, index, v8::ACCESS_HAS)) {
- isolate->ReportFailedAccessCheck(this, v8::ACCESS_HAS);
+ if (object->IsAccessCheckNeeded()) {
+ if (!isolate->MayIndexedAccessWrapper(object, index, v8::ACCESS_HAS)) {
+ isolate->ReportFailedAccessCheckWrapper(object, v8::ACCESS_HAS);
return ABSENT;
}
}
- if (IsJSGlobalProxy()) {
- Object* proto = GetPrototype();
+ if (object->IsJSGlobalProxy()) {
+ Handle<Object> proto(object->GetPrototype(), isolate);
if (proto->IsNull()) return ABSENT;
ASSERT(proto->IsJSGlobalObject());
- return JSObject::cast(proto)->GetElementAttributeWithReceiver(
- receiver, index, continue_search);
+ return JSObject::GetElementAttributeWithReceiver(
+ Handle<JSObject>::cast(proto), receiver, index, continue_search);
}
// Check for lookup interceptor except when bootstrapping.
- if (HasIndexedInterceptor() && !isolate->bootstrapper()->IsActive()) {
- return GetElementAttributeWithInterceptor(receiver, index, continue_search);
+ if (object->HasIndexedInterceptor() && !isolate->bootstrapper()->IsActive()) {
+ return JSObject::GetElementAttributeWithInterceptor(
+ object, receiver, index, continue_search);
}
return GetElementAttributeWithoutInterceptor(
- receiver, index, continue_search);
+ object, receiver, index, continue_search);
}
PropertyAttributes JSObject::GetElementAttributeWithInterceptor(
- JSReceiver* receiver, uint32_t index, bool continue_search) {
- Isolate* isolate = GetIsolate();
+ Handle<JSObject> object,
+ Handle<JSReceiver> receiver,
+ uint32_t index,
+ bool continue_search) {
+ Isolate* isolate = object->GetIsolate();
HandleScope scope(isolate);
// Make sure that the top context does not change when doing
// callbacks or interceptor calls.
AssertNoContextChange ncc(isolate);
- Handle<InterceptorInfo> interceptor(GetIndexedInterceptor());
- Handle<JSReceiver> hreceiver(receiver);
- Handle<JSObject> holder(this);
- PropertyCallbackArguments args(isolate, interceptor->data(), receiver, this);
+ Handle<InterceptorInfo> interceptor(object->GetIndexedInterceptor());
+ PropertyCallbackArguments args(
+ isolate, interceptor->data(), *receiver, *object);
if (!interceptor->query()->IsUndefined()) {
v8::IndexedPropertyQueryCallback query =
v8::ToCData<v8::IndexedPropertyQueryCallback>(interceptor->query());
LOG(isolate,
- ApiIndexedPropertyAccess("interceptor-indexed-has", this, index));
+ ApiIndexedPropertyAccess("interceptor-indexed-has", *object, index));
v8::Handle<v8::Integer> result = args.Call(query, index);
if (!result.IsEmpty())
return static_cast<PropertyAttributes>(result->Int32Value());
@@ -4464,37 +4456,42 @@ PropertyAttributes JSObject::GetElementAttributeWithInterceptor(
v8::IndexedPropertyGetterCallback getter =
v8::ToCData<v8::IndexedPropertyGetterCallback>(interceptor->getter());
LOG(isolate,
- ApiIndexedPropertyAccess("interceptor-indexed-get-has", this, index));
+ ApiIndexedPropertyAccess(
+ "interceptor-indexed-get-has", *object, index));
v8::Handle<v8::Value> result = args.Call(getter, index);
if (!result.IsEmpty()) return NONE;
}
- return holder->GetElementAttributeWithoutInterceptor(
- *hreceiver, index, continue_search);
+ return GetElementAttributeWithoutInterceptor(
+ object, receiver, index, continue_search);
}
PropertyAttributes JSObject::GetElementAttributeWithoutInterceptor(
- JSReceiver* receiver, uint32_t index, bool continue_search) {
- PropertyAttributes attr = GetElementsAccessor()->GetAttributes(
- receiver, this, index);
+ Handle<JSObject> object,
+ Handle<JSReceiver> receiver,
+ uint32_t index,
+ bool continue_search) {
+ PropertyAttributes attr = object->GetElementsAccessor()->GetAttributes(
+ *receiver, *object, index);
if (attr != ABSENT) return attr;
// Handle [] on String objects.
- if (IsStringObjectWithCharacterAt(index)) {
+ if (object->IsStringObjectWithCharacterAt(index)) {
return static_cast<PropertyAttributes>(READ_ONLY | DONT_DELETE);
}
if (!continue_search) return ABSENT;
- Object* pt = GetPrototype();
- if (pt->IsJSProxy()) {
+ Handle<Object> proto(object->GetPrototype(), object->GetIsolate());
+ if (proto->IsJSProxy()) {
// We need to follow the spec and simulate a call to [[GetOwnProperty]].
- return JSProxy::cast(pt)->GetElementAttributeWithHandler(receiver, index);
+ return JSProxy::GetElementAttributeWithHandler(
+ Handle<JSProxy>::cast(proto), receiver, index);
}
- if (pt->IsNull()) return ABSENT;
- return JSObject::cast(pt)->GetElementAttributeWithReceiver(
- receiver, index, true);
+ if (proto->IsNull()) return ABSENT;
+ return GetElementAttributeWithReceiver(
+ Handle<JSObject>::cast(proto), receiver, index, true);
}
@@ -4640,12 +4637,12 @@ void JSObject::NormalizeProperties(Handle<JSObject> object,
int new_instance_size = new_map->instance_size();
int instance_size_delta = map->instance_size() - new_instance_size;
ASSERT(instance_size_delta >= 0);
- isolate->heap()->CreateFillerObjectAt(object->address() + new_instance_size,
- instance_size_delta);
- if (Marking::IsBlack(Marking::MarkBitFrom(*object))) {
- MemoryChunk::IncrementLiveBytesFromMutator(object->address(),
- -instance_size_delta);
- }
+ Heap* heap = isolate->heap();
+ heap->CreateFillerObjectAt(object->address() + new_instance_size,
+ instance_size_delta);
+ heap->AdjustLiveBytes(object->address(),
+ -instance_size_delta,
+ Heap::FROM_MUTATOR);
object->set_map(*new_map);
map->NotifyLeafMapLayoutChange();
@@ -4674,119 +4671,92 @@ void JSObject::TransformToFastProperties(Handle<JSObject> object,
}
-static MUST_USE_RESULT MaybeObject* CopyFastElementsToDictionary(
- Isolate* isolate,
- FixedArrayBase* array,
+static Handle<SeededNumberDictionary> CopyFastElementsToDictionary(
+ Handle<FixedArrayBase> array,
int length,
- SeededNumberDictionary* dictionary) {
- Heap* heap = isolate->heap();
+ Handle<SeededNumberDictionary> dictionary) {
+ Isolate* isolate = array->GetIsolate();
+ Factory* factory = isolate->factory();
bool has_double_elements = array->IsFixedDoubleArray();
for (int i = 0; i < length; i++) {
- Object* value = NULL;
+ Handle<Object> value;
if (has_double_elements) {
- FixedDoubleArray* double_array = FixedDoubleArray::cast(array);
+ Handle<FixedDoubleArray> double_array =
+ Handle<FixedDoubleArray>::cast(array);
if (double_array->is_the_hole(i)) {
- value = isolate->heap()->the_hole_value();
+ value = factory->the_hole_value();
} else {
- // Objects must be allocated in the old object space, since the
- // overall number of HeapNumbers needed for the conversion might
- // exceed the capacity of new space, and we would fail repeatedly
- // trying to convert the FixedDoubleArray.
- MaybeObject* maybe_value_object =
- heap->AllocateHeapNumber(double_array->get_scalar(i), TENURED);
- if (!maybe_value_object->ToObject(&value)) return maybe_value_object;
+ value = factory->NewHeapNumber(double_array->get_scalar(i));
}
} else {
- value = FixedArray::cast(array)->get(i);
+ value = handle(Handle<FixedArray>::cast(array)->get(i), isolate);
}
if (!value->IsTheHole()) {
PropertyDetails details = PropertyDetails(NONE, NORMAL, 0);
- MaybeObject* maybe_result =
- dictionary->AddNumberEntry(i, value, details);
- if (!maybe_result->To(&dictionary)) return maybe_result;
+ dictionary =
+ SeededNumberDictionary::AddNumberEntry(dictionary, i, value, details);
}
}
return dictionary;
}
-static Handle<SeededNumberDictionary> CopyFastElementsToDictionary(
- Handle<FixedArrayBase> array,
- int length,
- Handle<SeededNumberDictionary> dict) {
- Isolate* isolate = array->GetIsolate();
- CALL_HEAP_FUNCTION(isolate,
- CopyFastElementsToDictionary(
- isolate, *array, length, *dict),
- SeededNumberDictionary);
-}
-
-
Handle<SeededNumberDictionary> JSObject::NormalizeElements(
Handle<JSObject> object) {
- CALL_HEAP_FUNCTION(object->GetIsolate(),
- object->NormalizeElements(),
- SeededNumberDictionary);
-}
-
-
-MaybeObject* JSObject::NormalizeElements() {
- ASSERT(!HasExternalArrayElements());
+ ASSERT(!object->HasExternalArrayElements() &&
+ !object->HasFixedTypedArrayElements());
+ Isolate* isolate = object->GetIsolate();
+ Factory* factory = isolate->factory();
// Find the backing store.
- FixedArrayBase* array = FixedArrayBase::cast(elements());
- Map* old_map = array->map();
+ Handle<FixedArrayBase> array(FixedArrayBase::cast(object->elements()));
bool is_arguments =
- (old_map == old_map->GetHeap()->non_strict_arguments_elements_map());
+ (array->map() == isolate->heap()->sloppy_arguments_elements_map());
if (is_arguments) {
- array = FixedArrayBase::cast(FixedArray::cast(array)->get(1));
+ array = handle(FixedArrayBase::cast(
+ Handle<FixedArray>::cast(array)->get(1)));
}
- if (array->IsDictionary()) return array;
+ if (array->IsDictionary()) return Handle<SeededNumberDictionary>::cast(array);
- ASSERT(HasFastSmiOrObjectElements() ||
- HasFastDoubleElements() ||
- HasFastArgumentsElements());
+ ASSERT(object->HasFastSmiOrObjectElements() ||
+ object->HasFastDoubleElements() ||
+ object->HasFastArgumentsElements());
// Compute the effective length and allocate a new backing store.
- int length = IsJSArray()
- ? Smi::cast(JSArray::cast(this)->length())->value()
+ int length = object->IsJSArray()
+ ? Smi::cast(Handle<JSArray>::cast(object)->length())->value()
: array->length();
int old_capacity = 0;
int used_elements = 0;
- GetElementsCapacityAndUsage(&old_capacity, &used_elements);
- SeededNumberDictionary* dictionary;
- MaybeObject* maybe_dictionary =
- SeededNumberDictionary::Allocate(GetHeap(), used_elements);
- if (!maybe_dictionary->To(&dictionary)) return maybe_dictionary;
+ object->GetElementsCapacityAndUsage(&old_capacity, &used_elements);
+ Handle<SeededNumberDictionary> dictionary =
+ factory->NewSeededNumberDictionary(used_elements);
- maybe_dictionary = CopyFastElementsToDictionary(
- GetIsolate(), array, length, dictionary);
- if (!maybe_dictionary->To(&dictionary)) return maybe_dictionary;
+ dictionary = CopyFastElementsToDictionary(array, length, dictionary);
// Switch to using the dictionary as the backing storage for elements.
if (is_arguments) {
- FixedArray::cast(elements())->set(1, dictionary);
+ FixedArray::cast(object->elements())->set(1, *dictionary);
} else {
// Set the new map first to satisfy the elements type assert in
// set_elements().
- Map* new_map;
- MaybeObject* maybe = GetElementsTransitionMap(GetIsolate(),
- DICTIONARY_ELEMENTS);
- if (!maybe->To(&new_map)) return maybe;
- set_map(new_map);
- set_elements(dictionary);
+ Handle<Map> new_map =
+ JSObject::GetElementsTransitionMap(object, DICTIONARY_ELEMENTS);
+
+ JSObject::MigrateToMap(object, new_map);
+ object->set_elements(*dictionary);
}
- old_map->GetHeap()->isolate()->counters()->elements_to_dictionary()->
- Increment();
+ isolate->counters()->elements_to_dictionary()->Increment();
#ifdef DEBUG
if (FLAG_trace_normalization) {
PrintF("Object elements have been normalized:\n");
- Print();
+ object->Print();
}
#endif
- ASSERT(HasDictionaryElements() || HasDictionaryArgumentsElements());
+ ASSERT(object->HasDictionaryElements() ||
+ object->HasDictionaryArgumentsElements());
return dictionary;
}
@@ -4952,10 +4922,10 @@ void JSObject::DeleteHiddenProperty(Handle<JSObject> object, Handle<Name> key) {
}
-bool JSObject::HasHiddenProperties() {
- return GetPropertyAttributePostInterceptor(this,
- GetHeap()->hidden_string(),
- false) != ABSENT;
+bool JSObject::HasHiddenProperties(Handle<JSObject> object) {
+ Handle<Name> hidden = object->GetIsolate()->factory()->hidden_string();
+ return GetPropertyAttributePostInterceptor(
+ object, object, hidden, false) != ABSENT;
}
@@ -5036,7 +5006,7 @@ Handle<Object> JSObject::SetHiddenPropertiesHashTable(Handle<JSObject> object,
// We can store the identity hash inline iff there is no backing store
// for hidden properties yet.
- ASSERT(object->HasHiddenProperties() != value->IsSmi());
+ ASSERT(JSObject::HasHiddenProperties(object) != value->IsSmi());
if (object->HasFastProperties()) {
// If the object has fast properties, check whether the first slot
// in the descriptor array matches the hidden string. Since the
@@ -5115,18 +5085,6 @@ Handle<Object> JSObject::DeletePropertyWithInterceptor(Handle<JSObject> object,
}
-// TODO(mstarzinger): Temporary wrapper until handlified.
-static Handle<Object> AccessorDelete(Handle<JSObject> object,
- uint32_t index,
- JSObject::DeleteMode mode) {
- CALL_HEAP_FUNCTION(object->GetIsolate(),
- object->GetElementsAccessor()->Delete(*object,
- index,
- mode),
- Object);
-}
-
-
Handle<Object> JSObject::DeleteElementWithInterceptor(Handle<JSObject> object,
uint32_t index) {
Isolate* isolate = object->GetIsolate();
@@ -5153,7 +5111,8 @@ Handle<Object> JSObject::DeleteElementWithInterceptor(Handle<JSObject> object,
// Rebox CustomArguments::kReturnValueOffset before returning.
return handle(*result_internal, isolate);
}
- Handle<Object> delete_result = AccessorDelete(object, index, NORMAL_DELETION);
+ Handle<Object> delete_result = object->GetElementsAccessor()->Delete(
+ object, index, NORMAL_DELETION);
RETURN_HANDLE_IF_SCHEDULED_EXCEPTION(isolate, Object);
return delete_result;
}
@@ -5167,8 +5126,8 @@ Handle<Object> JSObject::DeleteElement(Handle<JSObject> object,
// Check access rights if needed.
if (object->IsAccessCheckNeeded() &&
- !isolate->MayIndexedAccess(*object, index, v8::ACCESS_DELETE)) {
- isolate->ReportFailedAccessCheck(*object, v8::ACCESS_DELETE);
+ !isolate->MayIndexedAccessWrapper(object, index, v8::ACCESS_DELETE)) {
+ isolate->ReportFailedAccessCheckWrapper(object, v8::ACCESS_DELETE);
RETURN_HANDLE_IF_SCHEDULED_EXCEPTION(isolate, Object);
return factory->false_value();
}
@@ -5196,12 +5155,14 @@ Handle<Object> JSObject::DeleteElement(Handle<JSObject> object,
Handle<Object> old_value;
bool should_enqueue_change_record = false;
- if (FLAG_harmony_observation && object->map()->is_observed()) {
+ if (object->map()->is_observed()) {
should_enqueue_change_record = HasLocalElement(object, index);
if (should_enqueue_change_record) {
- old_value = object->GetLocalElementAccessorPair(index) != NULL
- ? Handle<Object>::cast(factory->the_hole_value())
- : Object::GetElement(isolate, object, index);
+ if (object->GetLocalElementAccessorPair(index) != NULL) {
+ old_value = Handle<Object>::cast(factory->the_hole_value());
+ } else {
+ old_value = Object::GetElementNoExceptionThrown(isolate, object, index);
+ }
}
}
@@ -5210,7 +5171,7 @@ Handle<Object> JSObject::DeleteElement(Handle<JSObject> object,
if (object->HasIndexedInterceptor() && mode != FORCE_DELETION) {
result = DeleteElementWithInterceptor(object, index);
} else {
- result = AccessorDelete(object, index, mode);
+ result = object->GetElementsAccessor()->Delete(object, index, mode);
}
if (should_enqueue_change_record && !HasLocalElement(object, index)) {
@@ -5231,8 +5192,8 @@ Handle<Object> JSObject::DeleteProperty(Handle<JSObject> object,
// Check access rights if needed.
if (object->IsAccessCheckNeeded() &&
- !isolate->MayNamedAccess(*object, *name, v8::ACCESS_DELETE)) {
- isolate->ReportFailedAccessCheck(*object, v8::ACCESS_DELETE);
+ !isolate->MayNamedAccessWrapper(object, name, v8::ACCESS_DELETE)) {
+ isolate->ReportFailedAccessCheckWrapper(object, v8::ACCESS_DELETE);
RETURN_HANDLE_IF_SCHEDULED_EXCEPTION(isolate, Object);
return isolate->factory()->false_value();
}
@@ -5267,11 +5228,11 @@ Handle<Object> JSObject::DeleteProperty(Handle<JSObject> object,
}
Handle<Object> old_value = isolate->factory()->the_hole_value();
- bool is_observed = FLAG_harmony_observation &&
- object->map()->is_observed() &&
+ bool is_observed = object->map()->is_observed() &&
*name != isolate->heap()->hidden_string();
if (is_observed && lookup.IsDataProperty()) {
old_value = Object::GetProperty(object, name);
+ CHECK_NOT_EMPTY_HANDLE(isolate, old_value);
}
Handle<Object> result;
@@ -5390,7 +5351,7 @@ bool JSObject::ReferencesObject(Object* obj) {
if (ReferencesObjectFromElements(elements, kind, obj)) return true;
break;
}
- case NON_STRICT_ARGUMENTS_ELEMENTS: {
+ case SLOPPY_ARGUMENTS_ELEMENTS: {
FixedArray* parameter_map = FixedArray::cast(elements());
// Check the mapped parameters.
int length = parameter_map->length();
@@ -5412,7 +5373,7 @@ bool JSObject::ReferencesObject(Object* obj) {
// Get the constructor function for arguments array.
JSObject* arguments_boilerplate =
heap->isolate()->context()->native_context()->
- arguments_boilerplate();
+ sloppy_arguments_boilerplate();
JSFunction* arguments_function =
JSFunction::cast(arguments_boilerplate->map()->constructor());
@@ -5441,6 +5402,12 @@ bool JSObject::ReferencesObject(Object* obj) {
// Check the context extension (if any) if it can have references.
if (context->has_extension() && !context->IsCatchContext()) {
+ // With harmony scoping, a JSFunction may have a global context.
+ // TODO(mvstanton): walk into the ScopeInfo.
+ if (FLAG_harmony_scoping && context->IsGlobalContext()) {
+ return false;
+ }
+
return JSObject::cast(context->extension())->ReferencesObject(obj);
}
}
@@ -5456,10 +5423,10 @@ Handle<Object> JSObject::PreventExtensions(Handle<JSObject> object) {
if (!object->map()->is_extensible()) return object;
if (object->IsAccessCheckNeeded() &&
- !isolate->MayNamedAccess(*object,
- isolate->heap()->undefined_value(),
- v8::ACCESS_KEYS)) {
- isolate->ReportFailedAccessCheck(*object, v8::ACCESS_KEYS);
+ !isolate->MayNamedAccessWrapper(object,
+ isolate->factory()->undefined_value(),
+ v8::ACCESS_KEYS)) {
+ isolate->ReportFailedAccessCheckWrapper(object, v8::ACCESS_KEYS);
RETURN_HANDLE_IF_SCHEDULED_EXCEPTION(isolate, Object);
return isolate->factory()->false_value();
}
@@ -5472,7 +5439,8 @@ Handle<Object> JSObject::PreventExtensions(Handle<JSObject> object) {
}
// It's not possible to seal objects with external array elements
- if (object->HasExternalArrayElements()) {
+ if (object->HasExternalArrayElements() ||
+ object->HasFixedTypedArrayElements()) {
Handle<Object> error =
isolate->factory()->NewTypeError(
"cant_prevent_ext_external_array_elements",
@@ -5495,10 +5463,10 @@ Handle<Object> JSObject::PreventExtensions(Handle<JSObject> object) {
Handle<Map> new_map = Map::Copy(handle(object->map()));
new_map->set_is_extensible(false);
- object->set_map(*new_map);
+ JSObject::MigrateToMap(object, new_map);
ASSERT(!object->map()->is_extensible());
- if (FLAG_harmony_observation && object->map()->is_observed()) {
+ if (object->map()->is_observed()) {
EnqueueChangeRecord(object, "preventExtensions", Handle<Name>(),
isolate->factory()->the_hole_value());
}
@@ -5528,18 +5496,18 @@ static void FreezeDictionary(Dictionary* dictionary) {
Handle<Object> JSObject::Freeze(Handle<JSObject> object) {
- // Freezing non-strict arguments should be handled elsewhere.
- ASSERT(!object->HasNonStrictArgumentsElements());
+ // Freezing sloppy arguments should be handled elsewhere.
+ ASSERT(!object->HasSloppyArgumentsElements());
ASSERT(!object->map()->is_observed());
if (object->map()->is_frozen()) return object;
Isolate* isolate = object->GetIsolate();
if (object->IsAccessCheckNeeded() &&
- !isolate->MayNamedAccess(*object,
- isolate->heap()->undefined_value(),
- v8::ACCESS_KEYS)) {
- isolate->ReportFailedAccessCheck(*object, v8::ACCESS_KEYS);
+ !isolate->MayNamedAccessWrapper(object,
+ isolate->factory()->undefined_value(),
+ v8::ACCESS_KEYS)) {
+ isolate->ReportFailedAccessCheckWrapper(object, v8::ACCESS_KEYS);
RETURN_HANDLE_IF_SCHEDULED_EXCEPTION(isolate, Object);
return isolate->factory()->false_value();
}
@@ -5552,7 +5520,8 @@ Handle<Object> JSObject::Freeze(Handle<JSObject> object) {
}
// It's not possible to freeze objects with external array elements
- if (object->HasExternalArrayElements()) {
+ if (object->HasExternalArrayElements() ||
+ object->HasFixedTypedArrayElements()) {
Handle<Object> error =
isolate->factory()->NewTypeError(
"cant_prevent_ext_external_array_elements",
@@ -5588,11 +5557,11 @@ Handle<Object> JSObject::Freeze(Handle<JSObject> object) {
Handle<Map> old_map(object->map());
old_map->LookupTransition(*object, isolate->heap()->frozen_symbol(), &result);
if (result.IsTransition()) {
- Map* transition_map = result.GetTransitionTarget();
+ Handle<Map> transition_map(result.GetTransitionTarget());
ASSERT(transition_map->has_dictionary_elements());
ASSERT(transition_map->is_frozen());
ASSERT(!transition_map->is_extensible());
- object->set_map(transition_map);
+ JSObject::MigrateToMap(object, transition_map);
} else if (object->HasFastProperties() && old_map->CanHaveMoreTransitions()) {
// Create a new descriptor array with fully-frozen properties
int num_descriptors = old_map->NumberOfOwnDescriptors();
@@ -5605,7 +5574,7 @@ Handle<Object> JSObject::Freeze(Handle<JSObject> object) {
new_map->freeze();
new_map->set_is_extensible(false);
new_map->set_elements_kind(DICTIONARY_ELEMENTS);
- object->set_map(*new_map);
+ JSObject::MigrateToMap(object, new_map);
} else {
// Slow path: need to normalize properties for safety
NormalizeProperties(object, CLEAR_INOBJECT_PROPERTIES, 0);
@@ -5616,7 +5585,7 @@ Handle<Object> JSObject::Freeze(Handle<JSObject> object) {
new_map->freeze();
new_map->set_is_extensible(false);
new_map->set_elements_kind(DICTIONARY_ELEMENTS);
- object->set_map(*new_map);
+ JSObject::MigrateToMap(object, new_map);
// Freeze dictionary-mode properties
FreezeDictionary(object->property_dictionary());
@@ -5660,7 +5629,7 @@ void JSObject::SetObserved(Handle<JSObject> object) {
new_map = Map::Copy(handle(object->map()));
new_map->set_is_observed();
}
- object->set_map(*new_map);
+ JSObject::MigrateToMap(object, new_map);
}
@@ -5781,7 +5750,7 @@ Handle<JSObject> JSObjectWalkVisitor<ContextObject>::StructureWalk(
ASSERT(names->get(i)->IsString());
Handle<String> key_string(String::cast(names->get(i)));
PropertyAttributes attributes =
- copy->GetLocalPropertyAttribute(*key_string);
+ JSReceiver::GetLocalPropertyAttribute(copy, key_string);
// Only deep copy fields from the object literal expression.
// In particular, don't try to copy the length attribute of
// an array.
@@ -5796,7 +5765,7 @@ Handle<JSObject> JSObjectWalkVisitor<ContextObject>::StructureWalk(
if (copying) {
// Creating object copy for literals. No strict mode needed.
CHECK_NOT_EMPTY_HANDLE(isolate, JSObject::SetProperty(
- copy, key_string, result, NONE, kNonStrictMode));
+ copy, key_string, result, NONE, SLOPPY));
}
}
}
@@ -5855,7 +5824,7 @@ Handle<JSObject> JSObjectWalkVisitor<ContextObject>::StructureWalk(
}
break;
}
- case NON_STRICT_ARGUMENTS_ELEMENTS:
+ case SLOPPY_ARGUMENTS_ELEMENTS:
UNIMPLEMENTED();
break;
@@ -5913,9 +5882,9 @@ bool JSReceiver::IsSimpleEnum() {
JSObject* curr = JSObject::cast(o);
int enum_length = curr->map()->EnumLength();
if (enum_length == kInvalidEnumCacheSentinel) return false;
+ if (curr->IsAccessCheckNeeded()) return false;
ASSERT(!curr->HasNamedInterceptor());
ASSERT(!curr->HasIndexedInterceptor());
- ASSERT(!curr->IsAccessCheckNeeded());
if (curr->NumberOfEnumElements() > 0) return false;
if (curr != this && enum_length != 0) return false;
}
@@ -6116,7 +6085,7 @@ void JSObject::DefineElementAccessor(Handle<JSObject> object,
return;
}
break;
- case NON_STRICT_ARGUMENTS_ELEMENTS: {
+ case SLOPPY_ARGUMENTS_ELEMENTS: {
// Ascertain whether we have read-only properties or an existing
// getter/setter pair in an arguments elements dictionary backing
// store.
@@ -6199,9 +6168,10 @@ void JSObject::DefinePropertyAccessor(Handle<JSObject> object,
}
-bool JSObject::CanSetCallback(Name* name) {
- ASSERT(!IsAccessCheckNeeded() ||
- GetIsolate()->MayNamedAccess(this, name, v8::ACCESS_SET));
+bool JSObject::CanSetCallback(Handle<JSObject> object, Handle<Name> name) {
+ Isolate* isolate = object->GetIsolate();
+ ASSERT(!object->IsAccessCheckNeeded() ||
+ isolate->MayNamedAccessWrapper(object, name, v8::ACCESS_SET));
// Check if there is an API defined callback object which prohibits
// callback overwriting in this object or its prototype chain.
@@ -6209,15 +6179,15 @@ bool JSObject::CanSetCallback(Name* name) {
// certain accessors such as window.location should not be allowed
// to be overwritten because allowing overwriting could potentially
// cause security problems.
- LookupResult callback_result(GetIsolate());
- LookupCallbackProperty(name, &callback_result);
+ LookupResult callback_result(isolate);
+ object->LookupCallbackProperty(*name, &callback_result);
if (callback_result.IsFound()) {
- Object* obj = callback_result.GetCallbackObject();
- if (obj->IsAccessorInfo()) {
- return !AccessorInfo::cast(obj)->prohibits_overwriting();
+ Object* callback_obj = callback_result.GetCallbackObject();
+ if (callback_obj->IsAccessorInfo()) {
+ return !AccessorInfo::cast(callback_obj)->prohibits_overwriting();
}
- if (obj->IsAccessorPair()) {
- return !AccessorPair::cast(obj)->prohibits_overwriting();
+ if (callback_obj->IsAccessorPair()) {
+ return !AccessorPair::cast(callback_obj)->prohibits_overwriting();
}
}
return true;
@@ -6267,7 +6237,7 @@ void JSObject::SetElementCallback(Handle<JSObject> object,
dictionary->set_requires_slow_elements();
// Update the dictionary backing store on the object.
- if (object->elements()->map() == heap->non_strict_arguments_elements_map()) {
+ if (object->elements()->map() == heap->sloppy_arguments_elements_map()) {
// Also delete any parameter alias.
//
// TODO(kmillikin): when deleting the last parameter alias we could
@@ -6324,8 +6294,8 @@ void JSObject::DefineAccessor(Handle<JSObject> object,
Isolate* isolate = object->GetIsolate();
// Check access rights if needed.
if (object->IsAccessCheckNeeded() &&
- !isolate->MayNamedAccess(*object, *name, v8::ACCESS_SET)) {
- isolate->ReportFailedAccessCheck(*object, v8::ACCESS_SET);
+ !isolate->MayNamedAccessWrapper(object, name, v8::ACCESS_SET)) {
+ isolate->ReportFailedAccessCheckWrapper(object, v8::ACCESS_SET);
return;
}
@@ -6349,21 +6319,20 @@ void JSObject::DefineAccessor(Handle<JSObject> object,
// Try to flatten before operating on the string.
if (name->IsString()) String::cast(*name)->TryFlatten();
- if (!object->CanSetCallback(*name)) return;
+ if (!JSObject::CanSetCallback(object, name)) return;
uint32_t index = 0;
bool is_element = name->AsArrayIndex(&index);
Handle<Object> old_value = isolate->factory()->the_hole_value();
- bool is_observed = FLAG_harmony_observation &&
- object->map()->is_observed() &&
+ bool is_observed = object->map()->is_observed() &&
*name != isolate->heap()->hidden_string();
bool preexists = false;
if (is_observed) {
if (is_element) {
preexists = HasLocalElement(object, index);
if (preexists && object->GetLocalElementAccessorPair(index) == NULL) {
- old_value = Object::GetElement(isolate, object, index);
+ old_value = Object::GetElementNoExceptionThrown(isolate, object, index);
}
} else {
LookupResult lookup(isolate);
@@ -6371,6 +6340,7 @@ void JSObject::DefineAccessor(Handle<JSObject> object,
preexists = lookup.IsProperty();
if (preexists && lookup.IsDataProperty()) {
old_value = Object::GetProperty(object, name);
+ CHECK_NOT_EMPTY_HANDLE(isolate, old_value);
}
}
}
@@ -6390,11 +6360,11 @@ void JSObject::DefineAccessor(Handle<JSObject> object,
}
-static bool TryAccessorTransition(JSObject* self,
- Map* transitioned_map,
+static bool TryAccessorTransition(Handle<JSObject> self,
+ Handle<Map> transitioned_map,
int target_descriptor,
AccessorComponent component,
- Object* accessor,
+ Handle<Object> accessor,
PropertyAttributes attributes) {
DescriptorArray* descs = transitioned_map->instance_descriptors();
PropertyDetails details = descs->GetDetails(target_descriptor);
@@ -6408,8 +6378,8 @@ static bool TryAccessorTransition(JSObject* self,
PropertyAttributes target_attributes = details.attributes();
// Reuse transition if adding same accessor with same attributes.
- if (target_accessor == accessor && target_attributes == attributes) {
- self->set_map(transitioned_map);
+ if (target_accessor == *accessor && target_attributes == attributes) {
+ JSObject::MigrateToMap(self, transitioned_map);
return true;
}
@@ -6471,14 +6441,14 @@ bool JSObject::DefineFastAccessor(Handle<JSObject> object,
object->map()->LookupTransition(*object, *name, &result);
if (result.IsFound()) {
- Map* target = result.GetTransitionTarget();
+ Handle<Map> target(result.GetTransitionTarget());
ASSERT(target->NumberOfOwnDescriptors() ==
object->map()->NumberOfOwnDescriptors());
// This works since descriptors are sorted in order of addition.
ASSERT(object->map()->instance_descriptors()->
GetKey(descriptor_number) == *name);
- return TryAccessorTransition(*object, target, descriptor_number,
- component, *accessor, attributes);
+ return TryAccessorTransition(object, target, descriptor_number,
+ component, accessor, attributes);
}
} else {
// If not, lookup a transition.
@@ -6486,12 +6456,12 @@ bool JSObject::DefineFastAccessor(Handle<JSObject> object,
// If there is a transition, try to follow it.
if (result.IsFound()) {
- Map* target = result.GetTransitionTarget();
+ Handle<Map> target(result.GetTransitionTarget());
int descriptor_number = target->LastAdded();
ASSERT(target->instance_descriptors()->GetKey(descriptor_number)
->Equals(*name));
- return TryAccessorTransition(*object, target, descriptor_number,
- component, *accessor, attributes);
+ return TryAccessorTransition(object, target, descriptor_number,
+ component, accessor, attributes);
}
}
@@ -6504,7 +6474,7 @@ bool JSObject::DefineFastAccessor(Handle<JSObject> object,
accessors->set(component, *accessor);
Handle<Map> new_map = CopyInsertDescriptor(Handle<Map>(object->map()),
name, accessors, attributes);
- object->set_map(*new_map);
+ JSObject::MigrateToMap(object, new_map);
return true;
}
@@ -6517,8 +6487,8 @@ Handle<Object> JSObject::SetAccessor(Handle<JSObject> object,
// Check access rights if needed.
if (object->IsAccessCheckNeeded() &&
- !isolate->MayNamedAccess(*object, *name, v8::ACCESS_SET)) {
- isolate->ReportFailedAccessCheck(*object, v8::ACCESS_SET);
+ !isolate->MayNamedAccessWrapper(object, name, v8::ACCESS_SET)) {
+ isolate->ReportFailedAccessCheckWrapper(object, v8::ACCESS_SET);
RETURN_HANDLE_IF_SCHEDULED_EXCEPTION(isolate, Object);
return factory->undefined_value();
}
@@ -6537,7 +6507,9 @@ Handle<Object> JSObject::SetAccessor(Handle<JSObject> object,
// Try to flatten before operating on the string.
if (name->IsString()) FlattenString(Handle<String>::cast(name));
- if (!object->CanSetCallback(*name)) return factory->undefined_value();
+ if (!JSObject::CanSetCallback(object, name)) {
+ return factory->undefined_value();
+ }
uint32_t index = 0;
bool is_element = name->AsArrayIndex(&index);
@@ -6567,7 +6539,7 @@ Handle<Object> JSObject::SetAccessor(Handle<JSObject> object,
case DICTIONARY_ELEMENTS:
break;
- case NON_STRICT_ARGUMENTS_ELEMENTS:
+ case SLOPPY_ARGUMENTS_ELEMENTS:
UNIMPLEMENTED();
break;
}
@@ -6601,8 +6573,8 @@ Handle<Object> JSObject::GetAccessor(Handle<JSObject> object,
// Check access rights if needed.
if (object->IsAccessCheckNeeded() &&
- !isolate->MayNamedAccess(*object, *name, v8::ACCESS_HAS)) {
- isolate->ReportFailedAccessCheck(*object, v8::ACCESS_HAS);
+ !isolate->MayNamedAccessWrapper(object, name, v8::ACCESS_HAS)) {
+ isolate->ReportFailedAccessCheckWrapper(object, v8::ACCESS_HAS);
RETURN_HANDLE_IF_SCHEDULED_EXCEPTION(isolate, Object);
return isolate->factory()->undefined_value();
}
@@ -6656,8 +6628,7 @@ Object* JSObject::SlowReverseLookup(Object* value) {
for (int i = 0; i < number_of_own_descriptors; i++) {
if (descs->GetType(i) == FIELD) {
Object* property = RawFastPropertyAt(descs->GetFieldIndex(i));
- if (FLAG_track_double_fields &&
- descs->GetDetails(i).representation().IsDouble()) {
+ if (descs->GetDetails(i).representation().IsDouble()) {
ASSERT(property->IsHeapNumber());
if (value->IsNumber() && property->Number() == value->Number()) {
return descs->GetKey(i);
@@ -6812,6 +6783,8 @@ MaybeObject* Map::ShareDescriptor(DescriptorArray* descriptors,
Map* map;
// Replace descriptors by new_descriptors in all maps that share it.
+
+ GetHeap()->incremental_marking()->RecordWrites(descriptors);
for (Object* current = GetBackPointer();
!current->IsUndefined();
current = map->GetBackPointer()) {
@@ -7509,9 +7482,11 @@ MaybeObject* CodeCache::UpdateNormalTypeCache(Name* name, Code* code) {
Object* CodeCache::Lookup(Name* name, Code::Flags flags) {
- flags = Code::RemoveTypeFromFlags(flags);
- Object* result = LookupDefaultCache(name, flags);
- if (result->IsCode()) return result;
+ Object* result = LookupDefaultCache(name, Code::RemoveTypeFromFlags(flags));
+ if (result->IsCode()) {
+ if (Code::cast(result)->flags() == flags) return result;
+ return GetHeap()->undefined_value();
+ }
return LookupNormalTypeCache(name, flags);
}
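
CodeCache::Lookup still keys the default cache on the type-stripped flags, but a hit is now returned only when its full flags (type included) match the request; anything else behaves like a miss. A hypothetical sketch of that shape with plain integer flags; kTypeMask, ToyCode and ToyCodeCache are assumptions, not V8's layout:

#include <cstdint>
#include <map>
#include <string>
#include <utility>

using Flags = std::uint32_t;
constexpr Flags kTypeMask = 0xF0;  // assumed bit layout for the "type" field
inline Flags RemoveType(Flags f) { return f & ~kTypeMask; }

struct ToyCode { Flags flags = 0; };

class ToyCodeCache {
 public:
  void Put(const std::string& name, ToyCode code) {
    default_cache_[{name, RemoveType(code.flags)}] = code;
  }
  // A hit is only returned when its full flags match; a type mismatch now
  // behaves like a miss (the analogue of returning undefined).
  const ToyCode* Lookup(const std::string& name, Flags flags) const {
    auto it = default_cache_.find({name, RemoveType(flags)});
    if (it == default_cache_.end()) return nullptr;
    return it->second.flags == flags ? &it->second : nullptr;
  }

 private:
  std::map<std::pair<std::string, Flags>, ToyCode> default_cache_;
};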
@@ -7859,7 +7834,8 @@ MaybeObject* PolymorphicCodeCacheHashTable::Put(MapHandleList* maps,
void FixedArray::Shrink(int new_length) {
ASSERT(0 <= new_length && new_length <= length());
if (new_length < length()) {
- RightTrimFixedArray<FROM_MUTATOR>(GetHeap(), this, length() - new_length);
+ RightTrimFixedArray<Heap::FROM_MUTATOR>(
+ GetHeap(), this, length() - new_length);
}
}
@@ -8224,7 +8200,7 @@ static bool IsIdentifier(UnicodeCache* cache, Name* name) {
// Checks whether the buffer contains an identifier (no escape).
if (!name->IsString()) return false;
String* string = String::cast(name);
- if (string->length() == 0) return false;
+ if (string->length() == 0) return true;
ConsStringIteratorOp op;
StringCharacterStream stream(string, &op);
if (!cache->IsIdentifierStart(stream.GetNext())) {
@@ -8240,9 +8216,7 @@ static bool IsIdentifier(UnicodeCache* cache, Name* name) {
bool Name::IsCacheable(Isolate* isolate) {
- return IsSymbol() ||
- IsIdentifier(isolate->unicode_cache(), this) ||
- this == isolate->heap()->hidden_string();
+ return IsSymbol() || IsIdentifier(isolate->unicode_cache(), this);
}
@@ -9199,10 +9173,7 @@ Handle<String> SeqString::Truncate(Handle<SeqString> string, int new_length) {
// that are a multiple of pointer size.
heap->CreateFillerObjectAt(start_of_string + new_size, delta);
}
- if (Marking::IsBlack(Marking::MarkBitFrom(start_of_string))) {
- MemoryChunk::IncrementLiveBytesFromMutator(start_of_string, -delta);
- }
-
+ heap->AdjustLiveBytes(start_of_string, -delta, Heap::FROM_MUTATOR);
if (new_length == 0) return heap->isolate()->factory()->empty_string();
return string;
@@ -9308,11 +9279,12 @@ static void TrimEnumCache(Heap* heap, Map* map, DescriptorArray* descriptors) {
int to_trim = enum_cache->length() - live_enum;
if (to_trim <= 0) return;
- RightTrimFixedArray<FROM_GC>(heap, descriptors->GetEnumCache(), to_trim);
+ RightTrimFixedArray<Heap::FROM_GC>(
+ heap, descriptors->GetEnumCache(), to_trim);
if (!descriptors->HasEnumIndicesCache()) return;
FixedArray* enum_indices_cache = descriptors->GetEnumIndicesCache();
- RightTrimFixedArray<FROM_GC>(heap, enum_indices_cache, to_trim);
+ RightTrimFixedArray<Heap::FROM_GC>(heap, enum_indices_cache, to_trim);
}
@@ -9324,7 +9296,7 @@ static void TrimDescriptorArray(Heap* heap,
int to_trim = number_of_descriptors - number_of_own_descriptors;
if (to_trim == 0) return;
- RightTrimFixedArray<FROM_GC>(
+ RightTrimFixedArray<Heap::FROM_GC>(
heap, descriptors, to_trim * DescriptorArray::kDescriptorSize);
descriptors->SetNumberOfDescriptors(number_of_own_descriptors);
@@ -9398,7 +9370,7 @@ void Map::ClearNonLiveTransitions(Heap* heap) {
int trim = t->number_of_transitions() - transition_index;
if (trim > 0) {
- RightTrimFixedArray<FROM_GC>(heap, t, t->IsSimpleTransition()
+ RightTrimFixedArray<Heap::FROM_GC>(heap, t, t->IsSimpleTransition()
? trim : trim * TransitionArray::kTransitionSize);
}
}
@@ -9448,13 +9420,13 @@ bool Map::EquivalentToForNormalization(Map* other,
void ConstantPoolArray::ConstantPoolIterateBody(ObjectVisitor* v) {
- if (count_of_ptr_entries() > 0) {
- int first_ptr_offset = OffsetOfElementAt(first_ptr_index());
- int last_ptr_offset =
- OffsetOfElementAt(first_ptr_index() + count_of_ptr_entries() - 1);
- v->VisitPointers(
- HeapObject::RawField(this, first_ptr_offset),
- HeapObject::RawField(this, last_ptr_offset));
+ for (int i = 0; i < count_of_code_ptr_entries(); i++) {
+ int index = first_code_ptr_index() + i;
+ v->VisitCodeEntry(reinterpret_cast<Address>(RawFieldOfElementAt(index)));
+ }
+ for (int i = 0; i < count_of_heap_ptr_entries(); i++) {
+ int index = first_heap_ptr_index() + i;
+ v->VisitPointer(RawFieldOfElementAt(index));
}
}
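
ConstantPoolIterateBody no longer hands the visitor one flat pointer range; code-pointer entries and heap-pointer entries now get separate callbacks. A reduced sketch of that traversal; ToyVisitor and ToyConstantPool are invented, and the real pool also carries raw int64/int32 sections that are skipped here:

#include <cstdint>
#include <vector>

struct ToyVisitor {
  virtual ~ToyVisitor() = default;
  virtual void VisitCodeEntry(std::uintptr_t* slot) = 0;
  virtual void VisitHeapPointer(std::uintptr_t* slot) = 0;
};

struct ToyConstantPool {
  std::vector<std::uintptr_t> entries;  // [code ptrs...][heap ptrs...][data]
  int first_code_ptr_index = 0;
  int count_of_code_ptr_entries = 0;
  int first_heap_ptr_index = 0;
  int count_of_heap_ptr_entries = 0;

  // Each pointer section is walked with the callback appropriate to it.
  void IterateBody(ToyVisitor* v) {
    for (int i = 0; i < count_of_code_ptr_entries; i++) {
      v->VisitCodeEntry(&entries[first_code_ptr_index + i]);
    }
    for (int i = 0; i < count_of_heap_ptr_entries; i++) {
      v->VisitHeapPointer(&entries[first_heap_ptr_index + i]);
    }
  }
};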
@@ -9622,38 +9594,42 @@ void SharedFunctionInfo::EvictFromOptimizedCodeMap(Code* optimized_code,
const char* reason) {
if (optimized_code_map()->IsSmi()) return;
- int i;
- bool removed_entry = false;
FixedArray* code_map = FixedArray::cast(optimized_code_map());
- for (i = kEntriesStart; i < code_map->length(); i += kEntryLength) {
- ASSERT(code_map->get(i)->IsNativeContext());
- if (Code::cast(code_map->get(i + 1)) == optimized_code) {
+ int dst = kEntriesStart;
+ int length = code_map->length();
+ for (int src = kEntriesStart; src < length; src += kEntryLength) {
+ ASSERT(code_map->get(src)->IsNativeContext());
+ if (Code::cast(code_map->get(src + kCachedCodeOffset)) == optimized_code) {
+ // Evict the src entry by not copying it to the dst entry.
if (FLAG_trace_opt) {
PrintF("[evicting entry from optimizing code map (%s) for ", reason);
ShortPrint();
- PrintF("]\n");
+ BailoutId osr(Smi::cast(code_map->get(src + kOsrAstIdOffset))->value());
+ if (osr.IsNone()) {
+ PrintF("]\n");
+ } else {
+ PrintF(" (osr ast id %d)]\n", osr.ToInt());
+ }
}
- removed_entry = true;
- break;
+ } else {
+ // Keep the src entry by copying it to the dst entry.
+ if (dst != src) {
+ code_map->set(dst + kContextOffset,
+ code_map->get(src + kContextOffset));
+ code_map->set(dst + kCachedCodeOffset,
+ code_map->get(src + kCachedCodeOffset));
+ code_map->set(dst + kLiteralsOffset,
+ code_map->get(src + kLiteralsOffset));
+ code_map->set(dst + kOsrAstIdOffset,
+ code_map->get(src + kOsrAstIdOffset));
+ }
+ dst += kEntryLength;
}
}
- while (i < (code_map->length() - kEntryLength)) {
- code_map->set(i + kContextOffset,
- code_map->get(i + kContextOffset + kEntryLength));
- code_map->set(i + kCachedCodeOffset,
- code_map->get(i + kCachedCodeOffset + kEntryLength));
- code_map->set(i + kLiteralsOffset,
- code_map->get(i + kLiteralsOffset + kEntryLength));
- code_map->set(i + kOsrAstIdOffset,
- code_map->get(i + kOsrAstIdOffset + kEntryLength));
- i += kEntryLength;
- }
- if (removed_entry) {
+ if (dst != length) {
// Always trim even when array is cleared because of heap verifier.
- RightTrimFixedArray<FROM_MUTATOR>(GetHeap(), code_map, kEntryLength);
- if (code_map->length() == kEntriesStart) {
- ClearOptimizedCodeMap();
- }
+ RightTrimFixedArray<Heap::FROM_MUTATOR>(GetHeap(), code_map, length - dst);
+ if (code_map->length() == kEntriesStart) ClearOptimizedCodeMap();
}
}
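
The rewritten EvictFromOptimizedCodeMap replaces "find one entry, then shift the tail" with a single src/dst compaction sweep that can drop any number of matching entries and trims the array once at the end. A minimal sketch of the same sweep over a flat vector of fixed-stride entries; the stride and slot offset below are assumptions that only loosely mirror the real code map layout:

#include <vector>

constexpr int kEntryLength = 4;       // context, code, literals, osr ast id
constexpr int kCachedCodeOffset = 1;

// Removes, in place, every entry whose "code" slot equals evicted_code.
void EvictEntries(std::vector<int>* code_map, int evicted_code) {
  int dst = 0;
  const int length = static_cast<int>(code_map->size());
  for (int src = 0; src < length; src += kEntryLength) {
    // Evicted entries are simply not copied to dst.
    if ((*code_map)[src + kCachedCodeOffset] == evicted_code) continue;
    if (dst != src) {
      for (int k = 0; k < kEntryLength; k++) {
        (*code_map)[dst + k] = (*code_map)[src + k];
      }
    }
    dst += kEntryLength;
  }
  code_map->resize(dst);  // the analogue of RightTrimFixedArray
}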
@@ -9663,7 +9639,7 @@ void SharedFunctionInfo::TrimOptimizedCodeMap(int shrink_by) {
ASSERT(shrink_by % kEntryLength == 0);
ASSERT(shrink_by <= code_map->length() - kEntriesStart);
// Always trim even when array is cleared because of heap verifier.
- RightTrimFixedArray<FROM_GC>(GetHeap(), code_map, shrink_by);
+ RightTrimFixedArray<Heap::FROM_GC>(GetHeap(), code_map, shrink_by);
if (code_map->length() == kEntriesStart) {
ClearOptimizedCodeMap();
}
@@ -9781,7 +9757,7 @@ void JSFunction::SetPrototype(Handle<JSFunction> function,
// different prototype.
Handle<Map> new_map = Map::Copy(handle(function->map()));
- function->set_map(*new_map);
+ JSObject::MigrateToMap(function, new_map);
new_map->set_constructor(*value);
new_map->set_non_instance_prototype(true);
Isolate* isolate = new_map->GetIsolate();
@@ -9798,15 +9774,15 @@ void JSFunction::SetPrototype(Handle<JSFunction> function,
void JSFunction::RemovePrototype() {
Context* native_context = context()->native_context();
- Map* no_prototype_map = shared()->is_classic_mode()
- ? native_context->function_without_prototype_map()
- : native_context->strict_mode_function_without_prototype_map();
+ Map* no_prototype_map = shared()->strict_mode() == SLOPPY
+ ? native_context->sloppy_function_without_prototype_map()
+ : native_context->strict_function_without_prototype_map();
if (map() == no_prototype_map) return;
- ASSERT(map() == (shared()->is_classic_mode()
- ? native_context->function_map()
- : native_context->strict_mode_function_map()));
+ ASSERT(map() == (shared()->strict_mode() == SLOPPY
+ ? native_context->sloppy_function_map()
+ : native_context->strict_function_map()));
set_map(no_prototype_map);
set_prototype_or_initial_map(no_prototype_map->GetHeap()->the_hole_value());
@@ -10491,21 +10467,20 @@ Map* Code::FindFirstMap() {
}
-void Code::ReplaceNthObject(int n,
- Map* match_map,
- Object* replace_with) {
+void Code::FindAndReplace(const FindAndReplacePattern& pattern) {
ASSERT(is_inline_cache_stub() || is_handler());
DisallowHeapAllocation no_allocation;
int mask = RelocInfo::ModeMask(RelocInfo::EMBEDDED_OBJECT);
+ STATIC_ASSERT(FindAndReplacePattern::kMaxCount < 32);
+ int current_pattern = 0;
for (RelocIterator it(this, mask); !it.done(); it.next()) {
RelocInfo* info = it.rinfo();
Object* object = info->target_object();
if (object->IsHeapObject()) {
- if (HeapObject::cast(object)->map() == match_map) {
- if (--n == 0) {
- info->set_target_object(replace_with);
- return;
- }
+ Map* map = HeapObject::cast(object)->map();
+ if (map == *pattern.find_[current_pattern]) {
+ info->set_target_object(*pattern.replace_[current_pattern]);
+ if (++current_pattern == pattern.count_) return;
}
}
}
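
Code::FindAndReplace folds the old ReplaceNthObject/ReplaceNthCell helpers into one pass over the relocation entries that applies an ordered list of (find, replace) pairs, consuming them one at a time. A sketch of the same control flow over a plain vector of slots; Slot and Pattern are invented types:

#include <cstddef>
#include <vector>

struct Slot { int map_id; int target; };

struct Pattern {
  std::vector<int> find;     // map ids to look for, in order
  std::vector<int> replace;  // replacement targets, same length as find
};

// Walks the slots once; each time the current slot matches the pattern
// currently being searched for, the target is rewritten and the search moves
// on to the next pattern.  Stops as soon as every pattern has been applied.
void FindAndReplace(std::vector<Slot>* slots, const Pattern& pattern) {
  if (pattern.find.empty()) return;
  std::size_t current = 0;
  for (Slot& slot : *slots) {
    if (slot.map_id == pattern.find[current]) {
      slot.target = pattern.replace[current];
      if (++current == pattern.find.size()) return;
    }
  }
}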
@@ -10540,11 +10515,6 @@ void Code::FindAllTypes(TypeHandleList* types) {
}
-void Code::ReplaceFirstMap(Map* replace_with) {
- ReplaceNthObject(1, GetHeap()->meta_map(), replace_with);
-}
-
-
Code* Code::FindFirstHandler() {
ASSERT(is_inline_cache_stub());
DisallowHeapAllocation no_allocation;
@@ -10590,21 +10560,6 @@ Name* Code::FindFirstName() {
}
-void Code::ReplaceNthCell(int n, Cell* replace_with) {
- ASSERT(is_inline_cache_stub());
- DisallowHeapAllocation no_allocation;
- int mask = RelocInfo::ModeMask(RelocInfo::CELL);
- for (RelocIterator it(this, mask); !it.done(); it.next()) {
- RelocInfo* info = it.rinfo();
- if (--n == 0) {
- info->set_target_cell(replace_with);
- return;
- }
- }
- UNREACHABLE();
-}
-
-
void Code::ClearInlineCaches() {
ClearInlineCaches(NULL);
}
@@ -10624,25 +10579,26 @@ void Code::ClearInlineCaches(Code::Kind* kind) {
Code* target(Code::GetCodeFromTargetAddress(info->target_address()));
if (target->is_inline_cache_stub()) {
if (kind == NULL || *kind == target->kind()) {
- IC::Clear(this->GetIsolate(), info->pc());
+ IC::Clear(this->GetIsolate(), info->pc(),
+ info->host()->constant_pool());
}
}
}
}
-void Code::ClearTypeFeedbackCells(Heap* heap) {
+void Code::ClearTypeFeedbackInfo(Heap* heap) {
if (kind() != FUNCTION) return;
Object* raw_info = type_feedback_info();
if (raw_info->IsTypeFeedbackInfo()) {
- TypeFeedbackCells* type_feedback_cells =
- TypeFeedbackInfo::cast(raw_info)->type_feedback_cells();
- for (int i = 0; i < type_feedback_cells->CellCount(); i++) {
- Cell* cell = type_feedback_cells->GetCell(i);
- // Don't clear AllocationSites
- Object* value = cell->value();
- if (value == NULL || !value->IsAllocationSite()) {
- cell->set_value(TypeFeedbackCells::RawUninitializedSentinel(heap));
+ FixedArray* feedback_vector =
+ TypeFeedbackInfo::cast(raw_info)->feedback_vector();
+ for (int i = 0; i < feedback_vector->length(); i++) {
+ Object* obj = feedback_vector->get(i);
+ if (!obj->IsAllocationSite()) {
+ // TODO(mvstanton): Can't I avoid a write barrier for this sentinel?
+ feedback_vector->set(i,
+ TypeFeedbackInfo::RawUninitializedSentinel(heap));
}
}
}
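
With type feedback moved from per-call-site Cells into a flat feedback vector, clearing it now means overwriting every slot that is not an AllocationSite with the uninitialized sentinel, so pretenuring decisions survive the clear. A toy version of that loop; FeedbackValue and kUninitializedSentinel are assumed stand-ins:

#include <vector>

enum class FeedbackKind { kUninitialized, kAllocationSite, kOther };

struct FeedbackValue { FeedbackKind kind; };

constexpr FeedbackValue kUninitializedSentinel{FeedbackKind::kUninitialized};

// AllocationSites carry allocation-tracking state that must outlive a clear,
// so they are the only entries left untouched.
void ClearTypeFeedback(std::vector<FeedbackValue>* feedback_vector) {
  for (FeedbackValue& slot : *feedback_vector) {
    if (slot.kind != FeedbackKind::kAllocationSite) {
      slot = kUninitializedSentinel;
    }
  }
}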
@@ -11068,9 +11024,7 @@ void Code::PrintExtraICState(FILE* out, Kind kind, ExtraICState extra) {
switch (kind) {
case STORE_IC:
case KEYED_STORE_IC:
- if (extra == kStrictMode) {
- name = "STRICT";
- }
+ if (extra == STRICT) name = "STRICT";
break;
default:
break;
@@ -11091,8 +11045,7 @@ void Code::Disassemble(const char* name, FILE* out) {
}
if (is_inline_cache_stub()) {
PrintF(out, "ic_state = %s\n", ICState2String(ic_state()));
- PrintExtraICState(out, kind(), needs_extended_extra_ic_state(kind()) ?
- extended_extra_ic_state() : extra_ic_state());
+ PrintExtraICState(out, kind(), extra_ic_state());
if (ic_state() == MONOMORPHIC) {
PrintF(out, "type = %s\n", StubType2String(type()));
}
@@ -11192,33 +11145,20 @@ Handle<FixedArray> JSObject::SetFastElementsCapacityAndLength(
int capacity,
int length,
SetFastElementsCapacitySmiMode smi_mode) {
- CALL_HEAP_FUNCTION(
- object->GetIsolate(),
- object->SetFastElementsCapacityAndLength(capacity, length, smi_mode),
- FixedArray);
-}
-
-
-MaybeObject* JSObject::SetFastElementsCapacityAndLength(
- int capacity,
- int length,
- SetFastElementsCapacitySmiMode smi_mode) {
- Heap* heap = GetHeap();
// We should never end in here with a pixel or external array.
- ASSERT(!HasExternalArrayElements());
+ ASSERT(!object->HasExternalArrayElements());
// Allocate a new fast elements backing store.
- FixedArray* new_elements;
- MaybeObject* maybe = heap->AllocateUninitializedFixedArray(capacity);
- if (!maybe->To(&new_elements)) return maybe;
+ Handle<FixedArray> new_elements =
+ object->GetIsolate()->factory()->NewUninitializedFixedArray(capacity);
- ElementsKind elements_kind = GetElementsKind();
+ ElementsKind elements_kind = object->GetElementsKind();
ElementsKind new_elements_kind;
// The resized array has FAST_*_SMI_ELEMENTS if the capacity mode forces it,
// or if it's allowed and the old elements array contained only SMIs.
bool has_fast_smi_elements =
(smi_mode == kForceSmiElements) ||
- ((smi_mode == kAllowSmiElements) && HasFastSmiElements());
+ ((smi_mode == kAllowSmiElements) && object->HasFastSmiElements());
if (has_fast_smi_elements) {
if (IsHoleyElementsKind(elements_kind)) {
new_elements_kind = FAST_HOLEY_SMI_ELEMENTS;
@@ -11232,83 +11172,47 @@ MaybeObject* JSObject::SetFastElementsCapacityAndLength(
new_elements_kind = FAST_ELEMENTS;
}
}
- FixedArrayBase* old_elements = elements();
+ Handle<FixedArrayBase> old_elements(object->elements());
ElementsAccessor* accessor = ElementsAccessor::ForKind(new_elements_kind);
- MaybeObject* maybe_obj =
- accessor->CopyElements(this, new_elements, elements_kind);
- if (maybe_obj->IsFailure()) return maybe_obj;
+ accessor->CopyElements(object, new_elements, elements_kind);
- if (elements_kind != NON_STRICT_ARGUMENTS_ELEMENTS) {
- Map* new_map = map();
- if (new_elements_kind != elements_kind) {
- MaybeObject* maybe =
- GetElementsTransitionMap(GetIsolate(), new_elements_kind);
- if (!maybe->To(&new_map)) return maybe;
- }
- ValidateElements();
- set_map_and_elements(new_map, new_elements);
+ if (elements_kind != SLOPPY_ARGUMENTS_ELEMENTS) {
+ Handle<Map> new_map = (new_elements_kind != elements_kind)
+ ? GetElementsTransitionMap(object, new_elements_kind)
+ : handle(object->map());
+ object->ValidateElements();
+ object->set_map_and_elements(*new_map, *new_elements);
// Transition through the allocation site as well if present.
- maybe_obj = UpdateAllocationSite(new_elements_kind);
- if (maybe_obj->IsFailure()) return maybe_obj;
+ JSObject::UpdateAllocationSite(object, new_elements_kind);
} else {
- FixedArray* parameter_map = FixedArray::cast(old_elements);
- parameter_map->set(1, new_elements);
+ Handle<FixedArray> parameter_map = Handle<FixedArray>::cast(old_elements);
+ parameter_map->set(1, *new_elements);
}
if (FLAG_trace_elements_transitions) {
- PrintElementsTransition(stdout, elements_kind, old_elements,
- GetElementsKind(), new_elements);
+ PrintElementsTransition(stdout, object, elements_kind, old_elements,
+ object->GetElementsKind(), new_elements);
}
- if (IsJSArray()) {
- JSArray::cast(this)->set_length(Smi::FromInt(length));
+ if (object->IsJSArray()) {
+ Handle<JSArray>::cast(object)->set_length(Smi::FromInt(length));
}
return new_elements;
}
-bool Code::IsWeakEmbeddedObject(Kind kind, Object* object) {
- if (kind != Code::OPTIMIZED_FUNCTION) return false;
-
- if (object->IsMap()) {
- return Map::cast(object)->CanTransition() &&
- FLAG_collect_maps &&
- FLAG_weak_embedded_maps_in_optimized_code;
- }
-
- if (object->IsJSObject() ||
- (object->IsCell() && Cell::cast(object)->value()->IsJSObject())) {
- return FLAG_weak_embedded_objects_in_optimized_code;
- }
-
- return false;
-}
-
-
void JSObject::SetFastDoubleElementsCapacityAndLength(Handle<JSObject> object,
int capacity,
int length) {
- CALL_HEAP_FUNCTION_VOID(
- object->GetIsolate(),
- object->SetFastDoubleElementsCapacityAndLength(capacity, length));
-}
-
-
-MaybeObject* JSObject::SetFastDoubleElementsCapacityAndLength(
- int capacity,
- int length) {
- Heap* heap = GetHeap();
// We should never end in here with a pixel or external array.
- ASSERT(!HasExternalArrayElements());
+ ASSERT(!object->HasExternalArrayElements());
- FixedArrayBase* elems;
- { MaybeObject* maybe_obj =
- heap->AllocateUninitializedFixedDoubleArray(capacity);
- if (!maybe_obj->To(&elems)) return maybe_obj;
- }
+ Handle<FixedArrayBase> elems =
+ object->GetIsolate()->factory()->NewFixedDoubleArray(capacity);
- ElementsKind elements_kind = GetElementsKind();
+ ElementsKind elements_kind = object->GetElementsKind();
+ CHECK(elements_kind != SLOPPY_ARGUMENTS_ELEMENTS);
ElementsKind new_elements_kind = elements_kind;
if (IsHoleyElementsKind(elements_kind)) {
new_elements_kind = FAST_HOLEY_DOUBLE_ELEMENTS;
@@ -11316,49 +11220,37 @@ MaybeObject* JSObject::SetFastDoubleElementsCapacityAndLength(
new_elements_kind = FAST_DOUBLE_ELEMENTS;
}
- Map* new_map;
- { MaybeObject* maybe_obj =
- GetElementsTransitionMap(heap->isolate(), new_elements_kind);
- if (!maybe_obj->To(&new_map)) return maybe_obj;
- }
+ Handle<Map> new_map = GetElementsTransitionMap(object, new_elements_kind);
- FixedArrayBase* old_elements = elements();
+ Handle<FixedArrayBase> old_elements(object->elements());
ElementsAccessor* accessor = ElementsAccessor::ForKind(FAST_DOUBLE_ELEMENTS);
- { MaybeObject* maybe_obj =
- accessor->CopyElements(this, elems, elements_kind);
- if (maybe_obj->IsFailure()) return maybe_obj;
- }
- if (elements_kind != NON_STRICT_ARGUMENTS_ELEMENTS) {
- ValidateElements();
- set_map_and_elements(new_map, elems);
- } else {
- FixedArray* parameter_map = FixedArray::cast(old_elements);
- parameter_map->set(1, elems);
- }
+ accessor->CopyElements(object, elems, elements_kind);
+
+ object->ValidateElements();
+ object->set_map_and_elements(*new_map, *elems);
if (FLAG_trace_elements_transitions) {
- PrintElementsTransition(stdout, elements_kind, old_elements,
- GetElementsKind(), elems);
+ PrintElementsTransition(stdout, object, elements_kind, old_elements,
+ object->GetElementsKind(), elems);
}
- if (IsJSArray()) {
- JSArray::cast(this)->set_length(Smi::FromInt(length));
+ if (object->IsJSArray()) {
+ Handle<JSArray>::cast(object)->set_length(Smi::FromInt(length));
}
-
- return this;
}
-MaybeObject* JSArray::Initialize(int capacity, int length) {
+// static
+void JSArray::Initialize(Handle<JSArray> array, int capacity, int length) {
ASSERT(capacity >= 0);
- return GetHeap()->AllocateJSArrayStorage(this, length, capacity,
- INITIALIZE_ARRAY_ELEMENTS_WITH_HOLE);
+ array->GetIsolate()->factory()->NewJSArrayStorage(
+ array, length, capacity, INITIALIZE_ARRAY_ELEMENTS_WITH_HOLE);
}
-void JSArray::Expand(int required_size) {
- GetIsolate()->factory()->SetElementsCapacityAndLength(
- Handle<JSArray>(this), required_size, required_size);
+void JSArray::Expand(Handle<JSArray> array, int required_size) {
+ ElementsAccessor* accessor = array->GetElementsAccessor();
+ accessor->SetCapacityAndLength(array, required_size, required_size);
}
@@ -11370,12 +11262,17 @@ static bool GetOldValue(Isolate* isolate,
uint32_t index,
List<Handle<Object> >* old_values,
List<uint32_t>* indices) {
- PropertyAttributes attributes = object->GetLocalElementAttribute(index);
+ PropertyAttributes attributes =
+ JSReceiver::GetLocalElementAttribute(object, index);
ASSERT(attributes != ABSENT);
if (attributes == DONT_DELETE) return false;
- old_values->Add(object->GetLocalElementAccessorPair(index) == NULL
- ? Object::GetElement(isolate, object, index)
- : Handle<Object>::cast(isolate->factory()->the_hole_value()));
+ Handle<Object> value;
+ if (object->GetLocalElementAccessorPair(index) != NULL) {
+ value = Handle<Object>::cast(isolate->factory()->the_hole_value());
+ } else {
+ value = Object::GetElementNoExceptionThrown(isolate, object, index);
+ }
+ old_values->Add(value);
indices->Add(index);
return true;
}
@@ -11430,67 +11327,67 @@ static void EndPerformSplice(Handle<JSArray> object) {
}
-MaybeObject* JSArray::SetElementsLength(Object* len) {
+Handle<Object> JSArray::SetElementsLength(Handle<JSArray> array,
+ Handle<Object> new_length_handle) {
// We should never end in here with a pixel or external array.
- ASSERT(AllowsSetElementsLength());
- if (!(FLAG_harmony_observation && map()->is_observed()))
- return GetElementsAccessor()->SetLength(this, len);
+ ASSERT(array->AllowsSetElementsLength());
+ if (!array->map()->is_observed()) {
+ return array->GetElementsAccessor()->SetLength(array, new_length_handle);
+ }
- Isolate* isolate = GetIsolate();
- HandleScope scope(isolate);
- Handle<JSArray> self(this);
+ Isolate* isolate = array->GetIsolate();
List<uint32_t> indices;
List<Handle<Object> > old_values;
- Handle<Object> old_length_handle(self->length(), isolate);
- Handle<Object> new_length_handle(len, isolate);
+ Handle<Object> old_length_handle(array->length(), isolate);
uint32_t old_length = 0;
CHECK(old_length_handle->ToArrayIndex(&old_length));
uint32_t new_length = 0;
- if (!new_length_handle->ToArrayIndex(&new_length))
- return Failure::InternalError();
+ CHECK(new_length_handle->ToArrayIndex(&new_length));
static const PropertyAttributes kNoAttrFilter = NONE;
- int num_elements = self->NumberOfLocalElements(kNoAttrFilter);
+ int num_elements = array->NumberOfLocalElements(kNoAttrFilter);
if (num_elements > 0) {
if (old_length == static_cast<uint32_t>(num_elements)) {
// Simple case for arrays without holes.
for (uint32_t i = old_length - 1; i + 1 > new_length; --i) {
- if (!GetOldValue(isolate, self, i, &old_values, &indices)) break;
+ if (!GetOldValue(isolate, array, i, &old_values, &indices)) break;
}
} else {
// For sparse arrays, only iterate over existing elements.
// TODO(rafaelw): For fast, sparse arrays, we can avoid iterating over
// the to-be-removed indices twice.
Handle<FixedArray> keys = isolate->factory()->NewFixedArray(num_elements);
- self->GetLocalElementKeys(*keys, kNoAttrFilter);
+ array->GetLocalElementKeys(*keys, kNoAttrFilter);
while (num_elements-- > 0) {
uint32_t index = NumberToUint32(keys->get(num_elements));
if (index < new_length) break;
- if (!GetOldValue(isolate, self, index, &old_values, &indices)) break;
+ if (!GetOldValue(isolate, array, index, &old_values, &indices)) break;
}
}
}
- MaybeObject* result =
- self->GetElementsAccessor()->SetLength(*self, *new_length_handle);
- Handle<Object> hresult;
- if (!result->ToHandle(&hresult, isolate)) return result;
+ Handle<Object> hresult =
+ array->GetElementsAccessor()->SetLength(array, new_length_handle);
+ RETURN_IF_EMPTY_HANDLE_VALUE(isolate, hresult, hresult);
- CHECK(self->length()->ToArrayIndex(&new_length));
- if (old_length == new_length) return *hresult;
+ CHECK(array->length()->ToArrayIndex(&new_length));
+ if (old_length == new_length) return hresult;
- BeginPerformSplice(self);
+ BeginPerformSplice(array);
for (int i = 0; i < indices.length(); ++i) {
+ // For deletions where the property was an accessor, old_values[i]
+ // will be the hole, which instructs EnqueueChangeRecord to elide
+ // the "oldValue" property.
JSObject::EnqueueChangeRecord(
- self, "delete", isolate->factory()->Uint32ToString(indices[i]),
+ array, "delete", isolate->factory()->Uint32ToString(indices[i]),
old_values[i]);
}
JSObject::EnqueueChangeRecord(
- self, "update", isolate->factory()->length_string(),
+ array, "update", isolate->factory()->length_string(),
old_length_handle);
- EndPerformSplice(self);
+ EndPerformSplice(array);
uint32_t index = Min(old_length, new_length);
uint32_t add_count = new_length > old_length ? new_length - old_length : 0;
@@ -11498,18 +11395,21 @@ MaybeObject* JSArray::SetElementsLength(Object* len) {
Handle<JSArray> deleted = isolate->factory()->NewJSArray(0);
if (delete_count > 0) {
for (int i = indices.length() - 1; i >= 0; i--) {
+ // Skip deletions where the property was an accessor, leaving holes
+ // in the array of old values.
+ if (old_values[i]->IsTheHole()) continue;
JSObject::SetElement(deleted, indices[i] - index, old_values[i], NONE,
- kNonStrictMode);
+ SLOPPY);
}
SetProperty(deleted, isolate->factory()->length_string(),
isolate->factory()->NewNumberFromUint(delete_count),
- NONE, kNonStrictMode);
+ NONE, SLOPPY);
}
- EnqueueSpliceRecord(self, index, deleted, add_count);
+ EnqueueSpliceRecord(array, index, deleted, add_count);
- return *hresult;
+ return hresult;
}
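
The handlified JSArray::SetElementsLength keeps the observed-array slow path: capture the values that are about to disappear, shrink, then emit per-index "delete" records, a "length" update, and one splice summary. A heavily reduced sketch of that ordering with plain vectors; ChangeRecord and its fields are assumptions, and the begin/end-splice bracketing and accessor-backed hole handling from the real code are omitted:

#include <string>
#include <utility>
#include <vector>

struct ChangeRecord {
  std::string type;   // "delete", "update" or "splice"
  std::string name;   // element index as a string, or "length"
  int payload;        // old value; reused as delete count for "splice"
};

void ShrinkObservedArray(std::vector<int>* elements, std::size_t new_length,
                         std::vector<ChangeRecord>* records) {
  const std::size_t old_length = elements->size();
  if (new_length >= old_length) return;

  // 1. Capture the values that are about to disappear, highest index first,
  //    mirroring the collection order in the hunk above.
  std::vector<std::pair<std::size_t, int>> old_values;
  for (std::size_t i = old_length; i-- > new_length;) {
    old_values.emplace_back(i, (*elements)[i]);
  }

  // 2. Perform the actual mutation.
  elements->resize(new_length);

  // 3. Notify: per-index deletes, then the length update, then the splice.
  for (const auto& entry : old_values) {
    records->push_back({"delete", std::to_string(entry.first), entry.second});
  }
  records->push_back({"update", "length", static_cast<int>(old_length)});
  records->push_back({"splice", std::to_string(new_length),
                      static_cast<int>(old_length - new_length)});
}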
@@ -11764,23 +11664,14 @@ bool DependentCode::MarkCodeForDeoptimization(
// Mark all the code that needs to be deoptimized.
bool marked = false;
for (int i = start; i < end; i++) {
- Object* object = object_at(i);
- // TODO(hpayer): This is a temporary hack. Foreign objects move after
- // new space evacuation. Since pretenuring may mark these objects as aborted
- // we have to follow the forwarding pointer in that case.
- MapWord map_word = HeapObject::cast(object)->map_word();
- if (map_word.IsForwardingAddress()) {
- object = map_word.ToForwardingAddress();
- }
- if (object->IsCode()) {
- Code* code = Code::cast(object);
+ if (is_code_at(i)) {
+ Code* code = code_at(i);
if (!code->marked_for_deoptimization()) {
code->set_marked_for_deoptimization(true);
marked = true;
}
} else {
- CompilationInfo* info = reinterpret_cast<CompilationInfo*>(
- Foreign::cast(object)->foreign_address());
+ CompilationInfo* info = compilation_info_at(i);
info->AbortDueToDependencyChange();
}
}
@@ -11886,7 +11777,7 @@ Handle<Object> JSObject::SetPrototype(Handle<JSObject> object,
new_map->set_prototype(*value);
}
ASSERT(new_map->prototype() == *value);
- real_receiver->set_map(*new_map);
+ JSObject::MigrateToMap(real_receiver, new_map);
if (!dictionary_elements_in_chain &&
new_map->DictionaryElementsInPrototypeChainOnly()) {
@@ -11902,16 +11793,16 @@ Handle<Object> JSObject::SetPrototype(Handle<JSObject> object,
}
-MaybeObject* JSObject::EnsureCanContainElements(Arguments* args,
- uint32_t first_arg,
- uint32_t arg_count,
- EnsureElementsMode mode) {
+void JSObject::EnsureCanContainElements(Handle<JSObject> object,
+ Arguments* args,
+ uint32_t first_arg,
+ uint32_t arg_count,
+ EnsureElementsMode mode) {
// Elements in |Arguments| are ordered backwards (because they're on the
// stack), but the method that's called here iterates over them in forward
// direction.
return EnsureCanContainElements(
- args->arguments() - first_arg - (arg_count - 1),
- arg_count, mode);
+ object, args->arguments() - first_arg - (arg_count - 1), arg_count, mode);
}
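
The hunk above preserves a piece of pointer arithmetic that is easy to misread: |Arguments| slots sit on the stack in reverse order, so the forward-iterating overload is handed the address of the range's last element, which is its lowest address. A toy model of that layout; ToyArguments is invented:

#include <cstddef>

struct ToyArguments {
  // slot(0) lives at the highest address; slot(i) is at base_ - i.
  int* base_;
  int* slot_address(int i) const { return base_ - i; }
};

// Lowest address of the slots [first_arg, first_arg + arg_count), i.e. the
// start pointer suitable for a forward-iterating consumer.
int* ForwardRangeStart(const ToyArguments& args, int first_arg,
                       int arg_count) {
  return args.slot_address(first_arg + arg_count - 1);
}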
@@ -11952,7 +11843,7 @@ Handle<Object> JSObject::SetElementWithInterceptor(
uint32_t index,
Handle<Object> value,
PropertyAttributes attributes,
- StrictModeFlag strict_mode,
+ StrictMode strict_mode,
bool check_prototype,
SetPropertyMode set_mode) {
Isolate* isolate = object->GetIsolate();
@@ -12040,7 +11931,7 @@ Handle<Object> JSObject::SetElementWithCallback(Handle<JSObject> object,
uint32_t index,
Handle<Object> value,
Handle<JSObject> holder,
- StrictModeFlag strict_mode) {
+ StrictMode strict_mode) {
Isolate* isolate = object->GetIsolate();
// We should never get here to initialize a const with the hole
@@ -12079,9 +11970,7 @@ Handle<Object> JSObject::SetElementWithCallback(Handle<JSObject> object,
return SetPropertyWithDefinedSetter(
object, Handle<JSReceiver>::cast(setter), value);
} else {
- if (strict_mode == kNonStrictMode) {
- return value;
- }
+ if (strict_mode == SLOPPY) return value;
Handle<Object> key(isolate->factory()->NewNumberFromUint(index));
Handle<Object> args[2] = { key, holder };
Handle<Object> error = isolate->factory()->NewTypeError(
@@ -12103,7 +11992,7 @@ bool JSObject::HasFastArgumentsElements() {
Heap* heap = GetHeap();
if (!elements()->IsFixedArray()) return false;
FixedArray* elements = FixedArray::cast(this->elements());
- if (elements->map() != heap->non_strict_arguments_elements_map()) {
+ if (elements->map() != heap->sloppy_arguments_elements_map()) {
return false;
}
FixedArray* arguments = FixedArray::cast(elements->get(1));
@@ -12115,7 +12004,7 @@ bool JSObject::HasDictionaryArgumentsElements() {
Heap* heap = GetHeap();
if (!elements()->IsFixedArray()) return false;
FixedArray* elements = FixedArray::cast(this->elements());
- if (elements->map() != heap->non_strict_arguments_elements_map()) {
+ if (elements->map() != heap->sloppy_arguments_elements_map()) {
return false;
}
FixedArray* arguments = FixedArray::cast(elements->get(1));
@@ -12129,7 +12018,7 @@ bool JSObject::HasDictionaryArgumentsElements() {
Handle<Object> JSObject::SetFastElement(Handle<JSObject> object,
uint32_t index,
Handle<Object> value,
- StrictModeFlag strict_mode,
+ StrictMode strict_mode,
bool check_prototype) {
ASSERT(object->HasFastSmiOrObjectElements() ||
object->HasFastArgumentsElements());
@@ -12147,7 +12036,7 @@ Handle<Object> JSObject::SetFastElement(Handle<JSObject> object,
Handle<FixedArray> backing_store(FixedArray::cast(object->elements()));
if (backing_store->map() ==
- isolate->heap()->non_strict_arguments_elements_map()) {
+ isolate->heap()->sloppy_arguments_elements_map()) {
backing_store = handle(FixedArray::cast(backing_store->get(1)));
} else {
backing_store = EnsureWritableFastElements(object);
@@ -12227,7 +12116,7 @@ Handle<Object> JSObject::SetFastElement(Handle<JSObject> object,
UpdateAllocationSite(object, kind);
Handle<Map> new_map = GetElementsTransitionMap(object, kind);
- object->set_map(*new_map);
+ JSObject::MigrateToMap(object, new_map);
ASSERT(IsFastObjectElementsKind(object->GetElementsKind()));
}
// Increase backing store capacity if that's been decided previously.
@@ -12258,7 +12147,7 @@ Handle<Object> JSObject::SetDictionaryElement(Handle<JSObject> object,
uint32_t index,
Handle<Object> value,
PropertyAttributes attributes,
- StrictModeFlag strict_mode,
+ StrictMode strict_mode,
bool check_prototype,
SetPropertyMode set_mode) {
ASSERT(object->HasDictionaryElements() ||
@@ -12268,7 +12157,7 @@ Handle<Object> JSObject::SetDictionaryElement(Handle<JSObject> object,
// Insert element in the dictionary.
Handle<FixedArray> elements(FixedArray::cast(object->elements()));
bool is_arguments =
- (elements->map() == isolate->heap()->non_strict_arguments_elements_map());
+ (elements->map() == isolate->heap()->sloppy_arguments_elements_map());
Handle<SeededNumberDictionary> dictionary(is_arguments
? SeededNumberDictionary::cast(elements->get(1))
: SeededNumberDictionary::cast(*elements));
@@ -12290,7 +12179,7 @@ Handle<Object> JSObject::SetDictionaryElement(Handle<JSObject> object,
attributes, NORMAL, details.dictionary_index());
dictionary->DetailsAtPut(entry, details);
} else if (details.IsReadOnly() && !element->IsTheHole()) {
- if (strict_mode == kNonStrictMode) {
+ if (strict_mode == SLOPPY) {
return isolate->factory()->undefined_value();
} else {
Handle<Object> number = isolate->factory()->NewNumberFromUint(index);
@@ -12328,7 +12217,7 @@ Handle<Object> JSObject::SetDictionaryElement(Handle<JSObject> object,
// When we set the is_extensible flag to false we always force the
// element into dictionary mode (and force them to stay there).
if (!object->map()->is_extensible()) {
- if (strict_mode == kNonStrictMode) {
+ if (strict_mode == SLOPPY) {
return isolate->factory()->undefined_value();
} else {
Handle<Object> number = isolate->factory()->NewNumberFromUint(index);
@@ -12401,7 +12290,7 @@ Handle<Object> JSObject::SetFastDoubleElement(
Handle<JSObject> object,
uint32_t index,
Handle<Object> value,
- StrictModeFlag strict_mode,
+ StrictMode strict_mode,
bool check_prototype) {
ASSERT(object->HasFastDoubleElements());
@@ -12486,7 +12375,8 @@ Handle<Object> JSObject::SetFastDoubleElement(
// Otherwise default to slow case.
ASSERT(object->HasFastDoubleElements());
ASSERT(object->map()->has_fast_double_elements());
- ASSERT(object->elements()->IsFixedDoubleArray());
+ ASSERT(object->elements()->IsFixedDoubleArray() ||
+ object->elements()->length() == 0);
NormalizeElements(object);
ASSERT(object->HasDictionaryElements());
@@ -12498,7 +12388,7 @@ Handle<Object> JSReceiver::SetElement(Handle<JSReceiver> object,
uint32_t index,
Handle<Object> value,
PropertyAttributes attributes,
- StrictModeFlag strict_mode) {
+ StrictMode strict_mode) {
if (object->IsJSProxy()) {
return JSProxy::SetElementWithHandler(
Handle<JSProxy>::cast(object), object, index, value, strict_mode);
@@ -12511,7 +12401,7 @@ Handle<Object> JSReceiver::SetElement(Handle<JSReceiver> object,
Handle<Object> JSObject::SetOwnElement(Handle<JSObject> object,
uint32_t index,
Handle<Object> value,
- StrictModeFlag strict_mode) {
+ StrictMode strict_mode) {
ASSERT(!object->HasExternalArrayElements());
return JSObject::SetElement(object, index, value, NONE, strict_mode, false);
}
@@ -12521,12 +12411,13 @@ Handle<Object> JSObject::SetElement(Handle<JSObject> object,
uint32_t index,
Handle<Object> value,
PropertyAttributes attributes,
- StrictModeFlag strict_mode,
+ StrictMode strict_mode,
bool check_prototype,
SetPropertyMode set_mode) {
Isolate* isolate = object->GetIsolate();
- if (object->HasExternalArrayElements()) {
+ if (object->HasExternalArrayElements() ||
+ object->HasFixedTypedArrayElements()) {
if (!value->IsNumber() && !value->IsUndefined()) {
bool has_exception;
Handle<Object> number =
@@ -12538,8 +12429,8 @@ Handle<Object> JSObject::SetElement(Handle<JSObject> object,
// Check access rights if needed.
if (object->IsAccessCheckNeeded()) {
- if (!isolate->MayIndexedAccess(*object, index, v8::ACCESS_SET)) {
- isolate->ReportFailedAccessCheck(*object, v8::ACCESS_SET);
+ if (!isolate->MayIndexedAccessWrapper(object, index, v8::ACCESS_SET)) {
+ isolate->ReportFailedAccessCheckWrapper(object, v8::ACCESS_SET);
RETURN_HANDLE_IF_SCHEDULED_EXCEPTION(isolate, Object);
return value;
}
@@ -12556,7 +12447,9 @@ Handle<Object> JSObject::SetElement(Handle<JSObject> object,
}
// Don't allow element properties to be redefined for external arrays.
- if (object->HasExternalArrayElements() && set_mode == DEFINE_PROPERTY) {
+ if ((object->HasExternalArrayElements() ||
+ object->HasFixedTypedArrayElements()) &&
+ set_mode == DEFINE_PROPERTY) {
Handle<Object> number = isolate->factory()->NewNumberFromUint(index);
Handle<Object> args[] = { object, number };
Handle<Object> error = isolate->factory()->NewTypeError(
@@ -12572,7 +12465,7 @@ Handle<Object> JSObject::SetElement(Handle<JSObject> object,
dictionary->set_requires_slow_elements();
}
- if (!(FLAG_harmony_observation && object->map()->is_observed())) {
+ if (!object->map()->is_observed()) {
return object->HasIndexedInterceptor()
? SetElementWithInterceptor(object, index, value, attributes, strict_mode,
check_prototype,
@@ -12583,14 +12476,16 @@ Handle<Object> JSObject::SetElement(Handle<JSObject> object,
set_mode);
}
- PropertyAttributes old_attributes = object->GetLocalElementAttribute(index);
+ PropertyAttributes old_attributes =
+ JSReceiver::GetLocalElementAttribute(object, index);
Handle<Object> old_value = isolate->factory()->the_hole_value();
Handle<Object> old_length_handle;
Handle<Object> new_length_handle;
if (old_attributes != ABSENT) {
- if (object->GetLocalElementAccessorPair(index) == NULL)
- old_value = Object::GetElement(isolate, object, index);
+ if (object->GetLocalElementAccessorPair(index) == NULL) {
+ old_value = Object::GetElementNoExceptionThrown(isolate, object, index);
+ }
} else if (object->IsJSArray()) {
// Store old array length in case adding an element grows the array.
old_length_handle = handle(Handle<JSArray>::cast(object)->length(),
@@ -12609,7 +12504,7 @@ Handle<Object> JSObject::SetElement(Handle<JSObject> object,
RETURN_IF_EMPTY_HANDLE_VALUE(isolate, result, Handle<Object>());
Handle<String> name = isolate->factory()->Uint32ToString(index);
- PropertyAttributes new_attributes = object->GetLocalElementAttribute(index);
+ PropertyAttributes new_attributes = GetLocalElementAttribute(object, index);
if (old_attributes == ABSENT) {
if (object->IsJSArray() &&
!old_length_handle->SameValue(
@@ -12635,7 +12530,8 @@ Handle<Object> JSObject::SetElement(Handle<JSObject> object,
} else if (old_value->IsTheHole()) {
EnqueueChangeRecord(object, "reconfigure", name, old_value);
} else {
- Handle<Object> new_value = Object::GetElement(isolate, object, index);
+ Handle<Object> new_value =
+ Object::GetElementNoExceptionThrown(isolate, object, index);
bool value_changed = !old_value->SameValue(*new_value);
if (old_attributes != new_attributes) {
if (!value_changed) old_value = isolate->factory()->the_hole_value();
@@ -12654,7 +12550,7 @@ Handle<Object> JSObject::SetElementWithoutInterceptor(
uint32_t index,
Handle<Object> value,
PropertyAttributes attributes,
- StrictModeFlag strict_mode,
+ StrictMode strict_mode,
bool check_prototype,
SetPropertyMode set_mode) {
ASSERT(object->HasDictionaryElements() ||
@@ -12702,7 +12598,7 @@ Handle<Object> JSObject::SetElementWithoutInterceptor(
return SetDictionaryElement(object, index, value, attributes, strict_mode,
check_prototype,
set_mode);
- case NON_STRICT_ARGUMENTS_ELEMENTS: {
+ case SLOPPY_ARGUMENTS_ELEMENTS: {
Handle<FixedArray> parameter_map(FixedArray::cast(object->elements()));
uint32_t length = parameter_map->length();
Handle<Object> probe = index < length - 2 ?
@@ -12741,14 +12637,7 @@ Handle<Object> JSObject::SetElementWithoutInterceptor(
}
-void JSObject::TransitionElementsKind(Handle<JSObject> object,
- ElementsKind to_kind) {
- CALL_HEAP_FUNCTION_VOID(object->GetIsolate(),
- object->TransitionElementsKind(to_kind));
-}
-
-
-const double AllocationSite::kPretenureRatio = 0.60;
+const double AllocationSite::kPretenureRatio = 0.85;
void AllocationSite::ResetPretenureDecision() {
@@ -12779,11 +12668,13 @@ bool AllocationSite::IsNestedSite() {
}
-MaybeObject* AllocationSite::DigestTransitionFeedback(ElementsKind to_kind) {
- Isolate* isolate = GetIsolate();
+void AllocationSite::DigestTransitionFeedback(Handle<AllocationSite> site,
+ ElementsKind to_kind) {
+ Isolate* isolate = site->GetIsolate();
- if (SitePointsToLiteral() && transition_info()->IsJSArray()) {
- JSArray* transition_info = JSArray::cast(this->transition_info());
+ if (site->SitePointsToLiteral() && site->transition_info()->IsJSArray()) {
+ Handle<JSArray> transition_info =
+ handle(JSArray::cast(site->transition_info()));
ElementsKind kind = transition_info->GetElementsKind();
// if kind is holey ensure that to_kind is as well.
if (IsHoleyElementsKind(kind)) {
@@ -12796,22 +12687,21 @@ MaybeObject* AllocationSite::DigestTransitionFeedback(ElementsKind to_kind) {
CHECK(transition_info->length()->ToArrayIndex(&length));
if (length <= kMaximumArrayBytesToPretransition) {
if (FLAG_trace_track_allocation_sites) {
- bool is_nested = IsNestedSite();
+ bool is_nested = site->IsNestedSite();
PrintF(
"AllocationSite: JSArray %p boilerplate %s updated %s->%s\n",
- reinterpret_cast<void*>(this),
+ reinterpret_cast<void*>(*site),
is_nested ? "(nested)" : "",
ElementsKindToString(kind),
ElementsKindToString(to_kind));
}
- MaybeObject* result = transition_info->TransitionElementsKind(to_kind);
- if (result->IsFailure()) return result;
- dependent_code()->DeoptimizeDependentCodeGroup(
+ JSObject::TransitionElementsKind(transition_info, to_kind);
+ site->dependent_code()->DeoptimizeDependentCodeGroup(
isolate, DependentCode::kAllocationSiteTransitionChangedGroup);
}
}
} else {
- ElementsKind kind = GetElementsKind();
+ ElementsKind kind = site->GetElementsKind();
// if kind is holey ensure that to_kind is as well.
if (IsHoleyElementsKind(kind)) {
to_kind = GetHoleyElementsKind(to_kind);
@@ -12819,16 +12709,15 @@ MaybeObject* AllocationSite::DigestTransitionFeedback(ElementsKind to_kind) {
if (IsMoreGeneralElementsKindTransition(kind, to_kind)) {
if (FLAG_trace_track_allocation_sites) {
PrintF("AllocationSite: JSArray %p site updated %s->%s\n",
- reinterpret_cast<void*>(this),
+ reinterpret_cast<void*>(*site),
ElementsKindToString(kind),
ElementsKindToString(to_kind));
}
- SetElementsKind(to_kind);
- dependent_code()->DeoptimizeDependentCodeGroup(
+ site->SetElementsKind(to_kind);
+ site->dependent_code()->DeoptimizeDependentCodeGroup(
isolate, DependentCode::kAllocationSiteTransitionChangedGroup);
}
}
- return this;
}
@@ -12847,64 +12736,62 @@ void AllocationSite::AddDependentCompilationInfo(Handle<AllocationSite> site,
void JSObject::UpdateAllocationSite(Handle<JSObject> object,
ElementsKind to_kind) {
- CALL_HEAP_FUNCTION_VOID(object->GetIsolate(),
- object->UpdateAllocationSite(to_kind));
-}
-
+ if (!object->IsJSArray()) return;
-MaybeObject* JSObject::UpdateAllocationSite(ElementsKind to_kind) {
- if (!IsJSArray()) return this;
+ Heap* heap = object->GetHeap();
+ if (!heap->InNewSpace(*object)) return;
- Heap* heap = GetHeap();
- if (!heap->InNewSpace(this)) return this;
-
- // Check if there is potentially a memento behind the object. If
- // the last word of the momento is on another page we return
- // immediatelly.
- Address object_address = address();
- Address memento_address = object_address + JSArray::kSize;
- Address last_memento_word_address = memento_address + kPointerSize;
- if (!NewSpacePage::OnSamePage(object_address,
- last_memento_word_address)) {
- return this;
- }
+ Handle<AllocationSite> site;
+ {
+ DisallowHeapAllocation no_allocation;
+ // Check if there is potentially a memento behind the object. If
+ // the last word of the memento is on another page we return
+ // immediately.
+ Address object_address = object->address();
+ Address memento_address = object_address + JSArray::kSize;
+ Address last_memento_word_address = memento_address + kPointerSize;
+ if (!NewSpacePage::OnSamePage(object_address,
+ last_memento_word_address)) {
+ return;
+ }
- // Either object is the last object in the new space, or there is another
- // object of at least word size (the header map word) following it, so
- // suffices to compare ptr and top here.
- Address top = heap->NewSpaceTop();
- ASSERT(memento_address == top ||
- memento_address + HeapObject::kHeaderSize <= top);
- if (memento_address == top) return this;
+ // Either object is the last object in the new space, or there is another
+ // object of at least word size (the header map word) following it, so
+ // suffices to compare ptr and top here.
+ Address top = heap->NewSpaceTop();
+ ASSERT(memento_address == top ||
+ memento_address + HeapObject::kHeaderSize <= top);
+ if (memento_address == top) return;
- HeapObject* candidate = HeapObject::FromAddress(memento_address);
- if (candidate->map() != heap->allocation_memento_map()) return this;
+ HeapObject* candidate = HeapObject::FromAddress(memento_address);
+ if (candidate->map() != heap->allocation_memento_map()) return;
- AllocationMemento* memento = AllocationMemento::cast(candidate);
- if (!memento->IsValid()) return this;
+ AllocationMemento* memento = AllocationMemento::cast(candidate);
+ if (!memento->IsValid()) return;
- // Walk through to the Allocation Site
- AllocationSite* site = memento->GetAllocationSite();
- return site->DigestTransitionFeedback(to_kind);
+ // Walk through to the Allocation Site
+ site = handle(memento->GetAllocationSite());
+ }
+ AllocationSite::DigestTransitionFeedback(site, to_kind);
}
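
The handlified UpdateAllocationSite keeps the same trick as before: peek at the words immediately behind the freshly allocated array to see whether an AllocationMemento follows it, guarding against reading across a page boundary or past the current allocation top. A toy bump-allocator sketch of those guards; the page size, kMementoTag and SamePage helper are invented for illustration:

#include <cstdint>
#include <cstring>

constexpr std::size_t kPageSize = 4096;
constexpr std::uint32_t kMementoTag = 0xAB0BA;

struct ToyArena {
  alignas(8) unsigned char memory[kPageSize];
  std::size_t top = 0;  // bump pointer, as an offset into memory
};

inline bool SamePage(std::size_t a, std::size_t b) {
  return a / kPageSize == b / kPageSize;
}

// Returns true if a memento tag sits immediately after [offset, offset+size).
bool HasMementoBehind(const ToyArena& arena, std::size_t offset,
                      std::size_t size) {
  std::size_t memento_offset = offset + size;
  std::size_t last_word = memento_offset + sizeof(std::uint32_t);
  if (!SamePage(offset, last_word)) return false;  // would cross the page
  if (memento_offset >= arena.top) return false;   // nothing allocated there
  std::uint32_t tag;
  std::memcpy(&tag, arena.memory + memento_offset, sizeof(tag));
  return tag == kMementoTag;
}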
-MaybeObject* JSObject::TransitionElementsKind(ElementsKind to_kind) {
- ElementsKind from_kind = map()->elements_kind();
+void JSObject::TransitionElementsKind(Handle<JSObject> object,
+ ElementsKind to_kind) {
+ ElementsKind from_kind = object->map()->elements_kind();
if (IsFastHoleyElementsKind(from_kind)) {
to_kind = GetHoleyElementsKind(to_kind);
}
- if (from_kind == to_kind) return this;
+ if (from_kind == to_kind) return;
// Don't update the site if to_kind isn't fast
if (IsFastElementsKind(to_kind)) {
- MaybeObject* maybe_failure = UpdateAllocationSite(to_kind);
- if (maybe_failure->IsFailure()) return maybe_failure;
+ UpdateAllocationSite(object, to_kind);
}
- Isolate* isolate = GetIsolate();
- if (elements() == isolate->heap()->empty_fixed_array() ||
+ Isolate* isolate = object->GetIsolate();
+ if (object->elements() == isolate->heap()->empty_fixed_array() ||
(IsFastSmiOrObjectElementsKind(from_kind) &&
IsFastSmiOrObjectElementsKind(to_kind)) ||
(from_kind == FAST_DOUBLE_ELEMENTS &&
@@ -12912,54 +12799,48 @@ MaybeObject* JSObject::TransitionElementsKind(ElementsKind to_kind) {
ASSERT(from_kind != TERMINAL_FAST_ELEMENTS_KIND);
// No change is needed to the elements() buffer, the transition
// only requires a map change.
- MaybeObject* maybe_new_map = GetElementsTransitionMap(isolate, to_kind);
- Map* new_map;
- if (!maybe_new_map->To(&new_map)) return maybe_new_map;
- set_map(new_map);
+ Handle<Map> new_map = GetElementsTransitionMap(object, to_kind);
+ MigrateToMap(object, new_map);
if (FLAG_trace_elements_transitions) {
- FixedArrayBase* elms = FixedArrayBase::cast(elements());
- PrintElementsTransition(stdout, from_kind, elms, to_kind, elms);
+ Handle<FixedArrayBase> elms(object->elements());
+ PrintElementsTransition(stdout, object, from_kind, elms, to_kind, elms);
}
- return this;
+ return;
}
- FixedArrayBase* elms = FixedArrayBase::cast(elements());
+ Handle<FixedArrayBase> elms(object->elements());
uint32_t capacity = static_cast<uint32_t>(elms->length());
uint32_t length = capacity;
- if (IsJSArray()) {
- Object* raw_length = JSArray::cast(this)->length();
+ if (object->IsJSArray()) {
+ Object* raw_length = Handle<JSArray>::cast(object)->length();
if (raw_length->IsUndefined()) {
// If length is undefined, then JSArray is being initialized and has no
// elements, assume a length of zero.
length = 0;
} else {
- CHECK(JSArray::cast(this)->length()->ToArrayIndex(&length));
+ CHECK(raw_length->ToArrayIndex(&length));
}
}
if (IsFastSmiElementsKind(from_kind) &&
IsFastDoubleElementsKind(to_kind)) {
- MaybeObject* maybe_result =
- SetFastDoubleElementsCapacityAndLength(capacity, length);
- if (maybe_result->IsFailure()) return maybe_result;
- ValidateElements();
- return this;
+ SetFastDoubleElementsCapacityAndLength(object, capacity, length);
+ object->ValidateElements();
+ return;
}
if (IsFastDoubleElementsKind(from_kind) &&
IsFastObjectElementsKind(to_kind)) {
- MaybeObject* maybe_result = SetFastElementsCapacityAndLength(
- capacity, length, kDontAllowSmiElements);
- if (maybe_result->IsFailure()) return maybe_result;
- ValidateElements();
- return this;
+ SetFastElementsCapacityAndLength(object, capacity, length,
+ kDontAllowSmiElements);
+ object->ValidateElements();
+ return;
}
// This method should never be called for any other case than the ones
// handled above.
UNREACHABLE();
- return GetIsolate()->heap()->null_value();
}
@@ -13003,46 +12884,41 @@ MaybeObject* JSArray::JSArrayUpdateLengthFromIndex(uint32_t index,
}
-MaybeObject* JSObject::GetElementWithInterceptor(Object* receiver,
- uint32_t index) {
- Isolate* isolate = GetIsolate();
- HandleScope scope(isolate);
+Handle<Object> JSObject::GetElementWithInterceptor(Handle<JSObject> object,
+ Handle<Object> receiver,
+ uint32_t index) {
+ Isolate* isolate = object->GetIsolate();
// Make sure that the top context does not change when doing
// callbacks or interceptor calls.
AssertNoContextChange ncc(isolate);
- Handle<InterceptorInfo> interceptor(GetIndexedInterceptor(), isolate);
- Handle<Object> this_handle(receiver, isolate);
- Handle<JSObject> holder_handle(this, isolate);
+ Handle<InterceptorInfo> interceptor(object->GetIndexedInterceptor(), isolate);
if (!interceptor->getter()->IsUndefined()) {
v8::IndexedPropertyGetterCallback getter =
v8::ToCData<v8::IndexedPropertyGetterCallback>(interceptor->getter());
LOG(isolate,
- ApiIndexedPropertyAccess("interceptor-indexed-get", this, index));
+ ApiIndexedPropertyAccess("interceptor-indexed-get", *object, index));
PropertyCallbackArguments
- args(isolate, interceptor->data(), receiver, this);
+ args(isolate, interceptor->data(), *receiver, *object);
v8::Handle<v8::Value> result = args.Call(getter, index);
- RETURN_IF_SCHEDULED_EXCEPTION(isolate);
+ RETURN_HANDLE_IF_SCHEDULED_EXCEPTION(isolate, Object);
if (!result.IsEmpty()) {
Handle<Object> result_internal = v8::Utils::OpenHandle(*result);
result_internal->VerifyApiCallResultType();
- return *result_internal;
+ // Rebox handle before return.
+ return Handle<Object>(*result_internal, isolate);
}
}
- Heap* heap = holder_handle->GetHeap();
- ElementsAccessor* handler = holder_handle->GetElementsAccessor();
- MaybeObject* raw_result = handler->Get(*this_handle,
- *holder_handle,
- index);
- if (raw_result != heap->the_hole_value()) return raw_result;
-
- RETURN_IF_SCHEDULED_EXCEPTION(isolate);
+ ElementsAccessor* handler = object->GetElementsAccessor();
+ Handle<Object> result = handler->Get(receiver, object, index);
+ RETURN_IF_EMPTY_HANDLE_VALUE(isolate, result, Handle<Object>());
+ if (!result->IsTheHole()) return result;
- Object* pt = holder_handle->GetPrototype();
- if (pt == heap->null_value()) return heap->undefined_value();
- return pt->GetElementWithReceiver(isolate, *this_handle, index);
+ Handle<Object> proto(object->GetPrototype(), isolate);
+ if (proto->IsNull()) return isolate->factory()->undefined_value();
+ return Object::GetElementWithReceiver(isolate, proto, receiver, index);
}
@@ -13061,7 +12937,7 @@ void JSObject::GetElementsCapacityAndUsage(int* capacity, int* used) {
FixedArrayBase* backing_store_base = FixedArrayBase::cast(elements());
FixedArray* backing_store = NULL;
switch (GetElementsKind()) {
- case NON_STRICT_ARGUMENTS_ELEMENTS:
+ case SLOPPY_ARGUMENTS_ELEMENTS:
backing_store_base =
FixedArray::cast(FixedArray::cast(backing_store_base)->get(1));
backing_store = FixedArray::cast(backing_store_base);
@@ -13103,8 +12979,9 @@ void JSObject::GetElementsCapacityAndUsage(int* capacity, int* used) {
}
// Fall through if packing is not guaranteed.
case FAST_HOLEY_DOUBLE_ELEMENTS: {
- FixedDoubleArray* elms = FixedDoubleArray::cast(elements());
- *capacity = elms->length();
+ *capacity = elements()->length();
+ if (*capacity == 0) break;
+ FixedDoubleArray* elms = FixedDoubleArray::cast(elements());
for (int i = 0; i < *capacity; i++) {
if (!elms->is_the_hole(i)) ++(*used);
}
@@ -13128,6 +13005,21 @@ void JSObject::GetElementsCapacityAndUsage(int* capacity, int* used) {
}
+bool JSObject::WouldConvertToSlowElements(Handle<Object> key) {
+ uint32_t index;
+ if (HasFastElements() && key->ToArrayIndex(&index)) {
+ Handle<FixedArrayBase> backing_store(FixedArrayBase::cast(elements()));
+ uint32_t capacity = static_cast<uint32_t>(backing_store->length());
+ if (index >= capacity) {
+ if ((index - capacity) >= kMaxGap) return true;
+ uint32_t new_capacity = NewElementsCapacity(index + 1);
+ return ShouldConvertToSlowElements(new_capacity);
+ }
+ }
+ return false;
+}
+
+
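
WouldConvertToSlowElements, added above, is a read-only preview of the decision the element setters make when an index lands beyond the current capacity: a gap larger than kMaxGap forces dictionary elements immediately, otherwise the grown capacity is run through the usual ShouldConvertToSlowElements density check. A sketch of that shape; the growth formula, kMaxGap value and 3x density rule below are assumed tuning, not V8's exact numbers:

#include <cstdint>

constexpr std::uint32_t kMaxGap = 1024;
constexpr std::uint32_t kMinAddedCapacity = 16;

inline std::uint32_t NewElementsCapacity(std::uint32_t needed) {
  return needed + (needed >> 1) + kMinAddedCapacity;  // roughly 1.5x growth
}

inline bool ShouldConvertToSlowElements(std::uint32_t new_capacity,
                                        std::uint32_t used_elements) {
  // Go slow when the backing store would be mostly holes.
  return new_capacity > 3 * used_elements + kMinAddedCapacity;
}

bool WouldConvertToSlowElements(std::uint32_t index, std::uint32_t capacity,
                                std::uint32_t used_elements) {
  if (index < capacity) return false;            // fits in the current store
  if (index - capacity >= kMaxGap) return true;  // sparse write, go slow
  return ShouldConvertToSlowElements(NewElementsCapacity(index + 1),
                                     used_elements);
}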
bool JSObject::ShouldConvertToSlowElements(int new_capacity) {
STATIC_ASSERT(kMaxUncheckedOldFastElementsLength <=
kMaxUncheckedFastElementsLength);
@@ -13157,11 +13049,11 @@ bool JSObject::ShouldConvertToFastElements() {
if (IsAccessCheckNeeded()) return false;
// Observed objects may not go to fast mode because they rely on map checks,
// and for fast element accesses we sometimes check element kinds only.
- if (FLAG_harmony_observation && map()->is_observed()) return false;
+ if (map()->is_observed()) return false;
FixedArray* elements = FixedArray::cast(this->elements());
SeededNumberDictionary* dictionary = NULL;
- if (elements->map() == GetHeap()->non_strict_arguments_elements_map()) {
+ if (elements->map() == GetHeap()->sloppy_arguments_elements_map()) {
dictionary = SeededNumberDictionary::cast(elements->get(1));
} else {
dictionary = SeededNumberDictionary::cast(elements);
@@ -13187,6 +13079,7 @@ bool JSObject::ShouldConvertToFastElements() {
bool JSObject::ShouldConvertToFastDoubleElements(
bool* has_smi_only_elements) {
*has_smi_only_elements = false;
+ if (HasSloppyArgumentsElements()) return false;
if (FLAG_unbox_double_arrays) {
ASSERT(HasDictionaryElements());
SeededNumberDictionary* dictionary = element_dictionary();
@@ -13351,8 +13244,8 @@ bool JSObject::HasRealNamedProperty(Handle<JSObject> object,
SealHandleScope shs(isolate);
// Check access rights if needed.
if (object->IsAccessCheckNeeded()) {
- if (!isolate->MayNamedAccess(*object, *key, v8::ACCESS_HAS)) {
- isolate->ReportFailedAccessCheck(*object, v8::ACCESS_HAS);
+ if (!isolate->MayNamedAccessWrapper(object, key, v8::ACCESS_HAS)) {
+ isolate->ReportFailedAccessCheckWrapper(object, v8::ACCESS_HAS);
return false;
}
}
@@ -13365,11 +13258,11 @@ bool JSObject::HasRealNamedProperty(Handle<JSObject> object,
bool JSObject::HasRealElementProperty(Handle<JSObject> object, uint32_t index) {
Isolate* isolate = object->GetIsolate();
- SealHandleScope shs(isolate);
+ HandleScope scope(isolate);
// Check access rights if needed.
if (object->IsAccessCheckNeeded()) {
- if (!isolate->MayIndexedAccess(*object, index, v8::ACCESS_HAS)) {
- isolate->ReportFailedAccessCheck(*object, v8::ACCESS_HAS);
+ if (!isolate->MayIndexedAccessWrapper(object, index, v8::ACCESS_HAS)) {
+ isolate->ReportFailedAccessCheckWrapper(object, v8::ACCESS_HAS);
return false;
}
}
@@ -13382,8 +13275,8 @@ bool JSObject::HasRealElementProperty(Handle<JSObject> object, uint32_t index) {
return HasRealElementProperty(Handle<JSObject>::cast(proto), index);
}
- return object->GetElementAttributeWithoutInterceptor(
- *object, index, false) != ABSENT;
+ return GetElementAttributeWithoutInterceptor(
+ object, object, index, false) != ABSENT;
}
@@ -13393,8 +13286,8 @@ bool JSObject::HasRealNamedCallbackProperty(Handle<JSObject> object,
SealHandleScope shs(isolate);
// Check access rights if needed.
if (object->IsAccessCheckNeeded()) {
- if (!isolate->MayNamedAccess(*object, *key, v8::ACCESS_HAS)) {
- isolate->ReportFailedAccessCheck(*object, v8::ACCESS_HAS);
+ if (!isolate->MayNamedAccessWrapper(object, key, v8::ACCESS_HAS)) {
+ isolate->ReportFailedAccessCheckWrapper(object, v8::ACCESS_HAS);
return false;
}
}
@@ -13641,7 +13534,7 @@ int JSObject::GetLocalElementKeys(FixedArray* storage,
counter += element_dictionary()->NumberOfElementsFilterAttributes(filter);
break;
}
- case NON_STRICT_ARGUMENTS_ELEMENTS: {
+ case SLOPPY_ARGUMENTS_ELEMENTS: {
FixedArray* parameter_map = FixedArray::cast(elements());
int mapped_length = parameter_map->length() - 2;
FixedArray* arguments = FixedArray::cast(parameter_map->get(1));
@@ -13739,11 +13632,11 @@ class StringSharedKey : public HashTableKey {
public:
StringSharedKey(String* source,
SharedFunctionInfo* shared,
- LanguageMode language_mode,
+ StrictMode strict_mode,
int scope_position)
: source_(source),
shared_(shared),
- language_mode_(language_mode),
+ strict_mode_(strict_mode),
scope_position_(scope_position) { }
bool IsMatch(Object* other) {
@@ -13751,12 +13644,10 @@ class StringSharedKey : public HashTableKey {
FixedArray* other_array = FixedArray::cast(other);
SharedFunctionInfo* shared = SharedFunctionInfo::cast(other_array->get(0));
if (shared != shared_) return false;
- int language_unchecked = Smi::cast(other_array->get(2))->value();
- ASSERT(language_unchecked == CLASSIC_MODE ||
- language_unchecked == STRICT_MODE ||
- language_unchecked == EXTENDED_MODE);
- LanguageMode language_mode = static_cast<LanguageMode>(language_unchecked);
- if (language_mode != language_mode_) return false;
+ int strict_unchecked = Smi::cast(other_array->get(2))->value();
+ ASSERT(strict_unchecked == SLOPPY || strict_unchecked == STRICT);
+ StrictMode strict_mode = static_cast<StrictMode>(strict_unchecked);
+ if (strict_mode != strict_mode_) return false;
int scope_position = Smi::cast(other_array->get(3))->value();
if (scope_position != scope_position_) return false;
String* source = String::cast(other_array->get(1));
@@ -13765,7 +13656,7 @@ class StringSharedKey : public HashTableKey {
static uint32_t StringSharedHashHelper(String* source,
SharedFunctionInfo* shared,
- LanguageMode language_mode,
+ StrictMode strict_mode,
int scope_position) {
uint32_t hash = source->Hash();
if (shared->HasSourceCode()) {
@@ -13776,8 +13667,7 @@ class StringSharedKey : public HashTableKey {
// collection.
Script* script = Script::cast(shared->script());
hash ^= String::cast(script->source())->Hash();
- if (language_mode == STRICT_MODE) hash ^= 0x8000;
- if (language_mode == EXTENDED_MODE) hash ^= 0x0080;
+ if (strict_mode == STRICT) hash ^= 0x8000;
hash += scope_position;
}
return hash;
@@ -13785,21 +13675,19 @@ class StringSharedKey : public HashTableKey {
uint32_t Hash() {
return StringSharedHashHelper(
- source_, shared_, language_mode_, scope_position_);
+ source_, shared_, strict_mode_, scope_position_);
}
uint32_t HashForObject(Object* obj) {
FixedArray* other_array = FixedArray::cast(obj);
SharedFunctionInfo* shared = SharedFunctionInfo::cast(other_array->get(0));
String* source = String::cast(other_array->get(1));
- int language_unchecked = Smi::cast(other_array->get(2))->value();
- ASSERT(language_unchecked == CLASSIC_MODE ||
- language_unchecked == STRICT_MODE ||
- language_unchecked == EXTENDED_MODE);
- LanguageMode language_mode = static_cast<LanguageMode>(language_unchecked);
+ int strict_unchecked = Smi::cast(other_array->get(2))->value();
+ ASSERT(strict_unchecked == SLOPPY || strict_unchecked == STRICT);
+ StrictMode strict_mode = static_cast<StrictMode>(strict_unchecked);
int scope_position = Smi::cast(other_array->get(3))->value();
return StringSharedHashHelper(
- source, shared, language_mode, scope_position);
+ source, shared, strict_mode, scope_position);
}
MUST_USE_RESULT MaybeObject* AsObject(Heap* heap) {
@@ -13810,7 +13698,7 @@ class StringSharedKey : public HashTableKey {
FixedArray* other_array = FixedArray::cast(obj);
other_array->set(0, shared_);
other_array->set(1, source_);
- other_array->set(2, Smi::FromInt(language_mode_));
+ other_array->set(2, Smi::FromInt(strict_mode_));
other_array->set(3, Smi::FromInt(scope_position_));
return other_array;
}
@@ -13818,7 +13706,7 @@ class StringSharedKey : public HashTableKey {
private:
String* source_;
SharedFunctionInfo* shared_;
- LanguageMode language_mode_;
+ StrictMode strict_mode_;
int scope_position_;
};
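Illustration (not from this diff): the cache key above now hashes a single strict-mode bit instead of the old three-valued language mode, so sloppy and strict compilations of the same source get distinct entries. A standalone restatement of the mixing in StringSharedHashHelper for the branch where the shared function has source code, with the two Hash() results taken as plain inputs:

// Sketch only: mirrors the hash mixing in StringSharedHashHelper above.
#include <cstdint>

uint32_t StringSharedHashSketch(uint32_t source_hash, uint32_t script_hash,
                                bool is_strict, int scope_position) {
  uint32_t hash = source_hash;
  hash ^= script_hash;                            // tie the key to the enclosing script
  if (is_strict) hash ^= 0x8000;                  // keep sloppy and strict entries apart
  hash += static_cast<uint32_t>(scope_position);  // distinguish eval positions
  return hash;
}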
@@ -13991,7 +13879,7 @@ MaybeObject* HashTable<Shape, Key>::Allocate(Heap* heap,
? at_least_space_for
: ComputeCapacity(at_least_space_for);
if (capacity > HashTable::kMaxCapacity) {
- return Failure::OutOfMemoryException(0x10);
+ v8::internal::Heap::FatalProcessOutOfMemory("invalid table size", true);
}
Object* obj;
@@ -14454,8 +14342,11 @@ MaybeObject* JSObject::PrepareSlowElementsForSort(uint32_t limit) {
Handle<Object> JSObject::PrepareElementsForSort(Handle<JSObject> object,
uint32_t limit) {
Isolate* isolate = object->GetIsolate();
+ if (object->HasSloppyArgumentsElements() ||
+ object->map()->is_observed()) {
+ return handle(Smi::FromInt(-1), isolate);
+ }
- ASSERT(!object->map()->is_observed());
if (object->HasDictionaryElements()) {
// Convert to fast elements containing only the existing properties.
// Ordering is irrelevant, since we are going to sort anyway.
@@ -14477,10 +14368,11 @@ Handle<Object> JSObject::PrepareElementsForSort(Handle<JSObject> object,
object->ValidateElements();
object->set_map_and_elements(*new_map, *fast_elements);
- } else if (object->HasExternalArrayElements()) {
- // External arrays cannot have holes or undefined elements.
+ } else if (object->HasExternalArrayElements() ||
+ object->HasFixedTypedArrayElements()) {
+ // Typed arrays cannot have holes or undefined elements.
return handle(Smi::FromInt(
- ExternalArray::cast(object->elements())->length()), isolate);
+ FixedArrayBase::cast(object->elements())->length()), isolate);
} else if (!object->HasFastDoubleElements()) {
EnsureWritableFastElements(object);
}
@@ -14581,12 +14473,14 @@ ExternalArrayType JSTypedArray::type() {
switch (elements()->map()->instance_type()) {
#define INSTANCE_TYPE_TO_ARRAY_TYPE(Type, type, TYPE, ctype, size) \
case EXTERNAL_##TYPE##_ARRAY_TYPE: \
+ case FIXED_##TYPE##_ARRAY_TYPE: \
return kExternal##Type##Array;
TYPED_ARRAYS(INSTANCE_TYPE_TO_ARRAY_TYPE)
#undef INSTANCE_TYPE_TO_ARRAY_TYPE
default:
+ UNREACHABLE();
return static_cast<ExternalArrayType>(-1);
}
}
@@ -15011,22 +14905,11 @@ MaybeObject* StringTable::LookupKey(HashTableKey* key, Object** s) {
}
-// The key for the script compilation cache is dependent on the mode flags,
-// because they change the global language mode and thus binding behaviour.
-// If flags change at some point, we must ensure that we do not hit the cache
-// for code compiled with different settings.
-static LanguageMode CurrentGlobalLanguageMode() {
- return FLAG_use_strict
- ? (FLAG_harmony_scoping ? EXTENDED_MODE : STRICT_MODE)
- : CLASSIC_MODE;
-}
-
-
Object* CompilationCacheTable::Lookup(String* src, Context* context) {
SharedFunctionInfo* shared = context->closure()->shared();
StringSharedKey key(src,
shared,
- CurrentGlobalLanguageMode(),
+ FLAG_use_strict ? STRICT : SLOPPY,
RelocInfo::kNoPosition);
int entry = FindEntry(&key);
if (entry == kNotFound) return GetHeap()->undefined_value();
@@ -15036,11 +14919,11 @@ Object* CompilationCacheTable::Lookup(String* src, Context* context) {
Object* CompilationCacheTable::LookupEval(String* src,
Context* context,
- LanguageMode language_mode,
+ StrictMode strict_mode,
int scope_position) {
StringSharedKey key(src,
context->closure()->shared(),
- language_mode,
+ strict_mode,
scope_position);
int entry = FindEntry(&key);
if (entry == kNotFound) return GetHeap()->undefined_value();
@@ -15063,7 +14946,7 @@ MaybeObject* CompilationCacheTable::Put(String* src,
SharedFunctionInfo* shared = context->closure()->shared();
StringSharedKey key(src,
shared,
- CurrentGlobalLanguageMode(),
+ FLAG_use_strict ? STRICT : SLOPPY,
RelocInfo::kNoPosition);
CompilationCacheTable* cache;
MaybeObject* maybe_cache = EnsureCapacity(1, &key);
@@ -15087,7 +14970,7 @@ MaybeObject* CompilationCacheTable::PutEval(String* src,
int scope_position) {
StringSharedKey key(src,
context->closure()->shared(),
- value->language_mode(),
+ value->strict_mode(),
scope_position);
Object* obj;
{ MaybeObject* maybe_obj = EnsureCapacity(1, &key);
@@ -15516,8 +15399,7 @@ int Dictionary<Shape, Key>::NumberOfElementsFilterAttributes(
int result = 0;
for (int i = 0; i < capacity; i++) {
Object* k = HashTable<Shape, Key>::KeyAt(i);
- if (HashTable<Shape, Key>::IsKey(k) &&
- !FilterKey(k, filter)) {
+ if (HashTable<Shape, Key>::IsKey(k) && !FilterKey(k, filter)) {
PropertyDetails details = DetailsAt(i);
if (details.IsDeleted()) continue;
PropertyAttributes attr = details.attributes();
@@ -15531,7 +15413,7 @@ int Dictionary<Shape, Key>::NumberOfElementsFilterAttributes(
template<typename Shape, typename Key>
int Dictionary<Shape, Key>::NumberOfEnumElements() {
return NumberOfElementsFilterAttributes(
- static_cast<PropertyAttributes>(DONT_ENUM));
+ static_cast<PropertyAttributes>(DONT_ENUM | SYMBOLIC));
}
@@ -15540,12 +15422,12 @@ void Dictionary<Shape, Key>::CopyKeysTo(
FixedArray* storage,
PropertyAttributes filter,
typename Dictionary<Shape, Key>::SortMode sort_mode) {
- ASSERT(storage->length() >= NumberOfEnumElements());
+ ASSERT(storage->length() >= NumberOfElementsFilterAttributes(filter));
int capacity = HashTable<Shape, Key>::Capacity();
int index = 0;
for (int i = 0; i < capacity; i++) {
Object* k = HashTable<Shape, Key>::KeyAt(i);
- if (HashTable<Shape, Key>::IsKey(k)) {
+ if (HashTable<Shape, Key>::IsKey(k) && !FilterKey(k, filter)) {
PropertyDetails details = DetailsAt(i);
if (details.IsDeleted()) continue;
PropertyAttributes attr = details.attributes();
@@ -15559,45 +15441,38 @@ void Dictionary<Shape, Key>::CopyKeysTo(
}
-FixedArray* NameDictionary::CopyEnumKeysTo(FixedArray* storage) {
+struct EnumIndexComparator {
+ explicit EnumIndexComparator(NameDictionary* dict) : dict(dict) { }
+ bool operator() (Smi* a, Smi* b) {
+ PropertyDetails da(dict->DetailsAt(a->value()));
+ PropertyDetails db(dict->DetailsAt(b->value()));
+ return da.dictionary_index() < db.dictionary_index();
+ }
+ NameDictionary* dict;
+};
+
+
+void NameDictionary::CopyEnumKeysTo(FixedArray* storage) {
int length = storage->length();
- ASSERT(length >= NumberOfEnumElements());
- Heap* heap = GetHeap();
- Object* undefined_value = heap->undefined_value();
int capacity = Capacity();
int properties = 0;
-
- // Fill in the enumeration array by assigning enumerable keys at their
- // enumeration index. This will leave holes in the array if there are keys
- // that are deleted or not enumerable.
for (int i = 0; i < capacity; i++) {
Object* k = KeyAt(i);
if (IsKey(k) && !k->IsSymbol()) {
PropertyDetails details = DetailsAt(i);
if (details.IsDeleted() || details.IsDontEnum()) continue;
+ storage->set(properties, Smi::FromInt(i));
properties++;
- storage->set(details.dictionary_index() - 1, k);
if (properties == length) break;
}
}
-
- // There are holes in the enumeration array if less properties were assigned
- // than the length of the array. If so, crunch all the existing properties
- // together by shifting them to the left (maintaining the enumeration order),
- // and trimming of the right side of the array.
- if (properties < length) {
- if (properties == 0) return heap->empty_fixed_array();
- properties = 0;
- for (int i = 0; i < length; ++i) {
- Object* value = storage->get(i);
- if (value != undefined_value) {
- storage->set(properties, value);
- ++properties;
- }
- }
- RightTrimFixedArray<FROM_MUTATOR>(heap, storage, length - properties);
+ EnumIndexComparator cmp(this);
+ Smi** start = reinterpret_cast<Smi**>(storage->GetFirstElementAddress());
+ std::sort(start, start + length, cmp);
+ for (int i = 0; i < length; i++) {
+ int index = Smi::cast(storage->get(i))->value();
+ storage->set(i, KeyAt(index));
}
- return storage;
}
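Illustration (not from this diff): the rewritten CopyEnumKeysTo records the dictionary slots of enumerable keys as Smis, sorts them by stored enumeration index via EnumIndexComparator, then overwrites each slot with its key, replacing the old fill-holes-and-compact scheme. The same collect, sort-by-index, resolve pattern in standalone C++ with the dictionary reduced to parallel vectors:

// Sketch only: the collect / sort-by-enumeration-index / resolve pattern.
#include <algorithm>
#include <string>
#include <vector>

std::vector<std::string> CopyEnumKeysSketch(
    const std::vector<std::string>& keys,   // key per dictionary slot
    const std::vector<int>& enum_index,     // enumeration index per slot
    const std::vector<bool>& enumerable) {  // filter per slot
  std::vector<int> slots;
  for (size_t i = 0; i < keys.size(); ++i) {
    if (enumerable[i]) slots.push_back(static_cast<int>(i));  // collect slots
  }
  std::sort(slots.begin(), slots.end(),                       // order by enum index
            [&](int a, int b) { return enum_index[a] < enum_index[b]; });
  std::vector<std::string> out;
  for (int slot : slots) out.push_back(keys[slot]);           // resolve to keys
  return out;
}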
@@ -15607,12 +15482,11 @@ void Dictionary<Shape, Key>::CopyKeysTo(
int index,
PropertyAttributes filter,
typename Dictionary<Shape, Key>::SortMode sort_mode) {
- ASSERT(storage->length() >= NumberOfElementsFilterAttributes(
- static_cast<PropertyAttributes>(NONE)));
+ ASSERT(storage->length() >= NumberOfElementsFilterAttributes(filter));
int capacity = HashTable<Shape, Key>::Capacity();
for (int i = 0; i < capacity; i++) {
Object* k = HashTable<Shape, Key>::KeyAt(i);
- if (HashTable<Shape, Key>::IsKey(k)) {
+ if (HashTable<Shape, Key>::IsKey(k) && !FilterKey(k, filter)) {
PropertyDetails details = DetailsAt(i);
if (details.IsDeleted()) continue;
PropertyAttributes attr = details.attributes();
@@ -16445,6 +16319,65 @@ void JSTypedArray::Neuter() {
}
+static ElementsKind FixedToExternalElementsKind(ElementsKind elements_kind) {
+ switch (elements_kind) {
+#define TYPED_ARRAY_CASE(Type, type, TYPE, ctype, size) \
+ case TYPE##_ELEMENTS: return EXTERNAL_##TYPE##_ELEMENTS;
+
+ TYPED_ARRAYS(TYPED_ARRAY_CASE)
+#undef TYPED_ARRAY_CASE
+
+ default:
+ UNREACHABLE();
+ return FIRST_EXTERNAL_ARRAY_ELEMENTS_KIND;
+ }
+}
+
+
+Handle<JSArrayBuffer> JSTypedArray::MaterializeArrayBuffer(
+ Handle<JSTypedArray> typed_array) {
+
+ Handle<Map> map(typed_array->map());
+ Isolate* isolate = typed_array->GetIsolate();
+
+ ASSERT(IsFixedTypedArrayElementsKind(map->elements_kind()));
+
+ Handle<JSArrayBuffer> buffer = isolate->factory()->NewJSArrayBuffer();
+ Handle<FixedTypedArrayBase> fixed_typed_array(
+ FixedTypedArrayBase::cast(typed_array->elements()));
+ Runtime::SetupArrayBufferAllocatingData(isolate, buffer,
+ fixed_typed_array->DataSize(), false);
+ memcpy(buffer->backing_store(),
+ fixed_typed_array->DataPtr(),
+ fixed_typed_array->DataSize());
+ Handle<ExternalArray> new_elements =
+ isolate->factory()->NewExternalArray(
+ fixed_typed_array->length(), typed_array->type(),
+ static_cast<uint8_t*>(buffer->backing_store()));
+ Handle<Map> new_map = JSObject::GetElementsTransitionMap(
+ typed_array,
+ FixedToExternalElementsKind(map->elements_kind()));
+
+ buffer->set_weak_first_view(*typed_array);
+ ASSERT(typed_array->weak_next() == isolate->heap()->undefined_value());
+ typed_array->set_buffer(*buffer);
+ typed_array->set_map_and_elements(*new_map, *new_elements);
+
+ return buffer;
+}
+
+
+Handle<JSArrayBuffer> JSTypedArray::GetBuffer() {
+ Handle<Object> result(buffer(), GetIsolate());
+ if (*result != Smi::FromInt(0)) {
+ ASSERT(IsExternalArrayElementsKind(map()->elements_kind()));
+ return Handle<JSArrayBuffer>::cast(result);
+ }
+ Handle<JSTypedArray> self(this);
+ return MaterializeArrayBuffer(self);
+}
+
+
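Illustration (not from this diff): GetBuffer and MaterializeArrayBuffer above lazily back an on-heap typed array with a real ArrayBuffer: allocate external storage, copy the fixed typed array's bytes into it, then retarget the elements and remember the buffer. The same lazy externalization with all V8 types removed (ownership and GC concerns left out):

// Sketch only: lazy externalization of an on-heap backing store.
#include <cstddef>
#include <cstdlib>
#include <cstring>

struct TypedArraySketch {
  void* data = nullptr;          // on-heap storage, or the external buffer
  std::size_t byte_length = 0;
  void* buffer = nullptr;        // null until materialized

  void* GetBuffer() {
    if (buffer != nullptr) return buffer;       // already backed by a buffer
    void* external = std::malloc(byte_length);  // allocate the real store
    std::memcpy(external, data, byte_length);   // copy current contents over
    buffer = external;                          // remember the buffer...
    data = external;                            // ...and alias elements to it
    return buffer;                              // (freeing is out of scope here)
  }
};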
HeapType* PropertyCell::type() {
return static_cast<HeapType*>(type_raw());
}
diff --git a/deps/v8/src/objects.h b/deps/v8/src/objects.h
index 1b40752507..e3ed08c4dc 100644
--- a/deps/v8/src/objects.h
+++ b/deps/v8/src/objects.h
@@ -37,7 +37,9 @@
#include "property-details.h"
#include "smart-pointers.h"
#include "unicode-inl.h"
-#if V8_TARGET_ARCH_ARM
+#if V8_TARGET_ARCH_ARM64
+#include "arm64/constants-arm64.h"
+#elif V8_TARGET_ARCH_ARM
#include "arm/constants-arm.h"
#elif V8_TARGET_ARCH_MIPS
#include "mips/constants-mips.h"
@@ -932,7 +934,6 @@ class MaybeObject BASE_EMBEDDED {
public:
inline bool IsFailure();
inline bool IsRetryAfterGC();
- inline bool IsOutOfMemory();
inline bool IsException();
INLINE(bool IsTheHole());
INLINE(bool IsUninitialized());
@@ -1038,7 +1039,6 @@ class MaybeObject BASE_EMBEDDED {
V(DeoptimizationInputData) \
V(DeoptimizationOutputData) \
V(DependentCode) \
- V(TypeFeedbackCells) \
V(FixedArray) \
V(FixedDoubleArray) \
V(ConstantPoolArray) \
@@ -1129,6 +1129,9 @@ class MaybeObject BASE_EMBEDDED {
V(kCodeObjectNotProperlyPatched, "Code object not properly patched") \
V(kCompoundAssignmentToLookupSlot, "Compound assignment to lookup slot") \
V(kContextAllocatedArguments, "Context-allocated arguments") \
+ V(kCopyBuffersOverlap, "Copy buffers overlap") \
+ V(kCouldNotGenerateZero, "Could not generate +0.0") \
+ V(kCouldNotGenerateNegativeZero, "Could not generate -0.0") \
V(kDebuggerIsActive, "Debugger is active") \
V(kDebuggerStatement, "DebuggerStatement") \
V(kDeclarationInCatchContext, "Declaration in catch context") \
@@ -1141,18 +1144,34 @@ class MaybeObject BASE_EMBEDDED {
"DontDelete cells can't contain the hole") \
V(kDoPushArgumentNotImplementedForDoubleType, \
"DoPushArgument not implemented for double type") \
+ V(kEliminatedBoundsCheckFailed, "Eliminated bounds check failed") \
V(kEmitLoadRegisterUnsupportedDoubleImmediate, \
"EmitLoadRegister: Unsupported double immediate") \
V(kEval, "eval") \
V(kExpected0AsASmiSentinel, "Expected 0 as a Smi sentinel") \
- V(kExpectedAlignmentMarker, "expected alignment marker") \
- V(kExpectedAllocationSite, "expected allocation site") \
- V(kExpectedPropertyCellInRegisterA2, \
- "Expected property cell in register a2") \
- V(kExpectedPropertyCellInRegisterEbx, \
- "Expected property cell in register ebx") \
- V(kExpectedPropertyCellInRegisterRbx, \
- "Expected property cell in register rbx") \
+ V(kExpectedAlignmentMarker, "Expected alignment marker") \
+ V(kExpectedAllocationSite, "Expected allocation site") \
+ V(kExpectedFunctionObject, "Expected function object in register") \
+ V(kExpectedHeapNumber, "Expected HeapNumber") \
+ V(kExpectedNativeContext, "Expected native context") \
+ V(kExpectedNonIdenticalObjects, "Expected non-identical objects") \
+ V(kExpectedNonNullContext, "Expected non-null context") \
+ V(kExpectedPositiveZero, "Expected +0.0") \
+ V(kExpectedAllocationSiteInCell, \
+ "Expected AllocationSite in property cell") \
+ V(kExpectedFixedArrayInFeedbackVector, \
+ "Expected fixed array in feedback vector") \
+ V(kExpectedFixedArrayInRegisterA2, \
+ "Expected fixed array in register a2") \
+ V(kExpectedFixedArrayInRegisterEbx, \
+ "Expected fixed array in register ebx") \
+ V(kExpectedFixedArrayInRegisterR2, \
+ "Expected fixed array in register r2") \
+ V(kExpectedFixedArrayInRegisterRbx, \
+ "Expected fixed array in register rbx") \
+ V(kExpectedSmiOrHeapNumber, "Expected smi or HeapNumber") \
+ V(kExpectedUndefinedOrCell, \
+ "Expected undefined or cell in register") \
V(kExpectingAlignmentForCopyBytes, \
"Expecting alignment for CopyBytes") \
V(kExportDeclaration, "Export declaration") \
@@ -1197,6 +1216,7 @@ class MaybeObject BASE_EMBEDDED {
V(kInliningBailedOut, "Inlining bailed out") \
V(kInputGPRIsExpectedToHaveUpper32Cleared, \
"Input GPR is expected to have upper32 cleared") \
+ V(kInputStringTooLong, "Input string too long") \
V(kInstanceofStubUnexpectedCallSiteCacheCheck, \
"InstanceofStub unexpected call site cache (check)") \
V(kInstanceofStubUnexpectedCallSiteCacheCmp1, \
@@ -1210,6 +1230,7 @@ class MaybeObject BASE_EMBEDDED {
V(kInvalidCaptureReferenced, "Invalid capture referenced") \
V(kInvalidElementsKindForInternalArrayOrInternalPackedArray, \
"Invalid ElementsKind for InternalArray or InternalPackedArray") \
+ V(kInvalidFullCodegenState, "invalid full-codegen state") \
V(kInvalidHandleScopeLevel, "Invalid HandleScope level") \
V(kInvalidLeftHandSideInAssignment, "Invalid left-hand side in assignment") \
V(kInvalidLhsInCompoundAssignment, "Invalid lhs in compound assignment") \
@@ -1222,7 +1243,10 @@ class MaybeObject BASE_EMBEDDED {
V(kJSObjectWithFastElementsMapHasSlowElements, \
"JSObject with fast elements map has slow elements") \
V(kLetBindingReInitialization, "Let binding re-initialization") \
+ V(kLhsHasBeenClobbered, "lhs has been clobbered") \
V(kLiveBytesCountOverflowChunkSize, "Live Bytes Count overflow chunk size") \
+ V(kLiveEditFrameDroppingIsNotSupportedOnARM64, \
+ "LiveEdit frame dropping is not supported on arm64") \
V(kLiveEditFrameDroppingIsNotSupportedOnArm, \
"LiveEdit frame dropping is not supported on arm") \
V(kLiveEditFrameDroppingIsNotSupportedOnMips, \
@@ -1258,6 +1282,7 @@ class MaybeObject BASE_EMBEDDED {
"Object literal with complex property") \
V(kOddballInStringTableIsNotUndefinedOrTheHole, \
"Oddball in string table is not undefined or the hole") \
+ V(kOffsetOutOfRange, "Offset out of range") \
V(kOperandIsASmiAndNotAName, "Operand is a smi and not a name") \
V(kOperandIsASmiAndNotAString, "Operand is a smi and not a string") \
V(kOperandIsASmi, "Operand is a smi") \
@@ -1273,6 +1298,7 @@ class MaybeObject BASE_EMBEDDED {
"Out of virtual registers while trying to allocate temp register") \
V(kParseScopeError, "Parse/scope error") \
V(kPossibleDirectCallToEval, "Possible direct call to eval") \
+ V(kPreconditionsWereNotMet, "Preconditions were not met") \
V(kPropertyAllocationCountFailed, "Property allocation count failed") \
V(kReceivedInvalidReturnAddress, "Received invalid return address") \
V(kReferenceToAVariableWhichRequiresDynamicLookup, \
@@ -1282,24 +1308,40 @@ class MaybeObject BASE_EMBEDDED {
V(kReferenceToUninitializedVariable, "Reference to uninitialized variable") \
V(kRegisterDidNotMatchExpectedRoot, "Register did not match expected root") \
V(kRegisterWasClobbered, "Register was clobbered") \
+ V(kRememberedSetPointerInNewSpace, "Remembered set pointer is in new space") \
+ V(kReturnAddressNotFoundInFrame, "Return address not found in frame") \
+ V(kRhsHasBeenClobbered, "Rhs has been clobbered") \
V(kScopedBlock, "ScopedBlock") \
V(kSmiAdditionOverflow, "Smi addition overflow") \
V(kSmiSubtractionOverflow, "Smi subtraction overflow") \
+ V(kStackAccessBelowStackPointer, "Stack access below stack pointer") \
V(kStackFrameTypesMustMatch, "Stack frame types must match") \
V(kSwitchStatementMixedOrNonLiteralSwitchLabels, \
"SwitchStatement: mixed or non-literal switch labels") \
V(kSwitchStatementTooManyClauses, "SwitchStatement: too many clauses") \
+ V(kTheCurrentStackPointerIsBelowCsp, \
+ "The current stack pointer is below csp") \
V(kTheInstructionShouldBeALui, "The instruction should be a lui") \
V(kTheInstructionShouldBeAnOri, "The instruction should be an ori") \
V(kTheInstructionToPatchShouldBeALoadFromPc, \
"The instruction to patch should be a load from pc") \
+ V(kTheInstructionToPatchShouldBeALoadFromPp, \
+ "The instruction to patch should be a load from pp") \
+ V(kTheInstructionToPatchShouldBeAnLdrLiteral, \
+ "The instruction to patch should be a ldr literal") \
V(kTheInstructionToPatchShouldBeALui, \
"The instruction to patch should be a lui") \
V(kTheInstructionToPatchShouldBeAnOri, \
"The instruction to patch should be an ori") \
+ V(kTheSourceAndDestinationAreTheSame, \
+ "The source and destination are the same") \
+ V(kTheStackPointerIsNotAligned, "The stack pointer is not aligned.") \
+ V(kTheStackWasCorruptedByMacroAssemblerCall, \
+ "The stack was corrupted by MacroAssembler::Call()") \
V(kTooManyParametersLocals, "Too many parameters/locals") \
V(kTooManyParameters, "Too many parameters") \
V(kTooManySpillSlotsNeededForOSR, "Too many spill slots needed for OSR") \
+ V(kToOperand32UnsupportedImmediate, "ToOperand32 unsupported immediate.") \
V(kToOperandIsDoubleRegisterUnimplemented, \
"ToOperand IsDoubleRegister unimplemented") \
V(kToOperandUnsupportedDoubleImmediate, \
@@ -1308,10 +1350,12 @@ class MaybeObject BASE_EMBEDDED {
V(kTryFinallyStatement, "TryFinallyStatement") \
V(kUnableToEncodeValueAsSmi, "Unable to encode value as smi") \
V(kUnalignedAllocationInNewSpace, "Unaligned allocation in new space") \
+ V(kUnalignedCellInWriteBarrier, "Unaligned cell in write barrier") \
V(kUndefinedValueNotLoaded, "Undefined value not loaded") \
V(kUndoAllocationOfNonAllocatedMemory, \
"Undo allocation of non allocated memory") \
V(kUnexpectedAllocationTop, "Unexpected allocation top") \
+ V(kUnexpectedColorFound, "Unexpected color bit pattern found") \
V(kUnexpectedElementsKindInArrayConstructor, \
"Unexpected ElementsKind in array constructor") \
V(kUnexpectedFallthroughFromCharCodeAtSlowCase, \
@@ -1338,16 +1382,20 @@ class MaybeObject BASE_EMBEDDED {
"Unexpected initial map for InternalArray function") \
V(kUnexpectedLevelAfterReturnFromApiCall, \
"Unexpected level after return from api call") \
+ V(kUnexpectedNegativeValue, "Unexpected negative value") \
V(kUnexpectedNumberOfPreAllocatedPropertyFields, \
"Unexpected number of pre-allocated property fields") \
+ V(kUnexpectedSmi, "Unexpected smi value") \
V(kUnexpectedStringFunction, "Unexpected String function") \
V(kUnexpectedStringType, "Unexpected string type") \
V(kUnexpectedStringWrapperInstanceSize, \
"Unexpected string wrapper instance size") \
V(kUnexpectedTypeForRegExpDataFixedArrayExpected, \
"Unexpected type for RegExp data, FixedArray expected") \
+ V(kUnexpectedValue, "Unexpected value") \
V(kUnexpectedUnusedPropertiesOfStringWrapper, \
"Unexpected unused properties of string wrapper") \
+ V(kUnimplemented, "unimplemented") \
V(kUninitializedKSmiConstantRegister, "Uninitialized kSmiConstantRegister") \
V(kUnknown, "Unknown") \
V(kUnsupportedConstCompoundAssignment, \
@@ -1487,6 +1535,8 @@ class Object : public MaybeObject {
// Converts this to a Smi if possible.
// Failure is returned otherwise.
+ static MUST_USE_RESULT inline Handle<Object> ToSmi(Isolate* isolate,
+ Handle<Object> object);
MUST_USE_RESULT inline MaybeObject* ToSmi();
void Lookup(Name* name, LookupResult* result);
@@ -1530,16 +1580,20 @@ class Object : public MaybeObject {
MUST_USE_RESULT MaybeObject* GetPropertyWithDefinedGetter(Object* receiver,
JSReceiver* getter);
- static Handle<Object> GetElement(Isolate* isolate,
- Handle<Object> object,
- uint32_t index);
- MUST_USE_RESULT inline MaybeObject* GetElement(Isolate* isolate,
- uint32_t index);
+ static inline Handle<Object> GetElement(Isolate* isolate,
+ Handle<Object> object,
+ uint32_t index);
+
// For use when we know that no exception can be thrown.
- inline Object* GetElementNoExceptionThrown(Isolate* isolate, uint32_t index);
- MUST_USE_RESULT MaybeObject* GetElementWithReceiver(Isolate* isolate,
- Object* receiver,
- uint32_t index);
+ static inline Handle<Object> GetElementNoExceptionThrown(
+ Isolate* isolate,
+ Handle<Object> object,
+ uint32_t index);
+
+ static Handle<Object> GetElementWithReceiver(Isolate* isolate,
+ Handle<Object> object,
+ Handle<Object> receiver,
+ uint32_t index);
// Return the object's prototype (might be Heap::null_value()).
Object* GetPrototype(Isolate* isolate);
@@ -1675,15 +1729,11 @@ class Failure: public MaybeObject {
inline AllocationSpace allocation_space() const;
inline bool IsInternalError() const;
- inline bool IsOutOfMemoryException() const;
static inline Failure* RetryAfterGC(AllocationSpace space);
static inline Failure* RetryAfterGC(); // NEW_SPACE
static inline Failure* Exception();
static inline Failure* InternalError();
- // TODO(jkummerow): The value is temporary instrumentation. Remove it
- // when it has served its purpose.
- static inline Failure* OutOfMemoryException(intptr_t value);
// Casting.
static inline Failure* cast(MaybeObject* object);
@@ -1848,6 +1898,8 @@ class HeapObject: public Object {
inline void IteratePointers(ObjectVisitor* v, int start, int end);
// as above, for the single element at "offset"
inline void IteratePointer(ObjectVisitor* v, int offset);
+ // as above, for the next code link of a code object.
+ inline void IterateNextCodeLink(ObjectVisitor* v, int offset);
private:
DISALLOW_IMPLICIT_CONSTRUCTORS(HeapObject);
@@ -1998,14 +2050,14 @@ class JSReceiver: public HeapObject {
Handle<Name> key,
Handle<Object> value,
PropertyAttributes attributes,
- StrictModeFlag strict_mode,
+ StrictMode strict_mode,
StoreFromKeyed store_mode =
MAY_BE_STORE_FROM_KEYED);
static Handle<Object> SetElement(Handle<JSReceiver> object,
uint32_t index,
Handle<Object> value,
PropertyAttributes attributes,
- StrictModeFlag strict_mode);
+ StrictMode strict_mode);
// Implementation of [[HasProperty]], ECMA-262 5th edition, section 8.12.6.
static inline bool HasProperty(Handle<JSReceiver> object, Handle<Name> name);
@@ -2031,13 +2083,23 @@ class JSReceiver: public HeapObject {
// function that was used to instantiate the object).
String* constructor_name();
- inline PropertyAttributes GetPropertyAttribute(Name* name);
- PropertyAttributes GetPropertyAttributeWithReceiver(JSReceiver* receiver,
- Name* name);
- PropertyAttributes GetLocalPropertyAttribute(Name* name);
+ static inline PropertyAttributes GetPropertyAttribute(
+ Handle<JSReceiver> object,
+ Handle<Name> name);
+ static PropertyAttributes GetPropertyAttributeWithReceiver(
+ Handle<JSReceiver> object,
+ Handle<JSReceiver> receiver,
+ Handle<Name> name);
+ static PropertyAttributes GetLocalPropertyAttribute(
+ Handle<JSReceiver> object,
+ Handle<Name> name);
- inline PropertyAttributes GetElementAttribute(uint32_t index);
- inline PropertyAttributes GetLocalElementAttribute(uint32_t index);
+ static inline PropertyAttributes GetElementAttribute(
+ Handle<JSReceiver> object,
+ uint32_t index);
+ static inline PropertyAttributes GetLocalElementAttribute(
+ Handle<JSReceiver> object,
+ uint32_t index);
// Return the object's prototype (might be Heap::null_value()).
inline Object* GetPrototype();
@@ -2068,17 +2130,19 @@ class JSReceiver: public HeapObject {
Handle<Object> value);
private:
- PropertyAttributes GetPropertyAttributeForResult(JSReceiver* receiver,
- LookupResult* result,
- Name* name,
- bool continue_search);
+ static PropertyAttributes GetPropertyAttributeForResult(
+ Handle<JSReceiver> object,
+ Handle<JSReceiver> receiver,
+ LookupResult* result,
+ Handle<Name> name,
+ bool continue_search);
static Handle<Object> SetProperty(Handle<JSReceiver> receiver,
LookupResult* result,
Handle<Name> key,
Handle<Object> value,
PropertyAttributes attributes,
- StrictModeFlag strict_mode,
+ StrictMode strict_mode,
StoreFromKeyed store_from_keyed);
DISALLOW_IMPLICIT_CONSTRUCTORS(JSReceiver);
@@ -2110,14 +2174,14 @@ class JSObject: public JSReceiver {
// In the fast mode elements is a FixedArray and so each element can
// be quickly accessed. This fact is used in the generated code. The
// elements array can have one of three maps in this mode:
- // fixed_array_map, non_strict_arguments_elements_map or
+ // fixed_array_map, sloppy_arguments_elements_map or
// fixed_cow_array_map (for copy-on-write arrays). In the latter case
// the elements array may be shared by a few objects and so before
// writing to any element the array must be copied. Use
// EnsureWritableFastElements in this case.
//
// In the slow mode the elements is either a NumberDictionary, an
- // ExternalArray, or a FixedArray parameter map for a (non-strict)
+ // ExternalArray, or a FixedArray parameter map for a (sloppy)
// arguments object.
DECL_ACCESSORS(elements, FixedArrayBase)
inline void initialize_elements();
@@ -2139,7 +2203,7 @@ class JSObject: public JSReceiver {
// Returns true if an object has elements of FAST_HOLEY_*_ELEMENTS
// ElementsKind.
inline bool HasFastHoleyElements();
- inline bool HasNonStrictArgumentsElements();
+ inline bool HasSloppyArgumentsElements();
inline bool HasDictionaryElements();
inline bool HasExternalUint8ClampedElements();
@@ -2155,6 +2219,17 @@ class JSObject: public JSReceiver {
inline bool HasFixedTypedArrayElements();
+ inline bool HasFixedUint8ClampedElements();
+ inline bool HasFixedArrayElements();
+ inline bool HasFixedInt8Elements();
+ inline bool HasFixedUint8Elements();
+ inline bool HasFixedInt16Elements();
+ inline bool HasFixedUint16Elements();
+ inline bool HasFixedInt32Elements();
+ inline bool HasFixedUint32Elements();
+ inline bool HasFixedFloat32Elements();
+ inline bool HasFixedFloat64Elements();
+
bool HasFastArgumentsElements();
bool HasDictionaryArgumentsElements();
inline SeededNumberDictionary* element_dictionary(); // Gets slow elements.
@@ -2191,14 +2266,14 @@ class JSObject: public JSReceiver {
Handle<Name> name,
Handle<Object> value,
Handle<JSObject> holder,
- StrictModeFlag strict_mode);
+ StrictMode strict_mode);
static Handle<Object> SetPropertyWithInterceptor(
Handle<JSObject> object,
Handle<Name> name,
Handle<Object> value,
PropertyAttributes attributes,
- StrictModeFlag strict_mode);
+ StrictMode strict_mode);
static Handle<Object> SetPropertyForResult(
Handle<JSObject> object,
@@ -2206,7 +2281,7 @@ class JSObject: public JSReceiver {
Handle<Name> name,
Handle<Object> value,
PropertyAttributes attributes,
- StrictModeFlag strict_mode,
+ StrictMode strict_mode,
StoreFromKeyed store_mode = MAY_BE_STORE_FROM_KEYED);
static Handle<Object> SetLocalPropertyIgnoreAttributes(
@@ -2240,12 +2315,12 @@ class JSObject: public JSReceiver {
// Retrieve a value in a normalized object given a lookup result.
// Handles the special representation of JS global objects.
- Object* GetNormalizedProperty(LookupResult* result);
+ Object* GetNormalizedProperty(const LookupResult* result);
// Sets the property value in a normalized object given a lookup result.
// Handles the special representation of JS global objects.
static void SetNormalizedProperty(Handle<JSObject> object,
- LookupResult* result,
+ const LookupResult* result,
Handle<Object> value);
// Sets the property value in a normalized object given (key, value, details).
@@ -2262,20 +2337,26 @@ class JSObject: public JSReceiver {
InterceptorInfo* GetIndexedInterceptor();
// Used from JSReceiver.
- PropertyAttributes GetPropertyAttributePostInterceptor(JSObject* receiver,
- Name* name,
- bool continue_search);
- PropertyAttributes GetPropertyAttributeWithInterceptor(JSObject* receiver,
- Name* name,
- bool continue_search);
- PropertyAttributes GetPropertyAttributeWithFailedAccessCheck(
- Object* receiver,
+ static PropertyAttributes GetPropertyAttributePostInterceptor(
+ Handle<JSObject> object,
+ Handle<JSObject> receiver,
+ Handle<Name> name,
+ bool continue_search);
+ static PropertyAttributes GetPropertyAttributeWithInterceptor(
+ Handle<JSObject> object,
+ Handle<JSObject> receiver,
+ Handle<Name> name,
+ bool continue_search);
+ static PropertyAttributes GetPropertyAttributeWithFailedAccessCheck(
+ Handle<JSObject> object,
LookupResult* result,
- Name* name,
+ Handle<Name> name,
+ bool continue_search);
+ static PropertyAttributes GetElementAttributeWithReceiver(
+ Handle<JSObject> object,
+ Handle<JSReceiver> receiver,
+ uint32_t index,
bool continue_search);
- PropertyAttributes GetElementAttributeWithReceiver(JSReceiver* receiver,
- uint32_t index,
- bool continue_search);
// Retrieves an AccessorPair property from the given object. Might return
// undefined if the property doesn't exist or is of a different kind.
@@ -2316,10 +2397,6 @@ class JSObject: public JSReceiver {
// been modified since it was created. May give false positives.
bool IsDirty();
- // If the receiver is a JSGlobalProxy this method will return its prototype,
- // otherwise the result is the receiver itself.
- inline Object* BypassGlobalProxy();
-
// Accessors for hidden properties object.
//
// Hidden properties are not local properties of the object itself.
@@ -2343,7 +2420,7 @@ class JSObject: public JSReceiver {
static void DeleteHiddenProperty(Handle<JSObject> object,
Handle<Name> key);
// Returns true if the object has a property with the hidden string as name.
- bool HasHiddenProperties();
+ static bool HasHiddenProperties(Handle<JSObject> object);
static void SetIdentityHash(Handle<JSObject> object, Handle<Smi> hash);
@@ -2353,20 +2430,26 @@ class JSObject: public JSReceiver {
static inline void EnsureCanContainHeapObjectElements(Handle<JSObject> obj);
// Makes sure that this object can contain the specified elements.
- MUST_USE_RESULT inline MaybeObject* EnsureCanContainElements(
+ static inline void EnsureCanContainElements(
+ Handle<JSObject> object,
Object** elements,
uint32_t count,
EnsureElementsMode mode);
- MUST_USE_RESULT inline MaybeObject* EnsureCanContainElements(
- FixedArrayBase* elements,
+ static inline void EnsureCanContainElements(
+ Handle<JSObject> object,
+ Handle<FixedArrayBase> elements,
uint32_t length,
EnsureElementsMode mode);
- MUST_USE_RESULT MaybeObject* EnsureCanContainElements(
+ static void EnsureCanContainElements(
+ Handle<JSObject> object,
Arguments* arguments,
uint32_t first_arg,
uint32_t arg_count,
EnsureElementsMode mode);
+ // Would we convert a fast elements array to dictionary mode given
+ // an access at key?
+ bool WouldConvertToSlowElements(Handle<Object> key);
// Do we want to keep the elements in fast case when increasing the
// capacity?
bool ShouldConvertToSlowElements(int new_capacity);
@@ -2392,13 +2475,13 @@ class JSObject: public JSReceiver {
static Handle<Object> SetFastElement(Handle<JSObject> object, uint32_t index,
Handle<Object> value,
- StrictModeFlag strict_mode,
+ StrictMode strict_mode,
bool check_prototype);
static Handle<Object> SetOwnElement(Handle<JSObject> object,
uint32_t index,
Handle<Object> value,
- StrictModeFlag strict_mode);
+ StrictMode strict_mode);
// Empty handle is returned if the element cannot be set to the given value.
static Handle<Object> SetElement(
@@ -2406,14 +2489,15 @@ class JSObject: public JSReceiver {
uint32_t index,
Handle<Object> value,
PropertyAttributes attributes,
- StrictModeFlag strict_mode,
+ StrictMode strict_mode,
bool check_prototype = true,
SetPropertyMode set_mode = SET_PROPERTY);
// Returns the index'th element.
// The undefined object if index is out of bounds.
- MUST_USE_RESULT MaybeObject* GetElementWithInterceptor(Object* receiver,
- uint32_t index);
+ static Handle<Object> GetElementWithInterceptor(Handle<JSObject> object,
+ Handle<Object> receiver,
+ uint32_t index);
enum SetFastElementsCapacitySmiMode {
kAllowSmiElements,
@@ -2421,15 +2505,11 @@ class JSObject: public JSReceiver {
kDontAllowSmiElements
};
- static Handle<FixedArray> SetFastElementsCapacityAndLength(
- Handle<JSObject> object,
- int capacity,
- int length,
- SetFastElementsCapacitySmiMode smi_mode);
// Replace the elements' backing store with fast elements of the given
// capacity. Update the length for JSArrays. Returns the new backing
// store.
- MUST_USE_RESULT MaybeObject* SetFastElementsCapacityAndLength(
+ static Handle<FixedArray> SetFastElementsCapacityAndLength(
+ Handle<JSObject> object,
int capacity,
int length,
SetFastElementsCapacitySmiMode smi_mode);
@@ -2505,8 +2585,6 @@ class JSObject: public JSReceiver {
static void TransitionElementsKind(Handle<JSObject> object,
ElementsKind to_kind);
- MUST_USE_RESULT MaybeObject* TransitionElementsKind(ElementsKind to_kind);
-
// TODO(mstarzinger): Both public because of ConvertAndSetLocalProperty().
static void MigrateToMap(Handle<JSObject> object, Handle<Map> new_map);
static void GeneralizeFieldRepresentation(Handle<JSObject> object,
@@ -2527,8 +2605,6 @@ class JSObject: public JSReceiver {
static Handle<SeededNumberDictionary> NormalizeElements(
Handle<JSObject> object);
- MUST_USE_RESULT MaybeObject* NormalizeElements();
-
// Transform slow named properties to fast variants.
static void TransformToFastProperties(Handle<JSObject> object,
int unused_property_fields);
@@ -2600,9 +2676,10 @@ class JSObject: public JSReceiver {
void PrintTransitions(FILE* out = stdout);
#endif
- void PrintElementsTransition(
- FILE* file, ElementsKind from_kind, FixedArrayBase* from_elements,
- ElementsKind to_kind, FixedArrayBase* to_elements);
+ static void PrintElementsTransition(
+ FILE* file, Handle<JSObject> object,
+ ElementsKind from_kind, Handle<FixedArrayBase> from_elements,
+ ElementsKind to_kind, Handle<FixedArrayBase> to_elements);
void PrintInstanceMigration(FILE* file, Map* original_map, Map* new_map);
@@ -2699,7 +2776,6 @@ class JSObject: public JSReceiver {
static void UpdateAllocationSite(Handle<JSObject> object,
ElementsKind to_kind);
- MUST_USE_RESULT MaybeObject* UpdateAllocationSite(ElementsKind to_kind);
// Used from Object::GetProperty().
static Handle<Object> GetPropertyWithFailedAccessCheck(
@@ -2713,12 +2789,14 @@ class JSObject: public JSReceiver {
Object* structure,
uint32_t index,
Object* holder);
- MUST_USE_RESULT PropertyAttributes GetElementAttributeWithInterceptor(
- JSReceiver* receiver,
+ static PropertyAttributes GetElementAttributeWithInterceptor(
+ Handle<JSObject> object,
+ Handle<JSReceiver> receiver,
uint32_t index,
bool continue_search);
- MUST_USE_RESULT PropertyAttributes GetElementAttributeWithoutInterceptor(
- JSReceiver* receiver,
+ static PropertyAttributes GetElementAttributeWithoutInterceptor(
+ Handle<JSObject> object,
+ Handle<JSReceiver> receiver,
uint32_t index,
bool continue_search);
static Handle<Object> SetElementWithCallback(
@@ -2727,13 +2805,13 @@ class JSObject: public JSReceiver {
uint32_t index,
Handle<Object> value,
Handle<JSObject> holder,
- StrictModeFlag strict_mode);
+ StrictMode strict_mode);
static Handle<Object> SetElementWithInterceptor(
Handle<JSObject> object,
uint32_t index,
Handle<Object> value,
PropertyAttributes attributes,
- StrictModeFlag strict_mode,
+ StrictMode strict_mode,
bool check_prototype,
SetPropertyMode set_mode);
static Handle<Object> SetElementWithoutInterceptor(
@@ -2741,7 +2819,7 @@ class JSObject: public JSReceiver {
uint32_t index,
Handle<Object> value,
PropertyAttributes attributes,
- StrictModeFlag strict_mode,
+ StrictMode strict_mode,
bool check_prototype,
SetPropertyMode set_mode);
static Handle<Object> SetElementWithCallbackSetterInPrototypes(
@@ -2749,20 +2827,20 @@ class JSObject: public JSReceiver {
uint32_t index,
Handle<Object> value,
bool* found,
- StrictModeFlag strict_mode);
+ StrictMode strict_mode);
static Handle<Object> SetDictionaryElement(
Handle<JSObject> object,
uint32_t index,
Handle<Object> value,
PropertyAttributes attributes,
- StrictModeFlag strict_mode,
+ StrictMode strict_mode,
bool check_prototype,
SetPropertyMode set_mode = SET_PROPERTY);
static Handle<Object> SetFastDoubleElement(
Handle<JSObject> object,
uint32_t index,
Handle<Object> value,
- StrictModeFlag strict_mode,
+ StrictMode strict_mode,
bool check_prototype = true);
// Searches the prototype chain for property 'name'. If it is found and
@@ -2774,14 +2852,14 @@ class JSObject: public JSReceiver {
Handle<Name> name,
Handle<Object> value,
PropertyAttributes attributes,
- StrictModeFlag strict_mode,
+ StrictMode strict_mode,
bool* done);
static Handle<Object> SetPropertyPostInterceptor(
Handle<JSObject> object,
Handle<Name> name,
Handle<Object> value,
PropertyAttributes attributes,
- StrictModeFlag strict_mode);
+ StrictMode strict_mode);
static Handle<Object> SetPropertyUsingTransition(
Handle<JSObject> object,
LookupResult* lookup,
@@ -2794,7 +2872,7 @@ class JSObject: public JSReceiver {
Handle<Name> name,
Handle<Object> value,
bool check_prototype,
- StrictModeFlag strict_mode);
+ StrictMode strict_mode);
// Add a property to an object.
static Handle<Object> AddProperty(
@@ -2802,7 +2880,7 @@ class JSObject: public JSReceiver {
Handle<Name> name,
Handle<Object> value,
PropertyAttributes attributes,
- StrictModeFlag strict_mode,
+ StrictMode strict_mode,
StoreFromKeyed store_mode = MAY_BE_STORE_FROM_KEYED,
ExtensibilityCheck extensibility_check = PERFORM_EXTENSIBILITY_CHECK,
ValueType value_type = OPTIMAL_REPRESENTATION,
@@ -2830,15 +2908,6 @@ class JSObject: public JSReceiver {
ValueType value_type,
TransitionFlag flag);
- // Add a property to a fast-case object using a map transition to
- // new_map.
- static void AddFastPropertyUsingMap(Handle<JSObject> object,
- Handle<Map> new_map,
- Handle<Name> name,
- Handle<Object> value,
- int field_index,
- Representation representation);
-
// Add a property to a slow-case object.
static void AddSlowProperty(Handle<JSObject> object,
Handle<Name> name,
@@ -2875,7 +2944,7 @@ class JSObject: public JSReceiver {
// Gets the current elements capacity and the number of used elements.
void GetElementsCapacityAndUsage(int* capacity, int* used);
- bool CanSetCallback(Name* name);
+ static bool CanSetCallback(Handle<JSObject> object, Handle<Name> name);
static void SetElementCallback(Handle<JSObject> object,
uint32_t index,
Handle<Object> structure,
@@ -3068,6 +3137,8 @@ class FixedDoubleArray: public FixedArrayBase {
inline double get_scalar(int index);
inline int64_t get_representation(int index);
MUST_USE_RESULT inline MaybeObject* get(int index);
+ // TODO(ishell): Rename as get() once all usages handlified.
+ inline Handle<Object> get_as_handle(int index);
inline void set(int index, double value);
inline void set_the_hole(int index);
@@ -3114,29 +3185,35 @@ class FixedDoubleArray: public FixedArrayBase {
// ConstantPoolArray describes a fixed-size array containing constant pool
// entries.
// The format of the pool is:
-// [0]: Field holding the first index which is a pointer entry
-// [1]: Field holding the first index which is a int32 entry
-// [2] ... [first_ptr_index() - 1]: 64 bit entries
-// [first_ptr_index()] ... [first_int32_index() - 1]: pointer entries
-// [first_int32_index()] ... [length - 1]: 32 bit entries
+// [0]: Field holding the first index which is a raw code target pointer entry
+// [1]: Field holding the first index which is a heap pointer entry
+// [2]: Field holding the first index which is a int32 entry
+// [3] ... [first_code_ptr_index() - 1] : 64 bit entries
+// [first_code_ptr_index()] ... [first_heap_ptr_index() - 1] : code pointers
+// [first_heap_ptr_index()] ... [first_int32_index() - 1] : heap pointers
+// [first_int32_index()] ... [length - 1] : 32 bit entries
class ConstantPoolArray: public FixedArrayBase {
public:
// Getters for the field storing the first index for different type entries.
- inline int first_ptr_index();
+ inline int first_code_ptr_index();
+ inline int first_heap_ptr_index();
inline int first_int64_index();
inline int first_int32_index();
// Getters for counts of different type entries.
- inline int count_of_ptr_entries();
+ inline int count_of_code_ptr_entries();
+ inline int count_of_heap_ptr_entries();
inline int count_of_int64_entries();
inline int count_of_int32_entries();
// Setter and getter for pool elements.
- inline Object* get_ptr_entry(int index);
+ inline Address get_code_ptr_entry(int index);
+ inline Object* get_heap_ptr_entry(int index);
inline int64_t get_int64_entry(int index);
inline int32_t get_int32_entry(int index);
inline double get_int64_entry_as_double(int index);
+ inline void set(int index, Address value);
inline void set(int index, Object* value);
inline void set(int index, int64_t value);
inline void set(int index, double value);
@@ -3144,7 +3221,8 @@ class ConstantPoolArray: public FixedArrayBase {
// Set up initial state.
inline void SetEntryCounts(int number_of_int64_entries,
- int number_of_ptr_entries,
+ int number_of_code_ptr_entries,
+ int number_of_heap_ptr_entries,
int number_of_int32_entries);
// Copy operations
@@ -3152,10 +3230,12 @@ class ConstantPoolArray: public FixedArrayBase {
// Garbage collection support.
inline static int SizeFor(int number_of_int64_entries,
- int number_of_ptr_entries,
+ int number_of_code_ptr_entries,
+ int number_of_heap_ptr_entries,
int number_of_int32_entries) {
return RoundUp(OffsetAt(number_of_int64_entries,
- number_of_ptr_entries,
+ number_of_code_ptr_entries,
+ number_of_heap_ptr_entries,
number_of_int32_entries),
kPointerSize);
}
@@ -3164,22 +3244,33 @@ class ConstantPoolArray: public FixedArrayBase {
inline int OffsetOfElementAt(int index) {
ASSERT(index < length());
if (index >= first_int32_index()) {
- return OffsetAt(count_of_int64_entries(), count_of_ptr_entries(),
- index - first_int32_index());
- } else if (index >= first_ptr_index()) {
- return OffsetAt(count_of_int64_entries(), index - first_ptr_index(), 0);
+ return OffsetAt(count_of_int64_entries(), count_of_code_ptr_entries(),
+ count_of_heap_ptr_entries(), index - first_int32_index());
+ } else if (index >= first_heap_ptr_index()) {
+ return OffsetAt(count_of_int64_entries(), count_of_code_ptr_entries(),
+ index - first_heap_ptr_index(), 0);
+ } else if (index >= first_code_ptr_index()) {
+ return OffsetAt(count_of_int64_entries(), index - first_code_ptr_index(),
+ 0, 0);
} else {
- return OffsetAt(index, 0, 0);
+ return OffsetAt(index, 0, 0, 0);
}
}
// Casting.
static inline ConstantPoolArray* cast(Object* obj);
+ // Garbage collection support.
+ Object** RawFieldOfElementAt(int index) {
+ return HeapObject::RawField(this, OffsetOfElementAt(index));
+ }
+
// Layout description.
- static const int kFirstPointerIndexOffset = FixedArray::kHeaderSize;
+ static const int kFirstCodePointerIndexOffset = FixedArray::kHeaderSize;
+ static const int kFirstHeapPointerIndexOffset =
+ kFirstCodePointerIndexOffset + kPointerSize;
static const int kFirstInt32IndexOffset =
- kFirstPointerIndexOffset + kPointerSize;
+ kFirstHeapPointerIndexOffset + kPointerSize;
static const int kFirstOffset = kFirstInt32IndexOffset + kPointerSize;
// Dispatched behavior.
@@ -3189,15 +3280,18 @@ class ConstantPoolArray: public FixedArrayBase {
DECLARE_VERIFIER(ConstantPoolArray)
private:
- inline void set_first_ptr_index(int value);
+ inline void set_first_code_ptr_index(int value);
+ inline void set_first_heap_ptr_index(int value);
inline void set_first_int32_index(int value);
inline static int OffsetAt(int number_of_int64_entries,
- int number_of_ptr_entries,
+ int number_of_code_ptr_entries,
+ int number_of_heap_ptr_entries,
int number_of_int32_entries) {
return kFirstOffset
+ (number_of_int64_entries * kInt64Size)
- + (number_of_ptr_entries * kPointerSize)
+ + (number_of_code_ptr_entries * kPointerSize)
+ + (number_of_heap_ptr_entries * kPointerSize)
+ (number_of_int32_entries * kInt32Size);
}
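Illustration (not from this diff): with the pool split into int64, code-pointer, heap-pointer and int32 sections, OffsetAt above is a plain prefix sum over the section sizes. A worked example assuming a 64-bit target where kPointerSize and kInt64Size are 8, kInt32Size is 4 and the FixedArray header (map plus length) is 16 bytes, so kFirstOffset is 16 + 3 * 8 = 40:

// Worked example only; constants assume the 64-bit layout described above.
constexpr int kHeaderSketch = 16;                           // map + length
constexpr int kFirstOffsetSketch = kHeaderSketch + 3 * 8;   // three index fields -> 40
constexpr int OffsetAtSketch(int n_int64, int n_code, int n_heap, int n_int32) {
  return kFirstOffsetSketch + n_int64 * 8 + n_code * 8 + n_heap * 8 + n_int32 * 4;
}
// 2 int64 entries [40,56), 1 code ptr [56,64), 3 heap ptrs [64,88), 4 int32 [88,104).
static_assert(OffsetAtSketch(2, 1, 3, 4) == 104, "prefix-sum layout example");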
@@ -3958,7 +4052,7 @@ class NameDictionary: public Dictionary<NameDictionaryShape, Name*> {
}
// Copies enumerable keys to preallocated fixed array.
- FixedArray* CopyEnumKeysTo(FixedArray* storage);
+ void CopyEnumKeysTo(FixedArray* storage);
static void DoGenerateNewEnumerationIndices(
Handle<NameDictionary> dictionary);
@@ -4272,13 +4366,11 @@ class ScopeInfo : public FixedArray {
// Does this scope call eval?
bool CallsEval();
- // Return the language mode of this scope.
- LanguageMode language_mode();
+ // Return the strict mode of this scope.
+ StrictMode strict_mode();
- // Does this scope make a non-strict eval call?
- bool CallsNonStrictEval() {
- return CallsEval() && (language_mode() == CLASSIC_MODE);
- }
+ // Does this scope make a sloppy eval call?
+ bool CallsSloppyEval() { return CallsEval() && strict_mode() == SLOPPY; }
// Return the total number of locals allocated on the stack and in the
// context. This includes the parameters that are allocated in the context.
@@ -4452,9 +4544,9 @@ class ScopeInfo : public FixedArray {
// Properties of scopes.
class ScopeTypeField: public BitField<ScopeType, 0, 3> {};
class CallsEvalField: public BitField<bool, 3, 1> {};
- class LanguageModeField: public BitField<LanguageMode, 4, 2> {};
- class FunctionVariableField: public BitField<FunctionVariableInfo, 6, 2> {};
- class FunctionVariableMode: public BitField<VariableMode, 8, 3> {};
+ class StrictModeField: public BitField<StrictMode, 4, 1> {};
+ class FunctionVariableField: public BitField<FunctionVariableInfo, 5, 2> {};
+ class FunctionVariableMode: public BitField<VariableMode, 7, 3> {};
// BitFields representing the encoded information for context locals in the
// ContextLocalInfoEntries part.
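Illustration (not from this diff): with StrictMode down to one bit, the flag word packs as bits 0-2 scope type, bit 3 calls-eval, bit 4 strict mode, bits 5-6 function variable info, bits 7-9 function variable mode. A plain restatement of that packing; the encode/decode helpers are illustrative, not V8 API:

// Sketch only: the ScopeInfo flag-word layout after this change.
#include <cstdint>

constexpr uint32_t EncodeScopeFlagsSketch(uint32_t scope_type,     // 3 bits
                                          bool calls_eval,         // 1 bit
                                          bool is_strict,          // 1 bit
                                          uint32_t fun_var_info,   // 2 bits
                                          uint32_t fun_var_mode) { // 3 bits
  return (scope_type & 0x7) |
         (static_cast<uint32_t>(calls_eval) << 3) |
         (static_cast<uint32_t>(is_strict) << 4) |
         ((fun_var_info & 0x3) << 5) |
         ((fun_var_mode & 0x7) << 7);
}

constexpr bool DecodeIsStrictSketch(uint32_t flags) {
  return ((flags >> 4) & 0x1) != 0;
}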
@@ -4879,6 +4971,11 @@ class FixedTypedArrayBase: public FixedArrayBase {
inline int size();
+ // Use with care: returns raw pointer into heap.
+ inline void* DataPtr();
+
+ inline int DataSize();
+
private:
DISALLOW_IMPLICIT_CONSTRUCTORS(FixedTypedArrayBase);
};
@@ -4905,6 +5002,9 @@ class FixedTypedArray: public FixedTypedArrayBase {
MUST_USE_RESULT inline MaybeObject* get(int index);
inline void set(int index, ElementType value);
+ static inline ElementType from_int(int value);
+ static inline ElementType from_double(double value);
+
// This accessor applies the correct conversion from Smi, HeapNumber
// and undefined.
MUST_USE_RESULT MaybeObject* SetValue(uint32_t index, Object* value);
@@ -4927,7 +5027,7 @@ class FixedTypedArray: public FixedTypedArrayBase {
static const InstanceType kInstanceType = FIXED_##TYPE##_ARRAY_TYPE; \
static const char* Designator() { return #type " array"; } \
static inline MaybeObject* ToObject(Heap* heap, elementType scalar); \
- static elementType defaultValue() { return 0; } \
+ static inline elementType defaultValue(); \
}; \
\
typedef FixedTypedArray<Type##ArrayTraits> Fixed##Type##Array;
@@ -4951,7 +5051,9 @@ class DeoptimizationInputData: public FixedArray {
static const int kLiteralArrayIndex = 2;
static const int kOsrAstIdIndex = 3;
static const int kOsrPcOffsetIndex = 4;
- static const int kFirstDeoptEntryIndex = 5;
+ static const int kOptimizationIdIndex = 5;
+ static const int kSharedFunctionInfoIndex = 6;
+ static const int kFirstDeoptEntryIndex = 7;
// Offsets of deopt entry elements relative to the start of the entry.
static const int kAstIdRawOffset = 0;
@@ -4974,6 +5076,8 @@ class DeoptimizationInputData: public FixedArray {
DEFINE_ELEMENT_ACCESSORS(LiteralArray, FixedArray)
DEFINE_ELEMENT_ACCESSORS(OsrAstId, Smi)
DEFINE_ELEMENT_ACCESSORS(OsrPcOffset, Smi)
+ DEFINE_ELEMENT_ACCESSORS(OptimizationId, Smi)
+ DEFINE_ELEMENT_ACCESSORS(SharedFunctionInfo, Object)
#undef DEFINE_ELEMENT_ACCESSORS
@@ -5069,49 +5173,6 @@ class DeoptimizationOutputData: public FixedArray {
// Forward declaration.
class Cell;
class PropertyCell;
-
-// TypeFeedbackCells is a fixed array used to hold the association between
-// cache cells and AST ids for code generated by the full compiler.
-// The format of the these objects is
-// [i * 2]: Global property cell of ith cache cell.
-// [i * 2 + 1]: Ast ID for ith cache cell.
-class TypeFeedbackCells: public FixedArray {
- public:
- int CellCount() { return length() / 2; }
- static int LengthOfFixedArray(int cell_count) { return cell_count * 2; }
-
- // Accessors for AST ids associated with cache values.
- inline TypeFeedbackId AstId(int index);
- inline void SetAstId(int index, TypeFeedbackId id);
-
- // Accessors for global property cells holding the cache values.
- inline Cell* GetCell(int index);
- inline void SetCell(int index, Cell* cell);
-
- // The object that indicates an uninitialized cache.
- static inline Handle<Object> UninitializedSentinel(Isolate* isolate);
-
- // The object that indicates a megamorphic state.
- static inline Handle<Object> MegamorphicSentinel(Isolate* isolate);
-
- // The object that indicates a monomorphic state of Array with
- // ElementsKind
- static inline Handle<Object> MonomorphicArraySentinel(Isolate* isolate,
- ElementsKind elements_kind);
-
- // A raw version of the uninitialized sentinel that's safe to read during
- // garbage collection (e.g., for patching the cache).
- static inline Object* RawUninitializedSentinel(Heap* heap);
-
- // Casting.
- static inline TypeFeedbackCells* cast(Object* obj);
-
- static const int kForInFastCaseMarker = 0;
- static const int kForInSlowCaseMarker = 1;
-};
-
-
-// Forward declaration.
class SafepointEntry;
class TypeFeedbackInfo;
@@ -5192,7 +5253,6 @@ class Code: public HeapObject {
// the kind of the code object.
// FUNCTION => type feedback information.
// STUB => various things, e.g. a SMI
- // OPTIMIZED_FUNCTION => the next_code_link for optimized code list.
DECL_ACCESSORS(raw_type_feedback_info, Object)
inline Object* type_feedback_info();
inline void set_type_feedback_info(
@@ -5230,24 +5290,10 @@ class Code: public HeapObject {
// [flags]: Access to specific code flags.
inline Kind kind();
- inline Kind handler_kind() {
- return static_cast<Kind>(arguments_count());
- }
inline InlineCacheState ic_state(); // Only valid for IC stubs.
inline ExtraICState extra_ic_state(); // Only valid for IC stubs.
- inline ExtraICState extended_extra_ic_state(); // Only valid for
- // non-call IC stubs.
- static bool needs_extended_extra_ic_state(Kind kind) {
- // TODO(danno): This is a bit of a hack right now since there are still
- // clients of this API that pass "extra" values in for argc. These clients
- // should be retrofitted to used ExtendedExtraICState.
- return kind == COMPARE_NIL_IC || kind == TO_BOOLEAN_IC ||
- kind == BINARY_OP_IC;
- }
-
inline StubType type(); // Only valid for monomorphic IC stubs.
- inline int arguments_count(); // Only valid for call IC stubs.
// Testers for IC stub kinds.
inline bool is_inline_cache_stub();
@@ -5262,6 +5308,7 @@ class Code: public HeapObject {
inline bool is_compare_nil_ic_stub() { return kind() == COMPARE_NIL_IC; }
inline bool is_to_boolean_ic_stub() { return kind() == TO_BOOLEAN_IC; }
inline bool is_keyed_stub();
+ inline bool is_optimized_code() { return kind() == OPTIMIZED_FUNCTION; }
inline void set_raw_kind_specific_flags1(int value);
inline void set_raw_kind_specific_flags2(int value);
@@ -5348,7 +5395,6 @@ class Code: public HeapObject {
// Find an object in a stub with a specified map
Object* FindNthObject(int n, Map* match_map);
- void ReplaceNthObject(int n, Map* match_map, Object* replace_with);
// Find the first allocation site in an IC stub.
AllocationSite* FindFirstAllocationSite();
@@ -5357,7 +5403,6 @@ class Code: public HeapObject {
Map* FindFirstMap();
void FindAllMaps(MapHandleList* maps);
void FindAllTypes(TypeHandleList* types);
- void ReplaceFirstMap(Map* replace);
// Find the first handler in an IC stub.
Code* FindFirstHandler();
@@ -5369,7 +5414,12 @@ class Code: public HeapObject {
// Find the first name in an IC stub.
Name* FindFirstName();
- void ReplaceNthCell(int n, Cell* replace_with);
+ class FindAndReplacePattern;
+ // For each (map-to-find, object-to-replace) pair in the pattern, this
+ // function replaces the corresponding placeholder in the code with the
+ // object-to-replace. The function assumes that pairs in the pattern come in
+ // the same order as the placeholders in the code.
+ void FindAndReplace(const FindAndReplacePattern& pattern);
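
The comment above pins down an ordered contract: each (map-to-find, object-to-replace) pair is consumed in the same order as the placeholders occur in the code object. A small sketch of that contract over plain std::string slots, with all names invented for illustration; it is not the V8 implementation of FindAndReplacePattern:

// Sketch of the ordered find-and-replace contract described above,
// written against std::string placeholders instead of Maps and heap
// objects. Pairs are consumed strictly in order, matching the
// assumption that placeholders appear in that same order in the code.
#include <cassert>
#include <string>
#include <utility>
#include <vector>

void FindAndReplaceInOrder(
    std::vector<std::string>* slots,
    const std::vector<std::pair<std::string, std::string> >& pattern) {
  size_t next = 0;  // next pattern pair to satisfy
  for (size_t i = 0; i < slots->size() && next < pattern.size(); ++i) {
    if ((*slots)[i] == pattern[next].first) {
      (*slots)[i] = pattern[next].second;
      ++next;
    }
  }
  assert(next == pattern.size());  // every pair must have been used
}

int main() {
  std::vector<std::string> slots = {"A", "X", "B", "X"};
  FindAndReplaceInOrder(&slots, {{"A", "a"}, {"B", "b"}});
  assert(slots[0] == "a" && slots[2] == "b");
  return 0;
}
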
// The entire code object including its header is copied verbatim to the
// snapshot so that it can be written in one, fast, memcpy during
@@ -5386,23 +5436,24 @@ class Code: public HeapObject {
InlineCacheState ic_state = UNINITIALIZED,
ExtraICState extra_ic_state = kNoExtraICState,
StubType type = NORMAL,
- int argc = -1,
InlineCacheHolderFlag holder = OWN_MAP);
static inline Flags ComputeMonomorphicFlags(
Kind kind,
ExtraICState extra_ic_state = kNoExtraICState,
InlineCacheHolderFlag holder = OWN_MAP,
+ StubType type = NORMAL);
+
+ static inline Flags ComputeHandlerFlags(
+ Kind handler_kind,
StubType type = NORMAL,
- int argc = -1);
+ InlineCacheHolderFlag holder = OWN_MAP);
static inline InlineCacheState ExtractICStateFromFlags(Flags flags);
static inline StubType ExtractTypeFromFlags(Flags flags);
static inline Kind ExtractKindFromFlags(Flags flags);
static inline InlineCacheHolderFlag ExtractCacheHolderFromFlags(Flags flags);
static inline ExtraICState ExtractExtraICStateFromFlags(Flags flags);
- static inline ExtraICState ExtractExtendedExtraICStateFromFlags(Flags flags);
- static inline int ExtractArgumentsCountFromFlags(Flags flags);
static inline Flags RemoveTypeFromFlags(Flags flags);
@@ -5472,7 +5523,7 @@ class Code: public HeapObject {
void ClearInlineCaches();
void ClearInlineCaches(Kind kind);
- void ClearTypeFeedbackCells(Heap* heap);
+ void ClearTypeFeedbackInfo(Heap* heap);
BailoutId TranslatePcOffsetToAstId(uint32_t pc_offset);
uint32_t TranslateAstIdToPcOffset(BailoutId ast_id);
@@ -5516,7 +5567,11 @@ class Code: public HeapObject {
void VerifyEmbeddedObjectsDependency();
#endif
- static bool IsWeakEmbeddedObject(Kind kind, Object* object);
+ inline bool IsWeakObject(Object* object) {
+ return is_optimized_code() && IsWeakObjectInOptimizedCode(object);
+ }
+
+ inline bool IsWeakObjectInOptimizedCode(Object* object);
  // Max loop nesting marker used to postpone OSR. We don't take loop
// nesting that is deeper than 5 levels into account.
@@ -5530,8 +5585,8 @@ class Code: public HeapObject {
kHandlerTableOffset + kPointerSize;
static const int kTypeFeedbackInfoOffset =
kDeoptimizationDataOffset + kPointerSize;
- static const int kNextCodeLinkOffset = kTypeFeedbackInfoOffset; // Shared.
- static const int kGCMetadataOffset = kTypeFeedbackInfoOffset + kPointerSize;
+ static const int kNextCodeLinkOffset = kTypeFeedbackInfoOffset + kPointerSize;
+ static const int kGCMetadataOffset = kNextCodeLinkOffset + kPointerSize;
static const int kICAgeOffset =
kGCMetadataOffset + kPointerSize;
static const int kFlagsOffset = kICAgeOffset + kIntSize;
@@ -5567,10 +5622,8 @@ class Code: public HeapObject {
class CacheHolderField: public BitField<InlineCacheHolderFlag, 5, 1> {};
class KindField: public BitField<Kind, 6, 4> {};
// TODO(bmeurer): Bit 10 is available for free use. :-)
- class ExtraICStateField: public BitField<ExtraICState, 11, 6> {};
- class ExtendedExtraICStateField: public BitField<ExtraICState, 11,
+ class ExtraICStateField: public BitField<ExtraICState, 11,
PlatformSmiTagging::kSmiValueSize - 11 + 1> {}; // NOLINT
- STATIC_ASSERT(ExtraICStateField::kShift == ExtendedExtraICStateField::kShift);
// KindSpecificFlags1 layout (STUB and OPTIMIZED_FUNCTION)
static const int kStackSlotsFirstBit = 0;
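
With the argument-count field and ExtendedExtraICStateField gone, ExtraICStateField simply widens to fill the rest of the Smi payload. For reference, a minimal standalone bit-field helper with the encode/decode/update operations the flag fields in this hunk rely on, using the KindField and CacheHolderField positions shown above; the real BitField template lives in utils.h and is only assumed here to have this shape:

// Minimal sketch of a BitField<T, shift, size>-style helper. This is
// an illustration, not the definition V8 uses.
#include <cassert>
#include <stdint.h>

template <class T, int shift, int size>
struct BitFieldSketch {
  static const uint32_t kMask = ((1u << size) - 1u) << shift;

  static uint32_t encode(T value) {
    return static_cast<uint32_t>(value) << shift;
  }
  static T decode(uint32_t word) {
    return static_cast<T>((word & kMask) >> shift);
  }
  static uint32_t update(uint32_t word, T value) {
    return (word & ~kMask) | encode(value);
  }
};

int main() {
  // Pack a kind (4 bits at 6) and a cache-holder flag (1 bit at 5),
  // mirroring KindField and CacheHolderField above.
  typedef BitFieldSketch<int, 6, 4> Kind;
  typedef BitFieldSketch<bool, 5, 1> Holder;
  uint32_t flags = Kind::encode(9) | Holder::encode(true);
  assert(Kind::decode(flags) == 9);
  flags = Kind::update(flags, 3);
  assert(Kind::decode(flags) == 3 && Holder::decode(flags));
  return 0;
}
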
@@ -5624,26 +5677,16 @@ class Code: public HeapObject {
class BackEdgesPatchedForOSRField: public BitField<bool,
kIsCrankshaftedBit + 1 + 29, 1> {}; // NOLINT
- // Signed field cannot be encoded using the BitField class.
- static const int kArgumentsCountShift = 17;
- static const int kArgumentsCountMask = ~((1 << kArgumentsCountShift) - 1);
- static const int kArgumentsBits =
- PlatformSmiTagging::kSmiValueSize - Code::kArgumentsCountShift + 1;
+ static const int kArgumentsBits = 16;
static const int kMaxArguments = (1 << kArgumentsBits) - 1;
- // ICs can use either argument count or ExtendedExtraIC, since their storage
- // overlaps.
- STATIC_ASSERT(ExtraICStateField::kShift +
- ExtraICStateField::kSize + kArgumentsBits ==
- ExtendedExtraICStateField::kShift +
- ExtendedExtraICStateField::kSize);
-
// This constant should be encodable in an ARM instruction.
static const int kFlagsNotUsedInLookup =
TypeField::kMask | CacheHolderField::kMask;
private:
friend class RelocIterator;
+ friend class Deoptimizer; // For FindCodeAgeSequence.
void ClearInlineCaches(Kind* kind);
@@ -5926,8 +5969,8 @@ class Map: public HeapObject {
return IsFastElementsKind(elements_kind());
}
- inline bool has_non_strict_arguments_elements() {
- return elements_kind() == NON_STRICT_ARGUMENTS_ELEMENTS;
+ inline bool has_sloppy_arguments_elements() {
+ return elements_kind() == SLOPPY_ARGUMENTS_ELEMENTS;
}
inline bool has_external_array_elements() {
@@ -5944,7 +5987,7 @@ class Map: public HeapObject {
inline bool has_slow_elements_kind() {
return elements_kind() == DICTIONARY_ELEMENTS
- || elements_kind() == NON_STRICT_ARGUMENTS_ELEMENTS;
+ || elements_kind() == SLOPPY_ARGUMENTS_ELEMENTS;
}
static bool IsValidElementsTransition(ElementsKind from_kind,
@@ -6203,8 +6246,11 @@ class Map: public HeapObject {
Descriptor* descriptor,
int index,
TransitionFlag flag);
+
MUST_USE_RESULT MaybeObject* AsElementsKind(ElementsKind kind);
+ static Handle<Map> AsElementsKind(Handle<Map> map, ElementsKind kind);
+
MUST_USE_RESULT MaybeObject* CopyAsElementsKind(ElementsKind kind,
TransitionFlag flag);
@@ -6528,9 +6574,6 @@ class Script: public Struct {
// extracted.
DECL_ACCESSORS(column_offset, Smi)
- // [data]: additional data associated with this script.
- DECL_ACCESSORS(data, Object)
-
// [context_data]: context data for the context this script was compiled in.
DECL_ACCESSORS(context_data, Object)
@@ -6584,8 +6627,7 @@ class Script: public Struct {
static const int kNameOffset = kSourceOffset + kPointerSize;
static const int kLineOffsetOffset = kNameOffset + kPointerSize;
static const int kColumnOffsetOffset = kLineOffsetOffset + kPointerSize;
- static const int kDataOffset = kColumnOffsetOffset + kPointerSize;
- static const int kContextOffset = kDataOffset + kPointerSize;
+ static const int kContextOffset = kColumnOffsetOffset + kPointerSize;
static const int kWrapperOffset = kContextOffset + kPointerSize;
static const int kTypeOffset = kWrapperOffset + kPointerSize;
static const int kLineEndsOffset = kTypeOffset + kPointerSize;
@@ -6643,7 +6685,9 @@ enum BuiltinFunctionId {
#undef DECLARE_FUNCTION_ID
// Fake id for a special case of Math.pow. Note, it continues the
// list of math functions.
- kMathPowHalf
+ kMathPowHalf,
+ // Installed only on --harmony-maths.
+ kMathClz32
};
@@ -6701,13 +6745,6 @@ class SharedFunctionInfo: public HeapObject {
static const int kLiteralsOffset = 2;
static const int kOsrAstIdOffset = 3;
static const int kEntryLength = 4;
- static const int kFirstContextSlot = FixedArray::kHeaderSize +
- (kEntriesStart + kContextOffset) * kPointerSize;
- static const int kFirstCodeSlot = FixedArray::kHeaderSize +
- (kEntriesStart + kCachedCodeOffset) * kPointerSize;
- static const int kFirstOsrAstIdSlot = FixedArray::kHeaderSize +
- (kEntriesStart + kOsrAstIdOffset) * kPointerSize;
- static const int kSecondEntryIndex = kEntryLength + kEntriesStart;
static const int kInitialLength = kEntriesStart + kEntryLength;
// [scope_info]: Scope info.
@@ -6926,20 +6963,9 @@ class SharedFunctionInfo: public HeapObject {
// spending time attempting to optimize it again.
DECL_BOOLEAN_ACCESSORS(optimization_disabled)
- // Indicates the language mode of the function's code as defined by the
- // current harmony drafts for the next ES language standard. Possible
- // values are:
- // 1. CLASSIC_MODE - Unrestricted syntax and semantics, same as in ES5.
- // 2. STRICT_MODE - Restricted syntax and semantics, same as in ES5.
- // 3. EXTENDED_MODE - Only available under the harmony flag, not part of ES5.
- inline LanguageMode language_mode();
- inline void set_language_mode(LanguageMode language_mode);
-
- // Indicates whether the language mode of this function is CLASSIC_MODE.
- inline bool is_classic_mode();
-
- // Indicates whether the language mode of this function is EXTENDED_MODE.
- inline bool is_extended_mode();
+ // Indicates the language mode.
+ inline StrictMode strict_mode();
+ inline void set_strict_mode(StrictMode strict_mode);
// False if the function definitely does not allocate an arguments object.
DECL_BOOLEAN_ACCESSORS(uses_arguments)
@@ -7188,7 +7214,6 @@ class SharedFunctionInfo: public HeapObject {
kLiveObjectsMayExist,
kOptimizationDisabled,
kStrictModeFunction,
- kExtendedModeFunction,
kUsesArguments,
kHasDuplicateParameters,
kNative,
@@ -7233,26 +7258,18 @@ class SharedFunctionInfo: public HeapObject {
static const int kStrictModeBitWithinByte =
(kStrictModeFunction + kCompilerHintsSmiTagSize) % kBitsPerByte;
- static const int kExtendedModeBitWithinByte =
- (kExtendedModeFunction + kCompilerHintsSmiTagSize) % kBitsPerByte;
-
static const int kNativeBitWithinByte =
(kNative + kCompilerHintsSmiTagSize) % kBitsPerByte;
#if __BYTE_ORDER == __LITTLE_ENDIAN
static const int kStrictModeByteOffset = kCompilerHintsOffset +
(kStrictModeFunction + kCompilerHintsSmiTagSize) / kBitsPerByte;
- static const int kExtendedModeByteOffset = kCompilerHintsOffset +
- (kExtendedModeFunction + kCompilerHintsSmiTagSize) / kBitsPerByte;
static const int kNativeByteOffset = kCompilerHintsOffset +
(kNative + kCompilerHintsSmiTagSize) / kBitsPerByte;
#elif __BYTE_ORDER == __BIG_ENDIAN
static const int kStrictModeByteOffset = kCompilerHintsOffset +
(kCompilerHintsSize - 1) -
((kStrictModeFunction + kCompilerHintsSmiTagSize) / kBitsPerByte);
- static const int kExtendedModeByteOffset = kCompilerHintsOffset +
- (kCompilerHintsSize - 1) -
- ((kExtendedModeFunction + kCompilerHintsSmiTagSize) / kBitsPerByte);
static const int kNativeByteOffset = kCompilerHintsOffset +
(kCompilerHintsSize - 1) -
((kNative + kCompilerHintsSmiTagSize) / kBitsPerByte);
@@ -7399,9 +7416,6 @@ class JSFunction: public JSObject {
void MarkForConcurrentOptimization();
void MarkInOptimizationQueue();
- static bool CompileOptimized(Handle<JSFunction> function,
- ClearExceptionFlag flag);
-
// Tells whether or not the function is already marked for lazy
// recompilation.
inline bool IsMarkedForOptimization();
@@ -7803,9 +7817,6 @@ class JSMessageObject: public JSObject {
// [script]: the script from which the error message originated.
DECL_ACCESSORS(script, Object)
- // [stack_trace]: the stack trace for this error message.
- DECL_ACCESSORS(stack_trace, Object)
-
// [stack_frames]: an array of stack frames for this error object.
DECL_ACCESSORS(stack_frames, Object)
@@ -7828,8 +7839,7 @@ class JSMessageObject: public JSObject {
static const int kTypeOffset = JSObject::kHeaderSize;
static const int kArgumentsOffset = kTypeOffset + kPointerSize;
static const int kScriptOffset = kArgumentsOffset + kPointerSize;
- static const int kStackTraceOffset = kScriptOffset + kPointerSize;
- static const int kStackFramesOffset = kStackTraceOffset + kPointerSize;
+ static const int kStackFramesOffset = kScriptOffset + kPointerSize;
static const int kStartPositionOffset = kStackFramesOffset + kPointerSize;
static const int kEndPositionOffset = kStartPositionOffset + kPointerSize;
static const int kSize = kEndPositionOffset + kPointerSize;
@@ -8010,7 +8020,7 @@ class CompilationCacheTable: public HashTable<CompilationCacheShape,
Object* Lookup(String* src, Context* context);
Object* LookupEval(String* src,
Context* context,
- LanguageMode language_mode,
+ StrictMode strict_mode,
int scope_position);
Object* LookupRegExp(String* source, JSRegExp::Flags flags);
MUST_USE_RESULT MaybeObject* Put(String* src,
@@ -8188,7 +8198,7 @@ class TypeFeedbackInfo: public Struct {
inline void set_inlined_type_change_checksum(int checksum);
inline bool matches_inlined_type_change_checksum(int checksum);
- DECL_ACCESSORS(type_feedback_cells, TypeFeedbackCells)
+ DECL_ACCESSORS(feedback_vector, FixedArray)
static inline TypeFeedbackInfo* cast(Object* obj);
@@ -8198,8 +8208,27 @@ class TypeFeedbackInfo: public Struct {
static const int kStorage1Offset = HeapObject::kHeaderSize;
static const int kStorage2Offset = kStorage1Offset + kPointerSize;
- static const int kTypeFeedbackCellsOffset = kStorage2Offset + kPointerSize;
- static const int kSize = kTypeFeedbackCellsOffset + kPointerSize;
+ static const int kFeedbackVectorOffset =
+ kStorage2Offset + kPointerSize;
+ static const int kSize = kFeedbackVectorOffset + kPointerSize;
+
+ // The object that indicates an uninitialized cache.
+ static inline Handle<Object> UninitializedSentinel(Isolate* isolate);
+
+ // The object that indicates a megamorphic state.
+ static inline Handle<Object> MegamorphicSentinel(Isolate* isolate);
+
+ // The object that indicates a monomorphic state of Array with
+ // ElementsKind
+ static inline Handle<Object> MonomorphicArraySentinel(Isolate* isolate,
+ ElementsKind elements_kind);
+
+ // A raw version of the uninitialized sentinel that's safe to read during
+ // garbage collection (e.g., for patching the cache).
+ static inline Object* RawUninitializedSentinel(Heap* heap);
+
+ static const int kForInFastCaseMarker = 0;
+ static const int kForInSlowCaseMarker = 1;
private:
static const int kTypeChangeChecksumBits = 7;
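
The sentinel accessors that used to live on TypeFeedbackCells move here, next to the new feedback_vector. A standalone sketch of the usual uninitialized / monomorphic / megamorphic cycle those sentinels encode, with plain pointers standing in for heap objects; the transition rules are the generic inline-cache pattern, not a quote of the V8 code:

// Sketch of the life cycle the sentinels above support. Distinct
// static objects play the role of the uninitialized and megamorphic
// sentinel values.
#include <cassert>

struct Map {};  // stand-in for a hidden class

static Map kUninitializedSentinel;
static Map kMegamorphicSentinel;

// Record the map observed at a feedback slot.
void RecordFeedback(Map** slot, Map* observed) {
  if (*slot == &kUninitializedSentinel) {
    *slot = observed;                    // first observation: monomorphic
  } else if (*slot != observed) {
    *slot = &kMegamorphicSentinel;       // second distinct map: give up
  }                                      // same map again: stay monomorphic
}

int main() {
  Map a, b;
  Map* slot = &kUninitializedSentinel;
  RecordFeedback(&slot, &a);
  assert(slot == &a);                    // monomorphic on `a`
  RecordFeedback(&slot, &a);
  assert(slot == &a);                    // still monomorphic
  RecordFeedback(&slot, &b);
  assert(slot == &kMegamorphicSentinel); // polymorphic -> megamorphic
  return 0;
}
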
@@ -8262,8 +8291,9 @@ class AllocationSite: public Struct {
class DoNotInlineBit: public BitField<bool, 29, 1> {};
// Bitfields for pretenure_data
- class MementoFoundCountBits: public BitField<int, 0, 28> {};
- class PretenureDecisionBits: public BitField<PretenureDecision, 28, 2> {};
+ class MementoFoundCountBits: public BitField<int, 0, 27> {};
+ class PretenureDecisionBits: public BitField<PretenureDecision, 27, 2> {};
+ class DeoptDependentCodeBit: public BitField<bool, 29, 1> {};
STATIC_ASSERT(PretenureDecisionBits::kMax >= kLastPretenureDecisionValue);
// Increments the mementos found counter and returns true when the first
@@ -8288,6 +8318,18 @@ class AllocationSite: public Struct {
SKIP_WRITE_BARRIER);
}
+ bool deopt_dependent_code() {
+ int value = pretenure_data()->value();
+ return DeoptDependentCodeBit::decode(value);
+ }
+
+ void set_deopt_dependent_code(bool deopt) {
+ int value = pretenure_data()->value();
+ set_pretenure_data(
+ Smi::FromInt(DeoptDependentCodeBit::update(value, deopt)),
+ SKIP_WRITE_BARRIER);
+ }
+
int memento_found_count() {
int value = pretenure_data()->value();
return MementoFoundCountBits::decode(value);
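
The memento-found counter shrinks from 28 to 27 bits so the new deopt-dependent-code flag fits next to the 2-bit pretenure decision in the same Smi-stored word. A quick arithmetic check of that layout; the 31-bit payload bound assumes 32-bit Smis:

#include <cassert>

int main() {
  const int kCountBits = 27;     // MementoFoundCountBits
  const int kDecisionBits = 2;   // PretenureDecisionBits
  const int kDeoptBits = 1;      // DeoptDependentCodeBit
  const int kDecisionShift = kCountBits;                   // 27
  const int kDeoptShift = kDecisionShift + kDecisionBits;  // 29
  // The three fields tile bits 0..29 without overlapping.
  assert(kDecisionShift == 27 && kDeoptShift == 29);
  // Highest used bit is 29, so the word still fits a 31-bit Smi payload.
  assert(kDeoptShift + kDeoptBits <= 31);
  return 0;
}
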
@@ -8345,7 +8387,8 @@ class AllocationSite: public Struct {
return transition_info()->IsJSArray() || transition_info()->IsJSObject();
}
- MaybeObject* DigestTransitionFeedback(ElementsKind to_kind);
+ static void DigestTransitionFeedback(Handle<AllocationSite> site,
+ ElementsKind to_kind);
enum Reason {
TENURING,
@@ -8421,8 +8464,8 @@ class AllocationMemento: public Struct {
};
-// Representation of a slow alias as part of a non-strict arguments objects.
-// For fast aliases (if HasNonStrictArgumentsElements()):
+// Representation of a slow alias as part of a sloppy arguments objects.
+// For fast aliases (if HasSloppyArgumentsElements()):
// - the parameter map contains an index into the context
// - all attributes of the element have default values
// For slow aliases (if HasDictionaryArgumentsElements()):
@@ -8627,7 +8670,7 @@ class Name: public HeapObject {
// kMaxCachedArrayIndexLength.
STATIC_CHECK(IS_POWER_OF_TWO(kMaxCachedArrayIndexLength + 1));
- static const int kContainsCachedArrayIndexMask =
+ static const unsigned int kContainsCachedArrayIndexMask =
(~kMaxCachedArrayIndexLength << kArrayIndexHashLengthShift) |
kIsNotArrayIndexMask;
@@ -8876,7 +8919,7 @@ class String: public Name {
static const int kEmptyStringHash = kIsNotArrayIndexMask;
// Maximal string length.
- static const int kMaxLength = (1 << (32 - 2)) - 1;
+ static const int kMaxLength = (1 << 28) - 16;
// Max length for computing hash. For strings longer than this limit the
// string length is used as the hash value.
@@ -9038,9 +9081,7 @@ class SeqOneByteString: public SeqString {
// Maximal memory usage for a single sequential ASCII string.
static const int kMaxSize = 512 * MB - 1;
- // Maximal length of a single sequential ASCII string.
- // Q.v. String::kMaxLength which is the maximal size of concatenated strings.
- static const int kMaxLength = (kMaxSize - kHeaderSize);
+ STATIC_CHECK((kMaxSize - kHeaderSize) >= String::kMaxLength);
private:
DISALLOW_IMPLICIT_CONSTRUCTORS(SeqOneByteString);
@@ -9080,9 +9121,8 @@ class SeqTwoByteString: public SeqString {
// Maximal memory usage for a single sequential two-byte string.
static const int kMaxSize = 512 * MB - 1;
- // Maximal length of a single sequential two-byte string.
- // Q.v. String::kMaxLength which is the maximal size of concatenated strings.
- static const int kMaxLength = (kMaxSize - kHeaderSize) / sizeof(uint16_t);
+ STATIC_CHECK(static_cast<int>((kMaxSize - kHeaderSize)/sizeof(uint16_t)) >=
+ String::kMaxLength);
private:
DISALLOW_IMPLICIT_CONSTRUCTORS(SeqTwoByteString);
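
String::kMaxLength drops from (1 << 30) - 1 to (1 << 28) - 16, and the per-representation kMaxLength constants become STATIC_CHECKs against the 512 MB sequential-string size cap. The same arithmetic as a standalone check, with an assumed header size since the real SeqString layout is not shown in this hunk:

// Standalone version of the STATIC_CHECKs above. The header size is an
// assumption for illustration; the point is that 512MB-capped
// sequential strings still cover the new String::kMaxLength.
#include <cassert>
#include <stdint.h>

int main() {
  const int MB = 1 << 20;
  const int kMaxSize = 512 * MB - 1;            // 536870911
  const int kStringMaxLength = (1 << 28) - 16;  // 268435440
  const int kAssumedHeaderSize = 16;            // assumption

  // One-byte: one byte per character.
  assert(kMaxSize - kAssumedHeaderSize >= kStringMaxLength);
  // Two-byte: two bytes per character.
  assert((kMaxSize - kAssumedHeaderSize) / static_cast<int>(sizeof(uint16_t))
         >= kStringMaxLength);
  return 0;
}
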
@@ -9600,14 +9640,16 @@ class JSProxy: public JSReceiver {
Handle<Name> name,
Handle<Object> value,
PropertyAttributes attributes,
- StrictModeFlag strict_mode,
+ StrictMode strict_mode,
bool* done);
- MUST_USE_RESULT PropertyAttributes GetPropertyAttributeWithHandler(
- JSReceiver* receiver,
- Name* name);
- MUST_USE_RESULT PropertyAttributes GetElementAttributeWithHandler(
- JSReceiver* receiver,
+ static PropertyAttributes GetPropertyAttributeWithHandler(
+ Handle<JSProxy> proxy,
+ Handle<JSReceiver> receiver,
+ Handle<Name> name);
+ static PropertyAttributes GetElementAttributeWithHandler(
+ Handle<JSProxy> proxy,
+ Handle<JSReceiver> receiver,
uint32_t index);
// Turn the proxy into an (empty) JSObject.
@@ -9651,12 +9693,12 @@ class JSProxy: public JSReceiver {
Handle<Name> name,
Handle<Object> value,
PropertyAttributes attributes,
- StrictModeFlag strict_mode);
+ StrictMode strict_mode);
static Handle<Object> SetElementWithHandler(Handle<JSProxy> proxy,
Handle<JSReceiver> receiver,
uint32_t index,
Handle<Object> value,
- StrictModeFlag strict_mode);
+ StrictMode strict_mode);
static bool HasPropertyWithHandler(Handle<JSProxy> proxy, Handle<Name> name);
static bool HasElementWithHandler(Handle<JSProxy> proxy, uint32_t index);
@@ -9898,6 +9940,8 @@ class JSTypedArray: public JSArrayBufferView {
ExternalArrayType type();
size_t element_size();
+ Handle<JSArrayBuffer> GetBuffer();
+
// Dispatched behavior.
DECLARE_PRINTER(JSTypedArray)
DECLARE_VERIFIER(JSTypedArray)
@@ -9909,6 +9953,9 @@ class JSTypedArray: public JSArrayBufferView {
kSize + v8::ArrayBufferView::kInternalFieldCount * kPointerSize;
private:
+ static Handle<JSArrayBuffer> MaterializeArrayBuffer(
+ Handle<JSTypedArray> typed_array);
+
DISALLOW_IMPLICIT_CONSTRUCTORS(JSTypedArray);
};
@@ -9993,22 +10040,30 @@ class JSArray: public JSObject {
// Initialize the array with the given capacity. The function may
// fail due to out-of-memory situations, but only if the requested
// capacity is non-zero.
- MUST_USE_RESULT MaybeObject* Initialize(int capacity, int length = 0);
+ static void Initialize(Handle<JSArray> array, int capacity, int length = 0);
// Initializes the array to a certain length.
inline bool AllowsSetElementsLength();
// Can cause GC.
- MUST_USE_RESULT MaybeObject* SetElementsLength(Object* length);
+ static Handle<Object> SetElementsLength(Handle<JSArray> array,
+ Handle<Object> length);
// Set the content of the array to the content of storage.
- MUST_USE_RESULT inline MaybeObject* SetContent(FixedArrayBase* storage);
+ static inline void SetContent(Handle<JSArray> array,
+ Handle<FixedArrayBase> storage);
// Casting.
static inline JSArray* cast(Object* obj);
- // Uses handles. Ensures that the fixed array backing the JSArray has at
+ // Ensures that the fixed array backing the JSArray has at
// least the stated size.
- inline void EnsureSize(int minimum_size_of_backing_fixed_array);
+ static inline void EnsureSize(Handle<JSArray> array,
+ int minimum_size_of_backing_fixed_array);
+
+ // Expand the fixed array backing of a fast-case JSArray to at least
+ // the requested size.
+ static void Expand(Handle<JSArray> array,
+ int minimum_size_of_backing_fixed_array);
// Dispatched behavior.
DECLARE_PRINTER(JSArray)
@@ -10022,10 +10077,6 @@ class JSArray: public JSObject {
static const int kSize = kLengthOffset + kPointerSize;
private:
- // Expand the fixed array backing of a fast-case JSArray to at least
- // the requested size.
- void Expand(int minimum_size_of_backing_fixed_array);
-
DISALLOW_IMPLICIT_CONSTRUCTORS(JSArray);
};
@@ -10646,6 +10697,7 @@ class BreakPointInfo: public Struct {
V(kStringTable, "string_table", "(Internalized strings)") \
V(kExternalStringsTable, "external_strings_table", "(External strings)") \
V(kStrongRootList, "strong_root_list", "(Strong roots)") \
+ V(kSmiRootList, "smi_root_list", "(Smi roots)") \
V(kInternalizedString, "internalized_string", "(Internal string)") \
V(kBootstrapper, "bootstrapper", "(Bootstrapper)") \
V(kTop, "top", "(Isolate)") \
@@ -10685,6 +10737,9 @@ class ObjectVisitor BASE_EMBEDDED {
// Handy shorthand for visiting a single pointer.
virtual void VisitPointer(Object** p) { VisitPointers(p, p + 1); }
+ // Visit weak next_code_link in Code object.
+ virtual void VisitNextCodeLink(Object** p) { VisitPointers(p, p + 1); }
+
// To allow lazy clearing of inline caches the visitor has
  // a rich interface for iterating over Code objects.
diff --git a/deps/v8/src/optimizing-compiler-thread.cc b/deps/v8/src/optimizing-compiler-thread.cc
index d21507084c..fb3eac5d50 100644
--- a/deps/v8/src/optimizing-compiler-thread.cc
+++ b/deps/v8/src/optimizing-compiler-thread.cc
@@ -258,9 +258,13 @@ void OptimizingCompilerThread::InstallOptimizedFunctions() {
uint32_t offset = code->TranslateAstIdToPcOffset(info->osr_ast_id());
BackEdgeTable::RemoveStackCheck(code, offset);
} else {
- Handle<Code> code = Compiler::GetConcurrentlyOptimizedCode(job);
- function->ReplaceCode(
- code.is_null() ? function->shared()->code() : *code);
+ if (function->IsOptimized()) {
+ DisposeOptimizedCompileJob(job, false);
+ } else {
+ Handle<Code> code = Compiler::GetConcurrentlyOptimizedCode(job);
+ function->ReplaceCode(
+ code.is_null() ? function->shared()->code() : *code);
+ }
}
}
}
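
The new branch drops a finished concurrent-compilation job when the function was already optimized while the job was in flight, instead of overwriting the code it already has. A generic sketch of that decision, with every name below invented for illustration:

// Generic "discard stale background result" pattern; only the decision
// structure mirrors the hunk above.
#include <cassert>

struct Function {
  bool is_optimized;
};

struct CompileJob {
  Function* target;
  bool disposed;
};

void InstallOrDispose(CompileJob* job) {
  if (job->target->is_optimized) {
    job->disposed = true;                // result is stale, throw it away
  } else {
    job->target->is_optimized = true;    // install the freshly built code
  }
}

int main() {
  Function f1 = {false}, f2 = {true};
  CompileJob j1 = {&f1, false}, j2 = {&f2, false};
  InstallOrDispose(&j1);
  InstallOrDispose(&j2);
  assert(f1.is_optimized && !j1.disposed);  // installed
  assert(j2.disposed);                      // dropped as stale
  return 0;
}
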
diff --git a/deps/v8/src/parser.cc b/deps/v8/src/parser.cc
index 5e7680e6c1..a00adb8c1e 100644
--- a/deps/v8/src/parser.cc
+++ b/deps/v8/src/parser.cc
@@ -33,7 +33,6 @@
#include "char-predicates-inl.h"
#include "codegen.h"
#include "compiler.h"
-#include "func-name-inferrer.h"
#include "messages.h"
#include "parser.h"
#include "platform.h"
@@ -46,49 +45,6 @@
namespace v8 {
namespace internal {
-// PositionStack is used for on-stack allocation of token positions for
-// new expressions. Please look at ParseNewExpression.
-
-class PositionStack {
- public:
- explicit PositionStack(bool* ok) : top_(NULL), ok_(ok) {}
- ~PositionStack() {
- ASSERT(!*ok_ || is_empty());
- USE(ok_);
- }
-
- class Element {
- public:
- Element(PositionStack* stack, int value) {
- previous_ = stack->top();
- value_ = value;
- stack->set_top(this);
- }
-
- private:
- Element* previous() { return previous_; }
- int value() { return value_; }
- friend class PositionStack;
- Element* previous_;
- int value_;
- };
-
- bool is_empty() { return top_ == NULL; }
- int pop() {
- ASSERT(!is_empty());
- int result = top_->value();
- top_ = top_->previous();
- return result;
- }
-
- private:
- Element* top() { return top_; }
- void set_top(Element* value) { top_ = value; }
- Element* top_;
- bool* ok_;
-};
-
-
RegExpBuilder::RegExpBuilder(Zone* zone)
: zone_(zone),
pending_empty_(false),
@@ -249,25 +205,6 @@ void RegExpBuilder::AddQuantifierToAtom(
}
-Handle<String> Parser::LookupSymbol(int symbol_id) {
- // Length of symbol cache is the number of identified symbols.
- // If we are larger than that, or negative, it's not a cached symbol.
- // This might also happen if there is no preparser symbol data, even
- // if there is some preparser data.
- if (static_cast<unsigned>(symbol_id)
- >= static_cast<unsigned>(symbol_cache_.length())) {
- if (scanner().is_literal_ascii()) {
- return isolate()->factory()->InternalizeOneByteString(
- Vector<const uint8_t>::cast(scanner().literal_ascii_string()));
- } else {
- return isolate()->factory()->InternalizeTwoByteString(
- scanner().literal_utf16_string());
- }
- }
- return LookupCachedSymbol(symbol_id);
-}
-
-
Handle<String> Parser::LookupCachedSymbol(int symbol_id) {
// Make sure the cache is large enough to hold the symbol identifier.
if (symbol_cache_.length() <= symbol_id) {
@@ -277,13 +214,8 @@ Handle<String> Parser::LookupCachedSymbol(int symbol_id) {
}
Handle<String> result = symbol_cache_.at(symbol_id);
if (result.is_null()) {
- if (scanner().is_literal_ascii()) {
- result = isolate()->factory()->InternalizeOneByteString(
- Vector<const uint8_t>::cast(scanner().literal_ascii_string()));
- } else {
- result = isolate()->factory()->InternalizeTwoByteString(
- scanner().literal_utf16_string());
- }
+ result = scanner()->AllocateInternalizedString(isolate_);
+ ASSERT(!result.is_null());
symbol_cache_.at(symbol_id) = result;
return result;
}
@@ -463,54 +395,6 @@ class TargetScope BASE_EMBEDDED {
// ----------------------------------------------------------------------------
-// FunctionState and BlockState together implement the parser's scope stack.
-// The parser's current scope is in top_scope_. The BlockState and
-// FunctionState constructors push on the scope stack and the destructors
-// pop. They are also used to hold the parser's per-function and per-block
-// state.
-
-class Parser::BlockState BASE_EMBEDDED {
- public:
- BlockState(Parser* parser, Scope* scope)
- : parser_(parser),
- outer_scope_(parser->top_scope_) {
- parser->top_scope_ = scope;
- }
-
- ~BlockState() { parser_->top_scope_ = outer_scope_; }
-
- private:
- Parser* parser_;
- Scope* outer_scope_;
-};
-
-
-Parser::FunctionState::FunctionState(Parser* parser, Scope* scope)
- : next_materialized_literal_index_(JSFunction::kLiteralsPrefixSize),
- next_handler_index_(0),
- expected_property_count_(0),
- generator_object_variable_(NULL),
- parser_(parser),
- outer_function_state_(parser->current_function_state_),
- outer_scope_(parser->top_scope_),
- saved_ast_node_id_(parser->zone()->isolate()->ast_node_id()),
- factory_(parser->zone()) {
- parser->top_scope_ = scope;
- parser->current_function_state_ = this;
- parser->zone()->isolate()->set_ast_node_id(BailoutId::FirstUsable().ToInt());
-}
-
-
-Parser::FunctionState::~FunctionState() {
- parser_->top_scope_ = outer_scope_;
- parser_->current_function_state_ = outer_function_state_;
- if (outer_function_state_ != NULL) {
- parser_->isolate()->set_ast_node_id(saved_ast_node_id_);
- }
-}
-
-
-// ----------------------------------------------------------------------------
// The CHECK_OK macro is a convenient macro to enforce error
// handling for functions that may fail (by returning !*ok).
//
@@ -533,22 +417,371 @@ Parser::FunctionState::~FunctionState() {
// ----------------------------------------------------------------------------
// Implementation of Parser
+bool ParserTraits::IsEvalOrArguments(Handle<String> identifier) const {
+ return identifier.is_identical_to(
+ parser_->isolate()->factory()->eval_string()) ||
+ identifier.is_identical_to(
+ parser_->isolate()->factory()->arguments_string());
+}
+
+
+bool ParserTraits::IsThisProperty(Expression* expression) {
+ ASSERT(expression != NULL);
+ Property* property = expression->AsProperty();
+ return property != NULL &&
+ property->obj()->AsVariableProxy() != NULL &&
+ property->obj()->AsVariableProxy()->is_this();
+}
+
+
+bool ParserTraits::IsIdentifier(Expression* expression) {
+ VariableProxy* operand = expression->AsVariableProxy();
+ return operand != NULL && !operand->is_this();
+}
+
+
+void ParserTraits::PushPropertyName(FuncNameInferrer* fni,
+ Expression* expression) {
+ if (expression->IsPropertyName()) {
+ fni->PushLiteralName(expression->AsLiteral()->AsPropertyName());
+ } else {
+ fni->PushLiteralName(
+ parser_->isolate()->factory()->anonymous_function_string());
+ }
+}
+
+
+void ParserTraits::CheckAssigningFunctionLiteralToProperty(Expression* left,
+ Expression* right) {
+ ASSERT(left != NULL);
+ if (left->AsProperty() != NULL &&
+ right->AsFunctionLiteral() != NULL) {
+ right->AsFunctionLiteral()->set_pretenure();
+ }
+}
+
+
+void ParserTraits::CheckPossibleEvalCall(Expression* expression,
+ Scope* scope) {
+ VariableProxy* callee = expression->AsVariableProxy();
+ if (callee != NULL &&
+ callee->IsVariable(parser_->isolate()->factory()->eval_string())) {
+ scope->DeclarationScope()->RecordEvalCall();
+ }
+}
+
+
+Expression* ParserTraits::MarkExpressionAsLValue(Expression* expression) {
+ VariableProxy* proxy = expression != NULL
+ ? expression->AsVariableProxy()
+ : NULL;
+ if (proxy != NULL) proxy->MarkAsLValue();
+ return expression;
+}
+
+
+void ParserTraits::CheckStrictModeLValue(Expression* expression,
+ bool* ok) {
+ VariableProxy* lhs = expression != NULL
+ ? expression->AsVariableProxy()
+ : NULL;
+ if (lhs != NULL && !lhs->is_this() && IsEvalOrArguments(lhs->name())) {
+ parser_->ReportMessage("strict_eval_arguments",
+ Vector<const char*>::empty());
+ *ok = false;
+ }
+}
+
+
+bool ParserTraits::ShortcutNumericLiteralBinaryExpression(
+ Expression** x, Expression* y, Token::Value op, int pos,
+ AstNodeFactory<AstConstructionVisitor>* factory) {
+ if ((*x)->AsLiteral() && (*x)->AsLiteral()->value()->IsNumber() &&
+ y->AsLiteral() && y->AsLiteral()->value()->IsNumber()) {
+ double x_val = (*x)->AsLiteral()->value()->Number();
+ double y_val = y->AsLiteral()->value()->Number();
+ switch (op) {
+ case Token::ADD:
+ *x = factory->NewNumberLiteral(x_val + y_val, pos);
+ return true;
+ case Token::SUB:
+ *x = factory->NewNumberLiteral(x_val - y_val, pos);
+ return true;
+ case Token::MUL:
+ *x = factory->NewNumberLiteral(x_val * y_val, pos);
+ return true;
+ case Token::DIV:
+ *x = factory->NewNumberLiteral(x_val / y_val, pos);
+ return true;
+ case Token::BIT_OR: {
+ int value = DoubleToInt32(x_val) | DoubleToInt32(y_val);
+ *x = factory->NewNumberLiteral(value, pos);
+ return true;
+ }
+ case Token::BIT_AND: {
+ int value = DoubleToInt32(x_val) & DoubleToInt32(y_val);
+ *x = factory->NewNumberLiteral(value, pos);
+ return true;
+ }
+ case Token::BIT_XOR: {
+ int value = DoubleToInt32(x_val) ^ DoubleToInt32(y_val);
+ *x = factory->NewNumberLiteral(value, pos);
+ return true;
+ }
+ case Token::SHL: {
+ int value = DoubleToInt32(x_val) << (DoubleToInt32(y_val) & 0x1f);
+ *x = factory->NewNumberLiteral(value, pos);
+ return true;
+ }
+ case Token::SHR: {
+ uint32_t shift = DoubleToInt32(y_val) & 0x1f;
+ uint32_t value = DoubleToUint32(x_val) >> shift;
+ *x = factory->NewNumberLiteral(value, pos);
+ return true;
+ }
+ case Token::SAR: {
+ uint32_t shift = DoubleToInt32(y_val) & 0x1f;
+ int value = ArithmeticShiftRight(DoubleToInt32(x_val), shift);
+ *x = factory->NewNumberLiteral(value, pos);
+ return true;
+ }
+ default:
+ break;
+ }
+ }
+ return false;
+}
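
The shift cases above fold with the right operand masked to its low five bits, the ECMAScript shift rule. A tiny standalone check of that masking for operands that are already 32-bit integers, leaving the DoubleToInt32/DoubleToUint32 conversions out of the sketch:

#include <cassert>
#include <stdint.h>

int32_t FoldShl(int32_t x, int32_t y) { return x << (y & 0x1f); }
uint32_t FoldShr(uint32_t x, int32_t y) { return x >> (y & 0x1f); }
int32_t FoldSar(int32_t x, int32_t y) {
  int shift = y & 0x1f;
  // Portable arithmetic shift right (>> on negative values is
  // implementation-defined before C++20).
  return x < 0 ? ~(~x >> shift) : x >> shift;
}

int main() {
  assert(FoldShl(1, 33) == 2);           // 33 & 0x1f == 1
  assert(FoldShr(0x80000000u, 31) == 1);
  assert(FoldSar(-8, 1) == -4);          // shift keeps the sign
  return 0;
}
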
+
+
+Expression* ParserTraits::BuildUnaryExpression(
+ Expression* expression, Token::Value op, int pos,
+ AstNodeFactory<AstConstructionVisitor>* factory) {
+ ASSERT(expression != NULL);
+ if (expression->AsLiteral() != NULL) {
+ Handle<Object> literal = expression->AsLiteral()->value();
+ if (op == Token::NOT) {
+ // Convert the literal to a boolean condition and negate it.
+ bool condition = literal->BooleanValue();
+ Handle<Object> result =
+ parser_->isolate()->factory()->ToBoolean(!condition);
+ return factory->NewLiteral(result, pos);
+ } else if (literal->IsNumber()) {
+ // Compute some expressions involving only number literals.
+ double value = literal->Number();
+ switch (op) {
+ case Token::ADD:
+ return expression;
+ case Token::SUB:
+ return factory->NewNumberLiteral(-value, pos);
+ case Token::BIT_NOT:
+ return factory->NewNumberLiteral(~DoubleToInt32(value), pos);
+ default:
+ break;
+ }
+ }
+ }
+ // Desugar '+foo' => 'foo*1'
+ if (op == Token::ADD) {
+ return factory->NewBinaryOperation(
+ Token::MUL, expression, factory->NewNumberLiteral(1, pos), pos);
+ }
+ // The same idea for '-foo' => 'foo*(-1)'.
+ if (op == Token::SUB) {
+ return factory->NewBinaryOperation(
+ Token::MUL, expression, factory->NewNumberLiteral(-1, pos), pos);
+ }
+ // ...and one more time for '~foo' => 'foo^(~0)'.
+ if (op == Token::BIT_NOT) {
+ return factory->NewBinaryOperation(
+ Token::BIT_XOR, expression, factory->NewNumberLiteral(~0, pos), pos);
+ }
+ return factory->NewUnaryOperation(op, expression, pos);
+}
+
+
+void ParserTraits::ReportMessageAt(Scanner::Location source_location,
+ const char* message,
+ Vector<const char*> args,
+ bool is_reference_error) {
+ if (parser_->stack_overflow()) {
+ // Suppress the error message (syntax error or such) in the presence of a
+ // stack overflow. The isolate allows only one pending exception at a time
+ // and we want to report the stack overflow later.
+ return;
+ }
+ MessageLocation location(parser_->script_,
+ source_location.beg_pos,
+ source_location.end_pos);
+ Factory* factory = parser_->isolate()->factory();
+ Handle<FixedArray> elements = factory->NewFixedArray(args.length());
+ for (int i = 0; i < args.length(); i++) {
+ Handle<String> arg_string = factory->NewStringFromUtf8(CStrVector(args[i]));
+ ASSERT(!arg_string.is_null());
+ elements->set(i, *arg_string);
+ }
+ Handle<JSArray> array = factory->NewJSArrayWithElements(elements);
+ Handle<Object> result = is_reference_error
+ ? factory->NewReferenceError(message, array)
+ : factory->NewSyntaxError(message, array);
+ parser_->isolate()->Throw(*result, &location);
+}
+
+
+void ParserTraits::ReportMessage(const char* message,
+ Vector<Handle<String> > args,
+ bool is_reference_error) {
+ Scanner::Location source_location = parser_->scanner()->location();
+ ReportMessageAt(source_location, message, args, is_reference_error);
+}
+
+
+void ParserTraits::ReportMessageAt(Scanner::Location source_location,
+ const char* message,
+ Vector<Handle<String> > args,
+ bool is_reference_error) {
+ if (parser_->stack_overflow()) {
+ // Suppress the error message (syntax error or such) in the presence of a
+ // stack overflow. The isolate allows only one pending exception at a time
+ // and we want to report the stack overflow later.
+ return;
+ }
+ MessageLocation location(parser_->script_,
+ source_location.beg_pos,
+ source_location.end_pos);
+ Factory* factory = parser_->isolate()->factory();
+ Handle<FixedArray> elements = factory->NewFixedArray(args.length());
+ for (int i = 0; i < args.length(); i++) {
+ elements->set(i, *args[i]);
+ }
+ Handle<JSArray> array = factory->NewJSArrayWithElements(elements);
+ Handle<Object> result = is_reference_error
+ ? factory->NewReferenceError(message, array)
+ : factory->NewSyntaxError(message, array);
+ parser_->isolate()->Throw(*result, &location);
+}
+
+
+Handle<String> ParserTraits::GetSymbol(Scanner* scanner) {
+ if (parser_->cached_data_mode() == CONSUME_CACHED_DATA) {
+ int symbol_id = (*parser_->cached_data())->GetSymbolIdentifier();
+ // If there is no symbol data, -1 will be returned.
+ if (symbol_id >= 0 &&
+ symbol_id < (*parser_->cached_data())->symbol_count()) {
+ return parser_->LookupCachedSymbol(symbol_id);
+ }
+ } else if (parser_->cached_data_mode() == PRODUCE_CACHED_DATA) {
+ if (parser_->log_->ShouldLogSymbols()) {
+ parser_->scanner()->LogSymbol(parser_->log_, parser_->position());
+ }
+ }
+ Handle<String> result =
+ parser_->scanner()->AllocateInternalizedString(parser_->isolate_);
+ ASSERT(!result.is_null());
+ return result;
+}
+
+
+Handle<String> ParserTraits::NextLiteralString(Scanner* scanner,
+ PretenureFlag tenured) {
+ return scanner->AllocateNextLiteralString(parser_->isolate(), tenured);
+}
+
+
+Expression* ParserTraits::ThisExpression(
+ Scope* scope,
+ AstNodeFactory<AstConstructionVisitor>* factory) {
+ return factory->NewVariableProxy(scope->receiver());
+}
+
+
+Literal* ParserTraits::ExpressionFromLiteral(
+ Token::Value token, int pos,
+ Scanner* scanner,
+ AstNodeFactory<AstConstructionVisitor>* factory) {
+ Factory* isolate_factory = parser_->isolate()->factory();
+ switch (token) {
+ case Token::NULL_LITERAL:
+ return factory->NewLiteral(isolate_factory->null_value(), pos);
+ case Token::TRUE_LITERAL:
+ return factory->NewLiteral(isolate_factory->true_value(), pos);
+ case Token::FALSE_LITERAL:
+ return factory->NewLiteral(isolate_factory->false_value(), pos);
+ case Token::NUMBER: {
+ double value = scanner->DoubleValue();
+ return factory->NewNumberLiteral(value, pos);
+ }
+ default:
+ ASSERT(false);
+ }
+ return NULL;
+}
+
+
+Expression* ParserTraits::ExpressionFromIdentifier(
+ Handle<String> name, int pos, Scope* scope,
+ AstNodeFactory<AstConstructionVisitor>* factory) {
+ if (parser_->fni_ != NULL) parser_->fni_->PushVariableName(name);
+ // The name may refer to a module instance object, so its type is unknown.
+#ifdef DEBUG
+ if (FLAG_print_interface_details)
+ PrintF("# Variable %s ", name->ToAsciiArray());
+#endif
+ Interface* interface = Interface::NewUnknown(parser_->zone());
+ return scope->NewUnresolved(factory, name, interface, pos);
+}
+
+
+Expression* ParserTraits::ExpressionFromString(
+ int pos, Scanner* scanner,
+ AstNodeFactory<AstConstructionVisitor>* factory) {
+ Handle<String> symbol = GetSymbol(scanner);
+ if (parser_->fni_ != NULL) parser_->fni_->PushLiteralName(symbol);
+ return factory->NewLiteral(symbol, pos);
+}
+
+
+Literal* ParserTraits::GetLiteralTheHole(
+ int position, AstNodeFactory<AstConstructionVisitor>* factory) {
+ return factory->NewLiteral(parser_->isolate()->factory()->the_hole_value(),
+ RelocInfo::kNoPosition);
+}
+
+
+Expression* ParserTraits::ParseV8Intrinsic(bool* ok) {
+ return parser_->ParseV8Intrinsic(ok);
+}
+
+
+FunctionLiteral* ParserTraits::ParseFunctionLiteral(
+ Handle<String> name,
+ Scanner::Location function_name_location,
+ bool name_is_strict_reserved,
+ bool is_generator,
+ int function_token_position,
+ FunctionLiteral::FunctionType type,
+ bool* ok) {
+ return parser_->ParseFunctionLiteral(name, function_name_location,
+ name_is_strict_reserved, is_generator,
+ function_token_position, type, ok);
+}
+
+
Parser::Parser(CompilationInfo* info)
- : ParserBase(&scanner_, info->isolate()->stack_guard()->real_climit()),
+ : ParserBase<ParserTraits>(&scanner_,
+ info->isolate()->stack_guard()->real_climit(),
+ info->extension(),
+ NULL,
+ info->zone(),
+ this),
isolate_(info->isolate()),
symbol_cache_(0, info->zone()),
script_(info->script()),
scanner_(isolate_->unicode_cache()),
reusable_preparser_(NULL),
- top_scope_(NULL),
original_scope_(NULL),
- current_function_state_(NULL),
target_stack_(NULL),
- extension_(info->extension()),
- pre_parse_data_(NULL),
- fni_(NULL),
- parenthesized_function_(false),
- zone_(info->zone()),
+ cached_data_(NULL),
+ cached_data_mode_(NO_CACHED_DATA),
info_(info) {
ASSERT(!script_.is_null());
isolate_->set_ast_node_id(0);
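
Parser now derives from ParserBase<ParserTraits> and hands itself to the base, so shared parsing code can call back into Parser-specific behavior without virtual dispatch. A minimal standalone sketch of that traits arrangement; the class and member names below are invented and only the shape is assumed to match:

// Sketch: the base holds a traits object that forwards to the concrete
// parser, so shared logic can invoke derived-specific hooks.
#include <cassert>
#include <string>

class ConcreteParser;  // forward declaration

class Traits {
 public:
  explicit Traits(ConcreteParser* parser) : parser_(parser) {}
  std::string GetSymbol();  // defined after ConcreteParser
 private:
  ConcreteParser* parser_;
};

template <class TraitsT>
class ParserBaseSketch {
 public:
  explicit ParserBaseSketch(TraitsT traits) : traits_(traits) {}
  // Shared logic that relies on a derived-specific hook.
  std::string ParseIdentifier() { return traits_.GetSymbol(); }
 private:
  TraitsT traits_;
};

class ConcreteParser : public ParserBaseSketch<Traits> {
 public:
  ConcreteParser() : ParserBaseSketch<Traits>(Traits(this)) {}
  std::string CurrentSymbol() { return "foo"; }
};

std::string Traits::GetSymbol() { return parser_->CurrentSymbol(); }

int main() {
  ConcreteParser p;
  assert(p.ParseIdentifier() == "foo");
  return 0;
}
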
@@ -575,6 +808,13 @@ FunctionLiteral* Parser::ParseProgram() {
fni_ = new(zone()) FuncNameInferrer(isolate(), zone());
// Initialize parser state.
+ CompleteParserRecorder recorder;
+ if (cached_data_mode_ == PRODUCE_CACHED_DATA) {
+ log_ = &recorder;
+ } else if (cached_data_mode_ == CONSUME_CACHED_DATA) {
+ (*cached_data_)->Initialize();
+ }
+
source->TryFlatten();
FunctionLiteral* result;
if (source->IsExternalTwoByteString()) {
@@ -604,27 +844,31 @@ FunctionLiteral* Parser::ParseProgram() {
}
PrintF(" - took %0.3f ms]\n", ms);
}
+ if (cached_data_mode_ == PRODUCE_CACHED_DATA) {
+ Vector<unsigned> store = recorder.ExtractData();
+ *cached_data_ = new ScriptDataImpl(store);
+ log_ = NULL;
+ }
return result;
}
FunctionLiteral* Parser::DoParseProgram(CompilationInfo* info,
Handle<String> source) {
- ASSERT(top_scope_ == NULL);
+ ASSERT(scope_ == NULL);
ASSERT(target_stack_ == NULL);
- if (pre_parse_data_ != NULL) pre_parse_data_->Initialize();
Handle<String> no_name = isolate()->factory()->empty_string();
FunctionLiteral* result = NULL;
- { Scope* scope = NewScope(top_scope_, GLOBAL_SCOPE);
+ { Scope* scope = NewScope(scope_, GLOBAL_SCOPE);
info->SetGlobalScope(scope);
if (!info->context().is_null()) {
scope = Scope::DeserializeScopeChain(*info->context(), scope, zone());
}
original_scope_ = scope;
if (info->is_eval()) {
- if (!scope->is_global_scope() || info->language_mode() != CLASSIC_MODE) {
+ if (!scope->is_global_scope() || info->strict_mode() == STRICT) {
scope = NewScope(scope, EVAL_SCOPE);
}
} else if (info->is_global()) {
@@ -643,19 +887,19 @@ FunctionLiteral* Parser::DoParseProgram(CompilationInfo* info,
ParsingModeScope parsing_mode(this, mode);
// Enters 'scope'.
- FunctionState function_state(this, scope);
+ FunctionState function_state(&function_state_, &scope_, scope, zone());
- top_scope_->SetLanguageMode(info->language_mode());
+ scope_->SetStrictMode(info->strict_mode());
ZoneList<Statement*>* body = new(zone()) ZoneList<Statement*>(16, zone());
bool ok = true;
- int beg_pos = scanner().location().beg_pos;
+ int beg_pos = scanner()->location().beg_pos;
ParseSourceElements(body, Token::EOS, info->is_eval(), true, &ok);
- if (ok && !top_scope_->is_classic_mode()) {
- CheckOctalLiteral(beg_pos, scanner().location().end_pos, &ok);
+ if (ok && strict_mode() == STRICT) {
+ CheckOctalLiteral(beg_pos, scanner()->location().end_pos, &ok);
}
- if (ok && is_extended_mode()) {
- CheckConflictingVarDeclarations(top_scope_, &ok);
+ if (ok && allow_harmony_scoping() && strict_mode() == STRICT) {
+ CheckConflictingVarDeclarations(scope_, &ok);
}
if (ok && info->parse_restriction() == ONLY_SINGLE_FUNCTION_LITERAL) {
@@ -671,7 +915,7 @@ FunctionLiteral* Parser::DoParseProgram(CompilationInfo* info,
if (ok) {
result = factory()->NewFunctionLiteral(
no_name,
- top_scope_,
+ scope_,
body,
function_state.materialized_literal_count(),
function_state.expected_property_count(),
@@ -684,6 +928,7 @@ FunctionLiteral* Parser::DoParseProgram(CompilationInfo* info,
FunctionLiteral::kNotGenerator,
0);
result->set_ast_properties(factory()->visitor()->ast_properties());
+ result->set_slot_processor(factory()->visitor()->slot_processor());
result->set_dont_optimize_reason(
factory()->visitor()->dont_optimize_reason());
} else if (stack_overflow()) {
@@ -736,7 +981,7 @@ FunctionLiteral* Parser::ParseLazy() {
FunctionLiteral* Parser::ParseLazy(Utf16CharacterStream* source) {
Handle<SharedFunctionInfo> shared_info = info()->shared_info();
scanner_.Initialize(source);
- ASSERT(top_scope_ == NULL);
+ ASSERT(scope_ == NULL);
ASSERT(target_stack_ == NULL);
Handle<String> name(String::cast(shared_info->name()));
@@ -750,19 +995,17 @@ FunctionLiteral* Parser::ParseLazy(Utf16CharacterStream* source) {
{
// Parse the function literal.
- Scope* scope = NewScope(top_scope_, GLOBAL_SCOPE);
+ Scope* scope = NewScope(scope_, GLOBAL_SCOPE);
info()->SetGlobalScope(scope);
if (!info()->closure().is_null()) {
scope = Scope::DeserializeScopeChain(info()->closure()->context(), scope,
zone());
}
original_scope_ = scope;
- FunctionState function_state(this, scope);
- ASSERT(scope->language_mode() != STRICT_MODE || !info()->is_classic_mode());
- ASSERT(scope->language_mode() != EXTENDED_MODE ||
- info()->is_extended_mode());
- ASSERT(info()->language_mode() == shared_info->language_mode());
- scope->SetLanguageMode(shared_info->language_mode());
+ FunctionState function_state(&function_state_, &scope_, scope, zone());
+ ASSERT(scope->strict_mode() == SLOPPY || info()->strict_mode() == STRICT);
+ ASSERT(info()->strict_mode() == shared_info->strict_mode());
+ scope->SetStrictMode(shared_info->strict_mode());
FunctionLiteral::FunctionType function_type = shared_info->is_expression()
? (shared_info->is_anonymous()
? FunctionLiteral::ANONYMOUS_EXPRESSION
@@ -793,62 +1036,6 @@ FunctionLiteral* Parser::ParseLazy(Utf16CharacterStream* source) {
}
-Handle<String> Parser::GetSymbol() {
- int symbol_id = -1;
- if (pre_parse_data() != NULL) {
- symbol_id = pre_parse_data()->GetSymbolIdentifier();
- }
- return LookupSymbol(symbol_id);
-}
-
-
-void Parser::ReportMessage(const char* message, Vector<const char*> args) {
- Scanner::Location source_location = scanner().location();
- ReportMessageAt(source_location, message, args);
-}
-
-
-void Parser::ReportMessage(const char* message, Vector<Handle<String> > args) {
- Scanner::Location source_location = scanner().location();
- ReportMessageAt(source_location, message, args);
-}
-
-
-void Parser::ReportMessageAt(Scanner::Location source_location,
- const char* message,
- Vector<const char*> args) {
- MessageLocation location(script_,
- source_location.beg_pos,
- source_location.end_pos);
- Factory* factory = isolate()->factory();
- Handle<FixedArray> elements = factory->NewFixedArray(args.length());
- for (int i = 0; i < args.length(); i++) {
- Handle<String> arg_string = factory->NewStringFromUtf8(CStrVector(args[i]));
- elements->set(i, *arg_string);
- }
- Handle<JSArray> array = factory->NewJSArrayWithElements(elements);
- Handle<Object> result = factory->NewSyntaxError(message, array);
- isolate()->Throw(*result, &location);
-}
-
-
-void Parser::ReportMessageAt(Scanner::Location source_location,
- const char* message,
- Vector<Handle<String> > args) {
- MessageLocation location(script_,
- source_location.beg_pos,
- source_location.end_pos);
- Factory* factory = isolate()->factory();
- Handle<FixedArray> elements = factory->NewFixedArray(args.length());
- for (int i = 0; i < args.length(); i++) {
- elements->set(i, *args[i]);
- }
- Handle<JSArray> array = factory->NewJSArrayWithElements(elements);
- Handle<Object> result = factory->NewSyntaxError(message, array);
- isolate()->Throw(*result, &location);
-}
-
-
void* Parser::ParseSourceElements(ZoneList<Statement*>* processor,
int end_token,
bool is_eval,
@@ -871,7 +1058,7 @@ void* Parser::ParseSourceElements(ZoneList<Statement*>* processor,
directive_prologue = false;
}
- Scanner::Location token_loc = scanner().peek_location();
+ Scanner::Location token_loc = scanner()->peek_location();
Statement* stat;
if (is_global && !is_eval) {
stat = ParseModuleElement(NULL, CHECK_OK);
@@ -894,7 +1081,7 @@ void* Parser::ParseSourceElements(ZoneList<Statement*>* processor,
Handle<String> directive = Handle<String>::cast(literal->value());
// Check "use strict" directive (ES5 14.1).
- if (top_scope_->is_classic_mode() &&
+ if (strict_mode() == SLOPPY &&
directive->Equals(isolate()->heap()->use_strict_string()) &&
token_loc.end_pos - token_loc.beg_pos ==
isolate()->heap()->use_strict_string()->length() + 2) {
@@ -903,17 +1090,15 @@ void* Parser::ParseSourceElements(ZoneList<Statement*>* processor,
// add this scope in DoParseProgram(), but that requires adaptations
// all over the code base, so we go with a quick-fix for now.
// In the same manner, we have to patch the parsing mode.
- if (is_eval && !top_scope_->is_eval_scope()) {
- ASSERT(top_scope_->is_global_scope());
- Scope* scope = NewScope(top_scope_, EVAL_SCOPE);
- scope->set_start_position(top_scope_->start_position());
- scope->set_end_position(top_scope_->end_position());
- top_scope_ = scope;
+ if (is_eval && !scope_->is_eval_scope()) {
+ ASSERT(scope_->is_global_scope());
+ Scope* scope = NewScope(scope_, EVAL_SCOPE);
+ scope->set_start_position(scope_->start_position());
+ scope->set_end_position(scope_->end_position());
+ scope_ = scope;
mode_ = PARSE_EAGERLY;
}
- // TODO(ES6): Fix entering extended mode, once it is specified.
- top_scope_->SetLanguageMode(allow_harmony_scoping()
- ? EXTENDED_MODE : STRICT_MODE);
+ scope_->SetStrictMode(STRICT);
// "use strict" is the only directive for now.
directive_prologue = false;
}
@@ -961,14 +1146,14 @@ Statement* Parser::ParseModuleElement(ZoneStringList* labels,
// Handle 'module' as a context-sensitive keyword.
if (FLAG_harmony_modules &&
peek() == Token::IDENTIFIER &&
- !scanner().HasAnyLineTerminatorBeforeNext() &&
+ !scanner()->HasAnyLineTerminatorBeforeNext() &&
stmt != NULL) {
ExpressionStatement* estmt = stmt->AsExpressionStatement();
if (estmt != NULL &&
estmt->expression()->AsVariableProxy() != NULL &&
estmt->expression()->AsVariableProxy()->name()->Equals(
isolate()->heap()->module_string()) &&
- !scanner().literal_contains_escapes()) {
+ !scanner()->literal_contains_escapes()) {
return ParseModuleDeclaration(NULL, ok);
}
}
@@ -993,7 +1178,7 @@ Statement* Parser::ParseModuleDeclaration(ZoneStringList* names, bool* ok) {
Module* module = ParseModule(CHECK_OK);
VariableProxy* proxy = NewUnresolved(name, MODULE, module->interface());
Declaration* declaration =
- factory()->NewModuleDeclaration(proxy, module, top_scope_, pos);
+ factory()->NewModuleDeclaration(proxy, module, scope_, pos);
Declare(declaration, true, CHECK_OK);
#ifdef DEBUG
@@ -1051,14 +1236,14 @@ Module* Parser::ParseModuleLiteral(bool* ok) {
#ifdef DEBUG
if (FLAG_print_interface_details) PrintF("# Literal ");
#endif
- Scope* scope = NewScope(top_scope_, MODULE_SCOPE);
+ Scope* scope = NewScope(scope_, MODULE_SCOPE);
Expect(Token::LBRACE, CHECK_OK);
- scope->set_start_position(scanner().location().beg_pos);
- scope->SetLanguageMode(EXTENDED_MODE);
+ scope->set_start_position(scanner()->location().beg_pos);
+ scope->SetStrictMode(STRICT);
{
- BlockState block_state(this, scope);
+ BlockState block_state(&scope_, scope);
TargetCollector collector(zone());
Target target(&this->target_stack_, &collector);
Target target_body(&this->target_stack_, body);
@@ -1072,7 +1257,7 @@ Module* Parser::ParseModuleLiteral(bool* ok) {
}
Expect(Token::RBRACE, CHECK_OK);
- scope->set_end_position(scanner().location().end_pos);
+ scope->set_end_position(scanner()->location().end_pos);
body->set_scope(scope);
// Check that all exports are bound.
@@ -1081,8 +1266,8 @@ Module* Parser::ParseModuleLiteral(bool* ok) {
!it.done(); it.Advance()) {
if (scope->LocalLookup(it.name()) == NULL) {
Handle<String> name(it.name());
- ReportMessage("module_export_undefined",
- Vector<Handle<String> >(&name, 1));
+ ParserTraits::ReportMessage("module_export_undefined",
+ Vector<Handle<String> >(&name, 1));
*ok = false;
return NULL;
}
@@ -1121,7 +1306,8 @@ Module* Parser::ParseModulePath(bool* ok) {
member->interface()->Print();
}
#endif
- ReportMessage("invalid_module_path", Vector<Handle<String> >(&name, 1));
+ ParserTraits::ReportMessage("invalid_module_path",
+ Vector<Handle<String> >(&name, 1));
return NULL;
}
result = member;
@@ -1141,9 +1327,9 @@ Module* Parser::ParseModuleVariable(bool* ok) {
if (FLAG_print_interface_details)
PrintF("# Module variable %s ", name->ToAsciiArray());
#endif
- VariableProxy* proxy = top_scope_->NewUnresolved(
+ VariableProxy* proxy = scope_->NewUnresolved(
factory(), name, Interface::NewModule(zone()),
- scanner().location().beg_pos);
+ scanner()->location().beg_pos);
return factory()->NewModuleVariable(proxy, pos);
}
@@ -1165,7 +1351,7 @@ Module* Parser::ParseModuleUrl(bool* ok) {
// Create an empty literal as long as the feature isn't finished.
USE(symbol);
- Scope* scope = NewScope(top_scope_, MODULE_SCOPE);
+ Scope* scope = NewScope(scope_, MODULE_SCOPE);
Block* body = factory()->NewBlock(NULL, 1, false, RelocInfo::kNoPosition);
body->set_scope(scope);
Interface* interface = scope->interface();
@@ -1231,12 +1417,13 @@ Block* Parser::ParseImportDeclaration(bool* ok) {
module->interface()->Print();
}
#endif
- ReportMessage("invalid_module_path", Vector<Handle<String> >(&name, 1));
+ ParserTraits::ReportMessage("invalid_module_path",
+ Vector<Handle<String> >(&name, 1));
return NULL;
}
VariableProxy* proxy = NewUnresolved(names[i], LET, interface);
Declaration* declaration =
- factory()->NewImportDeclaration(proxy, module, top_scope_, pos);
+ factory()->NewImportDeclaration(proxy, module, scope_, pos);
Declare(declaration, true, CHECK_OK);
}
@@ -1291,12 +1478,12 @@ Statement* Parser::ParseExportDeclaration(bool* ok) {
default:
*ok = false;
- ReportUnexpectedToken(scanner().current_token());
+ ReportUnexpectedToken(scanner()->current_token());
return NULL;
}
// Extract declared names into export declarations and interface.
- Interface* interface = top_scope_->interface();
+ Interface* interface = scope_->interface();
for (int i = 0; i < names.length(); ++i) {
#ifdef DEBUG
if (FLAG_print_interface_details)
@@ -1311,8 +1498,8 @@ Statement* Parser::ParseExportDeclaration(bool* ok) {
// TODO(rossberg): Rethink whether we actually need to store export
// declarations (for compilation?).
// ExportDeclaration* declaration =
- // factory()->NewExportDeclaration(proxy, top_scope_, position);
- // top_scope_->AddDeclaration(declaration);
+ // factory()->NewExportDeclaration(proxy, scope_, position);
+ // scope_->AddDeclaration(declaration);
}
ASSERT(result != NULL);
@@ -1438,9 +1625,8 @@ Statement* Parser::ParseStatement(ZoneStringList* labels, bool* ok) {
// In Harmony mode, this case also handles the extension:
// Statement:
// GeneratorDeclaration
- if (!top_scope_->is_classic_mode()) {
- ReportMessageAt(scanner().peek_location(), "strict_function",
- Vector<const char*>::empty());
+ if (strict_mode() == STRICT) {
+ ReportMessageAt(scanner()->peek_location(), "strict_function");
*ok = false;
return NULL;
}
@@ -1484,7 +1670,7 @@ void Parser::Declare(Declaration* declaration, bool resolve, bool* ok) {
// Similarly, strict mode eval scope does not leak variable declarations to
// the caller's scope so we declare all locals, too.
if (declaration_scope->is_function_scope() ||
- declaration_scope->is_strict_or_extended_eval_scope() ||
+ declaration_scope->is_strict_eval_scope() ||
declaration_scope->is_block_scope() ||
declaration_scope->is_module_scope() ||
declaration_scope->is_global_scope()) {
@@ -1517,8 +1703,8 @@ void Parser::Declare(Declaration* declaration, bool resolve, bool* ok) {
// because the var declaration is hoisted to the function scope where 'x'
// is already bound.
ASSERT(IsDeclaredVariableMode(var->mode()));
- if (is_extended_mode()) {
- // In harmony mode we treat re-declarations as early errors. See
+ if (allow_harmony_scoping() && strict_mode() == STRICT) {
+ // In harmony we treat re-declarations as early errors. See
// ES5 16 for a definition of early errors.
SmartArrayPointer<char> c_string = name->ToCString(DISALLOW_NULLS);
const char* elms[2] = { "Variable", c_string.get() };
@@ -1528,8 +1714,8 @@ void Parser::Declare(Declaration* declaration, bool resolve, bool* ok) {
return;
}
Handle<String> message_string =
- isolate()->factory()->NewStringFromUtf8(CStrVector("Variable"),
- TENURED);
+ isolate()->factory()->InternalizeOneByteString(
+ STATIC_ASCII_VECTOR("Variable"));
Expression* expression =
NewThrowTypeError(isolate()->factory()->redeclaration_string(),
message_string, name);
@@ -1552,10 +1738,10 @@ void Parser::Declare(Declaration* declaration, bool resolve, bool* ok) {
// same variable if it is declared several times. This is not a
// semantic issue as long as we keep the source order, but it may be
// a performance issue since it may lead to repeated
- // Runtime::DeclareContextSlot() calls.
+ // RuntimeHidden_DeclareContextSlot calls.
declaration_scope->AddDeclaration(declaration);
- if (mode == CONST && declaration_scope->is_global_scope()) {
+ if (mode == CONST_LEGACY && declaration_scope->is_global_scope()) {
// For global const variables we bind the proxy to a variable.
ASSERT(resolve); // should be set by all callers
Variable::Kind kind = Variable::NORMAL;
@@ -1563,8 +1749,8 @@ void Parser::Declare(Declaration* declaration, bool resolve, bool* ok) {
declaration_scope, name, mode, true, kind,
kNeedsInitialization, proxy->interface());
} else if (declaration_scope->is_eval_scope() &&
- declaration_scope->is_classic_mode()) {
- // For variable declarations in a non-strict eval scope the proxy is bound
+ declaration_scope->strict_mode() == SLOPPY) {
+ // For variable declarations in a sloppy eval scope the proxy is bound
// to a lookup variable to force a dynamic declaration using the
// DeclareContextSlot runtime function.
Variable::Kind kind = Variable::NORMAL;
@@ -1619,7 +1805,8 @@ void Parser::Declare(Declaration* declaration, bool resolve, bool* ok) {
var->interface()->Print();
}
#endif
- ReportMessage("module_type_error", Vector<Handle<String> >(&name, 1));
+ ParserTraits::ReportMessage("module_type_error",
+ Vector<Handle<String> >(&name, 1));
}
}
}
@@ -1658,7 +1845,7 @@ Statement* Parser::ParseNativeDeclaration(bool* ok) {
// other functions are set up when entering the surrounding scope.
VariableProxy* proxy = NewUnresolved(name, VAR, Interface::NewValue());
Declaration* declaration =
- factory()->NewVariableDeclaration(proxy, VAR, top_scope_, pos);
+ factory()->NewVariableDeclaration(proxy, VAR, scope_, pos);
Declare(declaration, true, CHECK_OK);
NativeFunctionLiteral* lit = factory()->NewNativeFunctionLiteral(
name, extension_, RelocInfo::kNoPosition);
@@ -1682,7 +1869,7 @@ Statement* Parser::ParseFunctionDeclaration(ZoneStringList* names, bool* ok) {
Handle<String> name = ParseIdentifierOrStrictReservedWord(
&is_strict_reserved, CHECK_OK);
FunctionLiteral* fun = ParseFunctionLiteral(name,
- scanner().location(),
+ scanner()->location(),
is_strict_reserved,
is_generator,
pos,
@@ -1694,10 +1881,11 @@ Statement* Parser::ParseFunctionDeclaration(ZoneStringList* names, bool* ok) {
// In extended mode, a function behaves as a lexical binding, except in the
// global scope.
VariableMode mode =
- is_extended_mode() && !top_scope_->is_global_scope() ? LET : VAR;
+ allow_harmony_scoping() &&
+ strict_mode() == STRICT && !scope_->is_global_scope() ? LET : VAR;
VariableProxy* proxy = NewUnresolved(name, mode, Interface::NewValue());
Declaration* declaration =
- factory()->NewFunctionDeclaration(proxy, mode, fun, top_scope_, pos);
+ factory()->NewFunctionDeclaration(proxy, mode, fun, scope_, pos);
Declare(declaration, true, CHECK_OK);
if (names) names->Add(name, zone());
return factory()->NewEmptyStatement(RelocInfo::kNoPosition);
@@ -1705,7 +1893,9 @@ Statement* Parser::ParseFunctionDeclaration(ZoneStringList* names, bool* ok) {
Block* Parser::ParseBlock(ZoneStringList* labels, bool* ok) {
- if (top_scope_->is_extended_mode()) return ParseScopedBlock(labels, ok);
+ if (allow_harmony_scoping() && strict_mode() == STRICT) {
+ return ParseScopedBlock(labels, ok);
+ }
// Block ::
// '{' Statement* '}'
@@ -1738,12 +1928,12 @@ Block* Parser::ParseScopedBlock(ZoneStringList* labels, bool* ok) {
// Construct block expecting 16 statements.
Block* body =
factory()->NewBlock(labels, 16, false, RelocInfo::kNoPosition);
- Scope* block_scope = NewScope(top_scope_, BLOCK_SCOPE);
+ Scope* block_scope = NewScope(scope_, BLOCK_SCOPE);
// Parse the statements and collect escaping labels.
Expect(Token::LBRACE, CHECK_OK);
- block_scope->set_start_position(scanner().location().beg_pos);
- { BlockState block_state(this, block_scope);
+ block_scope->set_start_position(scanner()->location().beg_pos);
+ { BlockState block_state(&scope_, block_scope);
TargetCollector collector(zone());
Target target(&this->target_stack_, &collector);
Target target_body(&this->target_stack_, body);
@@ -1756,7 +1946,7 @@ Block* Parser::ParseScopedBlock(ZoneStringList* labels, bool* ok) {
}
}
Expect(Token::RBRACE, CHECK_OK);
- block_scope->set_end_position(scanner().location().end_pos);
+ block_scope->set_end_position(scanner()->location().end_pos);
block_scope = block_scope->FinalizeBlockScope();
body->set_scope(block_scope);
return body;
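
The ParseScopedBlock hunk above shows a change repeated throughout this diff: BlockState is now constructed with the address of the parser's current-scope pointer (&scope_) instead of the Parser object, so the helper only has to swap and restore that pointer. A minimal sketch of the RAII pattern involved; the class and member names are illustrative, not V8's actual BlockState:

struct Scope;  // opaque in this sketch

// Points *slot at an inner scope for the lifetime of the object and restores
// the previous value on destruction, mirroring how BlockState(&scope_, ...)
// is used around scoped blocks, with statements and catch blocks in this file.
class ScopedPointerSwap {
 public:
  ScopedPointerSwap(Scope** slot, Scope* inner) : slot_(slot), saved_(*slot) {
    *slot_ = inner;
  }
  ~ScopedPointerSwap() { *slot_ = saved_; }

 private:
  Scope** slot_;
  Scope* saved_;
};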
@@ -1777,12 +1967,6 @@ Block* Parser::ParseVariableStatement(VariableDeclarationContext var_context,
}
-bool Parser::IsEvalOrArguments(Handle<String> string) {
- return string.is_identical_to(isolate()->factory()->eval_string()) ||
- string.is_identical_to(isolate()->factory()->arguments_string());
-}
-
-
// If the variable declaration declares exactly one non-const
// variable, then *out is set to that variable. In all other cases,
// *out is untouched; in particular, it is the caller's responsibility
@@ -1827,29 +2011,31 @@ Block* Parser::ParseVariableDeclarations(
// * It is a Syntax Error if the code that matches this production is not
// contained in extended code.
//
- // However disallowing const in classic mode will break compatibility with
+ // However disallowing const in sloppy mode will break compatibility with
// existing pages. Therefore we keep allowing const with the old
- // non-harmony semantics in classic mode.
+ // non-harmony semantics in sloppy mode.
Consume(Token::CONST);
- switch (top_scope_->language_mode()) {
- case CLASSIC_MODE:
- mode = CONST;
- init_op = Token::INIT_CONST;
+ switch (strict_mode()) {
+ case SLOPPY:
+ mode = CONST_LEGACY;
+ init_op = Token::INIT_CONST_LEGACY;
break;
- case STRICT_MODE:
- ReportMessage("strict_const", Vector<const char*>::empty());
- *ok = false;
- return NULL;
- case EXTENDED_MODE:
- if (var_context == kStatement) {
- // In extended mode 'const' declarations are only allowed in source
- // element positions.
- ReportMessage("unprotected_const", Vector<const char*>::empty());
+ case STRICT:
+ if (allow_harmony_scoping()) {
+ if (var_context == kStatement) {
+ // In strict mode 'const' declarations are only allowed in source
+ // element positions.
+ ReportMessage("unprotected_const", Vector<const char*>::empty());
+ *ok = false;
+ return NULL;
+ }
+ mode = CONST;
+ init_op = Token::INIT_CONST;
+ } else {
+ ReportMessage("strict_const", Vector<const char*>::empty());
*ok = false;
return NULL;
}
- mode = CONST_HARMONY;
- init_op = Token::INIT_CONST_HARMONY;
}
is_const = true;
needs_init = true;
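
The rewritten switch above folds the old CLASSIC/STRICT/EXTENDED language modes into SLOPPY/STRICT plus an allow_harmony_scoping() flag. A stand-alone sketch of the resulting decision; the enums and message strings are simplified stand-ins for V8's types, not its API:

#include <string>

enum class StrictMode { kSloppy, kStrict };
enum class VarMode { kConstLegacy, kConst, kError };

struct ConstDecision {
  VarMode mode;
  std::string error;  // non-empty when the declaration must be rejected
};

ConstDecision ClassifyConst(StrictMode strict_mode, bool harmony_scoping,
                            bool statement_position) {
  if (strict_mode == StrictMode::kSloppy) {
    return {VarMode::kConstLegacy, ""};             // legacy 'const' semantics
  }
  if (!harmony_scoping) {
    return {VarMode::kError, "strict_const"};       // 'const' rejected outright
  }
  if (statement_position) {
    return {VarMode::kError, "unprotected_const"};  // only legal as a source element
  }
  return {VarMode::kConst, ""};                     // harmony const, initializer required
}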
@@ -1860,7 +2046,9 @@ Block* Parser::ParseVariableDeclarations(
//
// * It is a Syntax Error if the code that matches this production is not
// contained in extended code.
- if (!is_extended_mode()) {
+ //
+ // TODO(rossberg): make 'let' a legal identifier in sloppy mode.
+ if (!allow_harmony_scoping() || strict_mode() == SLOPPY) {
ReportMessage("illegal_let", Vector<const char*>::empty());
*ok = false;
return NULL;
@@ -1924,12 +2112,11 @@ Block* Parser::ParseVariableDeclarations(
is_const ? Interface::NewConst() : Interface::NewValue();
VariableProxy* proxy = NewUnresolved(name, mode, interface);
Declaration* declaration =
- factory()->NewVariableDeclaration(proxy, mode, top_scope_, pos);
+ factory()->NewVariableDeclaration(proxy, mode, scope_, pos);
Declare(declaration, mode != VAR, CHECK_OK);
nvars++;
if (declaration_scope->num_var_or_const() > kMaxNumFunctionLocals) {
- ReportMessageAt(scanner().location(), "too_many_variables",
- Vector<const char*>::empty());
+ ReportMessageAt(scanner()->location(), "too_many_variables");
*ok = false;
return NULL;
}
@@ -1944,7 +2131,7 @@ Block* Parser::ParseVariableDeclarations(
//
// var v; v = x;
//
- // In particular, we need to re-lookup 'v' (in top_scope_, not
+ // In particular, we need to re-lookup 'v' (in scope_, not
// declaration_scope) as it may be a different 'v' than the 'v' in the
// declaration (e.g., if we are inside a 'with' statement or 'catch'
// block).
@@ -1962,11 +2149,11 @@ Block* Parser::ParseVariableDeclarations(
// one - there is no re-lookup (see the last parameter of the
// Declare() call above).
- Scope* initialization_scope = is_const ? declaration_scope : top_scope_;
+ Scope* initialization_scope = is_const ? declaration_scope : scope_;
Expression* value = NULL;
int pos = -1;
// Harmony consts have non-optional initializers.
- if (peek() == Token::ASSIGN || mode == CONST_HARMONY) {
+ if (peek() == Token::ASSIGN || mode == CONST) {
Expect(Token::ASSIGN, CHECK_OK);
pos = position();
value = ParseAssignmentExpression(var_context != kForStatement, CHECK_OK);
@@ -2029,13 +2216,13 @@ Block* Parser::ParseVariableDeclarations(
// the number of arguments (1 or 2).
initialize = factory()->NewCallRuntime(
isolate()->factory()->InitializeConstGlobal_string(),
- Runtime::FunctionForId(Runtime::kInitializeConstGlobal),
+ Runtime::FunctionForId(Runtime::kHiddenInitializeConstGlobal),
arguments, pos);
} else {
// Add strict mode.
// We may want to pass singleton to avoid Literal allocations.
- LanguageMode language_mode = initialization_scope->language_mode();
- arguments->Add(factory()->NewNumberLiteral(language_mode, pos), zone());
+ StrictMode strict_mode = initialization_scope->strict_mode();
+ arguments->Add(factory()->NewNumberLiteral(strict_mode, pos), zone());
// Be careful not to assign a value to the global variable if
// we're in a with. The initialization value should not
@@ -2153,7 +2340,7 @@ Statement* Parser::ParseExpressionOrLabelledStatement(ZoneStringList* labels,
// Remove the "ghost" variable that turned out to be a label
// from the top scope. This way, we don't try to resolve it
// during the scope processing.
- top_scope_->RemoveUnresolved(var);
+ scope_->RemoveUnresolved(var);
Expect(Token::COLON, CHECK_OK);
return ParseStatement(labels, ok);
}
@@ -2163,12 +2350,12 @@ Statement* Parser::ParseExpressionOrLabelledStatement(ZoneStringList* labels,
// no line-terminator between the two words.
if (extension_ != NULL &&
peek() == Token::FUNCTION &&
- !scanner().HasAnyLineTerminatorBeforeNext() &&
+ !scanner()->HasAnyLineTerminatorBeforeNext() &&
expr != NULL &&
expr->AsVariableProxy() != NULL &&
expr->AsVariableProxy()->name()->Equals(
isolate()->heap()->native_string()) &&
- !scanner().literal_contains_escapes()) {
+ !scanner()->literal_contains_escapes()) {
return ParseNativeDeclaration(ok);
}
@@ -2176,11 +2363,11 @@ Statement* Parser::ParseExpressionOrLabelledStatement(ZoneStringList* labels,
// Only expect semicolon in the former case.
if (!FLAG_harmony_modules ||
peek() != Token::IDENTIFIER ||
- scanner().HasAnyLineTerminatorBeforeNext() ||
+ scanner()->HasAnyLineTerminatorBeforeNext() ||
expr->AsVariableProxy() == NULL ||
!expr->AsVariableProxy()->name()->Equals(
isolate()->heap()->module_string()) ||
- scanner().literal_contains_escapes()) {
+ scanner()->literal_contains_escapes()) {
ExpectSemicolon(CHECK_OK);
}
return factory()->NewExpressionStatement(expr, pos);
@@ -2217,7 +2404,7 @@ Statement* Parser::ParseContinueStatement(bool* ok) {
Expect(Token::CONTINUE, CHECK_OK);
Handle<String> label = Handle<String>::null();
Token::Value tok = peek();
- if (!scanner().HasAnyLineTerminatorBeforeNext() &&
+ if (!scanner()->HasAnyLineTerminatorBeforeNext() &&
tok != Token::SEMICOLON && tok != Token::RBRACE && tok != Token::EOS) {
// ECMA allows "eval" or "arguments" as labels even in strict mode.
label = ParseIdentifier(kAllowEvalOrArguments, CHECK_OK);
@@ -2232,7 +2419,7 @@ Statement* Parser::ParseContinueStatement(bool* ok) {
message = "unknown_label";
args = Vector<Handle<String> >(&label, 1);
}
- ReportMessageAt(scanner().location(), message, args);
+ ParserTraits::ReportMessageAt(scanner()->location(), message, args);
*ok = false;
return NULL;
}
@@ -2249,7 +2436,7 @@ Statement* Parser::ParseBreakStatement(ZoneStringList* labels, bool* ok) {
Expect(Token::BREAK, CHECK_OK);
Handle<String> label;
Token::Value tok = peek();
- if (!scanner().HasAnyLineTerminatorBeforeNext() &&
+ if (!scanner()->HasAnyLineTerminatorBeforeNext() &&
tok != Token::SEMICOLON && tok != Token::RBRACE && tok != Token::EOS) {
// ECMA allows "eval" or "arguments" as labels even in strict mode.
label = ParseIdentifier(kAllowEvalOrArguments, CHECK_OK);
@@ -2270,7 +2457,7 @@ Statement* Parser::ParseBreakStatement(ZoneStringList* labels, bool* ok) {
message = "unknown_label";
args = Vector<Handle<String> >(&label, 1);
}
- ReportMessageAt(scanner().location(), message, args);
+ ParserTraits::ReportMessageAt(scanner()->location(), message, args);
*ok = false;
return NULL;
}
@@ -2292,7 +2479,7 @@ Statement* Parser::ParseReturnStatement(bool* ok) {
Token::Value tok = peek();
Statement* result;
Expression* return_value;
- if (scanner().HasAnyLineTerminatorBeforeNext() ||
+ if (scanner()->HasAnyLineTerminatorBeforeNext() ||
tok == Token::SEMICOLON ||
tok == Token::RBRACE ||
tok == Token::EOS) {
@@ -2303,7 +2490,7 @@ Statement* Parser::ParseReturnStatement(bool* ok) {
ExpectSemicolon(CHECK_OK);
if (is_generator()) {
Expression* generator = factory()->NewVariableProxy(
- current_function_state_->generator_object_variable());
+ function_state_->generator_object_variable());
Expression* yield = factory()->NewYield(
generator, return_value, Yield::FINAL, pos);
result = factory()->NewExpressionStatement(yield, pos);
@@ -2316,7 +2503,7 @@ Statement* Parser::ParseReturnStatement(bool* ok) {
// function. See ECMA-262, section 12.9, page 67.
//
// To be consistent with KJS we report the syntax error at runtime.
- Scope* declaration_scope = top_scope_->DeclarationScope();
+ Scope* declaration_scope = scope_->DeclarationScope();
if (declaration_scope->is_global_scope() ||
declaration_scope->is_eval_scope()) {
Handle<String> message = isolate()->factory()->illegal_return_string();
@@ -2335,7 +2522,7 @@ Statement* Parser::ParseWithStatement(ZoneStringList* labels, bool* ok) {
Expect(Token::WITH, CHECK_OK);
int pos = position();
- if (!top_scope_->is_classic_mode()) {
+ if (strict_mode() == STRICT) {
ReportMessage("strict_mode_with", Vector<const char*>::empty());
*ok = false;
return NULL;
@@ -2345,13 +2532,13 @@ Statement* Parser::ParseWithStatement(ZoneStringList* labels, bool* ok) {
Expression* expr = ParseExpression(true, CHECK_OK);
Expect(Token::RPAREN, CHECK_OK);
- top_scope_->DeclarationScope()->RecordWithStatement();
- Scope* with_scope = NewScope(top_scope_, WITH_SCOPE);
+ scope_->DeclarationScope()->RecordWithStatement();
+ Scope* with_scope = NewScope(scope_, WITH_SCOPE);
Statement* stmt;
- { BlockState block_state(this, with_scope);
- with_scope->set_start_position(scanner().peek_location().beg_pos);
+ { BlockState block_state(&scope_, with_scope);
+ with_scope->set_start_position(scanner()->peek_location().beg_pos);
stmt = ParseStatement(labels, CHECK_OK);
- with_scope->set_end_position(scanner().location().end_pos);
+ with_scope->set_end_position(scanner()->location().end_pos);
}
return factory()->NewWithStatement(with_scope, expr, stmt, pos);
}
@@ -2425,7 +2612,7 @@ Statement* Parser::ParseThrowStatement(bool* ok) {
Expect(Token::THROW, CHECK_OK);
int pos = position();
- if (scanner().HasAnyLineTerminatorBeforeNext()) {
+ if (scanner()->HasAnyLineTerminatorBeforeNext()) {
ReportMessage("newline_after_throw", Vector<const char*>::empty());
*ok = false;
return NULL;
@@ -2480,21 +2667,22 @@ TryStatement* Parser::ParseTryStatement(bool* ok) {
Consume(Token::CATCH);
Expect(Token::LPAREN, CHECK_OK);
- catch_scope = NewScope(top_scope_, CATCH_SCOPE);
- catch_scope->set_start_position(scanner().location().beg_pos);
+ catch_scope = NewScope(scope_, CATCH_SCOPE);
+ catch_scope->set_start_position(scanner()->location().beg_pos);
name = ParseIdentifier(kDontAllowEvalOrArguments, CHECK_OK);
Expect(Token::RPAREN, CHECK_OK);
Target target(&this->target_stack_, &catch_collector);
- VariableMode mode = is_extended_mode() ? LET : VAR;
+ VariableMode mode =
+ allow_harmony_scoping() && strict_mode() == STRICT ? LET : VAR;
catch_variable =
catch_scope->DeclareLocal(name, mode, kCreatedInitialized);
- BlockState block_state(this, catch_scope);
+ BlockState block_state(&scope_, catch_scope);
catch_block = ParseBlock(NULL, CHECK_OK);
- catch_scope->set_end_position(scanner().location().end_pos);
+ catch_scope->set_end_position(scanner()->location().end_pos);
tok = peek();
}
@@ -2513,7 +2701,7 @@ TryStatement* Parser::ParseTryStatement(bool* ok) {
if (catch_block != NULL && finally_block != NULL) {
// If we have both, create an inner try/catch.
ASSERT(catch_scope != NULL && catch_variable != NULL);
- int index = current_function_state_->NextHandlerIndex();
+ int index = function_state_->NextHandlerIndex();
TryCatchStatement* statement = factory()->NewTryCatchStatement(
index, try_block, catch_scope, catch_variable, catch_block,
RelocInfo::kNoPosition);
@@ -2527,12 +2715,12 @@ TryStatement* Parser::ParseTryStatement(bool* ok) {
if (catch_block != NULL) {
ASSERT(finally_block == NULL);
ASSERT(catch_scope != NULL && catch_variable != NULL);
- int index = current_function_state_->NextHandlerIndex();
+ int index = function_state_->NextHandlerIndex();
result = factory()->NewTryCatchStatement(
index, try_block, catch_scope, catch_variable, catch_block, pos);
} else {
ASSERT(finally_block != NULL);
- int index = current_function_state_->NextHandlerIndex();
+ int index = function_state_->NextHandlerIndex();
result = factory()->NewTryFinallyStatement(
index, try_block, finally_block, pos);
// Combine the jump targets of the try block and the possible catch block.
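
The try-statement hunks keep the existing desugaring: with both a catch and a finally clause present, the parser builds an inner try/catch and wraps it in a try/finally, each drawing a handler index from the (renamed) function_state_. A compact sketch of that nesting choice, with strings standing in for the AST nodes:

#include <string>

// Renders the statement shape the parser builds from the optional catch and
// finally blocks; handler-index bookkeeping and jump-target merging omitted.
std::string BuildTryStatement(const std::string& try_block,
                              const std::string* catch_block,
                              const std::string* finally_block) {
  std::string inner = try_block;
  if (catch_block != nullptr) {
    inner = "try { " + inner + " } catch (e) { " + *catch_block + " }";
  }
  if (finally_block != nullptr) {
    inner = "try { " + inner + " } finally { " + *finally_block + " }";
  }
  return inner;
}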
@@ -2612,9 +2800,9 @@ void Parser::InitializeForEachStatement(ForEachStatement* stmt,
if (for_of != NULL) {
Factory* heap_factory = isolate()->factory();
- Variable* iterator = top_scope_->DeclarationScope()->NewTemporary(
+ Variable* iterator = scope_->DeclarationScope()->NewTemporary(
heap_factory->dot_iterator_string());
- Variable* result = top_scope_->DeclarationScope()->NewTemporary(
+ Variable* result = scope_->DeclarationScope()->NewTemporary(
heap_factory->dot_result_string());
Expression* assign_iterator;
@@ -2681,13 +2869,13 @@ Statement* Parser::ParseForStatement(ZoneStringList* labels, bool* ok) {
Statement* init = NULL;
// Create an in-between scope for let-bound iteration variables.
- Scope* saved_scope = top_scope_;
- Scope* for_scope = NewScope(top_scope_, BLOCK_SCOPE);
- top_scope_ = for_scope;
+ Scope* saved_scope = scope_;
+ Scope* for_scope = NewScope(scope_, BLOCK_SCOPE);
+ scope_ = for_scope;
Expect(Token::FOR, CHECK_OK);
Expect(Token::LPAREN, CHECK_OK);
- for_scope->set_start_position(scanner().location().beg_pos);
+ for_scope->set_start_position(scanner()->location().beg_pos);
if (peek() != Token::SEMICOLON) {
if (peek() == Token::VAR || peek() == Token::CONST) {
bool is_const = peek() == Token::CONST;
@@ -2710,15 +2898,15 @@ Statement* Parser::ParseForStatement(ZoneStringList* labels, bool* ok) {
Expect(Token::RPAREN, CHECK_OK);
VariableProxy* each =
- top_scope_->NewUnresolved(factory(), name, interface);
+ scope_->NewUnresolved(factory(), name, interface);
Statement* body = ParseStatement(NULL, CHECK_OK);
InitializeForEachStatement(loop, each, enumerable, body);
Block* result =
factory()->NewBlock(NULL, 2, false, RelocInfo::kNoPosition);
result->AddStatement(variable_statement, zone());
result->AddStatement(loop, zone());
- top_scope_ = saved_scope;
- for_scope->set_end_position(scanner().location().end_pos);
+ scope_ = saved_scope;
+ for_scope->set_end_position(scanner()->location().end_pos);
for_scope = for_scope->FinalizeBlockScope();
ASSERT(for_scope == NULL);
// Parsed for-in loop w/ variable/const declaration.
@@ -2755,21 +2943,22 @@ Statement* Parser::ParseForStatement(ZoneStringList* labels, bool* ok) {
Factory* heap_factory = isolate()->factory();
Handle<String> tempstr =
heap_factory->NewConsString(heap_factory->dot_for_string(), name);
+ RETURN_IF_EMPTY_HANDLE_VALUE(isolate(), tempstr, 0);
Handle<String> tempname = heap_factory->InternalizeString(tempstr);
- Variable* temp = top_scope_->DeclarationScope()->NewTemporary(tempname);
+ Variable* temp = scope_->DeclarationScope()->NewTemporary(tempname);
VariableProxy* temp_proxy = factory()->NewVariableProxy(temp);
ForEachStatement* loop =
factory()->NewForEachStatement(mode, labels, pos);
Target target(&this->target_stack_, loop);
// The expression does not see the loop variable.
- top_scope_ = saved_scope;
+ scope_ = saved_scope;
Expression* enumerable = ParseExpression(true, CHECK_OK);
- top_scope_ = for_scope;
+ scope_ = for_scope;
Expect(Token::RPAREN, CHECK_OK);
VariableProxy* each =
- top_scope_->NewUnresolved(factory(), name, Interface::NewValue());
+ scope_->NewUnresolved(factory(), name, Interface::NewValue());
Statement* body = ParseStatement(NULL, CHECK_OK);
Block* body_block =
factory()->NewBlock(NULL, 3, false, RelocInfo::kNoPosition);
@@ -2781,8 +2970,8 @@ Statement* Parser::ParseForStatement(ZoneStringList* labels, bool* ok) {
body_block->AddStatement(assignment_statement, zone());
body_block->AddStatement(body, zone());
InitializeForEachStatement(loop, temp_proxy, enumerable, body_block);
- top_scope_ = saved_scope;
- for_scope->set_end_position(scanner().location().end_pos);
+ scope_ = saved_scope;
+ for_scope->set_end_position(scanner()->location().end_pos);
for_scope = for_scope->FinalizeBlockScope();
body_block->set_scope(for_scope);
// Parsed for-in loop w/ let declaration.
@@ -2792,19 +2981,16 @@ Statement* Parser::ParseForStatement(ZoneStringList* labels, bool* ok) {
init = variable_statement;
}
} else {
+ Scanner::Location lhs_location = scanner()->peek_location();
Expression* expression = ParseExpression(false, CHECK_OK);
ForEachStatement::VisitMode mode;
bool accept_OF = expression->AsVariableProxy();
if (CheckInOrOf(accept_OF, &mode)) {
- // Signal a reference error if the expression is an invalid
- // left-hand side expression. We could report this as a syntax
- // error here but for compatibility with JSC we choose to report
- // the error at runtime.
if (expression == NULL || !expression->IsValidLeftHandSide()) {
- Handle<String> message =
- isolate()->factory()->invalid_lhs_in_for_in_string();
- expression = NewThrowReferenceError(message);
+ ReportMessageAt(lhs_location, "invalid_lhs_in_for", true);
+ *ok = false;
+ return NULL;
}
ForEachStatement* loop =
factory()->NewForEachStatement(mode, labels, pos);
@@ -2815,8 +3001,8 @@ Statement* Parser::ParseForStatement(ZoneStringList* labels, bool* ok) {
Statement* body = ParseStatement(NULL, CHECK_OK);
InitializeForEachStatement(loop, expression, enumerable, body);
- top_scope_ = saved_scope;
- for_scope->set_end_position(scanner().location().end_pos);
+ scope_ = saved_scope;
+ for_scope->set_end_position(scanner()->location().end_pos);
for_scope = for_scope->FinalizeBlockScope();
ASSERT(for_scope == NULL);
// Parsed for-in loop.
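
A behavioral change worth noting in the for-statement hunk: an invalid left-hand side in a for-in head is now rejected at parse time with "invalid_lhs_in_for" instead of being desugared into a runtime ReferenceError throw, and the scanner location is captured before the expression is parsed so the message can point at it. A sketch of the shape of that check; Location and Expression here are stand-ins, not V8's classes:

struct Location { int beg_pos = 0; int end_pos = 0; };
struct Expression {
  bool IsValidLeftHandSide() const { return valid_lhs; }
  bool valid_lhs = false;
};

bool CheckForInTarget(const Expression* expression,
                      const Location& lhs_location, bool* ok) {
  if (expression == nullptr || !expression->IsValidLeftHandSide()) {
    // The real parser calls ReportMessageAt(lhs_location, "invalid_lhs_in_for").
    (void)lhs_location;
    *ok = false;
    return false;
  }
  return true;
}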
@@ -2850,8 +3036,8 @@ Statement* Parser::ParseForStatement(ZoneStringList* labels, bool* ok) {
Expect(Token::RPAREN, CHECK_OK);
Statement* body = ParseStatement(NULL, CHECK_OK);
- top_scope_ = saved_scope;
- for_scope->set_end_position(scanner().location().end_pos);
+ scope_ = saved_scope;
+ for_scope->set_end_position(scanner()->location().end_pos);
for_scope = for_scope->FinalizeBlockScope();
if (for_scope != NULL) {
// Rewrite a for statement of the form
@@ -2878,581 +3064,6 @@ Statement* Parser::ParseForStatement(ZoneStringList* labels, bool* ok) {
}
-// Precedence = 1
-Expression* Parser::ParseExpression(bool accept_IN, bool* ok) {
- // Expression ::
- // AssignmentExpression
- // Expression ',' AssignmentExpression
-
- Expression* result = ParseAssignmentExpression(accept_IN, CHECK_OK);
- while (peek() == Token::COMMA) {
- Expect(Token::COMMA, CHECK_OK);
- int pos = position();
- Expression* right = ParseAssignmentExpression(accept_IN, CHECK_OK);
- result = factory()->NewBinaryOperation(Token::COMMA, result, right, pos);
- }
- return result;
-}
-
-
-// Precedence = 2
-Expression* Parser::ParseAssignmentExpression(bool accept_IN, bool* ok) {
- // AssignmentExpression ::
- // ConditionalExpression
- // YieldExpression
- // LeftHandSideExpression AssignmentOperator AssignmentExpression
-
- if (peek() == Token::YIELD && is_generator()) {
- return ParseYieldExpression(ok);
- }
-
- if (fni_ != NULL) fni_->Enter();
- Expression* expression = ParseConditionalExpression(accept_IN, CHECK_OK);
-
- if (!Token::IsAssignmentOp(peek())) {
- if (fni_ != NULL) fni_->Leave();
- // Parsed conditional expression only (no assignment).
- return expression;
- }
-
- // Signal a reference error if the expression is an invalid left-hand
- // side expression. We could report this as a syntax error here but
- // for compatibility with JSC we choose to report the error at
- // runtime.
- // TODO(ES5): Should change parsing for spec conformance.
- if (expression == NULL || !expression->IsValidLeftHandSide()) {
- Handle<String> message =
- isolate()->factory()->invalid_lhs_in_assignment_string();
- expression = NewThrowReferenceError(message);
- }
-
- if (!top_scope_->is_classic_mode()) {
- // Assignment to eval or arguments is disallowed in strict mode.
- CheckStrictModeLValue(expression, CHECK_OK);
- }
- MarkAsLValue(expression);
-
- Token::Value op = Next(); // Get assignment operator.
- int pos = position();
- Expression* right = ParseAssignmentExpression(accept_IN, CHECK_OK);
-
- // TODO(1231235): We try to estimate the set of properties set by
- // constructors. We define a new property whenever there is an
- // assignment to a property of 'this'. We should probably only add
- // properties if we haven't seen them before. Otherwise we'll
- // probably overestimate the number of properties.
- Property* property = expression ? expression->AsProperty() : NULL;
- if (op == Token::ASSIGN &&
- property != NULL &&
- property->obj()->AsVariableProxy() != NULL &&
- property->obj()->AsVariableProxy()->is_this()) {
- current_function_state_->AddProperty();
- }
-
- // If we assign a function literal to a property we pretenure the
- // literal so it can be added as a constant function property.
- if (property != NULL && right->AsFunctionLiteral() != NULL) {
- right->AsFunctionLiteral()->set_pretenure();
- }
-
- if (fni_ != NULL) {
- // Check if the right hand side is a call to avoid inferring a
- // name if we're dealing with "a = function(){...}();"-like
- // expression.
- if ((op == Token::INIT_VAR
- || op == Token::INIT_CONST
- || op == Token::ASSIGN)
- && (right->AsCall() == NULL && right->AsCallNew() == NULL)) {
- fni_->Infer();
- } else {
- fni_->RemoveLastFunction();
- }
- fni_->Leave();
- }
-
- return factory()->NewAssignment(op, expression, right, pos);
-}
-
-
-Expression* Parser::ParseYieldExpression(bool* ok) {
- // YieldExpression ::
- // 'yield' '*'? AssignmentExpression
- int pos = peek_position();
- Expect(Token::YIELD, CHECK_OK);
- Yield::Kind kind =
- Check(Token::MUL) ? Yield::DELEGATING : Yield::SUSPEND;
- Expression* generator_object = factory()->NewVariableProxy(
- current_function_state_->generator_object_variable());
- Expression* expression = ParseAssignmentExpression(false, CHECK_OK);
- Yield* yield = factory()->NewYield(generator_object, expression, kind, pos);
- if (kind == Yield::DELEGATING) {
- yield->set_index(current_function_state_->NextHandlerIndex());
- }
- return yield;
-}
-
-
-// Precedence = 3
-Expression* Parser::ParseConditionalExpression(bool accept_IN, bool* ok) {
- // ConditionalExpression ::
- // LogicalOrExpression
- // LogicalOrExpression '?' AssignmentExpression ':' AssignmentExpression
-
- int pos = peek_position();
- // We start using the binary expression parser for prec >= 4 only!
- Expression* expression = ParseBinaryExpression(4, accept_IN, CHECK_OK);
- if (peek() != Token::CONDITIONAL) return expression;
- Consume(Token::CONDITIONAL);
- // In parsing the first assignment expression in conditional
- // expressions we always accept the 'in' keyword; see ECMA-262,
- // section 11.12, page 58.
- Expression* left = ParseAssignmentExpression(true, CHECK_OK);
- Expect(Token::COLON, CHECK_OK);
- Expression* right = ParseAssignmentExpression(accept_IN, CHECK_OK);
- return factory()->NewConditional(expression, left, right, pos);
-}
-
-
-int ParserBase::Precedence(Token::Value tok, bool accept_IN) {
- if (tok == Token::IN && !accept_IN)
- return 0; // 0 precedence will terminate binary expression parsing
-
- return Token::Precedence(tok);
-}
-
-
-// Precedence >= 4
-Expression* Parser::ParseBinaryExpression(int prec, bool accept_IN, bool* ok) {
- ASSERT(prec >= 4);
- Expression* x = ParseUnaryExpression(CHECK_OK);
- for (int prec1 = Precedence(peek(), accept_IN); prec1 >= prec; prec1--) {
- // prec1 >= 4
- while (Precedence(peek(), accept_IN) == prec1) {
- Token::Value op = Next();
- int pos = position();
- Expression* y = ParseBinaryExpression(prec1 + 1, accept_IN, CHECK_OK);
-
- // Compute some expressions involving only number literals.
- if (x && x->AsLiteral() && x->AsLiteral()->value()->IsNumber() &&
- y && y->AsLiteral() && y->AsLiteral()->value()->IsNumber()) {
- double x_val = x->AsLiteral()->value()->Number();
- double y_val = y->AsLiteral()->value()->Number();
-
- switch (op) {
- case Token::ADD:
- x = factory()->NewNumberLiteral(x_val + y_val, pos);
- continue;
- case Token::SUB:
- x = factory()->NewNumberLiteral(x_val - y_val, pos);
- continue;
- case Token::MUL:
- x = factory()->NewNumberLiteral(x_val * y_val, pos);
- continue;
- case Token::DIV:
- x = factory()->NewNumberLiteral(x_val / y_val, pos);
- continue;
- case Token::BIT_OR: {
- int value = DoubleToInt32(x_val) | DoubleToInt32(y_val);
- x = factory()->NewNumberLiteral(value, pos);
- continue;
- }
- case Token::BIT_AND: {
- int value = DoubleToInt32(x_val) & DoubleToInt32(y_val);
- x = factory()->NewNumberLiteral(value, pos);
- continue;
- }
- case Token::BIT_XOR: {
- int value = DoubleToInt32(x_val) ^ DoubleToInt32(y_val);
- x = factory()->NewNumberLiteral(value, pos);
- continue;
- }
- case Token::SHL: {
- int value = DoubleToInt32(x_val) << (DoubleToInt32(y_val) & 0x1f);
- x = factory()->NewNumberLiteral(value, pos);
- continue;
- }
- case Token::SHR: {
- uint32_t shift = DoubleToInt32(y_val) & 0x1f;
- uint32_t value = DoubleToUint32(x_val) >> shift;
- x = factory()->NewNumberLiteral(value, pos);
- continue;
- }
- case Token::SAR: {
- uint32_t shift = DoubleToInt32(y_val) & 0x1f;
- int value = ArithmeticShiftRight(DoubleToInt32(x_val), shift);
- x = factory()->NewNumberLiteral(value, pos);
- continue;
- }
- default:
- break;
- }
- }
-
- // For now we distinguish between comparisons and other binary
- // operations. (We could combine the two and get rid of this
- // code and AST node eventually.)
- if (Token::IsCompareOp(op)) {
- // We have a comparison.
- Token::Value cmp = op;
- switch (op) {
- case Token::NE: cmp = Token::EQ; break;
- case Token::NE_STRICT: cmp = Token::EQ_STRICT; break;
- default: break;
- }
- x = factory()->NewCompareOperation(cmp, x, y, pos);
- if (cmp != op) {
- // The comparison was negated - add a NOT.
- x = factory()->NewUnaryOperation(Token::NOT, x, pos);
- }
-
- } else {
- // We have a "normal" binary operation.
- x = factory()->NewBinaryOperation(op, x, y, pos);
- }
- }
- }
- return x;
-}
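
The removed ParseBinaryExpression (expression parsing moves into the ParserBase/ParserTraits code shared with the preparser) folds arithmetic on pairs of number literals during parsing; the shift cases convert both operands to 32-bit integers and mask the shift count to five bits, as ECMAScript requires. A portable sketch of the three shift folds — V8 itself goes through its DoubleToInt32/DoubleToUint32 and ArithmeticShiftRight helpers:

#include <cstdint>

// '<<': shift in the unsigned domain, then reinterpret; the narrowing
// conversion is the usual two's-complement wrap on V8's targets.
int32_t FoldShl(int32_t x, int32_t y) {
  return static_cast<int32_t>(static_cast<uint32_t>(x) << (y & 0x1f));
}

// '>>>': logical shift of the left operand reinterpreted as unsigned.
uint32_t FoldShr(int32_t x, int32_t y) {
  return static_cast<uint32_t>(x) >> (y & 0x1f);
}

// '>>': arithmetic shift written portably (sign-extends negative values).
int32_t FoldSar(int32_t x, int32_t y) {
  uint32_t shift = static_cast<uint32_t>(y) & 0x1f;
  return x < 0 ? ~(~x >> shift) : x >> shift;
}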
-
-
-Expression* Parser::ParseUnaryExpression(bool* ok) {
- // UnaryExpression ::
- // PostfixExpression
- // 'delete' UnaryExpression
- // 'void' UnaryExpression
- // 'typeof' UnaryExpression
- // '++' UnaryExpression
- // '--' UnaryExpression
- // '+' UnaryExpression
- // '-' UnaryExpression
- // '~' UnaryExpression
- // '!' UnaryExpression
-
- Token::Value op = peek();
- if (Token::IsUnaryOp(op)) {
- op = Next();
- int pos = position();
- Expression* expression = ParseUnaryExpression(CHECK_OK);
-
- if (expression != NULL && (expression->AsLiteral() != NULL)) {
- Handle<Object> literal = expression->AsLiteral()->value();
- if (op == Token::NOT) {
- // Convert the literal to a boolean condition and negate it.
- bool condition = literal->BooleanValue();
- Handle<Object> result = isolate()->factory()->ToBoolean(!condition);
- return factory()->NewLiteral(result, pos);
- } else if (literal->IsNumber()) {
- // Compute some expressions involving only number literals.
- double value = literal->Number();
- switch (op) {
- case Token::ADD:
- return expression;
- case Token::SUB:
- return factory()->NewNumberLiteral(-value, pos);
- case Token::BIT_NOT:
- return factory()->NewNumberLiteral(~DoubleToInt32(value), pos);
- default:
- break;
- }
- }
- }
-
- // "delete identifier" is a syntax error in strict mode.
- if (op == Token::DELETE && !top_scope_->is_classic_mode()) {
- VariableProxy* operand = expression->AsVariableProxy();
- if (operand != NULL && !operand->is_this()) {
- ReportMessage("strict_delete", Vector<const char*>::empty());
- *ok = false;
- return NULL;
- }
- }
-
- // Desugar '+foo' into 'foo*1', this enables the collection of type feedback
- // without any special stub and the multiplication is removed later in
- // Crankshaft's canonicalization pass.
- if (op == Token::ADD) {
- return factory()->NewBinaryOperation(Token::MUL,
- expression,
- factory()->NewNumberLiteral(1, pos),
- pos);
- }
- // The same idea for '-foo' => 'foo*(-1)'.
- if (op == Token::SUB) {
- return factory()->NewBinaryOperation(Token::MUL,
- expression,
- factory()->NewNumberLiteral(-1, pos),
- pos);
- }
- // ...and one more time for '~foo' => 'foo^(~0)'.
- if (op == Token::BIT_NOT) {
- return factory()->NewBinaryOperation(Token::BIT_XOR,
- expression,
- factory()->NewNumberLiteral(~0, pos),
- pos);
- }
-
- return factory()->NewUnaryOperation(op, expression, pos);
-
- } else if (Token::IsCountOp(op)) {
- op = Next();
- Expression* expression = ParseUnaryExpression(CHECK_OK);
- // Signal a reference error if the expression is an invalid
- // left-hand side expression. We could report this as a syntax
- // error here but for compatibility with JSC we choose to report the
- // error at runtime.
- if (expression == NULL || !expression->IsValidLeftHandSide()) {
- Handle<String> message =
- isolate()->factory()->invalid_lhs_in_prefix_op_string();
- expression = NewThrowReferenceError(message);
- }
-
- if (!top_scope_->is_classic_mode()) {
- // Prefix expression operand in strict mode may not be eval or arguments.
- CheckStrictModeLValue(expression, CHECK_OK);
- }
- MarkAsLValue(expression);
-
- return factory()->NewCountOperation(op,
- true /* prefix */,
- expression,
- position());
-
- } else {
- return ParsePostfixExpression(ok);
- }
-}
-
-
-Expression* Parser::ParsePostfixExpression(bool* ok) {
- // PostfixExpression ::
- // LeftHandSideExpression ('++' | '--')?
-
- Expression* expression = ParseLeftHandSideExpression(CHECK_OK);
- if (!scanner().HasAnyLineTerminatorBeforeNext() &&
- Token::IsCountOp(peek())) {
- // Signal a reference error if the expression is an invalid
- // left-hand side expression. We could report this as a syntax
- // error here but for compatibility with JSC we choose to report the
- // error at runtime.
- if (expression == NULL || !expression->IsValidLeftHandSide()) {
- Handle<String> message =
- isolate()->factory()->invalid_lhs_in_postfix_op_string();
- expression = NewThrowReferenceError(message);
- }
-
- if (!top_scope_->is_classic_mode()) {
- // Postfix expression operand in strict mode may not be eval or arguments.
- CheckStrictModeLValue(expression, CHECK_OK);
- }
- MarkAsLValue(expression);
-
- Token::Value next = Next();
- expression =
- factory()->NewCountOperation(next,
- false /* postfix */,
- expression,
- position());
- }
- return expression;
-}
-
-
-Expression* Parser::ParseLeftHandSideExpression(bool* ok) {
- // LeftHandSideExpression ::
- // (NewExpression | MemberExpression) ...
-
- Expression* result;
- if (peek() == Token::NEW) {
- result = ParseNewExpression(CHECK_OK);
- } else {
- result = ParseMemberExpression(CHECK_OK);
- }
-
- while (true) {
- switch (peek()) {
- case Token::LBRACK: {
- Consume(Token::LBRACK);
- int pos = position();
- Expression* index = ParseExpression(true, CHECK_OK);
- result = factory()->NewProperty(result, index, pos);
- Expect(Token::RBRACK, CHECK_OK);
- break;
- }
-
- case Token::LPAREN: {
- int pos;
- if (scanner().current_token() == Token::IDENTIFIER) {
- // For call of an identifier we want to report position of
- // the identifier as position of the call in the stack trace.
- pos = position();
- } else {
- // For other kinds of calls we record position of the parenthesis as
- // position of the call. Note that this is extremely important for
- // expressions of the form function(){...}() for which call position
- // should not point to the closing brace otherwise it will intersect
- // with positions recorded for function literal and confuse debugger.
- pos = peek_position();
- // Also the trailing parenthesis are a hint that the function will
- // be called immediately. If we happen to have parsed a preceding
- // function literal eagerly, we can also compile it eagerly.
- if (result->IsFunctionLiteral() && mode() == PARSE_EAGERLY) {
- result->AsFunctionLiteral()->set_parenthesized();
- }
- }
- ZoneList<Expression*>* args = ParseArguments(CHECK_OK);
-
- // Keep track of eval() calls since they disable all local variable
- // optimizations.
- // The calls that need special treatment are the
- // direct eval calls. These calls are all of the form eval(...), with
- // no explicit receiver.
- // These calls are marked as potentially direct eval calls. Whether
- // they are actually direct calls to eval is determined at run time.
- VariableProxy* callee = result->AsVariableProxy();
- if (callee != NULL &&
- callee->IsVariable(isolate()->factory()->eval_string())) {
- top_scope_->DeclarationScope()->RecordEvalCall();
- }
- result = factory()->NewCall(result, args, pos);
- if (fni_ != NULL) fni_->RemoveLastFunction();
- break;
- }
-
- case Token::PERIOD: {
- Consume(Token::PERIOD);
- int pos = position();
- Handle<String> name = ParseIdentifierName(CHECK_OK);
- result = factory()->NewProperty(
- result, factory()->NewLiteral(name, pos), pos);
- if (fni_ != NULL) fni_->PushLiteralName(name);
- break;
- }
-
- default:
- return result;
- }
- }
-}
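
The removed ParseLeftHandSideExpression also carried the direct-eval bookkeeping: a call whose callee is the bare identifier 'eval' (a VariableProxy, hence no receiver) makes the parser call RecordEvalCall() on the declaration scope, which disables local-variable optimizations there. A classification sketch with a stand-in callee type:

#include <string>

struct Callee {
  bool is_plain_identifier;  // a VariableProxy in V8 terms: no receiver
  std::string name;
};

// True when a call must be treated as a potential direct eval; whether it
// really reaches the built-in eval is only known at run time, so the scope
// is flagged conservatively.
bool IsPotentialDirectEval(const Callee& callee) {
  return callee.is_plain_identifier && callee.name == "eval";
}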
-
-
-Expression* Parser::ParseNewPrefix(PositionStack* stack, bool* ok) {
- // NewExpression ::
- // ('new')+ MemberExpression
-
- // The grammar for new expressions is pretty warped. The keyword
- // 'new' can either be a part of the new expression (where it isn't
- // followed by an argument list) or a part of the member expression,
- // where it must be followed by an argument list. To accommodate
- // this, we parse the 'new' keywords greedily and keep track of how
- // many we have parsed. This information is then passed on to the
- // member expression parser, which is only allowed to match argument
- // lists as long as it has 'new' prefixes left
- Expect(Token::NEW, CHECK_OK);
- PositionStack::Element pos(stack, position());
-
- Expression* result;
- if (peek() == Token::NEW) {
- result = ParseNewPrefix(stack, CHECK_OK);
- } else {
- result = ParseMemberWithNewPrefixesExpression(stack, CHECK_OK);
- }
-
- if (!stack->is_empty()) {
- int last = stack->pop();
- result = factory()->NewCallNew(
- result, new(zone()) ZoneList<Expression*>(0, zone()), last);
- }
- return result;
-}
-
-
-Expression* Parser::ParseNewExpression(bool* ok) {
- PositionStack stack(ok);
- return ParseNewPrefix(&stack, ok);
-}
-
-
-Expression* Parser::ParseMemberExpression(bool* ok) {
- return ParseMemberWithNewPrefixesExpression(NULL, ok);
-}
-
-
-Expression* Parser::ParseMemberWithNewPrefixesExpression(PositionStack* stack,
- bool* ok) {
- // MemberExpression ::
- // (PrimaryExpression | FunctionLiteral)
- // ('[' Expression ']' | '.' Identifier | Arguments)*
-
- // Parse the initial primary or function expression.
- Expression* result = NULL;
- if (peek() == Token::FUNCTION) {
- Consume(Token::FUNCTION);
- int function_token_position = position();
- bool is_generator = allow_generators() && Check(Token::MUL);
- Handle<String> name;
- bool is_strict_reserved_name = false;
- Scanner::Location function_name_location = Scanner::Location::invalid();
- if (peek_any_identifier()) {
- name = ParseIdentifierOrStrictReservedWord(&is_strict_reserved_name,
- CHECK_OK);
- function_name_location = scanner().location();
- }
- FunctionLiteral::FunctionType function_type = name.is_null()
- ? FunctionLiteral::ANONYMOUS_EXPRESSION
- : FunctionLiteral::NAMED_EXPRESSION;
- result = ParseFunctionLiteral(name,
- function_name_location,
- is_strict_reserved_name,
- is_generator,
- function_token_position,
- function_type,
- CHECK_OK);
- } else {
- result = ParsePrimaryExpression(CHECK_OK);
- }
-
- while (true) {
- switch (peek()) {
- case Token::LBRACK: {
- Consume(Token::LBRACK);
- int pos = position();
- Expression* index = ParseExpression(true, CHECK_OK);
- result = factory()->NewProperty(result, index, pos);
- if (fni_ != NULL) {
- if (index->IsPropertyName()) {
- fni_->PushLiteralName(index->AsLiteral()->AsPropertyName());
- } else {
- fni_->PushLiteralName(
- isolate()->factory()->anonymous_function_string());
- }
- }
- Expect(Token::RBRACK, CHECK_OK);
- break;
- }
- case Token::PERIOD: {
- Consume(Token::PERIOD);
- int pos = position();
- Handle<String> name = ParseIdentifierName(CHECK_OK);
- result = factory()->NewProperty(
- result, factory()->NewLiteral(name, pos), pos);
- if (fni_ != NULL) fni_->PushLiteralName(name);
- break;
- }
- case Token::LPAREN: {
- if ((stack == NULL) || stack->is_empty()) return result;
- // Consume one of the new prefixes (already parsed).
- ZoneList<Expression*>* args = ParseArguments(CHECK_OK);
- int pos = stack->pop();
- result = factory()->NewCallNew(result, args, pos);
- break;
- }
- default:
- return result;
- }
- }
-}
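
The removed ParseNewPrefix/ParseMemberWithNewPrefixesExpression pair implements the grammar note above: 'new' keywords are consumed greedily, each argument list met while parsing the member expression consumes one pending 'new', and any left over become no-argument constructions of the result. A toy, string-based rendering of that stack discipline (not V8's token stream or AST; "()" stands for any argument list):

#include <cstddef>
#include <stack>
#include <string>
#include <vector>

std::string RenderNewExpression(const std::vector<std::string>& tokens) {
  std::size_t i = 0;
  std::stack<int> pending_news;                 // one entry per leading 'new'
  while (i < tokens.size() && tokens[i] == "new") {
    pending_news.push(0);
    ++i;
  }
  std::string expr = i < tokens.size() ? tokens[i++] : "";
  while (i < tokens.size() && tokens[i] == "()") {
    if (pending_news.empty()) {
      expr += "()";                             // ordinary call
    } else {
      pending_news.pop();                       // argument list feeds a 'new'
      expr = "(new " + expr + "())";
    }
    ++i;
  }
  while (!pending_news.empty()) {               // leftover 'new's get no arguments
    pending_news.pop();
    expr = "(new " + expr + ")";
  }
  return expr;
}

For {"new", "new", "Foo", "()"} this yields "(new (new Foo()))": the single argument list binds to the innermost 'new', which is the behavior the position stack exists to preserve.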
-
-
DebuggerStatement* Parser::ParseDebuggerStatement(bool* ok) {
// In ECMA-262 'debugger' is defined as a reserved keyword. In some browser
// contexts this is used as a statement which invokes the debugger as if a
@@ -3476,152 +3087,6 @@ void Parser::ReportInvalidPreparseData(Handle<String> name, bool* ok) {
}
-Expression* Parser::ParsePrimaryExpression(bool* ok) {
- // PrimaryExpression ::
- // 'this'
- // 'null'
- // 'true'
- // 'false'
- // Identifier
- // Number
- // String
- // ArrayLiteral
- // ObjectLiteral
- // RegExpLiteral
- // '(' Expression ')'
-
- int pos = peek_position();
- Expression* result = NULL;
- switch (peek()) {
- case Token::THIS: {
- Consume(Token::THIS);
- result = factory()->NewVariableProxy(top_scope_->receiver());
- break;
- }
-
- case Token::NULL_LITERAL:
- Consume(Token::NULL_LITERAL);
- result = factory()->NewLiteral(isolate()->factory()->null_value(), pos);
- break;
-
- case Token::TRUE_LITERAL:
- Consume(Token::TRUE_LITERAL);
- result = factory()->NewLiteral(isolate()->factory()->true_value(), pos);
- break;
-
- case Token::FALSE_LITERAL:
- Consume(Token::FALSE_LITERAL);
- result = factory()->NewLiteral(isolate()->factory()->false_value(), pos);
- break;
-
- case Token::IDENTIFIER:
- case Token::YIELD:
- case Token::FUTURE_STRICT_RESERVED_WORD: {
- // Using eval or arguments in this context is OK even in strict mode.
- Handle<String> name = ParseIdentifier(kAllowEvalOrArguments, CHECK_OK);
- if (fni_ != NULL) fni_->PushVariableName(name);
- // The name may refer to a module instance object, so its type is unknown.
-#ifdef DEBUG
- if (FLAG_print_interface_details)
- PrintF("# Variable %s ", name->ToAsciiArray());
-#endif
- Interface* interface = Interface::NewUnknown(zone());
- result = top_scope_->NewUnresolved(factory(), name, interface, pos);
- break;
- }
-
- case Token::NUMBER: {
- Consume(Token::NUMBER);
- ASSERT(scanner().is_literal_ascii());
- double value = StringToDouble(isolate()->unicode_cache(),
- scanner().literal_ascii_string(),
- ALLOW_HEX | ALLOW_OCTAL |
- ALLOW_IMPLICIT_OCTAL | ALLOW_BINARY);
- result = factory()->NewNumberLiteral(value, pos);
- break;
- }
-
- case Token::STRING: {
- Consume(Token::STRING);
- Handle<String> symbol = GetSymbol();
- result = factory()->NewLiteral(symbol, pos);
- if (fni_ != NULL) fni_->PushLiteralName(symbol);
- break;
- }
-
- case Token::ASSIGN_DIV:
- result = ParseRegExpLiteral(true, CHECK_OK);
- break;
-
- case Token::DIV:
- result = ParseRegExpLiteral(false, CHECK_OK);
- break;
-
- case Token::LBRACK:
- result = ParseArrayLiteral(CHECK_OK);
- break;
-
- case Token::LBRACE:
- result = ParseObjectLiteral(CHECK_OK);
- break;
-
- case Token::LPAREN:
- Consume(Token::LPAREN);
- // Heuristically try to detect immediately called functions before
- // seeing the call parentheses.
- parenthesized_function_ = (peek() == Token::FUNCTION);
- result = ParseExpression(true, CHECK_OK);
- Expect(Token::RPAREN, CHECK_OK);
- break;
-
- case Token::MOD:
- if (allow_natives_syntax() || extension_ != NULL) {
- result = ParseV8Intrinsic(CHECK_OK);
- break;
- }
- // If we're not allowing special syntax we fall-through to the
- // default case.
-
- default: {
- Token::Value tok = Next();
- ReportUnexpectedToken(tok);
- *ok = false;
- return NULL;
- }
- }
-
- return result;
-}
-
-
-Expression* Parser::ParseArrayLiteral(bool* ok) {
- // ArrayLiteral ::
- // '[' Expression? (',' Expression?)* ']'
-
- int pos = peek_position();
- ZoneList<Expression*>* values = new(zone()) ZoneList<Expression*>(4, zone());
- Expect(Token::LBRACK, CHECK_OK);
- while (peek() != Token::RBRACK) {
- Expression* elem;
- if (peek() == Token::COMMA) {
- elem = GetLiteralTheHole(peek_position());
- } else {
- elem = ParseAssignmentExpression(true, CHECK_OK);
- }
- values->Add(elem, zone());
- if (peek() != Token::RBRACK) {
- Expect(Token::COMMA, CHECK_OK);
- }
- }
- Expect(Token::RBRACK, CHECK_OK);
-
- // Update the scope information before the pre-parsing bailout.
- int literal_index = current_function_state_->NextMaterializedLiteralIndex();
-
- return factory()->NewArrayLiteral(values, literal_index, pos);
-}
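
The removed ParseArrayLiteral preserves elisions by inserting 'the hole' whenever a comma is seen with no element before it, so [,1,,2] produces four slots with holes at indices 0 and 2, while a trailing comma adds nothing. A toy version of that loop over pre-tokenized element tokens (strings instead of AST nodes; the real code calls Expect(Token::COMMA) where this sketch just skips the separator):

#include <cstddef>
#include <string>
#include <vector>

// tokens are everything between '[' and ']'; "," separates elements and any
// other token stands for an already-parsed expression.
std::vector<std::string> ParseArrayElements(const std::vector<std::string>& tokens) {
  std::vector<std::string> values;
  std::size_t i = 0;
  while (i < tokens.size()) {
    if (tokens[i] == ",") {
      values.push_back("<hole>");  // elision: no expression before this comma
    } else {
      values.push_back(tokens[i]);
      ++i;                         // consume the element
    }
    if (i < tokens.size()) {
      ++i;                         // skip the separating comma
    }
  }
  return values;
}

With {",", "1", ",", ",", "2"} (i.e. [,1,,2]) the result is {"<hole>", "1", "<hole>", "2"}.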
-
-
bool CompileTimeValue::IsCompileTimeValue(Expression* expression) {
if (expression->AsLiteral() != NULL) return true;
MaterializedLiteral* lit = expression->AsMaterializedLiteral();
@@ -3665,310 +3130,6 @@ Handle<FixedArray> CompileTimeValue::GetElements(Handle<FixedArray> value) {
}
-Expression* Parser::ParseObjectLiteral(bool* ok) {
- // ObjectLiteral ::
- // '{' (
- // ((IdentifierName | String | Number) ':' AssignmentExpression)
- // | (('get' | 'set') (IdentifierName | String | Number) FunctionLiteral)
- // )*[','] '}'
-
- int pos = peek_position();
- ZoneList<ObjectLiteral::Property*>* properties =
- new(zone()) ZoneList<ObjectLiteral::Property*>(4, zone());
- int number_of_boilerplate_properties = 0;
- bool has_function = false;
-
- ObjectLiteralChecker checker(this, top_scope_->language_mode());
-
- Expect(Token::LBRACE, CHECK_OK);
-
- while (peek() != Token::RBRACE) {
- if (fni_ != NULL) fni_->Enter();
-
- Literal* key = NULL;
- Token::Value next = peek();
- int next_pos = peek_position();
-
- switch (next) {
- case Token::FUTURE_RESERVED_WORD:
- case Token::FUTURE_STRICT_RESERVED_WORD:
- case Token::IDENTIFIER: {
- bool is_getter = false;
- bool is_setter = false;
- Handle<String> id =
- ParseIdentifierNameOrGetOrSet(&is_getter, &is_setter, CHECK_OK);
- if (fni_ != NULL) fni_->PushLiteralName(id);
-
- if ((is_getter || is_setter) && peek() != Token::COLON) {
- // Special handling of getter and setter syntax:
- // { ... , get foo() { ... }, ... , set foo(v) { ... v ... } , ... }
- // We have already read the "get" or "set" keyword.
- Token::Value next = Next();
- bool is_keyword = Token::IsKeyword(next);
- if (next != i::Token::IDENTIFIER &&
- next != i::Token::FUTURE_RESERVED_WORD &&
- next != i::Token::FUTURE_STRICT_RESERVED_WORD &&
- next != i::Token::NUMBER &&
- next != i::Token::STRING &&
- !is_keyword) {
- // Unexpected token.
- ReportUnexpectedToken(next);
- *ok = false;
- return NULL;
- }
- // Validate the property.
- PropertyKind type = is_getter ? kGetterProperty : kSetterProperty;
- checker.CheckProperty(next, type, CHECK_OK);
- Handle<String> name = is_keyword
- ? isolate_->factory()->InternalizeUtf8String(Token::String(next))
- : GetSymbol();
- FunctionLiteral* value =
- ParseFunctionLiteral(name,
- scanner().location(),
- false, // reserved words are allowed here
- false, // not a generator
- RelocInfo::kNoPosition,
- FunctionLiteral::ANONYMOUS_EXPRESSION,
- CHECK_OK);
- // Allow any number of parameters for compatibility with JSC.
- // Specification only allows zero parameters for get and one for set.
- ObjectLiteral::Property* property =
- factory()->NewObjectLiteralProperty(is_getter, value, next_pos);
- if (ObjectLiteral::IsBoilerplateProperty(property)) {
- number_of_boilerplate_properties++;
- }
- properties->Add(property, zone());
- if (peek() != Token::RBRACE) Expect(Token::COMMA, CHECK_OK);
-
- if (fni_ != NULL) {
- fni_->Infer();
- fni_->Leave();
- }
- continue; // restart the while
- }
- // Failed to parse as get/set property, so it's just a property
- // called "get" or "set".
- key = factory()->NewLiteral(id, next_pos);
- break;
- }
- case Token::STRING: {
- Consume(Token::STRING);
- Handle<String> string = GetSymbol();
- if (fni_ != NULL) fni_->PushLiteralName(string);
- uint32_t index;
- if (!string.is_null() && string->AsArrayIndex(&index)) {
- key = factory()->NewNumberLiteral(index, next_pos);
- break;
- }
- key = factory()->NewLiteral(string, next_pos);
- break;
- }
- case Token::NUMBER: {
- Consume(Token::NUMBER);
- ASSERT(scanner().is_literal_ascii());
- double value = StringToDouble(isolate()->unicode_cache(),
- scanner().literal_ascii_string(),
- ALLOW_HEX | ALLOW_OCTAL |
- ALLOW_IMPLICIT_OCTAL | ALLOW_BINARY);
- key = factory()->NewNumberLiteral(value, next_pos);
- break;
- }
- default:
- if (Token::IsKeyword(next)) {
- Consume(next);
- Handle<String> string = GetSymbol();
- key = factory()->NewLiteral(string, next_pos);
- } else {
- // Unexpected token.
- Token::Value next = Next();
- ReportUnexpectedToken(next);
- *ok = false;
- return NULL;
- }
- }
-
- // Validate the property
- checker.CheckProperty(next, kValueProperty, CHECK_OK);
-
- Expect(Token::COLON, CHECK_OK);
- Expression* value = ParseAssignmentExpression(true, CHECK_OK);
-
- ObjectLiteral::Property* property =
- factory()->NewObjectLiteralProperty(key, value);
-
- // Mark top-level object literals that contain function literals and
- // pretenure the literal so it can be added as a constant function
- // property.
- if (top_scope_->DeclarationScope()->is_global_scope() &&
- value->AsFunctionLiteral() != NULL) {
- has_function = true;
- value->AsFunctionLiteral()->set_pretenure();
- }
-
- // Count CONSTANT or COMPUTED properties to maintain the enumeration order.
- if (ObjectLiteral::IsBoilerplateProperty(property)) {
- number_of_boilerplate_properties++;
- }
- properties->Add(property, zone());
-
- // TODO(1240767): Consider allowing trailing comma.
- if (peek() != Token::RBRACE) Expect(Token::COMMA, CHECK_OK);
-
- if (fni_ != NULL) {
- fni_->Infer();
- fni_->Leave();
- }
- }
- Expect(Token::RBRACE, CHECK_OK);
-
- // Computation of literal_index must happen before pre parse bailout.
- int literal_index = current_function_state_->NextMaterializedLiteralIndex();
-
- return factory()->NewObjectLiteral(properties,
- literal_index,
- number_of_boilerplate_properties,
- has_function,
- pos);
-}
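
Among the removed object-literal code, the accessor path is the subtle part: 'get' and 'set' are not reserved words, so an identifier key is only treated as an accessor prefix when the next token is not a colon, and the accessor name itself may be an identifier, keyword, number or string for JSC compatibility. A minimal classification sketch; the enums are illustrative, not V8's:

#include <string>

enum class Tok { kColon, kIdentifier, kString, kNumber, kKeyword, kOther };
enum class PropertyKind { kValue, kGetter, kSetter };

// An identifier key becomes an accessor only when it is literally "get" or
// "set" AND is not immediately followed by ':'; otherwise it is an ordinary
// property that merely happens to be named "get"/"set".
PropertyKind ClassifyIdentifierProperty(const std::string& key, Tok next) {
  if (next != Tok::kColon) {
    if (key == "get") return PropertyKind::kGetter;
    if (key == "set") return PropertyKind::kSetter;
  }
  return PropertyKind::kValue;
}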
-
-
-Expression* Parser::ParseRegExpLiteral(bool seen_equal, bool* ok) {
- int pos = peek_position();
- if (!scanner().ScanRegExpPattern(seen_equal)) {
- Next();
- ReportMessage("unterminated_regexp", Vector<const char*>::empty());
- *ok = false;
- return NULL;
- }
-
- int literal_index = current_function_state_->NextMaterializedLiteralIndex();
-
- Handle<String> js_pattern = NextLiteralString(TENURED);
- scanner().ScanRegExpFlags();
- Handle<String> js_flags = NextLiteralString(TENURED);
- Next();
-
- return factory()->NewRegExpLiteral(js_pattern, js_flags, literal_index, pos);
-}
-
-
-ZoneList<Expression*>* Parser::ParseArguments(bool* ok) {
- // Arguments ::
- // '(' (AssignmentExpression)*[','] ')'
-
- ZoneList<Expression*>* result = new(zone()) ZoneList<Expression*>(4, zone());
- Expect(Token::LPAREN, CHECK_OK);
- bool done = (peek() == Token::RPAREN);
- while (!done) {
- Expression* argument = ParseAssignmentExpression(true, CHECK_OK);
- result->Add(argument, zone());
- if (result->length() > Code::kMaxArguments) {
- ReportMessageAt(scanner().location(), "too_many_arguments",
- Vector<const char*>::empty());
- *ok = false;
- return NULL;
- }
- done = (peek() == Token::RPAREN);
- if (!done) Expect(Token::COMMA, CHECK_OK);
- }
- Expect(Token::RPAREN, CHECK_OK);
- return result;
-}
-
-
-class SingletonLogger : public ParserRecorder {
- public:
- SingletonLogger() : has_error_(false), start_(-1), end_(-1) { }
- virtual ~SingletonLogger() { }
-
- void Reset() { has_error_ = false; }
-
- virtual void LogFunction(int start,
- int end,
- int literals,
- int properties,
- LanguageMode mode) {
- ASSERT(!has_error_);
- start_ = start;
- end_ = end;
- literals_ = literals;
- properties_ = properties;
- mode_ = mode;
- };
-
- // Logs a symbol creation of a literal or identifier.
- virtual void LogAsciiSymbol(int start, Vector<const char> literal) { }
- virtual void LogUtf16Symbol(int start, Vector<const uc16> literal) { }
-
- // Logs an error message and marks the log as containing an error.
- // Further logging will be ignored, and ExtractData will return a vector
- // representing the error only.
- virtual void LogMessage(int start,
- int end,
- const char* message,
- const char* argument_opt) {
- if (has_error_) return;
- has_error_ = true;
- start_ = start;
- end_ = end;
- message_ = message;
- argument_opt_ = argument_opt;
- }
-
- virtual int function_position() { return 0; }
-
- virtual int symbol_position() { return 0; }
-
- virtual int symbol_ids() { return -1; }
-
- virtual Vector<unsigned> ExtractData() {
- UNREACHABLE();
- return Vector<unsigned>();
- }
-
- virtual void PauseRecording() { }
-
- virtual void ResumeRecording() { }
-
- bool has_error() { return has_error_; }
-
- int start() { return start_; }
- int end() { return end_; }
- int literals() {
- ASSERT(!has_error_);
- return literals_;
- }
- int properties() {
- ASSERT(!has_error_);
- return properties_;
- }
- LanguageMode language_mode() {
- ASSERT(!has_error_);
- return mode_;
- }
- const char* message() {
- ASSERT(has_error_);
- return message_;
- }
- const char* argument_opt() {
- ASSERT(has_error_);
- return argument_opt_;
- }
-
- private:
- bool has_error_;
- int start_;
- int end_;
- // For function entries.
- int literals_;
- int properties_;
- LanguageMode mode_;
- // For error messages.
- const char* message_;
- const char* argument_opt_;
-};
-
-
FunctionLiteral* Parser::ParseFunctionLiteral(
Handle<String> function_name,
Scanner::Location function_name_location,
@@ -4021,14 +3182,15 @@ FunctionLiteral* Parser::ParseFunctionLiteral(
// one relative to the deserialized scope chain. Otherwise we must be
// compiling a function in an inner declaration scope in the eval, e.g. a
// nested function, and hoisting works normally relative to that.
- Scope* declaration_scope = top_scope_->DeclarationScope();
+ Scope* declaration_scope = scope_->DeclarationScope();
Scope* original_declaration_scope = original_scope_->DeclarationScope();
Scope* scope =
- function_type == FunctionLiteral::DECLARATION && !is_extended_mode() &&
+ function_type == FunctionLiteral::DECLARATION &&
+ (!allow_harmony_scoping() || strict_mode() == SLOPPY) &&
(original_scope_ == original_declaration_scope ||
declaration_scope != original_declaration_scope)
? NewScope(declaration_scope, FUNCTION_SCOPE)
- : NewScope(top_scope_, FUNCTION_SCOPE);
+ : NewScope(scope_, FUNCTION_SCOPE);
ZoneList<Statement*>* body = NULL;
int materialized_literal_count = -1;
int expected_property_count = -1;
@@ -4041,23 +3203,23 @@ FunctionLiteral* Parser::ParseFunctionLiteral(
FunctionLiteral::IsGeneratorFlag generator = is_generator
? FunctionLiteral::kIsGenerator
: FunctionLiteral::kNotGenerator;
+ DeferredFeedbackSlotProcessor* slot_processor;
AstProperties ast_properties;
BailoutReason dont_optimize_reason = kNoReason;
// Parse function body.
- { FunctionState function_state(this, scope);
- top_scope_->SetScopeName(function_name);
+ { FunctionState function_state(&function_state_, &scope_, scope, zone());
+ scope_->SetScopeName(function_name);
if (is_generator) {
// For generators, allocating variables in contexts is currently a win
// because it minimizes the work needed to suspend and resume an
// activation.
- top_scope_->ForceContextAllocation();
+ scope_->ForceContextAllocation();
// Calling a generator returns a generator object. That object is stored
// in a temporary variable, a definition that is used by "yield"
- // expressions. Presence of a variable for the generator object in the
- // FunctionState indicates that this function is a generator.
- Variable* temp = top_scope_->DeclarationScope()->NewTemporary(
+ // expressions. This also marks the FunctionState as a generator.
+ Variable* temp = scope_->DeclarationScope()->NewTemporary(
isolate()->factory()->dot_generator_object_string());
function_state.set_generator_object_variable(temp);
}
@@ -4065,7 +3227,7 @@ FunctionLiteral* Parser::ParseFunctionLiteral(
// FormalParameterList ::
// '(' (Identifier)*[','] ')'
Expect(Token::LPAREN, CHECK_OK);
- scope->set_start_position(scanner().location().beg_pos);
+ scope->set_start_position(scanner()->location().beg_pos);
// We don't yet know if the function will be strict, so we cannot yet
// produce errors for parameter names or duplicates. However, we remember
@@ -4082,21 +3244,20 @@ FunctionLiteral* Parser::ParseFunctionLiteral(
// Store locations for possible future error reports.
if (!eval_args_error_log.IsValid() && IsEvalOrArguments(param_name)) {
- eval_args_error_log = scanner().location();
+ eval_args_error_log = scanner()->location();
}
if (!reserved_loc.IsValid() && is_strict_reserved) {
- reserved_loc = scanner().location();
+ reserved_loc = scanner()->location();
}
- if (!dupe_error_loc.IsValid() && top_scope_->IsDeclared(param_name)) {
+ if (!dupe_error_loc.IsValid() && scope_->IsDeclared(param_name)) {
duplicate_parameters = FunctionLiteral::kHasDuplicateParameters;
- dupe_error_loc = scanner().location();
+ dupe_error_loc = scanner()->location();
}
- top_scope_->DeclareParameter(param_name, VAR);
+ scope_->DeclareParameter(param_name, VAR);
num_parameters++;
if (num_parameters > Code::kMaxArguments) {
- ReportMessageAt(scanner().location(), "too_many_parameters",
- Vector<const char*>::empty());
+ ReportMessageAt(scanner()->location(), "too_many_parameters");
*ok = false;
return NULL;
}
@@ -4114,21 +3275,28 @@ FunctionLiteral* Parser::ParseFunctionLiteral(
// future we can change the AST to only refer to VariableProxies
   // instead of Variables and Proxies as is the case now.
Variable* fvar = NULL;
- Token::Value fvar_init_op = Token::INIT_CONST;
+ Token::Value fvar_init_op = Token::INIT_CONST_LEGACY;
if (function_type == FunctionLiteral::NAMED_EXPRESSION) {
- if (is_extended_mode()) fvar_init_op = Token::INIT_CONST_HARMONY;
- VariableMode fvar_mode = is_extended_mode() ? CONST_HARMONY : CONST;
- fvar = new(zone()) Variable(top_scope_,
+ if (allow_harmony_scoping() && strict_mode() == STRICT) {
+ fvar_init_op = Token::INIT_CONST;
+ }
+ VariableMode fvar_mode =
+ allow_harmony_scoping() && strict_mode() == STRICT ? CONST
+ : CONST_LEGACY;
+ fvar = new(zone()) Variable(scope_,
function_name, fvar_mode, true /* is valid LHS */,
Variable::NORMAL, kCreatedInitialized, Interface::NewConst());
VariableProxy* proxy = factory()->NewVariableProxy(fvar);
VariableDeclaration* fvar_declaration = factory()->NewVariableDeclaration(
- proxy, fvar_mode, top_scope_, RelocInfo::kNoPosition);
- top_scope_->DeclareFunctionVar(fvar_declaration);
+ proxy, fvar_mode, scope_, RelocInfo::kNoPosition);
+ scope_->DeclareFunctionVar(fvar_declaration);
}
- // Determine whether the function will be lazily compiled.
- // The heuristics are:
+ // Determine if the function can be parsed lazily. Lazy parsing is different
+ // from lazy compilation; we need to parse more eagerly than we compile.
+
+ // We can only parse lazily if we also compile lazily. The heuristics for
+ // lazy compilation are:
// - It must not have been prohibited by the caller to Parse (some callers
// need a full AST).
// - The outer scope must allow lazy compilation of inner functions.
@@ -4138,26 +3306,45 @@ FunctionLiteral* Parser::ParseFunctionLiteral(
// compiled.
// These are all things we can know at this point, without looking at the
// function itself.
- bool is_lazily_compiled = (mode() == PARSE_LAZILY &&
- top_scope_->AllowsLazyCompilation() &&
- !parenthesized_function_);
+
+ // In addition, we need to distinguish between these cases:
+ // (function foo() {
+ // bar = function() { return 1; }
+ // })();
+ // and
+ // (function foo() {
+ // var a = 1;
+ // bar = function() { return a; }
+ // })();
+
+ // Now foo will be parsed eagerly and compiled eagerly (optimization: assume
+  // that a parenthesis before the function means that it will be called
+ // immediately). The inner function *must* be parsed eagerly to resolve the
+ // possible reference to the variable in foo's scope. However, it's possible
+ // that it will be compiled lazily.
+
+ // To make this additional case work, both Parser and PreParser implement a
+ // logic where only top-level functions will be parsed lazily.
+ bool is_lazily_parsed = (mode() == PARSE_LAZILY &&
+ scope_->AllowsLazyCompilation() &&
+ !parenthesized_function_);
parenthesized_function_ = false; // The bit was set for this function only.
- if (is_lazily_compiled) {
+ if (is_lazily_parsed) {
int function_block_pos = position();
FunctionEntry entry;
- if (pre_parse_data_ != NULL) {
- // If we have pre_parse_data_, we use it to skip parsing the function
- // body. The preparser data contains the information we need to
- // construct the lazy function.
- entry = pre_parse_data()->GetFunctionEntry(function_block_pos);
+ if (cached_data_mode_ == CONSUME_CACHED_DATA) {
+ // If we have cached data, we use it to skip parsing the function body.
+ // The data contains the information we need to construct the lazy
+ // function.
+ entry = (*cached_data())->GetFunctionEntry(function_block_pos);
if (entry.is_valid()) {
if (entry.end_pos() <= function_block_pos) {
// End position greater than end of stream is safe, and hard
// to check.
ReportInvalidPreparseData(function_name, CHECK_OK);
}
- scanner().SeekForward(entry.end_pos() - 1);
+ scanner()->SeekForward(entry.end_pos() - 1);
scope->set_end_position(entry.end_pos());
Expect(Token::RBRACE, CHECK_OK);
@@ -4165,14 +3352,23 @@ FunctionLiteral* Parser::ParseFunctionLiteral(
scope->end_position() - function_block_pos);
materialized_literal_count = entry.literal_count();
expected_property_count = entry.property_count();
- top_scope_->SetLanguageMode(entry.language_mode());
+ scope_->SetStrictMode(entry.strict_mode());
} else {
- is_lazily_compiled = false;
+ // This case happens when we have preparse data but it doesn't contain
+ // an entry for the function. As a safety net, fall back to eager
+ // parsing. It is unclear whether PreParser's laziness analysis can
+ // produce different results than the Parser's laziness analysis (see
+ // https://codereview.chromium.org/7565003 ). In this case, we must
+ // discard all the preparse data, since the symbol data will be wrong.
+ is_lazily_parsed = false;
+ cached_data_mode_ = NO_CACHED_DATA;
}
} else {
- // With no preparser data, we partially parse the function, without
+ // With no cached data, we partially parse the function, without
// building an AST. This gathers the data needed to build a lazy
// function.
+ // FIXME(marja): Now the PreParser doesn't need to log functions /
+ // symbols; only errors -> clean that up.
SingletonLogger logger;
PreParser::PreParseResult result = LazyParseFunctionLiteral(&logger);
if (result == PreParser::kPreParseStackOverflow) {
@@ -4187,8 +3383,10 @@ FunctionLiteral* Parser::ParseFunctionLiteral(
if (arg != NULL) {
args = Vector<const char*>(&arg, 1);
}
- ReportMessageAt(Scanner::Location(logger.start(), logger.end()),
- logger.message(), args);
+ ParserTraits::ReportMessageAt(
+ Scanner::Location(logger.start(), logger.end()),
+ logger.message(),
+ args);
*ok = false;
return NULL;
}
@@ -4198,15 +3396,26 @@ FunctionLiteral* Parser::ParseFunctionLiteral(
scope->end_position() - function_block_pos);
materialized_literal_count = logger.literals();
expected_property_count = logger.properties();
- top_scope_->SetLanguageMode(logger.language_mode());
+ scope_->SetStrictMode(logger.strict_mode());
+ if (cached_data_mode_ == PRODUCE_CACHED_DATA) {
+ ASSERT(log_);
+ // Position right after terminal '}'.
+ int body_end = scanner()->location().end_pos;
+ log_->LogFunction(function_block_pos, body_end,
+ materialized_literal_count,
+ expected_property_count,
+ scope_->strict_mode());
+ }
}
}
- if (!is_lazily_compiled) {
+ if (!is_lazily_parsed) {
+ // Everything inside an eagerly parsed function will be parsed eagerly
+ // (see comment above).
ParsingModeScope parsing_mode(this, PARSE_EAGERLY);
body = new(zone()) ZoneList<Statement*>(8, zone());
if (fvar != NULL) {
- VariableProxy* fproxy = top_scope_->NewUnresolved(
+ VariableProxy* fproxy = scope_->NewUnresolved(
factory(), function_name, Interface::NewConst());
fproxy->BindTo(fvar);
body->Add(factory()->NewExpressionStatement(
@@ -4223,14 +3432,14 @@ FunctionLiteral* Parser::ParseFunctionLiteral(
new(zone()) ZoneList<Expression*>(0, zone());
CallRuntime* allocation = factory()->NewCallRuntime(
isolate()->factory()->empty_string(),
- Runtime::FunctionForId(Runtime::kCreateJSGeneratorObject),
+ Runtime::FunctionForId(Runtime::kHiddenCreateJSGeneratorObject),
arguments, pos);
VariableProxy* init_proxy = factory()->NewVariableProxy(
- current_function_state_->generator_object_variable());
+ function_state_->generator_object_variable());
Assignment* assignment = factory()->NewAssignment(
Token::INIT_VAR, init_proxy, allocation, RelocInfo::kNoPosition);
VariableProxy* get_proxy = factory()->NewVariableProxy(
- current_function_state_->generator_object_variable());
+ function_state_->generator_object_variable());
Yield* yield = factory()->NewYield(
get_proxy, assignment, Yield::INITIAL, RelocInfo::kNoPosition);
body->Add(factory()->NewExpressionStatement(
@@ -4241,7 +3450,7 @@ FunctionLiteral* Parser::ParseFunctionLiteral(
if (is_generator) {
VariableProxy* get_proxy = factory()->NewVariableProxy(
- current_function_state_->generator_object_variable());
+ function_state_->generator_object_variable());
Expression *undefined = factory()->NewLiteral(
isolate()->factory()->undefined_value(), RelocInfo::kNoPosition);
Yield* yield = factory()->NewYield(
@@ -4255,40 +3464,34 @@ FunctionLiteral* Parser::ParseFunctionLiteral(
handler_count = function_state.handler_count();
Expect(Token::RBRACE, CHECK_OK);
- scope->set_end_position(scanner().location().end_pos);
+ scope->set_end_position(scanner()->location().end_pos);
}
// Validate strict mode. We can do this only after parsing the function,
// since the function can declare itself strict.
- if (!top_scope_->is_classic_mode()) {
+ if (strict_mode() == STRICT) {
if (IsEvalOrArguments(function_name)) {
- ReportMessageAt(function_name_location,
- "strict_eval_arguments",
- Vector<const char*>::empty());
+ ReportMessageAt(function_name_location, "strict_eval_arguments");
*ok = false;
return NULL;
}
if (name_is_strict_reserved) {
- ReportMessageAt(function_name_location, "unexpected_strict_reserved",
- Vector<const char*>::empty());
+ ReportMessageAt(function_name_location, "unexpected_strict_reserved");
*ok = false;
return NULL;
}
if (eval_args_error_log.IsValid()) {
- ReportMessageAt(eval_args_error_log, "strict_eval_arguments",
- Vector<const char*>::empty());
+ ReportMessageAt(eval_args_error_log, "strict_eval_arguments");
*ok = false;
return NULL;
}
if (dupe_error_loc.IsValid()) {
- ReportMessageAt(dupe_error_loc, "strict_param_dupe",
- Vector<const char*>::empty());
+ ReportMessageAt(dupe_error_loc, "strict_param_dupe");
*ok = false;
return NULL;
}
if (reserved_loc.IsValid()) {
- ReportMessageAt(reserved_loc, "unexpected_strict_reserved",
- Vector<const char*>::empty());
+ ReportMessageAt(reserved_loc, "unexpected_strict_reserved");
*ok = false;
return NULL;
}
@@ -4297,10 +3500,11 @@ FunctionLiteral* Parser::ParseFunctionLiteral(
CHECK_OK);
}
ast_properties = *factory()->visitor()->ast_properties();
+ slot_processor = factory()->visitor()->slot_processor();
dont_optimize_reason = factory()->visitor()->dont_optimize_reason();
}
- if (is_extended_mode()) {
+ if (allow_harmony_scoping() && strict_mode() == STRICT) {
CheckConflictingVarDeclarations(scope, CHECK_OK);
}
@@ -4320,6 +3524,7 @@ FunctionLiteral* Parser::ParseFunctionLiteral(
pos);
function_literal->set_function_token_position(function_token_pos);
function_literal->set_ast_properties(&ast_properties);
+ function_literal->set_slot_processor(slot_processor);
function_literal->set_dont_optimize_reason(dont_optimize_reason);
if (fni_ != NULL && should_infer_name) fni_->AddFunction(function_literal);
@@ -4330,7 +3535,7 @@ FunctionLiteral* Parser::ParseFunctionLiteral(
PreParser::PreParseResult Parser::LazyParseFunctionLiteral(
SingletonLogger* logger) {
HistogramTimerScope preparse_scope(isolate()->counters()->pre_parse());
- ASSERT_EQ(Token::LBRACE, scanner().current_token());
+ ASSERT_EQ(Token::LBRACE, scanner()->current_token());
if (reusable_preparser_ == NULL) {
intptr_t stack_limit = isolate()->stack_guard()->real_climit();
@@ -4345,7 +3550,7 @@ PreParser::PreParseResult Parser::LazyParseFunctionLiteral(
allow_harmony_numeric_literals());
}
PreParser::PreParseResult result =
- reusable_preparser_->PreParseLazyFunction(top_scope_->language_mode(),
+ reusable_preparser_->PreParseLazyFunction(strict_mode(),
is_generator(),
logger);
return result;
@@ -4365,7 +3570,7 @@ Expression* Parser::ParseV8Intrinsic(bool* ok) {
if (extension_ != NULL) {
// The extension structures are only accessible while parsing the
// very first time not when reparsing because of lazy compilation.
- top_scope_->DeclarationScope()->ForceEagerCompilation();
+ scope_->DeclarationScope()->ForceEagerCompilation();
}
const Runtime::Function* function = Runtime::FunctionForName(name);
@@ -4397,7 +3602,8 @@ Expression* Parser::ParseV8Intrinsic(bool* ok) {
// Check that the function is defined if it's an inline runtime call.
if (function == NULL && name->Get(0) == '_') {
- ReportMessage("not_defined", Vector<Handle<String> >(&name, 1));
+ ParserTraits::ReportMessage("not_defined",
+ Vector<Handle<String> >(&name, 1));
*ok = false;
return NULL;
}
@@ -4407,199 +3613,12 @@ Expression* Parser::ParseV8Intrinsic(bool* ok) {
}
-bool ParserBase::peek_any_identifier() {
- Token::Value next = peek();
- return next == Token::IDENTIFIER ||
- next == Token::FUTURE_RESERVED_WORD ||
- next == Token::FUTURE_STRICT_RESERVED_WORD ||
- next == Token::YIELD;
-}
-
-
-bool ParserBase::CheckContextualKeyword(Vector<const char> keyword) {
- if (peek() == Token::IDENTIFIER &&
- scanner()->is_next_contextual_keyword(keyword)) {
- Consume(Token::IDENTIFIER);
- return true;
- }
- return false;
-}
-
-
-void ParserBase::ExpectSemicolon(bool* ok) {
- // Check for automatic semicolon insertion according to
- // the rules given in ECMA-262, section 7.9, page 21.
- Token::Value tok = peek();
- if (tok == Token::SEMICOLON) {
- Next();
- return;
- }
- if (scanner()->HasAnyLineTerminatorBeforeNext() ||
- tok == Token::RBRACE ||
- tok == Token::EOS) {
- return;
- }
- Expect(Token::SEMICOLON, ok);
-}
-
-
-void ParserBase::ExpectContextualKeyword(Vector<const char> keyword, bool* ok) {
- Expect(Token::IDENTIFIER, ok);
- if (!*ok) return;
- if (!scanner()->is_literal_contextual_keyword(keyword)) {
- ReportUnexpectedToken(scanner()->current_token());
- *ok = false;
- }
-}
-
-
-void ParserBase::ReportUnexpectedToken(Token::Value token) {
- // We don't report stack overflows here, to avoid increasing the
- // stack depth even further. Instead we report it after parsing is
- // over, in ParseProgram.
- if (token == Token::ILLEGAL && stack_overflow()) {
- return;
- }
- Scanner::Location source_location = scanner()->location();
-
- // Four of the tokens are treated specially
- switch (token) {
- case Token::EOS:
- return ReportMessageAt(source_location, "unexpected_eos");
- case Token::NUMBER:
- return ReportMessageAt(source_location, "unexpected_token_number");
- case Token::STRING:
- return ReportMessageAt(source_location, "unexpected_token_string");
- case Token::IDENTIFIER:
- return ReportMessageAt(source_location,
- "unexpected_token_identifier");
- case Token::FUTURE_RESERVED_WORD:
- return ReportMessageAt(source_location, "unexpected_reserved");
- case Token::YIELD:
- case Token::FUTURE_STRICT_RESERVED_WORD:
- return ReportMessageAt(source_location,
- is_classic_mode() ? "unexpected_token_identifier"
- : "unexpected_strict_reserved");
- default:
- const char* name = Token::String(token);
- ASSERT(name != NULL);
- ReportMessageAt(
- source_location, "unexpected_token", Vector<const char*>(&name, 1));
- }
-}
-
-
Literal* Parser::GetLiteralUndefined(int position) {
return factory()->NewLiteral(
isolate()->factory()->undefined_value(), position);
}
-Literal* Parser::GetLiteralTheHole(int position) {
- return factory()->NewLiteral(
- isolate()->factory()->the_hole_value(), RelocInfo::kNoPosition);
-}
-
-
-// Parses an identifier that is valid for the current scope, in particular it
-// fails on strict mode future reserved keywords in a strict scope. If
-// allow_eval_or_arguments is kAllowEvalOrArguments, we allow "eval" or
-// "arguments" as identifier even in strict mode (this is needed in cases like
-// "var foo = eval;").
-Handle<String> Parser::ParseIdentifier(
- AllowEvalOrArgumentsAsIdentifier allow_eval_or_arguments,
- bool* ok) {
- Token::Value next = Next();
- if (next == Token::IDENTIFIER) {
- Handle<String> name = GetSymbol();
- if (allow_eval_or_arguments == kDontAllowEvalOrArguments &&
- !top_scope_->is_classic_mode() && IsEvalOrArguments(name)) {
- ReportMessage("strict_eval_arguments", Vector<const char*>::empty());
- *ok = false;
- }
- return name;
- } else if (top_scope_->is_classic_mode() &&
- (next == Token::FUTURE_STRICT_RESERVED_WORD ||
- (next == Token::YIELD && !is_generator()))) {
- return GetSymbol();
- } else {
- ReportUnexpectedToken(next);
- *ok = false;
- return Handle<String>();
- }
-}
-
-
-// Parses an identifier or a strict mode future reserved word, and indicates
-// whether it is strict mode future reserved.
-Handle<String> Parser::ParseIdentifierOrStrictReservedWord(
- bool* is_strict_reserved, bool* ok) {
- Token::Value next = Next();
- if (next == Token::IDENTIFIER) {
- *is_strict_reserved = false;
- } else if (next == Token::FUTURE_STRICT_RESERVED_WORD ||
- (next == Token::YIELD && !is_generator())) {
- *is_strict_reserved = true;
- } else {
- ReportUnexpectedToken(next);
- *ok = false;
- return Handle<String>();
- }
- return GetSymbol();
-}
-
-
-Handle<String> Parser::ParseIdentifierName(bool* ok) {
- Token::Value next = Next();
- if (next != Token::IDENTIFIER &&
- next != Token::FUTURE_RESERVED_WORD &&
- next != Token::FUTURE_STRICT_RESERVED_WORD &&
- !Token::IsKeyword(next)) {
- ReportUnexpectedToken(next);
- *ok = false;
- return Handle<String>();
- }
- return GetSymbol();
-}
-
-
-void Parser::MarkAsLValue(Expression* expression) {
- VariableProxy* proxy = expression != NULL
- ? expression->AsVariableProxy()
- : NULL;
-
- if (proxy != NULL) proxy->MarkAsLValue();
-}
-
-
-// Checks LHS expression for assignment and prefix/postfix increment/decrement
-// in strict mode.
-void Parser::CheckStrictModeLValue(Expression* expression,
- bool* ok) {
- ASSERT(!top_scope_->is_classic_mode());
- VariableProxy* lhs = expression != NULL
- ? expression->AsVariableProxy()
- : NULL;
-
- if (lhs != NULL && !lhs->is_this() && IsEvalOrArguments(lhs->name())) {
- ReportMessage("strict_eval_arguments", Vector<const char*>::empty());
- *ok = false;
- }
-}
-
-
-// Checks whether an octal literal was last seen between beg_pos and end_pos.
-// If so, reports an error. Only called for strict mode.
-void ParserBase::CheckOctalLiteral(int beg_pos, int end_pos, bool* ok) {
- Scanner::Location octal = scanner()->octal_position();
- if (octal.IsValid() && beg_pos <= octal.beg_pos && octal.end_pos <= end_pos) {
- ReportMessageAt(octal, "strict_octal_literal");
- scanner()->clear_octal_position();
- *ok = false;
- }
-}
-
-
void Parser::CheckConflictingVarDeclarations(Scope* scope, bool* ok) {
Declaration* decl = scope->CheckConflictingVarDeclarations();
if (decl != NULL) {
@@ -4613,28 +3632,12 @@ void Parser::CheckConflictingVarDeclarations(Scope* scope, bool* ok) {
Scanner::Location location = position == RelocInfo::kNoPosition
? Scanner::Location::invalid()
: Scanner::Location(position, position + 1);
- ReportMessageAt(location, "redeclaration", args);
+ ParserTraits::ReportMessageAt(location, "redeclaration", args);
*ok = false;
}
}
-// This function reads an identifier name and determines whether or not it
-// is 'get' or 'set'.
-Handle<String> Parser::ParseIdentifierNameOrGetOrSet(bool* is_get,
- bool* is_set,
- bool* ok) {
- Handle<String> result = ParseIdentifierName(ok);
- if (!*ok) return Handle<String>();
- if (scanner().is_literal_ascii() && scanner().literal_length() == 3) {
- const char* token = scanner().literal_ascii_string().start();
- *is_get = strncmp(token, "get", 3) == 0;
- *is_set = !*is_get && strncmp(token, "set", 3) == 0;
- }
- return result;
-}
-
-
// ----------------------------------------------------------------------------
// Parser support
@@ -4818,6 +3821,7 @@ bool RegExpParser::simple() {
RegExpTree* RegExpParser::ReportError(Vector<const char> message) {
failed_ = true;
*error_ = isolate()->factory()->NewStringFromAscii(message, NOT_TENURED);
+ ASSERT(!error_->is_null());
   // Zip to the end to make sure that no more input is read.
current_ = kEndMarker;
next_pos_ = in()->length();
@@ -5333,7 +4337,7 @@ bool RegExpParser::ParseIntervalQuantifier(int* min_out, int* max_out) {
uc32 RegExpParser::ParseOctalLiteral() {
- ASSERT('0' <= current() && current() <= '7');
+ ASSERT(('0' <= current() && current() <= '7') || current() == kEndMarker);
// For compatibility with some other browsers (not all), we parse
// up to three octal digits with a value below 256.
uc32 value = current() - '0';
@@ -5677,13 +4681,14 @@ bool Parser::Parse() {
result = ParseProgram();
}
} else {
- ScriptDataImpl* pre_parse_data = info()->pre_parse_data();
- set_pre_parse_data(pre_parse_data);
- if (pre_parse_data != NULL && pre_parse_data->has_error()) {
- Scanner::Location loc = pre_parse_data->MessageLocation();
- const char* message = pre_parse_data->BuildMessage();
- Vector<const char*> args = pre_parse_data->BuildArgs();
- ReportMessageAt(loc, message, args);
+ SetCachedData(info()->cached_data(), info()->cached_data_mode());
+ if (info()->cached_data_mode() == CONSUME_CACHED_DATA &&
+ (*info()->cached_data())->has_error()) {
+ ScriptDataImpl* cached_data = *(info()->cached_data());
+ Scanner::Location loc = cached_data->MessageLocation();
+ const char* message = cached_data->BuildMessage();
+ Vector<const char*> args = cached_data->BuildArgs();
+ ParserTraits::ReportMessageAt(loc, message, args);
DeleteArray(message);
for (int i = 0; i < args.length(); i++) {
DeleteArray(args[i]);
diff --git a/deps/v8/src/parser.h b/deps/v8/src/parser.h
index 2b0995ace2..f49626766e 100644
--- a/deps/v8/src/parser.h
+++ b/deps/v8/src/parser.h
@@ -30,16 +30,18 @@
#include "allocation.h"
#include "ast.h"
+#include "compiler.h" // For CachedDataMode
#include "preparse-data-format.h"
#include "preparse-data.h"
#include "scopes.h"
#include "preparser.h"
namespace v8 {
+class ScriptCompiler;
+
namespace internal {
class CompilationInfo;
-class FuncNameInferrer;
class ParserLog;
class PositionStack;
class Target;
@@ -54,7 +56,7 @@ class FunctionEntry BASE_EMBEDDED {
kEndPositionIndex,
kLiteralCountIndex,
kPropertyCountIndex,
- kLanguageModeIndex,
+ kStrictModeIndex,
kSize
};
@@ -67,11 +69,10 @@ class FunctionEntry BASE_EMBEDDED {
int end_pos() { return backing_[kEndPositionIndex]; }
int literal_count() { return backing_[kLiteralCountIndex]; }
int property_count() { return backing_[kPropertyCountIndex]; }
- LanguageMode language_mode() {
- ASSERT(backing_[kLanguageModeIndex] == CLASSIC_MODE ||
- backing_[kLanguageModeIndex] == STRICT_MODE ||
- backing_[kLanguageModeIndex] == EXTENDED_MODE);
- return static_cast<LanguageMode>(backing_[kLanguageModeIndex]);
+ StrictMode strict_mode() {
+ ASSERT(backing_[kStrictModeIndex] == SLOPPY ||
+ backing_[kStrictModeIndex] == STRICT);
+ return static_cast<StrictMode>(backing_[kStrictModeIndex]);
}
bool is_valid() { return !backing_.is_empty(); }
@@ -119,6 +120,7 @@ class ScriptDataImpl : public ScriptData {
unsigned version() { return store_[PreparseDataConstants::kVersionOffset]; }
private:
+ friend class v8::ScriptCompiler;
Vector<unsigned> store_;
unsigned char* symbol_data_;
unsigned char* symbol_data_end_;
@@ -404,10 +406,198 @@ class RegExpParser BASE_EMBEDDED {
// ----------------------------------------------------------------------------
// JAVASCRIPT PARSING
-// Forward declaration.
+class Parser;
class SingletonLogger;
-class Parser : public ParserBase {
+class ParserTraits {
+ public:
+ struct Type {
+ // TODO(marja): To be removed. The Traits object should contain all the data
+ // it needs.
+ typedef v8::internal::Parser* Parser;
+
+ // Used by FunctionState and BlockState.
+ typedef v8::internal::Scope Scope;
+ typedef Variable GeneratorVariable;
+ typedef v8::internal::Zone Zone;
+
+ // Return types for traversing functions.
+ typedef Handle<String> Identifier;
+ typedef v8::internal::Expression* Expression;
+ typedef Yield* YieldExpression;
+ typedef v8::internal::FunctionLiteral* FunctionLiteral;
+ typedef v8::internal::Literal* Literal;
+ typedef ObjectLiteral::Property* ObjectLiteralProperty;
+ typedef ZoneList<v8::internal::Expression*>* ExpressionList;
+ typedef ZoneList<ObjectLiteral::Property*>* PropertyList;
+
+ // For constructing objects returned by the traversing functions.
+ typedef AstNodeFactory<AstConstructionVisitor> Factory;
+ };
+
+ explicit ParserTraits(Parser* parser) : parser_(parser) {}
+
+ // Custom operations executed when FunctionStates are created and destructed.
+ template<typename FunctionState>
+ static void SetUpFunctionState(FunctionState* function_state, Zone* zone) {
+ Isolate* isolate = zone->isolate();
+ function_state->isolate_ = isolate;
+ function_state->saved_ast_node_id_ = isolate->ast_node_id();
+ isolate->set_ast_node_id(BailoutId::FirstUsable().ToInt());
+ }
+
+ template<typename FunctionState>
+ static void TearDownFunctionState(FunctionState* function_state) {
+ if (function_state->outer_function_state_ != NULL) {
+ function_state->isolate_->set_ast_node_id(
+ function_state->saved_ast_node_id_);
+ }
+ }
+
+ // Helper functions for recursive descent.
+ bool IsEvalOrArguments(Handle<String> identifier) const;
+
+ // Returns true if the expression is of type "this.foo".
+ static bool IsThisProperty(Expression* expression);
+
+ static bool IsIdentifier(Expression* expression);
+
+ static bool IsBoilerplateProperty(ObjectLiteral::Property* property) {
+ return ObjectLiteral::IsBoilerplateProperty(property);
+ }
+
+ static bool IsArrayIndex(Handle<String> string, uint32_t* index) {
+ return !string.is_null() && string->AsArrayIndex(index);
+ }
+
+ // Functions for encapsulating the differences between parsing and preparsing;
+ // operations interleaved with the recursive descent.
+ static void PushLiteralName(FuncNameInferrer* fni, Handle<String> id) {
+ fni->PushLiteralName(id);
+ }
+ void PushPropertyName(FuncNameInferrer* fni, Expression* expression);
+
+ static void CheckFunctionLiteralInsideTopLevelObjectLiteral(
+ Scope* scope, Expression* value, bool* has_function) {
+ if (scope->DeclarationScope()->is_global_scope() &&
+ value->AsFunctionLiteral() != NULL) {
+ *has_function = true;
+ value->AsFunctionLiteral()->set_pretenure();
+ }
+ }
+
+ // If we assign a function literal to a property we pretenure the
+ // literal so it can be added as a constant function property.
+ static void CheckAssigningFunctionLiteralToProperty(Expression* left,
+ Expression* right);
+
+ // Keep track of eval() calls since they disable all local variable
+ // optimizations. This checks if expression is an eval call, and if yes,
+ // forwards the information to scope.
+ void CheckPossibleEvalCall(Expression* expression, Scope* scope);
+
+  // Determine if the expression is a variable proxy and mark it as being used
+  // in an assignment or with an increment/decrement operator. This is
+  // currently only used for statically checking assignments to harmony const
+  // bindings.
+ static Expression* MarkExpressionAsLValue(Expression* expression);
+
+ // Checks LHS expression for assignment and prefix/postfix increment/decrement
+ // in strict mode.
+ void CheckStrictModeLValue(Expression* expression, bool* ok);
+
+ // Returns true if we have a binary expression between two numeric
+ // literals. In that case, *x will be changed to an expression which is the
+ // computed value.
+ bool ShortcutNumericLiteralBinaryExpression(
+ Expression** x, Expression* y, Token::Value op, int pos,
+ AstNodeFactory<AstConstructionVisitor>* factory);
+
+ // Rewrites the following types of unary expressions:
+ // not <literal> -> true / false
+ // + <numeric literal> -> <numeric literal>
+ // - <numeric literal> -> <numeric literal with value negated>
+ // ! <literal> -> true / false
+ // The following rewriting rules enable the collection of type feedback
+ // without any special stub and the multiplication is removed later in
+ // Crankshaft's canonicalization pass.
+ // + foo -> foo * 1
+ // - foo -> foo * (-1)
+ // ~ foo -> foo ^(~0)
+ Expression* BuildUnaryExpression(
+ Expression* expression, Token::Value op, int pos,
+ AstNodeFactory<AstConstructionVisitor>* factory);
+
+ // Reporting errors.
+ void ReportMessageAt(Scanner::Location source_location,
+ const char* message,
+ Vector<const char*> args,
+ bool is_reference_error = false);
+ void ReportMessage(const char* message,
+ Vector<Handle<String> > args,
+ bool is_reference_error = false);
+ void ReportMessageAt(Scanner::Location source_location,
+ const char* message,
+ Vector<Handle<String> > args,
+ bool is_reference_error = false);
+
+ // "null" return type creators.
+ static Handle<String> EmptyIdentifier() {
+ return Handle<String>();
+ }
+ static Expression* EmptyExpression() {
+ return NULL;
+ }
+ static Literal* EmptyLiteral() {
+ return NULL;
+ }
+ // Used in error return values.
+ static ZoneList<Expression*>* NullExpressionList() {
+ return NULL;
+ }
+
+ // Odd-ball literal creators.
+ Literal* GetLiteralTheHole(int position,
+ AstNodeFactory<AstConstructionVisitor>* factory);
+
+ // Producing data during the recursive descent.
+ Handle<String> GetSymbol(Scanner* scanner = NULL);
+ Handle<String> NextLiteralString(Scanner* scanner,
+ PretenureFlag tenured);
+ Expression* ThisExpression(Scope* scope,
+ AstNodeFactory<AstConstructionVisitor>* factory);
+ Literal* ExpressionFromLiteral(
+ Token::Value token, int pos, Scanner* scanner,
+ AstNodeFactory<AstConstructionVisitor>* factory);
+ Expression* ExpressionFromIdentifier(
+ Handle<String> name, int pos, Scope* scope,
+ AstNodeFactory<AstConstructionVisitor>* factory);
+ Expression* ExpressionFromString(
+ int pos, Scanner* scanner,
+ AstNodeFactory<AstConstructionVisitor>* factory);
+ ZoneList<v8::internal::Expression*>* NewExpressionList(int size, Zone* zone) {
+ return new(zone) ZoneList<v8::internal::Expression*>(size, zone);
+ }
+ ZoneList<ObjectLiteral::Property*>* NewPropertyList(int size, Zone* zone) {
+ return new(zone) ZoneList<ObjectLiteral::Property*>(size, zone);
+ }
+
+ // Temporary glue; these functions will move to ParserBase.
+ Expression* ParseV8Intrinsic(bool* ok);
+ FunctionLiteral* ParseFunctionLiteral(
+ Handle<String> name,
+ Scanner::Location function_name_location,
+ bool name_is_strict_reserved,
+ bool is_generator,
+ int function_token_position,
+ FunctionLiteral::FunctionType type,
+ bool* ok);
+
+ private:
+ Parser* parser_;
+};
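
The ParserTraits class above is consumed by the templated ParserBase<Traits> (declared in preparser.h, outside this hunk). The following is a minimal, self-contained sketch of that traits pattern; AstExpressionSketch, FullTraitsSketch, PreTraitsSketch and ParserBaseSketch are illustrative names only, not V8 types. The point it shows is that the shared recursive-descent code picks its return types and "null" values from the traits, so a Parser-like instantiation can build real AST nodes while a PreParser-like one returns cheap placeholders.

#include <cstddef>
#include <string>

// Parser-like traits: expressions are heap-allocated AST nodes.
struct AstExpressionSketch { std::string text; };

struct FullTraitsSketch {
  struct Type { typedef AstExpressionSketch* Expression; };
  static AstExpressionSketch* EmptyExpression() { return NULL; }
  static AstExpressionSketch* NewExpression(const std::string& name) {
    AstExpressionSketch* result = new AstExpressionSketch();
    result->text = name;
    return result;
  }
};

// PreParser-like traits: expressions are throwaway flag values.
struct PreTraitsSketch {
  struct Type { typedef int Expression; };
  static int EmptyExpression() { return 0; }
  static int NewExpression(const std::string&) { return 1; }
};

// Shared recursive-descent code, parameterized on the traits.
template <typename Traits>
class ParserBaseSketch {
 public:
  typename Traits::Type::Expression ParseIdentifier(const std::string& name,
                                                    bool* ok) {
    if (name.empty()) {
      *ok = false;
      return Traits::EmptyExpression();  // Traits-specific "null" value.
    }
    return Traits::NewExpression(name);
  }
};
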
+
+
+class Parser : public ParserBase<ParserTraits> {
public:
explicit Parser(CompilationInfo* info);
~Parser() {
@@ -427,12 +617,16 @@ class Parser : public ParserBase {
bool Parse();
private:
- static const int kMaxNumFunctionLocals = 131071; // 2^17-1
+ friend class ParserTraits;
- enum Mode {
- PARSE_LAZILY,
- PARSE_EAGERLY
- };
+ // Limit the allowed number of local variables in a function. The hard limit
+ // is that offsets computed by FullCodeGenerator::StackOperand and similar
+ // functions are ints, and they should not overflow. In addition, accessing
+ // local variables creates user-controlled constants in the generated code,
+ // and we don't want too much user-controlled memory inside the code (this was
+ // the reason why this limit was introduced in the first place; see
+ // https://codereview.chromium.org/7003030/ ).
+ static const int kMaxNumFunctionLocals = 4194303; // 2^22-1
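
As a quick arithmetic check of the values above (a sketch, not part of the patch; it uses a C++11 static_assert purely to spell out the math): the new limit 4194303 is indeed 2^22 - 1, and the old limit 131071 was 2^17 - 1.

static_assert((1 << 22) - 1 == 4194303, "new kMaxNumFunctionLocals is 2^22 - 1");
static_assert((1 << 17) - 1 == 131071, "old kMaxNumFunctionLocals was 2^17 - 1");
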
enum VariableDeclarationContext {
kModuleElement,
@@ -447,84 +641,6 @@ class Parser : public ParserBase {
kHasNoInitializers
};
- class BlockState;
-
- class FunctionState BASE_EMBEDDED {
- public:
- FunctionState(Parser* parser, Scope* scope);
- ~FunctionState();
-
- int NextMaterializedLiteralIndex() {
- return next_materialized_literal_index_++;
- }
- int materialized_literal_count() {
- return next_materialized_literal_index_ - JSFunction::kLiteralsPrefixSize;
- }
-
- int NextHandlerIndex() { return next_handler_index_++; }
- int handler_count() { return next_handler_index_; }
-
- void AddProperty() { expected_property_count_++; }
- int expected_property_count() { return expected_property_count_; }
-
- void set_generator_object_variable(Variable *variable) {
- ASSERT(variable != NULL);
- ASSERT(!is_generator());
- generator_object_variable_ = variable;
- }
- Variable* generator_object_variable() const {
- return generator_object_variable_;
- }
- bool is_generator() const {
- return generator_object_variable_ != NULL;
- }
-
- AstNodeFactory<AstConstructionVisitor>* factory() { return &factory_; }
-
- private:
- // Used to assign an index to each literal that needs materialization in
- // the function. Includes regexp literals, and boilerplate for object and
- // array literals.
- int next_materialized_literal_index_;
-
- // Used to assign a per-function index to try and catch handlers.
- int next_handler_index_;
-
- // Properties count estimation.
- int expected_property_count_;
-
- // For generators, the variable that holds the generator object. This
- // variable is used by yield expressions and return statements. NULL
- // indicates that this function is not a generator.
- Variable* generator_object_variable_;
-
- Parser* parser_;
- FunctionState* outer_function_state_;
- Scope* outer_scope_;
- int saved_ast_node_id_;
- AstNodeFactory<AstConstructionVisitor> factory_;
- };
-
- class ParsingModeScope BASE_EMBEDDED {
- public:
- ParsingModeScope(Parser* parser, Mode mode)
- : parser_(parser),
- old_mode_(parser->mode()) {
- parser_->mode_ = mode;
- }
- ~ParsingModeScope() {
- parser_->mode_ = old_mode_;
- }
-
- private:
- Parser* parser_;
- Mode old_mode_;
- };
-
- virtual bool is_classic_mode() {
- return top_scope_->is_classic_mode();
- }
-
// Returns NULL if parsing failed.
FunctionLiteral* ParseProgram();
@@ -532,7 +648,6 @@ class Parser : public ParserBase {
FunctionLiteral* ParseLazy(Utf16CharacterStream* source);
Isolate* isolate() { return isolate_; }
- Zone* zone() const { return zone_; }
CompilationInfo* info() const { return info_; }
// Called by ParseProgram after setting up the scanner.
@@ -541,39 +656,27 @@ class Parser : public ParserBase {
// Report syntax error
void ReportInvalidPreparseData(Handle<String> name, bool* ok);
- void ReportMessage(const char* message, Vector<const char*> args);
- void ReportMessage(const char* message, Vector<Handle<String> > args);
- void ReportMessageAt(Scanner::Location location, const char* type) {
- ReportMessageAt(location, type, Vector<const char*>::empty());
- }
- void ReportMessageAt(Scanner::Location loc,
- const char* message,
- Vector<const char*> args);
- void ReportMessageAt(Scanner::Location loc,
- const char* message,
- Vector<Handle<String> > args);
- void set_pre_parse_data(ScriptDataImpl *data) {
- pre_parse_data_ = data;
- symbol_cache_.Initialize(data ? data->symbol_count() : 0, zone());
+ void SetCachedData(ScriptDataImpl** data,
+ CachedDataMode cached_data_mode) {
+ cached_data_mode_ = cached_data_mode;
+ if (cached_data_mode == NO_CACHED_DATA) {
+ cached_data_ = NULL;
+ } else {
+ ASSERT(data != NULL);
+ cached_data_ = data;
+ symbol_cache_.Initialize(*data ? (*data)->symbol_count() : 0, zone());
+ }
}
- bool inside_with() const { return top_scope_->inside_with(); }
- Scanner& scanner() { return scanner_; }
- Mode mode() const { return mode_; }
- ScriptDataImpl* pre_parse_data() const { return pre_parse_data_; }
- bool is_extended_mode() {
- ASSERT(top_scope_ != NULL);
- return top_scope_->is_extended_mode();
- }
+ bool inside_with() const { return scope_->inside_with(); }
+ ScriptDataImpl** cached_data() const { return cached_data_; }
+ CachedDataMode cached_data_mode() const { return cached_data_mode_; }
Scope* DeclarationScope(VariableMode mode) {
return IsLexicalVariableMode(mode)
- ? top_scope_ : top_scope_->DeclarationScope();
+ ? scope_ : scope_->DeclarationScope();
}
- // Check if the given string is 'eval' or 'arguments'.
- bool IsEvalOrArguments(Handle<String> string);
-
// All ParseXXX functions take as the last argument an *ok parameter
// which is set to false if parsing failed; it is unchanged otherwise.
// By making the 'exception handling' explicit, we are forced to check
@@ -623,31 +726,12 @@ class Parser : public ParserBase {
   // Support for harmony block-scoped bindings.
Block* ParseScopedBlock(ZoneStringList* labels, bool* ok);
- Expression* ParseExpression(bool accept_IN, bool* ok);
- Expression* ParseAssignmentExpression(bool accept_IN, bool* ok);
- Expression* ParseYieldExpression(bool* ok);
- Expression* ParseConditionalExpression(bool accept_IN, bool* ok);
- Expression* ParseBinaryExpression(int prec, bool accept_IN, bool* ok);
- Expression* ParseUnaryExpression(bool* ok);
- Expression* ParsePostfixExpression(bool* ok);
- Expression* ParseLeftHandSideExpression(bool* ok);
- Expression* ParseNewExpression(bool* ok);
- Expression* ParseMemberExpression(bool* ok);
- Expression* ParseNewPrefix(PositionStack* stack, bool* ok);
- Expression* ParseMemberWithNewPrefixesExpression(PositionStack* stack,
- bool* ok);
- Expression* ParsePrimaryExpression(bool* ok);
- Expression* ParseArrayLiteral(bool* ok);
- Expression* ParseObjectLiteral(bool* ok);
- Expression* ParseRegExpLiteral(bool seen_equal, bool* ok);
-
// Initialize the components of a for-in / for-of statement.
void InitializeForEachStatement(ForEachStatement* stmt,
Expression* each,
Expression* subject,
Statement* body);
- ZoneList<Expression*>* ParseArguments(bool* ok);
FunctionLiteral* ParseFunctionLiteral(
Handle<String> name,
Scanner::Location function_name_location,
@@ -660,52 +744,10 @@ class Parser : public ParserBase {
// Magical syntax support.
Expression* ParseV8Intrinsic(bool* ok);
- bool is_generator() const { return current_function_state_->is_generator(); }
-
bool CheckInOrOf(bool accept_OF, ForEachStatement::VisitMode* visit_mode);
- Handle<String> LiteralString(PretenureFlag tenured) {
- if (scanner().is_literal_ascii()) {
- return isolate_->factory()->NewStringFromAscii(
- scanner().literal_ascii_string(), tenured);
- } else {
- return isolate_->factory()->NewStringFromTwoByte(
- scanner().literal_utf16_string(), tenured);
- }
- }
-
- Handle<String> NextLiteralString(PretenureFlag tenured) {
- if (scanner().is_next_literal_ascii()) {
- return isolate_->factory()->NewStringFromAscii(
- scanner().next_literal_ascii_string(), tenured);
- } else {
- return isolate_->factory()->NewStringFromTwoByte(
- scanner().next_literal_utf16_string(), tenured);
- }
- }
-
- Handle<String> GetSymbol();
-
// Get odd-ball literals.
Literal* GetLiteralUndefined(int position);
- Literal* GetLiteralTheHole(int position);
-
- Handle<String> ParseIdentifier(AllowEvalOrArgumentsAsIdentifier, bool* ok);
- Handle<String> ParseIdentifierOrStrictReservedWord(
- bool* is_strict_reserved, bool* ok);
- Handle<String> ParseIdentifierName(bool* ok);
- Handle<String> ParseIdentifierNameOrGetOrSet(bool* is_get,
- bool* is_set,
- bool* ok);
-
- // Determine if the expression is a variable proxy and mark it as being used
-  // in an assignment or with an increment/decrement operator. This is
-  // currently only used for statically checking assignments to harmony const
-  // bindings.
- void MarkAsLValue(Expression* expression);
-
- // Strict mode validation of LValue expressions
- void CheckStrictModeLValue(Expression* expression,
- bool* ok);
// For harmony block scoping mode: Check if the scope has conflicting var/let
// declarations from different scopes. It covers for example
@@ -734,8 +776,6 @@ class Parser : public ParserBase {
Scope* NewScope(Scope* parent, ScopeType type);
- Handle<String> LookupSymbol(int symbol_id);
-
Handle<String> LookupCachedSymbol(int symbol_id);
// Generate AST node that throw a ReferenceError with the given type.
@@ -760,35 +800,18 @@ class Parser : public ParserBase {
PreParser::PreParseResult LazyParseFunctionLiteral(
SingletonLogger* logger);
- AstNodeFactory<AstConstructionVisitor>* factory() {
- return current_function_state_->factory();
- }
-
Isolate* isolate_;
ZoneList<Handle<String> > symbol_cache_;
Handle<Script> script_;
Scanner scanner_;
PreParser* reusable_preparser_;
- Scope* top_scope_;
Scope* original_scope_; // for ES5 function declarations in sloppy eval
- FunctionState* current_function_state_;
Target* target_stack_; // for break, continue statements
- v8::Extension* extension_;
- ScriptDataImpl* pre_parse_data_;
- FuncNameInferrer* fni_;
-
- Mode mode_;
- // If true, the next (and immediately following) function literal is
- // preceded by a parenthesis.
- // Heuristically that means that the function will be called immediately,
- // so never lazily compile it.
- bool parenthesized_function_;
+ ScriptDataImpl** cached_data_;
+ CachedDataMode cached_data_mode_;
- Zone* zone_;
CompilationInfo* info_;
- friend class BlockState;
- friend class FunctionState;
};
diff --git a/deps/v8/src/platform-cygwin.cc b/deps/v8/src/platform-cygwin.cc
index ac804398f1..4ae9bec9ec 100644
--- a/deps/v8/src/platform-cygwin.cc
+++ b/deps/v8/src/platform-cygwin.cc
@@ -51,7 +51,7 @@ namespace v8 {
namespace internal {
-const char* OS::LocalTimezone(double time) {
+const char* OS::LocalTimezone(double time, TimezoneCache* cache) {
if (std::isnan(time)) return "";
time_t tv = static_cast<time_t>(std::floor(time/msPerSecond));
struct tm* t = localtime(&tv);
@@ -60,7 +60,7 @@ const char* OS::LocalTimezone(double time) {
}
-double OS::LocalTimeOffset() {
+double OS::LocalTimeOffset(TimezoneCache* cache) {
// On Cygwin, struct tm does not contain a tm_gmtoff field.
time_t utc = time(NULL);
ASSERT(utc != -1);
diff --git a/deps/v8/src/platform-freebsd.cc b/deps/v8/src/platform-freebsd.cc
index 9ab6583e06..7d15cef6b0 100644
--- a/deps/v8/src/platform-freebsd.cc
+++ b/deps/v8/src/platform-freebsd.cc
@@ -61,7 +61,7 @@ namespace v8 {
namespace internal {
-const char* OS::LocalTimezone(double time) {
+const char* OS::LocalTimezone(double time, TimezoneCache* cache) {
if (std::isnan(time)) return "";
time_t tv = static_cast<time_t>(std::floor(time/msPerSecond));
struct tm* t = localtime(&tv);
@@ -70,7 +70,7 @@ const char* OS::LocalTimezone(double time) {
}
-double OS::LocalTimeOffset() {
+double OS::LocalTimeOffset(TimezoneCache* cache) {
time_t tv = time(NULL);
struct tm* t = localtime(&tv);
// tm_gmtoff includes any daylight savings offset, so subtract it.
diff --git a/deps/v8/src/platform-linux.cc b/deps/v8/src/platform-linux.cc
index fbcad8f6d1..527b9f616d 100644
--- a/deps/v8/src/platform-linux.cc
+++ b/deps/v8/src/platform-linux.cc
@@ -53,7 +53,8 @@
 // GLibc on ARM defines mcontext_t as a typedef for 'struct sigcontext'.
// Old versions of the C library <signal.h> didn't define the type.
#if defined(__ANDROID__) && !defined(__BIONIC_HAVE_UCONTEXT_T) && \
- defined(__arm__) && !defined(__BIONIC_HAVE_STRUCT_SIGCONTEXT)
+ (defined(__arm__) || defined(__aarch64__)) && \
+ !defined(__BIONIC_HAVE_STRUCT_SIGCONTEXT)
#include <asm/sigcontext.h>
#endif
@@ -117,7 +118,7 @@ bool OS::ArmUsingHardFloat() {
#endif // def __arm__
-const char* OS::LocalTimezone(double time) {
+const char* OS::LocalTimezone(double time, TimezoneCache* cache) {
if (std::isnan(time)) return "";
time_t tv = static_cast<time_t>(std::floor(time/msPerSecond));
struct tm* t = localtime(&tv);
@@ -126,7 +127,7 @@ const char* OS::LocalTimezone(double time) {
}
-double OS::LocalTimeOffset() {
+double OS::LocalTimeOffset(TimezoneCache* cache) {
time_t tv = time(NULL);
struct tm* t = localtime(&tv);
// tm_gmtoff includes any daylight savings offset, so subtract it.
diff --git a/deps/v8/src/platform-macos.cc b/deps/v8/src/platform-macos.cc
index 683a04d381..25ba0da087 100644
--- a/deps/v8/src/platform-macos.cc
+++ b/deps/v8/src/platform-macos.cc
@@ -182,7 +182,7 @@ void OS::SignalCodeMovingGC() {
}
-const char* OS::LocalTimezone(double time) {
+const char* OS::LocalTimezone(double time, TimezoneCache* cache) {
if (std::isnan(time)) return "";
time_t tv = static_cast<time_t>(std::floor(time/msPerSecond));
struct tm* t = localtime(&tv);
@@ -191,7 +191,7 @@ const char* OS::LocalTimezone(double time) {
}
-double OS::LocalTimeOffset() {
+double OS::LocalTimeOffset(TimezoneCache* cache) {
time_t tv = time(NULL);
struct tm* t = localtime(&tv);
// tm_gmtoff includes any daylight savings offset, so subtract it.
diff --git a/deps/v8/src/platform-openbsd.cc b/deps/v8/src/platform-openbsd.cc
index c881d4735d..a5d477d614 100644
--- a/deps/v8/src/platform-openbsd.cc
+++ b/deps/v8/src/platform-openbsd.cc
@@ -59,7 +59,7 @@ namespace v8 {
namespace internal {
-const char* OS::LocalTimezone(double time) {
+const char* OS::LocalTimezone(double time, TimezoneCache* cache) {
if (std::isnan(time)) return "";
time_t tv = static_cast<time_t>(std::floor(time/msPerSecond));
struct tm* t = localtime(&tv);
@@ -68,7 +68,7 @@ const char* OS::LocalTimezone(double time) {
}
-double OS::LocalTimeOffset() {
+double OS::LocalTimeOffset(TimezoneCache* cache) {
time_t tv = time(NULL);
struct tm* t = localtime(&tv);
// tm_gmtoff includes any daylight savings offset, so subtract it.
diff --git a/deps/v8/src/platform-posix.cc b/deps/v8/src/platform-posix.cc
index 402d411327..5ca12522c1 100644
--- a/deps/v8/src/platform-posix.cc
+++ b/deps/v8/src/platform-posix.cc
@@ -265,10 +265,10 @@ void OS::Sleep(int milliseconds) {
void OS::Abort() {
- // Redirect to std abort to signal abnormal program termination.
- if (FLAG_break_on_abort) {
- DebugBreak();
+ if (FLAG_hard_abort) {
+ V8_IMMEDIATE_CRASH();
}
+ // Redirect to std abort to signal abnormal program termination.
abort();
}
@@ -276,6 +276,8 @@ void OS::Abort() {
void OS::DebugBreak() {
#if V8_HOST_ARCH_ARM
asm("bkpt 0");
+#elif V8_HOST_ARCH_ARM64
+ asm("brk 0");
#elif V8_HOST_ARCH_MIPS
asm("break");
#elif V8_HOST_ARCH_IA32
@@ -352,7 +354,25 @@ double OS::TimeCurrentMillis() {
}
-double OS::DaylightSavingsOffset(double time) {
+class TimezoneCache {};
+
+
+TimezoneCache* OS::CreateTimezoneCache() {
+ return NULL;
+}
+
+
+void OS::DisposeTimezoneCache(TimezoneCache* cache) {
+ ASSERT(cache == NULL);
+}
+
+
+void OS::ClearTimezoneCache(TimezoneCache* cache) {
+ ASSERT(cache == NULL);
+}
+
+
+double OS::DaylightSavingsOffset(double time, TimezoneCache*) {
if (std::isnan(time)) return nan_value();
time_t tv = static_cast<time_t>(std::floor(time/msPerSecond));
struct tm* t = localtime(&tv);
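
A minimal usage sketch of the reworked, cache-threaded API (an assumption about how callers such as the date code are expected to use it; this is not code from the patch, and TimezoneCacheLifecycleSketch is an illustrative name). On POSIX the cache is a NULL placeholder, while on Windows it carries the state that previously lived in Win32Time's static members.

static void TimezoneCacheLifecycleSketch() {
  TimezoneCache* cache = OS::CreateTimezoneCache();  // NULL on POSIX platforms.
  double now = OS::TimeCurrentMillis();
  const char* name = OS::LocalTimezone(now, cache);
  double offset_ms =
      OS::LocalTimeOffset(cache) + OS::DaylightSavingsOffset(now, cache);
  (void) name;
  (void) offset_ms;
  OS::ClearTimezoneCache(cache);    // E.g. after the system timezone changes.
  OS::DisposeTimezoneCache(cache);
}
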
@@ -560,6 +580,8 @@ class Thread::PlatformData : public Malloced {
public:
PlatformData() : thread_(kNoThread) {}
pthread_t thread_; // Thread handle for pthread.
+ // Synchronizes thread creation
+ Mutex thread_creation_mutex_;
};
Thread::Thread(const Options& options)
@@ -607,10 +629,10 @@ static void SetThreadName(const char* name) {
static void* ThreadEntry(void* arg) {
Thread* thread = reinterpret_cast<Thread*>(arg);
- // This is also initialized by the first argument to pthread_create() but we
- // don't know which thread will run first (the original thread or the new
- // one) so we initialize it here too.
- thread->data()->thread_ = pthread_self();
+  // We take the lock here to make sure that pthread_create() has finished
+  // first, since we don't know which thread will run first (the original
+  // thread or the new one).
+ { LockGuard<Mutex> lock_guard(&thread->data()->thread_creation_mutex_); }
SetThreadName(thread->name());
ASSERT(thread->data()->thread_ != kNoThread);
thread->NotifyStartedAndRun();
@@ -637,7 +659,10 @@ void Thread::Start() {
ASSERT_EQ(0, result);
}
#endif
- result = pthread_create(&data_->thread_, &attr, ThreadEntry, this);
+ {
+ LockGuard<Mutex> lock_guard(&data_->thread_creation_mutex_);
+ result = pthread_create(&data_->thread_, &attr, ThreadEntry, this);
+ }
ASSERT_EQ(0, result);
result = pthread_attr_destroy(&attr);
ASSERT_EQ(0, result);
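
A self-contained sketch of the handshake introduced above (illustrative only, using plain pthreads rather than V8's Thread and Mutex wrappers): the creating thread holds a mutex across pthread_create() so the thread handle is fully stored before the new thread may read it, and the new thread acquires and releases the same mutex once to establish that ordering.

#include <pthread.h>

struct ThreadDataSketch {
  pthread_t thread;
  pthread_mutex_t creation_mutex;
};

static void* ThreadEntrySketch(void* arg) {
  ThreadDataSketch* data = static_cast<ThreadDataSketch*>(arg);
  // Block until the creator has released the mutex, i.e. pthread_create()
  // has returned and data->thread holds a valid handle.
  pthread_mutex_lock(&data->creation_mutex);
  pthread_mutex_unlock(&data->creation_mutex);
  // data->thread is now safe to read from this thread.
  return NULL;
}

static void StartSketch(ThreadDataSketch* data) {
  pthread_mutex_init(&data->creation_mutex, NULL);
  // Hold the mutex while creating the thread and storing its handle.
  pthread_mutex_lock(&data->creation_mutex);
  pthread_create(&data->thread, NULL, ThreadEntrySketch, data);
  pthread_mutex_unlock(&data->creation_mutex);
}
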
diff --git a/deps/v8/src/platform-qnx.cc b/deps/v8/src/platform-qnx.cc
index cd031e7956..ef0998f89a 100644
--- a/deps/v8/src/platform-qnx.cc
+++ b/deps/v8/src/platform-qnx.cc
@@ -110,7 +110,7 @@ bool OS::ArmUsingHardFloat() {
#endif // __arm__
-const char* OS::LocalTimezone(double time) {
+const char* OS::LocalTimezone(double time, TimezoneCache* cache) {
if (std::isnan(time)) return "";
time_t tv = static_cast<time_t>(std::floor(time/msPerSecond));
struct tm* t = localtime(&tv);
@@ -119,7 +119,7 @@ const char* OS::LocalTimezone(double time) {
}
-double OS::LocalTimeOffset() {
+double OS::LocalTimeOffset(TimezoneCache* cache) {
time_t tv = time(NULL);
struct tm* t = localtime(&tv);
// tm_gmtoff includes any daylight savings offset, so subtract it.
diff --git a/deps/v8/src/platform-solaris.cc b/deps/v8/src/platform-solaris.cc
index 4d910d47ad..f23ae0838b 100644
--- a/deps/v8/src/platform-solaris.cc
+++ b/deps/v8/src/platform-solaris.cc
@@ -80,7 +80,7 @@ namespace v8 {
namespace internal {
-const char* OS::LocalTimezone(double time) {
+const char* OS::LocalTimezone(double time, TimezoneCache* cache) {
if (std::isnan(time)) return "";
time_t tv = static_cast<time_t>(std::floor(time/msPerSecond));
struct tm* t = localtime(&tv);
@@ -89,7 +89,7 @@ const char* OS::LocalTimezone(double time) {
}
-double OS::LocalTimeOffset() {
+double OS::LocalTimeOffset(TimezoneCache* cache) {
tzset();
return -static_cast<double>(timezone * msPerSecond);
}
diff --git a/deps/v8/src/platform-win32.cc b/deps/v8/src/platform-win32.cc
index 56261735b8..fe84bcd3ff 100644
--- a/deps/v8/src/platform-win32.cc
+++ b/deps/v8/src/platform-win32.cc
@@ -218,6 +218,97 @@ void MathSetup() {
}
+class TimezoneCache {
+ public:
+ TimezoneCache() : initialized_(false) { }
+
+ void Clear() {
+ initialized_ = false;
+ }
+
+ // Initialize timezone information. The timezone information is obtained from
+ // windows. If we cannot get the timezone information we fall back to CET.
+ void InitializeIfNeeded() {
+ // Just return if timezone information has already been initialized.
+ if (initialized_) return;
+
+ // Initialize POSIX time zone data.
+ _tzset();
+ // Obtain timezone information from operating system.
+ memset(&tzinfo_, 0, sizeof(tzinfo_));
+ if (GetTimeZoneInformation(&tzinfo_) == TIME_ZONE_ID_INVALID) {
+ // If we cannot get timezone information we fall back to CET.
+ tzinfo_.Bias = -60;
+ tzinfo_.StandardDate.wMonth = 10;
+ tzinfo_.StandardDate.wDay = 5;
+ tzinfo_.StandardDate.wHour = 3;
+ tzinfo_.StandardBias = 0;
+ tzinfo_.DaylightDate.wMonth = 3;
+ tzinfo_.DaylightDate.wDay = 5;
+ tzinfo_.DaylightDate.wHour = 2;
+ tzinfo_.DaylightBias = -60;
+ }
+
+ // Make standard and DST timezone names.
+ WideCharToMultiByte(CP_UTF8, 0, tzinfo_.StandardName, -1,
+ std_tz_name_, kTzNameSize, NULL, NULL);
+ std_tz_name_[kTzNameSize - 1] = '\0';
+ WideCharToMultiByte(CP_UTF8, 0, tzinfo_.DaylightName, -1,
+ dst_tz_name_, kTzNameSize, NULL, NULL);
+ dst_tz_name_[kTzNameSize - 1] = '\0';
+
+ // If OS returned empty string or resource id (like "@tzres.dll,-211")
+ // simply guess the name from the UTC bias of the timezone.
+ // To properly resolve the resource identifier requires a library load,
+ // which is not possible in a sandbox.
+ if (std_tz_name_[0] == '\0' || std_tz_name_[0] == '@') {
+ OS::SNPrintF(Vector<char>(std_tz_name_, kTzNameSize - 1),
+ "%s Standard Time",
+ GuessTimezoneNameFromBias(tzinfo_.Bias));
+ }
+ if (dst_tz_name_[0] == '\0' || dst_tz_name_[0] == '@') {
+ OS::SNPrintF(Vector<char>(dst_tz_name_, kTzNameSize - 1),
+ "%s Daylight Time",
+ GuessTimezoneNameFromBias(tzinfo_.Bias));
+ }
+ // Timezone information initialized.
+ initialized_ = true;
+ }
+
+ // Guess the name of the timezone from the bias.
+ // The guess is very biased towards the northern hemisphere.
+ const char* GuessTimezoneNameFromBias(int bias) {
+ static const int kHour = 60;
+ switch (-bias) {
+ case -9*kHour: return "Alaska";
+ case -8*kHour: return "Pacific";
+ case -7*kHour: return "Mountain";
+ case -6*kHour: return "Central";
+ case -5*kHour: return "Eastern";
+ case -4*kHour: return "Atlantic";
+ case 0*kHour: return "GMT";
+ case +1*kHour: return "Central Europe";
+ case +2*kHour: return "Eastern Europe";
+ case +3*kHour: return "Russia";
+ case +5*kHour + 30: return "India";
+ case +8*kHour: return "China";
+ case +9*kHour: return "Japan";
+ case +12*kHour: return "New Zealand";
+ default: return "Local";
+ }
+ }
+
+
+ private:
+ static const int kTzNameSize = 128;
+ bool initialized_;
+ char std_tz_name_[kTzNameSize];
+ char dst_tz_name_[kTzNameSize];
+ TIME_ZONE_INFORMATION tzinfo_;
+ friend class Win32Time;
+};
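
A worked example for GuessTimezoneNameFromBias above (a sketch, not patch code; it assumes the Windows convention that Bias is in minutes with UTC = local time + Bias). US Pacific time is UTC-8, so Bias == +480 and -bias == -8 * 60; Central European time is UTC+1, so Bias == -60 and -bias == +1 * 60.

static void GuessTimezoneNameSketch() {
  TimezoneCache cache;
  const char* pacific = cache.GuessTimezoneNameFromBias(480);  // "Pacific"
  const char* cet = cache.GuessTimezoneNameFromBias(-60);      // "Central Europe"
  (void) pacific;
  (void) cet;
}
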
+
+
// ----------------------------------------------------------------------------
// The Time class represents time on win32. A timestamp is represented as
// a 64-bit integer in 100 nanoseconds since January 1, 1601 (UTC). JavaScript
@@ -242,14 +333,14 @@ class Win32Time {
// LocalOffset(CET) = 3600000 and LocalOffset(PST) = -28800000. This
// routine also takes into account whether daylight saving is effect
// at the time.
- int64_t LocalOffset();
+ int64_t LocalOffset(TimezoneCache* cache);
// Returns the daylight savings time offset for the time in milliseconds.
- int64_t DaylightSavingsOffset();
+ int64_t DaylightSavingsOffset(TimezoneCache* cache);
// Returns a string identifying the current timezone for the
// timestamp taking into account daylight saving.
- char* LocalTimezone();
+ char* LocalTimezone(TimezoneCache* cache);
private:
// Constants for time conversion.
@@ -258,25 +349,10 @@ class Win32Time {
static const int64_t kMsPerMinute = 60000;
// Constants for timezone information.
- static const int kTzNameSize = 128;
static const bool kShortTzNames = false;
- // Timezone information. We need to have static buffers for the
- // timezone names because we return pointers to these in
- // LocalTimezone().
- static bool tz_initialized_;
- static TIME_ZONE_INFORMATION tzinfo_;
- static char std_tz_name_[kTzNameSize];
- static char dst_tz_name_[kTzNameSize];
-
- // Initialize the timezone information (if not already done).
- static void TzSet();
-
- // Guess the name of the timezone from the bias.
- static const char* GuessTimezoneNameFromBias(int bias);
-
// Return whether or not daylight savings time is in effect at this time.
- bool InDST();
+ bool InDST(TimezoneCache* cache);
// Accessor for FILETIME representation.
FILETIME& ft() { return time_.ft_; }
@@ -298,13 +374,6 @@ class Win32Time {
};
-// Static variables.
-bool Win32Time::tz_initialized_ = false;
-TIME_ZONE_INFORMATION Win32Time::tzinfo_;
-char Win32Time::std_tz_name_[kTzNameSize];
-char Win32Time::dst_tz_name_[kTzNameSize];
-
-
// Initialize timestamp to start of epoc.
Win32Time::Win32Time() {
t() = 0;
@@ -393,90 +462,13 @@ void Win32Time::SetToCurrentTime() {
}
-// Guess the name of the timezone from the bias.
-// The guess is very biased towards the northern hemisphere.
-const char* Win32Time::GuessTimezoneNameFromBias(int bias) {
- static const int kHour = 60;
- switch (-bias) {
- case -9*kHour: return "Alaska";
- case -8*kHour: return "Pacific";
- case -7*kHour: return "Mountain";
- case -6*kHour: return "Central";
- case -5*kHour: return "Eastern";
- case -4*kHour: return "Atlantic";
- case 0*kHour: return "GMT";
- case +1*kHour: return "Central Europe";
- case +2*kHour: return "Eastern Europe";
- case +3*kHour: return "Russia";
- case +5*kHour + 30: return "India";
- case +8*kHour: return "China";
- case +9*kHour: return "Japan";
- case +12*kHour: return "New Zealand";
- default: return "Local";
- }
-}
-
-
-// Initialize timezone information. The timezone information is obtained from
-// windows. If we cannot get the timezone information we fall back to CET.
-// Please notice that this code is not thread-safe.
-void Win32Time::TzSet() {
- // Just return if timezone information has already been initialized.
- if (tz_initialized_) return;
-
- // Initialize POSIX time zone data.
- _tzset();
- // Obtain timezone information from operating system.
- memset(&tzinfo_, 0, sizeof(tzinfo_));
- if (GetTimeZoneInformation(&tzinfo_) == TIME_ZONE_ID_INVALID) {
- // If we cannot get timezone information we fall back to CET.
- tzinfo_.Bias = -60;
- tzinfo_.StandardDate.wMonth = 10;
- tzinfo_.StandardDate.wDay = 5;
- tzinfo_.StandardDate.wHour = 3;
- tzinfo_.StandardBias = 0;
- tzinfo_.DaylightDate.wMonth = 3;
- tzinfo_.DaylightDate.wDay = 5;
- tzinfo_.DaylightDate.wHour = 2;
- tzinfo_.DaylightBias = -60;
- }
-
- // Make standard and DST timezone names.
- WideCharToMultiByte(CP_UTF8, 0, tzinfo_.StandardName, -1,
- std_tz_name_, kTzNameSize, NULL, NULL);
- std_tz_name_[kTzNameSize - 1] = '\0';
- WideCharToMultiByte(CP_UTF8, 0, tzinfo_.DaylightName, -1,
- dst_tz_name_, kTzNameSize, NULL, NULL);
- dst_tz_name_[kTzNameSize - 1] = '\0';
-
- // If OS returned empty string or resource id (like "@tzres.dll,-211")
- // simply guess the name from the UTC bias of the timezone.
- // To properly resolve the resource identifier requires a library load,
- // which is not possible in a sandbox.
- if (std_tz_name_[0] == '\0' || std_tz_name_[0] == '@') {
- OS::SNPrintF(Vector<char>(std_tz_name_, kTzNameSize - 1),
- "%s Standard Time",
- GuessTimezoneNameFromBias(tzinfo_.Bias));
- }
- if (dst_tz_name_[0] == '\0' || dst_tz_name_[0] == '@') {
- OS::SNPrintF(Vector<char>(dst_tz_name_, kTzNameSize - 1),
- "%s Daylight Time",
- GuessTimezoneNameFromBias(tzinfo_.Bias));
- }
-
- // Timezone information initialized.
- tz_initialized_ = true;
-}
-
-
// Return the local timezone offset in milliseconds east of UTC. This
// takes into account whether daylight saving is in effect at the time.
// Only times in the 32-bit Unix range may be passed to this function.
// Also, adding the time-zone offset to the input must not overflow.
// The function EquivalentTime() in date.js guarantees this.
-int64_t Win32Time::LocalOffset() {
- // Initialize timezone information, if needed.
- TzSet();
+int64_t Win32Time::LocalOffset(TimezoneCache* cache) {
+ cache->InitializeIfNeeded();
Win32Time rounded_to_second(*this);
rounded_to_second.t() = rounded_to_second.t() / 1000 / kTimeScaler *
@@ -499,29 +491,30 @@ int64_t Win32Time::LocalOffset() {
if (localtime_s(&posix_local_time_struct, &posix_time)) return 0;
if (posix_local_time_struct.tm_isdst > 0) {
- return (tzinfo_.Bias + tzinfo_.DaylightBias) * -kMsPerMinute;
+ return (cache->tzinfo_.Bias + cache->tzinfo_.DaylightBias) * -kMsPerMinute;
} else if (posix_local_time_struct.tm_isdst == 0) {
- return (tzinfo_.Bias + tzinfo_.StandardBias) * -kMsPerMinute;
+ return (cache->tzinfo_.Bias + cache->tzinfo_.StandardBias) * -kMsPerMinute;
} else {
- return tzinfo_.Bias * -kMsPerMinute;
+ return cache->tzinfo_.Bias * -kMsPerMinute;
}
}
// Return whether or not daylight savings time is in effect at this time.
-bool Win32Time::InDST() {
- // Initialize timezone information, if needed.
- TzSet();
+bool Win32Time::InDST(TimezoneCache* cache) {
+ cache->InitializeIfNeeded();
// Determine if DST is in effect at the specified time.
bool in_dst = false;
- if (tzinfo_.StandardDate.wMonth != 0 || tzinfo_.DaylightDate.wMonth != 0) {
+ if (cache->tzinfo_.StandardDate.wMonth != 0 ||
+ cache->tzinfo_.DaylightDate.wMonth != 0) {
// Get the local timezone offset for the timestamp in milliseconds.
- int64_t offset = LocalOffset();
+ int64_t offset = LocalOffset(cache);
// Compute the offset for DST. The bias parameters in the timezone info
// are specified in minutes. These must be converted to milliseconds.
- int64_t dstofs = -(tzinfo_.Bias + tzinfo_.DaylightBias) * kMsPerMinute;
+ int64_t dstofs =
+ -(cache->tzinfo_.Bias + cache->tzinfo_.DaylightBias) * kMsPerMinute;
// If the local time offset equals the timezone bias plus the daylight
// bias then DST is in effect.
@@ -533,17 +526,17 @@ bool Win32Time::InDST() {
// Return the daylight savings time offset for this time.
-int64_t Win32Time::DaylightSavingsOffset() {
- return InDST() ? 60 * kMsPerMinute : 0;
+int64_t Win32Time::DaylightSavingsOffset(TimezoneCache* cache) {
+ return InDST(cache) ? 60 * kMsPerMinute : 0;
}
// Returns a string identifying the current timezone for the
// timestamp taking into account daylight saving.
-char* Win32Time::LocalTimezone() {
+char* Win32Time::LocalTimezone(TimezoneCache* cache) {
// Return the standard or DST time zone name based on whether daylight
// saving is in effect at the given time.
- return InDST() ? dst_tz_name_ : std_tz_name_;
+ return InDST(cache) ? cache->dst_tz_name_ : cache->std_tz_name_;
}
@@ -586,27 +579,43 @@ double OS::TimeCurrentMillis() {
}
+TimezoneCache* OS::CreateTimezoneCache() {
+ return new TimezoneCache();
+}
+
+
+void OS::DisposeTimezoneCache(TimezoneCache* cache) {
+ delete cache;
+}
+
+
+void OS::ClearTimezoneCache(TimezoneCache* cache) {
+ cache->Clear();
+}
+
+
// Returns a string identifying the current timezone taking into
// account daylight saving.
-const char* OS::LocalTimezone(double time) {
- return Win32Time(time).LocalTimezone();
+const char* OS::LocalTimezone(double time, TimezoneCache* cache) {
+ return Win32Time(time).LocalTimezone(cache);
}
// Returns the local time offset in milliseconds east of UTC without
// taking daylight savings time into account.
-double OS::LocalTimeOffset() {
+double OS::LocalTimeOffset(TimezoneCache* cache) {
// Use current time, rounded to the millisecond.
Win32Time t(TimeCurrentMillis());
  // Time::LocalOffset includes any daylight savings offset, so subtract it.
- return static_cast<double>(t.LocalOffset() - t.DaylightSavingsOffset());
+ return static_cast<double>(t.LocalOffset(cache) -
+ t.DaylightSavingsOffset(cache));
}
// Returns the daylight savings offset in milliseconds for the given
// time.
-double OS::DaylightSavingsOffset(double time) {
- int64_t offset = Win32Time(time).DaylightSavingsOffset();
+double OS::DaylightSavingsOffset(double time, TimezoneCache* cache) {
+ int64_t offset = Win32Time(time).DaylightSavingsOffset(cache);
return static_cast<double>(offset);
}
@@ -662,15 +671,15 @@ static bool HasConsole() {
static void VPrintHelper(FILE* stream, const char* format, va_list args) {
- if (HasConsole()) {
- vfprintf(stream, format, args);
- } else {
+ if ((stream == stdout || stream == stderr) && !HasConsole()) {
// It is important to use safe print here in order to avoid
// overflowing the buffer. We might truncate the output, but this
// does not crash.
EmbeddedVector<char, 4096> buffer;
OS::VSNPrintF(buffer, format, args);
OutputDebugStringA(buffer.start());
+ } else {
+ vfprintf(stream, format, args);
}
}
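
The reordered check above means output aimed at stdout or stderr falls back to OutputDebugStringA only when the process has no console, while writes to an explicit file stream always go through vfprintf. Below is a rough standalone sketch of the same routing idea; HasConsole is not shown in this hunk, so the GetConsoleWindow test is only an assumed stand-in.

// Sketch under assumptions: send formatted output to the debugger when no
// console is attached (e.g. a GUI embedder), otherwise print normally.
#include <windows.h>
#include <cstdarg>
#include <cstdio>

static void DebugAwareVPrint(FILE* stream, const char* format, va_list args) {
  bool has_console = GetConsoleWindow() != NULL;  // assumed proxy for HasConsole()
  if ((stream == stdout || stream == stderr) && !has_console) {
    char buffer[4096];
    // Truncate rather than overflow, mirroring the safe print used above.
    _vsnprintf_s(buffer, sizeof(buffer), _TRUNCATE, format, args);
    OutputDebugStringA(buffer);
  } else {
    vfprintf(stream, format, args);
  }
}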
@@ -923,12 +932,11 @@ void OS::Sleep(int milliseconds) {
void OS::Abort() {
- if (IsDebuggerPresent() || FLAG_break_on_abort) {
- DebugBreak();
- } else {
- // Make the MSVCRT do a silent abort.
- raise(SIGABRT);
+ if (FLAG_hard_abort) {
+ V8_IMMEDIATE_CRASH();
}
+ // Make the MSVCRT do a silent abort.
+ raise(SIGABRT);
}
diff --git a/deps/v8/src/platform.h b/deps/v8/src/platform.h
index 8af90f1cb3..d087d2397d 100644
--- a/deps/v8/src/platform.h
+++ b/deps/v8/src/platform.h
@@ -159,6 +159,9 @@ inline intptr_t InternalGetExistingThreadLocal(intptr_t index) {
#endif // V8_NO_FAST_TLS
+class TimezoneCache;
+
+
// ----------------------------------------------------------------------------
// OS
//
@@ -182,16 +185,20 @@ class OS {
// 00:00:00 UTC, January 1, 1970.
static double TimeCurrentMillis();
+ static TimezoneCache* CreateTimezoneCache();
+ static void DisposeTimezoneCache(TimezoneCache* cache);
+ static void ClearTimezoneCache(TimezoneCache* cache);
+
// Returns a string identifying the current time zone. The
// timestamp is used for determining if DST is in effect.
- static const char* LocalTimezone(double time);
+ static const char* LocalTimezone(double time, TimezoneCache* cache);
// Returns the local time offset in milliseconds east of UTC without
// taking daylight savings time into account.
- static double LocalTimeOffset();
+ static double LocalTimeOffset(TimezoneCache* cache);
// Returns the daylight savings offset for the given time.
- static double DaylightSavingsOffset(double time);
+ static double DaylightSavingsOffset(double time, TimezoneCache* cache);
// Returns last OS error.
static int GetLastError();
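
The hunk above replaces the old static, process-wide timezone state with an explicit TimezoneCache object that callers create, pass into every timezone query, and dispose of themselves. A minimal usage sketch of the new API, assuming only the OS declarations shown in this hunk; the printing wrapper itself is hypothetical.

// Sketch only: exercises the cache-based timezone API declared above.
#include <cstdio>
#include "platform.h"

void PrintLocalTimezoneInfo(double time_ms) {
  using v8::internal::OS;
  v8::internal::TimezoneCache* cache = OS::CreateTimezoneCache();
  std::printf("zone=%s offset=%.0f dst=%.0f\n",
              OS::LocalTimezone(time_ms, cache),         // timezone name for this timestamp
              OS::LocalTimeOffset(cache),                // ms east of UTC, excluding DST
              OS::DaylightSavingsOffset(time_ms, cache));
  OS::DisposeTimezoneCache(cache);  // the caller owns the cache
}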
diff --git a/deps/v8/src/preparse-data-format.h b/deps/v8/src/preparse-data-format.h
index e64326e578..e2cf0a1a3e 100644
--- a/deps/v8/src/preparse-data-format.h
+++ b/deps/v8/src/preparse-data-format.h
@@ -37,7 +37,7 @@ struct PreparseDataConstants {
public:
// Layout and constants of the preparse data exchange format.
static const unsigned kMagicNumber = 0xBadDead;
- static const unsigned kCurrentVersion = 7;
+ static const unsigned kCurrentVersion = 8;
static const int kMagicOffset = 0;
static const int kVersionOffset = 1;
diff --git a/deps/v8/src/preparse-data.cc b/deps/v8/src/preparse-data.cc
index 8e08848285..9f585a991f 100644
--- a/deps/v8/src/preparse-data.cc
+++ b/deps/v8/src/preparse-data.cc
@@ -37,13 +37,40 @@
namespace v8 {
namespace internal {
-// ----------------------------------------------------------------------------
-// FunctionLoggingParserRecorder
-FunctionLoggingParserRecorder::FunctionLoggingParserRecorder()
+template <typename Char>
+static int vector_hash(Vector<const Char> string) {
+ int hash = 0;
+ for (int i = 0; i < string.length(); i++) {
+ int c = static_cast<int>(string[i]);
+ hash += c;
+ hash += (hash << 10);
+ hash ^= (hash >> 6);
+ }
+ return hash;
+}
+
+
+static bool vector_compare(void* a, void* b) {
+ CompleteParserRecorder::Key* string1 =
+ reinterpret_cast<CompleteParserRecorder::Key*>(a);
+ CompleteParserRecorder::Key* string2 =
+ reinterpret_cast<CompleteParserRecorder::Key*>(b);
+ if (string1->is_one_byte != string2->is_one_byte) return false;
+ int length = string1->literal_bytes.length();
+ if (string2->literal_bytes.length() != length) return false;
+ return memcmp(string1->literal_bytes.start(),
+ string2->literal_bytes.start(), length) == 0;
+}
+
+
+CompleteParserRecorder::CompleteParserRecorder()
: function_store_(0),
- is_recording_(true),
- pause_count_(0) {
+ literal_chars_(0),
+ symbol_store_(0),
+ symbol_keys_(0),
+ string_table_(vector_compare),
+ symbol_id_(0) {
preamble_[PreparseDataConstants::kMagicOffset] =
PreparseDataConstants::kMagicNumber;
preamble_[PreparseDataConstants::kVersionOffset] =
@@ -56,10 +83,11 @@ FunctionLoggingParserRecorder::FunctionLoggingParserRecorder()
#ifdef DEBUG
prev_start_ = -1;
#endif
+ should_log_symbols_ = true;
}
-void FunctionLoggingParserRecorder::LogMessage(int start_pos,
+void CompleteParserRecorder::LogMessage(int start_pos,
int end_pos,
const char* message,
const char* arg_opt) {
@@ -75,11 +103,11 @@ void FunctionLoggingParserRecorder::LogMessage(int start_pos,
STATIC_ASSERT(PreparseDataConstants::kMessageTextPos == 3);
WriteString(CStrVector(message));
if (arg_opt != NULL) WriteString(CStrVector(arg_opt));
- is_recording_ = false;
+ should_log_symbols_ = false;
}
-void FunctionLoggingParserRecorder::WriteString(Vector<const char> str) {
+void CompleteParserRecorder::WriteString(Vector<const char> str) {
function_store_.Add(str.length());
for (int i = 0; i < str.length(); i++) {
function_store_.Add(str[i]);
@@ -87,43 +115,27 @@ void FunctionLoggingParserRecorder::WriteString(Vector<const char> str) {
}
-// ----------------------------------------------------------------------------
-// PartialParserRecorder - Record both function entries and symbols.
-
-Vector<unsigned> PartialParserRecorder::ExtractData() {
- int function_size = function_store_.size();
- int total_size = PreparseDataConstants::kHeaderSize + function_size;
- Vector<unsigned> data = Vector<unsigned>::New(total_size);
- preamble_[PreparseDataConstants::kFunctionsSizeOffset] = function_size;
- preamble_[PreparseDataConstants::kSymbolCountOffset] = 0;
- OS::MemCopy(data.start(), preamble_, sizeof(preamble_));
- int symbol_start = PreparseDataConstants::kHeaderSize + function_size;
- if (function_size > 0) {
- function_store_.WriteTo(data.SubVector(PreparseDataConstants::kHeaderSize,
- symbol_start));
- }
- return data;
+void CompleteParserRecorder::LogOneByteSymbol(int start,
+ Vector<const uint8_t> literal) {
+ ASSERT(should_log_symbols_);
+ int hash = vector_hash(literal);
+ LogSymbol(start, hash, true, literal);
}
-// ----------------------------------------------------------------------------
-// CompleteParserRecorder - Record both function entries and symbols.
-
-CompleteParserRecorder::CompleteParserRecorder()
- : FunctionLoggingParserRecorder(),
- literal_chars_(0),
- symbol_store_(0),
- symbol_keys_(0),
- string_table_(vector_compare),
- symbol_id_(0) {
+void CompleteParserRecorder::LogTwoByteSymbol(int start,
+ Vector<const uint16_t> literal) {
+ ASSERT(should_log_symbols_);
+ int hash = vector_hash(literal);
+ LogSymbol(start, hash, false, Vector<const byte>::cast(literal));
}
void CompleteParserRecorder::LogSymbol(int start,
int hash,
- bool is_ascii,
+ bool is_one_byte,
Vector<const byte> literal_bytes) {
- Key key = { is_ascii, literal_bytes };
+ Key key = { is_one_byte, literal_bytes };
HashMap::Entry* entry = string_table_.Lookup(&key, hash, true);
int id = static_cast<int>(reinterpret_cast<intptr_t>(entry->value));
if (id == 0) {
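
Taken together, vector_hash, vector_compare and LogSymbol above implement string interning: each distinct (encoding, bytes) literal is looked up in string_table_ and receives a small integer id the first time it is seen, so repeated identifiers cost one id reference instead of another copy of the bytes. A compact illustration of the same interning idea using standard containers instead of V8's HashMap; the names below are made up for the sketch.

// Sketch only: hands out stable small ids for repeated byte strings,
// mirroring the hash-map interning in CompleteParserRecorder::LogSymbol.
#include <string>
#include <unordered_map>

class SymbolInterner {
 public:
  int Intern(bool is_one_byte, const std::string& bytes) {
    // The encoding flag is part of the key, like Key::is_one_byte above.
    std::string key = (is_one_byte ? "1" : "2") + bytes;
    std::unordered_map<std::string, int>::iterator it = table_.find(key);
    if (it != table_.end()) return it->second;  // already interned
    int id = ++next_id_;                        // ids start at 1; 0 means unseen
    table_[key] = id;
    return id;
  }

 private:
  std::unordered_map<std::string, int> table_;
  int next_id_ = 0;
};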
@@ -167,16 +179,26 @@ Vector<unsigned> CompleteParserRecorder::ExtractData() {
void CompleteParserRecorder::WriteNumber(int number) {
+ // Split the number into chunks of 7 bits. Write them one after another (the
+ // most significant first). Use the MSB of each byte for signalling that the
+ // number continues. See ScriptDataImpl::ReadNumber for the reading side.
ASSERT(number >= 0);
int mask = (1 << 28) - 1;
- for (int i = 28; i > 0; i -= 7) {
- if (number > mask) {
- symbol_store_.Add(static_cast<byte>(number >> i) | 0x80u);
- number &= mask;
- }
+ int i = 28;
+ // 26 million symbols ought to be enough for anybody.
+ ASSERT(number <= mask);
+ while (number < mask) {
+ mask >>= 7;
+ i -= 7;
+ }
+ while (i > 0) {
+ symbol_store_.Add(static_cast<byte>(number >> i) | 0x80u);
+ number &= mask;
mask >>= 7;
+ i -= 7;
}
+ ASSERT(number < (1 << 7));
symbol_store_.Add(static_cast<byte>(number));
}
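
WriteNumber above serializes a non-negative int as big-endian groups of 7 bits, setting the top bit of every byte except the last as a continuation marker; ScriptDataImpl::ReadNumber (not in this hunk) is the consumer. The round-trip sketch below is written for illustration: it follows the same wire format, though its handling of leading zero groups may not be byte-for-byte identical to the V8 encoder.

// Sketch only: big-endian base-128 encoding with a continuation bit in the
// most significant bit of every byte but the last.
#include <cassert>
#include <cstdint>
#include <vector>

void WriteNumber(std::vector<uint8_t>* out, int number) {
  assert(number >= 0);
  int shift = 28;
  while (shift > 0 && (number >> shift) == 0) shift -= 7;  // skip empty leading groups
  while (shift > 0) {
    out->push_back(static_cast<uint8_t>(((number >> shift) & 0x7F) | 0x80));
    shift -= 7;
  }
  out->push_back(static_cast<uint8_t>(number & 0x7F));  // last byte: high bit clear
}

int ReadNumber(const uint8_t** pos) {
  int result = 0;
  uint8_t byte;
  do {
    byte = *(*pos)++;
    result = (result << 7) | (byte & 0x7F);
  } while ((byte & 0x80) != 0);  // continuation bit set: more groups follow
  return result;
}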
diff --git a/deps/v8/src/preparse-data.h b/deps/v8/src/preparse-data.h
index 3a1e99d5d1..6a968e3b22 100644
--- a/deps/v8/src/preparse-data.h
+++ b/deps/v8/src/preparse-data.h
@@ -35,13 +35,11 @@
namespace v8 {
namespace internal {
-// ----------------------------------------------------------------------------
-// ParserRecorder - Logging of preparser data.
// Abstract interface for preparse data recorder.
class ParserRecorder {
public:
- ParserRecorder() { }
+ ParserRecorder() : should_log_symbols_(false) { }
virtual ~ParserRecorder() { }
// Logs the scope and some details of a function literal in the source.
@@ -49,11 +47,7 @@ class ParserRecorder {
int end,
int literals,
int properties,
- LanguageMode language_mode) = 0;
-
- // Logs a symbol creation of a literal or identifier.
- virtual void LogAsciiSymbol(int start, Vector<const char> literal) { }
- virtual void LogUtf16Symbol(int start, Vector<const uc16> literal) { }
+ StrictMode strict_mode) = 0;
// Logs an error message and marks the log as containing an error.
// Further logging will be ignored, and ExtractData will return a vector
@@ -63,38 +57,121 @@ class ParserRecorder {
const char* message,
const char* argument_opt) = 0;
- virtual int function_position() = 0;
+ // Logs a symbol creation of a literal or identifier.
+ bool ShouldLogSymbols() { return should_log_symbols_; }
+ // The following functions are only callable on CompleteParserRecorder
+ // and are guarded by calls to ShouldLogSymbols.
+ virtual void LogOneByteSymbol(int start, Vector<const uint8_t> literal) {
+ UNREACHABLE();
+ }
+ virtual void LogTwoByteSymbol(int start, Vector<const uint16_t> literal) {
+ UNREACHABLE();
+ }
+ virtual void PauseRecording() { UNREACHABLE(); }
+ virtual void ResumeRecording() { UNREACHABLE(); }
- virtual int symbol_position() = 0;
+ protected:
+ bool should_log_symbols_;
- virtual int symbol_ids() = 0;
+ private:
+ DISALLOW_COPY_AND_ASSIGN(ParserRecorder);
+};
- virtual Vector<unsigned> ExtractData() = 0;
- virtual void PauseRecording() = 0;
+class SingletonLogger : public ParserRecorder {
+ public:
+ SingletonLogger() : has_error_(false), start_(-1), end_(-1) { }
+ virtual ~SingletonLogger() { }
- virtual void ResumeRecording() = 0;
-};
+ void Reset() { has_error_ = false; }
+
+ virtual void LogFunction(int start,
+ int end,
+ int literals,
+ int properties,
+ StrictMode strict_mode) {
+ ASSERT(!has_error_);
+ start_ = start;
+ end_ = end;
+ literals_ = literals;
+ properties_ = properties;
+ strict_mode_ = strict_mode;
+ };
+
+ // Logs an error message and marks the log as containing an error.
+ // Further logging will be ignored, and ExtractData will return a vector
+ // representing the error only.
+ virtual void LogMessage(int start,
+ int end,
+ const char* message,
+ const char* argument_opt) {
+ if (has_error_) return;
+ has_error_ = true;
+ start_ = start;
+ end_ = end;
+ message_ = message;
+ argument_opt_ = argument_opt;
+ }
+
+ bool has_error() { return has_error_; }
+
+ int start() { return start_; }
+ int end() { return end_; }
+ int literals() {
+ ASSERT(!has_error_);
+ return literals_;
+ }
+ int properties() {
+ ASSERT(!has_error_);
+ return properties_;
+ }
+ StrictMode strict_mode() {
+ ASSERT(!has_error_);
+ return strict_mode_;
+ }
+ const char* message() {
+ ASSERT(has_error_);
+ return message_;
+ }
+ const char* argument_opt() {
+ ASSERT(has_error_);
+ return argument_opt_;
+ }
+ private:
+ bool has_error_;
+ int start_;
+ int end_;
+ // For function entries.
+ int literals_;
+ int properties_;
+ StrictMode strict_mode_;
+ // For error messages.
+ const char* message_;
+ const char* argument_opt_;
+};
-// ----------------------------------------------------------------------------
-// FunctionLoggingParserRecorder - Record only function entries
-class FunctionLoggingParserRecorder : public ParserRecorder {
+class CompleteParserRecorder : public ParserRecorder {
public:
- FunctionLoggingParserRecorder();
- virtual ~FunctionLoggingParserRecorder() {}
+ struct Key {
+ bool is_one_byte;
+ Vector<const byte> literal_bytes;
+ };
+
+ CompleteParserRecorder();
+ virtual ~CompleteParserRecorder() {}
virtual void LogFunction(int start,
int end,
int literals,
int properties,
- LanguageMode language_mode) {
+ StrictMode strict_mode) {
function_store_.Add(start);
function_store_.Add(end);
function_store_.Add(literals);
function_store_.Add(properties);
- function_store_.Add(language_mode);
+ function_store_.Add(strict_mode);
}
// Logs an error message and marks the log as containing an error.
@@ -105,118 +182,44 @@ class FunctionLoggingParserRecorder : public ParserRecorder {
const char* message,
const char* argument_opt);
- virtual int function_position() { return function_store_.size(); }
-
-
- virtual Vector<unsigned> ExtractData() = 0;
-
virtual void PauseRecording() {
- pause_count_++;
- is_recording_ = false;
+ ASSERT(should_log_symbols_);
+ should_log_symbols_ = false;
}
virtual void ResumeRecording() {
- ASSERT(pause_count_ > 0);
- if (--pause_count_ == 0) is_recording_ = !has_error();
+ ASSERT(!should_log_symbols_);
+ should_log_symbols_ = !has_error();
}
- protected:
+ virtual void LogOneByteSymbol(int start, Vector<const uint8_t> literal);
+ virtual void LogTwoByteSymbol(int start, Vector<const uint16_t> literal);
+ Vector<unsigned> ExtractData();
+
+ private:
bool has_error() {
return static_cast<bool>(preamble_[PreparseDataConstants::kHasErrorOffset]);
}
- bool is_recording() {
- return is_recording_;
- }
-
void WriteString(Vector<const char> str);
+ // For testing. Defined in test-parsing.cc.
+ friend struct CompleteParserRecorderFriend;
+
+ void LogSymbol(int start,
+ int hash,
+ bool is_one_byte,
+ Vector<const byte> literal);
+
+ // Write a non-negative number to the symbol store.
+ void WriteNumber(int number);
+
Collector<unsigned> function_store_;
unsigned preamble_[PreparseDataConstants::kHeaderSize];
- bool is_recording_;
- int pause_count_;
#ifdef DEBUG
int prev_start_;
#endif
-};
-
-
-// ----------------------------------------------------------------------------
-// PartialParserRecorder - Record only function entries
-
-class PartialParserRecorder : public FunctionLoggingParserRecorder {
- public:
- PartialParserRecorder() : FunctionLoggingParserRecorder() { }
- virtual void LogAsciiSymbol(int start, Vector<const char> literal) { }
- virtual void LogUtf16Symbol(int start, Vector<const uc16> literal) { }
- virtual ~PartialParserRecorder() { }
- virtual Vector<unsigned> ExtractData();
- virtual int symbol_position() { return 0; }
- virtual int symbol_ids() { return 0; }
-};
-
-
-// ----------------------------------------------------------------------------
-// CompleteParserRecorder - Record both function entries and symbols.
-
-class CompleteParserRecorder: public FunctionLoggingParserRecorder {
- public:
- CompleteParserRecorder();
- virtual ~CompleteParserRecorder() { }
-
- virtual void LogAsciiSymbol(int start, Vector<const char> literal) {
- if (!is_recording_) return;
- int hash = vector_hash(literal);
- LogSymbol(start, hash, true, Vector<const byte>::cast(literal));
- }
-
- virtual void LogUtf16Symbol(int start, Vector<const uc16> literal) {
- if (!is_recording_) return;
- int hash = vector_hash(literal);
- LogSymbol(start, hash, false, Vector<const byte>::cast(literal));
- }
-
- virtual Vector<unsigned> ExtractData();
-
- virtual int symbol_position() { return symbol_store_.size(); }
- virtual int symbol_ids() { return symbol_id_; }
-
- private:
- struct Key {
- bool is_ascii;
- Vector<const byte> literal_bytes;
- };
-
- virtual void LogSymbol(int start,
- int hash,
- bool is_ascii,
- Vector<const byte> literal);
-
- template <typename Char>
- static int vector_hash(Vector<const Char> string) {
- int hash = 0;
- for (int i = 0; i < string.length(); i++) {
- int c = static_cast<int>(string[i]);
- hash += c;
- hash += (hash << 10);
- hash ^= (hash >> 6);
- }
- return hash;
- }
-
- static bool vector_compare(void* a, void* b) {
- Key* string1 = reinterpret_cast<Key*>(a);
- Key* string2 = reinterpret_cast<Key*>(b);
- if (string1->is_ascii != string2->is_ascii) return false;
- int length = string1->literal_bytes.length();
- if (string2->literal_bytes.length() != length) return false;
- return memcmp(string1->literal_bytes.start(),
- string2->literal_bytes.start(), length) == 0;
- }
-
- // Write a non-negative number to the symbol store.
- void WriteNumber(int number);
Collector<byte> literal_chars_;
Collector<byte> symbol_store_;
diff --git a/deps/v8/src/preparser.cc b/deps/v8/src/preparser.cc
index fa6f217993..9bcc88002d 100644
--- a/deps/v8/src/preparser.cc
+++ b/deps/v8/src/preparser.cc
@@ -55,14 +55,107 @@ int isfinite(double value);
namespace v8 {
namespace internal {
+
+void PreParserTraits::CheckStrictModeLValue(PreParserExpression expression,
+ bool* ok) {
+ if (expression.IsIdentifier() &&
+ expression.AsIdentifier().IsEvalOrArguments()) {
+ pre_parser_->ReportMessage("strict_eval_arguments",
+ Vector<const char*>::empty());
+ *ok = false;
+ }
+}
+
+
+void PreParserTraits::ReportMessageAt(Scanner::Location location,
+ const char* message,
+ Vector<const char*> args,
+ bool is_reference_error) {
+ ReportMessageAt(location.beg_pos,
+ location.end_pos,
+ message,
+ args.length() > 0 ? args[0] : NULL,
+ is_reference_error);
+}
+
+
+void PreParserTraits::ReportMessageAt(Scanner::Location location,
+ const char* type,
+ const char* name_opt,
+ bool is_reference_error) {
+ pre_parser_->log_
+ ->LogMessage(location.beg_pos, location.end_pos, type, name_opt);
+}
+
+
+void PreParserTraits::ReportMessageAt(int start_pos,
+ int end_pos,
+ const char* type,
+ const char* name_opt,
+ bool is_reference_error) {
+ pre_parser_->log_->LogMessage(start_pos, end_pos, type, name_opt);
+}
+
+
+PreParserIdentifier PreParserTraits::GetSymbol(Scanner* scanner) {
+ pre_parser_->LogSymbol();
+ if (scanner->current_token() == Token::FUTURE_RESERVED_WORD) {
+ return PreParserIdentifier::FutureReserved();
+ } else if (scanner->current_token() ==
+ Token::FUTURE_STRICT_RESERVED_WORD) {
+ return PreParserIdentifier::FutureStrictReserved();
+ } else if (scanner->current_token() == Token::YIELD) {
+ return PreParserIdentifier::Yield();
+ }
+ if (scanner->UnescapedLiteralMatches("eval", 4)) {
+ return PreParserIdentifier::Eval();
+ }
+ if (scanner->UnescapedLiteralMatches("arguments", 9)) {
+ return PreParserIdentifier::Arguments();
+ }
+ return PreParserIdentifier::Default();
+}
+
+
+PreParserExpression PreParserTraits::ExpressionFromString(
+ int pos, Scanner* scanner, PreParserFactory* factory) {
+ pre_parser_->LogSymbol();
+ if (scanner->UnescapedLiteralMatches("use strict", 10)) {
+ return PreParserExpression::UseStrictStringLiteral();
+ }
+ return PreParserExpression::StringLiteral();
+}
+
+
+PreParserExpression PreParserTraits::ParseV8Intrinsic(bool* ok) {
+ return pre_parser_->ParseV8Intrinsic(ok);
+}
+
+
+PreParserExpression PreParserTraits::ParseFunctionLiteral(
+ PreParserIdentifier name,
+ Scanner::Location function_name_location,
+ bool name_is_strict_reserved,
+ bool is_generator,
+ int function_token_position,
+ FunctionLiteral::FunctionType type,
+ bool* ok) {
+ return pre_parser_->ParseFunctionLiteral(
+ name, function_name_location, name_is_strict_reserved, is_generator,
+ function_token_position, type, ok);
+}
+
+
PreParser::PreParseResult PreParser::PreParseLazyFunction(
- LanguageMode mode, bool is_generator, ParserRecorder* log) {
+ StrictMode strict_mode, bool is_generator, ParserRecorder* log) {
log_ = log;
// Lazy functions always have trivial outer scopes (no with/catch scopes).
- Scope top_scope(&scope_, kTopLevelScope);
- set_language_mode(mode);
- Scope function_scope(&scope_, kFunctionScope);
- function_scope.set_is_generator(is_generator);
+ PreParserScope top_scope(scope_, GLOBAL_SCOPE);
+ FunctionState top_state(&function_state_, &scope_, &top_scope);
+ scope_->SetStrictMode(strict_mode);
+ PreParserScope function_scope(scope_, FUNCTION_SCOPE);
+ FunctionState function_state(&function_state_, &scope_, &function_scope);
+ function_state.set_is_generator(is_generator);
ASSERT_EQ(Token::LBRACE, scanner()->current_token());
bool ok = true;
int start_position = peek_position();
@@ -72,7 +165,7 @@ PreParser::PreParseResult PreParser::PreParseLazyFunction(
ReportUnexpectedToken(scanner()->current_token());
} else {
ASSERT_EQ(Token::RBRACE, scanner()->peek());
- if (!scope_->is_classic_mode()) {
+ if (scope_->strict_mode() == STRICT) {
int end_pos = scanner()->location().end_pos;
CheckOctalLiteral(start_position, end_pos, &ok);
}
@@ -139,8 +232,7 @@ PreParser::SourceElements PreParser::ParseSourceElements(int end_token,
Statement statement = ParseSourceElement(CHECK_OK);
if (directive_prologue) {
if (statement.IsUseStrictLiteral()) {
- set_language_mode(allow_harmony_scoping() ?
- EXTENDED_MODE : STRICT_MODE);
+ scope_->SetStrictMode(STRICT);
} else if (!statement.IsStringLiteral()) {
directive_prologue = false;
}
@@ -234,9 +326,11 @@ PreParser::Statement PreParser::ParseStatement(bool* ok) {
Scanner::Location start_location = scanner()->peek_location();
Statement statement = ParseFunctionDeclaration(CHECK_OK);
Scanner::Location end_location = scanner()->location();
- if (!scope_->is_classic_mode()) {
- ReportMessageAt(start_location.beg_pos, end_location.end_pos,
- "strict_function", NULL);
+ if (strict_mode() == STRICT) {
+ PreParserTraits::ReportMessageAt(start_location.beg_pos,
+ end_location.end_pos,
+ "strict_function",
+ NULL);
*ok = false;
return Statement::Default();
} else {
@@ -260,7 +354,7 @@ PreParser::Statement PreParser::ParseFunctionDeclaration(bool* ok) {
// 'function' '*' Identifier '(' FormalParameterListopt ')'
// '{' FunctionBody '}'
Expect(Token::FUNCTION, CHECK_OK);
-
+ int pos = position();
bool is_generator = allow_generators() && Check(Token::MUL);
bool is_strict_reserved = false;
Identifier name = ParseIdentifierOrStrictReservedWord(
@@ -269,6 +363,8 @@ PreParser::Statement PreParser::ParseFunctionDeclaration(bool* ok) {
scanner()->location(),
is_strict_reserved,
is_generator,
+ pos,
+ FunctionLiteral::DECLARATION,
CHECK_OK);
return Statement::FunctionDeclaration();
}
@@ -283,7 +379,7 @@ PreParser::Statement PreParser::ParseBlock(bool* ok) {
//
Expect(Token::LBRACE, CHECK_OK);
while (peek() != Token::RBRACE) {
- if (is_extended_mode()) {
+ if (allow_harmony_scoping() && strict_mode() == STRICT) {
ParseSourceElement(CHECK_OK);
} else {
ParseStatement(CHECK_OK);
@@ -343,30 +439,24 @@ PreParser::Statement PreParser::ParseVariableDeclarations(
// * It is a Syntax Error if the code that matches this production is not
// contained in extended code.
//
- // However disallowing const in classic mode will break compatibility with
+ // However disallowing const in sloppy mode will break compatibility with
// existing pages. Therefore we keep allowing const with the old
- // non-harmony semantics in classic mode.
+ // non-harmony semantics in sloppy mode.
Consume(Token::CONST);
- switch (language_mode()) {
- case CLASSIC_MODE:
- break;
- case STRICT_MODE: {
- Scanner::Location location = scanner()->peek_location();
- ReportMessageAt(location, "strict_const", NULL);
- *ok = false;
- return Statement::Default();
- }
- case EXTENDED_MODE:
- if (var_context != kSourceElement &&
- var_context != kForStatement) {
- Scanner::Location location = scanner()->peek_location();
- ReportMessageAt(location.beg_pos, location.end_pos,
- "unprotected_const", NULL);
+ if (strict_mode() == STRICT) {
+ if (allow_harmony_scoping()) {
+ if (var_context != kSourceElement && var_context != kForStatement) {
+ ReportMessageAt(scanner()->peek_location(), "unprotected_const");
*ok = false;
return Statement::Default();
}
require_initializer = true;
- break;
+ } else {
+ Scanner::Location location = scanner()->peek_location();
+ ReportMessageAt(location, "strict_const");
+ *ok = false;
+ return Statement::Default();
+ }
}
} else if (peek() == Token::LET) {
// ES6 Draft Rev4 section 12.2.1:
@@ -375,19 +465,17 @@ PreParser::Statement PreParser::ParseVariableDeclarations(
//
// * It is a Syntax Error if the code that matches this production is not
// contained in extended code.
- if (!is_extended_mode()) {
- Scanner::Location location = scanner()->peek_location();
- ReportMessageAt(location.beg_pos, location.end_pos,
- "illegal_let", NULL);
+ //
+ // TODO(rossberg): make 'let' a legal identifier in sloppy mode.
+ if (!allow_harmony_scoping() || strict_mode() == SLOPPY) {
+ ReportMessageAt(scanner()->peek_location(), "illegal_let");
*ok = false;
return Statement::Default();
}
Consume(Token::LET);
if (var_context != kSourceElement &&
var_context != kForStatement) {
- Scanner::Location location = scanner()->peek_location();
- ReportMessageAt(location.beg_pos, location.end_pos,
- "unprotected_let", NULL);
+ ReportMessageAt(scanner()->peek_location(), "unprotected_let");
*ok = false;
return Statement::Default();
}
@@ -432,7 +520,7 @@ PreParser::Statement PreParser::ParseExpressionOrLabelledStatement(bool* ok) {
// Expression is a single identifier, and not, e.g., a parenthesized
// identifier.
ASSERT(!expr.AsIdentifier().IsFutureReserved());
- ASSERT(scope_->is_classic_mode() ||
+ ASSERT(strict_mode() == SLOPPY ||
(!expr.AsIdentifier().IsFutureStrictReserved() &&
!expr.AsIdentifier().IsYield()));
Consume(Token::COLON);
@@ -530,9 +618,8 @@ PreParser::Statement PreParser::ParseWithStatement(bool* ok) {
// WithStatement ::
// 'with' '(' Expression ')' Statement
Expect(Token::WITH, CHECK_OK);
- if (!scope_->is_classic_mode()) {
- Scanner::Location location = scanner()->location();
- ReportMessageAt(location, "strict_mode_with", NULL);
+ if (strict_mode() == STRICT) {
+ ReportMessageAt(scanner()->location(), "strict_mode_with");
*ok = false;
return Statement::Default();
}
@@ -540,7 +627,8 @@ PreParser::Statement PreParser::ParseWithStatement(bool* ok) {
ParseExpression(true, CHECK_OK);
Expect(Token::RPAREN, CHECK_OK);
- Scope::InsideWith iw(scope_);
+ PreParserScope with_scope(scope_, WITH_SCOPE);
+ BlockState block_state(&scope_, &with_scope);
ParseStatement(CHECK_OK);
return Statement::Default();
}
@@ -676,8 +764,7 @@ PreParser::Statement PreParser::ParseThrowStatement(bool* ok) {
Expect(Token::THROW, CHECK_OK);
if (scanner()->HasAnyLineTerminatorBeforeNext()) {
- Scanner::Location pos = scanner()->location();
- ReportMessageAt(pos, "newline_after_throw", NULL);
+ ReportMessageAt(scanner()->location(), "newline_after_throw");
*ok = false;
return Statement::Default();
}
@@ -705,7 +792,7 @@ PreParser::Statement PreParser::ParseTryStatement(bool* ok) {
Token::Value tok = peek();
if (tok != Token::CATCH && tok != Token::FINALLY) {
- ReportMessageAt(scanner()->location(), "no_catch_or_finally", NULL);
+ ReportMessageAt(scanner()->location(), "no_catch_or_finally");
*ok = false;
return Statement::Default();
}
@@ -714,7 +801,9 @@ PreParser::Statement PreParser::ParseTryStatement(bool* ok) {
Expect(Token::LPAREN, CHECK_OK);
ParseIdentifier(kDontAllowEvalOrArguments, CHECK_OK);
Expect(Token::RPAREN, CHECK_OK);
- { Scope::InsideWith iw(scope_);
+ {
+ PreParserScope with_scope(scope_, WITH_SCOPE);
+ BlockState block_state(&scope_, &with_scope);
ParseBlock(CHECK_OK);
}
tok = peek();
@@ -748,561 +837,22 @@ PreParser::Statement PreParser::ParseDebuggerStatement(bool* ok) {
#undef DUMMY
-// Precedence = 1
-PreParser::Expression PreParser::ParseExpression(bool accept_IN, bool* ok) {
- // Expression ::
- // AssignmentExpression
- // Expression ',' AssignmentExpression
-
- Expression result = ParseAssignmentExpression(accept_IN, CHECK_OK);
- while (peek() == Token::COMMA) {
- Expect(Token::COMMA, CHECK_OK);
- ParseAssignmentExpression(accept_IN, CHECK_OK);
- result = Expression::Default();
- }
- return result;
-}
-
-
-// Precedence = 2
-PreParser::Expression PreParser::ParseAssignmentExpression(bool accept_IN,
- bool* ok) {
- // AssignmentExpression ::
- // ConditionalExpression
- // YieldExpression
- // LeftHandSideExpression AssignmentOperator AssignmentExpression
-
- if (scope_->is_generator() && peek() == Token::YIELD) {
- return ParseYieldExpression(ok);
- }
-
- Scanner::Location before = scanner()->peek_location();
- Expression expression = ParseConditionalExpression(accept_IN, CHECK_OK);
-
- if (!Token::IsAssignmentOp(peek())) {
- // Parsed conditional expression only (no assignment).
- return expression;
- }
-
- if (!scope_->is_classic_mode() &&
- expression.IsIdentifier() &&
- expression.AsIdentifier().IsEvalOrArguments()) {
- Scanner::Location after = scanner()->location();
- ReportMessageAt(before.beg_pos, after.end_pos,
- "strict_eval_arguments", NULL);
- *ok = false;
- return Expression::Default();
- }
-
- Token::Value op = Next(); // Get assignment operator.
- ParseAssignmentExpression(accept_IN, CHECK_OK);
-
- if ((op == Token::ASSIGN) && expression.IsThisProperty()) {
- scope_->AddProperty();
- }
-
- return Expression::Default();
-}
-
-
-// Precedence = 3
-PreParser::Expression PreParser::ParseYieldExpression(bool* ok) {
- // YieldExpression ::
- // 'yield' '*'? AssignmentExpression
- Consume(Token::YIELD);
- Check(Token::MUL);
-
- ParseAssignmentExpression(false, CHECK_OK);
-
- return Expression::Default();
-}
-
-
-// Precedence = 3
-PreParser::Expression PreParser::ParseConditionalExpression(bool accept_IN,
- bool* ok) {
- // ConditionalExpression ::
- // LogicalOrExpression
- // LogicalOrExpression '?' AssignmentExpression ':' AssignmentExpression
-
- // We start using the binary expression parser for prec >= 4 only!
- Expression expression = ParseBinaryExpression(4, accept_IN, CHECK_OK);
- if (peek() != Token::CONDITIONAL) return expression;
- Consume(Token::CONDITIONAL);
- // In parsing the first assignment expression in conditional
- // expressions we always accept the 'in' keyword; see ECMA-262,
- // section 11.12, page 58.
- ParseAssignmentExpression(true, CHECK_OK);
- Expect(Token::COLON, CHECK_OK);
- ParseAssignmentExpression(accept_IN, CHECK_OK);
- return Expression::Default();
-}
-
-
-// Precedence >= 4
-PreParser::Expression PreParser::ParseBinaryExpression(int prec,
- bool accept_IN,
- bool* ok) {
- Expression result = ParseUnaryExpression(CHECK_OK);
- for (int prec1 = Precedence(peek(), accept_IN); prec1 >= prec; prec1--) {
- // prec1 >= 4
- while (Precedence(peek(), accept_IN) == prec1) {
- Next();
- ParseBinaryExpression(prec1 + 1, accept_IN, CHECK_OK);
- result = Expression::Default();
- }
- }
- return result;
-}
-
-
-PreParser::Expression PreParser::ParseUnaryExpression(bool* ok) {
- // UnaryExpression ::
- // PostfixExpression
- // 'delete' UnaryExpression
- // 'void' UnaryExpression
- // 'typeof' UnaryExpression
- // '++' UnaryExpression
- // '--' UnaryExpression
- // '+' UnaryExpression
- // '-' UnaryExpression
- // '~' UnaryExpression
- // '!' UnaryExpression
-
- Token::Value op = peek();
- if (Token::IsUnaryOp(op)) {
- op = Next();
- ParseUnaryExpression(ok);
- return Expression::Default();
- } else if (Token::IsCountOp(op)) {
- op = Next();
- Scanner::Location before = scanner()->peek_location();
- Expression expression = ParseUnaryExpression(CHECK_OK);
- if (!scope_->is_classic_mode() &&
- expression.IsIdentifier() &&
- expression.AsIdentifier().IsEvalOrArguments()) {
- Scanner::Location after = scanner()->location();
- ReportMessageAt(before.beg_pos, after.end_pos,
- "strict_eval_arguments", NULL);
- *ok = false;
- }
- return Expression::Default();
- } else {
- return ParsePostfixExpression(ok);
- }
-}
-
-
-PreParser::Expression PreParser::ParsePostfixExpression(bool* ok) {
- // PostfixExpression ::
- // LeftHandSideExpression ('++' | '--')?
-
- Scanner::Location before = scanner()->peek_location();
- Expression expression = ParseLeftHandSideExpression(CHECK_OK);
- if (!scanner()->HasAnyLineTerminatorBeforeNext() &&
- Token::IsCountOp(peek())) {
- if (!scope_->is_classic_mode() &&
- expression.IsIdentifier() &&
- expression.AsIdentifier().IsEvalOrArguments()) {
- Scanner::Location after = scanner()->location();
- ReportMessageAt(before.beg_pos, after.end_pos,
- "strict_eval_arguments", NULL);
- *ok = false;
- return Expression::Default();
- }
- Next();
- return Expression::Default();
- }
- return expression;
-}
-
-
-PreParser::Expression PreParser::ParseLeftHandSideExpression(bool* ok) {
- // LeftHandSideExpression ::
- // (NewExpression | MemberExpression) ...
-
- Expression result = Expression::Default();
- if (peek() == Token::NEW) {
- result = ParseNewExpression(CHECK_OK);
- } else {
- result = ParseMemberExpression(CHECK_OK);
- }
-
- while (true) {
- switch (peek()) {
- case Token::LBRACK: {
- Consume(Token::LBRACK);
- ParseExpression(true, CHECK_OK);
- Expect(Token::RBRACK, CHECK_OK);
- if (result.IsThis()) {
- result = Expression::ThisProperty();
- } else {
- result = Expression::Default();
- }
- break;
- }
-
- case Token::LPAREN: {
- ParseArguments(CHECK_OK);
- result = Expression::Default();
- break;
- }
-
- case Token::PERIOD: {
- Consume(Token::PERIOD);
- ParseIdentifierName(CHECK_OK);
- if (result.IsThis()) {
- result = Expression::ThisProperty();
- } else {
- result = Expression::Default();
- }
- break;
- }
-
- default:
- return result;
- }
- }
-}
-
-
-PreParser::Expression PreParser::ParseNewExpression(bool* ok) {
- // NewExpression ::
- // ('new')+ MemberExpression
-
- // The grammar for new expressions is pretty warped. The keyword
- // 'new' can either be a part of the new expression (where it isn't
- // followed by an argument list) or a part of the member expression,
- // where it must be followed by an argument list. To accommodate
- // this, we parse the 'new' keywords greedily and keep track of how
- // many we have parsed. This information is then passed on to the
- // member expression parser, which is only allowed to match argument
- // lists as long as it has 'new' prefixes left
- unsigned new_count = 0;
- do {
- Consume(Token::NEW);
- new_count++;
- } while (peek() == Token::NEW);
-
- return ParseMemberWithNewPrefixesExpression(new_count, ok);
-}
-
-
-PreParser::Expression PreParser::ParseMemberExpression(bool* ok) {
- return ParseMemberWithNewPrefixesExpression(0, ok);
-}
-
-
-PreParser::Expression PreParser::ParseMemberWithNewPrefixesExpression(
- unsigned new_count, bool* ok) {
- // MemberExpression ::
- // (PrimaryExpression | FunctionLiteral)
- // ('[' Expression ']' | '.' Identifier | Arguments)*
-
- // Parse the initial primary or function expression.
- Expression result = Expression::Default();
- if (peek() == Token::FUNCTION) {
- Consume(Token::FUNCTION);
-
- bool is_generator = allow_generators() && Check(Token::MUL);
- Identifier name = Identifier::Default();
- bool is_strict_reserved_name = false;
- Scanner::Location function_name_location = Scanner::Location::invalid();
- if (peek_any_identifier()) {
- name = ParseIdentifierOrStrictReservedWord(&is_strict_reserved_name,
- CHECK_OK);
- function_name_location = scanner()->location();
- }
- result = ParseFunctionLiteral(name,
- function_name_location,
- is_strict_reserved_name,
- is_generator,
- CHECK_OK);
- } else {
- result = ParsePrimaryExpression(CHECK_OK);
- }
-
- while (true) {
- switch (peek()) {
- case Token::LBRACK: {
- Consume(Token::LBRACK);
- ParseExpression(true, CHECK_OK);
- Expect(Token::RBRACK, CHECK_OK);
- if (result.IsThis()) {
- result = Expression::ThisProperty();
- } else {
- result = Expression::Default();
- }
- break;
- }
- case Token::PERIOD: {
- Consume(Token::PERIOD);
- ParseIdentifierName(CHECK_OK);
- if (result.IsThis()) {
- result = Expression::ThisProperty();
- } else {
- result = Expression::Default();
- }
- break;
- }
- case Token::LPAREN: {
- if (new_count == 0) return result;
- // Consume one of the new prefixes (already parsed).
- ParseArguments(CHECK_OK);
- new_count--;
- result = Expression::Default();
- break;
- }
- default:
- return result;
- }
- }
-}
-
-
-PreParser::Expression PreParser::ParsePrimaryExpression(bool* ok) {
- // PrimaryExpression ::
- // 'this'
- // 'null'
- // 'true'
- // 'false'
- // Identifier
- // Number
- // String
- // ArrayLiteral
- // ObjectLiteral
- // RegExpLiteral
- // '(' Expression ')'
-
- Expression result = Expression::Default();
- switch (peek()) {
- case Token::THIS: {
- Next();
- result = Expression::This();
- break;
- }
-
- case Token::FUTURE_RESERVED_WORD:
- case Token::FUTURE_STRICT_RESERVED_WORD:
- case Token::YIELD:
- case Token::IDENTIFIER: {
- // Using eval or arguments in this context is OK even in strict mode.
- Identifier id = ParseIdentifier(kAllowEvalOrArguments, CHECK_OK);
- result = Expression::FromIdentifier(id);
- break;
- }
-
- case Token::NULL_LITERAL:
- case Token::TRUE_LITERAL:
- case Token::FALSE_LITERAL:
- case Token::NUMBER: {
- Next();
- break;
- }
- case Token::STRING: {
- Next();
- result = GetStringSymbol();
- break;
- }
-
- case Token::ASSIGN_DIV:
- result = ParseRegExpLiteral(true, CHECK_OK);
- break;
-
- case Token::DIV:
- result = ParseRegExpLiteral(false, CHECK_OK);
- break;
-
- case Token::LBRACK:
- result = ParseArrayLiteral(CHECK_OK);
- break;
-
- case Token::LBRACE:
- result = ParseObjectLiteral(CHECK_OK);
- break;
-
- case Token::LPAREN:
- Consume(Token::LPAREN);
- parenthesized_function_ = (peek() == Token::FUNCTION);
- result = ParseExpression(true, CHECK_OK);
- Expect(Token::RPAREN, CHECK_OK);
- break;
-
- case Token::MOD:
- result = ParseV8Intrinsic(CHECK_OK);
- break;
-
- default: {
- Token::Value next = Next();
- ReportUnexpectedToken(next);
- *ok = false;
- return Expression::Default();
- }
- }
-
- return result;
-}
-
-
-PreParser::Expression PreParser::ParseArrayLiteral(bool* ok) {
- // ArrayLiteral ::
- // '[' Expression? (',' Expression?)* ']'
- Expect(Token::LBRACK, CHECK_OK);
- while (peek() != Token::RBRACK) {
- if (peek() != Token::COMMA) {
- ParseAssignmentExpression(true, CHECK_OK);
- }
- if (peek() != Token::RBRACK) {
- Expect(Token::COMMA, CHECK_OK);
- }
- }
- Expect(Token::RBRACK, CHECK_OK);
-
- scope_->NextMaterializedLiteralIndex();
- return Expression::Default();
-}
-
-
-PreParser::Expression PreParser::ParseObjectLiteral(bool* ok) {
- // ObjectLiteral ::
- // '{' (
- // ((IdentifierName | String | Number) ':' AssignmentExpression)
- // | (('get' | 'set') (IdentifierName | String | Number) FunctionLiteral)
- // )*[','] '}'
-
- ObjectLiteralChecker checker(this, language_mode());
-
- Expect(Token::LBRACE, CHECK_OK);
- while (peek() != Token::RBRACE) {
- Token::Value next = peek();
- switch (next) {
- case Token::IDENTIFIER:
- case Token::FUTURE_RESERVED_WORD:
- case Token::FUTURE_STRICT_RESERVED_WORD: {
- bool is_getter = false;
- bool is_setter = false;
- ParseIdentifierNameOrGetOrSet(&is_getter, &is_setter, CHECK_OK);
- if ((is_getter || is_setter) && peek() != Token::COLON) {
- Token::Value name = Next();
- bool is_keyword = Token::IsKeyword(name);
- if (name != Token::IDENTIFIER &&
- name != Token::FUTURE_RESERVED_WORD &&
- name != Token::FUTURE_STRICT_RESERVED_WORD &&
- name != Token::NUMBER &&
- name != Token::STRING &&
- !is_keyword) {
- *ok = false;
- return Expression::Default();
- }
- if (!is_keyword) {
- LogSymbol();
- }
- PropertyKind type = is_getter ? kGetterProperty : kSetterProperty;
- checker.CheckProperty(name, type, CHECK_OK);
- ParseFunctionLiteral(Identifier::Default(),
- scanner()->location(),
- false, // reserved words are allowed here
- false, // not a generator
- CHECK_OK);
- if (peek() != Token::RBRACE) {
- Expect(Token::COMMA, CHECK_OK);
- }
- continue; // restart the while
- }
- checker.CheckProperty(next, kValueProperty, CHECK_OK);
- break;
- }
- case Token::STRING:
- Consume(next);
- checker.CheckProperty(next, kValueProperty, CHECK_OK);
- GetStringSymbol();
- break;
- case Token::NUMBER:
- Consume(next);
- checker.CheckProperty(next, kValueProperty, CHECK_OK);
- break;
- default:
- if (Token::IsKeyword(next)) {
- Consume(next);
- checker.CheckProperty(next, kValueProperty, CHECK_OK);
- } else {
- // Unexpected token.
- *ok = false;
- return Expression::Default();
- }
- }
-
- Expect(Token::COLON, CHECK_OK);
- ParseAssignmentExpression(true, CHECK_OK);
-
- // TODO(1240767): Consider allowing trailing comma.
- if (peek() != Token::RBRACE) Expect(Token::COMMA, CHECK_OK);
- }
- Expect(Token::RBRACE, CHECK_OK);
-
- scope_->NextMaterializedLiteralIndex();
- return Expression::Default();
-}
-
-
-PreParser::Expression PreParser::ParseRegExpLiteral(bool seen_equal,
- bool* ok) {
- if (!scanner()->ScanRegExpPattern(seen_equal)) {
- Next();
- ReportMessageAt(scanner()->location(), "unterminated_regexp", NULL);
- *ok = false;
- return Expression::Default();
- }
-
- scope_->NextMaterializedLiteralIndex();
-
- if (!scanner()->ScanRegExpFlags()) {
- Next();
- ReportMessageAt(scanner()->location(), "invalid_regexp_flags", NULL);
- *ok = false;
- return Expression::Default();
- }
- Next();
- return Expression::Default();
-}
-
-
-PreParser::Arguments PreParser::ParseArguments(bool* ok) {
- // Arguments ::
- // '(' (AssignmentExpression)*[','] ')'
-
- Expect(Token::LPAREN, ok);
- if (!*ok) return -1;
- bool done = (peek() == Token::RPAREN);
- int argc = 0;
- while (!done) {
- ParseAssignmentExpression(true, ok);
- if (!*ok) return -1;
- argc++;
- done = (peek() == Token::RPAREN);
- if (!done) {
- Expect(Token::COMMA, ok);
- if (!*ok) return -1;
- }
- }
- Expect(Token::RPAREN, ok);
- return argc;
-}
-
PreParser::Expression PreParser::ParseFunctionLiteral(
Identifier function_name,
Scanner::Location function_name_location,
bool name_is_strict_reserved,
bool is_generator,
+ int function_token_pos,
+ FunctionLiteral::FunctionType function_type,
bool* ok) {
// Function ::
// '(' FormalParameterList? ')' '{' FunctionBody '}'
// Parse function body.
ScopeType outer_scope_type = scope_->type();
- bool inside_with = scope_->IsInsideWith();
- Scope function_scope(&scope_, kFunctionScope);
- function_scope.set_is_generator(is_generator);
+ PreParserScope function_scope(scope_, FUNCTION_SCOPE);
+ FunctionState function_state(&function_state_, &scope_, &function_scope);
+ function_state.set_is_generator(is_generator);
// FormalParameterList ::
// '(' (Identifier)*[','] ')'
Expect(Token::LPAREN, CHECK_OK);
@@ -1326,14 +876,7 @@ PreParser::Expression PreParser::ParseFunctionLiteral(
reserved_error_loc = scanner()->location();
}
- int prev_value;
- if (scanner()->is_literal_ascii()) {
- prev_value =
- duplicate_finder.AddAsciiSymbol(scanner()->literal_ascii_string(), 1);
- } else {
- prev_value =
- duplicate_finder.AddUtf16Symbol(scanner()->literal_utf16_string(), 1);
- }
+ int prev_value = scanner()->FindSymbol(&duplicate_finder, 1);
if (!dupe_error_loc.IsValid() && prev_value != 0) {
dupe_error_loc = scanner()->location();
@@ -1346,16 +889,14 @@ PreParser::Expression PreParser::ParseFunctionLiteral(
}
Expect(Token::RPAREN, CHECK_OK);
- // Determine if the function will be lazily compiled.
- // Currently only happens to top-level functions.
- // Optimistically assume that all top-level functions are lazily compiled.
- bool is_lazily_compiled = (outer_scope_type == kTopLevelScope &&
- !inside_with && allow_lazy() &&
- !parenthesized_function_);
+ // See Parser::ParseFunctionLiteral for more information about lazy parsing
+ // and lazy compilation.
+ bool is_lazily_parsed = (outer_scope_type == GLOBAL_SCOPE && allow_lazy() &&
+ !parenthesized_function_);
parenthesized_function_ = false;
Expect(Token::LBRACE, CHECK_OK);
- if (is_lazily_compiled) {
+ if (is_lazily_parsed) {
ParseLazyFunctionLiteralBody(CHECK_OK);
} else {
ParseSourceElements(Token::RBRACE, ok);
@@ -1364,40 +905,35 @@ PreParser::Expression PreParser::ParseFunctionLiteral(
// Validate strict mode. We can do this only after parsing the function,
// since the function can declare itself strict.
- if (!scope_->is_classic_mode()) {
+ if (strict_mode() == STRICT) {
if (function_name.IsEvalOrArguments()) {
- ReportMessageAt(function_name_location, "strict_eval_arguments", NULL);
+ ReportMessageAt(function_name_location, "strict_eval_arguments");
*ok = false;
return Expression::Default();
}
if (name_is_strict_reserved) {
- ReportMessageAt(
- function_name_location, "unexpected_strict_reserved", NULL);
+ ReportMessageAt(function_name_location, "unexpected_strict_reserved");
*ok = false;
return Expression::Default();
}
if (eval_args_error_loc.IsValid()) {
- ReportMessageAt(eval_args_error_loc, "strict_eval_arguments",
- Vector<const char*>::empty());
+ ReportMessageAt(eval_args_error_loc, "strict_eval_arguments");
*ok = false;
return Expression::Default();
}
if (dupe_error_loc.IsValid()) {
- ReportMessageAt(dupe_error_loc, "strict_param_dupe",
- Vector<const char*>::empty());
+ ReportMessageAt(dupe_error_loc, "strict_param_dupe");
*ok = false;
return Expression::Default();
}
if (reserved_error_loc.IsValid()) {
- ReportMessageAt(reserved_error_loc, "unexpected_strict_reserved",
- Vector<const char*>::empty());
+ ReportMessageAt(reserved_error_loc, "unexpected_strict_reserved");
*ok = false;
return Expression::Default();
}
int end_position = scanner()->location().end_pos;
CheckOctalLiteral(start_position, end_position, CHECK_OK);
- return Expression::StrictFunction();
}
return Expression::Default();
@@ -1406,18 +942,19 @@ PreParser::Expression PreParser::ParseFunctionLiteral(
void PreParser::ParseLazyFunctionLiteralBody(bool* ok) {
int body_start = position();
- log_->PauseRecording();
+ bool is_logging = log_->ShouldLogSymbols();
+ if (is_logging) log_->PauseRecording();
ParseSourceElements(Token::RBRACE, ok);
- log_->ResumeRecording();
+ if (is_logging) log_->ResumeRecording();
if (!*ok) return;
// Position right after terminal '}'.
ASSERT_EQ(Token::RBRACE, scanner()->peek());
int body_end = scanner()->peek_location().end_pos;
log_->LogFunction(body_start, body_end,
- scope_->materialized_literal_count(),
- scope_->expected_properties(),
- language_mode());
+ function_state_->materialized_literal_count(),
+ function_state_->expected_property_count(),
+ strict_mode());
}
@@ -1440,166 +977,10 @@ PreParser::Expression PreParser::ParseV8Intrinsic(bool* ok) {
void PreParser::LogSymbol() {
- int identifier_pos = position();
- if (scanner()->is_literal_ascii()) {
- log_->LogAsciiSymbol(identifier_pos, scanner()->literal_ascii_string());
- } else {
- log_->LogUtf16Symbol(identifier_pos, scanner()->literal_utf16_string());
+ if (log_->ShouldLogSymbols()) {
+ scanner()->LogSymbol(log_, position());
}
}
-PreParser::Expression PreParser::GetStringSymbol() {
- const int kUseStrictLength = 10;
- const char* kUseStrictChars = "use strict";
- LogSymbol();
- if (scanner()->is_literal_ascii() &&
- scanner()->literal_length() == kUseStrictLength &&
- !scanner()->literal_contains_escapes() &&
- !strncmp(scanner()->literal_ascii_string().start(), kUseStrictChars,
- kUseStrictLength)) {
- return Expression::UseStrictStringLiteral();
- }
- return Expression::StringLiteral();
-}
-
-
-PreParser::Identifier PreParser::GetIdentifierSymbol() {
- LogSymbol();
- if (scanner()->current_token() == Token::FUTURE_RESERVED_WORD) {
- return Identifier::FutureReserved();
- } else if (scanner()->current_token() ==
- Token::FUTURE_STRICT_RESERVED_WORD) {
- return Identifier::FutureStrictReserved();
- } else if (scanner()->current_token() == Token::YIELD) {
- return Identifier::Yield();
- }
- if (scanner()->is_literal_ascii()) {
- // Detect strict-mode poison words.
- if (scanner()->literal_length() == 4 &&
- !strncmp(scanner()->literal_ascii_string().start(), "eval", 4)) {
- return Identifier::Eval();
- }
- if (scanner()->literal_length() == 9 &&
- !strncmp(scanner()->literal_ascii_string().start(), "arguments", 9)) {
- return Identifier::Arguments();
- }
- }
- return Identifier::Default();
-}
-
-
-// Parses an identifier that is valid for the current scope, in particular it
-// fails on strict mode future reserved keywords in a strict scope. If
-// allow_eval_or_arguments is kAllowEvalOrArguments, we allow "eval" or
-// "arguments" as identifier even in strict mode (this is needed in cases like
-// "var foo = eval;").
-PreParser::Identifier PreParser::ParseIdentifier(
- AllowEvalOrArgumentsAsIdentifier allow_eval_or_arguments,
- bool* ok) {
- Token::Value next = Next();
- if (next == Token::IDENTIFIER) {
- PreParser::Identifier name = GetIdentifierSymbol();
- if (allow_eval_or_arguments == kDontAllowEvalOrArguments &&
- !scope_->is_classic_mode() && name.IsEvalOrArguments()) {
- ReportMessageAt(scanner()->location(), "strict_eval_arguments", NULL);
- *ok = false;
- }
- return name;
- } else if (scope_->is_classic_mode() &&
- (next == Token::FUTURE_STRICT_RESERVED_WORD ||
- (next == Token::YIELD && !scope_->is_generator()))) {
- return GetIdentifierSymbol();
- } else {
- ReportUnexpectedToken(next);
- *ok = false;
- return Identifier::Default();
- }
-}
-
-
-// Parses and identifier or a strict mode future reserved word, and indicate
-// whether it is strict mode future reserved.
-PreParser::Identifier PreParser::ParseIdentifierOrStrictReservedWord(
- bool* is_strict_reserved, bool* ok) {
- Token::Value next = Next();
- if (next == Token::IDENTIFIER) {
- *is_strict_reserved = false;
- } else if (next == Token::FUTURE_STRICT_RESERVED_WORD ||
- (next == Token::YIELD && !scope_->is_generator())) {
- *is_strict_reserved = true;
- } else {
- ReportUnexpectedToken(next);
- *ok = false;
- return Identifier::Default();
- }
- return GetIdentifierSymbol();
-}
-
-
-PreParser::Identifier PreParser::ParseIdentifierName(bool* ok) {
- Token::Value next = Next();
- if (next != Token::IDENTIFIER &&
- next != Token::FUTURE_RESERVED_WORD &&
- next != Token::FUTURE_STRICT_RESERVED_WORD &&
- !Token::IsKeyword(next)) {
- ReportUnexpectedToken(next);
- *ok = false;
- return Identifier::Default();
- }
- return GetIdentifierSymbol();
-}
-
-#undef CHECK_OK
-
-
-// This function reads an identifier and determines whether or not it
-// is 'get' or 'set'.
-PreParser::Identifier PreParser::ParseIdentifierNameOrGetOrSet(bool* is_get,
- bool* is_set,
- bool* ok) {
- Identifier result = ParseIdentifierName(ok);
- if (!*ok) return Identifier::Default();
- if (scanner()->is_literal_ascii() &&
- scanner()->literal_length() == 3) {
- const char* token = scanner()->literal_ascii_string().start();
- *is_get = strncmp(token, "get", 3) == 0;
- *is_set = !*is_get && strncmp(token, "set", 3) == 0;
- }
- return result;
-}
-
-
-void PreParser::ObjectLiteralChecker::CheckProperty(Token::Value property,
- PropertyKind type,
- bool* ok) {
- int old;
- if (property == Token::NUMBER) {
- old = finder_.AddNumber(scanner()->literal_ascii_string(), type);
- } else if (scanner()->is_literal_ascii()) {
- old = finder_.AddAsciiSymbol(scanner()->literal_ascii_string(), type);
- } else {
- old = finder_.AddUtf16Symbol(scanner()->literal_utf16_string(), type);
- }
- PropertyKind old_type = static_cast<PropertyKind>(old);
- if (HasConflict(old_type, type)) {
- if (IsDataDataConflict(old_type, type)) {
- // Both are data properties.
- if (language_mode_ == CLASSIC_MODE) return;
- parser()->ReportMessageAt(scanner()->location(),
- "strict_duplicate_property");
- } else if (IsDataAccessorConflict(old_type, type)) {
- // Both a data and an accessor property with the same name.
- parser()->ReportMessageAt(scanner()->location(),
- "accessor_data_property");
- } else {
- ASSERT(IsAccessorAccessorConflict(old_type, type));
- // Both accessors of the same type.
- parser()->ReportMessageAt(scanner()->location(),
- "accessor_get_set");
- }
- *ok = false;
- }
-}
-
} } // v8::internal
diff --git a/deps/v8/src/preparser.h b/deps/v8/src/preparser.h
index bcaab743e5..080b772873 100644
--- a/deps/v8/src/preparser.h
+++ b/deps/v8/src/preparser.h
@@ -28,26 +28,84 @@
#ifndef V8_PREPARSER_H
#define V8_PREPARSER_H
+#include "func-name-inferrer.h"
#include "hashmap.h"
+#include "scopes.h"
#include "token.h"
#include "scanner.h"
+#include "v8.h"
namespace v8 {
namespace internal {
-// Common base class shared between parser and pre-parser.
-class ParserBase {
+// Common base class shared between parser and pre-parser. Traits encapsulate
+// the differences between Parser and PreParser:
+
+// - Return types: For example, Parser functions return Expression* and
+// PreParser functions return PreParserExpression.
+
+// - Creating parse tree nodes: Parser generates an AST during the recursive
+// descent. PreParser doesn't create a tree. Instead, it passes around minimal
+// data objects (PreParserExpression, PreParserIdentifier etc.) which contain
+// just enough data for the upper layer functions. PreParserFactory is
+// responsible for creating these dummy objects. It provides a similar kind of
+// interface as AstNodeFactory, so ParserBase doesn't need to care which one is
+// used.
+
+// - Miscellaneous other tasks interleaved with the recursive descent. For
+// example, Parser keeps track of which function literals should be marked as
+// pretenured, and PreParser doesn't care.
+
+// The traits are expected to contain the following typedefs:
+// struct Traits {
+// // In particular...
+// struct Type {
+// // Used by FunctionState and BlockState.
+// typedef Scope;
+// typedef GeneratorVariable;
+// typedef Zone;
+// // Return types for traversing functions.
+// typedef Identifier;
+// typedef Expression;
+// typedef FunctionLiteral;
+// typedef ObjectLiteralProperty;
+// typedef Literal;
+// typedef ExpressionList;
+// typedef PropertyList;
+// // For constructing objects returned by the traversing functions.
+// typedef Factory;
+// };
+// // ...
+// };
+
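A minimal toy sketch of the pattern described above (illustration only, not part of this patch; every name in it is hypothetical). The base class derives from its Traits parameter, so a call such as this->EmptyExpression() resolves at compile time against whatever the concrete traits type supplies, with no virtual dispatch:

    #include <string>

    struct ToyTraits {
      struct Type {
        typedef std::string Expression;  // stand-in for PreParserExpression etc.
      };
      Type::Expression EmptyExpression() { return std::string(); }
    };

    template <typename Traits>
    class ToyParserBase : public Traits {
     public:
      typename Traits::Type::Expression ParsePrimary() {
        // Shared logic; the return type and the "null" value come from Traits.
        return this->EmptyExpression();
      }
    };

    // ToyParserBase<ToyTraits>().ParsePrimary() returns an empty std::string.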
+template <typename Traits>
+class ParserBase : public Traits {
public:
- ParserBase(Scanner* scanner, uintptr_t stack_limit)
- : scanner_(scanner),
+ // Shorten type names defined by Traits.
+ typedef typename Traits::Type::Expression ExpressionT;
+ typedef typename Traits::Type::Identifier IdentifierT;
+
+ ParserBase(Scanner* scanner, uintptr_t stack_limit,
+ v8::Extension* extension,
+ ParserRecorder* log,
+ typename Traits::Type::Zone* zone,
+ typename Traits::Type::Parser this_object)
+ : Traits(this_object),
+ parenthesized_function_(false),
+ scope_(NULL),
+ function_state_(NULL),
+ extension_(extension),
+ fni_(NULL),
+ log_(log),
+ mode_(PARSE_EAGERLY), // Lazy mode must be set explicitly.
+ scanner_(scanner),
stack_limit_(stack_limit),
stack_overflow_(false),
allow_lazy_(false),
allow_natives_syntax_(false),
allow_generators_(false),
- allow_for_of_(false) { }
- // TODO(mstarzinger): Only virtual until message reporting has been unified.
- virtual ~ParserBase() { }
+ allow_for_of_(false),
+ zone_(zone) { }
// Getters that indicate whether certain syntactical constructs are
// allowed to be parsed by this instance of the parser.
@@ -81,13 +139,125 @@ class ParserBase {
kDontAllowEvalOrArguments
};
+ enum Mode {
+ PARSE_LAZILY,
+ PARSE_EAGERLY
+ };
+
+ // ---------------------------------------------------------------------------
+ // FunctionState and BlockState together implement the parser's scope stack.
+ // The parser's current scope is in scope_. BlockState and FunctionState
+ // constructors push on the scope stack and the destructors pop. They are also
+ // used to hold the parser's per-function and per-block state.
+ class BlockState BASE_EMBEDDED {
+ public:
+ BlockState(typename Traits::Type::Scope** scope_stack,
+ typename Traits::Type::Scope* scope)
+ : scope_stack_(scope_stack),
+ outer_scope_(*scope_stack),
+ scope_(scope) {
+ *scope_stack_ = scope_;
+ }
+ ~BlockState() { *scope_stack_ = outer_scope_; }
+
+ private:
+ typename Traits::Type::Scope** scope_stack_;
+ typename Traits::Type::Scope* outer_scope_;
+ typename Traits::Type::Scope* scope_;
+ };
+
+ class FunctionState BASE_EMBEDDED {
+ public:
+ FunctionState(
+ FunctionState** function_state_stack,
+ typename Traits::Type::Scope** scope_stack,
+ typename Traits::Type::Scope* scope,
+ typename Traits::Type::Zone* zone = NULL);
+ ~FunctionState();
+
+ int NextMaterializedLiteralIndex() {
+ return next_materialized_literal_index_++;
+ }
+ int materialized_literal_count() {
+ return next_materialized_literal_index_ - JSFunction::kLiteralsPrefixSize;
+ }
+
+ int NextHandlerIndex() { return next_handler_index_++; }
+ int handler_count() { return next_handler_index_; }
+
+ void AddProperty() { expected_property_count_++; }
+ int expected_property_count() { return expected_property_count_; }
+
+ void set_is_generator(bool is_generator) { is_generator_ = is_generator; }
+ bool is_generator() const { return is_generator_; }
+
+ void set_generator_object_variable(
+ typename Traits::Type::GeneratorVariable* variable) {
+ ASSERT(variable != NULL);
+ ASSERT(!is_generator());
+ generator_object_variable_ = variable;
+ is_generator_ = true;
+ }
+ typename Traits::Type::GeneratorVariable* generator_object_variable()
+ const {
+ return generator_object_variable_;
+ }
+
+ typename Traits::Type::Factory* factory() { return &factory_; }
+
+ private:
+ // Used to assign an index to each literal that needs materialization in
+ // the function. Includes regexp literals, and boilerplate for object and
+ // array literals.
+ int next_materialized_literal_index_;
+
+ // Used to assign a per-function index to try/catch handlers.
+ int next_handler_index_;
+
+ // Properties count estimation.
+ int expected_property_count_;
+
+ // Whether the function is a generator.
+ bool is_generator_;
+ // For generators, this variable may hold the generator object. This
+ // variable is used by yield expressions and return statements. It is not
+ // necessary for generator functions to have this variable set.
+ Variable* generator_object_variable_;
+
+ FunctionState** function_state_stack_;
+ FunctionState* outer_function_state_;
+ typename Traits::Type::Scope** scope_stack_;
+ typename Traits::Type::Scope* outer_scope_;
+ Isolate* isolate_; // Only used by ParserTraits.
+ int saved_ast_node_id_; // Only used by ParserTraits.
+ typename Traits::Type::Factory factory_;
+
+ friend class ParserTraits;
+ };
+
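For illustration only (not part of this patch): how the two RAII helpers above are meant to be used together when entering a function body. The sketch assumes the pre-parser instantiation, where Traits::Type::Scope is PreParserScope (defined later in this header), and assumes the usual FUNCTION_SCOPE enumerator of ScopeType:

    // Hypothetical caller inside a Parse...() member function:
    {
      PreParserScope function_scope(scope_, FUNCTION_SCOPE);
      FunctionState function_state(&function_state_, &scope_, &function_scope);
      // scope_ and function_state_ now point at the new entries while the
      // function body is parsed...
    }  // Destructors pop both stacks, restoring the outer scope and state.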
+ class ParsingModeScope BASE_EMBEDDED {
+ public:
+ ParsingModeScope(ParserBase* parser, Mode mode)
+ : parser_(parser),
+ old_mode_(parser->mode()) {
+ parser_->mode_ = mode;
+ }
+ ~ParsingModeScope() {
+ parser_->mode_ = old_mode_;
+ }
+
+ private:
+ ParserBase* parser_;
+ Mode old_mode_;
+ };
+
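Similarly, as a sketch (not part of this patch), a caller that needs to force a particular parsing mode for a region would write:

    {
      ParsingModeScope parsing_mode(this, PARSE_EAGERLY);
      // mode() == PARSE_EAGERLY for the rest of this block...
    }  // The destructor restores the previous mode.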
Scanner* scanner() const { return scanner_; }
int position() { return scanner_->location().beg_pos; }
int peek_position() { return scanner_->peek_location().beg_pos; }
bool stack_overflow() const { return stack_overflow_; }
void set_stack_overflow() { stack_overflow_ = true; }
-
- virtual bool is_classic_mode() = 0;
+ Mode mode() const { return mode_; }
+ typename Traits::Type::Zone* zone() const { return zone_; }
INLINE(Token::Value peek()) {
if (stack_overflow_) return Token::ILLEGAL;
@@ -132,25 +302,128 @@ class ParserBase {
}
}
- bool peek_any_identifier();
- void ExpectSemicolon(bool* ok);
- bool CheckContextualKeyword(Vector<const char> keyword);
- void ExpectContextualKeyword(Vector<const char> keyword, bool* ok);
+ void ExpectSemicolon(bool* ok) {
+ // Check for automatic semicolon insertion according to
+ // the rules given in ECMA-262, section 7.9, page 21.
+ Token::Value tok = peek();
+ if (tok == Token::SEMICOLON) {
+ Next();
+ return;
+ }
+ if (scanner()->HasAnyLineTerminatorBeforeNext() ||
+ tok == Token::RBRACE ||
+ tok == Token::EOS) {
+ return;
+ }
+ Expect(Token::SEMICOLON, ok);
+ }
- // Strict mode octal literal validation.
- void CheckOctalLiteral(int beg_pos, int end_pos, bool* ok);
+ bool peek_any_identifier() {
+ Token::Value next = peek();
+ return next == Token::IDENTIFIER ||
+ next == Token::FUTURE_RESERVED_WORD ||
+ next == Token::FUTURE_STRICT_RESERVED_WORD ||
+ next == Token::YIELD;
+ }
+
+ bool CheckContextualKeyword(Vector<const char> keyword) {
+ if (peek() == Token::IDENTIFIER &&
+ scanner()->is_next_contextual_keyword(keyword)) {
+ Consume(Token::IDENTIFIER);
+ return true;
+ }
+ return false;
+ }
+
+ void ExpectContextualKeyword(Vector<const char> keyword, bool* ok) {
+ Expect(Token::IDENTIFIER, ok);
+ if (!*ok) return;
+ if (!scanner()->is_literal_contextual_keyword(keyword)) {
+ ReportUnexpectedToken(scanner()->current_token());
+ *ok = false;
+ }
+ }
+
+ // Checks whether an octal literal was last seen between beg_pos and end_pos.
+ // If so, reports an error. Only called for strict mode.
+ void CheckOctalLiteral(int beg_pos, int end_pos, bool* ok) {
+ Scanner::Location octal = scanner()->octal_position();
+ if (octal.IsValid() && beg_pos <= octal.beg_pos &&
+ octal.end_pos <= end_pos) {
+ ReportMessageAt(octal, "strict_octal_literal");
+ scanner()->clear_octal_position();
+ *ok = false;
+ }
+ }
// Determine precedence of given token.
- static int Precedence(Token::Value token, bool accept_IN);
+ static int Precedence(Token::Value token, bool accept_IN) {
+ if (token == Token::IN && !accept_IN)
+ return 0; // 0 precedence will terminate binary expression parsing
+ return Token::Precedence(token);
+ }
+
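A concrete consequence of the accept_IN special case above (illustration only, not part of this patch):

    // Precedence(Token::IN, false) == 0, so ParseBinaryExpression stops before
    // an 'in' operator -- this is what keeps the head of 'for (x in y) ...'
    // from being swallowed as a binary expression. With accept_IN == true,
    // 'in' gets its ordinary precedence from Token::Precedence().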
+ typename Traits::Type::Factory* factory() {
+ return function_state_->factory();
+ }
+
+ StrictMode strict_mode() { return scope_->strict_mode(); }
+ bool is_generator() const { return function_state_->is_generator(); }
// Report syntax errors.
- void ReportUnexpectedToken(Token::Value token);
- void ReportMessageAt(Scanner::Location location, const char* type) {
- ReportMessageAt(location, type, Vector<const char*>::empty());
+ void ReportMessage(const char* message, Vector<const char*> args,
+ bool is_reference_error = false) {
+ Scanner::Location source_location = scanner()->location();
+ Traits::ReportMessageAt(source_location, message, args, is_reference_error);
}
- virtual void ReportMessageAt(Scanner::Location source_location,
- const char* message,
- Vector<const char*> args) = 0;
+
+ void ReportMessageAt(Scanner::Location location, const char* message,
+ bool is_reference_error = false) {
+ Traits::ReportMessageAt(location, message, Vector<const char*>::empty(),
+ is_reference_error);
+ }
+
+ void ReportUnexpectedToken(Token::Value token);
+
+ // Recursive descent functions:
+
+ // Parses an identifier that is valid for the current scope, in particular it
+ // fails on strict mode future reserved keywords in a strict scope. If
+ // allow_eval_or_arguments is kAllowEvalOrArguments, we allow "eval" or
+ // "arguments" as identifier even in strict mode (this is needed in cases like
+ // "var foo = eval;").
+ IdentifierT ParseIdentifier(
+ AllowEvalOrArgumentsAsIdentifier,
+ bool* ok);
+ // Parses an identifier or a strict mode future reserved word, and indicates
+ // whether it is strict mode future reserved.
+ IdentifierT ParseIdentifierOrStrictReservedWord(
+ bool* is_strict_reserved,
+ bool* ok);
+ IdentifierT ParseIdentifierName(bool* ok);
+ // Parses an identifier and determines whether or not it is 'get' or 'set'.
+ IdentifierT ParseIdentifierNameOrGetOrSet(bool* is_get,
+ bool* is_set,
+ bool* ok);
+
+ ExpressionT ParseRegExpLiteral(bool seen_equal, bool* ok);
+
+ ExpressionT ParsePrimaryExpression(bool* ok);
+ ExpressionT ParseExpression(bool accept_IN, bool* ok);
+ ExpressionT ParseArrayLiteral(bool* ok);
+ ExpressionT ParseObjectLiteral(bool* ok);
+ typename Traits::Type::ExpressionList ParseArguments(bool* ok);
+ ExpressionT ParseAssignmentExpression(bool accept_IN, bool* ok);
+ ExpressionT ParseYieldExpression(bool* ok);
+ ExpressionT ParseConditionalExpression(bool accept_IN, bool* ok);
+ ExpressionT ParseBinaryExpression(int prec, bool accept_IN, bool* ok);
+ ExpressionT ParseUnaryExpression(bool* ok);
+ ExpressionT ParsePostfixExpression(bool* ok);
+ ExpressionT ParseLeftHandSideExpression(bool* ok);
+ ExpressionT ParseMemberWithNewPrefixesExpression(bool* ok);
+ ExpressionT ParseMemberExpression(bool* ok);
+ ExpressionT ParseMemberExpressionContinuation(ExpressionT expression,
+ bool* ok);
// Used to detect duplicates in object literals. Each of the values
// kGetterProperty, kSetterProperty and kValueProperty represents
@@ -176,10 +449,10 @@ class ParserBase {
// Validation per ECMA 262 - 11.1.5 "Object Initialiser".
class ObjectLiteralChecker {
public:
- ObjectLiteralChecker(ParserBase* parser, LanguageMode mode)
+ ObjectLiteralChecker(ParserBase* parser, StrictMode strict_mode)
: parser_(parser),
finder_(scanner()->unicode_cache()),
- language_mode_(mode) { }
+ strict_mode_(strict_mode) { }
void CheckProperty(Token::Value property, PropertyKind type, bool* ok);
@@ -203,9 +476,22 @@ class ParserBase {
ParserBase* parser_;
DuplicateFinder finder_;
- LanguageMode language_mode_;
+ StrictMode strict_mode_;
};
+ // If true, the next (and immediately following) function literal is
+ // preceded by a parenthesis.
+ // Heuristically that means that the function will be called immediately,
+ // so never lazily compile it.
+ bool parenthesized_function_;
+
+ typename Traits::Type::Scope* scope_; // Scope stack.
+ FunctionState* function_state_; // Function state stack.
+ v8::Extension* extension_;
+ FuncNameInferrer* fni_;
+ ParserRecorder* log_;
+ Mode mode_;
+
private:
Scanner* scanner_;
uintptr_t stack_limit_;
@@ -215,6 +501,490 @@ class ParserBase {
bool allow_natives_syntax_;
bool allow_generators_;
bool allow_for_of_;
+
+ typename Traits::Type::Zone* zone_; // Only used by Parser.
+};
+
+
+class PreParserIdentifier {
+ public:
+ PreParserIdentifier() : type_(kUnknownIdentifier) {}
+ static PreParserIdentifier Default() {
+ return PreParserIdentifier(kUnknownIdentifier);
+ }
+ static PreParserIdentifier Eval() {
+ return PreParserIdentifier(kEvalIdentifier);
+ }
+ static PreParserIdentifier Arguments() {
+ return PreParserIdentifier(kArgumentsIdentifier);
+ }
+ static PreParserIdentifier FutureReserved() {
+ return PreParserIdentifier(kFutureReservedIdentifier);
+ }
+ static PreParserIdentifier FutureStrictReserved() {
+ return PreParserIdentifier(kFutureStrictReservedIdentifier);
+ }
+ static PreParserIdentifier Yield() {
+ return PreParserIdentifier(kYieldIdentifier);
+ }
+ bool IsEval() { return type_ == kEvalIdentifier; }
+ bool IsArguments() { return type_ == kArgumentsIdentifier; }
+ bool IsEvalOrArguments() { return type_ >= kEvalIdentifier; }
+ bool IsYield() { return type_ == kYieldIdentifier; }
+ bool IsFutureReserved() { return type_ == kFutureReservedIdentifier; }
+ bool IsFutureStrictReserved() {
+ return type_ == kFutureStrictReservedIdentifier;
+ }
+ bool IsValidStrictVariable() { return type_ == kUnknownIdentifier; }
+
+ private:
+ enum Type {
+ kUnknownIdentifier,
+ kFutureReservedIdentifier,
+ kFutureStrictReservedIdentifier,
+ kYieldIdentifier,
+ kEvalIdentifier,
+ kArgumentsIdentifier
+ };
+ explicit PreParserIdentifier(Type type) : type_(type) {}
+ Type type_;
+
+ friend class PreParserExpression;
+};
+
+
+// Bits 0 and 1 are used to identify the type of expression:
+// If bit 0 is set, it's an identifier.
+// If bit 1 is set, it's a string literal.
+// If neither is set, it's no particular type, and having both set is not
+// used yet.
+class PreParserExpression {
+ public:
+ static PreParserExpression Default() {
+ return PreParserExpression(kUnknownExpression);
+ }
+
+ static PreParserExpression FromIdentifier(PreParserIdentifier id) {
+ return PreParserExpression(kIdentifierFlag |
+ (id.type_ << kIdentifierShift));
+ }
+
+ static PreParserExpression StringLiteral() {
+ return PreParserExpression(kUnknownStringLiteral);
+ }
+
+ static PreParserExpression UseStrictStringLiteral() {
+ return PreParserExpression(kUseStrictString);
+ }
+
+ static PreParserExpression This() {
+ return PreParserExpression(kThisExpression);
+ }
+
+ static PreParserExpression ThisProperty() {
+ return PreParserExpression(kThisPropertyExpression);
+ }
+
+ static PreParserExpression Property() {
+ return PreParserExpression(kPropertyExpression);
+ }
+
+ bool IsIdentifier() { return (code_ & kIdentifierFlag) != 0; }
+
+ // Only works correctly if it is actually an identifier expression.
+ PreParserIdentifier AsIdentifier() {
+ return PreParserIdentifier(
+ static_cast<PreParserIdentifier::Type>(code_ >> kIdentifierShift));
+ }
+
+ bool IsStringLiteral() { return (code_ & kStringLiteralFlag) != 0; }
+
+ bool IsUseStrictLiteral() {
+ return (code_ & kStringLiteralMask) == kUseStrictString;
+ }
+
+ bool IsThis() { return code_ == kThisExpression; }
+
+ bool IsThisProperty() { return code_ == kThisPropertyExpression; }
+
+ bool IsProperty() {
+ return code_ == kPropertyExpression || code_ == kThisPropertyExpression;
+ }
+
+ bool IsValidLeftHandSide() {
+ return IsIdentifier() || IsProperty();
+ }
+
+ // At the moment PreParser doesn't track these expression types.
+ bool IsFunctionLiteral() const { return false; }
+ bool IsCall() const { return false; }
+ bool IsCallNew() const { return false; }
+
+ PreParserExpression AsFunctionLiteral() { return *this; }
+
+ // Dummy implementation for making expression->somefunc() work in both Parser
+ // and PreParser.
+ PreParserExpression* operator->() { return this; }
+
+ // More dummy implementations of things PreParser doesn't need to track:
+ void set_index(int index) {} // For YieldExpressions
+ void set_parenthesized() {}
+
+ private:
+ // Least significant 2 bits are used as flags. Bits 0 and 1 represent
+ // identifiers or string literals, and are mutually exclusive, but can both
+ // be absent. If the expression is an identifier or a string literal, the
+ // other bits describe the type (see PreParserIdentifier::Type and string
+ // literal constants below).
+ enum {
+ kUnknownExpression = 0,
+ // Identifiers
+ kIdentifierFlag = 1, // Used to detect labels.
+ kIdentifierShift = 3,
+
+ kStringLiteralFlag = 2, // Used to detect directive prologue.
+ kUnknownStringLiteral = kStringLiteralFlag,
+ kUseStrictString = kStringLiteralFlag | 8,
+ kStringLiteralMask = kUseStrictString,
+
+ // Below here applies if neither identifier nor string literal. Reserve the
+ // 2 least significant bits for flags.
+ kThisExpression = 1 << 2,
+ kThisPropertyExpression = 2 << 2,
+ kPropertyExpression = 3 << 2
+ };
+
+ explicit PreParserExpression(int expression_code) : code_(expression_code) {}
+
+ int code_;
+};
+
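Illustration only (not part of this patch): the flag/shift encoding above lets identifier information round-trip through a PreParserExpression, using only the public helpers shown in the class.

    PreParserExpression e =
        PreParserExpression::FromIdentifier(PreParserIdentifier::Eval());
    // e.IsIdentifier()                       -> true
    // e.AsIdentifier().IsEvalOrArguments()   -> true
    // PreParserExpression::UseStrictStringLiteral().IsUseStrictLiteral() -> true
    // PreParserExpression::ThisProperty().IsProperty()                   -> true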
+
+// PreParserExpressionList doesn't actually store the expressions because
+// PreParser doesn't need to.
+class PreParserExpressionList {
+ public:
+ // These functions make list->Add(some_expression) work (and do nothing).
+ PreParserExpressionList() : length_(0) {}
+ PreParserExpressionList* operator->() { return this; }
+ void Add(PreParserExpression, void*) { ++length_; }
+ int length() const { return length_; }
+ private:
+ int length_;
+};
+
+
+class PreParserScope {
+ public:
+ explicit PreParserScope(PreParserScope* outer_scope, ScopeType scope_type)
+ : scope_type_(scope_type) {
+ strict_mode_ = outer_scope ? outer_scope->strict_mode() : SLOPPY;
+ }
+
+ ScopeType type() { return scope_type_; }
+ StrictMode strict_mode() const { return strict_mode_; }
+ void SetStrictMode(StrictMode strict_mode) { strict_mode_ = strict_mode; }
+
+ private:
+ ScopeType scope_type_;
+ StrictMode strict_mode_;
+};
+
+
+class PreParserFactory {
+ public:
+ explicit PreParserFactory(void* extra_param) {}
+ PreParserExpression NewLiteral(PreParserIdentifier identifier,
+ int pos) {
+ return PreParserExpression::Default();
+ }
+ PreParserExpression NewNumberLiteral(double number,
+ int pos) {
+ return PreParserExpression::Default();
+ }
+ PreParserExpression NewRegExpLiteral(PreParserIdentifier js_pattern,
+ PreParserIdentifier js_flags,
+ int literal_index,
+ int pos) {
+ return PreParserExpression::Default();
+ }
+ PreParserExpression NewArrayLiteral(PreParserExpressionList values,
+ int literal_index,
+ int pos) {
+ return PreParserExpression::Default();
+ }
+ PreParserExpression NewObjectLiteralProperty(bool is_getter,
+ PreParserExpression value,
+ int pos) {
+ return PreParserExpression::Default();
+ }
+ PreParserExpression NewObjectLiteralProperty(PreParserExpression key,
+ PreParserExpression value) {
+ return PreParserExpression::Default();
+ }
+ PreParserExpression NewObjectLiteral(PreParserExpressionList properties,
+ int literal_index,
+ int boilerplate_properties,
+ bool has_function,
+ int pos) {
+ return PreParserExpression::Default();
+ }
+ PreParserExpression NewVariableProxy(void* generator_variable) {
+ return PreParserExpression::Default();
+ }
+ PreParserExpression NewProperty(PreParserExpression obj,
+ PreParserExpression key,
+ int pos) {
+ if (obj.IsThis()) {
+ return PreParserExpression::ThisProperty();
+ }
+ return PreParserExpression::Property();
+ }
+ PreParserExpression NewUnaryOperation(Token::Value op,
+ PreParserExpression expression,
+ int pos) {
+ return PreParserExpression::Default();
+ }
+ PreParserExpression NewBinaryOperation(Token::Value op,
+ PreParserExpression left,
+ PreParserExpression right, int pos) {
+ return PreParserExpression::Default();
+ }
+ PreParserExpression NewCompareOperation(Token::Value op,
+ PreParserExpression left,
+ PreParserExpression right, int pos) {
+ return PreParserExpression::Default();
+ }
+ PreParserExpression NewAssignment(Token::Value op,
+ PreParserExpression left,
+ PreParserExpression right,
+ int pos) {
+ return PreParserExpression::Default();
+ }
+ PreParserExpression NewYield(PreParserExpression generator_object,
+ PreParserExpression expression,
+ Yield::Kind yield_kind,
+ int pos) {
+ return PreParserExpression::Default();
+ }
+ PreParserExpression NewConditional(PreParserExpression condition,
+ PreParserExpression then_expression,
+ PreParserExpression else_expression,
+ int pos) {
+ return PreParserExpression::Default();
+ }
+ PreParserExpression NewCountOperation(Token::Value op,
+ bool is_prefix,
+ PreParserExpression expression,
+ int pos) {
+ return PreParserExpression::Default();
+ }
+ PreParserExpression NewCall(PreParserExpression expression,
+ PreParserExpressionList arguments,
+ int pos) {
+ return PreParserExpression::Default();
+ }
+ PreParserExpression NewCallNew(PreParserExpression expression,
+ PreParserExpressionList arguments,
+ int pos) {
+ return PreParserExpression::Default();
+ }
+};
+
+
+class PreParser;
+
+class PreParserTraits {
+ public:
+ struct Type {
+ // TODO(marja): To be removed. The Traits object should contain all the data
+ // it needs.
+ typedef PreParser* Parser;
+
+ // Used by FunctionState and BlockState.
+ typedef PreParserScope Scope;
+ // PreParser doesn't need to store generator variables.
+ typedef void GeneratorVariable;
+ // No interaction with Zones.
+ typedef void Zone;
+
+ // Return types for traversing functions.
+ typedef PreParserIdentifier Identifier;
+ typedef PreParserExpression Expression;
+ typedef PreParserExpression YieldExpression;
+ typedef PreParserExpression FunctionLiteral;
+ typedef PreParserExpression ObjectLiteralProperty;
+ typedef PreParserExpression Literal;
+ typedef PreParserExpressionList ExpressionList;
+ typedef PreParserExpressionList PropertyList;
+
+ // For constructing objects returned by the traversing functions.
+ typedef PreParserFactory Factory;
+ };
+
+ explicit PreParserTraits(PreParser* pre_parser) : pre_parser_(pre_parser) {}
+
+ // Custom operations executed when FunctionStates are created and
+ // destructed. (The PreParser doesn't need to do anything.)
+ template<typename FunctionState>
+ static void SetUpFunctionState(FunctionState* function_state, void*) {}
+ template<typename FunctionState>
+ static void TearDownFunctionState(FunctionState* function_state) {}
+
+ // Helper functions for recursive descent.
+ static bool IsEvalOrArguments(PreParserIdentifier identifier) {
+ return identifier.IsEvalOrArguments();
+ }
+
+ // Returns true if the expression is of type "this.foo".
+ static bool IsThisProperty(PreParserExpression expression) {
+ return expression.IsThisProperty();
+ }
+
+ static bool IsIdentifier(PreParserExpression expression) {
+ return expression.IsIdentifier();
+ }
+
+ static bool IsBoilerplateProperty(PreParserExpression property) {
+ // PreParser doesn't count boilerplate properties.
+ return false;
+ }
+
+ static bool IsArrayIndex(PreParserIdentifier string, uint32_t* index) {
+ return false;
+ }
+
+ // Functions for encapsulating the differences between parsing and preparsing;
+ // operations interleaved with the recursive descent.
+ static void PushLiteralName(FuncNameInferrer* fni, PreParserIdentifier id) {
+ // PreParser should not use FuncNameInferrer.
+ UNREACHABLE();
+ }
+ static void PushPropertyName(FuncNameInferrer* fni,
+ PreParserExpression expression) {
+ // PreParser should not use FuncNameInferrer.
+ UNREACHABLE();
+ }
+
+ static void CheckFunctionLiteralInsideTopLevelObjectLiteral(
+ PreParserScope* scope, PreParserExpression value, bool* has_function) {}
+
+ static void CheckAssigningFunctionLiteralToProperty(
+ PreParserExpression left, PreParserExpression right) {}
+
+ // PreParser doesn't need to keep track of eval calls.
+ static void CheckPossibleEvalCall(PreParserExpression expression,
+ PreParserScope* scope) {}
+
+ static PreParserExpression MarkExpressionAsLValue(
+ PreParserExpression expression) {
+ // TODO(marja): To be able to produce the same errors, the preparser needs
+ // to start tracking which expressions are variables and which are lvalues.
+ return expression;
+ }
+
+ // Checks LHS expression for assignment and prefix/postfix increment/decrement
+ // in strict mode.
+ void CheckStrictModeLValue(PreParserExpression expression, bool* ok);
+
+ bool ShortcutNumericLiteralBinaryExpression(PreParserExpression* x,
+ PreParserExpression y,
+ Token::Value op,
+ int pos,
+ PreParserFactory* factory) {
+ return false;
+ }
+
+ PreParserExpression BuildUnaryExpression(PreParserExpression expression,
+ Token::Value op, int pos,
+ PreParserFactory* factory) {
+ return PreParserExpression::Default();
+ }
+
+ // Reporting errors.
+ void ReportMessageAt(Scanner::Location location,
+ const char* message,
+ Vector<const char*> args,
+ bool is_reference_error = false);
+ void ReportMessageAt(Scanner::Location location,
+ const char* type,
+ const char* name_opt,
+ bool is_reference_error = false);
+ void ReportMessageAt(int start_pos,
+ int end_pos,
+ const char* type,
+ const char* name_opt,
+ bool is_reference_error = false);
+
+ // "null" return type creators.
+ static PreParserIdentifier EmptyIdentifier() {
+ return PreParserIdentifier::Default();
+ }
+ static PreParserExpression EmptyExpression() {
+ return PreParserExpression::Default();
+ }
+ static PreParserExpression EmptyLiteral() {
+ return PreParserExpression::Default();
+ }
+ static PreParserExpressionList NullExpressionList() {
+ return PreParserExpressionList();
+ }
+
+ // Odd-ball literal creators.
+ static PreParserExpression GetLiteralTheHole(int position,
+ PreParserFactory* factory) {
+ return PreParserExpression::Default();
+ }
+
+ // Producing data during the recursive descent.
+ PreParserIdentifier GetSymbol(Scanner* scanner);
+ static PreParserIdentifier NextLiteralString(Scanner* scanner,
+ PretenureFlag tenured) {
+ return PreParserIdentifier::Default();
+ }
+
+ static PreParserExpression ThisExpression(PreParserScope* scope,
+ PreParserFactory* factory) {
+ return PreParserExpression::This();
+ }
+
+ static PreParserExpression ExpressionFromLiteral(
+ Token::Value token, int pos, Scanner* scanner,
+ PreParserFactory* factory) {
+ return PreParserExpression::Default();
+ }
+
+ static PreParserExpression ExpressionFromIdentifier(
+ PreParserIdentifier name, int pos, PreParserScope* scope,
+ PreParserFactory* factory) {
+ return PreParserExpression::FromIdentifier(name);
+ }
+
+ PreParserExpression ExpressionFromString(int pos,
+ Scanner* scanner,
+ PreParserFactory* factory = NULL);
+
+ static PreParserExpressionList NewExpressionList(int size, void* zone) {
+ return PreParserExpressionList();
+ }
+
+ static PreParserExpressionList NewPropertyList(int size, void* zone) {
+ return PreParserExpressionList();
+ }
+
+ // Temporary glue; these functions will move to ParserBase.
+ PreParserExpression ParseV8Intrinsic(bool* ok);
+ PreParserExpression ParseFunctionLiteral(
+ PreParserIdentifier name,
+ Scanner::Location function_name_location,
+ bool name_is_strict_reserved,
+ bool is_generator,
+ int function_token_position,
+ FunctionLiteral::FunctionType type,
+ bool* ok);
+
+ private:
+ PreParser* pre_parser_;
};
@@ -230,36 +1000,34 @@ class ParserBase {
// rather it is to speed up properly written and correct programs.
// That means that contextual checks (like a label being declared where
// it is used) are generally omitted.
-class PreParser : public ParserBase {
+class PreParser : public ParserBase<PreParserTraits> {
public:
+ typedef PreParserIdentifier Identifier;
+ typedef PreParserExpression Expression;
+
enum PreParseResult {
kPreParseStackOverflow,
kPreParseSuccess
};
- PreParser(Scanner* scanner,
- ParserRecorder* log,
- uintptr_t stack_limit)
- : ParserBase(scanner, stack_limit),
- log_(log),
- scope_(NULL),
- parenthesized_function_(false) { }
-
- ~PreParser() {}
+ PreParser(Scanner* scanner, ParserRecorder* log, uintptr_t stack_limit)
+ : ParserBase<PreParserTraits>(scanner, stack_limit, NULL, log, NULL,
+ this) {}
// Pre-parse the program from the character stream; returns true on
// success (even if parsing failed, the pre-parse data successfully
// captured the syntax error), and false if a stack-overflow happened
// during parsing.
PreParseResult PreParseProgram() {
- Scope top_scope(&scope_, kTopLevelScope);
+ PreParserScope scope(scope_, GLOBAL_SCOPE);
+ FunctionState top_scope(&function_state_, &scope_, &scope, NULL);
bool ok = true;
int start_position = scanner()->peek_location().beg_pos;
ParseSourceElements(Token::EOS, &ok);
if (stack_overflow()) return kPreParseStackOverflow;
if (!ok) {
ReportUnexpectedToken(scanner()->current_token());
- } else if (!scope_->is_classic_mode()) {
+ } else if (scope_->strict_mode() == STRICT) {
CheckOctalLiteral(start_position, scanner()->location().end_pos, &ok);
}
return kPreParseSuccess;
@@ -273,21 +1041,18 @@ class PreParser : public ParserBase {
// keyword and parameters, and have consumed the initial '{'.
// At return, unless an error occurred, the scanner is positioned before the
// the final '}'.
- PreParseResult PreParseLazyFunction(LanguageMode mode,
+ PreParseResult PreParseLazyFunction(StrictMode strict_mode,
bool is_generator,
ParserRecorder* log);
private:
+ friend class PreParserTraits;
+
// These types form an algebra over syntactic categories that is just
// rich enough to let us recognize and propagate the constructs that
// are either being counted in the preparser data, or is important
// to throw the correct syntax error exceptions.
- enum ScopeType {
- kTopLevelScope,
- kFunctionScope
- };
-
enum VariableDeclarationContext {
kSourceElement,
kStatement,
@@ -300,142 +1065,6 @@ class PreParser : public ParserBase {
kHasNoInitializers
};
- class Expression;
-
- class Identifier {
- public:
- static Identifier Default() {
- return Identifier(kUnknownIdentifier);
- }
- static Identifier Eval() {
- return Identifier(kEvalIdentifier);
- }
- static Identifier Arguments() {
- return Identifier(kArgumentsIdentifier);
- }
- static Identifier FutureReserved() {
- return Identifier(kFutureReservedIdentifier);
- }
- static Identifier FutureStrictReserved() {
- return Identifier(kFutureStrictReservedIdentifier);
- }
- static Identifier Yield() {
- return Identifier(kYieldIdentifier);
- }
- bool IsEval() { return type_ == kEvalIdentifier; }
- bool IsArguments() { return type_ == kArgumentsIdentifier; }
- bool IsEvalOrArguments() { return type_ >= kEvalIdentifier; }
- bool IsYield() { return type_ == kYieldIdentifier; }
- bool IsFutureReserved() { return type_ == kFutureReservedIdentifier; }
- bool IsFutureStrictReserved() {
- return type_ == kFutureStrictReservedIdentifier;
- }
- bool IsValidStrictVariable() { return type_ == kUnknownIdentifier; }
-
- private:
- enum Type {
- kUnknownIdentifier,
- kFutureReservedIdentifier,
- kFutureStrictReservedIdentifier,
- kYieldIdentifier,
- kEvalIdentifier,
- kArgumentsIdentifier
- };
- explicit Identifier(Type type) : type_(type) { }
- Type type_;
-
- friend class Expression;
- };
-
- // Bits 0 and 1 are used to identify the type of expression:
- // If bit 0 is set, it's an identifier.
- // if bit 1 is set, it's a string literal.
- // If neither is set, it's no particular type, and both set isn't
- // use yet.
- class Expression {
- public:
- static Expression Default() {
- return Expression(kUnknownExpression);
- }
-
- static Expression FromIdentifier(Identifier id) {
- return Expression(kIdentifierFlag | (id.type_ << kIdentifierShift));
- }
-
- static Expression StringLiteral() {
- return Expression(kUnknownStringLiteral);
- }
-
- static Expression UseStrictStringLiteral() {
- return Expression(kUseStrictString);
- }
-
- static Expression This() {
- return Expression(kThisExpression);
- }
-
- static Expression ThisProperty() {
- return Expression(kThisPropertyExpression);
- }
-
- static Expression StrictFunction() {
- return Expression(kStrictFunctionExpression);
- }
-
- bool IsIdentifier() {
- return (code_ & kIdentifierFlag) != 0;
- }
-
- // Only works corretly if it is actually an identifier expression.
- PreParser::Identifier AsIdentifier() {
- return PreParser::Identifier(
- static_cast<PreParser::Identifier::Type>(code_ >> kIdentifierShift));
- }
-
- bool IsStringLiteral() { return (code_ & kStringLiteralFlag) != 0; }
-
- bool IsUseStrictLiteral() {
- return (code_ & kStringLiteralMask) == kUseStrictString;
- }
-
- bool IsThis() {
- return code_ == kThisExpression;
- }
-
- bool IsThisProperty() {
- return code_ == kThisPropertyExpression;
- }
-
- bool IsStrictFunction() {
- return code_ == kStrictFunctionExpression;
- }
-
- private:
- // First two/three bits are used as flags.
- // Bit 0 and 1 represent identifiers or strings literals, and are
- // mutually exclusive, but can both be absent.
- enum {
- kUnknownExpression = 0,
- // Identifiers
- kIdentifierFlag = 1, // Used to detect labels.
- kIdentifierShift = 3,
-
- kStringLiteralFlag = 2, // Used to detect directive prologue.
- kUnknownStringLiteral = kStringLiteralFlag,
- kUseStrictString = kStringLiteralFlag | 8,
- kStringLiteralMask = kUseStrictString,
-
- // Below here applies if neither identifier nor string literal.
- kThisExpression = 4,
- kThisPropertyExpression = 8,
- kStrictFunctionExpression = 12
- };
-
- explicit Expression(int expression_code) : code_(expression_code) { }
-
- int code_;
- };
-
class Statement {
public:
static Statement Default() {
@@ -487,86 +1116,6 @@ class PreParser : public ParserBase {
kUnknownSourceElements
};
- typedef int Arguments;
-
- class Scope {
- public:
- Scope(Scope** variable, ScopeType type)
- : variable_(variable),
- prev_(*variable),
- type_(type),
- materialized_literal_count_(0),
- expected_properties_(0),
- with_nesting_count_(0),
- language_mode_(
- (prev_ != NULL) ? prev_->language_mode() : CLASSIC_MODE),
- is_generator_(false) {
- *variable = this;
- }
- ~Scope() { *variable_ = prev_; }
- void NextMaterializedLiteralIndex() { materialized_literal_count_++; }
- void AddProperty() { expected_properties_++; }
- ScopeType type() { return type_; }
- int expected_properties() { return expected_properties_; }
- int materialized_literal_count() { return materialized_literal_count_; }
- bool IsInsideWith() { return with_nesting_count_ != 0; }
- bool is_generator() { return is_generator_; }
- void set_is_generator(bool is_generator) { is_generator_ = is_generator; }
- bool is_classic_mode() {
- return language_mode_ == CLASSIC_MODE;
- }
- LanguageMode language_mode() {
- return language_mode_;
- }
- void set_language_mode(LanguageMode language_mode) {
- language_mode_ = language_mode;
- }
-
- class InsideWith {
- public:
- explicit InsideWith(Scope* scope) : scope_(scope) {
- scope->with_nesting_count_++;
- }
-
- ~InsideWith() { scope_->with_nesting_count_--; }
-
- private:
- Scope* scope_;
- DISALLOW_COPY_AND_ASSIGN(InsideWith);
- };
-
- private:
- Scope** const variable_;
- Scope* const prev_;
- const ScopeType type_;
- int materialized_literal_count_;
- int expected_properties_;
- int with_nesting_count_;
- LanguageMode language_mode_;
- bool is_generator_;
- };
-
- // Report syntax error
- void ReportMessageAt(Scanner::Location location,
- const char* message,
- Vector<const char*> args) {
- ReportMessageAt(location.beg_pos,
- location.end_pos,
- message,
- args.length() > 0 ? args[0] : NULL);
- }
- void ReportMessageAt(Scanner::Location location,
- const char* type,
- const char* name_opt) {
- log_->LogMessage(location.beg_pos, location.end_pos, type, name_opt);
- }
- void ReportMessageAt(int start_pos,
- int end_pos,
- const char* type,
- const char* name_opt) {
- log_->LogMessage(start_pos, end_pos, type, name_opt);
- }
-
// All ParseXXX functions take as the last argument an *ok parameter
// which is set to false if parsing failed; it is unchanged otherwise.
// By making the 'exception handling' explicit, we are forced to check
@@ -595,68 +1144,1015 @@ class PreParser : public ParserBase {
Statement ParseThrowStatement(bool* ok);
Statement ParseTryStatement(bool* ok);
Statement ParseDebuggerStatement(bool* ok);
-
- Expression ParseExpression(bool accept_IN, bool* ok);
- Expression ParseAssignmentExpression(bool accept_IN, bool* ok);
- Expression ParseYieldExpression(bool* ok);
Expression ParseConditionalExpression(bool accept_IN, bool* ok);
- Expression ParseBinaryExpression(int prec, bool accept_IN, bool* ok);
- Expression ParseUnaryExpression(bool* ok);
- Expression ParsePostfixExpression(bool* ok);
- Expression ParseLeftHandSideExpression(bool* ok);
- Expression ParseNewExpression(bool* ok);
- Expression ParseMemberExpression(bool* ok);
- Expression ParseMemberWithNewPrefixesExpression(unsigned new_count, bool* ok);
- Expression ParsePrimaryExpression(bool* ok);
- Expression ParseArrayLiteral(bool* ok);
Expression ParseObjectLiteral(bool* ok);
- Expression ParseRegExpLiteral(bool seen_equal, bool* ok);
Expression ParseV8Intrinsic(bool* ok);
- Arguments ParseArguments(bool* ok);
Expression ParseFunctionLiteral(
Identifier name,
Scanner::Location function_name_location,
bool name_is_strict_reserved,
bool is_generator,
+ int function_token_pos,
+ FunctionLiteral::FunctionType function_type,
bool* ok);
void ParseLazyFunctionLiteralBody(bool* ok);
- Identifier ParseIdentifier(AllowEvalOrArgumentsAsIdentifier, bool* ok);
- Identifier ParseIdentifierOrStrictReservedWord(bool* is_strict_reserved,
- bool* ok);
- Identifier ParseIdentifierName(bool* ok);
- Identifier ParseIdentifierNameOrGetOrSet(bool* is_get,
- bool* is_set,
- bool* ok);
-
// Logs the currently parsed literal as a symbol in the preparser data.
void LogSymbol();
- // Log the currently parsed identifier.
- Identifier GetIdentifierSymbol();
// Log the currently parsed string literal.
Expression GetStringSymbol();
- void set_language_mode(LanguageMode language_mode) {
- scope_->set_language_mode(language_mode);
+ bool CheckInOrOf(bool accept_OF);
+};
+
+template<class Traits>
+ParserBase<Traits>::FunctionState::FunctionState(
+ FunctionState** function_state_stack,
+ typename Traits::Type::Scope** scope_stack,
+ typename Traits::Type::Scope* scope,
+ typename Traits::Type::Zone* extra_param)
+ : next_materialized_literal_index_(JSFunction::kLiteralsPrefixSize),
+ next_handler_index_(0),
+ expected_property_count_(0),
+ is_generator_(false),
+ generator_object_variable_(NULL),
+ function_state_stack_(function_state_stack),
+ outer_function_state_(*function_state_stack),
+ scope_stack_(scope_stack),
+ outer_scope_(*scope_stack),
+ isolate_(NULL),
+ saved_ast_node_id_(0),
+ factory_(extra_param) {
+ *scope_stack_ = scope;
+ *function_state_stack = this;
+ Traits::SetUpFunctionState(this, extra_param);
+}
+
+
+template<class Traits>
+ParserBase<Traits>::FunctionState::~FunctionState() {
+ *scope_stack_ = outer_scope_;
+ *function_state_stack_ = outer_function_state_;
+ Traits::TearDownFunctionState(this);
+}
+
+
+template<class Traits>
+void ParserBase<Traits>::ReportUnexpectedToken(Token::Value token) {
+ Scanner::Location source_location = scanner()->location();
+
+ // Several tokens are treated specially below.
+ switch (token) {
+ case Token::EOS:
+ return ReportMessageAt(source_location, "unexpected_eos");
+ case Token::NUMBER:
+ return ReportMessageAt(source_location, "unexpected_token_number");
+ case Token::STRING:
+ return ReportMessageAt(source_location, "unexpected_token_string");
+ case Token::IDENTIFIER:
+ return ReportMessageAt(source_location, "unexpected_token_identifier");
+ case Token::FUTURE_RESERVED_WORD:
+ return ReportMessageAt(source_location, "unexpected_reserved");
+ case Token::YIELD:
+ case Token::FUTURE_STRICT_RESERVED_WORD:
+ return ReportMessageAt(source_location, strict_mode() == SLOPPY
+ ? "unexpected_token_identifier" : "unexpected_strict_reserved");
+ default:
+ const char* name = Token::String(token);
+ ASSERT(name != NULL);
+ Traits::ReportMessageAt(
+ source_location, "unexpected_token", Vector<const char*>(&name, 1));
+ }
+}
+
+
+template<class Traits>
+typename ParserBase<Traits>::IdentifierT ParserBase<Traits>::ParseIdentifier(
+ AllowEvalOrArgumentsAsIdentifier allow_eval_or_arguments,
+ bool* ok) {
+ Token::Value next = Next();
+ if (next == Token::IDENTIFIER) {
+ IdentifierT name = this->GetSymbol(scanner());
+ if (allow_eval_or_arguments == kDontAllowEvalOrArguments &&
+ strict_mode() == STRICT && this->IsEvalOrArguments(name)) {
+ ReportMessageAt(scanner()->location(), "strict_eval_arguments");
+ *ok = false;
+ }
+ return name;
+ } else if (strict_mode() == SLOPPY &&
+ (next == Token::FUTURE_STRICT_RESERVED_WORD ||
+ (next == Token::YIELD && !is_generator()))) {
+ return this->GetSymbol(scanner());
+ } else {
+ this->ReportUnexpectedToken(next);
+ *ok = false;
+ return Traits::EmptyIdentifier();
+ }
+}
+
+
+template <class Traits>
+typename ParserBase<Traits>::IdentifierT ParserBase<
+ Traits>::ParseIdentifierOrStrictReservedWord(bool* is_strict_reserved,
+ bool* ok) {
+ Token::Value next = Next();
+ if (next == Token::IDENTIFIER) {
+ *is_strict_reserved = false;
+ } else if (next == Token::FUTURE_STRICT_RESERVED_WORD ||
+ (next == Token::YIELD && !this->is_generator())) {
+ *is_strict_reserved = true;
+ } else {
+ ReportUnexpectedToken(next);
+ *ok = false;
+ return Traits::EmptyIdentifier();
+ }
+ return this->GetSymbol(scanner());
+}
+
+
+template <class Traits>
+typename ParserBase<Traits>::IdentifierT
+ParserBase<Traits>::ParseIdentifierName(bool* ok) {
+ Token::Value next = Next();
+ if (next != Token::IDENTIFIER && next != Token::FUTURE_RESERVED_WORD &&
+ next != Token::FUTURE_STRICT_RESERVED_WORD && !Token::IsKeyword(next)) {
+ this->ReportUnexpectedToken(next);
+ *ok = false;
+ return Traits::EmptyIdentifier();
+ }
+ return this->GetSymbol(scanner());
+}
+
+
+template <class Traits>
+typename ParserBase<Traits>::IdentifierT
+ParserBase<Traits>::ParseIdentifierNameOrGetOrSet(bool* is_get,
+ bool* is_set,
+ bool* ok) {
+ IdentifierT result = ParseIdentifierName(ok);
+ if (!*ok) return Traits::EmptyIdentifier();
+ scanner()->IsGetOrSet(is_get, is_set);
+ return result;
+}
+
+
+template <class Traits>
+typename ParserBase<Traits>::ExpressionT ParserBase<Traits>::ParseRegExpLiteral(
+ bool seen_equal, bool* ok) {
+ int pos = peek_position();
+ if (!scanner()->ScanRegExpPattern(seen_equal)) {
+ Next();
+ ReportMessage("unterminated_regexp", Vector<const char*>::empty());
+ *ok = false;
+ return Traits::EmptyExpression();
}
- virtual bool is_classic_mode() {
- return scope_->language_mode() == CLASSIC_MODE;
+ int literal_index = function_state_->NextMaterializedLiteralIndex();
+
+ IdentifierT js_pattern = this->NextLiteralString(scanner(), TENURED);
+ if (!scanner()->ScanRegExpFlags()) {
+ Next();
+ ReportMessageAt(scanner()->location(), "invalid_regexp_flags");
+ *ok = false;
+ return Traits::EmptyExpression();
+ }
+ IdentifierT js_flags = this->NextLiteralString(scanner(), TENURED);
+ Next();
+ return factory()->NewRegExpLiteral(js_pattern, js_flags, literal_index, pos);
+}
+
+
+#define CHECK_OK ok); \
+ if (!*ok) return this->EmptyExpression(); \
+ ((void)0
+#define DUMMY ) // to make indentation work
+#undef DUMMY
+
+// Used in functions where the return type is not ExpressionT.
+#define CHECK_OK_CUSTOM(x) ok); \
+ if (!*ok) return this->x(); \
+ ((void)0
+#define DUMMY ) // to make indentation work
+#undef DUMMY
+
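To make the control flow of the macros above concrete (illustration only, not part of this patch), a call such as

    Expect(Token::LBRACK, CHECK_OK);

expands to

    Expect(Token::LBRACK, ok);
    if (!*ok) return this->EmptyExpression();
    ((void)0);

and CHECK_OK_CUSTOM(NullExpressionList) behaves the same way except that it returns this->NullExpressionList() on failure.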
+template <class Traits>
+typename ParserBase<Traits>::ExpressionT
+ParserBase<Traits>::ParsePrimaryExpression(bool* ok) {
+ // PrimaryExpression ::
+ // 'this'
+ // 'null'
+ // 'true'
+ // 'false'
+ // Identifier
+ // Number
+ // String
+ // ArrayLiteral
+ // ObjectLiteral
+ // RegExpLiteral
+ // '(' Expression ')'
+
+ int pos = peek_position();
+ ExpressionT result = this->EmptyExpression();
+ Token::Value token = peek();
+ switch (token) {
+ case Token::THIS: {
+ Consume(Token::THIS);
+ result = this->ThisExpression(scope_, factory());
+ break;
+ }
+
+ case Token::NULL_LITERAL:
+ case Token::TRUE_LITERAL:
+ case Token::FALSE_LITERAL:
+ case Token::NUMBER:
+ Next();
+ result = this->ExpressionFromLiteral(token, pos, scanner(), factory());
+ break;
+
+ case Token::IDENTIFIER:
+ case Token::YIELD:
+ case Token::FUTURE_STRICT_RESERVED_WORD: {
+ // Using eval or arguments in this context is OK even in strict mode.
+ IdentifierT name = ParseIdentifier(kAllowEvalOrArguments, CHECK_OK);
+ result = this->ExpressionFromIdentifier(name, pos, scope_, factory());
+ break;
+ }
+
+ case Token::STRING: {
+ Consume(Token::STRING);
+ result = this->ExpressionFromString(pos, scanner(), factory());
+ break;
+ }
+
+ case Token::ASSIGN_DIV:
+ result = this->ParseRegExpLiteral(true, CHECK_OK);
+ break;
+
+ case Token::DIV:
+ result = this->ParseRegExpLiteral(false, CHECK_OK);
+ break;
+
+ case Token::LBRACK:
+ result = this->ParseArrayLiteral(CHECK_OK);
+ break;
+
+ case Token::LBRACE:
+ result = this->ParseObjectLiteral(CHECK_OK);
+ break;
+
+ case Token::LPAREN:
+ Consume(Token::LPAREN);
+ // Heuristically try to detect immediately called functions before
+ // seeing the call parentheses.
+ parenthesized_function_ = (peek() == Token::FUNCTION);
+ result = this->ParseExpression(true, CHECK_OK);
+ Expect(Token::RPAREN, CHECK_OK);
+ break;
+
+ case Token::MOD:
+ if (allow_natives_syntax() || extension_ != NULL) {
+ result = this->ParseV8Intrinsic(CHECK_OK);
+ break;
+ }
+ // If we're not allowing special syntax, we fall through to the
+ // default case.
+
+ default: {
+ Next();
+ ReportUnexpectedToken(token);
+ *ok = false;
+ }
}
- bool is_extended_mode() {
- return scope_->language_mode() == EXTENDED_MODE;
+ return result;
+}
+
+// Precedence = 1
+template <class Traits>
+typename ParserBase<Traits>::ExpressionT ParserBase<Traits>::ParseExpression(
+ bool accept_IN, bool* ok) {
+ // Expression ::
+ // AssignmentExpression
+ // Expression ',' AssignmentExpression
+
+ ExpressionT result = this->ParseAssignmentExpression(accept_IN, CHECK_OK);
+ while (peek() == Token::COMMA) {
+ Expect(Token::COMMA, CHECK_OK);
+ int pos = position();
+ ExpressionT right = this->ParseAssignmentExpression(accept_IN, CHECK_OK);
+ result = factory()->NewBinaryOperation(Token::COMMA, result, right, pos);
+ }
+ return result;
+}
+
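For example (illustration only; the pos_* names are just placeholders), the source text a, b, c comes back from the loop above as left-nested comma operations:

    // factory()->NewBinaryOperation(Token::COMMA,
    //     factory()->NewBinaryOperation(Token::COMMA, a, b, pos_of_first_comma),
    //     c, pos_of_second_comma)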
+
+template <class Traits>
+typename ParserBase<Traits>::ExpressionT ParserBase<Traits>::ParseArrayLiteral(
+ bool* ok) {
+ // ArrayLiteral ::
+ // '[' Expression? (',' Expression?)* ']'
+
+ int pos = peek_position();
+ typename Traits::Type::ExpressionList values =
+ this->NewExpressionList(4, zone_);
+ Expect(Token::LBRACK, CHECK_OK);
+ while (peek() != Token::RBRACK) {
+ ExpressionT elem = this->EmptyExpression();
+ if (peek() == Token::COMMA) {
+ elem = this->GetLiteralTheHole(peek_position(), factory());
+ } else {
+ elem = this->ParseAssignmentExpression(true, CHECK_OK);
+ }
+ values->Add(elem, zone_);
+ if (peek() != Token::RBRACK) {
+ Expect(Token::COMMA, CHECK_OK);
+ }
}
+ Expect(Token::RBRACK, CHECK_OK);
- LanguageMode language_mode() { return scope_->language_mode(); }
+ // Update the scope information before the pre-parsing bailout.
+ int literal_index = function_state_->NextMaterializedLiteralIndex();
- bool CheckInOrOf(bool accept_OF);
+ return factory()->NewArrayLiteral(values, literal_index, pos);
+}
+
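Illustration only (not part of this patch): elided elements become "the hole", and a single trailing comma is consumed without adding an element, so:

    // [ , 1 ]  -> values: { hole, 1 }   (length 2)
    // [ 1, ]   -> values: { 1 }         (length 1)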
+
+template <class Traits>
+typename ParserBase<Traits>::ExpressionT ParserBase<Traits>::ParseObjectLiteral(
+ bool* ok) {
+ // ObjectLiteral ::
+ // '{' ((
+ // ((IdentifierName | String | Number) ':' AssignmentExpression) |
+ // (('get' | 'set') (IdentifierName | String | Number) FunctionLiteral)
+ // ) ',')* '}'
+ // (Except that trailing comma is not required and not allowed.)
+
+ int pos = peek_position();
+ typename Traits::Type::PropertyList properties =
+ this->NewPropertyList(4, zone_);
+ int number_of_boilerplate_properties = 0;
+ bool has_function = false;
+
+ ObjectLiteralChecker checker(this, strict_mode());
+
+ Expect(Token::LBRACE, CHECK_OK);
+
+ while (peek() != Token::RBRACE) {
+ if (fni_ != NULL) fni_->Enter();
+
+ typename Traits::Type::Literal key = this->EmptyLiteral();
+ Token::Value next = peek();
+ int next_pos = peek_position();
+
+ switch (next) {
+ case Token::FUTURE_RESERVED_WORD:
+ case Token::FUTURE_STRICT_RESERVED_WORD:
+ case Token::IDENTIFIER: {
+ bool is_getter = false;
+ bool is_setter = false;
+ IdentifierT id =
+ ParseIdentifierNameOrGetOrSet(&is_getter, &is_setter, CHECK_OK);
+ if (fni_ != NULL) this->PushLiteralName(fni_, id);
+
+ if ((is_getter || is_setter) && peek() != Token::COLON) {
+ // Special handling of getter and setter syntax:
+ // { ... , get foo() { ... }, ... , set foo(v) { ... v ... } , ... }
+ // We have already read the "get" or "set" keyword.
+ Token::Value next = Next();
+ if (next != i::Token::IDENTIFIER &&
+ next != i::Token::FUTURE_RESERVED_WORD &&
+ next != i::Token::FUTURE_STRICT_RESERVED_WORD &&
+ next != i::Token::NUMBER &&
+ next != i::Token::STRING &&
+ !Token::IsKeyword(next)) {
+ ReportUnexpectedToken(next);
+ *ok = false;
+ return this->EmptyLiteral();
+ }
+ // Validate the property.
+ PropertyKind type = is_getter ? kGetterProperty : kSetterProperty;
+ checker.CheckProperty(next, type, CHECK_OK);
+ IdentifierT name = this->GetSymbol(scanner_);
+ typename Traits::Type::FunctionLiteral value =
+ this->ParseFunctionLiteral(
+ name, scanner()->location(),
+ false, // reserved words are allowed here
+ false, // not a generator
+ RelocInfo::kNoPosition, FunctionLiteral::ANONYMOUS_EXPRESSION,
+ CHECK_OK);
+ // Allow any number of parameters for compatibility with JSC.
+ // The specification only allows zero parameters for get and one for set.
+ typename Traits::Type::ObjectLiteralProperty property =
+ factory()->NewObjectLiteralProperty(is_getter, value, next_pos);
+ if (this->IsBoilerplateProperty(property)) {
+ number_of_boilerplate_properties++;
+ }
+ properties->Add(property, zone());
+ if (peek() != Token::RBRACE) {
+ // Need {} because of the CHECK_OK macro.
+ Expect(Token::COMMA, CHECK_OK);
+ }
+
+ if (fni_ != NULL) {
+ fni_->Infer();
+ fni_->Leave();
+ }
+ continue; // restart the while
+ }
+ // Failed to parse as get/set property, so it's just a normal property
+ // (which might be called "get" or "set" or something else).
+ key = factory()->NewLiteral(id, next_pos);
+ break;
+ }
+ case Token::STRING: {
+ Consume(Token::STRING);
+ IdentifierT string = this->GetSymbol(scanner_);
+ if (fni_ != NULL) this->PushLiteralName(fni_, string);
+ uint32_t index;
+ if (this->IsArrayIndex(string, &index)) {
+ key = factory()->NewNumberLiteral(index, next_pos);
+ break;
+ }
+ key = factory()->NewLiteral(string, next_pos);
+ break;
+ }
+ case Token::NUMBER: {
+ Consume(Token::NUMBER);
+ key = this->ExpressionFromLiteral(Token::NUMBER, next_pos, scanner_,
+ factory());
+ break;
+ }
+ default:
+ if (Token::IsKeyword(next)) {
+ Consume(next);
+ IdentifierT string = this->GetSymbol(scanner_);
+ key = factory()->NewLiteral(string, next_pos);
+ } else {
+ Token::Value next = Next();
+ ReportUnexpectedToken(next);
+ *ok = false;
+ return this->EmptyLiteral();
+ }
+ }
+
+ // Validate the property
+ checker.CheckProperty(next, kValueProperty, CHECK_OK);
+
+ Expect(Token::COLON, CHECK_OK);
+ ExpressionT value = this->ParseAssignmentExpression(true, CHECK_OK);
+
+ typename Traits::Type::ObjectLiteralProperty property =
+ factory()->NewObjectLiteralProperty(key, value);
+
+ // Mark top-level object literals that contain function literals and
+ // pretenure the literal so it can be added as a constant function
+ // property. (Parser only.)
+ this->CheckFunctionLiteralInsideTopLevelObjectLiteral(scope_, value,
+ &has_function);
+
+ // Count CONSTANT or COMPUTED properties to maintain the enumeration order.
+ if (this->IsBoilerplateProperty(property)) {
+ number_of_boilerplate_properties++;
+ }
+ properties->Add(property, zone());
+
+ // TODO(1240767): Consider allowing trailing comma.
+ if (peek() != Token::RBRACE) {
+ // Need {} because of the CHECK_OK macro.
+ Expect(Token::COMMA, CHECK_OK);
+ }
+
+ if (fni_ != NULL) {
+ fni_->Infer();
+ fni_->Leave();
+ }
+ }
+ Expect(Token::RBRACE, CHECK_OK);
+
+ // Computation of literal_index must happen before the pre-parse bailout.
+ int literal_index = function_state_->NextMaterializedLiteralIndex();
+
+ return factory()->NewObjectLiteral(properties,
+ literal_index,
+ number_of_boilerplate_properties,
+ has_function,
+ pos);
+}
+
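Illustration only (not part of this patch): how the three property shapes handled above map onto the factory calls used in this function:

    // { get foo() {} }   -> NewObjectLiteralProperty(true,  <function literal>, pos)
    // { set foo(v) {} }  -> NewObjectLiteralProperty(false, <function literal>, pos)
    // { foo: 1 }         -> NewObjectLiteralProperty(<literal "foo">, <literal 1>)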
+
+template <class Traits>
+typename Traits::Type::ExpressionList ParserBase<Traits>::ParseArguments(
+ bool* ok) {
+ // Arguments ::
+ // '(' (AssignmentExpression)*[','] ')'
+
+ typename Traits::Type::ExpressionList result =
+ this->NewExpressionList(4, zone_);
+ Expect(Token::LPAREN, CHECK_OK_CUSTOM(NullExpressionList));
+ bool done = (peek() == Token::RPAREN);
+ while (!done) {
+ ExpressionT argument = this->ParseAssignmentExpression(
+ true, CHECK_OK_CUSTOM(NullExpressionList));
+ result->Add(argument, zone_);
+ if (result->length() > Code::kMaxArguments) {
+ ReportMessageAt(scanner()->location(), "too_many_arguments");
+ *ok = false;
+ return this->NullExpressionList();
+ }
+ done = (peek() == Token::RPAREN);
+ if (!done) {
+ // Need {} because of the CHECK_OK_CUSTOM macro.
+ Expect(Token::COMMA, CHECK_OK_CUSTOM(NullExpressionList));
+ }
+ }
+ Expect(Token::RPAREN, CHECK_OK_CUSTOM(NullExpressionList));
+ return result;
+}
+
+// Precedence = 2
+template <class Traits>
+typename ParserBase<Traits>::ExpressionT
+ParserBase<Traits>::ParseAssignmentExpression(bool accept_IN, bool* ok) {
+ // AssignmentExpression ::
+ // ConditionalExpression
+ // YieldExpression
+ // LeftHandSideExpression AssignmentOperator AssignmentExpression
+
+ Scanner::Location lhs_location = scanner()->peek_location();
+
+ if (peek() == Token::YIELD && is_generator()) {
+ return this->ParseYieldExpression(ok);
+ }
+
+ if (fni_ != NULL) fni_->Enter();
+ ExpressionT expression =
+ this->ParseConditionalExpression(accept_IN, CHECK_OK);
+
+ if (!Token::IsAssignmentOp(peek())) {
+ if (fni_ != NULL) fni_->Leave();
+ // Parsed conditional expression only (no assignment).
+ return expression;
+ }
+
+ if (!expression->IsValidLeftHandSide()) {
+ this->ReportMessageAt(lhs_location, "invalid_lhs_in_assignment", true);
+ *ok = false;
+ return this->EmptyExpression();
+ }
+
+ if (strict_mode() == STRICT) {
+ // Assignment to eval or arguments is disallowed in strict mode.
+ this->CheckStrictModeLValue(expression, CHECK_OK);
+ }
+ expression = this->MarkExpressionAsLValue(expression);
+
+ Token::Value op = Next(); // Get assignment operator.
+ int pos = position();
+ ExpressionT right = this->ParseAssignmentExpression(accept_IN, CHECK_OK);
+
+ // TODO(1231235): We try to estimate the set of properties set by
+ // constructors. We define a new property whenever there is an
+ // assignment to a property of 'this'. We should probably only add
+ // properties if we haven't seen them before. Otherwise we'll
+ // probably overestimate the number of properties.
+ if (op == Token::ASSIGN && this->IsThisProperty(expression)) {
+ function_state_->AddProperty();
+ }
+
+ this->CheckAssigningFunctionLiteralToProperty(expression, right);
+
+ if (fni_ != NULL) {
+ // Check if the right hand side is a call to avoid inferring a
+ // name if we're dealing with "a = function(){...}();"-like
+ // expression.
+ if ((op == Token::INIT_VAR
+ || op == Token::INIT_CONST_LEGACY
+ || op == Token::ASSIGN)
+ && (!right->IsCall() && !right->IsCallNew())) {
+ fni_->Infer();
+ } else {
+ fni_->RemoveLastFunction();
+ }
+ fni_->Leave();
+ }
+
+ return factory()->NewAssignment(op, expression, right, pos);
+}
+
+template <class Traits>
+typename ParserBase<Traits>::ExpressionT
+ParserBase<Traits>::ParseYieldExpression(bool* ok) {
+ // YieldExpression ::
+ // 'yield' '*'? AssignmentExpression
+ int pos = peek_position();
+ Expect(Token::YIELD, CHECK_OK);
+ Yield::Kind kind =
+ Check(Token::MUL) ? Yield::DELEGATING : Yield::SUSPEND;
+ ExpressionT generator_object =
+ factory()->NewVariableProxy(function_state_->generator_object_variable());
+ ExpressionT expression =
+ ParseAssignmentExpression(false, CHECK_OK);
+ typename Traits::Type::YieldExpression yield =
+ factory()->NewYield(generator_object, expression, kind, pos);
+ if (kind == Yield::DELEGATING) {
+ yield->set_index(function_state_->NextHandlerIndex());
+ }
+ return yield;
+}
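
An aside on the two Yield kinds distinguished above (illustration only, not part of the patch): plain yield corresponds to Yield::SUSPEND, yield* to Yield::DELEGATING. A minimal runnable example:

    // yield suspends with a single value (SUSPEND); yield* delegates the
    // whole iteration to another generator (DELEGATING).
    function* inner() { yield 1; yield 2; }
    function* outer() {
      yield 0;         // SUSPEND
      yield* inner();  // DELEGATING
    }
    for (var v of outer()) console.log(v);  // 0 1 2
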
+
+
+// Precedence = 3
+template <class Traits>
+typename ParserBase<Traits>::ExpressionT
+ParserBase<Traits>::ParseConditionalExpression(bool accept_IN, bool* ok) {
+ // ConditionalExpression ::
+ // LogicalOrExpression
+ // LogicalOrExpression '?' AssignmentExpression ':' AssignmentExpression
+
+ int pos = peek_position();
+ // We start using the binary expression parser for prec >= 4 only!
+ ExpressionT expression = this->ParseBinaryExpression(4, accept_IN, CHECK_OK);
+ if (peek() != Token::CONDITIONAL) return expression;
+ Consume(Token::CONDITIONAL);
+ // In parsing the first assignment expression in conditional
+ // expressions we always accept the 'in' keyword; see ECMA-262,
+ // section 11.12, page 58.
+ ExpressionT left = ParseAssignmentExpression(true, CHECK_OK);
+ Expect(Token::COLON, CHECK_OK);
+ ExpressionT right = ParseAssignmentExpression(accept_IN, CHECK_OK);
+ return factory()->NewConditional(expression, left, right, pos);
+}
+
+
+// Precedence >= 4
+template <class Traits>
+typename ParserBase<Traits>::ExpressionT
+ParserBase<Traits>::ParseBinaryExpression(int prec, bool accept_IN, bool* ok) {
+ ASSERT(prec >= 4);
+ ExpressionT x = this->ParseUnaryExpression(CHECK_OK);
+ for (int prec1 = Precedence(peek(), accept_IN); prec1 >= prec; prec1--) {
+ // prec1 >= 4
+ while (Precedence(peek(), accept_IN) == prec1) {
+ Token::Value op = Next();
+ int pos = position();
+ ExpressionT y = ParseBinaryExpression(prec1 + 1, accept_IN, CHECK_OK);
+
+ if (this->ShortcutNumericLiteralBinaryExpression(&x, y, op, pos,
+ factory())) {
+ continue;
+ }
+
+ // For now we distinguish between comparisons and other binary
+ // operations. (We could combine the two and get rid of this
+ // code and AST node eventually.)
+ if (Token::IsCompareOp(op)) {
+ // We have a comparison.
+ Token::Value cmp = op;
+ switch (op) {
+ case Token::NE: cmp = Token::EQ; break;
+ case Token::NE_STRICT: cmp = Token::EQ_STRICT; break;
+ default: break;
+ }
+ x = factory()->NewCompareOperation(cmp, x, y, pos);
+ if (cmp != op) {
+ // The comparison was negated - add a NOT.
+ x = factory()->NewUnaryOperation(Token::NOT, x, pos);
+ }
+
+ } else {
+ // We have a "normal" binary operation.
+ x = factory()->NewBinaryOperation(op, x, y, pos);
+ }
+ }
+ }
+ return x;
+}
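
For orientation, ParseBinaryExpression above is a precedence-climbing parser. Below is a simplified sketch of the same idea in JavaScript over a pre-tokenized toy expression; the names and the recursive shape are illustrative only, not V8 APIs (the C++ code uses an outer loop over decreasing precedence instead of a single recursion):

    var PREC = { '+': 4, '-': 4, '*': 5, '/': 5 };

    // Parse a "unary" operand (a number literal here), then fold operators
    // whose precedence is at least minPrec; right operands are parsed one
    // precedence level higher, which keeps the operators left-associative.
    function parseBinary(tokens, minPrec) {
      var x = Number(tokens.shift());
      while (tokens.length && PREC[tokens[0]] >= minPrec) {
        var op = tokens.shift();
        var y = parseBinary(tokens, PREC[op] + 1);
        x = op === '+' ? x + y : op === '-' ? x - y :
            op === '*' ? x * y : x / y;
      }
      return x;
    }

    console.log(parseBinary('1 + 2 * 3 - 4'.split(' '), 4));  // 3
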
+
+
+template <class Traits>
+typename ParserBase<Traits>::ExpressionT
+ParserBase<Traits>::ParseUnaryExpression(bool* ok) {
+ // UnaryExpression ::
+ // PostfixExpression
+ // 'delete' UnaryExpression
+ // 'void' UnaryExpression
+ // 'typeof' UnaryExpression
+ // '++' UnaryExpression
+ // '--' UnaryExpression
+ // '+' UnaryExpression
+ // '-' UnaryExpression
+ // '~' UnaryExpression
+ // '!' UnaryExpression
+
+ Token::Value op = peek();
+ if (Token::IsUnaryOp(op)) {
+ op = Next();
+ int pos = position();
+ ExpressionT expression = ParseUnaryExpression(CHECK_OK);
+
+ // "delete identifier" is a syntax error in strict mode.
+ if (op == Token::DELETE && strict_mode() == STRICT &&
+ this->IsIdentifier(expression)) {
+ ReportMessage("strict_delete", Vector<const char*>::empty());
+ *ok = false;
+ return this->EmptyExpression();
+ }
+
+    // Allow Traits to rewrite the expression.
+ return this->BuildUnaryExpression(expression, op, pos, factory());
+ } else if (Token::IsCountOp(op)) {
+ op = Next();
+ Scanner::Location lhs_location = scanner()->peek_location();
+ ExpressionT expression = ParseUnaryExpression(CHECK_OK);
+ if (!expression->IsValidLeftHandSide()) {
+ ReportMessageAt(lhs_location, "invalid_lhs_in_prefix_op", true);
+ *ok = false;
+ return this->EmptyExpression();
+ }
+
+ if (strict_mode() == STRICT) {
+ // Prefix expression operand in strict mode may not be eval or arguments.
+ this->CheckStrictModeLValue(expression, CHECK_OK);
+ }
+ this->MarkExpressionAsLValue(expression);
+
+ return factory()->NewCountOperation(op,
+ true /* prefix */,
+ expression,
+ position());
+
+ } else {
+ return this->ParsePostfixExpression(ok);
+ }
+}
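
A small runnable illustration of the strict_delete check above; the parses() helper exists only for this example:

    function parses(src) {
      try { Function(src); return true; } catch (e) { return false; }
    }
    console.log(parses('"use strict"; var x; delete x;'));         // false: strict_delete
    console.log(parses('"use strict"; var o = {}; delete o.p;'));  // true: deleting a property is allowed
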
+
+
+template <class Traits>
+typename ParserBase<Traits>::ExpressionT
+ParserBase<Traits>::ParsePostfixExpression(bool* ok) {
+ // PostfixExpression ::
+ // LeftHandSideExpression ('++' | '--')?
+
+ Scanner::Location lhs_location = scanner()->peek_location();
+ ExpressionT expression = this->ParseLeftHandSideExpression(CHECK_OK);
+ if (!scanner()->HasAnyLineTerminatorBeforeNext() &&
+ Token::IsCountOp(peek())) {
+ if (!expression->IsValidLeftHandSide()) {
+ ReportMessageAt(lhs_location, "invalid_lhs_in_postfix_op", true);
+ *ok = false;
+ return this->EmptyExpression();
+ }
+
+ if (strict_mode() == STRICT) {
+ // Postfix expression operand in strict mode may not be eval or arguments.
+ this->CheckStrictModeLValue(expression, CHECK_OK);
+ }
+ expression = this->MarkExpressionAsLValue(expression);
+
+ Token::Value next = Next();
+ expression =
+ factory()->NewCountOperation(next,
+ false /* postfix */,
+ expression,
+ position());
+ }
+ return expression;
+}
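
The HasAnyLineTerminatorBeforeNext() check above enforces the [no LineTerminator here] restriction on postfix ++/--. The classic consequence, runnable as written:

    var a = 1, b = 1;
    a
    ++
    b
    console.log(a, b);  // 1 2 -- parsed as `a; ++b;`, not `a++; b;`
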
+
+
+template <class Traits>
+typename ParserBase<Traits>::ExpressionT
+ParserBase<Traits>::ParseLeftHandSideExpression(bool* ok) {
+ // LeftHandSideExpression ::
+ // (NewExpression | MemberExpression) ...
+
+ ExpressionT result = this->ParseMemberWithNewPrefixesExpression(CHECK_OK);
+
+ while (true) {
+ switch (peek()) {
+ case Token::LBRACK: {
+ Consume(Token::LBRACK);
+ int pos = position();
+ ExpressionT index = ParseExpression(true, CHECK_OK);
+ result = factory()->NewProperty(result, index, pos);
+ Expect(Token::RBRACK, CHECK_OK);
+ break;
+ }
+
+ case Token::LPAREN: {
+ int pos;
+ if (scanner()->current_token() == Token::IDENTIFIER) {
+ // For call of an identifier we want to report position of
+ // the identifier as position of the call in the stack trace.
+ pos = position();
+ } else {
+ // For other kinds of calls we record position of the parenthesis as
+ // position of the call. Note that this is extremely important for
+ // expressions of the form function(){...}() for which call position
+ // should not point to the closing brace otherwise it will intersect
+ // with positions recorded for function literal and confuse debugger.
+ pos = peek_position();
+          // Also the trailing parentheses are a hint that the function will
+ // be called immediately. If we happen to have parsed a preceding
+ // function literal eagerly, we can also compile it eagerly.
+ if (result->IsFunctionLiteral() && mode() == PARSE_EAGERLY) {
+ result->AsFunctionLiteral()->set_parenthesized();
+ }
+ }
+ typename Traits::Type::ExpressionList args = ParseArguments(CHECK_OK);
+
+ // Keep track of eval() calls since they disable all local variable
+ // optimizations.
+ // The calls that need special treatment are the
+ // direct eval calls. These calls are all of the form eval(...), with
+ // no explicit receiver.
+ // These calls are marked as potentially direct eval calls. Whether
+ // they are actually direct calls to eval is determined at run time.
+ this->CheckPossibleEvalCall(result, scope_);
+ result = factory()->NewCall(result, args, pos);
+ if (fni_ != NULL) fni_->RemoveLastFunction();
+ break;
+ }
+
+ case Token::PERIOD: {
+ Consume(Token::PERIOD);
+ int pos = position();
+ IdentifierT name = ParseIdentifierName(CHECK_OK);
+ result = factory()->NewProperty(
+ result, factory()->NewLiteral(name, pos), pos);
+ if (fni_ != NULL) this->PushLiteralName(fni_, name);
+ break;
+ }
+
+ default:
+ return result;
+ }
+ }
+}
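
Background for the CheckPossibleEvalCall note above (not part of the patch): only a call written literally as eval(...) can be a direct eval and observe the local scope, which is why such calls defeat local-variable optimizations. A small illustration, run as a sloppy-mode script:

    var x = 'outer';
    function f() {
      var x = 'inner';
      console.log(eval('x'));          // 'inner' -- direct eval, sees the local scope
      var indirectEval = eval;
      console.log(indirectEval('x'));  // 'outer' -- indirect eval, runs in the global scope
    }
    f();
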
+
+
+template <class Traits>
+typename ParserBase<Traits>::ExpressionT
+ParserBase<Traits>::ParseMemberWithNewPrefixesExpression(bool* ok) {
+ // NewExpression ::
+ // ('new')+ MemberExpression
+
+ // The grammar for new expressions is pretty warped. We can have several 'new'
+ // keywords following each other, and then a MemberExpression. When we see '('
+ // after the MemberExpression, it's associated with the rightmost unassociated
+ // 'new' to create a NewExpression with arguments. However, a NewExpression
+ // can also occur without arguments.
+
+ // Examples of new expression:
+ // new foo.bar().baz means (new (foo.bar)()).baz
+ // new foo()() means (new foo())()
+ // new new foo()() means (new (new foo())())
+ // new new foo means new (new foo)
+ // new new foo() means new (new foo())
+ // new new foo().bar().baz means (new (new foo()).bar()).baz
+
+ if (peek() == Token::NEW) {
+ Consume(Token::NEW);
+ int new_pos = position();
+ ExpressionT result = this->ParseMemberWithNewPrefixesExpression(CHECK_OK);
+ if (peek() == Token::LPAREN) {
+ // NewExpression with arguments.
+ typename Traits::Type::ExpressionList args =
+ this->ParseArguments(CHECK_OK);
+ result = factory()->NewCallNew(result, args, new_pos);
+ // The expression can still continue with . or [ after the arguments.
+ result = this->ParseMemberExpressionContinuation(result, CHECK_OK);
+ return result;
+ }
+ // NewExpression without arguments.
+ return factory()->NewCallNew(result, this->NewExpressionList(0, zone_),
+ new_pos);
+ }
+ // No 'new' keyword.
+ return this->ParseMemberExpression(ok);
+}
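
A quick runnable check of the first association listed in the comment above, i.e. that `new foo.bar().baz` means `(new (foo.bar)()).baz`:

    var foo = { bar: function () { this.baz = 42; } };
    console.log(new foo.bar().baz);  // 42 -- `new` applies to foo.bar, not to foo
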
+
+
+template <class Traits>
+typename ParserBase<Traits>::ExpressionT
+ParserBase<Traits>::ParseMemberExpression(bool* ok) {
+ // MemberExpression ::
+ // (PrimaryExpression | FunctionLiteral)
+ // ('[' Expression ']' | '.' Identifier | Arguments)*
+
+ // The '[' Expression ']' and '.' Identifier parts are parsed by
+ // ParseMemberExpressionContinuation, and the Arguments part is parsed by the
+ // caller.
+
+ // Parse the initial primary or function expression.
+ ExpressionT result = this->EmptyExpression();
+ if (peek() == Token::FUNCTION) {
+ Consume(Token::FUNCTION);
+ int function_token_position = position();
+ bool is_generator = allow_generators() && Check(Token::MUL);
+ IdentifierT name;
+ bool is_strict_reserved_name = false;
+ Scanner::Location function_name_location = Scanner::Location::invalid();
+ FunctionLiteral::FunctionType function_type =
+ FunctionLiteral::ANONYMOUS_EXPRESSION;
+ if (peek_any_identifier()) {
+ name = ParseIdentifierOrStrictReservedWord(&is_strict_reserved_name,
+ CHECK_OK);
+ function_name_location = scanner()->location();
+ function_type = FunctionLiteral::NAMED_EXPRESSION;
+ }
+ result = this->ParseFunctionLiteral(name,
+ function_name_location,
+ is_strict_reserved_name,
+ is_generator,
+ function_token_position,
+ function_type,
+ CHECK_OK);
+ } else {
+ result = ParsePrimaryExpression(CHECK_OK);
+ }
+
+ result = ParseMemberExpressionContinuation(result, CHECK_OK);
+ return result;
+}
+
+
+template <class Traits>
+typename ParserBase<Traits>::ExpressionT
+ParserBase<Traits>::ParseMemberExpressionContinuation(ExpressionT expression,
+ bool* ok) {
+ // Parses this part of MemberExpression:
+ // ('[' Expression ']' | '.' Identifier)*
+ while (true) {
+ switch (peek()) {
+ case Token::LBRACK: {
+ Consume(Token::LBRACK);
+ int pos = position();
+ ExpressionT index = this->ParseExpression(true, CHECK_OK);
+ expression = factory()->NewProperty(expression, index, pos);
+ if (fni_ != NULL) {
+ this->PushPropertyName(fni_, index);
+ }
+ Expect(Token::RBRACK, CHECK_OK);
+ break;
+ }
+ case Token::PERIOD: {
+ Consume(Token::PERIOD);
+ int pos = position();
+ IdentifierT name = ParseIdentifierName(CHECK_OK);
+ expression = factory()->NewProperty(
+ expression, factory()->NewLiteral(name, pos), pos);
+ if (fni_ != NULL) {
+ this->PushLiteralName(fni_, name);
+ }
+ break;
+ }
+ default:
+ return expression;
+ }
+ }
+ ASSERT(false);
+ return this->EmptyExpression();
+}
+
+
+#undef CHECK_OK
+#undef CHECK_OK_CUSTOM
+
+
+template <typename Traits>
+void ParserBase<Traits>::ObjectLiteralChecker::CheckProperty(
+ Token::Value property,
+ PropertyKind type,
+ bool* ok) {
+ int old;
+ if (property == Token::NUMBER) {
+ old = scanner()->FindNumber(&finder_, type);
+ } else {
+ old = scanner()->FindSymbol(&finder_, type);
+ }
+ PropertyKind old_type = static_cast<PropertyKind>(old);
+ if (HasConflict(old_type, type)) {
+ if (IsDataDataConflict(old_type, type)) {
+ // Both are data properties.
+ if (strict_mode_ == SLOPPY) return;
+ parser()->ReportMessageAt(scanner()->location(),
+ "strict_duplicate_property");
+ } else if (IsDataAccessorConflict(old_type, type)) {
+ // Both a data and an accessor property with the same name.
+ parser()->ReportMessageAt(scanner()->location(),
+ "accessor_data_property");
+ } else {
+ ASSERT(IsAccessorAccessorConflict(old_type, type));
+ // Both accessors of the same type.
+ parser()->ReportMessageAt(scanner()->location(),
+ "accessor_get_set");
+ }
+ *ok = false;
+ }
+}
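
For reference, the object literal shapes that map to each message reported by the checker above, as implemented in this version of the parser (sketch only; later ECMAScript editions relax some of these rules):

    var cases = [
      ['({ x: 1, x: 2 })',               'allowed in sloppy mode (data/data)'],
      ['"use strict"; ({ x: 1, x: 2 })', 'strict_duplicate_property'],
      ['({ x: 1, get x() {} })',         'accessor_data_property'],
      ['({ get x() {}, get x() {} })',   'accessor_get_set']
    ];
    cases.forEach(function (c) { console.log(c[0] + ' -> ' + c[1]); });
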
- ParserRecorder* log_;
- Scope* scope_;
- bool parenthesized_function_;
-};
} } // v8::internal
diff --git a/deps/v8/src/profile-generator-inl.h b/deps/v8/src/profile-generator-inl.h
index e363f67761..9aeb8f5c23 100644
--- a/deps/v8/src/profile-generator-inl.h
+++ b/deps/v8/src/profile-generator-inl.h
@@ -47,7 +47,7 @@ CodeEntry::CodeEntry(Logger::LogEventsAndTags tag,
line_number_(line_number),
column_number_(column_number),
shared_id_(0),
- script_id_(v8::Script::kNoScriptId),
+ script_id_(v8::UnboundScript::kNoScriptId),
no_frame_ranges_(NULL),
bailout_reason_(kEmptyBailoutReason) { }
diff --git a/deps/v8/src/promise.js b/deps/v8/src/promise.js
index db7863f809..50f91ae0ba 100644
--- a/deps/v8/src/promise.js
+++ b/deps/v8/src/promise.js
@@ -34,33 +34,7 @@
// var $WeakMap = global.WeakMap
-var $Promise = Promise;
-
-
-//-------------------------------------------------------------------
-
-// Core functionality.
-
-// Event queue format: [(value, [(handler, deferred)*])*]
-// I.e., a list of value/tasks pairs, where the value is a resolution value or
-// rejection reason, and the tasks are a respective list of handler/deferred
-// pairs waiting for notification of this value. Each handler is an onResolve or
-// onReject function provided to the same call of 'chain' that produced the
-// associated deferred.
-var promiseEvents = new InternalArray;
-
-// Status values: 0 = pending, +1 = resolved, -1 = rejected
-var promiseStatus = NEW_PRIVATE("Promise#status");
-var promiseValue = NEW_PRIVATE("Promise#value");
-var promiseOnResolve = NEW_PRIVATE("Promise#onResolve");
-var promiseOnReject = NEW_PRIVATE("Promise#onReject");
-var promiseRaw = NEW_PRIVATE("Promise#raw");
-
-function IsPromise(x) {
- return IS_SPEC_OBJECT(x) && %HasLocalProperty(x, promiseStatus);
-}
-
-function Promise(resolver) {
+var $Promise = function Promise(resolver) {
if (resolver === promiseRaw) return;
if (!%_IsConstructCall()) throw MakeTypeError('not_a_promise', [this]);
if (typeof resolver !== 'function')
@@ -74,6 +48,22 @@ function Promise(resolver) {
}
}
+
+//-------------------------------------------------------------------
+
+// Core functionality.
+
+// Status values: 0 = pending, +1 = resolved, -1 = rejected
+var promiseStatus = GLOBAL_PRIVATE("Promise#status");
+var promiseValue = GLOBAL_PRIVATE("Promise#value");
+var promiseOnResolve = GLOBAL_PRIVATE("Promise#onResolve");
+var promiseOnReject = GLOBAL_PRIVATE("Promise#onReject");
+var promiseRaw = GLOBAL_PRIVATE("Promise#raw");
+
+function IsPromise(x) {
+ return IS_SPEC_OBJECT(x) && %HasLocalProperty(x, promiseStatus);
+}
+
function PromiseSet(promise, status, value, onResolve, onReject) {
SET_PRIVATE(promise, promiseStatus, status);
SET_PRIVATE(promise, promiseValue, value);
@@ -102,12 +92,21 @@ function PromiseReject(promise, r) {
}
+// For API.
+
+function PromiseNopResolver() {}
+
+function PromiseCreate() {
+ return new $Promise(PromiseNopResolver)
+}
+
+
// Convenience.
function PromiseDeferred() {
if (this === $Promise) {
// Optimized case, avoid extra closure.
- var promise = PromiseInit(new Promise(promiseRaw));
+ var promise = PromiseInit(new $Promise(promiseRaw));
return {
promise: promise,
resolve: function(x) { PromiseResolve(promise, x) },
@@ -126,7 +125,7 @@ function PromiseDeferred() {
function PromiseResolved(x) {
if (this === $Promise) {
// Optimized case, avoid extra closure.
- return PromiseSet(new Promise(promiseRaw), +1, x);
+ return PromiseSet(new $Promise(promiseRaw), +1, x);
} else {
return new this(function(resolve, reject) { resolve(x) });
}
@@ -135,7 +134,7 @@ function PromiseResolved(x) {
function PromiseRejected(r) {
if (this === $Promise) {
// Optimized case, avoid extra closure.
- return PromiseSet(new Promise(promiseRaw), -1, r);
+ return PromiseSet(new $Promise(promiseRaw), -1, r);
} else {
return new this(function(resolve, reject) { reject(r) });
}
@@ -169,64 +168,68 @@ function PromiseChain(onResolve, onReject) { // a.k.a. flatMap
}
function PromiseCatch(onReject) {
- return this.chain(UNDEFINED, onReject);
+ return this.then(UNDEFINED, onReject);
}
function PromiseEnqueue(value, tasks) {
- promiseEvents.push(value, tasks);
+ GetMicrotaskQueue().push(function() {
+ for (var i = 0; i < tasks.length; i += 2) {
+ PromiseHandle(value, tasks[i], tasks[i + 1])
+ }
+ });
+
%SetMicrotaskPending(true);
}
-function PromiseMicrotaskRunner() {
- var events = promiseEvents;
- if (events.length > 0) {
- promiseEvents = new InternalArray;
- for (var i = 0; i < events.length; i += 2) {
- var value = events[i];
- var tasks = events[i + 1];
- for (var j = 0; j < tasks.length; j += 2) {
- var handler = tasks[j];
- var deferred = tasks[j + 1];
- try {
- var result = handler(value);
- if (result === deferred.promise)
- throw MakeTypeError('promise_cyclic', [result]);
- else if (IsPromise(result))
- result.chain(deferred.resolve, deferred.reject);
- else
- deferred.resolve(result);
- } catch(e) {
- // TODO(rossberg): perhaps log uncaught exceptions below.
- try { deferred.reject(e) } catch(e) {}
- }
- }
- }
+function PromiseHandle(value, handler, deferred) {
+ try {
+ var result = handler(value);
+ if (result === deferred.promise)
+ throw MakeTypeError('promise_cyclic', [result]);
+ else if (IsPromise(result))
+ %_CallFunction(result, deferred.resolve, deferred.reject, PromiseChain);
+ else
+ deferred.resolve(result);
+ } catch(e) {
+ // TODO(rossberg): perhaps log uncaught exceptions below.
+ try { deferred.reject(e) } catch(e) {}
}
}
-RunMicrotasks.runners.push(PromiseMicrotaskRunner);
// Multi-unwrapped chaining with thenable coercion.
function PromiseThen(onResolve, onReject) {
- onResolve = IS_UNDEFINED(onResolve) ? PromiseIdResolveHandler : onResolve;
+ onResolve =
+ IS_NULL_OR_UNDEFINED(onResolve) ? PromiseIdResolveHandler : onResolve;
+ onReject =
+ IS_NULL_OR_UNDEFINED(onReject) ? PromiseIdRejectHandler : onReject;
var that = this;
var constructor = this.constructor;
- return this.chain(
+ return %_CallFunction(
+ this,
function(x) {
x = PromiseCoerce(constructor, x);
return x === that ? onReject(MakeTypeError('promise_cyclic', [x])) :
IsPromise(x) ? x.then(onResolve, onReject) : onResolve(x);
},
- onReject
+ onReject,
+ PromiseChain
);
}
PromiseCoerce.table = new $WeakMap;
function PromiseCoerce(constructor, x) {
- if (!(IsPromise(x) || IS_NULL_OR_UNDEFINED(x))) {
- var then = x.then;
+ if (!IsPromise(x) && IS_SPEC_OBJECT(x)) {
+ var then;
+ try {
+ then = x.then;
+ } catch(r) {
+ var promise = %_CallFunction(constructor, r, PromiseRejected);
+ PromiseCoerce.table.set(x, promise);
+ return promise;
+ }
if (typeof then === 'function') {
if (PromiseCoerce.table.has(x)) {
return PromiseCoerce.table.get(x);
@@ -235,8 +238,8 @@ function PromiseCoerce(constructor, x) {
PromiseCoerce.table.set(x, deferred.promise);
try {
%_CallFunction(x, deferred.resolve, deferred.reject, then);
- } catch(e) {
- deferred.reject(e);
+ } catch(r) {
+ deferred.reject(r);
}
return deferred.promise;
}
@@ -250,19 +253,23 @@ function PromiseCoerce(constructor, x) {
function PromiseCast(x) {
// TODO(rossberg): cannot do better until we support @@create.
- return IsPromise(x) ? x : this.resolve(x);
+ return IsPromise(x) ? x : new this(function(resolve) { resolve(x) });
}
function PromiseAll(values) {
var deferred = %_CallFunction(this, PromiseDeferred);
var resolutions = [];
+ if (!%_IsArray(values)) {
+ deferred.reject(MakeTypeError('invalid_argument'));
+ return deferred.promise;
+ }
try {
var count = values.length;
if (count === 0) {
deferred.resolve(resolutions);
} else {
for (var i = 0; i < values.length; ++i) {
- this.cast(values[i]).chain(
+ this.resolve(values[i]).then(
function(i, x) {
resolutions[i] = x;
if (--count === 0) deferred.resolve(resolutions);
@@ -279,9 +286,13 @@ function PromiseAll(values) {
function PromiseOne(values) {
var deferred = %_CallFunction(this, PromiseDeferred);
+ if (!%_IsArray(values)) {
+ deferred.reject(MakeTypeError('invalid_argument'));
+ return deferred.promise;
+ }
try {
for (var i = 0; i < values.length; ++i) {
- this.cast(values[i]).chain(
+ this.resolve(values[i]).then(
function(x) { deferred.resolve(x) },
function(r) { deferred.reject(r) }
);
@@ -295,16 +306,15 @@ function PromiseOne(values) {
//-------------------------------------------------------------------
function SetUpPromise() {
- %CheckIsBootstrapping()
- var global_receiver = %GlobalReceiver(global);
- global_receiver.Promise = $Promise;
+ %CheckIsBootstrapping();
+ %SetProperty(global, 'Promise', $Promise, DONT_ENUM);
InstallFunctions($Promise, DONT_ENUM, [
"defer", PromiseDeferred,
- "resolve", PromiseResolved,
+ "accept", PromiseResolved,
"reject", PromiseRejected,
"all", PromiseAll,
"race", PromiseOne,
- "cast", PromiseCast
+ "resolve", PromiseCast
]);
InstallFunctions($Promise.prototype, DONT_ENUM, [
"chain", PromiseChain,
diff --git a/deps/v8/src/property-details-inl.h b/deps/v8/src/property-details-inl.h
new file mode 100644
index 0000000000..98eb1cf58e
--- /dev/null
+++ b/deps/v8/src/property-details-inl.h
@@ -0,0 +1,51 @@
+// Copyright 2014 the V8 project authors. All rights reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following
+// disclaimer in the documentation and/or other materials provided
+// with the distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived
+// from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+#ifndef V8_PROPERTY_DETAILS_INL_H_
+#define V8_PROPERTY_DETAILS_INL_H_
+
+#include "objects.h"
+#include "property-details.h"
+#include "v8conversions.h"
+
+namespace v8 {
+namespace internal {
+
+inline bool Representation::CanContainDouble(double value) {
+ if (IsDouble() || is_more_general_than(Representation::Double())) {
+ return true;
+ }
+ if (IsInt32Double(value)) {
+ if (IsInteger32()) return true;
+ if (IsSmi()) return Smi::IsValid(static_cast<int32_t>(value));
+ }
+ return false;
+}
+
+} } // namespace v8::internal
+
+#endif // V8_PROPERTY_DETAILS_INL_H_
diff --git a/deps/v8/src/property-details.h b/deps/v8/src/property-details.h
index b8baff2c26..01050dbd4f 100644
--- a/deps/v8/src/property-details.h
+++ b/deps/v8/src/property-details.h
@@ -148,6 +148,8 @@ class Representation {
return other.is_more_general_than(*this) || other.Equals(*this);
}
+ bool CanContainDouble(double value);
+
Representation generalize(Representation other) {
if (other.fits_into(*this)) return *this;
if (other.is_more_general_than(*this)) return other;
@@ -233,11 +235,11 @@ class PropertyDetails BASE_EMBEDDED {
| FieldIndexField::encode(field_index);
}
- int pointer() { return DescriptorPointer::decode(value_); }
+ int pointer() const { return DescriptorPointer::decode(value_); }
PropertyDetails set_pointer(int i) { return PropertyDetails(value_, i); }
- PropertyDetails CopyWithRepresentation(Representation representation) {
+ PropertyDetails CopyWithRepresentation(Representation representation) const {
return PropertyDetails(value_, representation);
}
PropertyDetails CopyAddAttributes(PropertyAttributes new_attributes) {
@@ -248,7 +250,7 @@ class PropertyDetails BASE_EMBEDDED {
// Conversion for storing details as Object*.
explicit inline PropertyDetails(Smi* smi);
- inline Smi* AsSmi();
+ inline Smi* AsSmi() const;
static uint8_t EncodeRepresentation(Representation representation) {
return representation.kind();
@@ -258,26 +260,26 @@ class PropertyDetails BASE_EMBEDDED {
return Representation::FromKind(static_cast<Representation::Kind>(bits));
}
- PropertyType type() { return TypeField::decode(value_); }
+ PropertyType type() const { return TypeField::decode(value_); }
PropertyAttributes attributes() const {
return AttributesField::decode(value_);
}
- int dictionary_index() {
+ int dictionary_index() const {
return DictionaryStorageField::decode(value_);
}
- Representation representation() {
+ Representation representation() const {
ASSERT(type() != NORMAL);
return DecodeRepresentation(RepresentationField::decode(value_));
}
- int field_index() {
+ int field_index() const {
return FieldIndexField::decode(value_);
}
- inline PropertyDetails AsDeleted();
+ inline PropertyDetails AsDeleted() const;
static bool IsValidIndex(int index) {
return DictionaryStorageField::is_valid(index);
diff --git a/deps/v8/src/property.h b/deps/v8/src/property.h
index da772dc86c..baa5a0f993 100644
--- a/deps/v8/src/property.h
+++ b/deps/v8/src/property.h
@@ -187,12 +187,12 @@ class LookupResult BASE_EMBEDDED {
transition_(NULL),
cacheable_(true),
details_(NONE, NONEXISTENT, Representation::None()) {
- isolate->SetTopLookupResult(this);
+ isolate->set_top_lookup_result(this);
}
~LookupResult() {
ASSERT(isolate()->top_lookup_result() == this);
- isolate()->SetTopLookupResult(next_);
+ isolate()->set_top_lookup_result(next_);
}
Isolate* isolate() const { return isolate_; }
@@ -200,9 +200,9 @@ class LookupResult BASE_EMBEDDED {
void DescriptorResult(JSObject* holder, PropertyDetails details, int number) {
lookup_type_ = DESCRIPTOR_TYPE;
holder_ = holder;
+ transition_ = NULL;
details_ = details;
number_ = number;
- transition_ = NULL;
}
bool CanHoldValue(Handle<Object> value) {
@@ -246,92 +246,93 @@ class LookupResult BASE_EMBEDDED {
lookup_type_ = NOT_FOUND;
details_ = PropertyDetails(NONE, NONEXISTENT, Representation::None());
holder_ = NULL;
+ transition_ = NULL;
}
- JSObject* holder() {
+ JSObject* holder() const {
ASSERT(IsFound());
return JSObject::cast(holder_);
}
- JSProxy* proxy() {
+ JSProxy* proxy() const {
ASSERT(IsHandler());
return JSProxy::cast(holder_);
}
- PropertyType type() {
+ PropertyType type() const {
ASSERT(IsFound());
return details_.type();
}
- Representation representation() {
+ Representation representation() const {
ASSERT(IsFound());
ASSERT(!IsTransition());
ASSERT(details_.type() != NONEXISTENT);
return details_.representation();
}
- PropertyAttributes GetAttributes() {
+ PropertyAttributes GetAttributes() const {
ASSERT(!IsTransition());
ASSERT(IsFound());
ASSERT(details_.type() != NONEXISTENT);
return details_.attributes();
}
- PropertyDetails GetPropertyDetails() {
+ PropertyDetails GetPropertyDetails() const {
ASSERT(!IsTransition());
return details_;
}
- bool IsFastPropertyType() {
+ bool IsFastPropertyType() const {
ASSERT(IsFound());
return IsTransition() || type() != NORMAL;
}
   // Property callbacks do not include transitions to callbacks.
- bool IsPropertyCallbacks() {
+ bool IsPropertyCallbacks() const {
ASSERT(!(details_.type() == CALLBACKS && !IsFound()));
return details_.type() == CALLBACKS;
}
- bool IsReadOnly() {
+ bool IsReadOnly() const {
ASSERT(IsFound());
ASSERT(!IsTransition());
ASSERT(details_.type() != NONEXISTENT);
return details_.IsReadOnly();
}
- bool IsField() {
+ bool IsField() const {
ASSERT(!(details_.type() == FIELD && !IsFound()));
return details_.type() == FIELD;
}
- bool IsNormal() {
+ bool IsNormal() const {
ASSERT(!(details_.type() == NORMAL && !IsFound()));
return details_.type() == NORMAL;
}
- bool IsConstant() {
+ bool IsConstant() const {
ASSERT(!(details_.type() == CONSTANT && !IsFound()));
return details_.type() == CONSTANT;
}
- bool IsConstantFunction() {
+ bool IsConstantFunction() const {
return IsConstant() && GetValue()->IsJSFunction();
}
- bool IsDontDelete() { return details_.IsDontDelete(); }
- bool IsDontEnum() { return details_.IsDontEnum(); }
- bool IsFound() { return lookup_type_ != NOT_FOUND; }
- bool IsTransition() { return lookup_type_ == TRANSITION_TYPE; }
- bool IsHandler() { return lookup_type_ == HANDLER_TYPE; }
- bool IsInterceptor() { return lookup_type_ == INTERCEPTOR_TYPE; }
+ bool IsDontDelete() const { return details_.IsDontDelete(); }
+ bool IsDontEnum() const { return details_.IsDontEnum(); }
+ bool IsFound() const { return lookup_type_ != NOT_FOUND; }
+ bool IsTransition() const { return lookup_type_ == TRANSITION_TYPE; }
+ bool IsHandler() const { return lookup_type_ == HANDLER_TYPE; }
+ bool IsInterceptor() const { return lookup_type_ == INTERCEPTOR_TYPE; }
   // Is the result a property, excluding transitions and the null descriptor?
- bool IsProperty() {
+ bool IsProperty() const {
return IsFound() && !IsTransition();
}
- bool IsDataProperty() {
+ bool IsDataProperty() const {
switch (type()) {
case FIELD:
case NORMAL:
@@ -351,10 +352,10 @@ class LookupResult BASE_EMBEDDED {
return false;
}
- bool IsCacheable() { return cacheable_; }
+ bool IsCacheable() const { return cacheable_; }
void DisallowCaching() { cacheable_ = false; }
- Object* GetLazyValue() {
+ Object* GetLazyValue() const {
switch (type()) {
case FIELD:
return holder()->RawFastPropertyAt(GetFieldIndex().field_index());
@@ -379,66 +380,62 @@ class LookupResult BASE_EMBEDDED {
return NULL;
}
- Map* GetTransitionTarget() {
+ Map* GetTransitionTarget() const {
return transition_;
}
- PropertyDetails GetTransitionDetails() {
+ PropertyDetails GetTransitionDetails() const {
+ ASSERT(IsTransition());
return transition_->GetLastDescriptorDetails();
}
- bool IsTransitionToField() {
+ bool IsTransitionToField() const {
return IsTransition() && GetTransitionDetails().type() == FIELD;
}
- bool IsTransitionToConstant() {
+ bool IsTransitionToConstant() const {
return IsTransition() && GetTransitionDetails().type() == CONSTANT;
}
- int GetTransitionIndex() {
- ASSERT(IsTransition());
- return number_;
- }
-
- int GetDescriptorIndex() {
+ int GetDescriptorIndex() const {
ASSERT(lookup_type_ == DESCRIPTOR_TYPE);
return number_;
}
- PropertyIndex GetFieldIndex() {
+ PropertyIndex GetFieldIndex() const {
ASSERT(lookup_type_ == DESCRIPTOR_TYPE);
return PropertyIndex::NewFieldIndex(GetFieldIndexFromMap(holder()->map()));
}
- int GetLocalFieldIndexFromMap(Map* map) {
+ int GetLocalFieldIndexFromMap(Map* map) const {
return GetFieldIndexFromMap(map) - map->inobject_properties();
}
- int GetDictionaryEntry() {
+ int GetDictionaryEntry() const {
ASSERT(lookup_type_ == DICTIONARY_TYPE);
return number_;
}
- JSFunction* GetConstantFunction() {
+ JSFunction* GetConstantFunction() const {
ASSERT(type() == CONSTANT);
return JSFunction::cast(GetValue());
}
- Object* GetConstantFromMap(Map* map) {
+ Object* GetConstantFromMap(Map* map) const {
ASSERT(type() == CONSTANT);
return GetValueFromMap(map);
}
- JSFunction* GetConstantFunctionFromMap(Map* map) {
+ JSFunction* GetConstantFunctionFromMap(Map* map) const {
return JSFunction::cast(GetConstantFromMap(map));
}
- Object* GetConstant() {
+ Object* GetConstant() const {
ASSERT(type() == CONSTANT);
return GetValue();
}
- Object* GetCallbackObject() {
+ Object* GetCallbackObject() const {
ASSERT(type() == CALLBACKS && !IsTransition());
return GetValue();
}
@@ -447,7 +444,7 @@ class LookupResult BASE_EMBEDDED {
void Print(FILE* out);
#endif
- Object* GetValue() {
+ Object* GetValue() const {
if (lookup_type_ == DESCRIPTOR_TYPE) {
return GetValueFromMap(holder()->map());
}
diff --git a/deps/v8/src/regexp-macro-assembler-tracer.cc b/deps/v8/src/regexp-macro-assembler-tracer.cc
index c446b4b49f..75e4392478 100644
--- a/deps/v8/src/regexp-macro-assembler-tracer.cc
+++ b/deps/v8/src/regexp-macro-assembler-tracer.cc
@@ -38,8 +38,9 @@ RegExpMacroAssemblerTracer::RegExpMacroAssemblerTracer(
RegExpMacroAssembler(assembler->zone()),
assembler_(assembler) {
unsigned int type = assembler->Implementation();
- ASSERT(type < 5);
- const char* impl_names[] = {"IA32", "ARM", "MIPS", "X64", "Bytecode"};
+ ASSERT(type < 6);
+ const char* impl_names[] = {"IA32", "ARM", "ARM64",
+ "MIPS", "X64", "Bytecode"};
PrintF("RegExpMacroAssembler%s();\n", impl_names[type]);
}
diff --git a/deps/v8/src/regexp-macro-assembler.h b/deps/v8/src/regexp-macro-assembler.h
index 1ff8bd9797..fc3100867b 100644
--- a/deps/v8/src/regexp-macro-assembler.h
+++ b/deps/v8/src/regexp-macro-assembler.h
@@ -53,6 +53,7 @@ class RegExpMacroAssembler {
enum IrregexpImplementation {
kIA32Implementation,
kARMImplementation,
+ kARM64Implementation,
kMIPSImplementation,
kX64Implementation,
kBytecodeImplementation
diff --git a/deps/v8/src/runtime.cc b/deps/v8/src/runtime.cc
index 422da34e7f..5142fd33df 100644
--- a/deps/v8/src/runtime.cc
+++ b/deps/v8/src/runtime.cc
@@ -153,25 +153,13 @@ namespace internal {
PropertyDetails name = PropertyDetails(Smi::cast(args[index]));
-// Assert that the given argument has a valid value for a StrictModeFlag
-// and store it in a StrictModeFlag variable with the given name.
+// Assert that the given argument has a valid value for a StrictMode
+// and store it in a StrictMode variable with the given name.
#define CONVERT_STRICT_MODE_ARG_CHECKED(name, index) \
RUNTIME_ASSERT(args[index]->IsSmi()); \
- RUNTIME_ASSERT(args.smi_at(index) == kStrictMode || \
- args.smi_at(index) == kNonStrictMode); \
- StrictModeFlag name = \
- static_cast<StrictModeFlag>(args.smi_at(index));
-
-
-// Assert that the given argument has a valid value for a LanguageMode
-// and store it in a LanguageMode variable with the given name.
-#define CONVERT_LANGUAGE_MODE_ARG(name, index) \
- ASSERT(args[index]->IsSmi()); \
- ASSERT(args.smi_at(index) == CLASSIC_MODE || \
- args.smi_at(index) == STRICT_MODE || \
- args.smi_at(index) == EXTENDED_MODE); \
- LanguageMode name = \
- static_cast<LanguageMode>(args.smi_at(index));
+ RUNTIME_ASSERT(args.smi_at(index) == STRICT || \
+ args.smi_at(index) == SLOPPY); \
+ StrictMode name = static_cast<StrictMode>(args.smi_at(index));
static Handle<Map> ComputeObjectLiteralMap(
@@ -298,7 +286,7 @@ static Handle<Object> CreateObjectLiteralBoilerplate(
if (Handle<String>::cast(key)->AsArrayIndex(&element_index)) {
// Array index as string (uint32).
result = JSObject::SetOwnElement(
- boilerplate, element_index, value, kNonStrictMode);
+ boilerplate, element_index, value, SLOPPY);
} else {
Handle<String> name(String::cast(*key));
ASSERT(!name->AsArrayIndex(&element_index));
@@ -309,7 +297,7 @@ static Handle<Object> CreateObjectLiteralBoilerplate(
} else if (key->ToArrayIndex(&element_index)) {
// Array index (uint32).
result = JSObject::SetOwnElement(
- boilerplate, element_index, value, kNonStrictMode);
+ boilerplate, element_index, value, SLOPPY);
} else {
// Non-uint32 number.
ASSERT(key->IsNumber());
@@ -480,7 +468,7 @@ static Handle<Object> CreateLiteralBoilerplate(
}
-RUNTIME_FUNCTION(MaybeObject*, Runtime_CreateObjectLiteral) {
+RUNTIME_FUNCTION(MaybeObject*, RuntimeHidden_CreateObjectLiteral) {
HandleScope scope(isolate);
ASSERT(args.length() == 4);
CONVERT_ARG_HANDLE_CHECKED(FixedArray, literals, 0);
@@ -582,7 +570,7 @@ static MaybeObject* CreateArrayLiteralImpl(Isolate* isolate,
}
-RUNTIME_FUNCTION(MaybeObject*, Runtime_CreateArrayLiteral) {
+RUNTIME_FUNCTION(MaybeObject*, RuntimeHidden_CreateArrayLiteral) {
HandleScope scope(isolate);
ASSERT(args.length() == 4);
CONVERT_ARG_HANDLE_CHECKED(FixedArray, literals, 0);
@@ -595,7 +583,7 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_CreateArrayLiteral) {
}
-RUNTIME_FUNCTION(MaybeObject*, Runtime_CreateArrayLiteralStubBailout) {
+RUNTIME_FUNCTION(MaybeObject*, RuntimeHidden_CreateArrayLiteralStubBailout) {
HandleScope scope(isolate);
ASSERT(args.length() == 3);
CONVERT_ARG_HANDLE_CHECKED(FixedArray, literals, 0);
@@ -633,7 +621,33 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_CreatePrivateSymbol) {
}
-RUNTIME_FUNCTION(MaybeObject*, Runtime_SymbolName) {
+RUNTIME_FUNCTION(MaybeObject*, Runtime_CreateGlobalPrivateSymbol) {
+ HandleScope scope(isolate);
+ ASSERT(args.length() == 1);
+ CONVERT_ARG_HANDLE_CHECKED(String, name, 0);
+ Handle<JSObject> registry = isolate->GetSymbolRegistry();
+ Handle<String> part = isolate->factory()->private_intern_string();
+ Handle<JSObject> privates =
+ Handle<JSObject>::cast(JSObject::GetProperty(registry, part));
+ Handle<Object> symbol = JSObject::GetProperty(privates, name);
+ if (!symbol->IsSymbol()) {
+ ASSERT(symbol->IsUndefined());
+ symbol = isolate->factory()->NewPrivateSymbol();
+ Handle<Symbol>::cast(symbol)->set_name(*name);
+ JSObject::SetProperty(privates, name, symbol, NONE, STRICT);
+ }
+ return *symbol;
+}
+
+
+RUNTIME_FUNCTION(MaybeObject*, Runtime_NewSymbolWrapper) {
+ ASSERT(args.length() == 1);
+ CONVERT_ARG_CHECKED(Symbol, symbol, 0);
+ return symbol->ToObject(isolate);
+}
+
+
+RUNTIME_FUNCTION(MaybeObject*, Runtime_SymbolDescription) {
SealHandleScope shs(isolate);
ASSERT(args.length() == 1);
CONVERT_ARG_CHECKED(Symbol, symbol, 0);
@@ -641,6 +655,13 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_SymbolName) {
}
+RUNTIME_FUNCTION(MaybeObject*, Runtime_SymbolRegistry) {
+ HandleScope scope(isolate);
+ ASSERT(args.length() == 0);
+ return *isolate->GetSymbolRegistry();
+}
+
+
RUNTIME_FUNCTION(MaybeObject*, Runtime_SymbolIsPrivate) {
SealHandleScope shs(isolate);
ASSERT(args.length() == 1);
@@ -890,6 +911,10 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_ArrayBufferIsView) {
RUNTIME_FUNCTION(MaybeObject*, Runtime_ArrayBufferNeuter) {
HandleScope scope(isolate);
CONVERT_ARG_HANDLE_CHECKED(JSArrayBuffer, array_buffer, 0);
+ if (array_buffer->backing_store() == NULL) {
+ CHECK(Smi::FromInt(0) == array_buffer->byte_length());
+ return isolate->heap()->undefined_value();
+ }
ASSERT(!array_buffer->is_external());
void* backing_store = array_buffer->backing_store();
size_t byte_length = NumberToSize(isolate, array_buffer->byte_length());
@@ -901,11 +926,17 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_ArrayBufferNeuter) {
void Runtime::ArrayIdToTypeAndSize(
- int arrayId, ExternalArrayType* array_type, size_t* element_size) {
+ int arrayId,
+ ExternalArrayType* array_type,
+ ElementsKind* external_elements_kind,
+ ElementsKind* fixed_elements_kind,
+ size_t* element_size) {
switch (arrayId) {
#define ARRAY_ID_CASE(Type, type, TYPE, ctype, size) \
case ARRAY_ID_##TYPE: \
*array_type = kExternal##Type##Array; \
+ *external_elements_kind = EXTERNAL_##TYPE##_ELEMENTS; \
+ *fixed_elements_kind = TYPE##_ELEMENTS; \
*element_size = size; \
break;
@@ -923,7 +954,7 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_TypedArrayInitialize) {
ASSERT(args.length() == 5);
CONVERT_ARG_HANDLE_CHECKED(JSTypedArray, holder, 0);
CONVERT_SMI_ARG_CHECKED(arrayId, 1);
- CONVERT_ARG_HANDLE_CHECKED(JSArrayBuffer, buffer, 2);
+ CONVERT_ARG_HANDLE_CHECKED(Object, maybe_buffer, 2);
CONVERT_ARG_HANDLE_CHECKED(Object, byte_offset_object, 3);
CONVERT_ARG_HANDLE_CHECKED(Object, byte_length_object, 4);
@@ -935,18 +966,20 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_TypedArrayInitialize) {
ExternalArrayType array_type = kExternalInt8Array; // Bogus initialization.
size_t element_size = 1; // Bogus initialization.
- Runtime::ArrayIdToTypeAndSize(arrayId, &array_type, &element_size);
+ ElementsKind external_elements_kind =
+ EXTERNAL_INT8_ELEMENTS; // Bogus initialization.
+ ElementsKind fixed_elements_kind = INT8_ELEMENTS; // Bogus initialization.
+ Runtime::ArrayIdToTypeAndSize(arrayId,
+ &array_type,
+ &external_elements_kind,
+ &fixed_elements_kind,
+ &element_size);
- holder->set_buffer(*buffer);
holder->set_byte_offset(*byte_offset_object);
holder->set_byte_length(*byte_length_object);
size_t byte_offset = NumberToSize(isolate, *byte_offset_object);
size_t byte_length = NumberToSize(isolate, *byte_length_object);
- size_t array_buffer_byte_length =
- NumberToSize(isolate, buffer->byte_length());
- CHECK(byte_offset <= array_buffer_byte_length);
- CHECK(array_buffer_byte_length - byte_offset >= byte_length);
CHECK_EQ(0, static_cast<int>(byte_length % element_size));
size_t length = byte_length / element_size;
@@ -959,14 +992,34 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_TypedArrayInitialize) {
Handle<Object> length_obj = isolate->factory()->NewNumberFromSize(length);
holder->set_length(*length_obj);
- holder->set_weak_next(buffer->weak_first_view());
- buffer->set_weak_first_view(*holder);
-
- Handle<ExternalArray> elements =
- isolate->factory()->NewExternalArray(
- static_cast<int>(length), array_type,
- static_cast<uint8_t*>(buffer->backing_store()) + byte_offset);
- holder->set_elements(*elements);
+ if (!maybe_buffer->IsNull()) {
+ Handle<JSArrayBuffer> buffer(JSArrayBuffer::cast(*maybe_buffer));
+
+ size_t array_buffer_byte_length =
+ NumberToSize(isolate, buffer->byte_length());
+ CHECK(byte_offset <= array_buffer_byte_length);
+ CHECK(array_buffer_byte_length - byte_offset >= byte_length);
+
+ holder->set_buffer(*buffer);
+ holder->set_weak_next(buffer->weak_first_view());
+ buffer->set_weak_first_view(*holder);
+
+ Handle<ExternalArray> elements =
+ isolate->factory()->NewExternalArray(
+ static_cast<int>(length), array_type,
+ static_cast<uint8_t*>(buffer->backing_store()) + byte_offset);
+ Handle<Map> map =
+ JSObject::GetElementsTransitionMap(holder, external_elements_kind);
+ holder->set_map_and_elements(*map, *elements);
+ ASSERT(IsExternalArrayElementsKind(holder->map()->elements_kind()));
+ } else {
+ holder->set_buffer(Smi::FromInt(0));
+ holder->set_weak_next(isolate->heap()->undefined_value());
+ Handle<FixedTypedArrayBase> elements =
+ isolate->factory()->NewFixedTypedArray(
+ static_cast<int>(length), array_type);
+ holder->set_elements(*elements);
+ }
return isolate->heap()->undefined_value();
}
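
Editorial aside on the TypedArrayInitialize change above (illustrative, not part of the patch): a typed array may now be backed either by an ArrayBuffer or, when the JS side passes no buffer, by elements allocated directly on the V8 heap; the new FLAG_typed_array_max_size_in_heap, surfaced via Runtime_TypedArrayMaxSizeInHeap below, presumably governs when that path is taken. The JavaScript-visible API is unchanged:

    var withBuffer = new Uint8Array(new ArrayBuffer(8));  // external backing store
    var small = new Uint8Array(8);                        // may use on-heap elements internally
    small[0] = 7;
    console.log(withBuffer.byteLength, small[0]);         // 8 7
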
@@ -992,7 +1045,14 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_TypedArrayInitializeFromArrayLike) {
ExternalArrayType array_type = kExternalInt8Array; // Bogus initialization.
size_t element_size = 1; // Bogus initialization.
- Runtime::ArrayIdToTypeAndSize(arrayId, &array_type, &element_size);
+ ElementsKind external_elements_kind =
+      EXTERNAL_INT8_ELEMENTS;  // Bogus initialization.
+ ElementsKind fixed_elements_kind = INT8_ELEMENTS; // Bogus initialization.
+ Runtime::ArrayIdToTypeAndSize(arrayId,
+ &array_type,
+ &external_elements_kind,
+ &fixed_elements_kind,
+ &element_size);
Handle<JSArrayBuffer> buffer = isolate->factory()->NewJSArrayBuffer();
if (source->IsJSTypedArray() &&
@@ -1045,7 +1105,9 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_TypedArrayInitializeFromArrayLike) {
isolate->factory()->NewExternalArray(
static_cast<int>(length), array_type,
static_cast<uint8_t*>(buffer->backing_store()));
- holder->set_elements(*elements);
+ Handle<Map> map = JSObject::GetElementsTransitionMap(
+ holder, external_elements_kind);
+ holder->set_map_and_elements(*map, *elements);
if (source->IsJSTypedArray()) {
Handle<JSTypedArray> typed_array(JSTypedArray::cast(*source));
@@ -1053,7 +1115,7 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_TypedArrayInitializeFromArrayLike) {
if (typed_array->type() == holder->type()) {
uint8_t* backing_store =
static_cast<uint8_t*>(
- JSArrayBuffer::cast(typed_array->buffer())->backing_store());
+ typed_array->GetBuffer()->backing_store());
size_t source_byte_offset =
NumberToSize(isolate, typed_array->byte_offset());
memcpy(
@@ -1082,13 +1144,24 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_TypedArrayInitializeFromArrayLike) {
return typed_array->accessor(); \
}
-TYPED_ARRAY_GETTER(Buffer, buffer)
TYPED_ARRAY_GETTER(ByteLength, byte_length)
TYPED_ARRAY_GETTER(ByteOffset, byte_offset)
TYPED_ARRAY_GETTER(Length, length)
#undef TYPED_ARRAY_GETTER
+RUNTIME_FUNCTION(MaybeObject*, Runtime_TypedArrayGetBuffer) {
+ HandleScope scope(isolate);
+ ASSERT(args.length() == 1);
+ CONVERT_ARG_HANDLE_CHECKED(Object, holder, 0);
+ if (!holder->IsJSTypedArray())
+ return isolate->Throw(*isolate->factory()->NewTypeError(
+ "not_typed_array", HandleVector<Object>(NULL, 0)));
+ Handle<JSTypedArray> typed_array(JSTypedArray::cast(*holder));
+ return *typed_array->GetBuffer();
+}
+
+
// Return codes for Runtime_TypedArraySetFastCases.
// Should be synchronized with typedarray.js natives.
enum TypedArraySetResultCodes {
@@ -1134,10 +1207,10 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_TypedArraySetFastCases) {
size_t source_offset = NumberToSize(isolate, source->byte_offset());
uint8_t* target_base =
static_cast<uint8_t*>(
- JSArrayBuffer::cast(target->buffer())->backing_store()) + target_offset;
+ target->GetBuffer()->backing_store()) + target_offset;
uint8_t* source_base =
static_cast<uint8_t*>(
- JSArrayBuffer::cast(source->buffer())->backing_store()) + source_offset;
+ source->GetBuffer()->backing_store()) + source_offset;
// Typed arrays of the same type: use memmove.
if (target->type() == source->type()) {
@@ -1153,8 +1226,8 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_TypedArraySetFastCases) {
target_base + target_byte_length > source_base)) {
// We do not support overlapping ArrayBuffers
ASSERT(
- JSArrayBuffer::cast(target->buffer())->backing_store() ==
- JSArrayBuffer::cast(source->buffer())->backing_store());
+ target->GetBuffer()->backing_store() ==
+ source->GetBuffer()->backing_store());
return Smi::FromInt(TYPED_ARRAY_SET_TYPED_ARRAY_OVERLAPPING);
} else { // Non-overlapping typed arrays
return Smi::FromInt(TYPED_ARRAY_SET_TYPED_ARRAY_NONOVERLAPPING);
@@ -1162,6 +1235,12 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_TypedArraySetFastCases) {
}
+RUNTIME_FUNCTION(MaybeObject*, Runtime_TypedArrayMaxSizeInHeap) {
+ ASSERT_OBJECT_SIZE(FLAG_typed_array_max_size_in_heap);
+ return Smi::FromInt(FLAG_typed_array_max_size_in_heap);
+}
+
+
RUNTIME_FUNCTION(MaybeObject*, Runtime_DataViewInitialize) {
HandleScope scope(isolate);
ASSERT(args.length() == 4);
@@ -1656,7 +1735,8 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_GetPrototype) {
!isolate->MayNamedAccessWrapper(Handle<JSObject>::cast(obj),
isolate->factory()->proto_string(),
v8::ACCESS_GET)) {
- isolate->ReportFailedAccessCheck(JSObject::cast(*obj), v8::ACCESS_GET);
+ isolate->ReportFailedAccessCheckWrapper(Handle<JSObject>::cast(obj),
+ v8::ACCESS_GET);
RETURN_IF_SCHEDULED_EXCEPTION(isolate);
return isolate->heap()->undefined_value();
}
@@ -1687,11 +1767,11 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_SetPrototype) {
!isolate->MayNamedAccessWrapper(obj,
isolate->factory()->proto_string(),
v8::ACCESS_SET)) {
- isolate->ReportFailedAccessCheck(*obj, v8::ACCESS_SET);
+ isolate->ReportFailedAccessCheckWrapper(obj, v8::ACCESS_SET);
RETURN_IF_SCHEDULED_EXCEPTION(isolate);
return isolate->heap()->undefined_value();
}
- if (FLAG_harmony_observation && obj->map()->is_observed()) {
+ if (obj->map()->is_observed()) {
Handle<Object> old_value(
GetPrototypeSkipHiddenPrototypes(isolate, *obj), isolate);
@@ -1790,7 +1870,7 @@ static AccessCheckResult CheckPropertyAccess(Handle<JSObject> obj,
return ACCESS_ALLOWED;
}
- obj->GetIsolate()->ReportFailedAccessCheck(*obj, access_type);
+ obj->GetIsolate()->ReportFailedAccessCheckWrapper(obj, access_type);
return ACCESS_FORBIDDEN;
}
@@ -1829,7 +1909,7 @@ static AccessCheckResult CheckPropertyAccess(Handle<JSObject> obj,
break;
}
- isolate->ReportFailedAccessCheck(*obj, access_type);
+ isolate->ReportFailedAccessCheckWrapper(obj, access_type);
return ACCESS_FORBIDDEN;
}
@@ -1863,7 +1943,7 @@ static Handle<Object> GetOwnProperty(Isolate* isolate,
case ACCESS_ABSENT: return factory->undefined_value();
}
- PropertyAttributes attrs = obj->GetLocalPropertyAttribute(*name);
+ PropertyAttributes attrs = JSReceiver::GetLocalPropertyAttribute(obj, name);
if (attrs == ABSENT) {
RETURN_HANDLE_IF_SCHEDULED_EXCEPTION(isolate, Object);
return factory->undefined_value();
@@ -2053,6 +2133,8 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_SetAccessorProperty) {
CONVERT_ARG_HANDLE_CHECKED(Object, setter, 3);
CONVERT_SMI_ARG_CHECKED(attribute, 4);
CONVERT_SMI_ARG_CHECKED(access_control, 5);
+ RUNTIME_ASSERT(getter->IsUndefined() || getter->IsFunctionTemplateInfo());
+ RUNTIME_ASSERT(setter->IsUndefined() || setter->IsFunctionTemplateInfo());
JSObject::DefineAccessor(object,
name,
InstantiateAccessorComponent(isolate, getter),
@@ -2076,7 +2158,7 @@ static Failure* ThrowRedeclarationError(Isolate* isolate,
}
-RUNTIME_FUNCTION(MaybeObject*, Runtime_DeclareGlobals) {
+RUNTIME_FUNCTION(MaybeObject*, RuntimeHidden_DeclareGlobals) {
HandleScope scope(isolate);
ASSERT(args.length() == 3);
Handle<GlobalObject> global = Handle<GlobalObject>(
@@ -2106,17 +2188,12 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_DeclareGlobals) {
// value of the variable if the property is already there.
// Do the lookup locally only, see ES5 erratum.
LookupResult lookup(isolate);
- if (FLAG_es52_globals) {
- global->LocalLookup(*name, &lookup, true);
- } else {
- global->Lookup(*name, &lookup);
- }
+ global->LocalLookup(*name, &lookup, true);
if (lookup.IsFound()) {
// We found an existing property. Unless it was an interceptor
// that claims the property is absent, skip this declaration.
if (!lookup.IsInterceptor()) continue;
- PropertyAttributes attributes = global->GetPropertyAttribute(*name);
- if (attributes != ABSENT) continue;
+ if (JSReceiver::GetPropertyAttribute(global, name) != ABSENT) continue;
// Fall-through and introduce the absent property by using
// SetProperty.
}
@@ -2145,7 +2222,7 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_DeclareGlobals) {
attr |= READ_ONLY;
}
- LanguageMode language_mode = DeclareGlobalsLanguageMode::decode(flags);
+ StrictMode strict_mode = DeclareGlobalsStrictMode::decode(flags);
if (!lookup.IsFound() || is_function) {
// If the local property exists, check that we can reconfigure it
@@ -2167,7 +2244,7 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_DeclareGlobals) {
RETURN_IF_EMPTY_HANDLE(isolate,
JSObject::SetProperty(
global, name, value, static_cast<PropertyAttributes>(attr),
- language_mode == CLASSIC_MODE ? kNonStrictMode : kStrictMode));
+ strict_mode));
}
}
@@ -2176,7 +2253,7 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_DeclareGlobals) {
}
-RUNTIME_FUNCTION(MaybeObject*, Runtime_DeclareContextSlot) {
+RUNTIME_FUNCTION(MaybeObject*, RuntimeHidden_DeclareContextSlot) {
HandleScope scope(isolate);
ASSERT(args.length() == 4);
@@ -2223,8 +2300,7 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_DeclareContextSlot) {
Handle<JSObject> object = Handle<JSObject>::cast(holder);
RETURN_IF_EMPTY_HANDLE(
isolate,
- JSReceiver::SetProperty(object, name, initial_value, mode,
- kNonStrictMode));
+ JSReceiver::SetProperty(object, name, initial_value, mode, SLOPPY));
}
}
@@ -2270,7 +2346,7 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_DeclareContextSlot) {
JSObject::SetLocalPropertyIgnoreAttributes(object, name, value, mode));
} else {
RETURN_IF_EMPTY_HANDLE(isolate,
- JSReceiver::SetProperty(object, name, value, mode, kNonStrictMode));
+ JSReceiver::SetProperty(object, name, value, mode, SLOPPY));
}
}
@@ -2291,9 +2367,7 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_InitializeVarGlobal) {
CONVERT_ARG_HANDLE_CHECKED(String, name, 0);
RUNTIME_ASSERT(args[1]->IsSmi());
- CONVERT_LANGUAGE_MODE_ARG(language_mode, 1);
- StrictModeFlag strict_mode_flag = (language_mode == CLASSIC_MODE)
- ? kNonStrictMode : kStrictMode;
+ CONVERT_STRICT_MODE_ARG_CHECKED(strict_mode, 1);
// According to ECMA-262, section 12.2, page 62, the property must
// not be deletable.
@@ -2309,15 +2383,15 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_InitializeVarGlobal) {
LookupResult lookup(isolate);
isolate->context()->global_object()->LocalLookup(*name, &lookup, true);
if (lookup.IsInterceptor()) {
+ Handle<JSObject> holder(lookup.holder());
PropertyAttributes intercepted =
- lookup.holder()->GetPropertyAttribute(*name);
+ JSReceiver::GetPropertyAttribute(holder, name);
if (intercepted != ABSENT && (intercepted & READ_ONLY) == 0) {
// Found an interceptor that's not read only.
if (assign) {
CONVERT_ARG_HANDLE_CHECKED(Object, value, 2);
Handle<Object> result = JSObject::SetPropertyForResult(
- handle(lookup.holder()), &lookup, name, value, attributes,
- strict_mode_flag);
+ holder, &lookup, name, value, attributes, strict_mode);
RETURN_IF_EMPTY_HANDLE(isolate, result);
return *result;
} else {
@@ -2330,7 +2404,7 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_InitializeVarGlobal) {
CONVERT_ARG_HANDLE_CHECKED(Object, value, 2);
Handle<GlobalObject> global(isolate->context()->global_object());
Handle<Object> result = JSReceiver::SetProperty(
- global, name, value, attributes, strict_mode_flag);
+ global, name, value, attributes, strict_mode);
RETURN_IF_EMPTY_HANDLE(isolate, result);
return *result;
}
@@ -2338,7 +2412,7 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_InitializeVarGlobal) {
}
-RUNTIME_FUNCTION(MaybeObject*, Runtime_InitializeConstGlobal) {
+RUNTIME_FUNCTION(MaybeObject*, RuntimeHidden_InitializeConstGlobal) {
SealHandleScope shs(isolate);
// All constants are declared with an initial value. The name
// of the constant is the first argument and the initial value
@@ -2381,11 +2455,10 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_InitializeConstGlobal) {
// BUG 1213575: Handle the case where we have to set a read-only
// property through an interceptor and only do it if it's
// uninitialized, e.g. the hole. Nirk...
- // Passing non-strict mode because the property is writable.
+ // Passing sloppy mode because the property is writable.
RETURN_IF_EMPTY_HANDLE(
isolate,
- JSReceiver::SetProperty(global, name, value, attributes,
- kNonStrictMode));
+ JSReceiver::SetProperty(global, name, value, attributes, SLOPPY));
return *value;
}
@@ -2416,7 +2489,7 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_InitializeConstGlobal) {
}
-RUNTIME_FUNCTION(MaybeObject*, Runtime_InitializeConstContextSlot) {
+RUNTIME_FUNCTION(MaybeObject*, RuntimeHidden_InitializeConstContextSlot) {
HandleScope scope(isolate);
ASSERT(args.length() == 3);
@@ -2455,7 +2528,7 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_InitializeConstContextSlot) {
// Strict mode not needed (const disallowed in strict mode).
RETURN_IF_EMPTY_HANDLE(
isolate,
- JSReceiver::SetProperty(global, name, value, NONE, kNonStrictMode));
+ JSReceiver::SetProperty(global, name, value, NONE, SLOPPY));
return *value;
}
@@ -2506,8 +2579,7 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_InitializeConstContextSlot) {
// Strict mode not needed (const disallowed in strict mode).
RETURN_IF_EMPTY_HANDLE(
isolate,
- JSReceiver::SetProperty(object, name, value, attributes,
- kNonStrictMode));
+ JSReceiver::SetProperty(object, name, value, attributes, SLOPPY));
}
}
@@ -2521,14 +2593,14 @@ RUNTIME_FUNCTION(MaybeObject*,
ASSERT(args.length() == 2);
CONVERT_ARG_HANDLE_CHECKED(JSObject, object, 0);
CONVERT_SMI_ARG_CHECKED(properties, 1);
- if (object->HasFastProperties()) {
+ if (object->HasFastProperties() && !object->IsJSGlobalProxy()) {
JSObject::NormalizeProperties(object, KEEP_INOBJECT_PROPERTIES, properties);
}
return *object;
}
-RUNTIME_FUNCTION(MaybeObject*, Runtime_RegExpExec) {
+RUNTIME_FUNCTION(MaybeObject*, RuntimeHidden_RegExpExec) {
HandleScope scope(isolate);
ASSERT(args.length() == 4);
CONVERT_ARG_HANDLE_CHECKED(JSRegExp, regexp, 0);
@@ -2549,7 +2621,7 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_RegExpExec) {
}
-RUNTIME_FUNCTION(MaybeObject*, Runtime_RegExpConstructResult) {
+RUNTIME_FUNCTION(MaybeObject*, RuntimeHidden_RegExpConstructResult) {
SealHandleScope shs(isolate);
ASSERT(args.length() == 3);
CONVERT_SMI_ARG_CHECKED(elements_count, 0);
@@ -2587,7 +2659,6 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_RegExpConstructResult) {
RUNTIME_FUNCTION(MaybeObject*, Runtime_RegExpInitializeObject) {
HandleScope scope(isolate);
- DisallowHeapAllocation no_allocation;
ASSERT(args.length() == 5);
CONVERT_ARG_HANDLE_CHECKED(JSRegExp, regexp, 0);
CONVERT_ARG_HANDLE_CHECKED(String, source, 1);
@@ -2668,7 +2739,7 @@ static Handle<JSFunction> InstallBuiltin(Isolate* isolate,
code,
false);
optimized->shared()->DontAdaptArguments();
- JSReceiver::SetProperty(holder, key, optimized, NONE, kStrictMode);
+ JSReceiver::SetProperty(holder, key, optimized, NONE, STRICT);
return optimized;
}
@@ -2690,7 +2761,7 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_SpecialArrayFunctions) {
}
-RUNTIME_FUNCTION(MaybeObject*, Runtime_IsClassicModeFunction) {
+RUNTIME_FUNCTION(MaybeObject*, Runtime_IsSloppyModeFunction) {
SealHandleScope shs(isolate);
ASSERT(args.length() == 1);
CONVERT_ARG_CHECKED(JSReceiver, callable, 0);
@@ -2704,7 +2775,7 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_IsClassicModeFunction) {
}
JSFunction* function = JSFunction::cast(callable);
SharedFunctionInfo* shared = function->shared();
- return isolate->heap()->ToBoolean(shared->is_classic_mode());
+ return isolate->heap()->ToBoolean(shared->strict_mode() == SLOPPY);
}
@@ -2724,7 +2795,7 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_GetDefaultReceiver) {
JSFunction* function = JSFunction::cast(callable);
SharedFunctionInfo* shared = function->shared();
- if (shared->native() || !shared->is_classic_mode()) {
+ if (shared->native() || shared->strict_mode() == STRICT) {
return isolate->heap()->undefined_value();
}
// Returns undefined for strict or native functions, or
@@ -2736,7 +2807,7 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_GetDefaultReceiver) {
}
-RUNTIME_FUNCTION(MaybeObject*, Runtime_MaterializeRegExpLiteral) {
+RUNTIME_FUNCTION(MaybeObject*, RuntimeHidden_MaterializeRegExpLiteral) {
HandleScope scope(isolate);
ASSERT(args.length() == 4);
CONVERT_ARG_HANDLE_CHECKED(FixedArray, literals, 0);
@@ -3054,7 +3125,7 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_SetExpectedNumberOfProperties) {
}
-RUNTIME_FUNCTION(MaybeObject*, Runtime_CreateJSGeneratorObject) {
+RUNTIME_FUNCTION(MaybeObject*, RuntimeHidden_CreateJSGeneratorObject) {
HandleScope scope(isolate);
ASSERT(args.length() == 0);
@@ -3080,7 +3151,7 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_CreateJSGeneratorObject) {
}
-RUNTIME_FUNCTION(MaybeObject*, Runtime_SuspendJSGeneratorObject) {
+RUNTIME_FUNCTION(MaybeObject*, RuntimeHidden_SuspendJSGeneratorObject) {
SealHandleScope shs(isolate);
ASSERT(args.length() == 1);
CONVERT_ARG_CHECKED(JSGeneratorObject, generator_object, 0);
@@ -3131,7 +3202,7 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_SuspendJSGeneratorObject) {
// inlined into GeneratorNext and GeneratorThrow. EmitGeneratorResume is
// called in any case, as it needs to reconstruct the stack frame and make space
// for arguments and operands.
-RUNTIME_FUNCTION(MaybeObject*, Runtime_ResumeJSGeneratorObject) {
+RUNTIME_FUNCTION(MaybeObject*, RuntimeHidden_ResumeJSGeneratorObject) {
SealHandleScope shs(isolate);
ASSERT(args.length() == 3);
CONVERT_ARG_CHECKED(JSGeneratorObject, generator_object, 0);
@@ -3150,6 +3221,10 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_ResumeJSGeneratorObject) {
int offset = generator_object->continuation();
ASSERT(offset > 0);
frame->set_pc(pc + offset);
+ if (FLAG_enable_ool_constant_pool) {
+ frame->set_constant_pool(
+ generator_object->function()->code()->constant_pool());
+ }
generator_object->set_continuation(JSGeneratorObject::kGeneratorExecuting);
FixedArray* operand_stack = generator_object->operand_stack();
@@ -3175,7 +3250,7 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_ResumeJSGeneratorObject) {
}
-RUNTIME_FUNCTION(MaybeObject*, Runtime_ThrowGeneratorStateError) {
+RUNTIME_FUNCTION(MaybeObject*, RuntimeHidden_ThrowGeneratorStateError) {
HandleScope scope(isolate);
ASSERT(args.length() == 1);
CONVERT_ARG_HANDLE_CHECKED(JSGeneratorObject, generator, 0);
@@ -3208,7 +3283,7 @@ MUST_USE_RESULT static MaybeObject* CharFromCode(Isolate* isolate,
}
-RUNTIME_FUNCTION(MaybeObject*, Runtime_StringCharCodeAt) {
+RUNTIME_FUNCTION(MaybeObject*, RuntimeHidden_StringCharCodeAt) {
SealHandleScope shs(isolate);
ASSERT(args.length() == 2);
@@ -3308,8 +3383,7 @@ class FixedArrayBuilder {
}
Handle<JSArray> ToJSArray(Handle<JSArray> target_array) {
- Factory* factory = target_array->GetIsolate()->factory();
- factory->SetContent(target_array, array_);
+ JSArray::SetContent(target_array, array_);
target_array->set_length(Smi::FromInt(length_));
return target_array;
}
@@ -3404,6 +3478,7 @@ class ReplacementStringBuilder {
Handle<String> joined_string;
if (is_ascii_) {
Handle<SeqOneByteString> seq = NewRawOneByteString(character_count_);
+ RETURN_IF_EMPTY_HANDLE_VALUE(heap_->isolate(), seq, Handle<String>());
DisallowHeapAllocation no_gc;
uint8_t* char_buffer = seq->GetChars();
StringBuilderConcatHelper(*subject_,
@@ -3414,6 +3489,7 @@ class ReplacementStringBuilder {
} else {
// Non-ASCII.
Handle<SeqTwoByteString> seq = NewRawTwoByteString(character_count_);
+ RETURN_IF_EMPTY_HANDLE_VALUE(heap_->isolate(), seq, Handle<String>());
DisallowHeapAllocation no_gc;
uc16* char_buffer = seq->GetChars();
StringBuilderConcatHelper(*subject_,
@@ -3428,9 +3504,11 @@ class ReplacementStringBuilder {
void IncrementCharacterCount(int by) {
if (character_count_ > String::kMaxLength - by) {
- V8::FatalProcessOutOfMemory("String.replace result too large.");
+ STATIC_ASSERT(String::kMaxLength < kMaxInt);
+ character_count_ = kMaxInt;
+ } else {
+ character_count_ += by;
}
- character_count_ += by;
}
private:
@@ -3911,20 +3989,25 @@ MUST_USE_RESULT static MaybeObject* StringReplaceGlobalAtomRegExpWithString(
static_cast<int64_t>(pattern_len)) *
static_cast<int64_t>(matches) +
static_cast<int64_t>(subject_len);
- if (result_len_64 > INT_MAX) return Failure::OutOfMemoryException(0x11);
- int result_len = static_cast<int>(result_len_64);
+ int result_len;
+ if (result_len_64 > static_cast<int64_t>(String::kMaxLength)) {
+ STATIC_ASSERT(String::kMaxLength < kMaxInt);
+ result_len = kMaxInt; // Provoke exception.
+ } else {
+ result_len = static_cast<int>(result_len_64);
+ }
int subject_pos = 0;
int result_pos = 0;
- Handle<ResultSeqString> result;
+ Handle<String> result_seq;
if (ResultSeqString::kHasAsciiEncoding) {
- result = Handle<ResultSeqString>::cast(
- isolate->factory()->NewRawOneByteString(result_len));
+ result_seq = isolate->factory()->NewRawOneByteString(result_len);
} else {
- result = Handle<ResultSeqString>::cast(
- isolate->factory()->NewRawTwoByteString(result_len));
+ result_seq = isolate->factory()->NewRawTwoByteString(result_len);
}
+ RETURN_IF_EMPTY_HANDLE(isolate, result_seq);
+ Handle<ResultSeqString> result = Handle<ResultSeqString>::cast(result_seq);
for (int i = 0; i < matches; i++) {
// Copy non-matched subject content.
@@ -4053,7 +4136,9 @@ MUST_USE_RESULT static MaybeObject* StringReplaceGlobalRegExpWithString(
capture_count,
global_cache.LastSuccessfulMatch());
- return *(builder.ToString());
+ Handle<String> result = builder.ToString();
+ RETURN_IF_EMPTY_HANDLE(isolate, result);
+ return *result;
}
@@ -4102,6 +4187,7 @@ MUST_USE_RESULT static MaybeObject* StringReplaceGlobalRegExpWithEmptyString(
answer = Handle<ResultSeqString>::cast(
isolate->factory()->NewRawTwoByteString(new_length));
}
+ ASSERT(!answer.is_null());
int prev = 0;
int position = 0;
@@ -4144,11 +4230,9 @@ MUST_USE_RESULT static MaybeObject* StringReplaceGlobalRegExpWithEmptyString(
if (delta == 0) return *answer;
Address end_of_string = answer->address() + string_size;
- isolate->heap()->CreateFillerObjectAt(end_of_string, delta);
- if (Marking::IsBlack(Marking::MarkBitFrom(*answer))) {
- MemoryChunk::IncrementLiveBytesFromMutator(answer->address(), -delta);
- }
-
+ Heap* heap = isolate->heap();
+ heap->CreateFillerObjectAt(end_of_string, delta);
+ heap->AdjustLiveBytes(answer->address(), -delta, Heap::FROM_MUTATOR);
return *answer;
}
@@ -4201,8 +4285,8 @@ Handle<String> StringReplaceOneCharWithString(Isolate* isolate,
replace,
found,
recursion_limit - 1);
- if (*found) return isolate->factory()->NewConsString(new_first, second);
if (new_first.is_null()) return new_first;
+ if (*found) return isolate->factory()->NewConsString(new_first, second);
Handle<String> new_second =
StringReplaceOneCharWithString(isolate,
@@ -4211,8 +4295,8 @@ Handle<String> StringReplaceOneCharWithString(Isolate* isolate,
replace,
found,
recursion_limit - 1);
- if (*found) return isolate->factory()->NewConsString(first, new_second);
if (new_second.is_null()) return new_second;
+ if (*found) return isolate->factory()->NewConsString(first, new_second);
return subject;
} else {
@@ -4221,6 +4305,7 @@ Handle<String> StringReplaceOneCharWithString(Isolate* isolate,
*found = true;
Handle<String> first = isolate->factory()->NewSubString(subject, 0, index);
Handle<String> cons1 = isolate->factory()->NewConsString(first, replace);
+ RETURN_IF_EMPTY_HANDLE_VALUE(isolate, cons1, Handle<String>());
Handle<String> second =
isolate->factory()->NewSubString(subject, index + 1, subject->length());
return isolate->factory()->NewConsString(cons1, second);
@@ -4246,6 +4331,7 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_StringReplaceOneCharWithString) {
&found,
kRecursionLimit);
if (!result.is_null()) return *result;
+ if (isolate->has_pending_exception()) return Failure::Exception();
return *StringReplaceOneCharWithString(isolate,
FlattenGetString(subject),
search,
@@ -4467,7 +4553,7 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_StringLocaleCompare) {
}
-RUNTIME_FUNCTION(MaybeObject*, Runtime_SubString) {
+RUNTIME_FUNCTION(MaybeObject*, RuntimeHidden_SubString) {
HandleScope scope(isolate);
ASSERT(args.length() == 3);
@@ -4576,7 +4662,7 @@ static MaybeObject* SearchRegExpMultiple(
Handle<FixedArray> cached_fixed_array =
Handle<FixedArray>(FixedArray::cast(*cached_answer));
// The cache FixedArray is a COW-array and can therefore be reused.
- isolate->factory()->SetContent(result_array, cached_fixed_array);
+ JSArray::SetContent(result_array, cached_fixed_array);
// The actual length of the result array is stored in the last element of
// the backing store (the backing FixedArray may have a larger capacity).
Object* cached_fixed_array_last_element =
@@ -4835,21 +4921,13 @@ static Handle<Object> GetCharAt(Handle<String> string, uint32_t index) {
}
-MaybeObject* Runtime::GetElementOrCharAtOrFail(Isolate* isolate,
- Handle<Object> object,
- uint32_t index) {
- CALL_HEAP_FUNCTION_PASS_EXCEPTION(isolate,
- GetElementOrCharAt(isolate, object, index));
-}
-
-
-MaybeObject* Runtime::GetElementOrCharAt(Isolate* isolate,
- Handle<Object> object,
- uint32_t index) {
+Handle<Object> Runtime::GetElementOrCharAt(Isolate* isolate,
+ Handle<Object> object,
+ uint32_t index) {
// Handle [] indexing on Strings
if (object->IsString()) {
Handle<Object> result = GetCharAt(Handle<String>::cast(object), index);
- if (!result->IsUndefined()) return *result;
+ if (!result->IsUndefined()) return result;
}
// Handle [] indexing on String objects
@@ -4857,14 +4935,16 @@ MaybeObject* Runtime::GetElementOrCharAt(Isolate* isolate,
Handle<JSValue> js_value = Handle<JSValue>::cast(object);
Handle<Object> result =
GetCharAt(Handle<String>(String::cast(js_value->value())), index);
- if (!result->IsUndefined()) return *result;
+ if (!result->IsUndefined()) return result;
}
+ Handle<Object> result;
if (object->IsString() || object->IsNumber() || object->IsBoolean()) {
- return object->GetPrototype(isolate)->GetElement(isolate, index);
+ Handle<Object> proto(object->GetPrototype(isolate), isolate);
+ return Object::GetElement(isolate, proto, index);
+ } else {
+ return Object::GetElement(isolate, object, index);
}
-
- return object->GetElement(isolate, index);
}
@@ -4923,7 +5003,9 @@ MaybeObject* Runtime::GetObjectProperty(Isolate* isolate,
// Check if the given key is an array index.
uint32_t index;
if (key->ToArrayIndex(&index)) {
- return GetElementOrCharAt(isolate, object, index);
+ Handle<Object> result = GetElementOrCharAt(isolate, object, index);
+ RETURN_IF_EMPTY_HANDLE(isolate, result);
+ return *result;
}
// Convert the key to a name - possibly by calling back into JavaScript.
@@ -4933,7 +5015,9 @@ MaybeObject* Runtime::GetObjectProperty(Isolate* isolate,
// Check if the name is trivially convertible to an index and get
// the element if so.
if (name->AsArrayIndex(&index)) {
- return GetElementOrCharAt(isolate, object, index);
+ Handle<Object> result = GetElementOrCharAt(isolate, object, index);
+ RETURN_IF_EMPTY_HANDLE(isolate, result);
+ return *result;
} else {
return object->GetProperty(*name);
}
@@ -4993,8 +5077,7 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_KeyedGetProperty) {
int offset = result.GetFieldIndex().field_index();
// Do not track double fields in the keyed lookup cache. Reading
// double values requires boxing.
- if (!FLAG_track_double_fields ||
- !result.representation().IsDouble()) {
+ if (!result.representation().IsDouble()) {
keyed_lookup_cache->Update(receiver_map, key, offset);
}
return receiver->FastPropertyAt(result.representation(), offset);
@@ -5129,7 +5212,7 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_DefineOrRedefineDataProperty) {
name,
obj_value,
handle(lookup.holder()),
- kStrictMode);
+ STRICT);
RETURN_IF_EMPTY_HANDLE(isolate, result_object);
return *result_object;
}
@@ -5202,7 +5285,7 @@ Handle<Object> Runtime::SetObjectProperty(Isolate* isolate,
Handle<Object> key,
Handle<Object> value,
PropertyAttributes attr,
- StrictModeFlag strict_mode) {
+ StrictMode strict_mode) {
SetPropertyMode set_mode = attr == NONE ? SET_PROPERTY : DEFINE_PROPERTY;
if (object->IsUndefined() || object->IsNull()) {
@@ -5320,7 +5403,7 @@ Handle<Object> Runtime::ForceSetObjectProperty(Isolate* isolate,
return value;
}
- return JSObject::SetElement(js_object, index, value, attr, kNonStrictMode,
+ return JSObject::SetElement(js_object, index, value, attr, SLOPPY,
false,
DEFINE_PROPERTY);
}
@@ -5328,7 +5411,7 @@ Handle<Object> Runtime::ForceSetObjectProperty(Isolate* isolate,
if (key->IsName()) {
Handle<Name> name = Handle<Name>::cast(key);
if (name->AsArrayIndex(&index)) {
- return JSObject::SetElement(js_object, index, value, attr, kNonStrictMode,
+ return JSObject::SetElement(js_object, index, value, attr, SLOPPY,
false,
DEFINE_PROPERTY);
} else {
@@ -5346,7 +5429,7 @@ Handle<Object> Runtime::ForceSetObjectProperty(Isolate* isolate,
Handle<String> name = Handle<String>::cast(converted);
if (name->AsArrayIndex(&index)) {
- return JSObject::SetElement(js_object, index, value, attr, kNonStrictMode,
+ return JSObject::SetElement(js_object, index, value, attr, SLOPPY,
false,
DEFINE_PROPERTY);
} else {
@@ -5399,6 +5482,17 @@ MaybeObject* Runtime::DeleteObjectProperty(Isolate* isolate,
}
+RUNTIME_FUNCTION(MaybeObject*, Runtime_SetHiddenProperty) {
+ HandleScope scope(isolate);
+ RUNTIME_ASSERT(args.length() == 3);
+
+ CONVERT_ARG_HANDLE_CHECKED(JSObject, object, 0);
+ CONVERT_ARG_HANDLE_CHECKED(String, key, 1);
+ CONVERT_ARG_HANDLE_CHECKED(Object, value, 2);
+ return *JSObject::SetHiddenProperty(object, key, value);
+}
+
+
RUNTIME_FUNCTION(MaybeObject*, Runtime_SetProperty) {
HandleScope scope(isolate);
RUNTIME_ASSERT(args.length() == 4 || args.length() == 5);
@@ -5413,10 +5507,10 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_SetProperty) {
PropertyAttributes attributes =
static_cast<PropertyAttributes>(unchecked_attributes);
- StrictModeFlag strict_mode = kNonStrictMode;
+ StrictMode strict_mode = SLOPPY;
if (args.length() == 5) {
- CONVERT_STRICT_MODE_ARG_CHECKED(strict_mode_flag, 4);
- strict_mode = strict_mode_flag;
+ CONVERT_STRICT_MODE_ARG_CHECKED(strict_mode_arg, 4);
+ strict_mode = strict_mode_arg;
}
Handle<Object> result = Runtime::SetObjectProperty(isolate, object, key,
@@ -5595,7 +5689,7 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_DeleteProperty) {
CONVERT_ARG_HANDLE_CHECKED(JSReceiver, object, 0);
CONVERT_ARG_HANDLE_CHECKED(Name, key, 1);
CONVERT_STRICT_MODE_ARG_CHECKED(strict_mode, 2);
- JSReceiver::DeleteMode delete_mode = (strict_mode == kStrictMode)
+ JSReceiver::DeleteMode delete_mode = strict_mode == STRICT
? JSReceiver::STRICT_DELETION : JSReceiver::NORMAL_DELETION;
Handle<Object> result = JSReceiver::DeleteProperty(object, key, delete_mode);
RETURN_IF_EMPTY_HANDLE(isolate, result);
@@ -5693,13 +5787,13 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_HasElement) {
RUNTIME_FUNCTION(MaybeObject*, Runtime_IsPropertyEnumerable) {
- SealHandleScope shs(isolate);
+ HandleScope scope(isolate);
ASSERT(args.length() == 2);
- CONVERT_ARG_CHECKED(JSObject, object, 0);
- CONVERT_ARG_CHECKED(Name, key, 1);
+ CONVERT_ARG_HANDLE_CHECKED(JSObject, object, 0);
+ CONVERT_ARG_HANDLE_CHECKED(Name, key, 1);
- PropertyAttributes att = object->GetLocalPropertyAttribute(key);
+ PropertyAttributes att = JSReceiver::GetLocalPropertyAttribute(object, key);
if (att == ABSENT || (att & DONT_ENUM) != 0) {
RETURN_IF_SCHEDULED_EXCEPTION(isolate);
return isolate->heap()->false_value();
@@ -5780,10 +5874,10 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_GetLocalPropertyNames) {
if (obj->IsJSGlobalProxy()) {
// Only collect names if access is permitted.
if (obj->IsAccessCheckNeeded() &&
- !isolate->MayNamedAccess(*obj,
- isolate->heap()->undefined_value(),
- v8::ACCESS_KEYS)) {
- isolate->ReportFailedAccessCheck(*obj, v8::ACCESS_KEYS);
+ !isolate->MayNamedAccessWrapper(obj,
+ isolate->factory()->undefined_value(),
+ v8::ACCESS_KEYS)) {
+ isolate->ReportFailedAccessCheckWrapper(obj, v8::ACCESS_KEYS);
RETURN_IF_SCHEDULED_EXCEPTION(isolate);
return *isolate->factory()->NewJSArray(0);
}
@@ -5800,10 +5894,10 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_GetLocalPropertyNames) {
for (int i = 0; i < length; i++) {
// Only collect names if access is permitted.
if (jsproto->IsAccessCheckNeeded() &&
- !isolate->MayNamedAccess(*jsproto,
- isolate->heap()->undefined_value(),
- v8::ACCESS_KEYS)) {
- isolate->ReportFailedAccessCheck(*jsproto, v8::ACCESS_KEYS);
+ !isolate->MayNamedAccessWrapper(jsproto,
+ isolate->factory()->undefined_value(),
+ v8::ACCESS_KEYS)) {
+ isolate->ReportFailedAccessCheckWrapper(jsproto, v8::ACCESS_KEYS);
RETURN_IF_SCHEDULED_EXCEPTION(isolate);
return *isolate->factory()->NewJSArray(0);
}
@@ -5847,7 +5941,9 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_GetLocalPropertyNames) {
}
}
next_copy_index += local_property_count[i];
- if (jsproto->HasHiddenProperties()) {
+
+ // Hidden properties only show up if the filter does not skip strings.
+ if ((filter & STRING) == 0 && JSObject::HasHiddenProperties(jsproto)) {
hidden_strings++;
}
if (i < length - 1) {
@@ -5951,9 +6047,10 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_LocalKeys) {
if (object->IsJSGlobalProxy()) {
// Do access checks before going to the global object.
if (object->IsAccessCheckNeeded() &&
- !isolate->MayNamedAccess(*object, isolate->heap()->undefined_value(),
- v8::ACCESS_KEYS)) {
- isolate->ReportFailedAccessCheck(*object, v8::ACCESS_KEYS);
+ !isolate->MayNamedAccessWrapper(object,
+ isolate->factory()->undefined_value(),
+ v8::ACCESS_KEYS)) {
+ isolate->ReportFailedAccessCheckWrapper(object, v8::ACCESS_KEYS);
RETURN_IF_SCHEDULED_EXCEPTION(isolate);
return *isolate->factory()->NewJSArray(0);
}
@@ -6029,7 +6126,11 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_GetArgumentsProperty) {
if (index < n) {
return frame->GetParameter(index);
} else {
- return isolate->initial_object_prototype()->GetElement(isolate, index);
+ Handle<Object> initial_prototype(isolate->initial_object_prototype());
+ Handle<Object> result =
+ Object::GetElement(isolate, initial_prototype, index);
+ RETURN_IF_EMPTY_HANDLE(isolate, result);
+ return *result;
}
}
@@ -6037,7 +6138,7 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_GetArgumentsProperty) {
if (key->Equals(isolate->heap()->length_string())) return Smi::FromInt(n);
if (key->Equals(isolate->heap()->callee_string())) {
JSFunction* function = frame->function();
- if (!function->shared()->is_classic_mode()) {
+ if (function->shared()->strict_mode() == STRICT) {
return isolate->Throw(*isolate->factory()->NewTypeError(
"strict_arguments_callee", HandleVector<Object>(NULL, 0)));
}
@@ -6225,7 +6326,7 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_URIEscape) {
Handle<String> result = string->IsOneByteRepresentationUnderneath()
? URIEscape::Escape<uint8_t>(isolate, source)
: URIEscape::Escape<uc16>(isolate, source);
- if (result.is_null()) return Failure::OutOfMemoryException(0x12);
+ RETURN_IF_EMPTY_HANDLE(isolate, result);
return *result;
}
@@ -6285,49 +6386,44 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_StringParseFloat) {
}
+static inline bool ToUpperOverflows(uc32 character) {
+ // y with umlauts and the micro sign are the only characters that stop
+ // fitting into one-byte when converting to uppercase.
+ static const uc32 yuml_code = 0xff;
+ static const uc32 micro_code = 0xb5;
+ return (character == yuml_code || character == micro_code);
+}
+
+
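For reference, the two overflowing mappings the new helper singles out (standard Unicode uppercase mappings, stated here from general knowledge rather than this file):

    // U+00FF 'ÿ' (y with diaeresis)  --ToUpper-->  U+0178 'Ÿ'
    // U+00B5 'µ' (micro sign)        --ToUpper-->  U+039C 'Μ' (Greek capital mu)
    // Both targets lie above 0xFF, so a one-byte result string cannot hold them;
    // per the comment above, every other Latin-1 character stays within one byte
    // when upper-cased (ß grows to "SS", but those characters are still ASCII).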
template <class Converter>
MUST_USE_RESULT static MaybeObject* ConvertCaseHelper(
Isolate* isolate,
- String* s,
- String::Encoding result_encoding,
- int length,
- int input_string_length,
+ String* string,
+ SeqString* result,
+ int result_length,
unibrow::Mapping<Converter, 128>* mapping) {
+ DisallowHeapAllocation no_gc;
// We try this twice, once with the assumption that the result is no longer
// than the input and, if that assumption breaks, again with the exact
// length. This may not be pretty, but it is nicer than what was here before
// and I hereby claim my vaffel-is.
//
- // Allocate the resulting string.
- //
// NOTE: This assumes that the upper/lower case of an ASCII
// character is also ASCII. This is currently the case, but it
// might break in the future if we implement more context and locale
// dependent upper/lower conversions.
- Object* o;
- { MaybeObject* maybe_o = result_encoding == String::ONE_BYTE_ENCODING
- ? isolate->heap()->AllocateRawOneByteString(length)
- : isolate->heap()->AllocateRawTwoByteString(length);
- if (!maybe_o->ToObject(&o)) return maybe_o;
- }
- String* result = String::cast(o);
bool has_changed_character = false;
- DisallowHeapAllocation no_gc;
-
// Convert all characters to upper case, assuming that they will fit
// in the buffer
Access<ConsStringIteratorOp> op(
isolate->runtime_state()->string_iterator());
- StringCharacterStream stream(s, op.value());
+ StringCharacterStream stream(string, op.value());
unibrow::uchar chars[Converter::kMaxWidth];
// We can assume that the string is not empty
uc32 current = stream.GetNext();
- // y with umlauts is the only character that stops fitting into one-byte
- // when converting to uppercase.
- static const uc32 yuml_code = 0xff;
- bool ignore_yuml = result->IsSeqTwoByteString() || Converter::kIsToLower;
- for (int i = 0; i < length;) {
+ bool ignore_overflow = Converter::kIsToLower || result->IsSeqTwoByteString();
+ for (int i = 0; i < result_length;) {
bool has_next = stream.HasMore();
uc32 next = has_next ? stream.GetNext() : 0;
int char_length = mapping->get(current, next, chars);
@@ -6335,14 +6431,15 @@ MUST_USE_RESULT static MaybeObject* ConvertCaseHelper(
// The case conversion of this character is the character itself.
result->Set(i, current);
i++;
- } else if (char_length == 1 && (ignore_yuml || current != yuml_code)) {
+ } else if (char_length == 1 &&
+ (ignore_overflow || !ToUpperOverflows(current))) {
// Common case: converting the letter resulted in one character.
ASSERT(static_cast<uc32>(chars[0]) != current);
result->Set(i, chars[0]);
has_changed_character = true;
i++;
- } else if (length == input_string_length) {
- bool found_yuml = (current == yuml_code);
+ } else if (result_length == string->length()) {
+ bool overflows = ToUpperOverflows(current);
// We've assumed that the result would be as long as the
// input but here is a character that converts to several
// characters. No matter, we calculate the exact length
@@ -6362,7 +6459,7 @@ MUST_USE_RESULT static MaybeObject* ConvertCaseHelper(
int current_length = i + char_length + next_length;
while (stream.HasMore()) {
current = stream.GetNext();
- found_yuml |= (current == yuml_code);
+ overflows |= ToUpperOverflows(current);
// NOTE: we use 0 as the next character here because, while
// the next character may affect what a character converts to,
// it does not in any case affect the length of what it converts
@@ -6370,15 +6467,15 @@ MUST_USE_RESULT static MaybeObject* ConvertCaseHelper(
int char_length = mapping->get(current, 0, chars);
if (char_length == 0) char_length = 1;
current_length += char_length;
- if (current_length > Smi::kMaxValue) {
- isolate->context()->mark_out_of_memory();
- return Failure::OutOfMemoryException(0x13);
+ if (current_length > String::kMaxLength) {
+ AllowHeapAllocation allocate_error_and_return;
+ return isolate->ThrowInvalidStringLength();
}
}
// Try again with the real length. Return signed if we need
- // to allocate a two-byte string for y-umlaut to uppercase.
- return (found_yuml && !ignore_yuml) ? Smi::FromInt(-current_length)
- : Smi::FromInt(current_length);
+ // to allocate a two-byte string for the conversion to uppercase.
+ return (overflows && !ignore_overflow) ? Smi::FromInt(-current_length)
+ : Smi::FromInt(current_length);
} else {
for (int j = 0; j < char_length; j++) {
result->Set(i, chars[j]);
@@ -6395,7 +6492,7 @@ MUST_USE_RESULT static MaybeObject* ConvertCaseHelper(
// we simply return the result and let the converted string
// become garbage; there is no reason to keep two identical strings
// alive.
- return s;
+ return string;
}
}
@@ -6426,7 +6523,7 @@ static inline uintptr_t AsciiRangeMask(uintptr_t w, char m, char n) {
#ifdef DEBUG
static bool CheckFastAsciiConvert(char* dst,
- char* src,
+ const char* src,
int length,
bool changed,
bool is_to_lower) {
@@ -6449,12 +6546,12 @@ static bool CheckFastAsciiConvert(char* dst,
template<class Converter>
static bool FastAsciiConvert(char* dst,
- char* src,
+ const char* src,
int length,
bool* changed_out) {
#ifdef DEBUG
char* saved_dst = dst;
- char* saved_src = src;
+ const char* saved_src = src;
#endif
DisallowHeapAllocation no_gc;
// We rely on the distance between upper and lower case letters
@@ -6465,12 +6562,12 @@ static bool FastAsciiConvert(char* dst,
static const char hi = Converter::kIsToLower ? 'Z' + 1 : 'z' + 1;
bool changed = false;
uintptr_t or_acc = 0;
- char* const limit = src + length;
+ const char* const limit = src + length;
#ifdef V8_HOST_CAN_READ_UNALIGNED
// Process the prefix of the input that requires no conversion one
// (machine) word at a time.
while (src <= limit - sizeof(uintptr_t)) {
- uintptr_t w = *reinterpret_cast<uintptr_t*>(src);
+ const uintptr_t w = *reinterpret_cast<const uintptr_t*>(src);
or_acc |= w;
if (AsciiRangeMask(w, lo, hi) != 0) {
changed = true;
@@ -6483,7 +6580,7 @@ static bool FastAsciiConvert(char* dst,
// Process the remainder of the input performing conversion when
// required one word at a time.
while (src <= limit - sizeof(uintptr_t)) {
- uintptr_t w = *reinterpret_cast<uintptr_t*>(src);
+ const uintptr_t w = *reinterpret_cast<const uintptr_t*>(src);
or_acc |= w;
uintptr_t m = AsciiRangeMask(w, lo, hi);
// The mask has high (7th) bit set in every byte that needs
@@ -6526,13 +6623,12 @@ MUST_USE_RESULT static MaybeObject* ConvertCase(
Arguments args,
Isolate* isolate,
unibrow::Mapping<Converter, 128>* mapping) {
- SealHandleScope shs(isolate);
- CONVERT_ARG_CHECKED(String, s, 0);
- s = s->TryFlattenGetString();
-
- const int length = s->length();
+ HandleScope handle_scope(isolate);
+ CONVERT_ARG_HANDLE_CHECKED(String, s, 0);
+ s = FlattenGetString(s);
+ int length = s->length();
// Assume that the string is not empty; we need this assumption later
- if (length == 0) return s;
+ if (length == 0) return *s;
// Simpler handling of ASCII strings.
//
@@ -6540,42 +6636,46 @@ MUST_USE_RESULT static MaybeObject* ConvertCase(
// character is also ASCII. This is currently the case, but it
// might break in the future if we implement more context and locale
// dependent upper/lower conversions.
- if (s->IsSeqOneByteString()) {
- Object* o;
- { MaybeObject* maybe_o = isolate->heap()->AllocateRawOneByteString(length);
- if (!maybe_o->ToObject(&o)) return maybe_o;
- }
- SeqOneByteString* result = SeqOneByteString::cast(o);
+ if (s->IsOneByteRepresentationUnderneath()) {
+ Handle<SeqOneByteString> result =
+ isolate->factory()->NewRawOneByteString(length);
+ ASSERT(!result.is_null()); // Same length as input.
+ DisallowHeapAllocation no_gc;
+ String::FlatContent flat_content = s->GetFlatContent();
+ ASSERT(flat_content.IsFlat());
bool has_changed_character = false;
bool is_ascii = FastAsciiConvert<Converter>(
reinterpret_cast<char*>(result->GetChars()),
- reinterpret_cast<char*>(SeqOneByteString::cast(s)->GetChars()),
+ reinterpret_cast<const char*>(flat_content.ToOneByteVector().start()),
length,
&has_changed_character);
// If not ASCII, we discard the result and take the 2 byte path.
- if (is_ascii) {
- return has_changed_character ? result : s;
- }
+ if (is_ascii) return has_changed_character ? *result : *s;
}
- String::Encoding result_encoding = s->IsOneByteRepresentation()
- ? String::ONE_BYTE_ENCODING : String::TWO_BYTE_ENCODING;
- Object* answer;
- { MaybeObject* maybe_answer = ConvertCaseHelper(
- isolate, s, result_encoding, length, length, mapping);
- if (!maybe_answer->ToObject(&answer)) return maybe_answer;
+ Handle<SeqString> result;
+ if (s->IsOneByteRepresentation()) {
+ result = isolate->factory()->NewRawOneByteString(length);
+ } else {
+ result = isolate->factory()->NewRawTwoByteString(length);
}
- if (answer->IsSmi()) {
- int new_length = Smi::cast(answer)->value();
- if (new_length < 0) {
- result_encoding = String::TWO_BYTE_ENCODING;
- new_length = -new_length;
- }
- MaybeObject* maybe_answer = ConvertCaseHelper(
- isolate, s, result_encoding, new_length, length, mapping);
- if (!maybe_answer->ToObject(&answer)) return maybe_answer;
+ ASSERT(!result.is_null()); // Same length as input.
+
+ MaybeObject* maybe = ConvertCaseHelper(isolate, *s, *result, length, mapping);
+ Object* answer;
+ if (!maybe->ToObject(&answer)) return maybe;
+ if (answer->IsString()) return answer;
+
+ ASSERT(answer->IsSmi());
+ length = Smi::cast(answer)->value();
+ if (s->IsOneByteRepresentation() && length > 0) {
+ result = isolate->factory()->NewRawOneByteString(length);
+ } else {
+ if (length < 0) length = -length;
+ result = isolate->factory()->NewRawTwoByteString(length);
}
- return answer;
+ RETURN_IF_EMPTY_HANDLE(isolate, result);
+ return ConvertCaseHelper(isolate, *s, *result, length, mapping);
}
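A short walk-through of the two-pass strategy, assuming the usual Unicode mapping of 'ß' (U+00DF) to the two-character sequence "SS":

    // "aß".toUpperCase():
    //   - the ASCII fast path bails out because 'ß' (0xDF) is not ASCII;
    //   - pass 1: ConvertCaseHelper gets a 2-character result (same length as the
    //     input); 'a' maps to 'A', but 'ß' expands to "SS", so the helper scans
    //     the remaining input, computes the exact length 3 and returns Smi(3)
    //     (positive, because nothing overflows one-byte);
    //   - pass 2: ConvertCase allocates a 3-character one-byte string and calls
    //     the helper again, which fills in "ASS" and returns it.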
@@ -6591,11 +6691,6 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_StringToUpperCase) {
}
-static inline bool IsTrimWhiteSpace(unibrow::uchar c) {
- return unibrow::WhiteSpace::Is(c) || c == 0x200b || c == 0xfeff;
-}
-
-
RUNTIME_FUNCTION(MaybeObject*, Runtime_StringTrim) {
HandleScope scope(isolate);
ASSERT(args.length() == 3);
@@ -6608,15 +6703,19 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_StringTrim) {
int length = string->length();
int left = 0;
+ UnicodeCache* unicode_cache = isolate->unicode_cache();
if (trimLeft) {
- while (left < length && IsTrimWhiteSpace(string->Get(left))) {
+ while (left < length &&
+ unicode_cache->IsWhiteSpaceOrLineTerminator(string->Get(left))) {
left++;
}
}
int right = length;
if (trimRight) {
- while (right > left && IsTrimWhiteSpace(string->Get(right - 1))) {
+ while (right > left &&
+ unicode_cache->IsWhiteSpaceOrLineTerminator(
+ string->Get(right - 1))) {
right--;
}
}
@@ -6818,7 +6917,7 @@ bool Runtime::IsUpperCaseChar(RuntimeState* runtime_state, uint16_t ch) {
}
-RUNTIME_FUNCTION(MaybeObject*, Runtime_NumberToString) {
+RUNTIME_FUNCTION(MaybeObject*, RuntimeHidden_NumberToString) {
SealHandleScope shs(isolate);
ASSERT(args.length() == 1);
@@ -6829,7 +6928,7 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_NumberToString) {
}
-RUNTIME_FUNCTION(MaybeObject*, Runtime_NumberToStringSkipCache) {
+RUNTIME_FUNCTION(MaybeObject*, RuntimeHidden_NumberToStringSkipCache) {
SealHandleScope shs(isolate);
ASSERT(args.length() == 1);
@@ -6854,24 +6953,6 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_NumberToInteger) {
}
-// ES6 draft 9.1.11
-RUNTIME_FUNCTION(MaybeObject*, Runtime_NumberToPositiveInteger) {
- SealHandleScope shs(isolate);
- ASSERT(args.length() == 1);
-
- CONVERT_DOUBLE_ARG_CHECKED(number, 0);
-
- // We do not include 0 so that we don't have to treat +0 / -0 cases.
- if (number > 0 && number <= Smi::kMaxValue) {
- return Smi::FromInt(static_cast<int>(number));
- }
- if (number <= 0) {
- return Smi::FromInt(0);
- }
- return isolate->heap()->NumberFromDouble(DoubleToInteger(number));
-}
-
-
RUNTIME_FUNCTION(MaybeObject*, Runtime_NumberToIntegerMapMinusZero) {
SealHandleScope shs(isolate);
ASSERT(args.length() == 1);
@@ -6916,7 +6997,7 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_NumberToJSInt32) {
// Converts a Number to a Smi, if possible. Returns NaN if the number is not
// a small integer.
-RUNTIME_FUNCTION(MaybeObject*, Runtime_NumberToSmi) {
+RUNTIME_FUNCTION(MaybeObject*, RuntimeHidden_NumberToSmi) {
SealHandleScope shs(isolate);
ASSERT(args.length() == 1);
@@ -6935,7 +7016,7 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_NumberToSmi) {
}
-RUNTIME_FUNCTION(MaybeObject*, Runtime_AllocateHeapNumber) {
+RUNTIME_FUNCTION(MaybeObject*, RuntimeHidden_AllocateHeapNumber) {
SealHandleScope shs(isolate);
ASSERT(args.length() == 0);
return isolate->heap()->AllocateHeapNumber(0);
@@ -7022,13 +7103,15 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_NumberImul) {
}
-RUNTIME_FUNCTION(MaybeObject*, Runtime_StringAdd) {
+RUNTIME_FUNCTION(MaybeObject*, RuntimeHidden_StringAdd) {
HandleScope scope(isolate);
ASSERT(args.length() == 2);
CONVERT_ARG_HANDLE_CHECKED(String, str1, 0);
CONVERT_ARG_HANDLE_CHECKED(String, str2, 1);
isolate->counters()->string_add_runtime()->Increment();
- return *isolate->factory()->NewConsString(str1, str2);
+ Handle<String> result = isolate->factory()->NewConsString(str1, str2);
+ RETURN_IF_EMPTY_HANDLE(isolate, result);
+ return *result;
}
@@ -7075,10 +7158,7 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_StringBuilderConcat) {
HandleScope scope(isolate);
ASSERT(args.length() == 3);
CONVERT_ARG_HANDLE_CHECKED(JSArray, array, 0);
- if (!args[1]->IsSmi()) {
- isolate->context()->mark_out_of_memory();
- return Failure::OutOfMemoryException(0x14);
- }
+ if (!args[1]->IsSmi()) return isolate->ThrowInvalidStringLength();
int array_length = args.smi_at(1);
CONVERT_ARG_HANDLE_CHECKED(String, special, 2);
@@ -7152,8 +7232,7 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_StringBuilderConcat) {
return isolate->Throw(isolate->heap()->illegal_argument_string());
}
if (increment > String::kMaxLength - position) {
- isolate->context()->mark_out_of_memory();
- return Failure::OutOfMemoryException(0x15);
+ return isolate->ThrowInvalidStringLength();
}
position += increment;
}
@@ -7188,20 +7267,15 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_StringBuilderConcat) {
RUNTIME_FUNCTION(MaybeObject*, Runtime_StringBuilderJoin) {
- SealHandleScope shs(isolate);
+ HandleScope scope(isolate);
ASSERT(args.length() == 3);
- CONVERT_ARG_CHECKED(JSArray, array, 0);
- if (!args[1]->IsSmi()) {
- isolate->context()->mark_out_of_memory();
- return Failure::OutOfMemoryException(0x16);
- }
+ CONVERT_ARG_HANDLE_CHECKED(JSArray, array, 0);
+ if (!args[1]->IsSmi()) return isolate->ThrowInvalidStringLength();
int array_length = args.smi_at(1);
- CONVERT_ARG_CHECKED(String, separator, 2);
+ CONVERT_ARG_HANDLE_CHECKED(String, separator, 2);
+ RUNTIME_ASSERT(array->HasFastObjectElements());
- if (!array->HasFastObjectElements()) {
- return isolate->Throw(isolate->heap()->illegal_argument_string());
- }
- FixedArray* fixed_array = FixedArray::cast(array->elements());
+ Handle<FixedArray> fixed_array(FixedArray::cast(array->elements()));
if (fixed_array->length() < array_length) {
array_length = fixed_array->length();
}
@@ -7210,38 +7284,35 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_StringBuilderJoin) {
return isolate->heap()->empty_string();
} else if (array_length == 1) {
Object* first = fixed_array->get(0);
- if (first->IsString()) return first;
+ RUNTIME_ASSERT(first->IsString());
+ return first;
}
int separator_length = separator->length();
int max_nof_separators =
(String::kMaxLength + separator_length - 1) / separator_length;
if (max_nof_separators < (array_length - 1)) {
- isolate->context()->mark_out_of_memory();
- return Failure::OutOfMemoryException(0x17);
+ return isolate->ThrowInvalidStringLength();
}
int length = (array_length - 1) * separator_length;
for (int i = 0; i < array_length; i++) {
Object* element_obj = fixed_array->get(i);
- if (!element_obj->IsString()) {
- // TODO(1161): handle this case.
- return isolate->Throw(isolate->heap()->illegal_argument_string());
- }
+ RUNTIME_ASSERT(element_obj->IsString());
String* element = String::cast(element_obj);
int increment = element->length();
if (increment > String::kMaxLength - length) {
- isolate->context()->mark_out_of_memory();
- return Failure::OutOfMemoryException(0x18);
+ STATIC_ASSERT(String::kMaxLength < kMaxInt);
+ length = kMaxInt; // Provoke exception.
+ break;
}
length += increment;
}
- Object* object;
- { MaybeObject* maybe_object =
- isolate->heap()->AllocateRawTwoByteString(length);
- if (!maybe_object->ToObject(&object)) return maybe_object;
- }
- SeqTwoByteString* answer = SeqTwoByteString::cast(object);
+ Handle<SeqTwoByteString> answer =
+ isolate->factory()->NewRawTwoByteString(length);
+ RETURN_IF_EMPTY_HANDLE(isolate, answer);
+
+ DisallowHeapAllocation no_gc;
uc16* sink = answer->GetChars();
#ifdef DEBUG
@@ -7249,13 +7320,14 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_StringBuilderJoin) {
#endif
String* first = String::cast(fixed_array->get(0));
+ String* separator_raw = *separator;
int first_length = first->length();
String::WriteToFlat(first, sink, 0, first_length);
sink += first_length;
for (int i = 1; i < array_length; i++) {
ASSERT(sink + separator_length <= end);
- String::WriteToFlat(separator, sink, 0, separator_length);
+ String::WriteToFlat(separator_raw, sink, 0, separator_length);
sink += separator_length;
String* element = String::cast(fixed_array->get(i));
@@ -7268,7 +7340,7 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_StringBuilderJoin) {
// Use %_FastAsciiArrayJoin instead.
ASSERT(!answer->IsOneByteRepresentation());
- return answer;
+ return *answer;
}
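The separator guard near the top of this function is a ceiling division; with deliberately small stand-in numbers (String::kMaxLength is of course far larger):

    // Suppose kMaxLength were 10 and separator_length == 3:
    //   max_nof_separators = (10 + 3 - 1) / 3 = 4
    // Joining 6 strings needs 5 separators; 5 > 4, so the function throws the
    // invalid-string-length error before attempting any allocation.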
template <typename Char>
@@ -7327,12 +7399,6 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_SparseJoinWithSeparator) {
// Find total length of join result.
int string_length = 0;
bool is_ascii = separator->IsOneByteRepresentation();
- int max_string_length;
- if (is_ascii) {
- max_string_length = SeqOneByteString::kMaxLength;
- } else {
- max_string_length = SeqTwoByteString::kMaxLength;
- }
bool overflow = false;
CONVERT_NUMBER_CHECKED(int, elements_length,
Int32, elements_array->length());
@@ -7345,10 +7411,9 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_SparseJoinWithSeparator) {
int length = string->length();
if (is_ascii && !string->IsOneByteRepresentation()) {
is_ascii = false;
- max_string_length = SeqTwoByteString::kMaxLength;
}
- if (length > max_string_length ||
- max_string_length - length < string_length) {
+ if (length > String::kMaxLength ||
+ String::kMaxLength - length < string_length) {
overflow = true;
break;
}
@@ -7358,7 +7423,7 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_SparseJoinWithSeparator) {
if (!overflow && separator_length > 0) {
if (array_length <= 0x7fffffffu) {
int separator_count = static_cast<int>(array_length) - 1;
- int remaining_length = max_string_length - string_length;
+ int remaining_length = String::kMaxLength - string_length;
if ((remaining_length / separator_length) >= separator_count) {
string_length += separator_length * (array_length - 1);
} else {
@@ -7376,9 +7441,7 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_SparseJoinWithSeparator) {
// Throw an exception if the resulting string is too large. See
// https://code.google.com/p/chromium/issues/detail?id=336820
// for details.
- return isolate->Throw(*isolate->factory()->
- NewRangeError("invalid_string_length",
- HandleVector<Object>(NULL, 0)));
+ return isolate->ThrowInvalidStringLength();
}
if (is_ascii) {
@@ -7663,7 +7726,7 @@ static Object* FlatStringCompare(String* x, String* y) {
}
-RUNTIME_FUNCTION(MaybeObject*, Runtime_StringCompare) {
+RUNTIME_FUNCTION(MaybeObject*, RuntimeHidden_StringCompare) {
SealHandleScope shs(isolate);
ASSERT(args.length() == 2);
@@ -7698,33 +7761,48 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_StringCompare) {
}
-RUNTIME_FUNCTION(MaybeObject*, Runtime_Math_acos) {
+#define RUNTIME_UNARY_MATH(NAME) \
+RUNTIME_FUNCTION(MaybeObject*, Runtime_Math_##NAME) { \
+ SealHandleScope shs(isolate); \
+ ASSERT(args.length() == 1); \
+ isolate->counters()->math_##NAME()->Increment(); \
+ CONVERT_DOUBLE_ARG_CHECKED(x, 0); \
+ return isolate->heap()->AllocateHeapNumber(std::NAME(x)); \
+}
+
+RUNTIME_UNARY_MATH(acos)
+RUNTIME_UNARY_MATH(asin)
+RUNTIME_UNARY_MATH(atan)
+RUNTIME_UNARY_MATH(log)
+#undef RUNTIME_UNARY_MATH
+
+
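Spelled out, each instantiation reproduces the hand-written body it replaces; RUNTIME_UNARY_MATH(acos), for example, expands to:

    RUNTIME_FUNCTION(MaybeObject*, Runtime_Math_acos) {
      SealHandleScope shs(isolate);
      ASSERT(args.length() == 1);
      isolate->counters()->math_acos()->Increment();
      CONVERT_DOUBLE_ARG_CHECKED(x, 0);
      return isolate->heap()->AllocateHeapNumber(std::acos(x));
    }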
+RUNTIME_FUNCTION(MaybeObject*, Runtime_DoubleHi) {
SealHandleScope shs(isolate);
ASSERT(args.length() == 1);
- isolate->counters()->math_acos()->Increment();
-
CONVERT_DOUBLE_ARG_CHECKED(x, 0);
- return isolate->heap()->AllocateHeapNumber(std::acos(x));
+ uint64_t integer = double_to_uint64(x);
+ integer = (integer >> 32) & 0xFFFFFFFFu;
+ return isolate->heap()->NumberFromDouble(static_cast<int32_t>(integer));
}
-RUNTIME_FUNCTION(MaybeObject*, Runtime_Math_asin) {
+RUNTIME_FUNCTION(MaybeObject*, Runtime_DoubleLo) {
SealHandleScope shs(isolate);
ASSERT(args.length() == 1);
- isolate->counters()->math_asin()->Increment();
-
CONVERT_DOUBLE_ARG_CHECKED(x, 0);
- return isolate->heap()->AllocateHeapNumber(std::asin(x));
+ return isolate->heap()->NumberFromDouble(
+ static_cast<int32_t>(double_to_uint64(x) & 0xFFFFFFFFu));
}
-RUNTIME_FUNCTION(MaybeObject*, Runtime_Math_atan) {
+RUNTIME_FUNCTION(MaybeObject*, Runtime_ConstructDouble) {
SealHandleScope shs(isolate);
- ASSERT(args.length() == 1);
- isolate->counters()->math_atan()->Increment();
-
- CONVERT_DOUBLE_ARG_CHECKED(x, 0);
- return isolate->heap()->AllocateHeapNumber(std::atan(x));
+ ASSERT(args.length() == 2);
+ CONVERT_NUMBER_CHECKED(uint32_t, hi, Uint32, args[0]);
+ CONVERT_NUMBER_CHECKED(uint32_t, lo, Uint32, args[1]);
+ uint64_t result = (static_cast<uint64_t>(hi) << 32) | lo;
+ return isolate->heap()->AllocateHeapNumber(uint64_to_double(result));
}
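A standalone sketch of the hi/lo split these three entries expose, assuming double_to_uint64 and uint64_to_double are plain bit casts (which is how they are used above); the helper names below are illustrative only:

    #include <cstdint>
    #include <cstdio>
    #include <cstring>

    static uint64_t BitsOf(double d) {       // stand-in for double_to_uint64
      uint64_t bits;
      std::memcpy(&bits, &d, sizeof bits);
      return bits;
    }

    static double DoubleOf(uint64_t bits) {  // stand-in for uint64_to_double
      double d;
      std::memcpy(&d, &bits, sizeof d);
      return d;
    }

    int main() {
      double x = 1.0;  // IEEE-754 bit pattern 0x3FF0000000000000
      uint32_t hi = static_cast<uint32_t>(BitsOf(x) >> 32);  // DoubleHi -> 0x3FF00000
      uint32_t lo = static_cast<uint32_t>(BitsOf(x));        // DoubleLo -> 0x00000000
      double y = DoubleOf((static_cast<uint64_t>(hi) << 32) | lo);  // ConstructDouble
      std::printf("hi=%08x lo=%08x y=%g\n", hi, lo, y);  // hi=3ff00000 lo=00000000 y=1
      return 0;
    }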
@@ -7775,16 +7853,6 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_Math_floor) {
}
-RUNTIME_FUNCTION(MaybeObject*, Runtime_Math_log) {
- SealHandleScope shs(isolate);
- ASSERT(args.length() == 1);
- isolate->counters()->math_log()->Increment();
-
- CONVERT_DOUBLE_ARG_CHECKED(x, 0);
- return isolate->heap()->AllocateHeapNumber(std::log(x));
-}
-
-
// Slow version of Math.pow. We check for fast paths for special cases.
// Used if SSE2/VFP3 is not available.
RUNTIME_FUNCTION(MaybeObject*, Runtime_Math_pow) {
@@ -7880,6 +7948,16 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_Math_sqrt) {
}
+RUNTIME_FUNCTION(MaybeObject*, Runtime_Math_fround) {
+ SealHandleScope shs(isolate);
+ ASSERT(args.length() == 1);
+
+ CONVERT_DOUBLE_ARG_CHECKED(x, 0);
+ float xf = static_cast<float>(x);
+ return isolate->heap()->AllocateHeapNumber(xf);
+}
+
+
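What the double-to-float-to-double round trip above amounts to, for example:

    // static_cast<float>(1.1) == 1.10000002384185791015625f (nearest float to 1.1);
    // widened back to a double and boxed, the runtime call returns
    // 1.100000023841858..., i.e. the value Math.fround(1.1) produces.
    // Doubles already exactly representable as floats (1.5, -0.0, Infinity) pass
    // through unchanged, and NaN stays NaN.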
RUNTIME_FUNCTION(MaybeObject*, Runtime_DateMakeDay) {
SealHandleScope shs(isolate);
ASSERT(args.length() == 2);
@@ -7928,7 +8006,7 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_DateSetValue) {
}
-RUNTIME_FUNCTION(MaybeObject*, Runtime_NewArgumentsFast) {
+RUNTIME_FUNCTION(MaybeObject*, RuntimeHidden_NewArgumentsFast) {
HandleScope scope(isolate);
ASSERT(args.length() == 3);
@@ -7946,11 +8024,11 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_NewArgumentsFast) {
Handle<FixedArray> parameter_map =
isolate->factory()->NewFixedArray(mapped_count + 2, NOT_TENURED);
parameter_map->set_map(
- isolate->heap()->non_strict_arguments_elements_map());
+ isolate->heap()->sloppy_arguments_elements_map());
Handle<Map> old_map(result->map());
Handle<Map> new_map = isolate->factory()->CopyMap(old_map);
- new_map->set_elements_kind(NON_STRICT_ARGUMENTS_ELEMENTS);
+ new_map->set_elements_kind(SLOPPY_ARGUMENTS_ELEMENTS);
result->set_map(*new_map);
result->set_elements(*parameter_map);
@@ -8023,7 +8101,7 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_NewArgumentsFast) {
}
-RUNTIME_FUNCTION(MaybeObject*, Runtime_NewStrictArgumentsFast) {
+RUNTIME_FUNCTION(MaybeObject*, RuntimeHidden_NewStrictArgumentsFast) {
SealHandleScope shs(isolate);
ASSERT(args.length() == 3);
@@ -8056,7 +8134,7 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_NewStrictArgumentsFast) {
}
-RUNTIME_FUNCTION(MaybeObject*, Runtime_NewClosureFromStubFailure) {
+RUNTIME_FUNCTION(MaybeObject*, RuntimeHidden_NewClosureFromStubFailure) {
HandleScope scope(isolate);
ASSERT(args.length() == 1);
CONVERT_ARG_HANDLE_CHECKED(SharedFunctionInfo, shared, 0);
@@ -8070,7 +8148,7 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_NewClosureFromStubFailure) {
}
-RUNTIME_FUNCTION(MaybeObject*, Runtime_NewClosure) {
+RUNTIME_FUNCTION(MaybeObject*, RuntimeHidden_NewClosure) {
HandleScope scope(isolate);
ASSERT(args.length() == 3);
CONVERT_ARG_HANDLE_CHECKED(Context, context, 0);
@@ -8261,12 +8339,9 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_NewObjectFromBound) {
}
-RUNTIME_FUNCTION(MaybeObject*, Runtime_NewObject) {
- HandleScope scope(isolate);
- ASSERT(args.length() == 1);
-
- Handle<Object> constructor = args.at<Object>(0);
-
+static MaybeObject* Runtime_NewObjectHelper(Isolate* isolate,
+ Handle<Object> constructor,
+ Handle<AllocationSite> site) {
// If the constructor isn't a proper function we throw a type error.
if (!constructor->IsJSFunction()) {
Vector< Handle<Object> > arguments = HandleVector(&constructor, 1);
@@ -8324,7 +8399,12 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_NewObject) {
shared->CompleteInobjectSlackTracking();
}
- Handle<JSObject> result = isolate->factory()->NewJSObject(function);
+ Handle<JSObject> result;
+ if (site.is_null()) {
+ result = isolate->factory()->NewJSObject(function);
+ } else {
+ result = isolate->factory()->NewJSObjectWithMemento(function, site);
+ }
RETURN_IF_EMPTY_HANDLE(isolate, result);
isolate->counters()->constructed_objects()->Increment();
@@ -8334,7 +8414,35 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_NewObject) {
}
-RUNTIME_FUNCTION(MaybeObject*, Runtime_FinalizeInstanceSize) {
+RUNTIME_FUNCTION(MaybeObject*, RuntimeHidden_NewObject) {
+ HandleScope scope(isolate);
+ ASSERT(args.length() == 1);
+
+ Handle<Object> constructor = args.at<Object>(0);
+ return Runtime_NewObjectHelper(isolate,
+ constructor,
+ Handle<AllocationSite>::null());
+}
+
+
+RUNTIME_FUNCTION(MaybeObject*, RuntimeHidden_NewObjectWithAllocationSite) {
+ HandleScope scope(isolate);
+ ASSERT(args.length() == 2);
+
+ Handle<Object> constructor = args.at<Object>(1);
+ Handle<Object> feedback = args.at<Object>(0);
+ Handle<AllocationSite> site;
+ if (feedback->IsAllocationSite()) {
+ // The feedback can be an AllocationSite or undefined.
+ site = Handle<AllocationSite>::cast(feedback);
+ }
+ return Runtime_NewObjectHelper(isolate,
+ constructor,
+ site);
+}
+
+
+RUNTIME_FUNCTION(MaybeObject*, RuntimeHidden_FinalizeInstanceSize) {
HandleScope scope(isolate);
ASSERT(args.length() == 1);
@@ -8345,7 +8453,7 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_FinalizeInstanceSize) {
}
-RUNTIME_FUNCTION(MaybeObject*, Runtime_CompileUnoptimized) {
+RUNTIME_FUNCTION(MaybeObject*, RuntimeHidden_CompileUnoptimized) {
HandleScope scope(isolate);
ASSERT(args.length() == 1);
@@ -8374,7 +8482,7 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_CompileUnoptimized) {
}
-RUNTIME_FUNCTION(MaybeObject*, Runtime_CompileOptimized) {
+RUNTIME_FUNCTION(MaybeObject*, RuntimeHidden_CompileOptimized) {
HandleScope scope(isolate);
ASSERT(args.length() == 2);
Handle<JSFunction> function = args.at<JSFunction>(0);
@@ -8437,7 +8545,7 @@ class ActivationsFinder : public ThreadVisitor {
};
-RUNTIME_FUNCTION(MaybeObject*, Runtime_NotifyStubFailure) {
+RUNTIME_FUNCTION(MaybeObject*, RuntimeHidden_NotifyStubFailure) {
HandleScope scope(isolate);
ASSERT(args.length() == 0);
Deoptimizer* deoptimizer = Deoptimizer::Grab(isolate);
@@ -8447,7 +8555,7 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_NotifyStubFailure) {
}
-RUNTIME_FUNCTION(MaybeObject*, Runtime_NotifyDeoptimized) {
+RUNTIME_FUNCTION(MaybeObject*, RuntimeHidden_NotifyDeoptimized) {
HandleScope scope(isolate);
ASSERT(args.length() == 1);
RUNTIME_ASSERT(args[0]->IsSmi());
@@ -8490,6 +8598,10 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_NotifyDeoptimized) {
PrintF("]\n");
}
function->ReplaceCode(function->shared()->code());
+ // Evict optimized code for this function from the cache so that it
+ // doesn't get used for new closures.
+ function->shared()->EvictFromOptimizedCodeMap(*optimized_code,
+ "notify deoptimized");
}
} else {
// TODO(titzer): we should probably do DeoptimizeCodeList(code)
@@ -8497,10 +8609,6 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_NotifyDeoptimized) {
// If there is an index by shared function info, all the better.
Deoptimizer::DeoptimizeFunction(*function);
}
- // Evict optimized code for this function from the cache so that it doesn't
- // get used for new closures.
- function->shared()->EvictFromOptimizedCodeMap(*optimized_code,
- "notify deoptimized");
return isolate->heap()->undefined_value();
}
@@ -8525,7 +8633,7 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_ClearFunctionTypeFeedback) {
Code* unoptimized = function->shared()->code();
if (unoptimized->kind() == Code::FUNCTION) {
unoptimized->ClearInlineCaches();
- unoptimized->ClearTypeFeedbackCells(isolate->heap());
+ unoptimized->ClearTypeFeedbackInfo(isolate->heap());
}
return isolate->heap()->undefined_value();
}
@@ -8587,7 +8695,6 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_NeverOptimizeFunction) {
HandleScope scope(isolate);
ASSERT(args.length() == 1);
CONVERT_ARG_CHECKED(JSFunction, function, 0);
- ASSERT(!function->IsOptimized());
function->shared()->set_optimization_disabled(true);
return isolate->heap()->undefined_value();
}
@@ -8630,6 +8737,7 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_GetOptimizationStatus) {
RUNTIME_FUNCTION(MaybeObject*, Runtime_UnblockConcurrentRecompilation) {
RUNTIME_ASSERT(FLAG_block_concurrent_recompilation);
+ RUNTIME_ASSERT(isolate->concurrent_recompilation_enabled());
isolate->optimizing_compiler_thread()->Unblock();
return isolate->heap()->undefined_value();
}
@@ -8768,7 +8876,9 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_CompileForOnStackReplacement) {
PrintF(" at AST id %d]\n", ast_id.ToInt());
}
- function->ReplaceCode(function->shared()->code());
+ if (!function->IsOptimized()) {
+ function->ReplaceCode(function->shared()->code());
+ }
return NULL;
}
@@ -8869,6 +8979,7 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_Apply) {
for (int i = 0; i < argc; ++i) {
argv[i] = Object::GetElement(isolate, arguments, offset + i);
+ RETURN_IF_EMPTY_HANDLE(isolate, argv[i]);
}
bool threw;
@@ -8896,7 +9007,7 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_GetConstructorDelegate) {
}
-RUNTIME_FUNCTION(MaybeObject*, Runtime_NewGlobalContext) {
+RUNTIME_FUNCTION(MaybeObject*, RuntimeHidden_NewGlobalContext) {
SealHandleScope shs(isolate);
ASSERT(args.length() == 2);
@@ -8915,7 +9026,7 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_NewGlobalContext) {
}
-RUNTIME_FUNCTION(MaybeObject*, Runtime_NewFunctionContext) {
+RUNTIME_FUNCTION(MaybeObject*, RuntimeHidden_NewFunctionContext) {
SealHandleScope shs(isolate);
ASSERT(args.length() == 1);
@@ -8925,7 +9036,7 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_NewFunctionContext) {
}
-RUNTIME_FUNCTION(MaybeObject*, Runtime_PushWithContext) {
+RUNTIME_FUNCTION(MaybeObject*, RuntimeHidden_PushWithContext) {
SealHandleScope shs(isolate);
ASSERT(args.length() == 2);
JSReceiver* extension_object;
@@ -8969,7 +9080,7 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_PushWithContext) {
}
-RUNTIME_FUNCTION(MaybeObject*, Runtime_PushCatchContext) {
+RUNTIME_FUNCTION(MaybeObject*, RuntimeHidden_PushCatchContext) {
SealHandleScope shs(isolate);
ASSERT(args.length() == 3);
String* name = String::cast(args[0]);
@@ -8995,7 +9106,7 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_PushCatchContext) {
}
-RUNTIME_FUNCTION(MaybeObject*, Runtime_PushBlockContext) {
+RUNTIME_FUNCTION(MaybeObject*, RuntimeHidden_PushBlockContext) {
SealHandleScope shs(isolate);
ASSERT(args.length() == 2);
ScopeInfo* scope_info = ScopeInfo::cast(args[0]);
@@ -9027,7 +9138,7 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_IsJSModule) {
}
-RUNTIME_FUNCTION(MaybeObject*, Runtime_PushModuleContext) {
+RUNTIME_FUNCTION(MaybeObject*, RuntimeHidden_PushModuleContext) {
SealHandleScope shs(isolate);
ASSERT(args.length() == 2);
CONVERT_SMI_ARG_CHECKED(index, 0);
@@ -9062,7 +9173,7 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_PushModuleContext) {
}
-RUNTIME_FUNCTION(MaybeObject*, Runtime_DeclareModules) {
+RUNTIME_FUNCTION(MaybeObject*, RuntimeHidden_DeclareModules) {
HandleScope scope(isolate);
ASSERT(args.length() == 1);
CONVERT_ARG_HANDLE_CHECKED(FixedArray, descriptions, 0);
@@ -9082,7 +9193,7 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_DeclareModules) {
case VAR:
case LET:
case CONST:
- case CONST_HARMONY: {
+ case CONST_LEGACY: {
PropertyAttributes attr =
IsImmutableVariableMode(mode) ? FROZEN : SEALED;
Handle<AccessorInfo> info =
@@ -9095,7 +9206,7 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_DeclareModules) {
case MODULE: {
Object* referenced_context = Context::cast(host_context)->get(index);
Handle<JSModule> value(Context::cast(referenced_context)->module());
- JSReceiver::SetProperty(module, name, value, FROZEN, kStrictMode);
+ JSReceiver::SetProperty(module, name, value, FROZEN, STRICT);
break;
}
case INTERNAL:
@@ -9115,7 +9226,7 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_DeclareModules) {
}
-RUNTIME_FUNCTION(MaybeObject*, Runtime_DeleteContextSlot) {
+RUNTIME_FUNCTION(MaybeObject*, RuntimeHidden_DeleteContextSlot) {
HandleScope scope(isolate);
ASSERT(args.length() == 2);
@@ -9301,26 +9412,24 @@ static ObjectPair LoadContextSlotHelper(Arguments args,
}
-RUNTIME_FUNCTION(ObjectPair, Runtime_LoadContextSlot) {
+RUNTIME_FUNCTION(ObjectPair, RuntimeHidden_LoadContextSlot) {
return LoadContextSlotHelper(args, isolate, true);
}
-RUNTIME_FUNCTION(ObjectPair, Runtime_LoadContextSlotNoReferenceError) {
+RUNTIME_FUNCTION(ObjectPair, RuntimeHidden_LoadContextSlotNoReferenceError) {
return LoadContextSlotHelper(args, isolate, false);
}
-RUNTIME_FUNCTION(MaybeObject*, Runtime_StoreContextSlot) {
+RUNTIME_FUNCTION(MaybeObject*, RuntimeHidden_StoreContextSlot) {
HandleScope scope(isolate);
ASSERT(args.length() == 4);
Handle<Object> value(args[0], isolate);
CONVERT_ARG_HANDLE_CHECKED(Context, context, 1);
CONVERT_ARG_HANDLE_CHECKED(String, name, 2);
- CONVERT_LANGUAGE_MODE_ARG(language_mode, 3);
- StrictModeFlag strict_mode = (language_mode == CLASSIC_MODE)
- ? kNonStrictMode : kStrictMode;
+ CONVERT_STRICT_MODE_ARG_CHECKED(strict_mode, 3);
int index;
PropertyAttributes attributes;
@@ -9347,7 +9456,7 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_StoreContextSlot) {
if ((attributes & READ_ONLY) == 0) {
// Context is a fixed array and set cannot fail.
context->set(index, *value);
- } else if (strict_mode == kStrictMode) {
+ } else if (strict_mode == STRICT) {
// Setting read only property in strict mode.
Handle<Object> error =
isolate->factory()->NewTypeError("strict_cannot_assign",
@@ -9369,25 +9478,25 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_StoreContextSlot) {
// The property was not found.
ASSERT(attributes == ABSENT);
- if (strict_mode == kStrictMode) {
+ if (strict_mode == STRICT) {
// Throw in strict mode (assignment to undefined variable).
Handle<Object> error =
isolate->factory()->NewReferenceError(
"not_defined", HandleVector(&name, 1));
return isolate->Throw(*error);
}
- // In non-strict mode, the property is added to the global object.
+ // In sloppy mode, the property is added to the global object.
attributes = NONE;
object = Handle<JSReceiver>(isolate->context()->global_object());
}
// Set the property if it's not read only or doesn't yet exist.
if ((attributes & READ_ONLY) == 0 ||
- (object->GetLocalPropertyAttribute(*name) == ABSENT)) {
+ (JSReceiver::GetLocalPropertyAttribute(object, name) == ABSENT)) {
RETURN_IF_EMPTY_HANDLE(
isolate,
JSReceiver::SetProperty(object, name, value, NONE, strict_mode));
- } else if (strict_mode == kStrictMode && (attributes & READ_ONLY) != 0) {
+ } else if (strict_mode == STRICT && (attributes & READ_ONLY) != 0) {
// Setting read only property in strict mode.
Handle<Object> error =
isolate->factory()->NewTypeError(
@@ -9398,7 +9507,7 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_StoreContextSlot) {
}
-RUNTIME_FUNCTION(MaybeObject*, Runtime_Throw) {
+RUNTIME_FUNCTION(MaybeObject*, RuntimeHidden_Throw) {
HandleScope scope(isolate);
ASSERT(args.length() == 1);
@@ -9406,7 +9515,7 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_Throw) {
}
-RUNTIME_FUNCTION(MaybeObject*, Runtime_ReThrow) {
+RUNTIME_FUNCTION(MaybeObject*, RuntimeHidden_ReThrow) {
HandleScope scope(isolate);
ASSERT(args.length() == 1);
@@ -9414,14 +9523,14 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_ReThrow) {
}
-RUNTIME_FUNCTION(MaybeObject*, Runtime_PromoteScheduledException) {
+RUNTIME_FUNCTION(MaybeObject*, RuntimeHidden_PromoteScheduledException) {
SealHandleScope shs(isolate);
ASSERT_EQ(0, args.length());
return isolate->PromoteScheduledException();
}
-RUNTIME_FUNCTION(MaybeObject*, Runtime_ThrowReferenceError) {
+RUNTIME_FUNCTION(MaybeObject*, RuntimeHidden_ThrowReferenceError) {
HandleScope scope(isolate);
ASSERT(args.length() == 1);
@@ -9433,7 +9542,7 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_ThrowReferenceError) {
}
-RUNTIME_FUNCTION(MaybeObject*, Runtime_ThrowNotDateError) {
+RUNTIME_FUNCTION(MaybeObject*, RuntimeHidden_ThrowNotDateError) {
HandleScope scope(isolate);
ASSERT(args.length() == 0);
return isolate->Throw(*isolate->factory()->NewTypeError(
@@ -9441,19 +9550,20 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_ThrowNotDateError) {
}
-RUNTIME_FUNCTION(MaybeObject*, Runtime_ThrowMessage) {
+RUNTIME_FUNCTION(MaybeObject*, RuntimeHidden_ThrowMessage) {
HandleScope scope(isolate);
ASSERT(args.length() == 1);
CONVERT_SMI_ARG_CHECKED(message_id, 0);
const char* message = GetBailoutReason(
static_cast<BailoutReason>(message_id));
- Handle<Name> message_handle =
+ Handle<String> message_handle =
isolate->factory()->NewStringFromAscii(CStrVector(message));
+ RETURN_IF_EMPTY_HANDLE(isolate, message_handle);
return isolate->Throw(*message_handle);
}
-RUNTIME_FUNCTION(MaybeObject*, Runtime_StackGuard) {
+RUNTIME_FUNCTION(MaybeObject*, RuntimeHidden_StackGuard) {
SealHandleScope shs(isolate);
ASSERT(args.length() == 0);
@@ -9466,7 +9576,7 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_StackGuard) {
}
-RUNTIME_FUNCTION(MaybeObject*, Runtime_TryInstallOptimizedCode) {
+RUNTIME_FUNCTION(MaybeObject*, RuntimeHidden_TryInstallOptimizedCode) {
HandleScope scope(isolate);
ASSERT(args.length() == 1);
CONVERT_ARG_HANDLE_CHECKED(JSFunction, function, 0);
@@ -9483,7 +9593,7 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_TryInstallOptimizedCode) {
}
-RUNTIME_FUNCTION(MaybeObject*, Runtime_Interrupt) {
+RUNTIME_FUNCTION(MaybeObject*, RuntimeHidden_Interrupt) {
SealHandleScope shs(isolate);
ASSERT(args.length() == 0);
return Execution::HandleStackGuardInterrupt(isolate);
@@ -9628,8 +9738,8 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_DateLocalTimezone) {
ASSERT(args.length() == 1);
CONVERT_DOUBLE_ARG_CHECKED(x, 0);
- int64_t time = isolate->date_cache()->EquivalentTime(static_cast<int64_t>(x));
- const char* zone = OS::LocalTimezone(static_cast<double>(time));
+ const char* zone =
+ isolate->date_cache()->LocalTimezone(static_cast<int64_t>(x));
return isolate->heap()->AllocateStringFromUtf8(CStrVector(zone));
}
@@ -9645,6 +9755,27 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_DateToUTC) {
}
+RUNTIME_FUNCTION(MaybeObject*, Runtime_DateCacheVersion) {
+ HandleScope hs(isolate);
+ ASSERT(args.length() == 0);
+ if (!isolate->eternal_handles()->Exists(EternalHandles::DATE_CACHE_VERSION)) {
+ Handle<FixedArray> date_cache_version =
+ isolate->factory()->NewFixedArray(1, TENURED);
+ date_cache_version->set(0, Smi::FromInt(0));
+ isolate->eternal_handles()->CreateSingleton(
+ isolate, *date_cache_version, EternalHandles::DATE_CACHE_VERSION);
+ }
+ Handle<FixedArray> date_cache_version =
+ Handle<FixedArray>::cast(isolate->eternal_handles()->GetSingleton(
+ EternalHandles::DATE_CACHE_VERSION));
+ // Return result as a JS array.
+ Handle<JSObject> result =
+ isolate->factory()->NewJSObject(isolate->array_function());
+ JSArray::SetContent(Handle<JSArray>::cast(result), date_cache_version);
+ return *result;
+}
+
+
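The new %DateCacheVersion intrinsic above lazily creates a single tenured FixedArray holding a version number and hands it back wrapped in a JS array. The hunk only shows the read side; the apparent intent is an invalidation counter that cached date/locale data can be compared against. A minimal standalone sketch of that pattern in plain C++ (the class and method names are illustrative, not V8 API):

    #include <cstdint>
    #include <string>

    // Illustrative stand-in for the tenured version cell created above.
    class DateCacheVersionCell {
     public:
      uint32_t value() const { return version_; }
      void Bump() { ++version_; }  // bumped whenever the date cache is reset
     private:
      uint32_t version_ = 0;
    };

    // A consumer caches derived data and revalidates it against the version.
    class CachedTimezoneName {
     public:
      explicit CachedTimezoneName(const DateCacheVersionCell* cell) : cell_(cell) {}
      const std::string& Get() {
        if (cached_version_ != cell_->value()) {
          cached_version_ = cell_->value();
          name_ = ComputeTimezoneName();  // the expensive recomputation
        }
        return name_;
      }
     private:
      static std::string ComputeTimezoneName() { return "UTC"; }  // placeholder
      const DateCacheVersionCell* cell_;
      uint32_t cached_version_ = ~0u;
      std::string name_;
    };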
RUNTIME_FUNCTION(MaybeObject*, Runtime_GlobalReceiver) {
SealHandleScope shs(isolate);
ASSERT(args.length() == 1);
@@ -9726,7 +9857,7 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_CompileString) {
ParseRestriction restriction = function_literal_only
? ONLY_SINGLE_FUNCTION_LITERAL : NO_PARSE_RESTRICTION;
Handle<JSFunction> fun = Compiler::GetFunctionFromEval(
- source, context, CLASSIC_MODE, restriction, RelocInfo::kNoPosition);
+ source, context, SLOPPY, restriction, RelocInfo::kNoPosition);
RETURN_IF_EMPTY_HANDLE(isolate, fun);
return *fun;
}
@@ -9735,7 +9866,7 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_CompileString) {
static ObjectPair CompileGlobalEval(Isolate* isolate,
Handle<String> source,
Handle<Object> receiver,
- LanguageMode language_mode,
+ StrictMode strict_mode,
int scope_position) {
Handle<Context> context = Handle<Context>(isolate->context());
Handle<Context> native_context = Handle<Context>(context->native_context());
@@ -9755,14 +9886,14 @@ static ObjectPair CompileGlobalEval(Isolate* isolate,
// and return the compiled function bound in the local context.
static const ParseRestriction restriction = NO_PARSE_RESTRICTION;
Handle<JSFunction> compiled = Compiler::GetFunctionFromEval(
- source, context, language_mode, restriction, scope_position);
+ source, context, strict_mode, restriction, scope_position);
RETURN_IF_EMPTY_HANDLE_VALUE(isolate, compiled,
MakePair(Failure::Exception(), NULL));
return MakePair(*compiled, *receiver);
}
-RUNTIME_FUNCTION(ObjectPair, Runtime_ResolvePossiblyDirectEval) {
+RUNTIME_FUNCTION(ObjectPair, RuntimeHidden_ResolvePossiblyDirectEval) {
HandleScope scope(isolate);
ASSERT(args.length() == 5);
@@ -9778,12 +9909,14 @@ RUNTIME_FUNCTION(ObjectPair, Runtime_ResolvePossiblyDirectEval) {
return MakePair(*callee, isolate->heap()->undefined_value());
}
- CONVERT_LANGUAGE_MODE_ARG(language_mode, 3);
+ ASSERT(args[3]->IsSmi());
+ ASSERT(args.smi_at(3) == SLOPPY || args.smi_at(3) == STRICT);
+ StrictMode strict_mode = static_cast<StrictMode>(args.smi_at(3));
ASSERT(args[4]->IsSmi());
return CompileGlobalEval(isolate,
args.at<String>(1),
args.at<Object>(2),
- language_mode,
+ strict_mode,
args.smi_at(4));
}
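Several hunks in this file replace the old LanguageMode/StrictModeFlag plumbing (CONVERT_LANGUAGE_MODE_ARG, kNonStrictMode/kStrictMode) with the two-valued StrictMode enum, decoded straight from a Smi argument as shown above. A hedged sketch of that decoding step in isolation; the enum values mirror the diff, but the helper name is hypothetical:

    #include <cassert>

    // Two-valued mode enum in the spirit of the new StrictMode type.
    enum StrictMode { SLOPPY = 0, STRICT = 1 };

    // Decode a small integer argument (a Smi in V8) into the enum,
    // rejecting anything outside the two legal values.
    inline StrictMode ToStrictMode(int smi_value) {
      assert(smi_value == SLOPPY || smi_value == STRICT);
      return static_cast<StrictMode>(smi_value);
    }

The eval resolver above does the equivalent of ToStrictMode(args.smi_at(3)) before forwarding the mode to Compiler::GetFunctionFromEval.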
@@ -9811,7 +9944,7 @@ static MaybeObject* Allocate(Isolate* isolate,
}
-RUNTIME_FUNCTION(MaybeObject*, Runtime_AllocateInNewSpace) {
+RUNTIME_FUNCTION(MaybeObject*, RuntimeHidden_AllocateInNewSpace) {
SealHandleScope shs(isolate);
ASSERT(args.length() == 1);
CONVERT_SMI_ARG_CHECKED(size, 0);
@@ -9819,7 +9952,7 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_AllocateInNewSpace) {
}
-RUNTIME_FUNCTION(MaybeObject*, Runtime_AllocateInTargetSpace) {
+RUNTIME_FUNCTION(MaybeObject*, RuntimeHidden_AllocateInTargetSpace) {
SealHandleScope shs(isolate);
ASSERT(args.length() == 2);
CONVERT_SMI_ARG_CHECKED(size, 0);
@@ -9848,7 +9981,7 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_PushIfAbsent) {
// Strict not needed. Used for cycle detection in Array join implementation.
RETURN_IF_EMPTY_HANDLE(isolate, JSObject::SetFastElement(array, length,
element,
- kNonStrictMode,
+ SLOPPY,
true));
return isolate->heap()->true_value();
}
@@ -9930,11 +10063,9 @@ class ArrayConcatVisitor {
isolate_->factory()->NewNumber(static_cast<double>(index_offset_));
Handle<Map> map;
if (fast_elements_) {
- map = isolate_->factory()->GetElementsTransitionMap(array,
- FAST_HOLEY_ELEMENTS);
+ map = JSObject::GetElementsTransitionMap(array, FAST_HOLEY_ELEMENTS);
} else {
- map = isolate_->factory()->GetElementsTransitionMap(array,
- DICTIONARY_ELEMENTS);
+ map = JSObject::GetElementsTransitionMap(array, DICTIONARY_ELEMENTS);
}
array->set_map(*map);
array->set_length(*length);
@@ -10033,7 +10164,7 @@ static uint32_t EstimateElementCount(Handle<JSArray> array) {
}
break;
}
- case NON_STRICT_ARGUMENTS_ELEMENTS:
+ case SLOPPY_ARGUMENTS_ELEMENTS:
#define TYPED_ARRAY_CASE(Type, type, TYPE, ctype, size) \
case EXTERNAL_##TYPE##_ELEMENTS: \
case TYPE##_ELEMENTS: \
@@ -10468,7 +10599,7 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_ArrayConcat) {
Handle<JSArray> array = isolate->factory()->NewJSArray(0);
Smi* length = Smi::FromInt(j);
Handle<Map> map;
- map = isolate->factory()->GetElementsTransitionMap(array, kind);
+ map = JSObject::GetElementsTransitionMap(array, kind);
array->set_map(*map);
array->set_length(length);
array->set_elements(*double_storage);
@@ -10532,6 +10663,7 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_GlobalPrint) {
// and are followed by non-existing element. Does not change the length
// property.
// Returns the number of non-undefined elements collected.
+// Returns -1 if hole removal is not supported by this method.
RUNTIME_FUNCTION(MaybeObject*, Runtime_RemoveArrayHoles) {
HandleScope scope(isolate);
ASSERT(args.length() == 2);
@@ -10792,14 +10924,10 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_DebugGetPropertyDetails) {
uint32_t index;
if (name->AsArrayIndex(&index)) {
Handle<FixedArray> details = isolate->factory()->NewFixedArray(2);
- Object* element_or_char;
- { MaybeObject* maybe_element_or_char =
- Runtime::GetElementOrCharAt(isolate, obj, index);
- if (!maybe_element_or_char->ToObject(&element_or_char)) {
- return maybe_element_or_char;
- }
- }
- details->set(0, element_or_char);
+ Handle<Object> element_or_char =
+ Runtime::GetElementOrCharAt(isolate, obj, index);
+ RETURN_IF_EMPTY_HANDLE(isolate, element_or_char);
+ details->set(0, *element_or_char);
details->set(
1, PropertyDetails(NONE, NORMAL, Representation::None()).AsSmi());
return *isolate->factory()->NewJSArrayWithElements(details);
@@ -10935,8 +11063,9 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_DebugIndexedInterceptorElementValue) {
CONVERT_ARG_HANDLE_CHECKED(JSObject, obj, 0);
RUNTIME_ASSERT(obj->HasIndexedInterceptor());
CONVERT_NUMBER_CHECKED(uint32_t, index, Uint32, args[1]);
-
- return obj->GetElementWithInterceptor(*obj, index);
+ Handle<Object> result = JSObject::GetElementWithInterceptor(obj, obj, index);
+ RETURN_IF_EMPTY_HANDLE(isolate, result);
+ return *result;
}
@@ -11178,8 +11307,10 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_GetFrameDetails) {
VariableMode mode;
InitializationFlag init_flag;
locals->set(i * 2, *name);
- locals->set(i * 2 + 1, context->get(
- scope_info->ContextSlotIndex(*name, &mode, &init_flag)));
+ int context_slot_index =
+ scope_info->ContextSlotIndex(*name, &mode, &init_flag);
+ Object* value = context->get(context_slot_index);
+ locals->set(i * 2 + 1, value);
}
}
@@ -11320,7 +11451,7 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_GetFrameDetails) {
// THE FRAME ITERATOR TO WRAP THE RECEIVER.
Handle<Object> receiver(it.frame()->receiver(), isolate);
if (!receiver->IsJSObject() &&
- shared->is_classic_mode() &&
+ shared->strict_mode() == SLOPPY &&
!function->IsBuiltin()) {
// If the receiver is not a JSObject and the function is not a
// builtin or strict-mode we have hit an optimization where a
@@ -11346,6 +11477,14 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_GetFrameDetails) {
}
+static bool ParameterIsShadowedByContextLocal(Handle<ScopeInfo> info,
+ int index) {
+ VariableMode mode;
+ InitializationFlag flag;
+ return info->ContextSlotIndex(info->ParameterName(index), &mode, &flag) != -1;
+}
+
+
// Create a plain JSObject which materializes the local scope for the specified
// frame.
static Handle<JSObject> MaterializeStackLocalsWithFrameInspector(
@@ -11358,22 +11497,20 @@ static Handle<JSObject> MaterializeStackLocalsWithFrameInspector(
// First fill all parameters.
for (int i = 0; i < scope_info->ParameterCount(); ++i) {
- Handle<String> name(scope_info->ParameterName(i));
- VariableMode mode;
- InitializationFlag init_flag;
// Do not materialize the parameter if it is shadowed by a context local.
- if (scope_info->ContextSlotIndex(*name, &mode, &init_flag) != -1) continue;
+ if (ParameterIsShadowedByContextLocal(scope_info, i)) continue;
+ HandleScope scope(isolate);
Handle<Object> value(i < frame_inspector->GetParametersCount()
? frame_inspector->GetParameter(i)
: isolate->heap()->undefined_value(),
isolate);
ASSERT(!value->IsTheHole());
+ Handle<String> name(scope_info->ParameterName(i));
RETURN_IF_EMPTY_HANDLE_VALUE(
isolate,
- Runtime::SetObjectProperty(
- isolate, target, name, value, NONE, kNonStrictMode),
+ Runtime::SetObjectProperty(isolate, target, name, value, NONE, SLOPPY),
Handle<JSObject>());
}
@@ -11385,8 +11522,7 @@ static Handle<JSObject> MaterializeStackLocalsWithFrameInspector(
RETURN_IF_EMPTY_HANDLE_VALUE(
isolate,
- Runtime::SetObjectProperty(
- isolate, target, name, value, NONE, kNonStrictMode),
+ Runtime::SetObjectProperty(isolate, target, name, value, NONE, SLOPPY),
Handle<JSObject>());
}
@@ -11411,10 +11547,13 @@ static void UpdateStackLocalsFromMaterializedObject(Isolate* isolate,
// Parameters.
for (int i = 0; i < scope_info->ParameterCount(); ++i) {
+ // Shadowed parameters were not materialized.
+ if (ParameterIsShadowedByContextLocal(scope_info, i)) continue;
+
ASSERT(!frame->GetParameter(i)->IsTheHole());
HandleScope scope(isolate);
- Handle<Object> value = GetProperty(
- isolate, target, Handle<String>(scope_info->ParameterName(i)));
+ Handle<String> name(scope_info->ParameterName(i));
+ Handle<Object> value = GetProperty(isolate, target, name);
frame->SetParameterValue(i, *value);
}
@@ -11469,7 +11608,7 @@ static Handle<JSObject> MaterializeLocalContext(Isolate* isolate,
key,
GetProperty(isolate, ext, key),
NONE,
- kNonStrictMode),
+ SLOPPY),
Handle<JSObject>());
}
}
@@ -11570,8 +11709,7 @@ static bool SetLocalVariableValue(Isolate* isolate,
// We don't expect this to do anything except replacing
// property value.
Runtime::SetObjectProperty(isolate, ext, variable_name, new_value,
- NONE,
- kNonStrictMode);
+ NONE, SLOPPY);
return true;
}
}
@@ -11619,8 +11757,7 @@ static Handle<JSObject> MaterializeClosure(Isolate* isolate,
isolate,
Runtime::SetObjectProperty(isolate, closure_scope, key,
GetProperty(isolate, ext, key),
- NONE,
- kNonStrictMode),
+ NONE, SLOPPY),
Handle<JSObject>());
}
}
@@ -11652,8 +11789,7 @@ static bool SetClosureVariableValue(Isolate* isolate,
if (JSReceiver::HasProperty(ext, variable_name)) {
// We don't expect this to do anything except replacing property value.
Runtime::SetObjectProperty(isolate, ext, variable_name, new_value,
- NONE,
- kNonStrictMode);
+ NONE, SLOPPY);
return true;
}
}
@@ -11675,8 +11811,7 @@ static Handle<JSObject> MaterializeCatchScope(Isolate* isolate,
RETURN_IF_EMPTY_HANDLE_VALUE(
isolate,
Runtime::SetObjectProperty(isolate, catch_scope, name, thrown_object,
- NONE,
- kNonStrictMode),
+ NONE, SLOPPY),
Handle<JSObject>());
return catch_scope;
}
@@ -11760,7 +11895,8 @@ class ScopeIterator {
ScopeIterator(Isolate* isolate,
JavaScriptFrame* frame,
- int inlined_jsframe_index)
+ int inlined_jsframe_index,
+ bool ignore_nested_scopes = false)
: isolate_(isolate),
frame_(frame),
inlined_jsframe_index_(inlined_jsframe_index),
@@ -11784,19 +11920,31 @@ class ScopeIterator {
// Return if ensuring debug info failed.
return;
}
- Handle<DebugInfo> debug_info = Debug::GetDebugInfo(shared_info);
- // Find the break point where execution has stopped.
- BreakLocationIterator break_location_iterator(debug_info,
- ALL_BREAK_LOCATIONS);
- // pc points to the instruction after the current one, possibly a break
- // location as well. So the "- 1" to exclude it from the search.
- break_location_iterator.FindBreakLocationFromAddress(frame->pc() - 1);
- if (break_location_iterator.IsExit()) {
- // We are within the return sequence. At the momemt it is not possible to
+ // Currently it takes too much time to find nested scopes due to script
+ // parsing. Sometimes we want to run the ScopeIterator as fast as possible
+ // (for example, while collecting async call stacks on every
+ // addEventListener call), even if we drop some nested scopes.
+ // Later we may optimize getting the nested scopes (cache the result?)
+ // and include nested scopes into the "fast" iteration case as well.
+ if (!ignore_nested_scopes) {
+ Handle<DebugInfo> debug_info = Debug::GetDebugInfo(shared_info);
+
+ // Find the break point where execution has stopped.
+ BreakLocationIterator break_location_iterator(debug_info,
+ ALL_BREAK_LOCATIONS);
+ // pc points to the instruction after the current one, possibly a break
+ // location as well. So the "- 1" to exclude it from the search.
+ break_location_iterator.FindBreakLocationFromAddress(frame->pc() - 1);
+
+      // Within the return sequence it is currently not possible to
// get a source position which is consistent with the current scope chain.
// Thus all nested with, catch and block contexts are skipped and we only
// provide the function scope.
+ ignore_nested_scopes = break_location_iterator.IsExit();
+ }
+
+ if (ignore_nested_scopes) {
if (scope_info->HasContext()) {
context_ = Handle<Context>(context_->declaration_context(), isolate_);
} else {
@@ -11804,7 +11952,7 @@ class ScopeIterator {
context_ = Handle<Context>(context_->previous(), isolate_);
}
}
- if (scope_info->scope_type() != EVAL_SCOPE) {
+ if (scope_info->scope_type() == FUNCTION_SCOPE) {
nested_scope_chain_.Add(scope_info);
}
} else {
@@ -12189,7 +12337,7 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_GetStepInPositions) {
Smi* position_value = Smi::FromInt(break_location_iterator.position());
JSObject::SetElement(array, len,
Handle<Object>(position_value, isolate),
- NONE, kNonStrictMode);
+ NONE, SLOPPY);
len++;
}
}
@@ -12209,7 +12357,7 @@ static const int kScopeDetailsObjectIndex = 1;
static const int kScopeDetailsSize = 2;
-static MaybeObject* MaterializeScopeDetails(Isolate* isolate,
+static Handle<JSObject> MaterializeScopeDetails(Isolate* isolate,
ScopeIterator* it) {
// Calculate the size of the result.
int details_size = kScopeDetailsSize;
@@ -12218,10 +12366,10 @@ static MaybeObject* MaterializeScopeDetails(Isolate* isolate,
// Fill in scope details.
details->set(kScopeDetailsTypeIndex, Smi::FromInt(it->Type()));
Handle<JSObject> scope_object = it->ScopeObject();
- RETURN_IF_EMPTY_HANDLE(isolate, scope_object);
+ RETURN_IF_EMPTY_HANDLE_VALUE(isolate, scope_object, Handle<JSObject>());
details->set(kScopeDetailsObjectIndex, *scope_object);
- return *isolate->factory()->NewJSArrayWithElements(details);
+ return isolate->factory()->NewJSArrayWithElements(details);
}
@@ -12262,7 +12410,58 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_GetScopeDetails) {
if (it.Done()) {
return isolate->heap()->undefined_value();
}
- return MaterializeScopeDetails(isolate, &it);
+ Handle<JSObject> details = MaterializeScopeDetails(isolate, &it);
+ RETURN_IF_EMPTY_HANDLE(isolate, details);
+ return *details;
+}
+
+
+// Return an array of scope details
+// args[0]: number: break id
+// args[1]: number: frame index
+// args[2]: number: inlined frame index
+// args[3]: boolean: ignore nested scopes
+//
+// The array returned contains arrays with the following information:
+// 0: Scope type
+// 1: Scope object
+RUNTIME_FUNCTION(MaybeObject*, Runtime_GetAllScopesDetails) {
+ HandleScope scope(isolate);
+ ASSERT(args.length() == 3 || args.length() == 4);
+
+ // Check arguments.
+ Object* check;
+ { MaybeObject* maybe_check = Runtime_CheckExecutionState(
+ RUNTIME_ARGUMENTS(isolate, args));
+ if (!maybe_check->ToObject(&check)) return maybe_check;
+ }
+ CONVERT_SMI_ARG_CHECKED(wrapped_id, 1);
+ CONVERT_NUMBER_CHECKED(int, inlined_jsframe_index, Int32, args[2]);
+
+ bool ignore_nested_scopes = false;
+ if (args.length() == 4) {
+ CONVERT_BOOLEAN_ARG_CHECKED(flag, 3);
+ ignore_nested_scopes = flag;
+ }
+
+ // Get the frame where the debugging is performed.
+ StackFrame::Id id = UnwrapFrameId(wrapped_id);
+ JavaScriptFrameIterator frame_it(isolate, id);
+ JavaScriptFrame* frame = frame_it.frame();
+
+ List<Handle<JSObject> > result(4);
+ ScopeIterator it(isolate, frame, inlined_jsframe_index, ignore_nested_scopes);
+ for (; !it.Done(); it.Next()) {
+ Handle<JSObject> details = MaterializeScopeDetails(isolate, &it);
+ RETURN_IF_EMPTY_HANDLE(isolate, details);
+ result.Add(details);
+ }
+
+ Handle<FixedArray> array = isolate->factory()->NewFixedArray(result.length());
+ for (int i = 0; i < result.length(); ++i) {
+ array->set(i, *result[i]);
+ }
+ return *isolate->factory()->NewJSArrayWithElements(array);
}
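The new %GetAllScopesDetails intrinsic combines two ideas visible in the hunk: the ignore_nested_scopes flag (args[3]) is forwarded to the ScopeIterator to skip the slow nested-scope discovery, and the per-scope results are built with the usual accumulate-then-copy shape (grow a list of handles, allocate a FixedArray of the final size, wrap it in a JSArray). A simplified, self-contained sketch of that second pattern with stand-in types (ScopeDetails, DetailsHandle and CollectAllScopes are invented for illustration):

    #include <cstddef>
    #include <memory>
    #include <vector>

    // Stand-ins for Handle<JSObject>, List<...> and FixedArray used above.
    struct ScopeDetails { int type; };
    using DetailsHandle = std::shared_ptr<ScopeDetails>;

    struct FixedDetailsArray {
      explicit FixedDetailsArray(size_t n) : slots(n) {}
      std::vector<DetailsHandle> slots;
    };

    // Accumulate an unknown number of per-scope results, then allocate the
    // final fixed-size array once and copy the handles into it.
    FixedDetailsArray CollectAllScopes(const std::vector<int>& scope_types) {
      std::vector<DetailsHandle> result;            // List<Handle<JSObject> >
      for (int type : scope_types) {
        result.push_back(std::make_shared<ScopeDetails>(ScopeDetails{type}));
      }
      FixedDetailsArray array(result.size());       // factory->NewFixedArray(n)
      for (size_t i = 0; i < result.size(); ++i) {
        array.slots[i] = result[i];                 // array->set(i, *result[i])
      }
      return array;                                 // NewJSArrayWithElements(...)
    }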
@@ -12301,7 +12500,9 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_GetFunctionScopeDetails) {
return isolate->heap()->undefined_value();
}
- return MaterializeScopeDetails(isolate, &it);
+ Handle<JSObject> details = MaterializeScopeDetails(isolate, &it);
+ RETURN_IF_EMPTY_HANDLE(isolate, details);
+ return *details;
}
@@ -12698,7 +12899,7 @@ static Handle<JSObject> MaterializeArgumentsObject(
isolate->factory()->arguments_string(),
arguments,
::NONE,
- kNonStrictMode);
+ SLOPPY);
return target;
}
@@ -12718,7 +12919,7 @@ static MaybeObject* DebugEvaluate(Isolate* isolate,
Handle<JSFunction> eval_fun =
Compiler::GetFunctionFromEval(source,
context,
- CLASSIC_MODE,
+ SLOPPY,
NO_PARSE_RESTRICTION,
RelocInfo::kNoPosition);
RETURN_IF_EMPTY_HANDLE(isolate, eval_fun);
@@ -12873,7 +13074,7 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_DebugGetLoadedScripts) {
// Return result as a JS array.
Handle<JSObject> result =
isolate->factory()->NewJSObject(isolate->array_function());
- isolate->factory()->SetContent(Handle<JSArray>::cast(result), instances);
+ JSArray::SetContent(Handle<JSArray>::cast(result), instances);
return *result;
}
@@ -12954,20 +13155,20 @@ static int DebugReferencedBy(HeapIterator* iterator,
// args[1]: constructor function for instances to exclude (Mirror)
// args[2]: the maximum number of objects to return
RUNTIME_FUNCTION(MaybeObject*, Runtime_DebugReferencedBy) {
- SealHandleScope shs(isolate);
+ HandleScope scope(isolate);
ASSERT(args.length() == 3);
// First perform a full GC in order to avoid references from dead objects.
- isolate->heap()->CollectAllGarbage(Heap::kMakeHeapIterableMask,
- "%DebugReferencedBy");
+ Heap* heap = isolate->heap();
+ heap->CollectAllGarbage(Heap::kMakeHeapIterableMask, "%DebugReferencedBy");
// The heap iterator reserves the right to do a GC to make the heap iterable.
// Due to the GC above we know it won't need to do that, but it seems cleaner
// to get the heap iterator constructed before we start having raw
// Object* locals that are not protected by handles.
// Check parameters.
- CONVERT_ARG_CHECKED(JSObject, target, 0);
- Object* instance_filter = args[1];
+ CONVERT_ARG_HANDLE_CHECKED(JSObject, target, 0);
+ Handle<Object> instance_filter = args.at<Object>(1);
RUNTIME_ASSERT(instance_filter->IsUndefined() ||
instance_filter->IsJSObject());
CONVERT_NUMBER_CHECKED(int32_t, max_references, Int32, args[2]);
@@ -12975,40 +13176,36 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_DebugReferencedBy) {
// Get the constructor function for context extension and arguments array.
- JSObject* arguments_boilerplate =
- isolate->context()->native_context()->arguments_boilerplate();
- JSFunction* arguments_function =
- JSFunction::cast(arguments_boilerplate->map()->constructor());
+ Handle<JSObject> arguments_boilerplate(
+ isolate->context()->native_context()->sloppy_arguments_boilerplate());
+ Handle<JSFunction> arguments_function(
+ JSFunction::cast(arguments_boilerplate->map()->constructor()));
// Get the number of referencing objects.
int count;
- Heap* heap = isolate->heap();
HeapIterator heap_iterator(heap);
count = DebugReferencedBy(&heap_iterator,
- target, instance_filter, max_references,
- NULL, 0, arguments_function);
+ *target, *instance_filter, max_references,
+ NULL, 0, *arguments_function);
// Allocate an array to hold the result.
- Object* object;
- { MaybeObject* maybe_object = heap->AllocateFixedArray(count);
- if (!maybe_object->ToObject(&object)) return maybe_object;
- }
- FixedArray* instances = FixedArray::cast(object);
+ Handle<FixedArray> instances = isolate->factory()->NewFixedArray(count);
// Fill the referencing objects.
// AllocateFixedArray above does not make the heap non-iterable.
ASSERT(heap->IsHeapIterable());
HeapIterator heap_iterator2(heap);
count = DebugReferencedBy(&heap_iterator2,
- target, instance_filter, max_references,
- instances, count, arguments_function);
+ *target, *instance_filter, max_references,
+ *instances, count, *arguments_function);
// Return result as JS array.
- Object* result;
- MaybeObject* maybe_result = heap->AllocateJSObject(
+ Handle<JSFunction> constructor(
isolate->context()->native_context()->array_function());
- if (!maybe_result->ToObject(&result)) return maybe_result;
- return JSArray::cast(result)->SetContent(instances);
+
+ Handle<JSObject> result = isolate->factory()->NewJSObject(constructor);
+ JSArray::SetContent(Handle<JSArray>::cast(result), instances);
+ return *result;
}
@@ -13048,7 +13245,7 @@ static int DebugConstructedBy(HeapIterator* iterator,
// args[0]: the constructor to find instances of
// args[1]: the maximum number of objects to return
RUNTIME_FUNCTION(MaybeObject*, Runtime_DebugConstructedBy) {
- SealHandleScope shs(isolate);
+ HandleScope scope(isolate);
ASSERT(args.length() == 2);
// First perform a full GC in order to avoid dead objects.
@@ -13056,7 +13253,7 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_DebugConstructedBy) {
heap->CollectAllGarbage(Heap::kMakeHeapIterableMask, "%DebugConstructedBy");
// Check parameters.
- CONVERT_ARG_CHECKED(JSFunction, constructor, 0);
+ CONVERT_ARG_HANDLE_CHECKED(JSFunction, constructor, 0);
CONVERT_NUMBER_CHECKED(int32_t, max_references, Int32, args[1]);
RUNTIME_ASSERT(max_references >= 0);
@@ -13064,34 +13261,29 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_DebugConstructedBy) {
int count;
HeapIterator heap_iterator(heap);
count = DebugConstructedBy(&heap_iterator,
- constructor,
+ *constructor,
max_references,
NULL,
0);
// Allocate an array to hold the result.
- Object* object;
- { MaybeObject* maybe_object = heap->AllocateFixedArray(count);
- if (!maybe_object->ToObject(&object)) return maybe_object;
- }
- FixedArray* instances = FixedArray::cast(object);
+ Handle<FixedArray> instances = isolate->factory()->NewFixedArray(count);
- ASSERT(isolate->heap()->IsHeapIterable());
+ ASSERT(heap->IsHeapIterable());
// Fill the referencing objects.
HeapIterator heap_iterator2(heap);
count = DebugConstructedBy(&heap_iterator2,
- constructor,
+ *constructor,
max_references,
- instances,
+ *instances,
count);
// Return result as JS array.
- Object* result;
- { MaybeObject* maybe_result = isolate->heap()->AllocateJSObject(
+ Handle<JSFunction> array_function(
isolate->context()->native_context()->array_function());
- if (!maybe_result->ToObject(&result)) return maybe_result;
- }
- return JSArray::cast(result)->SetContent(instances);
+ Handle<JSObject> result = isolate->factory()->NewJSObject(array_function);
+ JSArray::SetContent(Handle<JSArray>::cast(result), instances);
+ return *result;
}
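The two debug hunks above are typical of the handlification that runs through this whole upgrade: raw Object*/MaybeObject* locals and hand-written ToObject() checks give way to Handle<T> values obtained from factory methods, with RETURN_IF_EMPTY_HANDLE doing a single early return on allocation failure. A schematic stand-alone sketch of the shape of that change, using stand-in types rather than the real Handle/MaybeObject machinery:

    #include <cstddef>
    #include <optional>
    #include <vector>

    // Stand-ins: a "maybe" result that can signal allocation failure, plus an
    // early-return helper in the spirit of RETURN_IF_EMPTY_HANDLE.
    template <typename T> using Maybe = std::optional<T>;
    #define RETURN_IF_EMPTY(maybe) \
      do { if (!(maybe)) return {}; } while (0)

    Maybe<std::vector<int>> NewFixedArray(size_t n) {  // factory allocation
      return std::vector<int>(n);
    }

    // Handlified shape: allocate through the factory, bail out once on
    // failure, then use the value like a normal GC-safe handle.
    Maybe<std::vector<int>> FillInstances(size_t count) {
      Maybe<std::vector<int>> instances = NewFixedArray(count);
      RETURN_IF_EMPTY(instances);
      for (size_t i = 0; i < count; ++i) (*instances)[i] = static_cast<int>(i);
      return instances;
    }

The pre-handlified version had to check each MaybeObject* by hand and could not keep raw pointers alive across anything that might trigger GC, which is the restriction the SealHandleScope to HandleScope switches above lift.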
@@ -13669,14 +13861,14 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_GetLanguageTagVariants) {
Handle<Name> base =
isolate->factory()->NewStringFromAscii(CStrVector("base"));
for (unsigned int i = 0; i < length; ++i) {
- MaybeObject* maybe_string = input->GetElement(isolate, i);
- Object* locale_id;
- if (!maybe_string->ToObject(&locale_id) || !locale_id->IsString()) {
+ Handle<Object> locale_id = Object::GetElement(isolate, input, i);
+ RETURN_IF_EMPTY_HANDLE(isolate, locale_id);
+ if (!locale_id->IsString()) {
return isolate->Throw(isolate->heap()->illegal_argument_string());
}
v8::String::Utf8Value utf8_locale_id(
- v8::Utils::ToLocal(Handle<String>(String::cast(locale_id))));
+ v8::Utils::ToLocal(Handle<String>::cast(locale_id)));
UErrorCode error = U_ZERO_ERROR;
@@ -14316,9 +14508,11 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_GetV8Version) {
RUNTIME_FUNCTION(MaybeObject*, Runtime_Abort) {
SealHandleScope shs(isolate);
- ASSERT(args.length() == 2);
- OS::PrintError("abort: %s\n",
- reinterpret_cast<char*>(args[0]) + args.smi_at(1));
+ ASSERT(args.length() == 1);
+ CONVERT_SMI_ARG_CHECKED(message_id, 0);
+ const char* message = GetBailoutReason(
+ static_cast<BailoutReason>(message_id));
+ OS::PrintError("abort: %s\n", message);
isolate->PrintStack(stderr);
OS::Abort();
UNREACHABLE();
@@ -14372,7 +14566,7 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_TryMigrateInstance) {
}
-RUNTIME_FUNCTION(MaybeObject*, Runtime_GetFromCache) {
+RUNTIME_FUNCTION(MaybeObject*, RuntimeHidden_GetFromCache) {
SealHandleScope shs(isolate);
// This is only called from codegen, so checks might be more lax.
CONVERT_ARG_CHECKED(JSFunctionResultCache, cache, 0);
@@ -14494,8 +14688,8 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_ListNatives) {
#define COUNT_ENTRY(Name, argc, ressize) + 1
int entry_count = 0
RUNTIME_FUNCTION_LIST(COUNT_ENTRY)
- INLINE_FUNCTION_LIST(COUNT_ENTRY)
- INLINE_RUNTIME_FUNCTION_LIST(COUNT_ENTRY);
+ RUNTIME_HIDDEN_FUNCTION_LIST(COUNT_ENTRY)
+ INLINE_FUNCTION_LIST(COUNT_ENTRY);
#undef COUNT_ENTRY
Factory* factory = isolate->factory();
Handle<FixedArray> elements = factory->NewFixedArray(entry_count);
@@ -14521,9 +14715,10 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_ListNatives) {
}
inline_runtime_functions = false;
RUNTIME_FUNCTION_LIST(ADD_ENTRY)
+ // Calling hidden runtime functions should just throw.
+ RUNTIME_HIDDEN_FUNCTION_LIST(ADD_ENTRY)
inline_runtime_functions = true;
INLINE_FUNCTION_LIST(ADD_ENTRY)
- INLINE_RUNTIME_FUNCTION_LIST(ADD_ENTRY)
#undef ADD_ENTRY
ASSERT_EQ(index, entry_count);
Handle<JSArray> result = factory->NewJSArrayWithElements(elements);
@@ -14532,16 +14727,15 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_ListNatives) {
#endif
-RUNTIME_FUNCTION(MaybeObject*, Runtime_Log) {
- SealHandleScope shs(isolate);
+RUNTIME_FUNCTION(MaybeObject*, RuntimeHidden_Log) {
+ HandleScope handle_scope(isolate);
ASSERT(args.length() == 2);
- CONVERT_ARG_CHECKED(String, format, 0);
- CONVERT_ARG_CHECKED(JSArray, elms, 1);
- DisallowHeapAllocation no_gc;
- String::FlatContent format_content = format->GetFlatContent();
- RUNTIME_ASSERT(format_content.IsAscii());
- Vector<const uint8_t> chars = format_content.ToOneByteVector();
- isolate->logger()->LogRuntime(Vector<const char>::cast(chars), elms);
+ CONVERT_ARG_HANDLE_CHECKED(String, format, 0);
+ CONVERT_ARG_HANDLE_CHECKED(JSArray, elms, 1);
+
+ SmartArrayPointer<char> format_chars = format->ToCString();
+ isolate->logger()->LogRuntime(
+ Vector<const char>(format_chars.get(), format->length()), elms);
return isolate->heap()->undefined_value();
}
@@ -14564,7 +14758,7 @@ ELEMENTS_KIND_CHECK_RUNTIME_FUNCTION(FastSmiOrObjectElements)
ELEMENTS_KIND_CHECK_RUNTIME_FUNCTION(FastDoubleElements)
ELEMENTS_KIND_CHECK_RUNTIME_FUNCTION(FastHoleyElements)
ELEMENTS_KIND_CHECK_RUNTIME_FUNCTION(DictionaryElements)
-ELEMENTS_KIND_CHECK_RUNTIME_FUNCTION(NonStrictArgumentsElements)
+ELEMENTS_KIND_CHECK_RUNTIME_FUNCTION(SloppyArgumentsElements)
ELEMENTS_KIND_CHECK_RUNTIME_FUNCTION(ExternalArrayElements)
// Properties test sitting with elements tests - not fooling anyone.
ELEMENTS_KIND_CHECK_RUNTIME_FUNCTION(FastProperties)
@@ -14583,6 +14777,17 @@ TYPED_ARRAYS(TYPED_ARRAYS_CHECK_RUNTIME_FUNCTION)
#undef TYPED_ARRAYS_CHECK_RUNTIME_FUNCTION
+#define FIXED_TYPED_ARRAYS_CHECK_RUNTIME_FUNCTION(Type, type, TYPE, ctype, s) \
+ RUNTIME_FUNCTION(MaybeObject*, Runtime_HasFixed##Type##Elements) { \
+ CONVERT_ARG_CHECKED(JSObject, obj, 0); \
+ return isolate->heap()->ToBoolean(obj->HasFixed##Type##Elements()); \
+ }
+
+TYPED_ARRAYS(FIXED_TYPED_ARRAYS_CHECK_RUNTIME_FUNCTION)
+
+#undef FIXED_TYPED_ARRAYS_CHECK_RUNTIME_FUNCTION
+
+
RUNTIME_FUNCTION(MaybeObject*, Runtime_HaveSameMap) {
SealHandleScope shs(isolate);
ASSERT(args.length() == 2);
@@ -14647,6 +14852,22 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_SetMicrotaskPending) {
}
+RUNTIME_FUNCTION(MaybeObject*, Runtime_RunMicrotasks) {
+ HandleScope scope(isolate);
+ ASSERT(args.length() == 0);
+ if (isolate->microtask_pending())
+ Execution::RunMicrotasks(isolate);
+ return isolate->heap()->undefined_value();
+}
+
+
+RUNTIME_FUNCTION(MaybeObject*, Runtime_GetMicrotaskState) {
+ SealHandleScope shs(isolate);
+ ASSERT(args.length() == 0);
+ return isolate->heap()->microtask_state();
+}
+
+
RUNTIME_FUNCTION(MaybeObject*, Runtime_GetObservationState) {
SealHandleScope shs(isolate);
ASSERT(args.length() == 0);
@@ -14685,12 +14906,13 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_IsAccessAllowedForObserver) {
ASSERT(args.length() == 3);
CONVERT_ARG_HANDLE_CHECKED(JSFunction, observer, 0);
CONVERT_ARG_HANDLE_CHECKED(JSObject, object, 1);
- ASSERT(object->IsAccessCheckNeeded());
+ ASSERT(object->map()->is_access_check_needed());
Handle<Object> key = args.at<Object>(2);
SaveContext save(isolate);
isolate->set_context(observer->context());
- if (!isolate->MayNamedAccess(*object, isolate->heap()->undefined_value(),
- v8::ACCESS_KEYS)) {
+ if (!isolate->MayNamedAccessWrapper(object,
+ isolate->factory()->undefined_value(),
+ v8::ACCESS_KEYS)) {
return isolate->heap()->false_value();
}
bool access_allowed = false;
@@ -14698,11 +14920,12 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_IsAccessAllowedForObserver) {
if (key->ToArrayIndex(&index) ||
(key->IsString() && String::cast(*key)->AsArrayIndex(&index))) {
access_allowed =
- isolate->MayIndexedAccess(*object, index, v8::ACCESS_GET) &&
- isolate->MayIndexedAccess(*object, index, v8::ACCESS_HAS);
+ isolate->MayIndexedAccessWrapper(object, index, v8::ACCESS_GET) &&
+ isolate->MayIndexedAccessWrapper(object, index, v8::ACCESS_HAS);
} else {
- access_allowed = isolate->MayNamedAccess(*object, *key, v8::ACCESS_GET) &&
- isolate->MayNamedAccess(*object, *key, v8::ACCESS_HAS);
+ access_allowed =
+ isolate->MayNamedAccessWrapper(object, key, v8::ACCESS_GET) &&
+ isolate->MayNamedAccessWrapper(object, key, v8::ACCESS_HAS);
}
return isolate->heap()->ToBoolean(access_allowed);
}
@@ -14712,12 +14935,14 @@ static MaybeObject* ArrayConstructorCommon(Isolate* isolate,
Handle<JSFunction> constructor,
Handle<AllocationSite> site,
Arguments* caller_args) {
+ Factory* factory = isolate->factory();
+
bool holey = false;
bool can_use_type_feedback = true;
if (caller_args->length() == 1) {
- Object* argument_one = (*caller_args)[0];
+ Handle<Object> argument_one = caller_args->at<Object>(0);
if (argument_one->IsSmi()) {
- int value = Smi::cast(argument_one)->value();
+ int value = Handle<Smi>::cast(argument_one)->value();
if (value < 0 || value >= JSObject::kInitialMaxFastElementArray) {
// the array is a dictionary in this case.
can_use_type_feedback = false;
@@ -14730,8 +14955,7 @@ static MaybeObject* ArrayConstructorCommon(Isolate* isolate,
}
}
- JSArray* array;
- MaybeObject* maybe_array;
+ Handle<JSArray> array;
if (!site.is_null() && can_use_type_feedback) {
ElementsKind to_kind = site->GetElementsKind();
if (holey && !IsFastHoleyElementsKind(to_kind)) {
@@ -14740,27 +14964,40 @@ static MaybeObject* ArrayConstructorCommon(Isolate* isolate,
site->SetElementsKind(to_kind);
}
- maybe_array = isolate->heap()->AllocateJSObjectWithAllocationSite(
- *constructor, site);
- if (!maybe_array->To(&array)) return maybe_array;
+ // We should allocate with an initial map that reflects the allocation site
+ // advice. Therefore we use AllocateJSObjectFromMap instead of passing
+ // the constructor.
+ Handle<Map> initial_map(constructor->initial_map(), isolate);
+ if (to_kind != initial_map->elements_kind()) {
+ initial_map = Map::AsElementsKind(initial_map, to_kind);
+ RETURN_IF_EMPTY_HANDLE(isolate, initial_map);
+ }
+
+ // If we don't care to track arrays of to_kind ElementsKind, then
+ // don't emit a memento for them.
+ Handle<AllocationSite> allocation_site;
+ if (AllocationSite::GetMode(to_kind) == TRACK_ALLOCATION_SITE) {
+ allocation_site = site;
+ }
+
+ array = Handle<JSArray>::cast(factory->NewJSObjectFromMap(
+ initial_map, NOT_TENURED, true, allocation_site));
} else {
- maybe_array = isolate->heap()->AllocateJSObject(*constructor);
- if (!maybe_array->To(&array)) return maybe_array;
+ array = Handle<JSArray>::cast(factory->NewJSObject(constructor));
+
// We might need to transition to holey
ElementsKind kind = constructor->initial_map()->elements_kind();
if (holey && !IsFastHoleyElementsKind(kind)) {
kind = GetHoleyElementsKind(kind);
- maybe_array = array->TransitionElementsKind(kind);
- if (maybe_array->IsFailure()) return maybe_array;
+ JSObject::TransitionElementsKind(array, kind);
}
}
- maybe_array = isolate->heap()->AllocateJSArrayStorage(array, 0, 0,
- DONT_INITIALIZE_ARRAY_ELEMENTS);
- if (maybe_array->IsFailure()) return maybe_array;
+ factory->NewJSArrayStorage(array, 0, 0, DONT_INITIALIZE_ARRAY_ELEMENTS);
+
ElementsKind old_kind = array->GetElementsKind();
- maybe_array = ArrayConstructInitializeElements(array, caller_args);
- if (maybe_array->IsFailure()) return maybe_array;
+ RETURN_IF_EMPTY_HANDLE(isolate,
+ ArrayConstructInitializeElements(array, caller_args));
if (!site.is_null() &&
(old_kind != array->GetElementsKind() ||
!can_use_type_feedback)) {
@@ -14769,11 +15006,11 @@ static MaybeObject* ArrayConstructorCommon(Isolate* isolate,
// We must mark the allocationsite as un-inlinable.
site->SetDoNotInlineCall();
}
- return array;
+ return *array;
}
-RUNTIME_FUNCTION(MaybeObject*, Runtime_ArrayConstructor) {
+RUNTIME_FUNCTION(MaybeObject*, RuntimeHidden_ArrayConstructor) {
HandleScope scope(isolate);
// If we get 2 arguments then they are the stub parameters (constructor, type
// info). If we get 4, then the first one is a pointer to the arguments
@@ -14810,7 +15047,7 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_ArrayConstructor) {
}
-RUNTIME_FUNCTION(MaybeObject*, Runtime_InternalArrayConstructor) {
+RUNTIME_FUNCTION(MaybeObject*, RuntimeHidden_InternalArrayConstructor) {
HandleScope scope(isolate);
Arguments empty_args(0, NULL);
bool no_caller_args = args.length() == 1;
@@ -14846,25 +15083,44 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_MaxSmi) {
FUNCTION_ADDR(Runtime_##name), number_of_args, result_size },
+#define FH(name, number_of_args, result_size) \
+ { Runtime::kHidden##name, Runtime::RUNTIME_HIDDEN, NULL, \
+ FUNCTION_ADDR(RuntimeHidden_##name), number_of_args, result_size },
+
+
#define I(name, number_of_args, result_size) \
{ Runtime::kInline##name, Runtime::INLINE, \
"_" #name, NULL, number_of_args, result_size },
+
+#define IO(name, number_of_args, result_size) \
+ { Runtime::kInlineOptimized##name, Runtime::INLINE_OPTIMIZED, \
+ "_" #name, FUNCTION_ADDR(Runtime_##name), number_of_args, result_size },
+
+
static const Runtime::Function kIntrinsicFunctions[] = {
RUNTIME_FUNCTION_LIST(F)
+ RUNTIME_HIDDEN_FUNCTION_LIST(FH)
INLINE_FUNCTION_LIST(I)
- INLINE_RUNTIME_FUNCTION_LIST(I)
+ INLINE_OPTIMIZED_FUNCTION_LIST(IO)
};
+#undef IO
+#undef I
+#undef FH
+#undef F
+
MaybeObject* Runtime::InitializeIntrinsicFunctionNames(Heap* heap,
Object* dictionary) {
ASSERT(dictionary != NULL);
ASSERT(NameDictionary::cast(dictionary)->NumberOfElements() == 0);
for (int i = 0; i < kNumFunctions; ++i) {
+ const char* name = kIntrinsicFunctions[i].name;
+ if (name == NULL) continue;
Object* name_string;
{ MaybeObject* maybe_name_string =
- heap->InternalizeUtf8String(kIntrinsicFunctions[i].name);
+ heap->InternalizeUtf8String(name);
if (!maybe_name_string->ToObject(&name_string)) return maybe_name_string;
}
NameDictionary* name_dictionary = NameDictionary::cast(dictionary);
@@ -14921,4 +15177,9 @@ void Runtime::PerformGC(Object* result, Isolate* isolate) {
}
+void Runtime::OutOfMemory() {
+ Heap::FatalProcessOutOfMemory("CALL_AND_RETRY_LAST", true);
+ UNREACHABLE();
+}
+
} } // namespace v8::internal
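The kIntrinsicFunctions table and the id enum in runtime.h are both generated from the same X-macro lists: each list entry is expanded once per consumer (F, FH, I and IO above; k##name and kHidden##name in the header), and hidden entries get a NULL name so InitializeIntrinsicFunctionNames skips them and they cannot be reached as %Name from JS. A self-contained miniature of the same X-macro technique; the list and entry names below are invented for illustration:

    #include <cstdio>

    // One list, expanded several times with different per-entry macros.
    #define DEMO_FUNCTION_LIST(V) \
      V(Add, 2)                   \
      V(Negate, 1)                \
      V(Print, 1)

    // Expansion 1: an enum of ids.
    enum DemoId {
    #define ID(name, nargs) kDemo##name,
      DEMO_FUNCTION_LIST(ID)
    #undef ID
      kDemoCount
    };

    // Expansion 2: a descriptor table that stays in sync automatically.
    struct DemoFunction { DemoId id; const char* name; int nargs; };
    #define ENTRY(name, nargs) { kDemo##name, #name, nargs },
    static const DemoFunction kDemoFunctions[] = { DEMO_FUNCTION_LIST(ENTRY) };
    #undef ENTRY

    int main() {
      for (const DemoFunction& f : kDemoFunctions)
        std::printf("%s takes %d argument(s)\n", f.name, f.nargs);
      return 0;
    }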
diff --git a/deps/v8/src/runtime.h b/deps/v8/src/runtime.h
index 0506e9d86d..58cd5259c9 100644
--- a/deps/v8/src/runtime.h
+++ b/deps/v8/src/runtime.h
@@ -64,7 +64,7 @@ namespace internal {
F(ToFastProperties, 1, 1) \
F(FinishArrayPrototypeSetup, 1, 1) \
F(SpecialArrayFunctions, 1, 1) \
- F(IsClassicModeFunction, 1, 1) \
+ F(IsSloppyModeFunction, 1, 1) \
F(GetDefaultReceiver, 1, 1) \
\
F(GetPrototype, 1, 1) \
@@ -83,13 +83,6 @@ namespace internal {
F(Apply, 5, 1) \
F(GetFunctionDelegate, 1, 1) \
F(GetConstructorDelegate, 1, 1) \
- F(NewArgumentsFast, 3, 1) \
- F(NewStrictArgumentsFast, 3, 1) \
- F(CompileUnoptimized, 1, 1) \
- F(CompileOptimized, 2, 1) \
- F(TryInstallOptimizedCode, 1, 1) \
- F(NotifyDeoptimized, 1, 1) \
- F(NotifyStubFailure, 0, 1) \
F(DeoptimizeFunction, 1, 1) \
F(ClearFunctionTypeFeedback, 1, 1) \
F(RunningInSimulator, 0, 1) \
@@ -101,8 +94,6 @@ namespace internal {
F(UnblockConcurrentRecompilation, 0, 1) \
F(CompileForOnStackReplacement, 1, 1) \
F(SetAllocationTimeout, -1 /* 2 || 3 */, 1) \
- F(AllocateInNewSpace, 1, 1) \
- F(AllocateInTargetSpace, 2, 1) \
F(SetNativeFlag, 1, 1) \
F(SetInlineBuiltinFlag, 1, 1) \
F(StoreArrayLiteralElement, 5, 1) \
@@ -111,7 +102,6 @@ namespace internal {
F(FlattenString, 1, 1) \
F(TryMigrateInstance, 1, 1) \
F(NotifyContextDisposed, 0, 1) \
- F(MaxSmi, 0, 1) \
\
/* Array join support */ \
F(PushIfAbsent, 2, 1) \
@@ -131,15 +121,10 @@ namespace internal {
F(URIEscape, 1, 1) \
F(URIUnescape, 1, 1) \
\
- F(NumberToString, 1, 1) \
- F(NumberToStringSkipCache, 1, 1) \
F(NumberToInteger, 1, 1) \
- F(NumberToPositiveInteger, 1, 1) \
F(NumberToIntegerMapMinusZero, 1, 1) \
F(NumberToJSUint32, 1, 1) \
F(NumberToJSInt32, 1, 1) \
- F(NumberToSmi, 1, 1) \
- F(AllocateHeapNumber, 0, 1) \
\
/* Arithmetic operations */ \
F(NumberAdd, 2, 1) \
@@ -151,7 +136,6 @@ namespace internal {
F(NumberAlloc, 0, 1) \
F(NumberImul, 2, 1) \
\
- F(StringAdd, 2, 1) \
F(StringBuilderConcat, 3, 1) \
F(StringBuilderJoin, 3, 1) \
F(SparseJoinWithSeparator, 3, 1) \
@@ -171,27 +155,25 @@ namespace internal {
\
F(NumberCompare, 3, 1) \
F(SmiLexicographicCompare, 2, 1) \
- F(StringCompare, 2, 1) \
\
/* Math */ \
F(Math_acos, 1, 1) \
F(Math_asin, 1, 1) \
F(Math_atan, 1, 1) \
- F(Math_atan2, 2, 1) \
+ F(Math_log, 1, 1) \
+ F(Math_sqrt, 1, 1) \
F(Math_exp, 1, 1) \
F(Math_floor, 1, 1) \
- F(Math_log, 1, 1) \
F(Math_pow, 2, 1) \
F(Math_pow_cfunction, 2, 1) \
+ F(Math_atan2, 2, 1) \
F(RoundNumber, 1, 1) \
- F(Math_sqrt, 1, 1) \
+ F(Math_fround, 1, 1) \
\
/* Regular expressions */ \
F(RegExpCompile, 3, 1) \
- F(RegExpExec, 4, 1) \
F(RegExpExecMultiple, 4, 1) \
F(RegExpInitializeObject, 5, 1) \
- F(RegExpConstructResult, 3, 1) \
\
/* JSON */ \
F(ParseJson, 1, 1) \
@@ -199,11 +181,9 @@ namespace internal {
F(QuoteJSONString, 1, 1) \
\
/* Strings */ \
- F(StringCharCodeAt, 2, 1) \
F(StringIndexOf, 3, 1) \
F(StringLastIndexOf, 3, 1) \
F(StringLocaleCompare, 2, 1) \
- F(SubString, 3, 1) \
F(StringReplaceGlobalRegExpWithString, 4, 1) \
F(StringReplaceOneCharWithString, 3, 1) \
F(StringMatch, 3, 1) \
@@ -246,7 +226,6 @@ namespace internal {
F(GetAndClearOverflowedStackTrace, 1, 1) \
F(GetV8Version, 0, 1) \
\
- F(ClassOf, 1, 1) \
F(SetCode, 2, 1) \
F(SetExpectedNumberOfProperties, 2, 1) \
\
@@ -264,57 +243,47 @@ namespace internal {
F(DateToUTC, 1, 1) \
F(DateMakeDay, 2, 1) \
F(DateSetValue, 3, 1) \
- \
- /* Numbers */ \
+ F(DateCacheVersion, 0, 1) \
\
/* Globals */ \
F(CompileString, 2, 1) \
- F(GlobalPrint, 1, 1) \
\
/* Eval */ \
F(GlobalReceiver, 1, 1) \
F(IsAttachedGlobal, 1, 1) \
- F(ResolvePossiblyDirectEval, 5, 2) \
\
F(SetProperty, -1 /* 4 or 5 */, 1) \
F(DefineOrRedefineDataProperty, 4, 1) \
F(DefineOrRedefineAccessorProperty, 5, 1) \
F(IgnoreAttributesAndSetProperty, -1 /* 3 or 4 */, 1) \
F(GetDataProperty, 2, 1) \
+ F(SetHiddenProperty, 3, 1) \
\
/* Arrays */ \
F(RemoveArrayHoles, 2, 1) \
F(GetArrayKeys, 2, 1) \
F(MoveArrayContents, 2, 1) \
F(EstimateNumberOfElements, 1, 1) \
- F(ArrayConstructor, -1, 1) \
- F(InternalArrayConstructor, -1, 1) \
\
/* Getters and Setters */ \
F(LookupAccessor, 3, 1) \
\
- /* Literals */ \
- F(MaterializeRegExpLiteral, 4, 1)\
- F(CreateObjectLiteral, 4, 1) \
- F(CreateArrayLiteral, 4, 1) \
- F(CreateArrayLiteralStubBailout, 3, 1) \
- \
- /* Harmony generators */ \
- F(CreateJSGeneratorObject, 0, 1) \
- F(SuspendJSGeneratorObject, 1, 1) \
- F(ResumeJSGeneratorObject, 3, 1) \
- F(ThrowGeneratorStateError, 1, 1) \
- \
/* ES5 */ \
F(ObjectFreeze, 1, 1) \
\
+ /* Harmony microtasks */ \
+ F(GetMicrotaskState, 0, 1) \
+ \
/* Harmony modules */ \
F(IsJSModule, 1, 1) \
\
/* Harmony symbols */ \
F(CreateSymbol, 1, 1) \
F(CreatePrivateSymbol, 1, 1) \
- F(SymbolName, 1, 1) \
+ F(CreateGlobalPrivateSymbol, 1, 1) \
+ F(NewSymbolWrapper, 1, 1) \
+ F(SymbolDescription, 1, 1) \
+ F(SymbolRegistry, 0, 1) \
F(SymbolIsPrivate, 1, 1) \
\
/* Harmony proxies */ \
@@ -351,6 +320,7 @@ namespace internal {
\
/* Harmony events */ \
F(SetMicrotaskPending, 1, 1) \
+ F(RunMicrotasks, 0, 1) \
\
/* Harmony observe */ \
F(IsObserved, 1, 1) \
@@ -367,7 +337,6 @@ namespace internal {
F(ArrayBufferIsView, 1, 1) \
F(ArrayBufferNeuter, 1, 1) \
\
- F(TypedArrayInitialize, 5, 1) \
F(TypedArrayInitializeFromArrayLike, 4, 1) \
F(TypedArrayGetBuffer, 1, 1) \
F(TypedArrayGetByteLength, 1, 1) \
@@ -375,7 +344,6 @@ namespace internal {
F(TypedArrayGetLength, 1, 1) \
F(TypedArraySetFastCases, 3, 1) \
\
- F(DataViewInitialize, 4, 1) \
F(DataViewGetBuffer, 1, 1) \
F(DataViewGetByteLength, 1, 1) \
F(DataViewGetByteOffset, 1, 1) \
@@ -398,54 +366,22 @@ namespace internal {
F(DataViewSetFloat64, 4, 1) \
\
/* Statements */ \
- F(NewClosure, 3, 1) \
- F(NewClosureFromStubFailure, 1, 1) \
- F(NewObject, 1, 1) \
F(NewObjectFromBound, 1, 1) \
- F(FinalizeInstanceSize, 1, 1) \
- F(Throw, 1, 1) \
- F(ReThrow, 1, 1) \
- F(ThrowReferenceError, 1, 1) \
- F(ThrowNotDateError, 0, 1) \
- F(ThrowMessage, 1, 1) \
- F(StackGuard, 0, 1) \
- F(Interrupt, 0, 1) \
- F(PromoteScheduledException, 0, 1) \
- \
- /* Contexts */ \
- F(NewGlobalContext, 2, 1) \
- F(NewFunctionContext, 1, 1) \
- F(PushWithContext, 2, 1) \
- F(PushCatchContext, 3, 1) \
- F(PushBlockContext, 2, 1) \
- F(PushModuleContext, 2, 1) \
- F(DeleteContextSlot, 2, 1) \
- F(LoadContextSlot, 2, 2) \
- F(LoadContextSlotNoReferenceError, 2, 2) \
- F(StoreContextSlot, 4, 1) \
\
/* Declarations and initialization */ \
- F(DeclareGlobals, 3, 1) \
- F(DeclareModules, 1, 1) \
- F(DeclareContextSlot, 4, 1) \
F(InitializeVarGlobal, -1 /* 2 or 3 */, 1) \
- F(InitializeConstGlobal, 2, 1) \
- F(InitializeConstContextSlot, 3, 1) \
F(OptimizeObjectForAddingMultipleProperties, 2, 1) \
\
/* Debugging */ \
F(DebugPrint, 1, 1) \
+ F(GlobalPrint, 1, 1) \
F(DebugTrace, 0, 1) \
F(TraceEnter, 0, 1) \
F(TraceExit, 1, 1) \
- F(Abort, 2, 1) \
+ F(Abort, 1, 1) \
F(AbortJS, 1, 1) \
- /* Logging */ \
- F(Log, 2, 1) \
/* ES5 */ \
F(LocalKeys, 1, 1) \
- /* Cache suport */ \
- F(GetFromCache, 2, 1) \
\
/* Message objects */ \
F(MessageGetStartPosition, 1, 1) \
@@ -461,7 +397,7 @@ namespace internal {
F(HasFastDoubleElements, 1, 1) \
F(HasFastHoleyElements, 1, 1) \
F(HasDictionaryElements, 1, 1) \
- F(HasNonStrictArgumentsElements, 1, 1) \
+ F(HasSloppyArgumentsElements, 1, 1) \
F(HasExternalUint8ClampedElements, 1, 1) \
F(HasExternalArrayElements, 1, 1) \
F(HasExternalInt8Elements, 1, 1) \
@@ -472,6 +408,15 @@ namespace internal {
F(HasExternalUint32Elements, 1, 1) \
F(HasExternalFloat32Elements, 1, 1) \
F(HasExternalFloat64Elements, 1, 1) \
+ F(HasFixedUint8ClampedElements, 1, 1) \
+ F(HasFixedInt8Elements, 1, 1) \
+ F(HasFixedUint8Elements, 1, 1) \
+ F(HasFixedInt16Elements, 1, 1) \
+ F(HasFixedUint16Elements, 1, 1) \
+ F(HasFixedInt32Elements, 1, 1) \
+ F(HasFixedUint32Elements, 1, 1) \
+ F(HasFixedFloat32Elements, 1, 1) \
+ F(HasFixedFloat64Elements, 1, 1) \
F(HasFastProperties, 1, 1) \
F(TransitionElementsKind, 2, 1) \
F(HaveSameMap, 2, 1) \
@@ -497,6 +442,7 @@ namespace internal {
F(GetScopeCount, 2, 1) \
F(GetStepInPositions, 2, 1) \
F(GetScopeDetails, 4, 1) \
+ F(GetAllScopesDetails, 4, 1) \
F(GetFunctionScopeCount, 1, 1) \
F(GetFunctionScopeDetails, 2, 1) \
F(SetScopeVariableValue, 6, 1) \
@@ -597,6 +543,7 @@ namespace internal {
// RUNTIME_FUNCTION_LIST defines all runtime functions accessed
// either directly by id (via the code generator), or indirectly
// via a native call by name (from within JS code).
+// Entries have the form F(name, number of arguments, number of return values).
#define RUNTIME_FUNCTION_LIST(F) \
RUNTIME_FUNCTION_LIST_ALWAYS_1(F) \
@@ -605,6 +552,90 @@ namespace internal {
RUNTIME_FUNCTION_LIST_DEBUGGER_SUPPORT(F) \
RUNTIME_FUNCTION_LIST_I18N_SUPPORT(F)
+// RUNTIME_HIDDEN_FUNCTION_LIST defines all runtime functions accessed
+// by id from the code generator, but not via a native call by name.
+// Entries have the form F(name, number of arguments, number of return values).
+#define RUNTIME_HIDDEN_FUNCTION_LIST(F) \
+ F(NumberToString, 1, 1) \
+ F(RegExpConstructResult, 3, 1) \
+ F(RegExpExec, 4, 1) \
+ F(StringAdd, 2, 1) \
+ F(SubString, 3, 1) \
+ F(StringCompare, 2, 1) \
+ F(StringCharCodeAt, 2, 1) \
+ F(Log, 3, 1) \
+ F(GetFromCache, 2, 1) \
+ \
+ /* Compilation */ \
+ F(CompileUnoptimized, 1, 1) \
+ F(CompileOptimized, 2, 1) \
+ F(TryInstallOptimizedCode, 1, 1) \
+ F(NotifyDeoptimized, 1, 1) \
+ F(NotifyStubFailure, 0, 1) \
+ \
+ /* Utilities */ \
+ F(AllocateInNewSpace, 1, 1) \
+ F(AllocateInTargetSpace, 2, 1) \
+ F(AllocateHeapNumber, 0, 1) \
+ F(NumberToSmi, 1, 1) \
+ F(NumberToStringSkipCache, 1, 1) \
+ \
+ F(NewArgumentsFast, 3, 1) \
+ F(NewStrictArgumentsFast, 3, 1) \
+ \
+ /* Harmony generators */ \
+ F(CreateJSGeneratorObject, 0, 1) \
+ F(SuspendJSGeneratorObject, 1, 1) \
+ F(ResumeJSGeneratorObject, 3, 1) \
+ F(ThrowGeneratorStateError, 1, 1) \
+ \
+ /* Arrays */ \
+ F(ArrayConstructor, -1, 1) \
+ F(InternalArrayConstructor, -1, 1) \
+ \
+ /* Literals */ \
+  F(MaterializeRegExpLiteral, 4, 1) \
+ F(CreateObjectLiteral, 4, 1) \
+ F(CreateArrayLiteral, 4, 1) \
+ F(CreateArrayLiteralStubBailout, 3, 1) \
+ \
+ /* Statements */ \
+ F(NewClosure, 3, 1) \
+ F(NewClosureFromStubFailure, 1, 1) \
+ F(NewObject, 1, 1) \
+ F(NewObjectWithAllocationSite, 2, 1) \
+ F(FinalizeInstanceSize, 1, 1) \
+ F(Throw, 1, 1) \
+ F(ReThrow, 1, 1) \
+ F(ThrowReferenceError, 1, 1) \
+ F(ThrowNotDateError, 0, 1) \
+ F(ThrowMessage, 1, 1) \
+ F(StackGuard, 0, 1) \
+ F(Interrupt, 0, 1) \
+ F(PromoteScheduledException, 0, 1) \
+ \
+ /* Contexts */ \
+ F(NewGlobalContext, 2, 1) \
+ F(NewFunctionContext, 1, 1) \
+ F(PushWithContext, 2, 1) \
+ F(PushCatchContext, 3, 1) \
+ F(PushBlockContext, 2, 1) \
+ F(PushModuleContext, 2, 1) \
+ F(DeleteContextSlot, 2, 1) \
+ F(LoadContextSlot, 2, 2) \
+ F(LoadContextSlotNoReferenceError, 2, 2) \
+ F(StoreContextSlot, 4, 1) \
+ \
+ /* Declarations and initialization */ \
+ F(DeclareGlobals, 3, 1) \
+ F(DeclareModules, 1, 1) \
+ F(DeclareContextSlot, 4, 1) \
+ F(InitializeConstGlobal, 2, 1) \
+ F(InitializeConstContextSlot, 3, 1) \
+ \
+ /* Eval */ \
+ F(ResolvePossiblyDirectEval, 5, 2)
+
// ----------------------------------------------------------------------------
// INLINE_FUNCTION_LIST defines all inlined functions accessed
// with a native call of the form %_name from within JS code.
@@ -640,15 +671,7 @@ namespace internal {
F(FastAsciiArrayJoin, 2, 1) \
F(GeneratorNext, 2, 1) \
F(GeneratorThrow, 2, 1) \
- F(DebugBreakInOptimizedCode, 0, 1)
-
-
-// ----------------------------------------------------------------------------
-// INLINE_RUNTIME_FUNCTION_LIST defines all inlined functions accessed
-// with a native call of the form %_name from within JS code that also have
-// a corresponding runtime function, that is called for slow cases.
-// Entries have the form F(name, number of arguments, number of return values).
-#define INLINE_RUNTIME_FUNCTION_LIST(F) \
+ F(DebugBreakInOptimizedCode, 0, 1) \
F(ClassOf, 1, 1) \
F(StringCharCodeAt, 2, 1) \
F(Log, 3, 1) \
@@ -661,6 +684,21 @@ namespace internal {
F(NumberToString, 1, 1)
+// ----------------------------------------------------------------------------
+// INLINE_OPTIMIZED_FUNCTION_LIST defines all inlined functions accessed
+// with a native call of the form %_name from within JS code that also have
+// a corresponding runtime function that is called from non-optimized code.
+// Entries have the form F(name, number of arguments, number of return values).
+#define INLINE_OPTIMIZED_FUNCTION_LIST(F) \
+ F(DoubleHi, 1, 1) \
+ F(DoubleLo, 1, 1) \
+ F(ConstructDouble, 2, 1) \
+ F(TypedArrayInitialize, 5, 1) \
+ F(DataViewInitialize, 4, 1) \
+ F(MaxSmi, 0, 1) \
+ F(TypedArrayMaxSizeInHeap, 0, 1)
+
+
//---------------------------------------------------------------------------
// Runtime provides access to all C++ runtime functions.
@@ -712,9 +750,14 @@ class Runtime : public AllStatic {
#define F(name, nargs, ressize) k##name,
RUNTIME_FUNCTION_LIST(F)
#undef F
+#define F(name, nargs, ressize) kHidden##name,
+ RUNTIME_HIDDEN_FUNCTION_LIST(F)
+#undef F
#define F(name, nargs, ressize) kInline##name,
INLINE_FUNCTION_LIST(F)
- INLINE_RUNTIME_FUNCTION_LIST(F)
+#undef F
+#define F(name, nargs, ressize) kInlineOptimized##name,
+ INLINE_OPTIMIZED_FUNCTION_LIST(F)
#undef F
kNumFunctions,
kFirstInlineFunction = kInlineIsSmi
@@ -722,7 +765,9 @@ class Runtime : public AllStatic {
enum IntrinsicType {
RUNTIME,
- INLINE
+ RUNTIME_HIDDEN,
+ INLINE,
+ INLINE_OPTIMIZED
};
// Intrinsic function descriptor.
@@ -771,14 +816,9 @@ class Runtime : public AllStatic {
// Support getting the characters in a string using [] notation as
// in Firefox/SpiderMonkey, Safari and Opera.
- MUST_USE_RESULT static MaybeObject* GetElementOrCharAt(Isolate* isolate,
- Handle<Object> object,
- uint32_t index);
-
- MUST_USE_RESULT static MaybeObject* GetElementOrCharAtOrFail(
- Isolate* isolate,
- Handle<Object> object,
- uint32_t index);
+ static Handle<Object> GetElementOrCharAt(Isolate* isolate,
+ Handle<Object> object,
+ uint32_t index);
static Handle<Object> SetObjectProperty(
Isolate* isolate,
@@ -786,7 +826,7 @@ class Runtime : public AllStatic {
Handle<Object> key,
Handle<Object> value,
PropertyAttributes attr,
- StrictModeFlag strict_mode);
+ StrictMode strict_mode);
static Handle<Object> ForceSetObjectProperty(
Isolate* isolate,
@@ -848,10 +888,14 @@ class Runtime : public AllStatic {
};
static void ArrayIdToTypeAndSize(int array_id,
- ExternalArrayType *type, size_t *element_size);
+ ExternalArrayType *type,
+ ElementsKind* external_elements_kind,
+ ElementsKind* fixed_elements_kind,
+ size_t *element_size);
  // Helper functions used by stubs.
static void PerformGC(Object* result, Isolate* isolate);
+ static void OutOfMemory();
// Used in runtime.cc and hydrogen's VisitArrayLiteral.
static Handle<Object> CreateArrayLiteralBoilerplate(
@@ -864,12 +908,12 @@ class Runtime : public AllStatic {
//---------------------------------------------------------------------------
// Constants used by interface to runtime functions.
-class AllocateDoubleAlignFlag: public BitField<bool, 0, 1> {};
-class AllocateTargetSpace: public BitField<AllocationSpace, 1, 3> {};
+class AllocateDoubleAlignFlag: public BitField<bool, 0, 1> {};
+class AllocateTargetSpace: public BitField<AllocationSpace, 1, 3> {};
-class DeclareGlobalsEvalFlag: public BitField<bool, 0, 1> {};
-class DeclareGlobalsNativeFlag: public BitField<bool, 1, 1> {};
-class DeclareGlobalsLanguageMode: public BitField<LanguageMode, 2, 2> {};
+class DeclareGlobalsEvalFlag: public BitField<bool, 0, 1> {};
+class DeclareGlobalsNativeFlag: public BitField<bool, 1, 1> {};
+class DeclareGlobalsStrictMode: public BitField<StrictMode, 2, 1> {};
} } // namespace v8::internal
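The last hunk narrows the DeclareGlobals flag encoding: with StrictMode being a two-valued enum, the old 2-bit LanguageMode field becomes a 1-bit field packed next to the eval and native flags. A self-contained sketch of that BitField-style packing; the BitField template below is a simplified stand-in, not V8's real implementation:

    #include <cassert>
    #include <cstdint>

    enum StrictMode { SLOPPY = 0, STRICT = 1 };

    // Simplified BitField<T, shift, size>: encode/decode a small value at a
    // fixed bit position inside an integer of packed flags.
    template <typename T, int kShift, int kSize>
    struct BitField {
      static constexpr uint32_t kMask = ((1u << kSize) - 1) << kShift;
      static uint32_t encode(T value) {
        assert((static_cast<uint32_t>(value) & ~((1u << kSize) - 1)) == 0);
        return static_cast<uint32_t>(value) << kShift;
      }
      static T decode(uint32_t packed) {
        return static_cast<T>((packed & kMask) >> kShift);
      }
    };

    // Same layout as the declarations above: two bools, then the 1-bit mode.
    using EvalFlag   = BitField<bool, 0, 1>;
    using NativeFlag = BitField<bool, 1, 1>;
    using ModeField  = BitField<StrictMode, 2, 1>;

    int main() {
      uint32_t flags = EvalFlag::encode(true) | ModeField::encode(STRICT);
      assert(EvalFlag::decode(flags) && !NativeFlag::decode(flags));
      assert(ModeField::decode(flags) == STRICT);
      return 0;
    }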
diff --git a/deps/v8/src/runtime.js b/deps/v8/src/runtime.js
index 2a949ae8d1..a49bc8448d 100644
--- a/deps/v8/src/runtime.js
+++ b/deps/v8/src/runtime.js
@@ -75,11 +75,8 @@ function EQUALS(y) {
y = %ToPrimitive(y, NO_HINT);
}
} else if (IS_SYMBOL(x)) {
- while (true) {
- if (IS_SYMBOL(y)) return %_ObjectEquals(x, y) ? 0 : 1;
- if (!IS_SPEC_OBJECT(y)) return 1; // not equal
- y = %ToPrimitive(y, NO_HINT);
- }
+ if (IS_SYMBOL(y)) return %_ObjectEquals(x, y) ? 0 : 1;
+ return 1; // not equal
} else if (IS_BOOLEAN(x)) {
if (IS_BOOLEAN(y)) return %_ObjectEquals(x, y) ? 0 : 1;
if (IS_NULL_OR_UNDEFINED(y)) return 1;
@@ -97,6 +94,7 @@ function EQUALS(y) {
return %_ObjectEquals(x, y) ? 0 : 1;
}
if (IS_NULL_OR_UNDEFINED(y)) return 1; // not equal
+ if (IS_SYMBOL(y)) return 1; // not equal
if (IS_BOOLEAN(y)) y = %ToNumber(y);
x = %ToPrimitive(x, NO_HINT);
}
@@ -501,7 +499,7 @@ function ToPrimitive(x, hint) {
if (IS_STRING(x)) return x;
// Normal behavior.
if (!IS_SPEC_OBJECT(x)) return x;
- if (IS_SYMBOL_WRAPPER(x)) return %_ValueOf(x);
+ if (IS_SYMBOL_WRAPPER(x)) throw MakeTypeError('symbol_to_primitive', []);
if (hint == NO_HINT) hint = (IS_DATE(x)) ? STRING_HINT : NUMBER_HINT;
return (hint == NUMBER_HINT) ? %DefaultNumber(x) : %DefaultString(x);
}
@@ -548,6 +546,7 @@ function ToString(x) {
if (IS_NUMBER(x)) return %_NumberToString(x);
if (IS_BOOLEAN(x)) return x ? 'true' : 'false';
if (IS_UNDEFINED(x)) return 'undefined';
+ if (IS_SYMBOL(x)) throw %MakeTypeError('symbol_to_string', []);
return (IS_NULL(x)) ? 'null' : %ToString(%DefaultString(x));
}
@@ -555,6 +554,7 @@ function NonStringToString(x) {
if (IS_NUMBER(x)) return %_NumberToString(x);
if (IS_BOOLEAN(x)) return x ? 'true' : 'false';
if (IS_UNDEFINED(x)) return 'undefined';
+ if (IS_SYMBOL(x)) throw %MakeTypeError('symbol_to_string', []);
return (IS_NULL(x)) ? 'null' : %ToString(%DefaultString(x));
}
@@ -568,9 +568,9 @@ function ToName(x) {
// ECMA-262, section 9.9, page 36.
function ToObject(x) {
if (IS_STRING(x)) return new $String(x);
- if (IS_SYMBOL(x)) return new $Symbol(x);
if (IS_NUMBER(x)) return new $Number(x);
if (IS_BOOLEAN(x)) return new $Boolean(x);
+ if (IS_SYMBOL(x)) return %NewSymbolWrapper(x);
if (IS_NULL_OR_UNDEFINED(x) && !IS_UNDETECTABLE(x)) {
throw %MakeTypeError('undefined_or_null_to_object', []);
}
diff --git a/deps/v8/src/sampler.cc b/deps/v8/src/sampler.cc
index cb98b6fdcf..c6830e6904 100644
--- a/deps/v8/src/sampler.cc
+++ b/deps/v8/src/sampler.cc
@@ -54,7 +54,8 @@
// GLibc on ARM defines mcontext_t as a typedef for 'struct sigcontext'.
// Old versions of the C library <signal.h> didn't define the type.
#if V8_OS_ANDROID && !defined(__BIONIC_HAVE_UCONTEXT_T) && \
- defined(__arm__) && !defined(__BIONIC_HAVE_STRUCT_SIGCONTEXT)
+ (defined(__arm__) || defined(__aarch64__)) && \
+ !defined(__BIONIC_HAVE_STRUCT_SIGCONTEXT)
#include <asm/sigcontext.h>
#endif
@@ -97,6 +98,18 @@ typedef struct ucontext {
// Other fields are not used by V8, don't define them here.
} ucontext_t;
+#elif defined(__aarch64__)
+
+typedef struct sigcontext mcontext_t;
+
+typedef struct ucontext {
+ uint64_t uc_flags;
+ struct ucontext *uc_link;
+ stack_t uc_stack;
+ mcontext_t uc_mcontext;
+ // Other fields are not used by V8, don't define them here.
+} ucontext_t;
+
#elif defined(__mips__)
// MIPS version of sigcontext, for Android bionic.
typedef struct {
@@ -146,6 +159,23 @@ typedef struct ucontext {
// Other fields are not used by V8, don't define them here.
} ucontext_t;
enum { REG_EBP = 6, REG_ESP = 7, REG_EIP = 14 };
+
+#elif defined(__x86_64__)
+// x64 version for Android.
+typedef struct {
+ uint64_t gregs[23];
+ void* fpregs;
+ uint64_t __reserved1[8];
+} mcontext_t;
+
+typedef struct ucontext {
+ uint64_t uc_flags;
+ struct ucontext *uc_link;
+ stack_t uc_stack;
+ mcontext_t uc_mcontext;
+ // Other fields are not used by V8, don't define them here.
+} ucontext_t;
+enum { REG_RBP = 10, REG_RSP = 15, REG_RIP = 16 };
#endif
#endif // V8_OS_ANDROID && !defined(__BIONIC_HAVE_UCONTEXT_T)
@@ -226,13 +256,27 @@ class SimulatorHelper {
}
inline void FillRegisters(RegisterState* state) {
+#if V8_TARGET_ARCH_ARM
state->pc = reinterpret_cast<Address>(simulator_->get_pc());
state->sp = reinterpret_cast<Address>(simulator_->get_register(
Simulator::sp));
-#if V8_TARGET_ARCH_ARM
state->fp = reinterpret_cast<Address>(simulator_->get_register(
Simulator::r11));
+#elif V8_TARGET_ARCH_ARM64
+ if (simulator_->sp() == 0 || simulator_->fp() == 0) {
+ // It is possible that the simulator is interrupted while it is updating
+ // the sp or fp register. The ARM64 simulator does this in two steps:
+ // first setting it to zero and then setting it to the new value.
+ // Bail out if sp/fp doesn't contain the new value.
+ return;
+ }
+ state->pc = reinterpret_cast<Address>(simulator_->pc());
+ state->sp = reinterpret_cast<Address>(simulator_->sp());
+ state->fp = reinterpret_cast<Address>(simulator_->fp());
#elif V8_TARGET_ARCH_MIPS
+ state->pc = reinterpret_cast<Address>(simulator_->get_pc());
+ state->sp = reinterpret_cast<Address>(simulator_->get_register(
+ Simulator::sp));
state->fp = reinterpret_cast<Address>(simulator_->get_register(
Simulator::fp));
#endif
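The ARM64 branch above bails out when it reads a zero sp or fp because the simulator updates those registers with two plain stores, zero first and the real value second, so a profiling interrupt can observe the transient zero. A stand-alone sketch of that reader-side guard, with std::atomic standing in for the simulator's register accessors (that substitution is an assumption for illustration; the point is the zero check):

#include <atomic>
#include <cstdint>

// Writer side (the simulated CPU thread): the register becomes visible in two
// separate stores, so a concurrent reader can observe a transient zero.
std::atomic<uint64_t> stack_pointer{0};

void SetStackPointer(uint64_t new_sp) {
  stack_pointer.store(0);       // step 1: transient zero
  stack_pointer.store(new_sp);  // step 2: the real value
}

// Reader side (the profiler signal handler): drop the sample when the
// transient zero is observed rather than walking a bogus stack.
bool SampleStackPointer(uint64_t* out_sp) {
  uint64_t sp = stack_pointer.load();
  if (sp == 0) return false;  // caught mid-update; skip this sample
  *out_sp = sp;
  return true;
}

int main() {
  SetStackPointer(0x7ffe0000);
  uint64_t sp = 0;
  return SampleStackPointer(&sp) ? 0 : 1;
}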
@@ -329,6 +373,11 @@ void SignalHandler::HandleProfilerSignal(int signal, siginfo_t* info,
SimulatorHelper helper;
if (!helper.Init(sampler, isolate)) return;
helper.FillRegisters(&state);
+ // It is possible that the simulator is interrupted while it is updating
+ // the sp or fp register. The ARM64 simulator does this in two steps:
+ // first setting it to zero and then setting it to the new value.
+ // Bail out if sp/fp doesn't contain the new value.
+ if (state.sp == 0 || state.fp == 0) return;
#else
// Extracting the sample from the context is extremely machine dependent.
ucontext_t* ucontext = reinterpret_cast<ucontext_t*>(context);
@@ -358,6 +407,11 @@ void SignalHandler::HandleProfilerSignal(int signal, siginfo_t* info,
state.fp = reinterpret_cast<Address>(mcontext.arm_fp);
#endif // defined(__GLIBC__) && !defined(__UCLIBC__) &&
// (__GLIBC__ < 2 || (__GLIBC__ == 2 && __GLIBC_MINOR__ <= 3))
+#elif V8_HOST_ARCH_ARM64
+ state.pc = reinterpret_cast<Address>(mcontext.pc);
+ state.sp = reinterpret_cast<Address>(mcontext.sp);
+ // FP is an alias for x29.
+ state.fp = reinterpret_cast<Address>(mcontext.regs[29]);
#elif V8_HOST_ARCH_MIPS
state.pc = reinterpret_cast<Address>(mcontext.pc);
state.sp = reinterpret_cast<Address>(mcontext.gregs[29]);
diff --git a/deps/v8/src/scanner.cc b/deps/v8/src/scanner.cc
index 26f840b23a..48bfd33269 100644
--- a/deps/v8/src/scanner.cc
+++ b/deps/v8/src/scanner.cc
@@ -35,6 +35,8 @@
#include "char-predicates-inl.h"
#include "conversions-inl.h"
#include "list-inl.h"
+#include "v8.h"
+#include "parser.h"
namespace v8 {
namespace internal {
@@ -246,7 +248,8 @@ Token::Value Scanner::Next() {
}
-static inline bool IsByteOrderMark(uc32 c) {
+// TODO(yangguo): check whether this is actually necessary.
+static inline bool IsLittleEndianByteOrderMark(uc32 c) {
// The Unicode value U+FFFE is guaranteed never to be assigned as a
// Unicode character; this implies that in a Unicode context the
// 0xFF, 0xFE byte pattern can only be interpreted as the U+FEFF
@@ -254,7 +257,7 @@ static inline bool IsByteOrderMark(uc32 c) {
// not be a U+FFFE character expressed in big-endian byte
// order). Nevertheless, we check for it to be compatible with
// Spidermonkey.
- return c == 0xFEFF || c == 0xFFFE;
+ return c == 0xFFFE;
}
@@ -262,14 +265,14 @@ bool Scanner::SkipWhiteSpace() {
int start_position = source_pos();
while (true) {
- // We treat byte-order marks (BOMs) as whitespace for better
- // compatibility with Spidermonkey and other JavaScript engines.
- while (unicode_cache_->IsWhiteSpace(c0_) || IsByteOrderMark(c0_)) {
- // IsWhiteSpace() includes line terminators!
+ while (true) {
+ // Advance as long as the character is a WhiteSpace or LineTerminator.
+ // Remember if the latter is the case.
if (unicode_cache_->IsLineTerminator(c0_)) {
- // Ignore line terminators, but remember them. This is necessary
- // for automatic semicolon insertion.
has_line_terminator_before_next_ = true;
+ } else if (!unicode_cache_->IsWhiteSpace(c0_) &&
+ !IsLittleEndianByteOrderMark(c0_)) {
+ break;
}
Advance();
}
@@ -906,7 +909,7 @@ uc32 Scanner::ScanIdentifierUnicodeEscape() {
KEYWORD("yield", Token::YIELD)
-static Token::Value KeywordOrIdentifierToken(const char* input,
+static Token::Value KeywordOrIdentifierToken(const uint8_t* input,
int input_length,
bool harmony_scoping,
bool harmony_modules) {
@@ -981,8 +984,8 @@ Token::Value Scanner::ScanIdentifierOrKeyword() {
literal.Complete();
- if (next_.literal_chars->is_ascii()) {
- Vector<const char> chars = next_.literal_chars->ascii_literal();
+ if (next_.literal_chars->is_one_byte()) {
+ Vector<const uint8_t> chars = next_.literal_chars->one_byte_literal();
return KeywordOrIdentifierToken(chars.start(),
chars.length(),
harmony_scoping_,
@@ -1113,21 +1116,74 @@ bool Scanner::ScanRegExpFlags() {
}
-int DuplicateFinder::AddAsciiSymbol(Vector<const char> key, int value) {
- return AddSymbol(Vector<const byte>::cast(key), true, value);
+Handle<String> Scanner::AllocateNextLiteralString(Isolate* isolate,
+ PretenureFlag tenured) {
+ if (is_next_literal_one_byte()) {
+ return isolate->factory()->NewStringFromOneByte(
+ Vector<const uint8_t>::cast(next_literal_one_byte_string()), tenured);
+ } else {
+ return isolate->factory()->NewStringFromTwoByte(
+ next_literal_two_byte_string(), tenured);
+ }
+}
+
+
+Handle<String> Scanner::AllocateInternalizedString(Isolate* isolate) {
+ if (is_literal_one_byte()) {
+ return isolate->factory()->InternalizeOneByteString(
+ literal_one_byte_string());
+ } else {
+ return isolate->factory()->InternalizeTwoByteString(
+ literal_two_byte_string());
+ }
+}
+
+
+double Scanner::DoubleValue() {
+ ASSERT(is_literal_one_byte());
+ return StringToDouble(
+ unicode_cache_, Vector<const char>::cast(literal_one_byte_string()),
+ ALLOW_HEX | ALLOW_OCTAL | ALLOW_IMPLICIT_OCTAL | ALLOW_BINARY);
+}
+
+
+int Scanner::FindNumber(DuplicateFinder* finder, int value) {
+ return finder->AddNumber(literal_one_byte_string(), value);
+}
+
+
+int Scanner::FindSymbol(DuplicateFinder* finder, int value) {
+ if (is_literal_one_byte()) {
+ return finder->AddOneByteSymbol(literal_one_byte_string(), value);
+ }
+ return finder->AddTwoByteSymbol(literal_two_byte_string(), value);
+}
+
+
+void Scanner::LogSymbol(ParserRecorder* log, int position) {
+ if (is_literal_one_byte()) {
+ log->LogOneByteSymbol(position, literal_one_byte_string());
+ } else {
+ log->LogTwoByteSymbol(position, literal_two_byte_string());
+ }
+}
+
+
+int DuplicateFinder::AddOneByteSymbol(Vector<const uint8_t> key, int value) {
+ return AddSymbol(key, true, value);
}
-int DuplicateFinder::AddUtf16Symbol(Vector<const uint16_t> key, int value) {
- return AddSymbol(Vector<const byte>::cast(key), false, value);
+int DuplicateFinder::AddTwoByteSymbol(Vector<const uint16_t> key, int value) {
+ return AddSymbol(Vector<const uint8_t>::cast(key), false, value);
}
-int DuplicateFinder::AddSymbol(Vector<const byte> key,
- bool is_ascii,
+int DuplicateFinder::AddSymbol(Vector<const uint8_t> key,
+ bool is_one_byte,
int value) {
- uint32_t hash = Hash(key, is_ascii);
- byte* encoding = BackupKey(key, is_ascii);
+ uint32_t hash = Hash(key, is_one_byte);
+ byte* encoding = BackupKey(key, is_one_byte);
HashMap::Entry* entry = map_.Lookup(encoding, hash, true);
int old_value = static_cast<int>(reinterpret_cast<intptr_t>(entry->value));
entry->value =
@@ -1136,15 +1192,16 @@ int DuplicateFinder::AddSymbol(Vector<const byte> key,
}
-int DuplicateFinder::AddNumber(Vector<const char> key, int value) {
+int DuplicateFinder::AddNumber(Vector<const uint8_t> key, int value) {
ASSERT(key.length() > 0);
// Quick check for already being in canonical form.
if (IsNumberCanonical(key)) {
- return AddAsciiSymbol(key, value);
+ return AddOneByteSymbol(key, value);
}
int flags = ALLOW_HEX | ALLOW_OCTAL | ALLOW_IMPLICIT_OCTAL | ALLOW_BINARY;
- double double_value = StringToDouble(unicode_constants_, key, flags, 0.0);
+ double double_value = StringToDouble(
+ unicode_constants_, Vector<const char>::cast(key), flags, 0.0);
int length;
const char* string;
if (!std::isfinite(double_value)) {
@@ -1160,7 +1217,7 @@ int DuplicateFinder::AddNumber(Vector<const char> key, int value) {
}
-bool DuplicateFinder::IsNumberCanonical(Vector<const char> number) {
+bool DuplicateFinder::IsNumberCanonical(Vector<const uint8_t> number) {
// Test for a safe approximation of number literals that are already
// in canonical form: max 15 digits, no leading zeroes, except an
// integer part that is a single zero, and no trailing zeros below
@@ -1179,7 +1236,7 @@ bool DuplicateFinder::IsNumberCanonical(Vector<const char> number) {
pos++;
bool invalid_last_digit = true;
while (pos < length) {
- byte digit = number[pos] - '0';
+ uint8_t digit = number[pos] - '0';
if (digit > '9' - '0') return false;
invalid_last_digit = (digit == 0);
pos++;
@@ -1188,11 +1245,11 @@ bool DuplicateFinder::IsNumberCanonical(Vector<const char> number) {
}
-uint32_t DuplicateFinder::Hash(Vector<const byte> key, bool is_ascii) {
+uint32_t DuplicateFinder::Hash(Vector<const uint8_t> key, bool is_one_byte) {
// Primitive hash function, almost identical to the one used
// for strings (except that it's seeded by the length and ASCII-ness).
int length = key.length();
- uint32_t hash = (length << 1) | (is_ascii ? 1 : 0) ;
+ uint32_t hash = (length << 1) | (is_one_byte ? 1 : 0) ;
for (int i = 0; i < length; i++) {
uint32_t c = key[i];
hash = (hash + c) * 1025;
@@ -1210,39 +1267,42 @@ bool DuplicateFinder::Match(void* first, void* second) {
// was ASCII.
byte* s1 = reinterpret_cast<byte*>(first);
byte* s2 = reinterpret_cast<byte*>(second);
- uint32_t length_ascii_field = 0;
+ uint32_t length_one_byte_field = 0;
byte c1;
do {
c1 = *s1;
if (c1 != *s2) return false;
- length_ascii_field = (length_ascii_field << 7) | (c1 & 0x7f);
+ length_one_byte_field = (length_one_byte_field << 7) | (c1 & 0x7f);
s1++;
s2++;
} while ((c1 & 0x80) != 0);
- int length = static_cast<int>(length_ascii_field >> 1);
+ int length = static_cast<int>(length_one_byte_field >> 1);
return memcmp(s1, s2, length) == 0;
}
-byte* DuplicateFinder::BackupKey(Vector<const byte> bytes,
- bool is_ascii) {
- uint32_t ascii_length = (bytes.length() << 1) | (is_ascii ? 1 : 0);
+byte* DuplicateFinder::BackupKey(Vector<const uint8_t> bytes,
+ bool is_one_byte) {
+ uint32_t one_byte_length = (bytes.length() << 1) | (is_one_byte ? 1 : 0);
backing_store_.StartSequence();
- // Emit ascii_length as base-128 encoded number, with the 7th bit set
+ // Emit one_byte_length as base-128 encoded number, with the 7th bit set
// on the byte of every heptet except the last, least significant, one.
- if (ascii_length >= (1 << 7)) {
- if (ascii_length >= (1 << 14)) {
- if (ascii_length >= (1 << 21)) {
- if (ascii_length >= (1 << 28)) {
- backing_store_.Add(static_cast<byte>((ascii_length >> 28) | 0x80));
+ if (one_byte_length >= (1 << 7)) {
+ if (one_byte_length >= (1 << 14)) {
+ if (one_byte_length >= (1 << 21)) {
+ if (one_byte_length >= (1 << 28)) {
+ backing_store_.Add(
+ static_cast<uint8_t>((one_byte_length >> 28) | 0x80));
}
- backing_store_.Add(static_cast<byte>((ascii_length >> 21) | 0x80u));
+ backing_store_.Add(
+ static_cast<uint8_t>((one_byte_length >> 21) | 0x80u));
}
- backing_store_.Add(static_cast<byte>((ascii_length >> 14) | 0x80u));
+ backing_store_.Add(
+ static_cast<uint8_t>((one_byte_length >> 14) | 0x80u));
}
- backing_store_.Add(static_cast<byte>((ascii_length >> 7) | 0x80u));
+ backing_store_.Add(static_cast<uint8_t>((one_byte_length >> 7) | 0x80u));
}
- backing_store_.Add(static_cast<byte>(ascii_length & 0x7f));
+ backing_store_.Add(static_cast<uint8_t>(one_byte_length & 0x7f));
backing_store_.AddBlock(bytes);
return backing_store_.EndSequence().start();
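BackupKey above prefixes every backed-up key with (length << 1) | is_one_byte, emitted as a base-128 sequence with the most significant heptet first and bit 7 set on every heptet except the last; DuplicateFinder::Match decodes the same prefix. A self-contained sketch of that header encoding and its decoder, using std::vector in place of the collector's backing store:

#include <cassert>
#include <cstdint>
#include <vector>

// Encode (length << 1) | is_one_byte as base-128 heptets, most significant
// first, setting bit 7 on every heptet except the final one.
std::vector<uint8_t> EncodeKeyHeader(uint32_t length, bool is_one_byte) {
  uint32_t value = (length << 1) | (is_one_byte ? 1 : 0);
  std::vector<uint8_t> out;
  for (int shift = 28; shift > 0; shift -= 7) {
    if (value >= (1u << shift)) {
      out.push_back(static_cast<uint8_t>((value >> shift) | 0x80));
    }
  }
  out.push_back(static_cast<uint8_t>(value & 0x7f));
  return out;
}

// Decode the header again; this mirrors the loop in DuplicateFinder::Match.
// Returns the number of header bytes consumed.
size_t DecodeKeyHeader(const std::vector<uint8_t>& in,
                       uint32_t* length, bool* is_one_byte) {
  uint32_t value = 0;
  size_t pos = 0;
  uint8_t b;
  do {
    b = in[pos++];
    value = (value << 7) | (b & 0x7f);
  } while ((b & 0x80) != 0);
  *is_one_byte = (value & 1) != 0;
  *length = value >> 1;
  return pos;
}

int main() {
  uint32_t length = 0;
  bool one_byte = false;
  std::vector<uint8_t> header = EncodeKeyHeader(300, true);
  assert(header.size() == 2);  // 601 needs two heptets: 0x84, 0x59
  DecodeKeyHeader(header, &length, &one_byte);
  assert(length == 300 && one_byte);
  return 0;
}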
diff --git a/deps/v8/src/scanner.h b/deps/v8/src/scanner.h
index 3cefc833ac..73026ab5a8 100644
--- a/deps/v8/src/scanner.h
+++ b/deps/v8/src/scanner.h
@@ -44,6 +44,9 @@ namespace v8 {
namespace internal {
+class ParserRecorder;
+
+
// Returns the value (0 .. 15) of a hexadecimal character c.
// If c is not a legal hexadecimal character, returns a value < 0.
inline int HexValue(uc32 c) {
@@ -117,8 +120,8 @@ class Utf16CharacterStream {
virtual bool ReadBlock() = 0;
virtual unsigned SlowSeekForward(unsigned code_unit_count) = 0;
- const uc16* buffer_cursor_;
- const uc16* buffer_end_;
+ const uint16_t* buffer_cursor_;
+ const uint16_t* buffer_end_;
unsigned pos_;
};
@@ -139,12 +142,17 @@ class UnicodeCache {
bool IsIdentifierPart(unibrow::uchar c) { return kIsIdentifierPart.get(c); }
bool IsLineTerminator(unibrow::uchar c) { return kIsLineTerminator.get(c); }
bool IsWhiteSpace(unibrow::uchar c) { return kIsWhiteSpace.get(c); }
+ bool IsWhiteSpaceOrLineTerminator(unibrow::uchar c) {
+ return kIsWhiteSpaceOrLineTerminator.get(c);
+ }
private:
unibrow::Predicate<IdentifierStart, 128> kIsIdentifierStart;
unibrow::Predicate<IdentifierPart, 128> kIsIdentifierPart;
unibrow::Predicate<unibrow::LineTerminator, 128> kIsLineTerminator;
- unibrow::Predicate<unibrow::WhiteSpace, 128> kIsWhiteSpace;
+ unibrow::Predicate<WhiteSpace, 128> kIsWhiteSpace;
+ unibrow::Predicate<WhiteSpaceOrLineTerminator, 128>
+ kIsWhiteSpaceOrLineTerminator;
StaticResource<Utf8Decoder> utf8_decoder_;
DISALLOW_COPY_AND_ASSIGN(UnicodeCache);
@@ -161,32 +169,32 @@ class DuplicateFinder {
backing_store_(16),
map_(&Match) { }
- int AddAsciiSymbol(Vector<const char> key, int value);
- int AddUtf16Symbol(Vector<const uint16_t> key, int value);
+ int AddOneByteSymbol(Vector<const uint8_t> key, int value);
+ int AddTwoByteSymbol(Vector<const uint16_t> key, int value);
// Add a number literal by converting it (if necessary)
// to the string that ToString(ToNumber(literal)) would generate,
// and then adding that string with AddOneByteSymbol.
// This string is the actual value used as key in an object literal,
// and the one that must be different from the other keys.
- int AddNumber(Vector<const char> key, int value);
+ int AddNumber(Vector<const uint8_t> key, int value);
private:
- int AddSymbol(Vector<const byte> key, bool is_ascii, int value);
+ int AddSymbol(Vector<const uint8_t> key, bool is_one_byte, int value);
// Backs up the key and its length in the backing store.
// The backup is stored with a base 127 encoding of the
- // length (plus a bit saying whether the string is ASCII),
+ // length (plus a bit saying whether the string is one byte),
// followed by the bytes of the key.
- byte* BackupKey(Vector<const byte> key, bool is_ascii);
+ uint8_t* BackupKey(Vector<const uint8_t> key, bool is_one_byte);
// Compare two encoded keys (both pointing into the backing store)
// for having the same base-127 encoded lengths and ASCII-ness,
// and then having the same 'length' bytes following.
static bool Match(void* first, void* second);
// Creates a hash from a sequence of bytes.
- static uint32_t Hash(Vector<const byte> key, bool is_ascii);
+ static uint32_t Hash(Vector<const uint8_t> key, bool is_one_byte);
// Checks whether a string containing a JS number is its canonical
// form.
- static bool IsNumberCanonical(Vector<const char> key);
+ static bool IsNumberCanonical(Vector<const uint8_t> key);
// Size of buffer. Sufficient for using it to call DoubleToCString in
// from conversions.h.
@@ -206,7 +214,7 @@ class DuplicateFinder {
class LiteralBuffer {
public:
- LiteralBuffer() : is_ascii_(true), position_(0), backing_store_() { }
+ LiteralBuffer() : is_one_byte_(true), position_(0), backing_store_() { }
~LiteralBuffer() {
if (backing_store_.length() > 0) {
@@ -216,48 +224,48 @@ class LiteralBuffer {
INLINE(void AddChar(uint32_t code_unit)) {
if (position_ >= backing_store_.length()) ExpandBuffer();
- if (is_ascii_) {
+ if (is_one_byte_) {
if (code_unit <= unibrow::Latin1::kMaxChar) {
backing_store_[position_] = static_cast<byte>(code_unit);
position_ += kOneByteSize;
return;
}
- ConvertToUtf16();
+ ConvertToTwoByte();
}
ASSERT(code_unit < 0x10000u);
- *reinterpret_cast<uc16*>(&backing_store_[position_]) = code_unit;
+ *reinterpret_cast<uint16_t*>(&backing_store_[position_]) = code_unit;
position_ += kUC16Size;
}
- bool is_ascii() { return is_ascii_; }
+ bool is_one_byte() { return is_one_byte_; }
bool is_contextual_keyword(Vector<const char> keyword) {
- return is_ascii() && keyword.length() == position_ &&
+ return is_one_byte() && keyword.length() == position_ &&
(memcmp(keyword.start(), backing_store_.start(), position_) == 0);
}
- Vector<const uc16> utf16_literal() {
- ASSERT(!is_ascii_);
+ Vector<const uint16_t> two_byte_literal() {
+ ASSERT(!is_one_byte_);
ASSERT((position_ & 0x1) == 0);
- return Vector<const uc16>(
- reinterpret_cast<const uc16*>(backing_store_.start()),
+ return Vector<const uint16_t>(
+ reinterpret_cast<const uint16_t*>(backing_store_.start()),
position_ >> 1);
}
- Vector<const char> ascii_literal() {
- ASSERT(is_ascii_);
- return Vector<const char>(
- reinterpret_cast<const char*>(backing_store_.start()),
+ Vector<const uint8_t> one_byte_literal() {
+ ASSERT(is_one_byte_);
+ return Vector<const uint8_t>(
+ reinterpret_cast<const uint8_t*>(backing_store_.start()),
position_);
}
int length() {
- return is_ascii_ ? position_ : (position_ >> 1);
+ return is_one_byte_ ? position_ : (position_ >> 1);
}
void Reset() {
position_ = 0;
- is_ascii_ = true;
+ is_one_byte_ = true;
}
private:
@@ -278,8 +286,8 @@ class LiteralBuffer {
backing_store_ = new_store;
}
- void ConvertToUtf16() {
- ASSERT(is_ascii_);
+ void ConvertToTwoByte() {
+ ASSERT(is_one_byte_);
Vector<byte> new_store;
int new_content_size = position_ * kUC16Size;
if (new_content_size >= backing_store_.length()) {
@@ -290,7 +298,7 @@ class LiteralBuffer {
new_store = backing_store_;
}
uint8_t* src = backing_store_.start();
- uc16* dst = reinterpret_cast<uc16*>(new_store.start());
+ uint16_t* dst = reinterpret_cast<uint16_t*>(new_store.start());
for (int i = position_ - 1; i >= 0; i--) {
dst[i] = src[i];
}
@@ -299,10 +307,10 @@ class LiteralBuffer {
backing_store_ = new_store;
}
position_ = new_content_size;
- is_ascii_ = false;
+ is_one_byte_ = false;
}
- bool is_ascii_;
+ bool is_one_byte_;
int position_;
Vector<byte> backing_store_;
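ConvertToTwoByte above widens the buffered characters by copying them from the last index down to the first; when the new store aliases the old one, a forward copy would overwrite source bytes before they are read. A small sketch of that in-place widening on a plain byte buffer (the layout here is assumed for illustration, not the LiteralBuffer itself):

#include <cassert>
#include <cstdint>
#include <vector>

// Widen 'count' one-byte characters stored at the front of 'storage' into
// two-byte characters in the same storage. Iterating from the last character
// down to the first keeps the aliasing case correct: each two-byte slot is
// written only after the one-byte sources it would overwrite have been read.
void WidenInPlace(std::vector<uint8_t>* storage, int count) {
  assert(static_cast<int>(storage->size()) >= 2 * count);
  const uint8_t* src = storage->data();
  uint16_t* dst = reinterpret_cast<uint16_t*>(storage->data());
  for (int i = count - 1; i >= 0; i--) {
    dst[i] = src[i];
  }
}

int main() {
  std::vector<uint8_t> buffer = {'a', 'b', 'c', 0, 0, 0};
  WidenInPlace(&buffer, 3);
  const uint16_t* chars = reinterpret_cast<const uint16_t*>(buffer.data());
  assert(chars[0] == 'a' && chars[1] == 'b' && chars[2] == 'c');
  return 0;
}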
@@ -365,32 +373,13 @@ class Scanner {
// Returns the location information for the current token
// (the token last returned by Next()).
Location location() const { return current_.location; }
- // Returns the literal string, if any, for the current token (the
- // token last returned by Next()). The string is 0-terminated.
- // Literal strings are collected for identifiers, strings, and
- // numbers.
- // These functions only give the correct result if the literal
- // was scanned between calls to StartLiteral() and TerminateLiteral().
- Vector<const char> literal_ascii_string() {
- ASSERT_NOT_NULL(current_.literal_chars);
- return current_.literal_chars->ascii_literal();
- }
- Vector<const uc16> literal_utf16_string() {
- ASSERT_NOT_NULL(current_.literal_chars);
- return current_.literal_chars->utf16_literal();
- }
- bool is_literal_ascii() {
- ASSERT_NOT_NULL(current_.literal_chars);
- return current_.literal_chars->is_ascii();
- }
- bool is_literal_contextual_keyword(Vector<const char> keyword) {
- ASSERT_NOT_NULL(current_.literal_chars);
- return current_.literal_chars->is_contextual_keyword(keyword);
- }
- int literal_length() const {
- ASSERT_NOT_NULL(current_.literal_chars);
- return current_.literal_chars->length();
- }
+
+ // Similar functions for the upcoming token.
+
+ // One token look-ahead (past the token returned by Next()).
+ Token::Value peek() const { return next_.token; }
+
+ Location peek_location() const { return next_.location; }
bool literal_contains_escapes() const {
Location location = current_.location;
@@ -401,43 +390,47 @@ class Scanner {
}
return current_.literal_chars->length() != source_length;
}
-
- // Similar functions for the upcoming token.
-
- // One token look-ahead (past the token returned by Next()).
- Token::Value peek() const { return next_.token; }
-
- Location peek_location() const { return next_.location; }
-
- // Returns the literal string for the next token (the token that
- // would be returned if Next() were called).
- Vector<const char> next_literal_ascii_string() {
- ASSERT_NOT_NULL(next_.literal_chars);
- return next_.literal_chars->ascii_literal();
- }
- Vector<const uc16> next_literal_utf16_string() {
- ASSERT_NOT_NULL(next_.literal_chars);
- return next_.literal_chars->utf16_literal();
- }
- bool is_next_literal_ascii() {
- ASSERT_NOT_NULL(next_.literal_chars);
- return next_.literal_chars->is_ascii();
+ bool is_literal_contextual_keyword(Vector<const char> keyword) {
+ ASSERT_NOT_NULL(current_.literal_chars);
+ return current_.literal_chars->is_contextual_keyword(keyword);
}
bool is_next_contextual_keyword(Vector<const char> keyword) {
ASSERT_NOT_NULL(next_.literal_chars);
return next_.literal_chars->is_contextual_keyword(keyword);
}
- int next_literal_length() const {
- ASSERT_NOT_NULL(next_.literal_chars);
- return next_.literal_chars->length();
+
+ Handle<String> AllocateNextLiteralString(Isolate* isolate,
+ PretenureFlag tenured);
+ Handle<String> AllocateInternalizedString(Isolate* isolate);
+
+ double DoubleValue();
+ bool UnescapedLiteralMatches(const char* data, int length) {
+ if (is_literal_one_byte() &&
+ literal_length() == length &&
+ !literal_contains_escapes()) {
+ const char* token =
+ reinterpret_cast<const char*>(literal_one_byte_string().start());
+ return !strncmp(token, data, length);
+ }
+ return false;
+ }
+ void IsGetOrSet(bool* is_get, bool* is_set) {
+ if (is_literal_one_byte() &&
+ literal_length() == 3 &&
+ !literal_contains_escapes()) {
+ const char* token =
+ reinterpret_cast<const char*>(literal_one_byte_string().start());
+ *is_get = strncmp(token, "get", 3) == 0;
+ *is_set = !*is_get && strncmp(token, "set", 3) == 0;
+ }
}
- UnicodeCache* unicode_cache() { return unicode_cache_; }
+ int FindNumber(DuplicateFinder* finder, int value);
+ int FindSymbol(DuplicateFinder* finder, int value);
- static const int kCharacterLookaheadBufferSize = 1;
+ void LogSymbol(ParserRecorder* log, int position);
- // Scans octal escape sequence. Also accepts "\0" decimal escape sequence.
- uc32 ScanOctalEscape(uc32 c, int length);
+ UnicodeCache* unicode_cache() { return unicode_cache_; }
// Returns the location of the last seen octal literal.
Location octal_position() const { return octal_pos_; }
@@ -490,6 +483,11 @@ class Scanner {
LiteralBuffer* literal_chars;
};
+ static const int kCharacterLookaheadBufferSize = 1;
+
+ // Scans octal escape sequence. Also accepts "\0" decimal escape sequence.
+ uc32 ScanOctalEscape(uc32 c, int length);
+
// Call this after setting source_ to the input.
void Init() {
// Set c0_ (one character ahead)
@@ -550,6 +548,47 @@ class Scanner {
}
}
+ // Returns the literal string, if any, for the current token (the
+ // token last returned by Next()). The string is 0-terminated.
+ // Literal strings are collected for identifiers, strings, and
+ // numbers.
+ // These functions only give the correct result if the literal
+ // was scanned between calls to StartLiteral() and TerminateLiteral().
+ Vector<const uint8_t> literal_one_byte_string() {
+ ASSERT_NOT_NULL(current_.literal_chars);
+ return current_.literal_chars->one_byte_literal();
+ }
+ Vector<const uint16_t> literal_two_byte_string() {
+ ASSERT_NOT_NULL(current_.literal_chars);
+ return current_.literal_chars->two_byte_literal();
+ }
+ bool is_literal_one_byte() {
+ ASSERT_NOT_NULL(current_.literal_chars);
+ return current_.literal_chars->is_one_byte();
+ }
+ int literal_length() const {
+ ASSERT_NOT_NULL(current_.literal_chars);
+ return current_.literal_chars->length();
+ }
+ // Returns the literal string for the next token (the token that
+ // would be returned if Next() were called).
+ Vector<const uint8_t> next_literal_one_byte_string() {
+ ASSERT_NOT_NULL(next_.literal_chars);
+ return next_.literal_chars->one_byte_literal();
+ }
+ Vector<const uint16_t> next_literal_two_byte_string() {
+ ASSERT_NOT_NULL(next_.literal_chars);
+ return next_.literal_chars->two_byte_literal();
+ }
+ bool is_next_literal_one_byte() {
+ ASSERT_NOT_NULL(next_.literal_chars);
+ return next_.literal_chars->is_one_byte();
+ }
+ int next_literal_length() const {
+ ASSERT_NOT_NULL(next_.literal_chars);
+ return next_.literal_chars->length();
+ }
+
uc32 ScanHexNumber(int expected_length);
// Scans a single JavaScript token.
diff --git a/deps/v8/src/scopeinfo.cc b/deps/v8/src/scopeinfo.cc
index 03e69bf384..e2ae85432a 100644
--- a/deps/v8/src/scopeinfo.cc
+++ b/deps/v8/src/scopeinfo.cc
@@ -78,7 +78,7 @@ Handle<ScopeInfo> ScopeInfo::Create(Scope* scope, Zone* zone) {
// Encode the flags.
int flags = ScopeTypeField::encode(scope->scope_type()) |
CallsEvalField::encode(scope->calls_eval()) |
- LanguageModeField::encode(scope->language_mode()) |
+ StrictModeField::encode(scope->strict_mode()) |
FunctionVariableField::encode(function_name_info) |
FunctionVariableMode::encode(function_variable_mode);
scope_info->SetFlags(flags);
@@ -164,8 +164,8 @@ bool ScopeInfo::CallsEval() {
}
-LanguageMode ScopeInfo::language_mode() {
- return length() > 0 ? LanguageModeField::decode(Flags()) : CLASSIC_MODE;
+StrictMode ScopeInfo::strict_mode() {
+ return length() > 0 ? StrictModeField::decode(Flags()) : SLOPPY;
}
@@ -378,7 +378,7 @@ bool ScopeInfo::CopyContextLocalsToScopeObject(Handle<ScopeInfo> scope_info,
Handle<String>(String::cast(scope_info->get(i))),
Handle<Object>(context->get(context_index), isolate),
::NONE,
- kNonStrictMode);
+ SLOPPY);
RETURN_IF_EMPTY_HANDLE_VALUE(isolate, result, false);
}
return true;
diff --git a/deps/v8/src/scopes.cc b/deps/v8/src/scopes.cc
index 97b67bd5a4..bcb6435011 100644
--- a/deps/v8/src/scopes.cc
+++ b/deps/v8/src/scopes.cc
@@ -190,9 +190,8 @@ void Scope::SetDefaults(ScopeType scope_type,
scope_contains_with_ = false;
scope_calls_eval_ = false;
// Inherit the strict mode from the parent scope.
- language_mode_ = (outer_scope != NULL)
- ? outer_scope->language_mode_ : CLASSIC_MODE;
- outer_scope_calls_non_strict_eval_ = false;
+ strict_mode_ = outer_scope != NULL ? outer_scope->strict_mode_ : SLOPPY;
+ outer_scope_calls_sloppy_eval_ = false;
inner_scope_calls_eval_ = false;
force_eager_compilation_ = false;
force_context_allocation_ = (outer_scope != NULL && !is_function_scope())
@@ -207,7 +206,7 @@ void Scope::SetDefaults(ScopeType scope_type,
end_position_ = RelocInfo::kNoPosition;
if (!scope_info.is_null()) {
scope_calls_eval_ = scope_info->CallsEval();
- language_mode_ = scope_info->language_mode();
+ strict_mode_ = scope_info->strict_mode();
}
}
@@ -307,7 +306,7 @@ bool Scope::Analyze(CompilationInfo* info) {
}
#endif
- info->SetScope(scope);
+ info->PrepareForCompilation(scope);
return true;
}
@@ -470,7 +469,7 @@ Variable* Scope::DeclareLocal(Handle<String> name,
InitializationFlag init_flag,
Interface* interface) {
ASSERT(!already_resolved());
- // This function handles VAR and CONST modes. DYNAMIC variables are
+ // This function handles VAR, LET, and CONST modes. DYNAMIC variables are
// introduced during variable allocation, INTERNAL variables are allocated
// explicitly, and TEMPORARY variables are allocated via NewTemporary().
ASSERT(IsDeclaredVariableMode(mode));
@@ -643,13 +642,13 @@ void Scope::CollectStackAndContextLocals(ZoneList<Variable*>* stack_locals,
bool Scope::AllocateVariables(CompilationInfo* info,
AstNodeFactory<AstNullVisitor>* factory) {
// 1) Propagate scope information.
- bool outer_scope_calls_non_strict_eval = false;
+ bool outer_scope_calls_sloppy_eval = false;
if (outer_scope_ != NULL) {
- outer_scope_calls_non_strict_eval =
- outer_scope_->outer_scope_calls_non_strict_eval() |
- outer_scope_->calls_non_strict_eval();
+ outer_scope_calls_sloppy_eval =
+ outer_scope_->outer_scope_calls_sloppy_eval() |
+ outer_scope_->calls_sloppy_eval();
}
- PropagateScopeInfo(outer_scope_calls_non_strict_eval);
+ PropagateScopeInfo(outer_scope_calls_sloppy_eval);
// 2) Allocate module instances.
if (FLAG_harmony_modules && (is_global_scope() || is_module_scope())) {
@@ -881,21 +880,14 @@ void Scope::Print(int n) {
if (HasTrivialOuterContext()) {
Indent(n1, "// scope has trivial outer context\n");
}
- switch (language_mode()) {
- case CLASSIC_MODE:
- break;
- case STRICT_MODE:
- Indent(n1, "// strict mode scope\n");
- break;
- case EXTENDED_MODE:
- Indent(n1, "// extended mode scope\n");
- break;
+ if (strict_mode() == STRICT) {
+ Indent(n1, "// strict mode scope\n");
}
if (scope_inside_with_) Indent(n1, "// scope inside 'with'\n");
if (scope_contains_with_) Indent(n1, "// scope contains 'with'\n");
if (scope_calls_eval_) Indent(n1, "// scope calls 'eval'\n");
- if (outer_scope_calls_non_strict_eval_) {
- Indent(n1, "// outer scope calls 'eval' in non-strict context\n");
+ if (outer_scope_calls_sloppy_eval_) {
+ Indent(n1, "// outer scope calls 'eval' in sloppy context\n");
}
if (inner_scope_calls_eval_) Indent(n1, "// inner scope calls 'eval'\n");
if (num_stack_slots_ > 0) { Indent(n1, "// ");
@@ -1017,9 +1009,9 @@ Variable* Scope::LookupRecursive(Handle<String> name,
// object).
*binding_kind = DYNAMIC_LOOKUP;
return NULL;
- } else if (calls_non_strict_eval()) {
+ } else if (calls_sloppy_eval()) {
// A variable binding may have been found in an outer scope, but the current
- // scope makes a non-strict 'eval' call, so the found variable may not be
+ // scope makes a sloppy 'eval' call, so the found variable may not be
// the correct one (the 'eval' may introduce a binding with the same name).
// In that case, change the lookup result to reflect this situation.
if (*binding_kind == BOUND) {
@@ -1071,8 +1063,7 @@ bool Scope::ResolveVariable(CompilationInfo* info,
break;
case UNBOUND_EVAL_SHADOWED:
- // No binding has been found. But some scope makes a
- // non-strict 'eval' call.
+ // No binding has been found. But some scope makes a sloppy 'eval' call.
var = NonLocal(proxy->name(), DYNAMIC_GLOBAL);
break;
@@ -1084,7 +1075,7 @@ bool Scope::ResolveVariable(CompilationInfo* info,
ASSERT(var != NULL);
- if (FLAG_harmony_scoping && is_extended_mode() &&
+ if (FLAG_harmony_scoping && strict_mode() == STRICT &&
var->is_const_mode() && proxy->IsLValue()) {
// Assignment to const. Throw a syntax error.
MessageLocation location(
@@ -1123,7 +1114,7 @@ bool Scope::ResolveVariable(CompilationInfo* info,
Isolate* isolate = info->isolate();
Factory* factory = isolate->factory();
Handle<JSArray> array = factory->NewJSArray(1);
- USE(JSObject::SetElement(array, 0, var->name(), NONE, kStrictMode));
+ USE(JSObject::SetElement(array, 0, var->name(), NONE, STRICT));
Handle<Object> result =
factory->NewSyntaxError("module_type_error", array);
isolate->Throw(*result, &location);
@@ -1157,16 +1148,16 @@ bool Scope::ResolveVariablesRecursively(
}
-bool Scope::PropagateScopeInfo(bool outer_scope_calls_non_strict_eval ) {
- if (outer_scope_calls_non_strict_eval) {
- outer_scope_calls_non_strict_eval_ = true;
+bool Scope::PropagateScopeInfo(bool outer_scope_calls_sloppy_eval ) {
+ if (outer_scope_calls_sloppy_eval) {
+ outer_scope_calls_sloppy_eval_ = true;
}
- bool calls_non_strict_eval =
- this->calls_non_strict_eval() || outer_scope_calls_non_strict_eval_;
+ bool calls_sloppy_eval =
+ this->calls_sloppy_eval() || outer_scope_calls_sloppy_eval_;
for (int i = 0; i < inner_scopes_.length(); i++) {
Scope* inner_scope = inner_scopes_[i];
- if (inner_scope->PropagateScopeInfo(calls_non_strict_eval)) {
+ if (inner_scope->PropagateScopeInfo(calls_sloppy_eval)) {
inner_scope_calls_eval_ = true;
}
if (inner_scope->force_eager_compilation_) {
@@ -1246,7 +1237,7 @@ void Scope::AllocateParameterLocals() {
Variable* arguments = LocalLookup(isolate_->factory()->arguments_string());
ASSERT(arguments != NULL); // functions have 'arguments' declared implicitly
- bool uses_nonstrict_arguments = false;
+ bool uses_sloppy_arguments = false;
if (MustAllocate(arguments) && !HasArgumentsParameter()) {
// 'arguments' is used. Unless there is also a parameter called
@@ -1265,7 +1256,7 @@ void Scope::AllocateParameterLocals() {
// In strict mode 'arguments' does not alias formal parameters.
// Therefore in strict mode we allocate parameters as if 'arguments'
// were not used.
- uses_nonstrict_arguments = is_classic_mode();
+ uses_sloppy_arguments = strict_mode() == SLOPPY;
}
// The same parameter may occur multiple times in the parameters_ list.
@@ -1275,7 +1266,7 @@ void Scope::AllocateParameterLocals() {
for (int i = params_.length() - 1; i >= 0; --i) {
Variable* var = params_[i];
ASSERT(var->scope() == this);
- if (uses_nonstrict_arguments) {
+ if (uses_sloppy_arguments) {
// Force context allocation of the parameter.
var->ForceContextAllocation();
}
diff --git a/deps/v8/src/scopes.h b/deps/v8/src/scopes.h
index 06aaa902cf..b0d84343e4 100644
--- a/deps/v8/src/scopes.h
+++ b/deps/v8/src/scopes.h
@@ -234,9 +234,7 @@ class Scope: public ZoneObject {
void RecordEvalCall() { if (!is_global_scope()) scope_calls_eval_ = true; }
// Set the strict mode flag (unless disabled by a global flag).
- void SetLanguageMode(LanguageMode language_mode) {
- language_mode_ = language_mode;
- }
+ void SetStrictMode(StrictMode strict_mode) { strict_mode_ = strict_mode; }
// Position in the source where this scope begins and ends.
//
@@ -293,23 +291,17 @@ class Scope: public ZoneObject {
return is_eval_scope() || is_function_scope() ||
is_module_scope() || is_global_scope();
}
- bool is_classic_mode() const {
- return language_mode() == CLASSIC_MODE;
- }
- bool is_extended_mode() const {
- return language_mode() == EXTENDED_MODE;
- }
- bool is_strict_or_extended_eval_scope() const {
- return is_eval_scope() && !is_classic_mode();
+ bool is_strict_eval_scope() const {
+ return is_eval_scope() && strict_mode_ == STRICT;
}
// Information about which scopes calls eval.
bool calls_eval() const { return scope_calls_eval_; }
- bool calls_non_strict_eval() {
- return scope_calls_eval_ && is_classic_mode();
+ bool calls_sloppy_eval() {
+ return scope_calls_eval_ && strict_mode_ == SLOPPY;
}
- bool outer_scope_calls_non_strict_eval() const {
- return outer_scope_calls_non_strict_eval_;
+ bool outer_scope_calls_sloppy_eval() const {
+ return outer_scope_calls_sloppy_eval_;
}
// Is this scope inside a with statement.
@@ -324,7 +316,7 @@ class Scope: public ZoneObject {
ScopeType scope_type() const { return scope_type_; }
// The language mode of this scope.
- LanguageMode language_mode() const { return language_mode_; }
+ StrictMode strict_mode() const { return strict_mode_; }
// The variable corresponding the 'this' value.
Variable* receiver() { return receiver_; }
@@ -493,14 +485,14 @@ class Scope: public ZoneObject {
// This scope or a nested catch scope or with scope contain an 'eval' call. At
// the 'eval' call site this scope is the declaration scope.
bool scope_calls_eval_;
- // The language mode of this scope.
- LanguageMode language_mode_;
+ // The strict mode of this scope.
+ StrictMode strict_mode_;
// Source positions.
int start_position_;
int end_position_;
// Computed via PropagateScopeInfo.
- bool outer_scope_calls_non_strict_eval_;
+ bool outer_scope_calls_sloppy_eval_;
bool inner_scope_calls_eval_;
bool force_eager_compilation_;
bool force_context_allocation_;
@@ -538,13 +530,13 @@ class Scope: public ZoneObject {
// The variable reference could be statically resolved to a variable binding
// which is returned. There is no 'with' statement between the reference and
// the binding and no scope between the reference scope (inclusive) and
- // binding scope (exclusive) makes a non-strict 'eval' call.
+ // binding scope (exclusive) makes a sloppy 'eval' call.
BOUND,
// The variable reference could be statically resolved to a variable binding
// which is returned. There is no 'with' statement between the reference and
// the binding, but some scope between the reference scope (inclusive) and
- // binding scope (exclusive) makes a non-strict 'eval' call, that might
+ // binding scope (exclusive) makes a sloppy 'eval' call, that might
// possibly introduce variable bindings shadowing the found one. Thus the
// found variable binding is just a guess.
BOUND_EVAL_SHADOWED,
@@ -553,13 +545,13 @@ class Scope: public ZoneObject {
// and thus should be considered referencing a global variable. NULL is
// returned. The variable reference is not inside any 'with' statement and
// no scope between the reference scope (inclusive) and global scope
- // (exclusive) makes a non-strict 'eval' call.
+ // (exclusive) makes a sloppy 'eval' call.
UNBOUND,
// The variable reference could not be statically resolved to any binding
// NULL is returned. The variable reference is not inside any 'with'
// statement, but some scope between the reference scope (inclusive) and
- // global scope (exclusive) makes a non-strict 'eval' call, that might
+ // global scope (exclusive) makes a sloppy 'eval' call, that might
// possibly introduce a variable binding. Thus the reference should be
// considered referencing a global variable unless it is shadowed by an
// 'eval' introduced binding.
@@ -591,7 +583,7 @@ class Scope: public ZoneObject {
AstNodeFactory<AstNullVisitor>* factory);
// Scope analysis.
- bool PropagateScopeInfo(bool outer_scope_calls_non_strict_eval);
+ bool PropagateScopeInfo(bool outer_scope_calls_sloppy_eval);
bool HasTrivialContext() const;
// Predicates.
diff --git a/deps/v8/src/serialize.cc b/deps/v8/src/serialize.cc
index 5adc2b8995..4048886fdb 100644
--- a/deps/v8/src/serialize.cc
+++ b/deps/v8/src/serialize.cc
@@ -175,6 +175,22 @@ void ExternalReferenceTable::PopulateTable(Isolate* isolate) {
RUNTIME_FUNCTION_LIST(RUNTIME_ENTRY)
#undef RUNTIME_ENTRY
+#define RUNTIME_HIDDEN_ENTRY(name, nargs, ressize) \
+ { RUNTIME_FUNCTION, \
+ Runtime::kHidden##name, \
+ "Runtime::Hidden" #name },
+
+ RUNTIME_HIDDEN_FUNCTION_LIST(RUNTIME_HIDDEN_ENTRY)
+#undef RUNTIME_HIDDEN_ENTRY
+
+#define INLINE_OPTIMIZED_ENTRY(name, nargs, ressize) \
+ { RUNTIME_FUNCTION, \
+ Runtime::kInlineOptimized##name, \
+ "Runtime::" #name },
+
+ INLINE_OPTIMIZED_FUNCTION_LIST(INLINE_OPTIMIZED_ENTRY)
+#undef INLINE_OPTIMIZED_ENTRY
+
// IC utilities
#define IC_ENTRY(name) \
{ IC_UTILITY, \
@@ -297,6 +313,11 @@ void ExternalReferenceTable::PopulateTable(Isolate* isolate) {
RUNTIME_ENTRY,
1,
"Runtime::PerformGC");
+ // Runtime entries
+ Add(ExternalReference::out_of_memory_function(isolate).address(),
+ RUNTIME_ENTRY,
+ 2,
+ "Runtime::OutOfMemory");
Add(ExternalReference::delete_handle_scope_extensions(isolate).address(),
RUNTIME_ENTRY,
4,
@@ -310,11 +331,6 @@ void ExternalReferenceTable::PopulateTable(Isolate* isolate) {
RUNTIME_ENTRY,
6,
"StoreBuffer::StoreBufferOverflow");
- Add(ExternalReference::
- incremental_evacuation_record_write_function(isolate).address(),
- RUNTIME_ENTRY,
- 7,
- "IncrementalMarking::RecordWrite");
// Miscellaneous
Add(ExternalReference::roots_array_start(isolate).address(),
@@ -497,11 +513,12 @@ void ExternalReferenceTable::PopulateTable(Isolate* isolate) {
UNCLASSIFIED,
52,
"cpu_features");
- Add(ExternalReference(Runtime::kAllocateInNewSpace, isolate).address(),
+ Add(ExternalReference(Runtime::kHiddenAllocateInNewSpace, isolate).address(),
UNCLASSIFIED,
53,
"Runtime::AllocateInNewSpace");
- Add(ExternalReference(Runtime::kAllocateInTargetSpace, isolate).address(),
+ Add(ExternalReference(
+ Runtime::kHiddenAllocateInTargetSpace, isolate).address(),
UNCLASSIFIED,
54,
"Runtime::AllocateInTargetSpace");
@@ -789,6 +806,7 @@ void Deserializer::Deserialize(Isolate* isolate) {
ASSERT(isolate_->handle_scope_implementer()->blocks()->is_empty());
ASSERT_EQ(NULL, external_reference_decoder_);
external_reference_decoder_ = new ExternalReferenceDecoder(isolate);
+ isolate_->heap()->IterateSmiRoots(this);
isolate_->heap()->IterateStrongRoots(this, VISIT_ONLY_STRONG);
isolate_->heap()->RepairFreeListsAfterBoot();
isolate_->heap()->IterateWeakRoots(this, VISIT_ALL);
@@ -987,6 +1005,7 @@ void Deserializer::ReadChunk(Object** current,
reinterpret_cast<Address>(current); \
Assembler::deserialization_set_special_target_at( \
location_of_branch_data, \
+ Code::cast(HeapObject::FromAddress(current_object_address)), \
reinterpret_cast<Address>(new_object)); \
location_of_branch_data += Assembler::kSpecialTargetSize; \
current = reinterpret_cast<Object**>(location_of_branch_data); \
@@ -1148,15 +1167,15 @@ void Deserializer::ReadChunk(Object** current,
// allocation point and write a pointer to it to the current object.
ALL_SPACES(kBackref, kPlain, kStartOfObject)
ALL_SPACES(kBackrefWithSkip, kPlain, kStartOfObject)
-#if V8_TARGET_ARCH_MIPS
+#if defined(V8_TARGET_ARCH_MIPS) || V8_OOL_CONSTANT_POOL
// Deserialize a new object from pointer found in code and write
- // a pointer to it to the current object. Required only for MIPS, and
- // omitted on the other architectures because it is fully unrolled and
- // would cause bloat.
+ // a pointer to it to the current object. Required only for MIPS or ARM
+ // with ool constant pool, and omitted on the other architectures because
+ // it is fully unrolled and would cause bloat.
ALL_SPACES(kNewObject, kFromCode, kStartOfObject)
// Find a recently deserialized code object using its offset from the
// current allocation point and write a pointer to it to the current
- // object. Required only for MIPS.
+ // object. Required only for MIPS or ARM with ool constant pool.
ALL_SPACES(kBackref, kFromCode, kStartOfObject)
ALL_SPACES(kBackrefWithSkip, kFromCode, kStartOfObject)
#endif
@@ -1253,7 +1272,6 @@ void SnapshotByteSink::PutInt(uintptr_t integer, const char* description) {
Serializer::Serializer(Isolate* isolate, SnapshotByteSink* sink)
: isolate_(isolate),
sink_(sink),
- current_root_index_(0),
external_reference_encoder_(new ExternalReferenceEncoder(isolate)),
root_index_wave_front_(0) {
// The serializer is meant to be used only to generate initial heap images
@@ -1279,7 +1297,7 @@ void StartupSerializer::SerializeStrongReferences() {
CHECK_EQ(0, isolate->eternal_handles()->NumberOfHandles());
// We don't support serializing installed extensions.
CHECK(!isolate->has_installed_extensions());
-
+ isolate->heap()->IterateSmiRoots(this);
isolate->heap()->IterateStrongRoots(this, VISIT_ONLY_STRONG);
}
@@ -1378,12 +1396,11 @@ int Serializer::RootIndex(HeapObject* heap_object, HowToCode from) {
for (int i = 0; i < root_index_wave_front_; i++) {
Object* root = heap->roots_array_start()[i];
if (!root->IsSmi() && root == heap_object) {
-#if V8_TARGET_ARCH_MIPS
+#if defined(V8_TARGET_ARCH_MIPS) || V8_OOL_CONSTANT_POOL
if (from == kFromCode) {
// In order to avoid code bloat in the deserializer we don't have
// support for the encoding that specifies a particular root should
- // be written into the lui/ori instructions on MIPS. Therefore we
- // should not generate such serialization data for MIPS.
+ // be written from within code.
return kInvalidRootIndex;
}
#endif
@@ -1636,6 +1653,9 @@ void Serializer::ObjectSerializer::VisitPointers(Object** start,
void Serializer::ObjectSerializer::VisitEmbeddedPointer(RelocInfo* rinfo) {
+ // Out-of-line constant pool entries will be visited by the ConstantPoolArray.
+ if (FLAG_enable_ool_constant_pool && rinfo->IsInConstantPool()) return;
+
int skip = OutputRawData(rinfo->target_address_address(),
kCanReturnSkipInsteadOfSkipping);
HowToCode how_to_code = rinfo->IsCodedSpecially() ? kFromCode : kPlain;
@@ -1681,6 +1701,9 @@ void Serializer::ObjectSerializer::VisitRuntimeEntry(RelocInfo* rinfo) {
void Serializer::ObjectSerializer::VisitCodeTarget(RelocInfo* rinfo) {
+ // Out-of-line constant pool entries will be visited by the ConstantPoolArray.
+ if (FLAG_enable_ool_constant_pool && rinfo->IsInConstantPool()) return;
+
int skip = OutputRawData(rinfo->target_address_address(),
kCanReturnSkipInsteadOfSkipping);
Code* object = Code::GetCodeFromTargetAddress(rinfo->target_address());
@@ -1698,6 +1721,9 @@ void Serializer::ObjectSerializer::VisitCodeEntry(Address entry_address) {
void Serializer::ObjectSerializer::VisitCell(RelocInfo* rinfo) {
+ // Out-of-line constant pool entries will be visited by the ConstantPoolArray.
+ if (FLAG_enable_ool_constant_pool && rinfo->IsInConstantPool()) return;
+
int skip = OutputRawData(rinfo->pc(), kCanReturnSkipInsteadOfSkipping);
Cell* object = Cell::cast(rinfo->target_cell());
serializer_->SerializeObject(object, kPlain, kInnerPointer, skip);
@@ -1743,7 +1769,9 @@ static void WipeOutRelocations(Code* code) {
RelocInfo::ModeMask(RelocInfo::EXTERNAL_REFERENCE) |
RelocInfo::ModeMask(RelocInfo::RUNTIME_ENTRY);
for (RelocIterator it(code, mode_mask); !it.done(); it.next()) {
- it.rinfo()->WipeOut();
+ if (!(FLAG_enable_ool_constant_pool && it.rinfo()->IsInConstantPool())) {
+ it.rinfo()->WipeOut();
+ }
}
}
diff --git a/deps/v8/src/serialize.h b/deps/v8/src/serialize.h
index 9229bad406..2947144750 100644
--- a/deps/v8/src/serialize.h
+++ b/deps/v8/src/serialize.h
@@ -579,7 +579,6 @@ class Serializer : public SerializerDeserializer {
// relative addresses for back references.
int fullness_[LAST_SPACE + 1];
SnapshotByteSink* sink_;
- int current_root_index_;
ExternalReferenceEncoder* external_reference_encoder_;
static bool serialization_enabled_;
// Did we already make use of the fact that serialization was not enabled?
diff --git a/deps/v8/src/simulator.h b/deps/v8/src/simulator.h
index 485e930645..b61eaa2609 100644
--- a/deps/v8/src/simulator.h
+++ b/deps/v8/src/simulator.h
@@ -32,6 +32,8 @@
#include "ia32/simulator-ia32.h"
#elif V8_TARGET_ARCH_X64
#include "x64/simulator-x64.h"
+#elif V8_TARGET_ARCH_ARM64
+#include "arm64/simulator-arm64.h"
#elif V8_TARGET_ARCH_ARM
#include "arm/simulator-arm.h"
#elif V8_TARGET_ARCH_MIPS
diff --git a/deps/v8/src/spaces.cc b/deps/v8/src/spaces.cc
index a80341bd7f..6c03daa75a 100644
--- a/deps/v8/src/spaces.cc
+++ b/deps/v8/src/spaces.cc
@@ -483,7 +483,7 @@ MemoryChunk* MemoryChunk::Initialize(Heap* heap,
chunk->write_barrier_counter_ = kWriteBarrierCounterGranularity;
chunk->progress_bar_ = 0;
chunk->high_water_mark_ = static_cast<int>(area_start - base);
- chunk->parallel_sweeping_ = 0;
+ chunk->set_parallel_sweeping(PARALLEL_SWEEPING_DONE);
chunk->available_in_small_free_list_ = 0;
chunk->available_in_medium_free_list_ = 0;
chunk->available_in_large_free_list_ = 0;
@@ -560,21 +560,12 @@ bool MemoryChunk::CommitArea(size_t requested) {
void MemoryChunk::InsertAfter(MemoryChunk* other) {
- next_chunk_ = other->next_chunk_;
- prev_chunk_ = other;
+ MemoryChunk* other_next = other->next_chunk();
- // This memory barrier is needed since concurrent sweeper threads may iterate
- // over the list of pages while a new page is inserted.
- // TODO(hpayer): find a cleaner way to guarantee that the page list can be
- // expanded concurrently
- MemoryBarrier();
-
- // The following two write operations can take effect in arbitrary order
- // since pages are always iterated by the sweeper threads in LIFO order, i.e,
- // the inserted page becomes visible for the sweeper threads after
- // other->next_chunk_ = this;
- other->next_chunk_->prev_chunk_ = this;
- other->next_chunk_ = this;
+ set_next_chunk(other_next);
+ set_prev_chunk(other);
+ other_next->set_prev_chunk(this);
+ other->set_next_chunk(this);
}
@@ -583,10 +574,12 @@ void MemoryChunk::Unlink() {
heap_->decrement_scan_on_scavenge_pages();
ClearFlag(SCAN_ON_SCAVENGE);
}
- next_chunk_->prev_chunk_ = prev_chunk_;
- prev_chunk_->next_chunk_ = next_chunk_;
- prev_chunk_ = NULL;
- next_chunk_ = NULL;
+ MemoryChunk* next_element = next_chunk();
+ MemoryChunk* prev_element = prev_chunk();
+ next_element->set_prev_chunk(prev_element);
+ prev_element->set_next_chunk(next_element);
+ set_prev_chunk(NULL);
+ set_next_chunk(NULL);
}
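InsertAfter above no longer needs the explicit MemoryBarrier() because the chunk links are now written with release stores and read with acquire loads (see the MemoryChunk accessors in the spaces.h hunk further down), so a sweeper thread iterating the page list concurrently sees either the old link or a fully initialized new chunk. A stand-alone sketch of the same publication pattern with std::atomic rather than V8's atomic ops:

#include <atomic>
#include <cstdio>

struct Node {
  explicit Node(int v) : value(v), next(nullptr), prev(nullptr) {}
  int value;
  std::atomic<Node*> next;
  std::atomic<Node*> prev;
};

// Writer: link 'node' in after 'other'. The release store to other->next is
// what publishes the node; everything stored before it becomes visible to a
// reader that loads other->next with acquire semantics.
void InsertAfter(Node* node, Node* other) {
  Node* other_next = other->next.load(std::memory_order_acquire);
  node->next.store(other_next, std::memory_order_release);
  node->prev.store(other, std::memory_order_release);
  other_next->prev.store(node, std::memory_order_release);
  other->next.store(node, std::memory_order_release);
}

// Reader (e.g. a sweeper thread): acquire loads pair with the release stores,
// so if it sees the new node it also sees the node's own links.
int SumValues(Node* head, Node* tail) {
  int sum = 0;
  for (Node* n = head; n != tail;
       n = n->next.load(std::memory_order_acquire)) {
    sum += n->value;
  }
  return sum;
}

int main() {
  Node a(1), b(2), c(3);
  a.next.store(&c);
  c.prev.store(&a);
  InsertAfter(&b, &a);
  std::printf("%d\n", SumValues(&a, &c));  // prints 3 (a.value + b.value)
  return 0;
}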
@@ -2082,20 +2075,21 @@ void FreeListNode::set_next(FreeListNode* next) {
intptr_t FreeListCategory::Concatenate(FreeListCategory* category) {
intptr_t free_bytes = 0;
- if (category->top_ != NULL) {
- ASSERT(category->end_ != NULL);
+ if (category->top() != NULL) {
// This is safe (not going to deadlock) since Concatenate operations
// are never performed on the same free lists at the same time in
// reverse order.
LockGuard<Mutex> target_lock_guard(mutex());
LockGuard<Mutex> source_lock_guard(category->mutex());
+ ASSERT(category->end_ != NULL);
free_bytes = category->available();
if (end_ == NULL) {
end_ = category->end();
} else {
- category->end()->set_next(top_);
+ category->end()->set_next(top());
}
- top_ = category->top();
+ set_top(category->top());
+ NoBarrier_Store(&top_, category->top_);
available_ += category->available();
category->Reset();
}
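Concatenate above locks the target category's mutex and then the source's; as the comment notes, that is deadlock-free only because callers never concatenate the same pair of free lists in opposite directions at the same time. A small sketch of that invariant, plus a defensive variant using std::lock for the case where no such ordering guarantee exists (both are illustrations, not V8 code):

#include <mutex>

struct CategoryLike {
  std::mutex mutex;
  long available = 0;
};

// Mirrors the locking order in Concatenate(): target first, then source.
// Safe only under the documented invariant that two threads never run this
// on the same pair of categories in opposite directions concurrently.
long ConcatenateUnderInvariant(CategoryLike* target, CategoryLike* source) {
  std::lock_guard<std::mutex> target_guard(target->mutex);
  std::lock_guard<std::mutex> source_guard(source->mutex);
  long moved = source->available;
  target->available += moved;
  source->available = 0;
  return moved;
}

// Without that invariant, acquiring both mutexes via std::lock avoids the
// classic AB/BA deadlock regardless of the order callers pass them in.
long ConcatenateAnyOrder(CategoryLike* target, CategoryLike* source) {
  std::lock(target->mutex, source->mutex);
  std::lock_guard<std::mutex> target_guard(target->mutex, std::adopt_lock);
  std::lock_guard<std::mutex> source_guard(source->mutex, std::adopt_lock);
  long moved = source->available;
  target->available += moved;
  source->available = 0;
  return moved;
}

int main() {
  CategoryLike a, b;
  b.available = 128;
  return ConcatenateUnderInvariant(&a, &b) == 128 ? 0 : 1;
}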
@@ -2104,15 +2098,16 @@ intptr_t FreeListCategory::Concatenate(FreeListCategory* category) {
void FreeListCategory::Reset() {
- top_ = NULL;
- end_ = NULL;
- available_ = 0;
+ set_top(NULL);
+ set_end(NULL);
+ set_available(0);
}
intptr_t FreeListCategory::EvictFreeListItemsInList(Page* p) {
int sum = 0;
- FreeListNode** n = &top_;
+ FreeListNode* t = top();
+ FreeListNode** n = &t;
while (*n != NULL) {
if (Page::FromAddress((*n)->address()) == p) {
FreeSpace* free_space = reinterpret_cast<FreeSpace*>(*n);
@@ -2122,8 +2117,9 @@ intptr_t FreeListCategory::EvictFreeListItemsInList(Page* p) {
n = (*n)->next_address();
}
}
- if (top_ == NULL) {
- end_ = NULL;
+ set_top(t);
+ if (top() == NULL) {
+ set_end(NULL);
}
available_ -= sum;
return sum;
@@ -2131,17 +2127,17 @@ intptr_t FreeListCategory::EvictFreeListItemsInList(Page* p) {
bool FreeListCategory::ContainsPageFreeListItemsInList(Page* p) {
- FreeListNode** n = &top_;
- while (*n != NULL) {
- if (Page::FromAddress((*n)->address()) == p) return true;
- n = (*n)->next_address();
+ FreeListNode* node = top();
+ while (node != NULL) {
+ if (Page::FromAddress(node->address()) == p) return true;
+ node = node->next();
}
return false;
}
FreeListNode* FreeListCategory::PickNodeFromList(int *node_size) {
- FreeListNode* node = top_;
+ FreeListNode* node = top();
if (node == NULL) return NULL;
@@ -2180,8 +2176,8 @@ FreeListNode* FreeListCategory::PickNodeFromList(int size_in_bytes,
void FreeListCategory::Free(FreeListNode* node, int size_in_bytes) {
- node->set_next(top_);
- top_ = node;
+ node->set_next(top());
+ set_top(node);
if (end_ == NULL) {
end_ = node;
}
@@ -2190,7 +2186,7 @@ void FreeListCategory::Free(FreeListNode* node, int size_in_bytes) {
void FreeListCategory::RepairFreeList(Heap* heap) {
- FreeListNode* n = top_;
+ FreeListNode* n = top();
while (n != NULL) {
Map** map_location = reinterpret_cast<Map**>(n->address());
if (*map_location == NULL) {
@@ -2299,7 +2295,8 @@ FreeListNode* FreeList::FindNodeFor(int size_in_bytes, int* node_size) {
}
int huge_list_available = huge_list_.available();
- for (FreeListNode** cur = huge_list_.GetTopAddress();
+ FreeListNode* top_node = huge_list_.top();
+ for (FreeListNode** cur = &top_node;
*cur != NULL;
cur = (*cur)->next_address()) {
FreeListNode* cur_node = *cur;
@@ -2333,6 +2330,7 @@ FreeListNode* FreeList::FindNodeFor(int size_in_bytes, int* node_size) {
}
}
+ huge_list_.set_top(top_node);
if (huge_list_.top() == NULL) {
huge_list_.set_end(NULL);
}
@@ -2486,7 +2484,7 @@ void FreeList::RepairLists(Heap* heap) {
#ifdef DEBUG
intptr_t FreeListCategory::SumFreeList() {
intptr_t sum = 0;
- FreeListNode* cur = top_;
+ FreeListNode* cur = top();
while (cur != NULL) {
ASSERT(cur->map() == cur->GetHeap()->raw_unchecked_free_space_map());
FreeSpace* cur_as_free_space = reinterpret_cast<FreeSpace*>(cur);
@@ -2502,7 +2500,7 @@ static const int kVeryLongFreeList = 500;
int FreeListCategory::FreeListLength() {
int length = 0;
- FreeListNode* cur = top_;
+ FreeListNode* cur = top();
while (cur != NULL) {
length++;
cur = cur->next();
diff --git a/deps/v8/src/spaces.h b/deps/v8/src/spaces.h
index 9d47f81ac6..908e723827 100644
--- a/deps/v8/src/spaces.h
+++ b/deps/v8/src/spaces.h
@@ -313,11 +313,21 @@ class MemoryChunk {
bool is_valid() { return address() != NULL; }
- MemoryChunk* next_chunk() const { return next_chunk_; }
- MemoryChunk* prev_chunk() const { return prev_chunk_; }
+ MemoryChunk* next_chunk() const {
+ return reinterpret_cast<MemoryChunk*>(Acquire_Load(&next_chunk_));
+ }
+
+ MemoryChunk* prev_chunk() const {
+ return reinterpret_cast<MemoryChunk*>(Acquire_Load(&prev_chunk_));
+ }
- void set_next_chunk(MemoryChunk* next) { next_chunk_ = next; }
- void set_prev_chunk(MemoryChunk* prev) { prev_chunk_ = prev; }
+ void set_next_chunk(MemoryChunk* next) {
+ Release_Store(&next_chunk_, reinterpret_cast<AtomicWord>(next));
+ }
+
+ void set_prev_chunk(MemoryChunk* prev) {
+ Release_Store(&prev_chunk_, reinterpret_cast<AtomicWord>(prev));
+ }
Space* owner() const {
if ((reinterpret_cast<intptr_t>(owner_) & kFailureTagMask) ==
@@ -457,16 +467,35 @@ class MemoryChunk {
// Return all current flags.
intptr_t GetFlags() { return flags_; }
- intptr_t parallel_sweeping() const {
- return parallel_sweeping_;
+
+ // PARALLEL_SWEEPING_DONE - The page state when sweeping is complete or
+ // sweeping must not be performed on that page.
+ // PARALLEL_SWEEPING_FINALIZE - A sweeper thread is done sweeping this
+ // page and will not touch the page memory anymore.
+ // PARALLEL_SWEEPING_IN_PROGRESS - This page is currently swept by a
+ // sweeper thread.
+ // PARALLEL_SWEEPING_PENDING - This page is ready for parallel sweeping.
+ enum ParallelSweepingState {
+ PARALLEL_SWEEPING_DONE,
+ PARALLEL_SWEEPING_FINALIZE,
+ PARALLEL_SWEEPING_IN_PROGRESS,
+ PARALLEL_SWEEPING_PENDING
+ };
+
+ ParallelSweepingState parallel_sweeping() {
+ return static_cast<ParallelSweepingState>(
+ Acquire_Load(&parallel_sweeping_));
}
- void set_parallel_sweeping(intptr_t state) {
- parallel_sweeping_ = state;
+ void set_parallel_sweeping(ParallelSweepingState state) {
+ Release_Store(&parallel_sweeping_, state);
}
bool TryParallelSweeping() {
- return NoBarrier_CompareAndSwap(&parallel_sweeping_, 1, 0) == 1;
+ return Acquire_CompareAndSwap(&parallel_sweeping_,
+ PARALLEL_SWEEPING_PENDING,
+ PARALLEL_SWEEPING_IN_PROGRESS) ==
+ PARALLEL_SWEEPING_PENDING;
}
// Manage live byte count (count of bytes known to be live,
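TryParallelSweeping above claims a page by compare-and-swapping its state from PENDING to IN_PROGRESS, so exactly one sweeper thread wins even when several race on the same page. A minimal sketch of that claim with std::atomic (the state names mirror the enum above; the surrounding types are illustrative only):

#include <atomic>

enum ParallelSweepingState {
  PARALLEL_SWEEPING_DONE,
  PARALLEL_SWEEPING_FINALIZE,
  PARALLEL_SWEEPING_IN_PROGRESS,
  PARALLEL_SWEEPING_PENDING
};

struct PageLike {
  std::atomic<ParallelSweepingState> parallel_sweeping{
      PARALLEL_SWEEPING_PENDING};
};

// Returns true for exactly one caller per pending page: the CAS succeeds only
// while the state is still PENDING and atomically moves it to IN_PROGRESS.
bool TryClaimForSweeping(PageLike* page) {
  ParallelSweepingState expected = PARALLEL_SWEEPING_PENDING;
  return page->parallel_sweeping.compare_exchange_strong(
      expected, PARALLEL_SWEEPING_IN_PROGRESS, std::memory_order_acquire);
}

int main() {
  PageLike page;
  bool first = TryClaimForSweeping(&page);   // true: this caller sweeps
  bool second = TryClaimForSweeping(&page);  // false: already claimed
  return (first && !second) ? 0 : 1;
}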
@@ -536,7 +565,7 @@ class MemoryChunk {
static const intptr_t kAlignmentMask = kAlignment - 1;
- static const intptr_t kSizeOffset = kPointerSize + kPointerSize;
+ static const intptr_t kSizeOffset = 0;
static const intptr_t kLiveBytesOffset =
kSizeOffset + kPointerSize + kPointerSize + kPointerSize +
@@ -550,7 +579,8 @@ class MemoryChunk {
static const size_t kHeaderSize = kWriteBarrierCounterOffset + kPointerSize +
kIntSize + kIntSize + kPointerSize +
- 5 * kPointerSize;
+ 5 * kPointerSize +
+ kPointerSize + kPointerSize;
static const int kBodyOffset =
CODE_POINTER_ALIGN(kHeaderSize + Bitmap::kSize);
@@ -622,7 +652,7 @@ class MemoryChunk {
inline Heap* heap() { return heap_; }
- static const int kFlagsOffset = kPointerSize * 3;
+ static const int kFlagsOffset = kPointerSize;
bool IsEvacuationCandidate() { return IsFlagSet(EVACUATION_CANDIDATE); }
@@ -671,8 +701,6 @@ class MemoryChunk {
static inline void UpdateHighWaterMark(Address mark);
protected:
- MemoryChunk* next_chunk_;
- MemoryChunk* prev_chunk_;
size_t size_;
intptr_t flags_;
@@ -702,7 +730,7 @@ class MemoryChunk {
// count highest number of bytes ever allocated on the page.
int high_water_mark_;
- intptr_t parallel_sweeping_;
+ AtomicWord parallel_sweeping_;
// PagedSpace free-list statistics.
intptr_t available_in_small_free_list_;
@@ -719,6 +747,12 @@ class MemoryChunk {
Executability executable,
Space* owner);
+ private:
+ // next_chunk_ holds a pointer of type MemoryChunk
+ AtomicWord next_chunk_;
+ // prev_chunk_ holds a pointer of type MemoryChunk
+ AtomicWord prev_chunk_;
+
friend class MemoryAllocator;
};
@@ -1503,7 +1537,7 @@ class FreeListNode: public HeapObject {
class FreeListCategory {
public:
FreeListCategory() :
- top_(NULL),
+ top_(0),
end_(NULL),
available_(0) {}
@@ -1521,9 +1555,13 @@ class FreeListCategory {
void RepairFreeList(Heap* heap);
- FreeListNode** GetTopAddress() { return &top_; }
- FreeListNode* top() const { return top_; }
- void set_top(FreeListNode* top) { top_ = top; }
+ FreeListNode* top() const {
+ return reinterpret_cast<FreeListNode*>(NoBarrier_Load(&top_));
+ }
+
+ void set_top(FreeListNode* top) {
+ NoBarrier_Store(&top_, reinterpret_cast<AtomicWord>(top));
+ }
FreeListNode** GetEndAddress() { return &end_; }
FreeListNode* end() const { return end_; }
@@ -1536,7 +1574,7 @@ class FreeListCategory {
Mutex* mutex() { return &mutex_; }
bool IsEmpty() {
- return top_ == NULL;
+ return top() == 0;
}
#ifdef DEBUG
@@ -1545,7 +1583,8 @@ class FreeListCategory {
#endif
private:
- FreeListNode* top_;
+ // top_ points to the top FreeListNode* in the free list category.
+ AtomicWord top_;
FreeListNode* end_;
Mutex mutex_;
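
The spaces.h hunks above turn next_chunk_, prev_chunk_, parallel_sweeping_ and the free-list top_ into AtomicWord fields behind acquire/release (or no-barrier) accessors, and TryParallelSweeping() now claims a page for a sweeper thread by compare-and-swapping the state from PARALLEL_SWEEPING_PENDING to PARALLEL_SWEEPING_IN_PROGRESS. The sketch below shows the same accessor-plus-CAS shape with standard <atomic> primitives instead of V8's Acquire_Load/Release_Store/Acquire_CompareAndSwap; the Chunk class and its fields are illustrative stand-ins, not the real MemoryChunk:

```cpp
#include <atomic>
#include <cstdint>

enum ParallelSweepingState {
  PARALLEL_SWEEPING_DONE,
  PARALLEL_SWEEPING_FINALIZE,
  PARALLEL_SWEEPING_IN_PROGRESS,
  PARALLEL_SWEEPING_PENDING
};

class Chunk {
 public:
  Chunk* next_chunk() const {
    // Acquire load: a reader that sees the link also sees the chunk's fields.
    return next_chunk_.load(std::memory_order_acquire);
  }
  void set_next_chunk(Chunk* next) {
    // Release store: publish the link only after the chunk is initialized.
    next_chunk_.store(next, std::memory_order_release);
  }

  bool TryParallelSweeping() {
    // Exactly one sweeper thread wins the PENDING -> IN_PROGRESS transition.
    std::intptr_t expected = PARALLEL_SWEEPING_PENDING;
    return parallel_sweeping_.compare_exchange_strong(
        expected, PARALLEL_SWEEPING_IN_PROGRESS, std::memory_order_acquire);
  }

 private:
  std::atomic<Chunk*> next_chunk_{nullptr};
  std::atomic<std::intptr_t> parallel_sweeping_{PARALLEL_SWEEPING_DONE};
};
```
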
diff --git a/deps/v8/src/store-buffer.cc b/deps/v8/src/store-buffer.cc
index e89eb1bfed..a1479b2b9a 100644
--- a/deps/v8/src/store-buffer.cc
+++ b/deps/v8/src/store-buffer.cc
@@ -509,10 +509,12 @@ void StoreBuffer::FindPointersToNewSpaceInMapsRegion(
// be marked with a free space or filler. Because the free space and filler
// maps do not move we can always recognize these even after a compaction.
// Normal objects like FixedArrays and JSObjects should not contain references
-// to these maps. The special garbage section (see comment in spaces.h) is
-// skipped since it can contain absolutely anything. Any objects that are
-// allocated during iteration may or may not be visited by the iteration, but
-// they will not be partially visited.
+// to these maps. Constant pool array objects may contain references to these
+// maps, however, constant pool arrays cannot contain pointers to new space
+// objects, therefore they are skipped. The special garbage section (see
+// comment in spaces.h) is skipped since it can contain absolutely anything.
+// Any objects that are allocated during iteration may or may not be visited by
+// the iteration, but they will not be partially visited.
void StoreBuffer::FindPointersToNewSpaceOnPage(
PagedSpace* space,
Page* page,
@@ -526,13 +528,17 @@ void StoreBuffer::FindPointersToNewSpaceOnPage(
Object* free_space_map = heap_->free_space_map();
Object* two_pointer_filler_map = heap_->two_pointer_filler_map();
+ Object* constant_pool_array_map = heap_->constant_pool_array_map();
while (visitable_end < end_of_page) {
Object* o = *reinterpret_cast<Object**>(visitable_end);
- // Skip fillers but not things that look like fillers in the special
- // garbage section which can contain anything.
+ // Skip fillers or constant pool arrays (which never contain new-space
+ // pointers but can contain pointers which can be confused for fillers)
+ // but not things that look like fillers in the special garbage section
+ // which can contain anything.
if (o == free_space_map ||
o == two_pointer_filler_map ||
+ o == constant_pool_array_map ||
(visitable_end == space->top() && visitable_end != space->limit())) {
if (visitable_start != visitable_end) {
// After calling this the special garbage section may have moved.
@@ -549,12 +555,12 @@ void StoreBuffer::FindPointersToNewSpaceOnPage(
if (visitable_end == space->top() && visitable_end != space->limit()) {
visitable_start = visitable_end = space->limit();
} else {
- // At this point we are either at the start of a filler or we are at
- // the point where the space->top() used to be before the
- // visit_pointer_region call above. Either way we can skip the
- // object at the current spot: We don't promise to visit objects
- // allocated during heap traversal, and if space->top() moved then it
- // must be because an object was allocated at this point.
+ // At this point we are either at the start of a filler, a
+ // constant pool array, or we are at the point where the space->top()
+ // used to be before the visit_pointer_region call above. Either way we
+ // can skip the object at the current spot: We don't promise to visit
+ // objects allocated during heap traversal, and if space->top() moved
+ // then it must be because an object was allocated at this point.
visitable_start =
visitable_end + HeapObject::FromAddress(visitable_end)->Size();
visitable_end = visitable_start;
@@ -562,6 +568,7 @@ void StoreBuffer::FindPointersToNewSpaceOnPage(
} else {
ASSERT(o != free_space_map);
ASSERT(o != two_pointer_filler_map);
+ ASSERT(o != constant_pool_array_map);
ASSERT(visitable_end < space->top() || visitable_end >= space->limit());
visitable_end += kPointerSize;
}
diff --git a/deps/v8/src/stub-cache.cc b/deps/v8/src/stub-cache.cc
index 5dfce55fb9..ff641dddf9 100644
--- a/deps/v8/src/stub-cache.cc
+++ b/deps/v8/src/stub-cache.cc
@@ -116,9 +116,9 @@ Handle<Code> StubCache::FindIC(Handle<Name> name,
Handle<Code> StubCache::FindHandler(Handle<Name> name,
Handle<Map> stub_holder,
Code::Kind kind,
- InlineCacheHolderFlag cache_holder) {
- Code::Flags flags = Code::ComputeMonomorphicFlags(
- Code::HANDLER, kNoExtraICState, cache_holder, Code::NORMAL, kind);
+ InlineCacheHolderFlag cache_holder,
+ Code::StubType type) {
+ Code::Flags flags = Code::ComputeHandlerFlags(kind, type, cache_holder);
Handle<Object> probe(stub_holder->FindInCodeCache(*name, flags), isolate_);
if (probe->IsCode()) return Handle<Code>::cast(probe);
@@ -127,11 +127,11 @@ Handle<Code> StubCache::FindHandler(Handle<Name> name,
Handle<Code> StubCache::ComputeMonomorphicIC(
+ Code::Kind kind,
Handle<Name> name,
Handle<HeapType> type,
Handle<Code> handler,
ExtraICState extra_ic_state) {
- Code::Kind kind = handler->handler_kind();
InlineCacheHolderFlag flag = IC::GetCodeCacheFlag(*type);
Handle<Map> stub_holder;
@@ -179,7 +179,7 @@ Handle<Code> StubCache::ComputeLoadNonexistent(Handle<Name> name,
// therefore the stub will be specific to the name.
Handle<Map> current_map = stub_holder;
Handle<Name> cache_name = current_map->is_dictionary_map()
- ? name : Handle<Name>::cast(isolate()->factory()->empty_string());
+ ? name : Handle<Name>::cast(isolate()->factory()->nonexistent_symbol());
Handle<Object> next(current_map->prototype(), isolate());
Handle<JSObject> last = Handle<JSObject>::null();
while (!next->IsNull()) {
@@ -192,8 +192,10 @@ Handle<Code> StubCache::ComputeLoadNonexistent(Handle<Name> name,
// Compile the stub that is either shared for all names or
// name specific if there are global objects involved.
Handle<Code> handler = FindHandler(
- cache_name, stub_holder, Code::LOAD_IC, flag);
- if (!handler.is_null()) return handler;
+ cache_name, stub_holder, Code::LOAD_IC, flag, Code::FAST);
+ if (!handler.is_null()) {
+ return handler;
+ }
LoadStubCompiler compiler(isolate_, kNoExtraICState, flag);
handler = compiler.CompileLoadNonexistent(type, last, cache_name);
@@ -220,7 +222,7 @@ Handle<Code> StubCache::ComputeKeyedLoadElement(Handle<Map> receiver_map) {
Handle<Code> StubCache::ComputeKeyedStoreElement(
Handle<Map> receiver_map,
- StrictModeFlag strict_mode,
+ StrictMode strict_mode,
KeyedAccessStoreMode store_mode) {
ExtraICState extra_state =
KeyedStoreIC::ComputeExtraICState(strict_mode, store_mode);
@@ -331,8 +333,9 @@ Handle<Code> StubCache::ComputeCompareNil(Handle<Map> receiver_map,
if (!cached_ic.is_null()) return cached_ic;
}
- Handle<Code> ic = stub.GetCodeCopyFromTemplate(isolate_);
- ic->ReplaceNthObject(1, isolate_->heap()->meta_map(), *receiver_map);
+ Code::FindAndReplacePattern pattern;
+ pattern.Add(isolate_->factory()->meta_map(), receiver_map);
+ Handle<Code> ic = stub.GetCodeCopy(isolate_, pattern);
if (!receiver_map->is_shared()) {
Map::UpdateCodeCache(receiver_map, name, ic);
@@ -369,14 +372,13 @@ Handle<Code> StubCache::ComputeLoadElementPolymorphic(
Handle<Code> StubCache::ComputePolymorphicIC(
+ Code::Kind kind,
TypeHandleList* types,
CodeHandleList* handlers,
int number_of_valid_types,
Handle<Name> name,
ExtraICState extra_ic_state) {
-
Handle<Code> handler = handlers->at(0);
- Code::Kind kind = handler->handler_kind();
Code::StubType type = number_of_valid_types == 1 ? handler->type()
: Code::NORMAL;
if (kind == Code::LOAD_IC) {
@@ -395,7 +397,7 @@ Handle<Code> StubCache::ComputePolymorphicIC(
Handle<Code> StubCache::ComputeStoreElementPolymorphic(
MapHandleList* receiver_maps,
KeyedAccessStoreMode store_mode,
- StrictModeFlag strict_mode) {
+ StrictMode strict_mode) {
ASSERT(store_mode == STANDARD_STORE ||
store_mode == STORE_AND_GROW_NO_TRANSITION ||
store_mode == STORE_NO_TRANSITION_IGNORE_OUT_OF_BOUNDS ||
@@ -662,10 +664,14 @@ RUNTIME_FUNCTION(MaybeObject*, StoreInterceptorProperty) {
RUNTIME_FUNCTION(MaybeObject*, KeyedLoadPropertyWithInterceptor) {
- JSObject* receiver = JSObject::cast(args[0]);
+ HandleScope scope(isolate);
+ Handle<JSObject> receiver = args.at<JSObject>(0);
ASSERT(args.smi_at(1) >= 0);
uint32_t index = args.smi_at(1);
- return receiver->GetElementWithInterceptor(receiver, index);
+ Handle<Object> result =
+ JSObject::GetElementWithInterceptor(receiver, receiver, index);
+ RETURN_IF_EMPTY_HANDLE(isolate, result);
+ return *result;
}
@@ -690,9 +696,7 @@ Handle<Code> StubCompiler::CompileLoadPreMonomorphic(Code::Flags flags) {
Handle<Code> StubCompiler::CompileLoadMegamorphic(Code::Flags flags) {
- ExtraICState extra_state = Code::ExtractExtraICStateFromFlags(flags);
- ContextualMode mode = LoadIC::GetContextualMode(extra_state);
- LoadIC::GenerateMegamorphic(masm(), mode);
+ LoadIC::GenerateMegamorphic(masm());
Handle<Code> code = GetCodeWithFlags(flags, "CompileLoadMegamorphic");
PROFILE(isolate(),
CodeCreateEvent(Logger::LOAD_MEGAMORPHIC_TAG, *code, 0));
@@ -723,7 +727,7 @@ Handle<Code> StubCompiler::CompileStorePreMonomorphic(Code::Flags flags) {
Handle<Code> StubCompiler::CompileStoreGeneric(Code::Flags flags) {
ExtraICState extra_state = Code::ExtractExtraICStateFromFlags(flags);
- StrictModeFlag strict_mode = StoreIC::GetStrictMode(extra_state);
+ StrictMode strict_mode = StoreIC::GetStrictMode(extra_state);
StoreIC::GenerateRuntimeSetProperty(masm(), strict_mode);
Handle<Code> code = GetCodeWithFlags(flags, "CompileStoreGeneric");
PROFILE(isolate(),
@@ -734,8 +738,7 @@ Handle<Code> StubCompiler::CompileStoreGeneric(Code::Flags flags) {
Handle<Code> StubCompiler::CompileStoreMegamorphic(Code::Flags flags) {
- ExtraICState extra_state = Code::ExtractExtraICStateFromFlags(flags);
- StoreIC::GenerateMegamorphic(masm(), extra_state);
+ StoreIC::GenerateMegamorphic(masm());
Handle<Code> code = GetCodeWithFlags(flags, "CompileStoreMegamorphic");
PROFILE(isolate(),
CodeCreateEvent(Logger::STORE_MEGAMORPHIC_TAG, *code, 0));
@@ -951,8 +954,10 @@ Handle<Code> LoadStubCompiler::CompileLoadCallback(
ASSERT(call_optimization.is_simple_api_call());
Handle<JSFunction> callback = call_optimization.constant_function();
CallbackHandlerFrontend(type, receiver(), holder, name, callback);
- GenerateLoadCallback(call_optimization, IC::TypeToMap(*type, isolate()));
-
+ Handle<Map> receiver_map = IC::TypeToMap(*type, isolate());
+ GenerateFastApiCall(
+ masm(), call_optimization, receiver_map,
+ receiver(), scratch1(), false, 0, NULL);
// Return the generated code.
return GetCode(kind(), Code::FAST, name);
}
@@ -1118,6 +1123,30 @@ Handle<Code> StoreStubCompiler::CompileStoreField(Handle<JSObject> object,
}
+Handle<Code> StoreStubCompiler::CompileStoreArrayLength(Handle<JSObject> object,
+ LookupResult* lookup,
+ Handle<Name> name) {
+ // This accepts as a receiver anything JSArray::SetElementsLength accepts
+ // (currently anything except for external arrays which means anything with
+ // elements of FixedArray type). Value must be a number, but only smis are
+ // accepted as the most common case.
+ Label miss;
+
+ // Check that value is a smi.
+ __ JumpIfNotSmi(value(), &miss);
+
+ // Generate tail call to StoreIC_ArrayLength.
+ GenerateStoreArrayLength();
+
+ // Handle miss case.
+ __ bind(&miss);
+ TailCallBuiltin(masm(), MissBuiltin(kind()));
+
+ // Return the generated code.
+ return GetCode(kind(), Code::FAST, name);
+}
+
+
Handle<Code> StoreStubCompiler::CompileStoreViaSetter(
Handle<JSObject> object,
Handle<JSObject> holder,
@@ -1125,8 +1154,24 @@ Handle<Code> StoreStubCompiler::CompileStoreViaSetter(
Handle<JSFunction> setter) {
Handle<HeapType> type = IC::CurrentTypeOf(object, isolate());
HandlerFrontend(type, receiver(), holder, name);
- GenerateStoreViaSetter(masm(), type, setter);
+ GenerateStoreViaSetter(masm(), type, receiver(), setter);
+
+ return GetCode(kind(), Code::FAST, name);
+}
+
+Handle<Code> StoreStubCompiler::CompileStoreCallback(
+ Handle<JSObject> object,
+ Handle<JSObject> holder,
+ Handle<Name> name,
+ const CallOptimization& call_optimization) {
+ HandlerFrontend(IC::CurrentTypeOf(object, isolate()),
+ receiver(), holder, name);
+ Register values[] = { value() };
+ GenerateFastApiCall(
+ masm(), call_optimization, handle(object->map()),
+ receiver(), scratch1(), true, 1, values);
+ // Return the generated code.
return GetCode(kind(), Code::FAST, name);
}
@@ -1236,8 +1281,8 @@ Handle<Code> BaseLoadStoreStubCompiler::GetICCode(Code::Kind kind,
Handle<Code> BaseLoadStoreStubCompiler::GetCode(Code::Kind kind,
Code::StubType type,
Handle<Name> name) {
- Code::Flags flags = Code::ComputeFlags(
- Code::HANDLER, MONOMORPHIC, extra_state(), type, kind, cache_holder_);
+ ASSERT_EQ(kNoExtraICState, extra_state());
+ Code::Flags flags = Code::ComputeHandlerFlags(kind, type, cache_holder_);
Handle<Code> code = GetCodeWithFlags(flags, name);
PROFILE(isolate(), CodeCreateEvent(log_kind(code), *code, *name));
JitEvent(name, code);
@@ -1265,6 +1310,8 @@ void KeyedLoadStubCompiler::CompileElementHandlers(MapHandleList* receiver_maps,
cached_stub =
KeyedLoadFastElementStub(is_js_array,
elements_kind).GetCode(isolate());
+ } else if (elements_kind == SLOPPY_ARGUMENTS_ELEMENTS) {
+ cached_stub = isolate()->builtins()->KeyedLoadIC_SloppyArguments();
} else {
ASSERT(elements_kind == DICTIONARY_ELEMENTS);
cached_stub = KeyedLoadDictionaryElementStub().GetCode(isolate());
diff --git a/deps/v8/src/stub-cache.h b/deps/v8/src/stub-cache.h
index f55c440ea4..7a304fe71a 100644
--- a/deps/v8/src/stub-cache.h
+++ b/deps/v8/src/stub-cache.h
@@ -89,9 +89,11 @@ class StubCache {
Handle<Code> FindHandler(Handle<Name> name,
Handle<Map> map,
Code::Kind kind,
- InlineCacheHolderFlag cache_holder = OWN_MAP);
+ InlineCacheHolderFlag cache_holder,
+ Code::StubType type);
- Handle<Code> ComputeMonomorphicIC(Handle<Name> name,
+ Handle<Code> ComputeMonomorphicIC(Code::Kind kind,
+ Handle<Name> name,
Handle<HeapType> type,
Handle<Code> handler,
ExtraICState extra_ic_state);
@@ -101,7 +103,7 @@ class StubCache {
Handle<Code> ComputeKeyedLoadElement(Handle<Map> receiver_map);
Handle<Code> ComputeKeyedStoreElement(Handle<Map> receiver_map,
- StrictModeFlag strict_mode,
+ StrictMode strict_mode,
KeyedAccessStoreMode store_mode);
// ---
@@ -120,9 +122,10 @@ class StubCache {
Handle<Code> ComputeLoadElementPolymorphic(MapHandleList* receiver_maps);
Handle<Code> ComputeStoreElementPolymorphic(MapHandleList* receiver_maps,
KeyedAccessStoreMode store_mode,
- StrictModeFlag strict_mode);
+ StrictMode strict_mode);
- Handle<Code> ComputePolymorphicIC(TypeHandleList* types,
+ Handle<Code> ComputePolymorphicIC(Code::Kind kind,
+ TypeHandleList* types,
CodeHandleList* handlers,
int number_of_valid_maps,
Handle<Name> name,
@@ -357,12 +360,6 @@ class StubCompiler BASE_EMBEDDED {
Register scratch,
Label* miss_label);
- static void GenerateLoadStringLength(MacroAssembler* masm,
- Register receiver,
- Register scratch1,
- Register scratch2,
- Label* miss_label);
-
static void GenerateLoadFunctionPrototype(MacroAssembler* masm,
Register receiver,
Register scratch1,
@@ -404,6 +401,15 @@ class StubCompiler BASE_EMBEDDED {
void GenerateBooleanCheck(Register object, Label* miss);
+ static void GenerateFastApiCall(MacroAssembler* masm,
+ const CallOptimization& optimization,
+ Handle<Map> receiver_map,
+ Register receiver,
+ Register scratch,
+ bool is_store,
+ int argc,
+ Register* values);
+
protected:
Handle<Code> GetCodeWithFlags(Code::Flags flags, const char* name);
Handle<Code> GetCodeWithFlags(Code::Flags flags, Handle<Name> name);
@@ -510,11 +516,11 @@ class BaseLoadStoreStubCompiler: public StubCompiler {
}
void JitEvent(Handle<Name> name, Handle<Code> code);
- virtual Register receiver() = 0;
- virtual Register name() = 0;
- virtual Register scratch1() = 0;
- virtual Register scratch2() = 0;
- virtual Register scratch3() = 0;
+ Register receiver() { return registers_[0]; }
+ Register name() { return registers_[1]; }
+ Register scratch1() { return registers_[2]; }
+ Register scratch2() { return registers_[3]; }
+ Register scratch3() { return registers_[4]; }
void InitializeRegisters();
@@ -571,6 +577,11 @@ class LoadStubCompiler: public BaseLoadStoreStubCompiler {
Register receiver,
Handle<JSFunction> getter);
+ static void GenerateLoadViaGetterForDeopt(MacroAssembler* masm) {
+ GenerateLoadViaGetter(
+ masm, Handle<HeapType>::null(), no_reg, Handle<JSFunction>());
+ }
+
Handle<Code> CompileLoadNonexistent(Handle<HeapType> type,
Handle<JSObject> last,
Handle<Name> name);
@@ -581,8 +592,6 @@ class LoadStubCompiler: public BaseLoadStoreStubCompiler {
Handle<Name> name,
bool is_dont_delete);
- static Register* registers();
-
protected:
ContextualMode contextual_mode() {
return LoadIC::GetContextualMode(extra_state());
@@ -624,12 +633,10 @@ class LoadStubCompiler: public BaseLoadStoreStubCompiler {
Handle<Name> name,
LookupResult* lookup);
- virtual Register receiver() { return registers_[0]; }
- virtual Register name() { return registers_[1]; }
- virtual Register scratch1() { return registers_[2]; }
- virtual Register scratch2() { return registers_[3]; }
- virtual Register scratch3() { return registers_[4]; }
+ private:
+ static Register* registers();
Register scratch4() { return registers_[5]; }
+ friend class BaseLoadStoreStubCompiler;
};
@@ -672,6 +679,12 @@ class StoreStubCompiler: public BaseLoadStoreStubCompiler {
LookupResult* lookup,
Handle<Name> name);
+ Handle<Code> CompileStoreArrayLength(Handle<JSObject> object,
+ LookupResult* lookup,
+ Handle<Name> name);
+
+ void GenerateStoreArrayLength();
+
void GenerateNegativeHolderLookup(MacroAssembler* masm,
Handle<JSObject> holder,
Register holder_reg,
@@ -714,8 +727,14 @@ class StoreStubCompiler: public BaseLoadStoreStubCompiler {
static void GenerateStoreViaSetter(MacroAssembler* masm,
Handle<HeapType> type,
+ Register receiver,
Handle<JSFunction> setter);
+ static void GenerateStoreViaSetterForDeopt(MacroAssembler* masm) {
+ GenerateStoreViaSetter(
+ masm, Handle<HeapType>::null(), no_reg, Handle<JSFunction>());
+ }
+
Handle<Code> CompileStoreViaSetter(Handle<JSObject> object,
Handle<JSObject> holder,
Handle<Name> name,
@@ -745,17 +764,9 @@ class StoreStubCompiler: public BaseLoadStoreStubCompiler {
Label* label,
Handle<Name> name);
- virtual Register receiver() { return registers_[0]; }
- virtual Register name() { return registers_[1]; }
- Register value() { return registers_[2]; }
- virtual Register scratch1() { return registers_[3]; }
- virtual Register scratch2() { return registers_[4]; }
- virtual Register scratch3() { return registers_[5]; }
-
- protected:
- static Register* registers();
-
private:
+ static Register* registers();
+ static Register value();
friend class BaseLoadStoreStubCompiler;
};
@@ -783,9 +794,7 @@ class KeyedStoreStubCompiler: public StoreStubCompiler {
return KeyedStoreIC::GetKeyedAccessStoreMode(extra_state());
}
- Register transition_map() {
- return registers()[3];
- }
+ Register transition_map() { return scratch1(); }
friend class BaseLoadStoreStubCompiler;
};
diff --git a/deps/v8/src/sweeper-thread.cc b/deps/v8/src/sweeper-thread.cc
index 097b594a74..7e8305abe8 100644
--- a/deps/v8/src/sweeper-thread.cc
+++ b/deps/v8/src/sweeper-thread.cc
@@ -45,6 +45,7 @@ SweeperThread::SweeperThread(Isolate* isolate)
start_sweeping_semaphore_(0),
end_sweeping_semaphore_(0),
stop_semaphore_(0) {
+ ASSERT(!FLAG_job_based_sweeping);
NoBarrier_Store(&stop_thread_, static_cast<AtomicWord>(false));
}
diff --git a/deps/v8/src/symbol.js b/deps/v8/src/symbol.js
index be308d947c..e7ea5a68d8 100644
--- a/deps/v8/src/symbol.js
+++ b/deps/v8/src/symbol.js
@@ -36,39 +36,60 @@ var $Symbol = global.Symbol;
// -------------------------------------------------------------------
function SymbolConstructor(x) {
- var value =
- IS_SYMBOL(x) ? x : %CreateSymbol(IS_UNDEFINED(x) ? x : ToString(x));
if (%_IsConstructCall()) {
- %_SetValueOf(this, value);
- } else {
- return value;
+ throw MakeTypeError('not_constructor', ["Symbol"]);
}
+ // NOTE: Passing in a Symbol value will throw on ToString().
+ return %CreateSymbol(IS_UNDEFINED(x) ? x : ToString(x));
}
-function SymbolGetName() {
- var symbol = IS_SYMBOL_WRAPPER(this) ? %_ValueOf(this) : this;
- if (!IS_SYMBOL(symbol)) {
+
+function SymbolToString() {
+ if (!(IS_SYMBOL(this) || IS_SYMBOL_WRAPPER(this))) {
throw MakeTypeError(
- 'incompatible_method_receiver', ["Symbol.prototype.name", this]);
+ 'incompatible_method_receiver', ["Symbol.prototype.toString", this]);
}
- return %SymbolName(symbol);
+ var description = %SymbolDescription(%_ValueOf(this));
+ return "Symbol(" + (IS_UNDEFINED(description) ? "" : description) + ")";
}
-function SymbolToString() {
- throw MakeTypeError('symbol_to_string');
-}
function SymbolValueOf() {
- // NOTE: Both Symbol objects and values can enter here as
- // 'this'. This is not as dictated by ECMA-262.
- if (!IS_SYMBOL(this) && !IS_SYMBOL_WRAPPER(this)) {
+ if (!(IS_SYMBOL(this) || IS_SYMBOL_WRAPPER(this))) {
throw MakeTypeError(
- 'incompatible_method_receiver', ["Symbol.prototype.valueOf", this]);
+ 'incompatible_method_receiver', ["Symbol.prototype.valueOf", this]);
}
return %_ValueOf(this);
}
+function InternalSymbol(key) {
+ var internal_registry = %SymbolRegistry().for_intern;
+ if (IS_UNDEFINED(internal_registry[key])) {
+ internal_registry[key] = %CreateSymbol(key);
+ }
+ return internal_registry[key];
+}
+
+
+function SymbolFor(key) {
+ key = TO_STRING_INLINE(key);
+ var registry = %SymbolRegistry();
+ if (IS_UNDEFINED(registry.for[key])) {
+ var symbol = %CreateSymbol(key);
+ registry.for[key] = symbol;
+ registry.keyFor[symbol] = key;
+ }
+ return registry.for[key];
+}
+
+
+function SymbolKeyFor(symbol) {
+ if (!IS_SYMBOL(symbol)) throw MakeTypeError("not_a_symbol", [symbol]);
+ return %SymbolRegistry().keyFor[symbol];
+}
+
+
// ES6 19.1.2.8
function ObjectGetOwnPropertySymbols(obj) {
if (!IS_SPEC_OBJECT(obj)) {
@@ -84,14 +105,38 @@ function ObjectGetOwnPropertySymbols(obj) {
//-------------------------------------------------------------------
+var symbolCreate = InternalSymbol("Symbol.create");
+var symbolHasInstance = InternalSymbol("Symbol.hasInstance");
+var symbolIsConcatSpreadable = InternalSymbol("Symbol.isConcatSpreadable");
+var symbolIsRegExp = InternalSymbol("Symbol.isRegExp");
+var symbolIterator = InternalSymbol("Symbol.iterator");
+var symbolToStringTag = InternalSymbol("Symbol.toStringTag");
+var symbolUnscopables = InternalSymbol("Symbol.unscopables");
+
+
+//-------------------------------------------------------------------
+
function SetUpSymbol() {
%CheckIsBootstrapping();
%SetCode($Symbol, SymbolConstructor);
- %FunctionSetPrototype($Symbol, new $Symbol());
- %SetProperty($Symbol.prototype, "constructor", $Symbol, DONT_ENUM);
+ %FunctionSetPrototype($Symbol, new $Object());
+
+ InstallConstants($Symbol, $Array(
+ "create", symbolCreate,
+ "hasInstance", symbolHasInstance,
+ "isConcatSpreadable", symbolIsConcatSpreadable,
+ "isRegExp", symbolIsRegExp,
+ "iterator", symbolIterator,
+ "toStringTag", symbolToStringTag,
+ "unscopables", symbolUnscopables
+ ));
+ InstallFunctions($Symbol, DONT_ENUM, $Array(
+ "for", SymbolFor,
+ "keyFor", SymbolKeyFor
+ ));
- InstallGetter($Symbol.prototype, "name", SymbolGetName);
+ %SetProperty($Symbol.prototype, "constructor", $Symbol, DONT_ENUM);
InstallFunctions($Symbol.prototype, DONT_ENUM, $Array(
"toString", SymbolToString,
"valueOf", SymbolValueOf
diff --git a/deps/v8/src/token.h b/deps/v8/src/token.h
index 39bcc24074..8efaa477b1 100644
--- a/deps/v8/src/token.h
+++ b/deps/v8/src/token.h
@@ -73,7 +73,7 @@ namespace internal {
T(INIT_VAR, "=init_var", 2) /* AST-use only. */ \
T(INIT_LET, "=init_let", 2) /* AST-use only. */ \
T(INIT_CONST, "=init_const", 2) /* AST-use only. */ \
- T(INIT_CONST_HARMONY, "=init_const_harmony", 2) /* AST-use only. */ \
+ T(INIT_CONST_LEGACY, "=init_const_legacy", 2) /* AST-use only. */ \
T(ASSIGN, "=", 2) \
T(ASSIGN_BIT_OR, "|=", 2) \
T(ASSIGN_BIT_XOR, "^=", 2) \
diff --git a/deps/v8/src/transitions-inl.h b/deps/v8/src/transitions-inl.h
index 7895117137..dc1620a07d 100644
--- a/deps/v8/src/transitions-inl.h
+++ b/deps/v8/src/transitions-inl.h
@@ -28,7 +28,6 @@
#ifndef V8_TRANSITIONS_INL_H_
#define V8_TRANSITIONS_INL_H_
-#include "objects-inl.h"
#include "transitions.h"
namespace v8 {
diff --git a/deps/v8/src/type-info.cc b/deps/v8/src/type-info.cc
index 2ca04b88fc..99b1b3d899 100644
--- a/deps/v8/src/type-info.cc
+++ b/deps/v8/src/type-info.cc
@@ -47,6 +47,12 @@ TypeFeedbackOracle::TypeFeedbackOracle(Handle<Code> code,
Zone* zone)
: native_context_(native_context),
zone_(zone) {
+ Object* raw_info = code->type_feedback_info();
+ if (raw_info->IsTypeFeedbackInfo()) {
+ feedback_vector_ = Handle<FixedArray>(TypeFeedbackInfo::cast(raw_info)->
+ feedback_vector());
+ }
+
BuildDictionary(code);
ASSERT(dictionary_->IsDictionary());
}
@@ -72,6 +78,17 @@ Handle<Object> TypeFeedbackOracle::GetInfo(TypeFeedbackId ast_id) {
}
+Handle<Object> TypeFeedbackOracle::GetInfo(int slot) {
+ ASSERT(slot >= 0 && slot < feedback_vector_->length());
+ Object* obj = feedback_vector_->get(slot);
+ if (!obj->IsJSFunction() ||
+ !CanRetainOtherContext(JSFunction::cast(obj), *native_context_)) {
+ return Handle<Object>(obj, isolate());
+ }
+ return Handle<Object>::cast(isolate()->factory()->undefined_value());
+}
+
+
bool TypeFeedbackOracle::LoadIsUninitialized(TypeFeedbackId id) {
Handle<Object> maybe_code = GetInfo(id);
if (maybe_code->IsCode()) {
@@ -101,22 +118,26 @@ bool TypeFeedbackOracle::StoreIsKeyedPolymorphic(TypeFeedbackId ast_id) {
}
-bool TypeFeedbackOracle::CallIsMonomorphic(TypeFeedbackId id) {
- Handle<Object> value = GetInfo(id);
- return value->IsAllocationSite() || value->IsJSFunction();
+bool TypeFeedbackOracle::CallIsMonomorphic(int slot) {
+ Handle<Object> value = GetInfo(slot);
+ return FLAG_pretenuring_call_new
+ ? value->IsJSFunction()
+ : value->IsAllocationSite() || value->IsJSFunction();
}
-bool TypeFeedbackOracle::CallNewIsMonomorphic(TypeFeedbackId id) {
- Handle<Object> info = GetInfo(id);
- return info->IsAllocationSite() || info->IsJSFunction();
+bool TypeFeedbackOracle::CallNewIsMonomorphic(int slot) {
+ Handle<Object> info = GetInfo(slot);
+ return FLAG_pretenuring_call_new
+ ? info->IsJSFunction()
+ : info->IsAllocationSite() || info->IsJSFunction();
}
-byte TypeFeedbackOracle::ForInType(TypeFeedbackId id) {
- Handle<Object> value = GetInfo(id);
+byte TypeFeedbackOracle::ForInType(int feedback_vector_slot) {
+ Handle<Object> value = GetInfo(feedback_vector_slot);
return value->IsSmi() &&
- Smi::cast(*value)->value() == TypeFeedbackCells::kForInFastCaseMarker
+ Smi::cast(*value)->value() == TypeFeedbackInfo::kForInFastCaseMarker
? ForInStatement::FAST_FOR_IN : ForInStatement::SLOW_FOR_IN;
}
@@ -134,30 +155,31 @@ KeyedAccessStoreMode TypeFeedbackOracle::GetStoreMode(
}
-Handle<JSFunction> TypeFeedbackOracle::GetCallTarget(TypeFeedbackId id) {
- Handle<Object> info = GetInfo(id);
- if (info->IsAllocationSite()) {
- return Handle<JSFunction>(isolate()->global_context()->array_function());
- } else {
+Handle<JSFunction> TypeFeedbackOracle::GetCallTarget(int slot) {
+ Handle<Object> info = GetInfo(slot);
+ if (FLAG_pretenuring_call_new || info->IsJSFunction()) {
return Handle<JSFunction>::cast(info);
}
+
+ ASSERT(info->IsAllocationSite());
+ return Handle<JSFunction>(isolate()->native_context()->array_function());
}
-Handle<JSFunction> TypeFeedbackOracle::GetCallNewTarget(TypeFeedbackId id) {
- Handle<Object> info = GetInfo(id);
- if (info->IsAllocationSite()) {
- return Handle<JSFunction>(isolate()->global_context()->array_function());
- } else {
+Handle<JSFunction> TypeFeedbackOracle::GetCallNewTarget(int slot) {
+ Handle<Object> info = GetInfo(slot);
+ if (FLAG_pretenuring_call_new || info->IsJSFunction()) {
return Handle<JSFunction>::cast(info);
}
+
+ ASSERT(info->IsAllocationSite());
+ return Handle<JSFunction>(isolate()->native_context()->array_function());
}
-Handle<AllocationSite> TypeFeedbackOracle::GetCallNewAllocationSite(
- TypeFeedbackId id) {
- Handle<Object> info = GetInfo(id);
- if (info->IsAllocationSite()) {
+Handle<AllocationSite> TypeFeedbackOracle::GetCallNewAllocationSite(int slot) {
+ Handle<Object> info = GetInfo(slot);
+ if (FLAG_pretenuring_call_new || info->IsAllocationSite()) {
return Handle<AllocationSite>::cast(info);
}
return Handle<AllocationSite>::null();
@@ -206,7 +228,7 @@ void TypeFeedbackOracle::CompareType(TypeFeedbackId id,
CompareIC::StubInfoToType(
stub_minor_key, left_type, right_type, combined_type, map, zone());
} else if (code->is_compare_nil_ic_stub()) {
- CompareNilICStub stub(code->extended_extra_ic_state());
+ CompareNilICStub stub(code->extra_ic_state());
*combined_type = stub.GetType(zone(), map);
*left_type = *right_type = stub.GetInputType(zone(), map);
}
@@ -233,7 +255,7 @@ void TypeFeedbackOracle::BinaryType(TypeFeedbackId id,
}
Handle<Code> code = Handle<Code>::cast(object);
ASSERT_EQ(Code::BINARY_OP_IC, code->kind());
- BinaryOpIC::State state(code->extended_extra_ic_state());
+ BinaryOpIC::State state(code->extra_ic_state());
ASSERT_EQ(op, state.op());
*left = state.GetLeftType(zone());
@@ -255,7 +277,7 @@ Type* TypeFeedbackOracle::CountType(TypeFeedbackId id) {
if (!object->IsCode()) return Type::None(zone());
Handle<Code> code = Handle<Code>::cast(object);
ASSERT_EQ(Code::BINARY_OP_IC, code->kind());
- BinaryOpIC::State state(code->extended_extra_ic_state());
+ BinaryOpIC::State state(code->extra_ic_state());
return state.GetLeftType(zone());
}
@@ -267,9 +289,7 @@ void TypeFeedbackOracle::PropertyReceiverTypes(
FunctionPrototypeStub proto_stub(Code::LOAD_IC);
*is_prototype = LoadIsStub(id, &proto_stub);
if (!*is_prototype) {
- Code::Flags flags = Code::ComputeFlags(
- Code::HANDLER, MONOMORPHIC, kNoExtraICState,
- Code::NORMAL, Code::LOAD_IC);
+ Code::Flags flags = Code::ComputeHandlerFlags(Code::LOAD_IC);
CollectReceiverTypes(id, name, flags, receiver_types);
}
}
@@ -290,9 +310,7 @@ void TypeFeedbackOracle::KeyedPropertyReceiverTypes(
void TypeFeedbackOracle::AssignmentReceiverTypes(
TypeFeedbackId id, Handle<String> name, SmallMapList* receiver_types) {
receiver_types->Clear();
- Code::Flags flags = Code::ComputeFlags(
- Code::HANDLER, MONOMORPHIC, kNoExtraICState,
- Code::NORMAL, Code::STORE_IC);
+ Code::Flags flags = Code::ComputeHandlerFlags(Code::STORE_IC);
CollectReceiverTypes(id, name, flags, receiver_types);
}
@@ -409,7 +427,6 @@ void TypeFeedbackOracle::BuildDictionary(Handle<Code> code) {
GetRelocInfos(code, &infos);
CreateDictionary(code, &infos);
ProcessRelocInfos(&infos);
- ProcessTypeFeedbackCells(code);
// Allocate handle in the parent scope.
dictionary_ = scope.CloseAndEscape(dictionary_);
}
@@ -427,24 +444,21 @@ void TypeFeedbackOracle::GetRelocInfos(Handle<Code> code,
void TypeFeedbackOracle::CreateDictionary(Handle<Code> code,
ZoneList<RelocInfo>* infos) {
AllowHeapAllocation allocation_allowed;
- int cell_count = code->type_feedback_info()->IsTypeFeedbackInfo()
- ? TypeFeedbackInfo::cast(code->type_feedback_info())->
- type_feedback_cells()->CellCount()
- : 0;
- int length = infos->length() + cell_count;
- byte* old_start = code->instruction_start();
- dictionary_ = isolate()->factory()->NewUnseededNumberDictionary(length);
- byte* new_start = code->instruction_start();
- RelocateRelocInfos(infos, old_start, new_start);
+ Code* old_code = *code;
+ dictionary_ =
+ isolate()->factory()->NewUnseededNumberDictionary(infos->length());
+ RelocateRelocInfos(infos, old_code, *code);
}
void TypeFeedbackOracle::RelocateRelocInfos(ZoneList<RelocInfo>* infos,
- byte* old_start,
- byte* new_start) {
+ Code* old_code,
+ Code* new_code) {
for (int i = 0; i < infos->length(); i++) {
RelocInfo* info = &(*infos)[i];
- info->set_pc(new_start + (info->pc() - old_start));
+ info->set_host(new_code);
+ info->set_pc(new_code->instruction_start() +
+ (info->pc() - old_code->instruction_start()));
}
}
@@ -475,26 +489,6 @@ void TypeFeedbackOracle::ProcessRelocInfos(ZoneList<RelocInfo>* infos) {
}
-void TypeFeedbackOracle::ProcessTypeFeedbackCells(Handle<Code> code) {
- Object* raw_info = code->type_feedback_info();
- if (!raw_info->IsTypeFeedbackInfo()) return;
- Handle<TypeFeedbackCells> cache(
- TypeFeedbackInfo::cast(raw_info)->type_feedback_cells());
- for (int i = 0; i < cache->CellCount(); i++) {
- TypeFeedbackId ast_id = cache->AstId(i);
- Cell* cell = cache->GetCell(i);
- Object* value = cell->value();
- if (value->IsSmi() ||
- value->IsAllocationSite() ||
- (value->IsJSFunction() &&
- !CanRetainOtherContext(JSFunction::cast(value),
- *native_context_))) {
- SetInfo(ast_id, cell);
- }
- }
-}
-
-
void TypeFeedbackOracle::SetInfo(TypeFeedbackId ast_id, Object* target) {
ASSERT(dictionary_->FindEntry(IdToKey(ast_id)) ==
UnseededNumberDictionary::kNotFound);
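
The type-info.cc changes above move call and for-in feedback out of TypeFeedbackCells and into a per-function feedback vector: the oracle caches the FixedArray from the code's TypeFeedbackInfo in its constructor and addresses it with plain integer slots (GetInfo(int slot)), and the call-site queries treat an AllocationSite entry as a monomorphic Array call only when --pretenuring_call_new is off. A schematic model of that slot-based lookup, with a plain enum and vector standing in for V8's handles and heap objects (everything below is a simplification, not V8's API):

```cpp
#include <vector>

// Schematic stand-ins for what a feedback-vector slot may hold.
enum class Feedback { kUninitialized, kJSFunction, kAllocationSite, kMegamorphic };

struct FeedbackOracle {
  std::vector<Feedback> feedback_vector;  // filled from TypeFeedbackInfo
  bool pretenuring_call_new = false;      // FLAG_pretenuring_call_new

  Feedback GetInfo(int slot) const { return feedback_vector.at(slot); }

  // A call site is monomorphic if its slot pins down a single target; when
  // pretenuring is on, AllocationSite entries are used for allocation
  // tracking instead, so only a concrete function counts.
  bool CallIsMonomorphic(int slot) const {
    Feedback info = GetInfo(slot);
    return pretenuring_call_new
        ? info == Feedback::kJSFunction
        : info == Feedback::kJSFunction || info == Feedback::kAllocationSite;
  }
};
```
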
diff --git a/deps/v8/src/type-info.h b/deps/v8/src/type-info.h
index 8661d5057b..5bf653f1c2 100644
--- a/deps/v8/src/type-info.h
+++ b/deps/v8/src/type-info.h
@@ -50,14 +50,16 @@ class TypeFeedbackOracle: public ZoneObject {
bool LoadIsUninitialized(TypeFeedbackId id);
bool StoreIsUninitialized(TypeFeedbackId id);
bool StoreIsKeyedPolymorphic(TypeFeedbackId id);
+ bool CallIsMonomorphic(int slot);
bool CallIsMonomorphic(TypeFeedbackId aid);
- bool CallNewIsMonomorphic(TypeFeedbackId id);
+ bool KeyedArrayCallIsHoley(TypeFeedbackId id);
+ bool CallNewIsMonomorphic(int slot);
// TODO(1571) We can't use ForInStatement::ForInType as the return value due
// to various cycles in our headers.
// TODO(rossberg): once all oracle access is removed from ast.cc, it should
// be possible.
- byte ForInType(TypeFeedbackId id);
+ byte ForInType(int feedback_vector_slot);
KeyedAccessStoreMode GetStoreMode(TypeFeedbackId id);
@@ -84,9 +86,9 @@ class TypeFeedbackOracle: public ZoneObject {
static bool CanRetainOtherContext(JSFunction* function,
Context* native_context);
- Handle<JSFunction> GetCallTarget(TypeFeedbackId id);
- Handle<JSFunction> GetCallNewTarget(TypeFeedbackId id);
- Handle<AllocationSite> GetCallNewAllocationSite(TypeFeedbackId id);
+ Handle<JSFunction> GetCallTarget(int slot);
+ Handle<JSFunction> GetCallNewTarget(int slot);
+ Handle<AllocationSite> GetCallNewAllocationSite(int slot);
bool LoadIsBuiltin(TypeFeedbackId id, Builtins::Name builtin_id);
bool LoadIsStub(TypeFeedbackId id, ICStub* stub);
@@ -127,19 +129,23 @@ class TypeFeedbackOracle: public ZoneObject {
void GetRelocInfos(Handle<Code> code, ZoneList<RelocInfo>* infos);
void CreateDictionary(Handle<Code> code, ZoneList<RelocInfo>* infos);
void RelocateRelocInfos(ZoneList<RelocInfo>* infos,
- byte* old_start,
- byte* new_start);
+ Code* old_code,
+ Code* new_code);
void ProcessRelocInfos(ZoneList<RelocInfo>* infos);
- void ProcessTypeFeedbackCells(Handle<Code> code);
// Returns an element from the backing store. Returns undefined if
// there is no information.
Handle<Object> GetInfo(TypeFeedbackId id);
+ // Returns an element from the type feedback vector. Returns undefined
+ // if there is no information.
+ Handle<Object> GetInfo(int slot);
+
private:
Handle<Context> native_context_;
Zone* zone_;
Handle<UnseededNumberDictionary> dictionary_;
+ Handle<FixedArray> feedback_vector_;
DISALLOW_COPY_AND_ASSIGN(TypeFeedbackOracle);
};
diff --git a/deps/v8/src/typedarray.js b/deps/v8/src/typedarray.js
index 4195dd5ea8..109d627008 100644
--- a/deps/v8/src/typedarray.js
+++ b/deps/v8/src/typedarray.js
@@ -66,7 +66,7 @@ macro TYPED_ARRAY_CONSTRUCTOR(ARRAY_ID, NAME, ELEMENT_SIZE)
if (offset % ELEMENT_SIZE !== 0) {
throw MakeRangeError("invalid_typed_array_alignment",
- "start offset", "NAME", ELEMENT_SIZE);
+ ["start offset", "NAME", ELEMENT_SIZE]);
}
if (offset > bufferByteLength) {
throw MakeRangeError("invalid_typed_array_offset");
@@ -78,7 +78,7 @@ macro TYPED_ARRAY_CONSTRUCTOR(ARRAY_ID, NAME, ELEMENT_SIZE)
if (IS_UNDEFINED(length)) {
if (bufferByteLength % ELEMENT_SIZE !== 0) {
throw MakeRangeError("invalid_typed_array_alignment",
- "byte length", "NAME", ELEMENT_SIZE);
+ ["byte length", "NAME", ELEMENT_SIZE]);
}
newByteLength = bufferByteLength - offset;
newLength = newByteLength / ELEMENT_SIZE;
@@ -87,28 +87,32 @@ macro TYPED_ARRAY_CONSTRUCTOR(ARRAY_ID, NAME, ELEMENT_SIZE)
newByteLength = newLength * ELEMENT_SIZE;
}
if ((offset + newByteLength > bufferByteLength)
- || (newLength > %MaxSmi())) {
+ || (newLength > %_MaxSmi())) {
throw MakeRangeError("invalid_typed_array_length");
}
- %TypedArrayInitialize(obj, ARRAY_ID, buffer, offset, newByteLength);
+ %_TypedArrayInitialize(obj, ARRAY_ID, buffer, offset, newByteLength);
}
function NAMEConstructByLength(obj, length) {
var l = IS_UNDEFINED(length) ?
0 : ToPositiveInteger(length, "invalid_typed_array_length");
- if (l > %MaxSmi()) {
+ if (l > %_MaxSmi()) {
throw MakeRangeError("invalid_typed_array_length");
}
var byteLength = l * ELEMENT_SIZE;
- var buffer = new $ArrayBuffer(byteLength);
- %TypedArrayInitialize(obj, ARRAY_ID, buffer, 0, byteLength);
+ if (byteLength > %_TypedArrayMaxSizeInHeap()) {
+ var buffer = new $ArrayBuffer(byteLength);
+ %_TypedArrayInitialize(obj, ARRAY_ID, buffer, 0, byteLength);
+ } else {
+ %_TypedArrayInitialize(obj, ARRAY_ID, null, 0, byteLength);
+ }
}
function NAMEConstructByArrayLike(obj, arrayLike) {
var length = arrayLike.length;
var l = ToPositiveInteger(length, "invalid_typed_array_length");
- if (l > %MaxSmi()) {
+ if (l > %_MaxSmi()) {
throw MakeRangeError("invalid_typed_array_length");
}
if(!%TypedArrayInitializeFromArrayLike(obj, ARRAY_ID, arrayLike, l)) {
@@ -257,7 +261,7 @@ function TypedArraySet(obj, offset) {
throw MakeTypeError("typed_array_set_negative_offset");
}
- if (intOffset > %MaxSmi()) {
+ if (intOffset > %_MaxSmi()) {
throw MakeRangeError("typed_array_set_source_too_large");
}
switch (%TypedArraySetFastCases(this, obj, intOffset)) {
@@ -350,7 +354,7 @@ function DataViewConstructor(buffer, byteOffset, byteLength) { // length = 3
if (length < 0 || offset + length > bufferByteLength) {
throw new MakeRangeError('invalid_data_view_length');
}
- %DataViewInitialize(this, buffer, offset, length);
+ %_DataViewInitialize(this, buffer, offset, length);
} else {
throw MakeTypeError('constructor_not_function', ["DataView"]);
}
diff --git a/deps/v8/src/types.cc b/deps/v8/src/types.cc
index 7867899d71..e269582ca0 100644
--- a/deps/v8/src/types.cc
+++ b/deps/v8/src/types.cc
@@ -141,29 +141,32 @@ int TypeImpl<Config>::LubBitset() {
}
return bitset;
} else if (this->IsClass()) {
- return LubBitset(*this->AsClass());
+ int bitset = Config::lub_bitset(this);
+ return bitset ? bitset : LubBitset(*this->AsClass());
} else {
- return LubBitset(*this->AsConstant());
+ int bitset = Config::lub_bitset(this);
+ return bitset ? bitset : LubBitset(*this->AsConstant());
}
}
template<class Config>
int TypeImpl<Config>::LubBitset(i::Object* value) {
- if (value->IsSmi()) return kSmi;
+ if (value->IsSmi()) return kSignedSmall & kTaggedInt;
i::Map* map = i::HeapObject::cast(value)->map();
if (map->instance_type() == HEAP_NUMBER_TYPE) {
int32_t i;
uint32_t u;
- if (value->ToInt32(&i)) return Smi::IsValid(i) ? kSmi : kOtherSigned32;
- if (value->ToUint32(&u)) return kUnsigned32;
- return kDouble;
+ return kTaggedPtr & (
+ value->ToInt32(&i) ? (Smi::IsValid(i) ? kSignedSmall : kOtherSigned32) :
+ value->ToUint32(&u) ? kUnsigned32 : kFloat);
}
if (map->instance_type() == ODDBALL_TYPE) {
if (value->IsUndefined()) return kUndefined;
if (value->IsNull()) return kNull;
if (value->IsBoolean()) return kBoolean;
if (value->IsTheHole()) return kAny; // TODO(rossberg): kNone?
+ if (value->IsUninitialized()) return kNone;
UNREACHABLE();
}
return LubBitset(map);
@@ -201,7 +204,7 @@ int TypeImpl<Config>::LubBitset(i::Map* map) {
case ODDBALL_TYPE:
return kOddball;
case HEAP_NUMBER_TYPE:
- return kDouble;
+ return kFloat & kTaggedPtr;
case JS_VALUE_TYPE:
case JS_DATE_TYPE:
case JS_OBJECT_TYPE:
@@ -244,7 +247,7 @@ int TypeImpl<Config>::LubBitset(i::Map* map) {
case EXECUTABLE_ACCESSOR_INFO_TYPE:
case ACCESSOR_PAIR_TYPE:
case FIXED_ARRAY_TYPE:
- return kInternal;
+ return kInternal & kTaggedPtr;
default:
UNREACHABLE();
return kNone;
@@ -270,13 +273,12 @@ int TypeImpl<Config>::GlbBitset() {
template<class Config>
typename TypeImpl<Config>::TypeHandle TypeImpl<Config>::OfCurrently(
i::Handle<i::Object> value, Region* region) {
- if (value->IsSmi()) return Smi(region);
- i::Map* map = i::HeapObject::cast(*value)->map();
- if (map->instance_type() == HEAP_NUMBER_TYPE ||
- map->instance_type() == ODDBALL_TYPE) {
+ if (value->IsSmi() ||
+ i::HeapObject::cast(*value)->map()->instance_type() == HEAP_NUMBER_TYPE ||
+ i::HeapObject::cast(*value)->map()->instance_type() == ODDBALL_TYPE) {
return Of(value, region);
}
- return Class(i::handle(map), region);
+ return Class(i::handle(i::HeapObject::cast(*value)->map()), region);
}
@@ -337,10 +339,10 @@ template<class Config>
bool TypeImpl<Config>::Maybe(TypeImpl* that) {
// Fast path for bitsets.
if (this->IsBitset()) {
- return (this->AsBitset() & that->LubBitset()) != 0;
+ return IsInhabited(this->AsBitset() & that->LubBitset());
}
if (that->IsBitset()) {
- return (this->LubBitset() & that->AsBitset()) != 0;
+ return IsInhabited(this->LubBitset() & that->AsBitset());
}
// (T1 \/ ... \/ Tn) overlaps T <=> (T1 overlaps T) \/ ... \/ (Tn overlaps T)
@@ -547,9 +549,9 @@ typename TypeImpl<Config>::TypeHandle TypeImpl<Config>::Convert(
if (type->IsBitset()) {
return Config::from_bitset(type->AsBitset(), region);
} else if (type->IsClass()) {
- return Config::from_class(type->AsClass(), region);
+ return Config::from_class(type->AsClass(), type->LubBitset(), region);
} else if (type->IsConstant()) {
- return Config::from_constant(type->AsConstant(), region);
+ return Config::from_constant(type->AsConstant(), type->LubBitset(), region);
} else {
ASSERT(type->IsUnion());
typename OtherType::UnionedHandle unioned = type->AsUnion();
@@ -567,7 +569,7 @@ typename TypeImpl<Config>::TypeHandle TypeImpl<Config>::Convert(
// TODO(rossberg): this does not belong here.
Representation Representation::FromType(Type* type) {
if (type->Is(Type::None())) return Representation::None();
- if (type->Is(Type::Smi())) return Representation::Smi();
+ if (type->Is(Type::SignedSmall())) return Representation::Smi();
if (type->Is(Type::Signed32())) return Representation::Integer32();
if (type->Is(Type::Number())) return Representation::Double();
return Representation::Tagged();
@@ -576,8 +578,8 @@ Representation Representation::FromType(Type* type) {
#ifdef OBJECT_PRINT
template<class Config>
-void TypeImpl<Config>::TypePrint() {
- TypePrint(stdout);
+void TypeImpl<Config>::TypePrint(PrintDimension dim) {
+ TypePrint(stdout, dim);
PrintF(stdout, "\n");
Flush(stdout);
}
@@ -586,9 +588,17 @@ void TypeImpl<Config>::TypePrint() {
template<class Config>
const char* TypeImpl<Config>::bitset_name(int bitset) {
switch (bitset) {
- #define PRINT_COMPOSED_TYPE(type, value) case k##type: return #type;
- BITSET_TYPE_LIST(PRINT_COMPOSED_TYPE)
+ case kAny & kRepresentation: return "Any";
+ #define PRINT_COMPOSED_TYPE(type, value) \
+ case k##type & kRepresentation: return #type;
+ REPRESENTATION_BITSET_TYPE_LIST(PRINT_COMPOSED_TYPE)
#undef PRINT_COMPOSED_TYPE
+
+ #define PRINT_COMPOSED_TYPE(type, value) \
+ case k##type & kSemantic: return #type;
+ SEMANTIC_BITSET_TYPE_LIST(PRINT_COMPOSED_TYPE)
+ #undef PRINT_COMPOSED_TYPE
+
default:
return NULL;
}
@@ -596,23 +606,54 @@ const char* TypeImpl<Config>::bitset_name(int bitset) {
template<class Config>
-void TypeImpl<Config>::TypePrint(FILE* out) {
+void TypeImpl<Config>::BitsetTypePrint(FILE* out, int bitset) {
+ const char* name = bitset_name(bitset);
+ if (name != NULL) {
+ PrintF(out, "%s", name);
+ } else {
+ static const int named_bitsets[] = {
+ #define BITSET_CONSTANT(type, value) k##type & kRepresentation,
+ REPRESENTATION_BITSET_TYPE_LIST(BITSET_CONSTANT)
+ #undef BITSET_CONSTANT
+
+ #define BITSET_CONSTANT(type, value) k##type & kSemantic,
+ SEMANTIC_BITSET_TYPE_LIST(BITSET_CONSTANT)
+ #undef BITSET_CONSTANT
+ };
+
+ bool is_first = true;
+ PrintF(out, "(");
+ for (int i(ARRAY_SIZE(named_bitsets) - 1); bitset != 0 && i >= 0; --i) {
+ int subset = named_bitsets[i];
+ if ((bitset & subset) == subset) {
+ if (!is_first) PrintF(out, " | ");
+ is_first = false;
+ PrintF(out, "%s", bitset_name(subset));
+ bitset -= subset;
+ }
+ }
+ ASSERT(bitset == 0);
+ PrintF(out, ")");
+ }
+}
+
+
+template<class Config>
+void TypeImpl<Config>::TypePrint(FILE* out, PrintDimension dim) {
if (this->IsBitset()) {
int bitset = this->AsBitset();
- const char* name = bitset_name(bitset);
- if (name != NULL) {
- PrintF(out, "%s", name);
- } else {
- bool is_first = true;
- PrintF(out, "(");
- for (int mask = 1; mask != 0; mask = mask << 1) {
- if ((bitset & mask) != 0) {
- if (!is_first) PrintF(out, " | ");
- is_first = false;
- PrintF(out, "%s", bitset_name(mask));
- }
- }
- PrintF(out, ")");
+ switch (dim) {
+ case BOTH_DIMS:
+ BitsetTypePrint(out, bitset & kSemantic);
+ PrintF("/");
+ BitsetTypePrint(out, bitset & kRepresentation);
+ break;
+ case SEMANTIC_DIM:
+ BitsetTypePrint(out, bitset & kSemantic);
+ break;
+ case REPRESENTATION_DIM:
+ BitsetTypePrint(out, bitset & kRepresentation);
+ break;
}
} else if (this->IsConstant()) {
PrintF(out, "Constant(%p : ", static_cast<void*>(*this->AsConstant()));
diff --git a/deps/v8/src/types.h b/deps/v8/src/types.h
index 99a809dc10..4569d131b1 100644
--- a/deps/v8/src/types.h
+++ b/deps/v8/src/types.h
@@ -42,7 +42,10 @@ namespace internal {
// can express class types (a.k.a. specific maps) and singleton types (i.e.,
// concrete constants).
//
-// The following equations and inequations hold:
+// Types consist of two dimensions: semantic (value range) and representation.
+// Both are related through subtyping.
+//
+// The following equations and inequations hold for the semantic axis:
//
// None <= T
// T <= Any
@@ -54,13 +57,12 @@ namespace internal {
// UniqueName = InternalizedString \/ Symbol
// InternalizedString < String
//
-// Allocated = Receiver \/ Number \/ Name
-// Detectable = Allocated - Undetectable
-// Undetectable < Object
// Receiver = Object \/ Proxy
// Array < Object
// Function < Object
// RegExp < Object
+// Undetectable < Object
+// Detectable = Receiver \/ Number \/ Name - Undetectable
//
// Class(map) < T iff instance_type(map) < T
// Constant(x) < T iff instance_type(map(x)) < T
@@ -70,65 +72,121 @@ namespace internal {
// TODO(rossberg): the latter is not currently true for proxies, because of fix,
// but will hold once we implement direct proxies.
//
+// For the representation axis, the following holds:
+//
+// None <= R
+// R <= Any
+//
+// UntaggedInt <= UntaggedInt8 \/ UntaggedInt16 \/ UntaggedInt32
+// UntaggedFloat <= UntaggedFloat32 \/ UntaggedFloat64
+// UntaggedNumber <= UntaggedInt \/ UntaggedFloat
+// Untagged <= UntaggedNumber \/ UntaggedPtr
+// Tagged <= TaggedInt \/ TaggedPtr
+//
+// Subtyping relates the two dimensions, for example:
+//
+// Number <= Tagged \/ UntaggedNumber
+// Object <= TaggedPtr \/ UntaggedPtr
+//
+// That holds because the semantic type constructors defined by the API create
+// types that allow for all possible representations, and dually, the ones for
+// representation types initially include all semantic ranges. Representations
+// can then e.g. be narrowed for a given semantic type using intersection:
+//
+// SignedSmall /\ TaggedInt (a 'smi')
+// Number /\ TaggedPtr (a heap number)
+//
// There are two main functions for testing types:
//
// T1->Is(T2) -- tests whether T1 is included in T2 (i.e., T1 <= T2)
// T1->Maybe(T2) -- tests whether T1 and T2 overlap (i.e., T1 /\ T2 =/= 0)
//
// Typically, the former is to be used to select representations (e.g., via
-// T->Is(Integer31())), and the to check whether a specific case needs handling
-// (e.g., via T->Maybe(Number())).
+// T->Is(SignedSmall())), and the latter to check whether a specific case needs
+// handling (e.g., via T->Maybe(Number())).
//
// There is no functionality to discover whether a type is a leaf in the
// lattice. That is intentional. It should always be possible to refine the
// lattice (e.g., splitting up number types further) without invalidating any
// existing assumptions or tests.
-//
// Consequently, do not use pointer equality for type tests, always use Is!
//
// Internally, all 'primitive' types, and their unions, are represented as
-// bitsets via smis. Class is a heap pointer to the respective map. Only
-// Constant's, or unions containing Class'es or Constant's, require allocation.
+// bitsets. Class is a heap pointer to the respective map. Only Constant's, or
+// unions containing Class'es or Constant's, currently require allocation.
// Note that the bitset representation is closed under both Union and Intersect.
//
-// The type representation is heap-allocated, so cannot (currently) be used in
-// a concurrent compilation context.
-
-
-#define BITSET_TYPE_LIST(V) \
- V(None, 0) \
- V(Null, 1 << 0) \
- V(Undefined, 1 << 1) \
- V(Boolean, 1 << 2) \
- V(Smi, 1 << 3) \
- V(OtherSigned32, 1 << 4) \
- V(Unsigned32, 1 << 5) \
- V(Double, 1 << 6) \
- V(Symbol, 1 << 7) \
- V(InternalizedString, 1 << 8) \
- V(OtherString, 1 << 9) \
- V(Undetectable, 1 << 10) \
- V(Array, 1 << 11) \
- V(Function, 1 << 12) \
- V(RegExp, 1 << 13) \
- V(OtherObject, 1 << 14) \
- V(Proxy, 1 << 15) \
- V(Internal, 1 << 16) \
+// There are two type representations, using different allocation:
+//
+// - class Type (zone-allocated, for compiler and concurrent compilation)
+// - class HeapType (heap-allocated, for persistent types)
+//
+// Both provide the same API, and the Convert method can be used to interconvert
+// them. For zone types, no query method touches the heap, only constructors do.
+
+
+#define MASK_BITSET_TYPE_LIST(V) \
+ V(Representation, static_cast<int>(0xff800000)) \
+ V(Semantic, static_cast<int>(0x007fffff))
+
+#define REPRESENTATION(k) ((k) & kRepresentation)
+#define SEMANTIC(k) ((k) & kSemantic)
+
+#define REPRESENTATION_BITSET_TYPE_LIST(V) \
+ V(None, 0) \
+ V(UntaggedInt8, 1 << 23 | kSemantic) \
+ V(UntaggedInt16, 1 << 24 | kSemantic) \
+ V(UntaggedInt32, 1 << 25 | kSemantic) \
+ V(UntaggedFloat32, 1 << 26 | kSemantic) \
+ V(UntaggedFloat64, 1 << 27 | kSemantic) \
+ V(UntaggedPtr, 1 << 28 | kSemantic) \
+ V(TaggedInt, 1 << 29 | kSemantic) \
+ V(TaggedPtr, -1 << 30 | kSemantic) /* MSB has to be sign-extended */ \
\
- V(Oddball, kBoolean | kNull | kUndefined) \
- V(Signed32, kSmi | kOtherSigned32) \
- V(Number, kSigned32 | kUnsigned32 | kDouble) \
- V(String, kInternalizedString | kOtherString) \
- V(UniqueName, kSymbol | kInternalizedString) \
- V(Name, kSymbol | kString) \
- V(NumberOrString, kNumber | kString) \
- V(Object, kUndetectable | kArray | kFunction | \
- kRegExp | kOtherObject) \
- V(Receiver, kObject | kProxy) \
- V(Allocated, kDouble | kName | kReceiver) \
- V(Any, kOddball | kNumber | kAllocated | kInternal) \
- V(NonNumber, kAny - kNumber) \
- V(Detectable, kAllocated - kUndetectable)
+ V(UntaggedInt, kUntaggedInt8 | kUntaggedInt16 | kUntaggedInt32) \
+ V(UntaggedFloat, kUntaggedFloat32 | kUntaggedFloat64) \
+ V(UntaggedNumber, kUntaggedInt | kUntaggedFloat) \
+ V(Untagged, kUntaggedNumber | kUntaggedPtr) \
+ V(Tagged, kTaggedInt | kTaggedPtr)
+
+#define SEMANTIC_BITSET_TYPE_LIST(V) \
+ V(Null, 1 << 0 | REPRESENTATION(kTaggedPtr)) \
+ V(Undefined, 1 << 1 | REPRESENTATION(kTaggedPtr)) \
+ V(Boolean, 1 << 2 | REPRESENTATION(kTaggedPtr)) \
+ V(SignedSmall, 1 << 3 | REPRESENTATION(kTagged | kUntaggedNumber)) \
+ V(OtherSigned32, 1 << 4 | REPRESENTATION(kTagged | kUntaggedNumber)) \
+ V(Unsigned32, 1 << 5 | REPRESENTATION(kTagged | kUntaggedNumber)) \
+ V(Float, 1 << 6 | REPRESENTATION(kTagged | kUntaggedNumber)) \
+ V(Symbol, 1 << 7 | REPRESENTATION(kTaggedPtr)) \
+ V(InternalizedString, 1 << 8 | REPRESENTATION(kTaggedPtr)) \
+ V(OtherString, 1 << 9 | REPRESENTATION(kTaggedPtr)) \
+ V(Undetectable, 1 << 10 | REPRESENTATION(kTaggedPtr)) \
+ V(Array, 1 << 11 | REPRESENTATION(kTaggedPtr)) \
+ V(Function, 1 << 12 | REPRESENTATION(kTaggedPtr)) \
+ V(RegExp, 1 << 13 | REPRESENTATION(kTaggedPtr)) \
+ V(OtherObject, 1 << 14 | REPRESENTATION(kTaggedPtr)) \
+ V(Proxy, 1 << 15 | REPRESENTATION(kTaggedPtr)) \
+ V(Internal, 1 << 16 | REPRESENTATION(kTagged | kUntagged)) \
+ \
+ V(Oddball, kBoolean | kNull | kUndefined) \
+ V(Signed32, kSignedSmall | kOtherSigned32) \
+ V(Number, kSigned32 | kUnsigned32 | kFloat) \
+ V(String, kInternalizedString | kOtherString) \
+ V(UniqueName, kSymbol | kInternalizedString) \
+ V(Name, kSymbol | kString) \
+ V(NumberOrString, kNumber | kString) \
+ V(DetectableObject, kArray | kFunction | kRegExp | kOtherObject) \
+ V(DetectableReceiver, kDetectableObject | kProxy) \
+ V(Detectable, kDetectableReceiver | kNumber | kName) \
+ V(Object, kDetectableObject | kUndetectable) \
+ V(Receiver, kObject | kProxy) \
+ V(NonNumber, kOddball | kName | kReceiver | kInternal) \
+ V(Any, kNumber | kNonNumber)
+
+#define BITSET_TYPE_LIST(V) \
+ MASK_BITSET_TYPE_LIST(V) \
+ REPRESENTATION_BITSET_TYPE_LIST(V) \
+ SEMANTIC_BITSET_TYPE_LIST(V)
// struct Config {
@@ -147,14 +205,15 @@ namespace internal {
// static Handle<Unioned>::type as_union(Type*);
// static Type* from_bitset(int bitset);
// static Handle<Type>::type from_bitset(int bitset, Region*);
-// static Handle<Type>::type from_class(i::Handle<i::Map>, Region*)
-// static Handle<Type>::type from_constant(i::Handle<i::Object>, Region*);
+// static Handle<Type>::type from_class(i::Handle<Map>, int lub, Region*);
+// static Handle<Type>::type from_constant(i::Handle<Object>, int, Region*);
// static Handle<Type>::type from_union(Handle<Unioned>::type);
// static Handle<Unioned>::type union_create(int size, Region*);
// static void union_shrink(Handle<Unioned>::type, int size);
// static Handle<Type>::type union_get(Handle<Unioned>::type, int);
// static void union_set(Handle<Unioned>::type, int, Handle<Type>::type);
// static int union_length(Handle<Unioned>::type);
+// static int lub_bitset(Type*);
// }
template<class Config>
class TypeImpl : public Config::Base {
@@ -171,10 +230,10 @@ class TypeImpl : public Config::Base {
#undef DEFINE_TYPE_CONSTRUCTOR
static TypeHandle Class(i::Handle<i::Map> map, Region* region) {
- return Config::from_class(map, region);
+ return Config::from_class(map, LubBitset(*map), region);
}
static TypeHandle Constant(i::Handle<i::Object> value, Region* region) {
- return Config::from_constant(value, region);
+ return Config::from_constant(value, LubBitset(*value), region);
}
static TypeHandle Union(TypeHandle type1, TypeHandle type2, Region* reg);
@@ -248,8 +307,9 @@ class TypeImpl : public Config::Base {
typename OtherTypeImpl::TypeHandle type, Region* region);
#ifdef OBJECT_PRINT
- void TypePrint();
- void TypePrint(FILE* out);
+ enum PrintDimension { BOTH_DIMS, SEMANTIC_DIM, REPRESENTATION_DIM };
+ void TypePrint(PrintDimension = BOTH_DIMS);
+ void TypePrint(FILE* out, PrintDimension = BOTH_DIMS);
#endif
private:
@@ -286,6 +346,10 @@ class TypeImpl : public Config::Base {
bool SlowIs(TypeImpl* that);
+ static bool IsInhabited(int bitset) {
+ return (bitset & kRepresentation) && (bitset & kSemantic);
+ }
+
int LubBitset(); // least upper bound that's a bitset
int GlbBitset(); // greatest lower bound that's a bitset
@@ -300,6 +364,7 @@ class TypeImpl : public Config::Base {
#ifdef OBJECT_PRINT
static const char* bitset_name(int bitset);
+ static void BitsetTypePrint(FILE* out, int bitset);
#endif
};
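The key move in the types.h hunks above is that every bitset type now carries two orthogonal dimensions: a representation part (tagged/untagged, int/float/pointer) and a semantic part (Null, SignedSmall, String, ...), and the new IsInhabited() declares a bitset empty unless it has bits in both. A minimal standalone sketch of that rule, using made-up bit positions rather than V8's actual constants:

    #include <cassert>
    #include <cstdint>

    // Illustrative bit positions only; V8's real layout differs.
    const uint32_t kRepTaggedPtr   = 1u << 29;   // representation bits (high)
    const uint32_t kRepUntaggedInt = 1u << 27;
    const uint32_t kRepresentation = kRepTaggedPtr | kRepUntaggedInt;

    const uint32_t kSemNull        = 1u << 0;    // semantic bits (low)
    const uint32_t kSemSignedSmall = 1u << 3;
    const uint32_t kSemantic       = kSemNull | kSemSignedSmall;

    // Mirrors TypeImpl::IsInhabited: a bitset denotes a non-empty set of
    // values only if neither dimension is constrained to nothing.
    static bool IsInhabited(uint32_t bitset) {
      return (bitset & kRepresentation) && (bitset & kSemantic);
    }

    int main() {
      assert(IsInhabited(kRepTaggedPtr | kSemNull));  // Null: tagged ptr + null
      assert(!IsInhabited(kRepTaggedPtr));            // representation alone is empty
      assert(!IsInhabited(kSemSignedSmall));          // semantics alone is empty too
      return 0;
    }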
@@ -335,7 +400,7 @@ struct ZoneTypeConfig {
}
template<class T>
static void tagged_set(Tagged* tagged, int i, T value) {
- tagged->at(i + 1) = reinterpret_cast<T>(value);
+ tagged->at(i + 1) = reinterpret_cast<void*>(value);
}
static int tagged_length(Tagged* tagged) {
return tagged->length() - 1;
@@ -375,11 +440,11 @@ struct ZoneTypeConfig {
}
static i::Handle<i::Map> as_class(Type* type) {
ASSERT(is_class(type));
- return i::Handle<i::Map>(tagged_get<i::Map**>(as_tagged(type), 0));
+ return i::Handle<i::Map>(tagged_get<i::Map**>(as_tagged(type), 1));
}
static i::Handle<i::Object> as_constant(Type* type) {
ASSERT(is_constant(type));
- return i::Handle<i::Object>(tagged_get<i::Object**>(as_tagged(type), 0));
+ return i::Handle<i::Object>(tagged_get<i::Object**>(as_tagged(type), 1));
}
static Unioned* as_union(Type* type) {
ASSERT(is_union(type));
@@ -399,14 +464,16 @@ struct ZoneTypeConfig {
static Type* from_tagged(Tagged* tagged) {
return reinterpret_cast<Type*>(tagged);
}
- static Type* from_class(i::Handle<i::Map> map, Zone* zone) {
- Tagged* tagged = tagged_create(kClassTag, 1, zone);
- tagged_set(tagged, 0, map.location());
+ static Type* from_class(i::Handle<i::Map> map, int lub, Zone* zone) {
+ Tagged* tagged = tagged_create(kClassTag, 2, zone);
+ tagged_set(tagged, 0, lub);
+ tagged_set(tagged, 1, map.location());
return from_tagged(tagged);
}
- static Type* from_constant(i::Handle<i::Object> value, Zone* zone) {
- Tagged* tagged = tagged_create(kConstantTag, 1, zone);
- tagged_set(tagged, 0, value.location());
+ static Type* from_constant(i::Handle<i::Object> value, int lub, Zone* zone) {
+ Tagged* tagged = tagged_create(kConstantTag, 2, zone);
+ tagged_set(tagged, 0, lub);
+ tagged_set(tagged, 1, value.location());
return from_tagged(tagged);
}
static Type* from_union(Unioned* unioned) {
@@ -434,6 +501,10 @@ struct ZoneTypeConfig {
static int union_length(Unioned* unioned) {
return tagged_length(tagged_from_union(unioned));
}
+ static int lub_bitset(Type* type) {
+ ASSERT(is_class(type) || is_constant(type));
+ return static_cast<int>(tagged_get<intptr_t>(as_tagged(type), 0));
+ }
};
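A detail worth noting in the ZoneTypeConfig hunks: tagged_set now casts through void*, so the same payload slot can hold either a pointer (the map/constant handle location, moved to index 1) or a plain integer (the cached least-upper-bound bitset at index 0, read back by lub_bitset via tagged_get<intptr_t>). A tiny sketch of that int-or-pointer-in-a-void*-slot round trip; a single variable stands in for the real Tagged list:

    #include <cassert>
    #include <cstdint>

    // One void* slot holding either an integer (the cached lub bitset) or a
    // pointer, mirroring tagged_set's reinterpret_cast<void*> and
    // lub_bitset's tagged_get<intptr_t> read-back.
    int main() {
      void* slot;

      int lub = 0x40;  // illustrative bitset value
      slot = reinterpret_cast<void*>(static_cast<intptr_t>(lub));
      assert(static_cast<int>(reinterpret_cast<intptr_t>(slot)) == 0x40);

      int object = 42;  // now reuse the slot for a real pointer
      slot = reinterpret_cast<void*>(&object);
      assert(*reinterpret_cast<int*>(slot) == 42);
      return 0;
    }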
@@ -475,11 +546,12 @@ struct HeapTypeConfig {
static i::Handle<Type> from_bitset(int bitset, Isolate* isolate) {
return i::handle(from_bitset(bitset), isolate);
}
- static i::Handle<Type> from_class(i::Handle<i::Map> map, Isolate* isolate) {
+ static i::Handle<Type> from_class(
+ i::Handle<i::Map> map, int lub, Isolate* isolate) {
return i::Handle<Type>::cast(i::Handle<Object>::cast(map));
}
static i::Handle<Type> from_constant(
- i::Handle<i::Object> value, Isolate* isolate) {
+ i::Handle<i::Object> value, int lub, Isolate* isolate) {
i::Handle<Box> box = isolate->factory()->NewBox(value);
return i::Handle<Type>::cast(i::Handle<Object>::cast(box));
}
@@ -506,6 +578,9 @@ struct HeapTypeConfig {
static int union_length(i::Handle<Unioned> unioned) {
return unioned->length();
}
+ static int lub_bitset(Type* type) {
+ return 0; // kNone, which causes recomputation.
+ }
};
typedef TypeImpl<ZoneTypeConfig> Type;
@@ -560,6 +635,10 @@ struct BoundsImpl {
TypeHandle upper = Type::Intersect(b.upper, t, region);
return BoundsImpl(lower, upper);
}
+
+ bool Narrows(BoundsImpl that) {
+ return that.lower->Is(this->lower) && this->upper->Is(that.upper);
+ }
};
typedef BoundsImpl<ZoneTypeConfig> Bounds;
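The new Bounds::Narrows(that) predicate says that this pair of bounds is at least as tight as that one: that.lower must be a subtype of this->lower and this->upper a subtype of that.upper. A toy model over bitsets, where subtyping is plain subset inclusion (the real code compares Type handles with Is()):

    #include <cassert>
    #include <cstdint>

    // Toy Bounds where types are bitsets and Is() is subset inclusion.
    struct ToyBounds {
      uint32_t lower;  // the value's type contains at least these bits
      uint32_t upper;  // ... and at most these bits
      bool Narrows(const ToyBounds& that) const {
        bool lower_tighter = (that.lower & ~lower) == 0;   // that.lower "Is" this->lower
        bool upper_tighter = (upper & ~that.upper) == 0;   // this->upper "Is" that.upper
        return lower_tighter && upper_tighter;
      }
    };

    int main() {
      const uint32_t kSignedSmall = 1u << 0, kOtherNumber = 1u << 1, kString = 1u << 2;
      ToyBounds wide   = { 0, kSignedSmall | kOtherNumber | kString };
      ToyBounds narrow = { kSignedSmall, kSignedSmall | kOtherNumber };
      assert(narrow.Narrows(wide));    // tighter bounds narrow wider ones
      assert(!wide.Narrows(narrow));
      return 0;
    }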
diff --git a/deps/v8/src/typing.cc b/deps/v8/src/typing.cc
index c7bea40ac6..2a581e293a 100644
--- a/deps/v8/src/typing.cc
+++ b/deps/v8/src/typing.cc
@@ -323,7 +323,7 @@ void AstTyper::VisitForStatement(ForStatement* stmt) {
void AstTyper::VisitForInStatement(ForInStatement* stmt) {
// Collect type feedback.
stmt->set_for_in_type(static_cast<ForInStatement::ForInType>(
- oracle()->ForInType(stmt->ForInFeedbackId())));
+ oracle()->ForInType(stmt->ForInFeedbackSlot())));
RECURSE(Visit(stmt->enumerable()));
store_.Forget(); // Control may transfer here via looping or 'continue'.
@@ -530,8 +530,9 @@ void AstTyper::VisitCall(Call* expr) {
// Collect type feedback.
RECURSE(Visit(expr->expression()));
if (!expr->expression()->IsProperty() &&
- oracle()->CallIsMonomorphic(expr->CallFeedbackId())) {
- expr->set_target(oracle()->GetCallTarget(expr->CallFeedbackId()));
+ expr->HasCallFeedbackSlot() &&
+ oracle()->CallIsMonomorphic(expr->CallFeedbackSlot())) {
+ expr->set_target(oracle()->GetCallTarget(expr->CallFeedbackSlot()));
}
ZoneList<Expression*>* args = expr->arguments();
@@ -560,7 +561,7 @@ void AstTyper::VisitCallNew(CallNew* expr) {
RECURSE(Visit(arg));
}
- // We don't know anything about the result type.
+ NarrowType(expr, Bounds(Type::None(zone()), Type::Receiver(zone())));
}
@@ -611,7 +612,7 @@ void AstTyper::VisitCountOperation(CountOperation* expr) {
RECURSE(Visit(expr->expression()));
- NarrowType(expr, Bounds(Type::Smi(zone()), Type::Number(zone())));
+ NarrowType(expr, Bounds(Type::SignedSmall(zone()), Type::Number(zone())));
VariableProxy* proxy = expr->expression()->AsVariableProxy();
if (proxy != NULL && proxy->var()->IsStackAllocated()) {
@@ -667,7 +668,7 @@ void AstTyper::VisitBinaryOperation(BinaryOperation* expr) {
Type* upper = Type::Union(
expr->left()->bounds().upper, expr->right()->bounds().upper, zone());
if (!upper->Is(Type::Signed32())) upper = Type::Signed32(zone());
- Type* lower = Type::Intersect(Type::Smi(zone()), upper, zone());
+ Type* lower = Type::Intersect(Type::SignedSmall(zone()), upper, zone());
NarrowType(expr, Bounds(lower, upper));
break;
}
@@ -676,7 +677,8 @@ void AstTyper::VisitBinaryOperation(BinaryOperation* expr) {
case Token::SAR:
RECURSE(Visit(expr->left()));
RECURSE(Visit(expr->right()));
- NarrowType(expr, Bounds(Type::Smi(zone()), Type::Signed32(zone())));
+ NarrowType(expr,
+ Bounds(Type::SignedSmall(zone()), Type::Signed32(zone())));
break;
case Token::SHR:
RECURSE(Visit(expr->left()));
@@ -684,7 +686,7 @@ void AstTyper::VisitBinaryOperation(BinaryOperation* expr) {
// TODO(rossberg): The upper bound would be Unsigned32, but since there
// is no 'positive Smi' type for the lower bound, we use the smallest
// union of Smi and Unsigned32 as upper bound instead.
- NarrowType(expr, Bounds(Type::Smi(zone()), Type::Number(zone())));
+ NarrowType(expr, Bounds(Type::SignedSmall(zone()), Type::Number(zone())));
break;
case Token::ADD: {
RECURSE(Visit(expr->left()));
@@ -697,7 +699,7 @@ void AstTyper::VisitBinaryOperation(BinaryOperation* expr) {
l.lower->Is(Type::String()) || r.lower->Is(Type::String()) ?
Type::String(zone()) :
l.lower->Is(Type::Number()) && r.lower->Is(Type::Number()) ?
- Type::Smi(zone()) : Type::None(zone());
+ Type::SignedSmall(zone()) : Type::None(zone());
Type* upper =
l.upper->Is(Type::String()) || r.upper->Is(Type::String()) ?
Type::String(zone()) :
@@ -712,7 +714,7 @@ void AstTyper::VisitBinaryOperation(BinaryOperation* expr) {
case Token::MOD:
RECURSE(Visit(expr->left()));
RECURSE(Visit(expr->right()));
- NarrowType(expr, Bounds(Type::Smi(zone()), Type::Number(zone())));
+ NarrowType(expr, Bounds(Type::SignedSmall(zone()), Type::Number(zone())));
break;
default:
UNREACHABLE();
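Most of the typing.cc hunk is the Smi -> SignedSmall rename, but the ADD case is worth spelling out: the result's lower bound is String if either operand is definitely a string, SignedSmall if both are definitely numbers, and None otherwise. A toy reconstruction of just that three-way choice, with a simplified "what we know" enum standing in for real Type handles:

    #include <cassert>

    // Simplified "what we definitely know about an operand" in place of real
    // Type handles.
    enum Known { kUnknown, kNumber, kString };

    static Known AddLowerBound(Known l, Known r) {
      if (l == kString || r == kString) return kString;   // definitely concatenation
      if (l == kNumber && r == kNumber) return kNumber;    // the patch uses SignedSmall here
      return kUnknown;                                     // Type::None in the real code
    }

    int main() {
      assert(AddLowerBound(kString, kNumber) == kString);
      assert(AddLowerBound(kNumber, kNumber) == kNumber);
      assert(AddLowerBound(kUnknown, kNumber) == kUnknown);
      return 0;
    }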
diff --git a/deps/v8/src/unicode.cc b/deps/v8/src/unicode.cc
index bd32467786..2bef7ab20b 100644
--- a/deps/v8/src/unicode.cc
+++ b/deps/v8/src/unicode.cc
@@ -25,7 +25,7 @@
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
//
-// This file was generated at 2012-03-06 09:55:58.934483
+// This file was generated at 2014-02-07 15:31:16.733174
#include "unicode-inl.h"
#include <stdlib.h>
@@ -710,28 +710,6 @@ bool Letter::Is(uchar c) {
}
-// Space: point.category == 'Zs'
-
-static const uint16_t kSpaceTable0Size = 4;
-static const int32_t kSpaceTable0[4] = {
- 32, 160, 5760, 6158 }; // NOLINT
-static const uint16_t kSpaceTable1Size = 5;
-static const int32_t kSpaceTable1[5] = {
- 1073741824, 10, 47, 95, 4096 }; // NOLINT
-bool Space::Is(uchar c) {
- int chunk_index = c >> 13;
- switch (chunk_index) {
- case 0: return LookupPredicate(kSpaceTable0,
- kSpaceTable0Size,
- c);
- case 1: return LookupPredicate(kSpaceTable1,
- kSpaceTable1Size,
- c);
- default: return false;
- }
-}
-
-
// Number: point.category == 'Nd'
static const uint16_t kNumberTable0Size = 56;
@@ -767,14 +745,14 @@ bool Number::Is(uchar c) {
}
-// WhiteSpace: 'Ws' in point.properties
+// WhiteSpace: point.category == 'Zs'
-static const uint16_t kWhiteSpaceTable0Size = 7;
-static const int32_t kWhiteSpaceTable0[7] = {
- 1073741833, 13, 32, 133, 160, 5760, 6158 }; // NOLINT
-static const uint16_t kWhiteSpaceTable1Size = 7;
-static const int32_t kWhiteSpaceTable1[7] = {
- 1073741824, 10, 1073741864, 41, 47, 95, 4096 }; // NOLINT
+static const uint16_t kWhiteSpaceTable0Size = 4;
+static const int32_t kWhiteSpaceTable0[4] = {
+ 32, 160, 5760, 6158 }; // NOLINT
+static const uint16_t kWhiteSpaceTable1Size = 5;
+static const int32_t kWhiteSpaceTable1[5] = {
+ 1073741824, 10, 47, 95, 4096 }; // NOLINT
bool WhiteSpace::Is(uchar c) {
int chunk_index = c >> 13;
switch (chunk_index) {
@@ -1833,8 +1811,6 @@ int UnicodeData::GetByteCount() {
+ kLetterTable5Size * sizeof(int32_t) // NOLINT
+ kLetterTable6Size * sizeof(int32_t) // NOLINT
+ kLetterTable7Size * sizeof(int32_t) // NOLINT
- + kSpaceTable0Size * sizeof(int32_t) // NOLINT
- + kSpaceTable1Size * sizeof(int32_t) // NOLINT
+ kNumberTable0Size * sizeof(int32_t) // NOLINT
+ kNumberTable5Size * sizeof(int32_t) // NOLINT
+ kNumberTable7Size * sizeof(int32_t) // NOLINT
diff --git a/deps/v8/src/unicode.h b/deps/v8/src/unicode.h
index bb5506d38e..65a9af58fc 100644
--- a/deps/v8/src/unicode.h
+++ b/deps/v8/src/unicode.h
@@ -226,9 +226,6 @@ struct Lowercase {
struct Letter {
static bool Is(uchar c);
};
-struct Space {
- static bool Is(uchar c);
-};
struct Number {
static bool Is(uchar c);
};
diff --git a/deps/v8/src/unique.h b/deps/v8/src/unique.h
index a2f29e4335..2f6008c5a2 100644
--- a/deps/v8/src/unique.h
+++ b/deps/v8/src/unique.h
@@ -142,8 +142,12 @@ class Unique V8_FINAL {
friend class Unique; // For comparing raw_address values.
private:
+ Unique<T>() : raw_address_(NULL) { }
+
Address raw_address_;
Handle<T> handle_;
+
+ friend class SideEffectsTracker;
};
diff --git a/deps/v8/src/uri.h b/deps/v8/src/uri.h
index ee1baeb512..1e73ddd3d2 100644
--- a/deps/v8/src/uri.h
+++ b/deps/v8/src/uri.h
@@ -127,9 +127,11 @@ Handle<String> URIUnescape::UnescapeSlow(
int dest_position = 0;
Handle<String> second_part;
+ ASSERT(unescaped_length <= String::kMaxLength);
if (one_byte) {
Handle<SeqOneByteString> dest =
isolate->factory()->NewRawOneByteString(unescaped_length);
+ ASSERT(!dest.is_null());
DisallowHeapAllocation no_allocation;
Vector<const Char> vector = GetCharVector<Char>(string);
for (int i = start_index; i < length; dest_position++) {
@@ -142,6 +144,7 @@ Handle<String> URIUnescape::UnescapeSlow(
} else {
Handle<SeqTwoByteString> dest =
isolate->factory()->NewRawTwoByteString(unescaped_length);
+ ASSERT(!dest.is_null());
DisallowHeapAllocation no_allocation;
Vector<const Char> vector = GetCharVector<Char>(string);
for (int i = start_index; i < length; dest_position++) {
@@ -263,10 +266,7 @@ Handle<String> URIEscape::Escape(Isolate* isolate, Handle<String> string) {
// We don't allow strings that are longer than a maximal length.
ASSERT(String::kMaxLength < 0x7fffffff - 6); // Cannot overflow.
- if (escaped_length > String::kMaxLength) {
- isolate->context()->mark_out_of_memory();
- return Handle<String>::null();
- }
+ if (escaped_length > String::kMaxLength) break; // Provoke exception.
}
}
@@ -275,6 +275,7 @@ Handle<String> URIEscape::Escape(Isolate* isolate, Handle<String> string) {
Handle<SeqOneByteString> dest =
isolate->factory()->NewRawOneByteString(escaped_length);
+ RETURN_IF_EMPTY_HANDLE_VALUE(isolate, dest, Handle<String>());
int dest_position = 0;
{ DisallowHeapAllocation no_allocation;
diff --git a/deps/v8/src/utils.cc b/deps/v8/src/utils.cc
index 8462615200..6838cb069d 100644
--- a/deps/v8/src/utils.cc
+++ b/deps/v8/src/utils.cc
@@ -97,18 +97,4 @@ char* SimpleStringBuilder::Finalize() {
}
-const DivMagicNumbers DivMagicNumberFor(int32_t divisor) {
- switch (divisor) {
- case 3: return DivMagicNumberFor3;
- case 5: return DivMagicNumberFor5;
- case 7: return DivMagicNumberFor7;
- case 9: return DivMagicNumberFor9;
- case 11: return DivMagicNumberFor11;
- case 25: return DivMagicNumberFor25;
- case 125: return DivMagicNumberFor125;
- case 625: return DivMagicNumberFor625;
- default: return InvalidDivMagicNumber;
- }
-}
-
} } // namespace v8::internal
diff --git a/deps/v8/src/utils.h b/deps/v8/src/utils.h
index 2e7c494d63..753822614c 100644
--- a/deps/v8/src/utils.h
+++ b/deps/v8/src/utils.h
@@ -105,32 +105,6 @@ inline int MostSignificantBit(uint32_t x) {
}
-// Magic numbers for integer division.
-// These are kind of 2's complement reciprocal of the divisors.
-// Details and proofs can be found in:
-// - Hacker's Delight, Henry S. Warren, Jr.
-// - The PowerPC Compiler Writer’s Guide
-// and probably many others.
-// See details in the implementation of the algorithm in
-// lithium-codegen-arm.cc : LCodeGen::TryEmitSignedIntegerDivisionByConstant().
-struct DivMagicNumbers {
- unsigned M;
- unsigned s;
-};
-
-const DivMagicNumbers InvalidDivMagicNumber= {0, 0};
-const DivMagicNumbers DivMagicNumberFor3 = {0x55555556, 0};
-const DivMagicNumbers DivMagicNumberFor5 = {0x66666667, 1};
-const DivMagicNumbers DivMagicNumberFor7 = {0x92492493, 2};
-const DivMagicNumbers DivMagicNumberFor9 = {0x38e38e39, 1};
-const DivMagicNumbers DivMagicNumberFor11 = {0x2e8ba2e9, 1};
-const DivMagicNumbers DivMagicNumberFor25 = {0x51eb851f, 3};
-const DivMagicNumbers DivMagicNumberFor125 = {0x10624dd3, 3};
-const DivMagicNumbers DivMagicNumberFor625 = {0x68db8bad, 8};
-
-const DivMagicNumbers DivMagicNumberFor(int32_t divisor);
-
-
// The C++ standard leaves the semantics of '>>' undefined for
// negative signed operands. Most implementations do the right thing,
// though.
@@ -172,6 +146,17 @@ inline T RoundUp(T x, intptr_t m) {
}
+// Increment a pointer until it has the specified alignment.
+// This works like RoundUp, but it works correctly on pointer types where
+// sizeof(*pointer) might not be 1.
+template<class T>
+T AlignUp(T pointer, size_t alignment) {
+ ASSERT(sizeof(pointer) == sizeof(uintptr_t));
+ uintptr_t pointer_raw = reinterpret_cast<uintptr_t>(pointer);
+ return reinterpret_cast<T>(RoundUp(pointer_raw, alignment));
+}
+
+
template <typename T>
int Compare(const T& a, const T& b) {
if (a == b)
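The AlignUp helper added in the hunk above exists because RoundUp on a pointer type would effectively scale by sizeof(*pointer); going through uintptr_t keeps byte granularity. A self-contained sketch of the same idea (RoundUp is restated locally so this compiles on its own, and it assumes a power-of-two alignment like the real helper):

    #include <cassert>
    #include <cstddef>
    #include <cstdint>

    // Local restatement for illustration; the real versions live in utils.h
    // and additionally ASSERT that T is pointer-sized.
    static uintptr_t RoundUpInt(uintptr_t x, uintptr_t m) {
      return (x + m - 1) & ~(m - 1);  // m must be a power of two
    }

    template <class T>
    static T AlignUp(T pointer, size_t alignment) {
      uintptr_t raw = reinterpret_cast<uintptr_t>(pointer);
      return reinterpret_cast<T>(RoundUpInt(raw, alignment));
    }

    int main() {
      char buffer[64];
      // Force 16-byte alignment inside the buffer without scaling by the
      // pointee size.
      char* p = AlignUp(buffer + 1, 16);
      assert(reinterpret_cast<uintptr_t>(p) % 16 == 0);
      assert(p >= buffer + 1 && p < buffer + 64);
      return 0;
    }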
@@ -272,6 +257,12 @@ inline int StrLength(const char* string) {
}
+// TODO(svenpanne) Clean up the whole power-of-2 mess.
+inline int32_t WhichPowerOf2Abs(int32_t x) {
+ return (x == kMinInt) ? 31 : WhichPowerOf2(Abs(x));
+}
+
+
// ----------------------------------------------------------------------------
// BitField is a help template for encoding and decode bitfield with
// unsigned content.
@@ -1089,6 +1080,66 @@ class EnumSet {
T bits_;
};
+// Bit field extraction.
+inline uint32_t unsigned_bitextract_32(int msb, int lsb, uint32_t x) {
+ return (x >> lsb) & ((1 << (1 + msb - lsb)) - 1);
+}
+
+inline uint64_t unsigned_bitextract_64(int msb, int lsb, uint64_t x) {
+ return (x >> lsb) & ((static_cast<uint64_t>(1) << (1 + msb - lsb)) - 1);
+}
+
+inline int32_t signed_bitextract_32(int msb, int lsb, int32_t x) {
+ return (x << (31 - msb)) >> (lsb + 31 - msb);
+}
+
+inline int signed_bitextract_64(int msb, int lsb, int x) {
+ // TODO(jbramley): This is broken for big bitfields.
+ return (x << (63 - msb)) >> (lsb + 63 - msb);
+}
+
+// Check number width.
+inline bool is_intn(int64_t x, unsigned n) {
+ ASSERT((0 < n) && (n < 64));
+ int64_t limit = static_cast<int64_t>(1) << (n - 1);
+ return (-limit <= x) && (x < limit);
+}
+
+inline bool is_uintn(int64_t x, unsigned n) {
+ ASSERT((0 < n) && (n < (sizeof(x) * kBitsPerByte)));
+ return !(x >> n);
+}
+
+template <class T>
+inline T truncate_to_intn(T x, unsigned n) {
+ ASSERT((0 < n) && (n < (sizeof(x) * kBitsPerByte)));
+ return (x & ((static_cast<T>(1) << n) - 1));
+}
+
+#define INT_1_TO_63_LIST(V) \
+V(1) V(2) V(3) V(4) V(5) V(6) V(7) V(8) \
+V(9) V(10) V(11) V(12) V(13) V(14) V(15) V(16) \
+V(17) V(18) V(19) V(20) V(21) V(22) V(23) V(24) \
+V(25) V(26) V(27) V(28) V(29) V(30) V(31) V(32) \
+V(33) V(34) V(35) V(36) V(37) V(38) V(39) V(40) \
+V(41) V(42) V(43) V(44) V(45) V(46) V(47) V(48) \
+V(49) V(50) V(51) V(52) V(53) V(54) V(55) V(56) \
+V(57) V(58) V(59) V(60) V(61) V(62) V(63)
+
+#define DECLARE_IS_INT_N(N) \
+inline bool is_int##N(int64_t x) { return is_intn(x, N); }
+#define DECLARE_IS_UINT_N(N) \
+template <class T> \
+inline bool is_uint##N(T x) { return is_uintn(x, N); }
+#define DECLARE_TRUNCATE_TO_INT_N(N) \
+template <class T> \
+inline T truncate_to_int##N(T x) { return truncate_to_intn(x, N); }
+INT_1_TO_63_LIST(DECLARE_IS_INT_N)
+INT_1_TO_63_LIST(DECLARE_IS_UINT_N)
+INT_1_TO_63_LIST(DECLARE_TRUNCATE_TO_INT_N)
+#undef DECLARE_IS_INT_N
+#undef DECLARE_IS_UINT_N
+#undef DECLARE_TRUNCATE_TO_INT_N
class TypeFeedbackId {
public:
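The block of width checks (is_intn, is_uintn, truncate_to_intn and the generated is_int1..is_int63 family) plus the bit-extraction helpers presumably serve the new arm64 backend, which constantly asks whether an immediate fits in an n-bit field. A quick standalone check of what two of them compute, restated locally so the snippet compiles by itself:

    #include <cassert>
    #include <cstdint>

    // Restated for illustration; the real helpers in utils.h also ASSERT
    // that the requested width is in range.
    static bool is_intn(int64_t x, unsigned n) {
      int64_t limit = static_cast<int64_t>(1) << (n - 1);
      return (-limit <= x) && (x < limit);
    }

    static uint32_t unsigned_bitextract_32(int msb, int lsb, uint32_t x) {
      return (x >> lsb) & ((1 << (1 + msb - lsb)) - 1);
    }

    int main() {
      // is_int8 <=> the value fits in a signed 8-bit immediate.
      assert(is_intn(-128, 8) && is_intn(127, 8));
      assert(!is_intn(128, 8) && !is_intn(-129, 8));

      // Extract bits [7:4] of 0xAB -> 0xA.
      assert(unsigned_bitextract_32(7, 4, 0xABu) == 0xAu);
      return 0;
    }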
diff --git a/deps/v8/src/v8.cc b/deps/v8/src/v8.cc
index b89bb7a69b..b49e0eb5f2 100644
--- a/deps/v8/src/v8.cc
+++ b/deps/v8/src/v8.cc
@@ -82,6 +82,8 @@ bool V8::Initialize(Deserializer* des) {
#ifdef V8_USE_DEFAULT_PLATFORM
DefaultPlatform* platform = static_cast<DefaultPlatform*>(platform_);
platform->SetThreadPoolSize(isolate->max_available_threads());
+ // We currently only start the threads early if we know that we'll use them.
+ if (FLAG_job_based_sweeping) platform->EnsureInitialized();
#endif
return isolate->Init(des);
@@ -148,15 +150,16 @@ void V8::RemoveCallCompletedCallback(CallCompletedCallback callback) {
void V8::FireCallCompletedCallback(Isolate* isolate) {
bool has_call_completed_callbacks = call_completed_callbacks_ != NULL;
- bool microtask_pending = isolate->microtask_pending();
- if (!has_call_completed_callbacks && !microtask_pending) return;
+ bool run_microtasks = isolate->autorun_microtasks() &&
+ isolate->microtask_pending();
+ if (!has_call_completed_callbacks && !run_microtasks) return;
HandleScopeImplementer* handle_scope_implementer =
isolate->handle_scope_implementer();
if (!handle_scope_implementer->CallDepthIsZero()) return;
// Fire callbacks. Increase call depth to prevent recursive callbacks.
handle_scope_implementer->IncrementCallDepth();
- if (microtask_pending) Execution::RunMicrotasks(isolate);
+ if (run_microtasks) Execution::RunMicrotasks(isolate);
if (has_call_completed_callbacks) {
for (int i = 0; i < call_completed_callbacks_->length(); i++) {
call_completed_callbacks_->at(i)();
@@ -166,15 +169,27 @@ void V8::FireCallCompletedCallback(Isolate* isolate) {
}
+void V8::RunMicrotasks(Isolate* isolate) {
+ if (!isolate->microtask_pending())
+ return;
+
+ HandleScopeImplementer* handle_scope_implementer =
+ isolate->handle_scope_implementer();
+ ASSERT(handle_scope_implementer->CallDepthIsZero());
+
+ // Increase call depth to prevent recursive callbacks.
+ handle_scope_implementer->IncrementCallDepth();
+ Execution::RunMicrotasks(isolate);
+ handle_scope_implementer->DecrementCallDepth();
+}
+
+
void V8::InitializeOncePerProcessImpl() {
FlagList::EnforceFlagImplications();
- if (FLAG_predictable) {
- if (FLAG_random_seed == 0) {
- // Avoid random seeds in predictable mode.
- FLAG_random_seed = 12347;
- }
- FLAG_hash_seed = 0;
+ if (FLAG_predictable && FLAG_random_seed == 0) {
+ // Avoid random seeds in predictable mode.
+ FLAG_random_seed = 12347;
}
if (FLAG_stress_compaction) {
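Both FireCallCompletedCallback and the new V8::RunMicrotasks wrap Execution::RunMicrotasks in the handle-scope implementer's call-depth bookkeeping, so a microtask that re-enters script does not recursively re-trigger the runner. A minimal sketch of that re-entrancy guard, with a plain counter standing in for HandleScopeImplementer:

    #include <cassert>

    // Stand-in for HandleScopeImplementer's call-depth bookkeeping.
    struct DepthGuardState {
      int call_depth = 0;
      bool pending = false;
    };

    static void RunPendingTasks(DepthGuardState* s);

    // Pattern from the patch: only run at depth zero, and bump the depth
    // while running so nested completions don't recurse into the runner.
    static void FireCompleted(DepthGuardState* s) {
      if (s->call_depth != 0) return;   // CallDepthIsZero() check
      s->call_depth++;                  // IncrementCallDepth()
      RunPendingTasks(s);
      s->call_depth--;                  // DecrementCallDepth()
    }

    static void RunPendingTasks(DepthGuardState* s) {
      if (!s->pending) return;
      s->pending = false;
      // A task that itself signals completion; the guard stops the loop.
      FireCompleted(s);
    }

    int main() {
      DepthGuardState s;
      s.pending = true;
      FireCompleted(&s);
      assert(!s.pending && s.call_depth == 0);
      return 0;
    }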
diff --git a/deps/v8/src/v8.h b/deps/v8/src/v8.h
index 8069e8adda..d3f5a9c839 100644
--- a/deps/v8/src/v8.h
+++ b/deps/v8/src/v8.h
@@ -101,6 +101,8 @@ class V8 : public AllStatic {
static void RemoveCallCompletedCallback(CallCompletedCallback callback);
static void FireCallCompletedCallback(Isolate* isolate);
+ static void RunMicrotasks(Isolate* isolate);
+
static v8::ArrayBuffer::Allocator* ArrayBufferAllocator() {
return array_buffer_allocator_;
}
diff --git a/deps/v8/src/v8globals.h b/deps/v8/src/v8globals.h
index 7d8d1b7e40..e6cd94df23 100644
--- a/deps/v8/src/v8globals.h
+++ b/deps/v8/src/v8globals.h
@@ -133,6 +133,7 @@ class Heap;
class HeapObject;
class IC;
class InterceptorInfo;
+class Isolate;
class JSReceiver;
class JSArray;
class JSFunction;
@@ -465,11 +466,11 @@ enum VariableMode {
// User declared variables:
VAR, // declared via 'var', and 'function' declarations
- CONST, // declared via 'const' declarations
+ CONST_LEGACY, // declared via legacy 'const' declarations
LET, // declared via 'let' declarations (first lexical)
- CONST_HARMONY, // declared via 'const' declarations in harmony mode
+ CONST, // declared via 'const' declarations
MODULE, // declared via 'module' declaration (last lexical)
@@ -510,7 +511,7 @@ inline bool IsLexicalVariableMode(VariableMode mode) {
inline bool IsImmutableVariableMode(VariableMode mode) {
- return mode == CONST || (mode >= CONST_HARMONY && mode <= MODULE);
+ return (mode >= CONST && mode <= MODULE) || mode == CONST_LEGACY;
}
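The VariableMode shuffle is not just a rename: placing ES6 CONST next to MODULE lets the rewritten IsImmutableVariableMode test a contiguous range and treat CONST_LEGACY as the one outlier. A toy enum with the same ordering, illustrating that range check (the real enum has more members):

    #include <cassert>

    // Illustrative subset of VariableMode in the new order.
    enum ToyVariableMode {
      kVar,
      kConstLegacy,   // legacy 'const'
      kLet,
      kConst,         // ES6 'const' (was CONST_HARMONY)
      kModule
    };

    static bool IsImmutable(ToyVariableMode mode) {
      // Mirrors IsImmutableVariableMode: a contiguous CONST..MODULE range
      // plus the legacy mode that now sits outside it.
      return (mode >= kConst && mode <= kModule) || mode == kConstLegacy;
    }

    int main() {
      assert(IsImmutable(kConst));
      assert(IsImmutable(kConstLegacy));
      assert(IsImmutable(kModule));
      assert(!IsImmutable(kLet));
      assert(!IsImmutable(kVar));
      return 0;
    }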
diff --git a/deps/v8/src/v8natives.js b/deps/v8/src/v8natives.js
index df663c025e..f183afb968 100644
--- a/deps/v8/src/v8natives.js
+++ b/deps/v8/src/v8natives.js
@@ -1282,7 +1282,7 @@ function ObjectFreeze(obj) {
throw MakeTypeError("called_on_non_object", ["Object.freeze"]);
}
var isProxy = %IsJSProxy(obj);
- if (isProxy || %HasNonStrictArgumentsElements(obj) || %IsObserved(obj)) {
+ if (isProxy || %HasSloppyArgumentsElements(obj) || %IsObserved(obj)) {
if (isProxy) {
ProxyFix(obj);
}
@@ -1384,15 +1384,19 @@ function ObjectIs(obj1, obj2) {
}
-// Harmony __proto__ getter.
+// ECMA-262, Edition 6, section B.2.2.1.1
function ObjectGetProto() {
- return %GetPrototype(this);
+ return %GetPrototype(ToObject(this));
}
-// Harmony __proto__ setter.
-function ObjectSetProto(obj) {
- return %SetPrototype(this, obj);
+// ECMA-262, Edition 6, section B.2.2.1.2
+function ObjectSetProto(proto) {
+ CHECK_OBJECT_COERCIBLE(this, "Object.prototype.__proto__");
+
+ if ((IS_SPEC_OBJECT(proto) || IS_NULL(proto)) && IS_SPEC_OBJECT(this)) {
+ %SetPrototype(this, proto);
+ }
}
@@ -1889,10 +1893,30 @@ SetUpFunction();
// Eventually, we should move to a real event queue that allows us to maintain
// relative ordering of different kinds of tasks.
-RunMicrotasks.runners = new InternalArray;
+function GetMicrotaskQueue() {
+ var microtaskState = %GetMicrotaskState();
+ if (IS_UNDEFINED(microtaskState.queue)) {
+ microtaskState.queue = new InternalArray;
+ }
+ return microtaskState.queue;
+}
function RunMicrotasks() {
while (%SetMicrotaskPending(false)) {
- for (var i in RunMicrotasks.runners) RunMicrotasks.runners[i]();
+ var microtaskState = %GetMicrotaskState();
+ if (IS_UNDEFINED(microtaskState.queue))
+ return;
+
+ var microtasks = microtaskState.queue;
+ microtaskState.queue = new InternalArray;
+
+ for (var i = 0; i < microtasks.length; i++) {
+ microtasks[i]();
+ }
}
}
+
+function EnqueueExternalMicrotask(fn) {
+ GetMicrotaskQueue().push(fn);
+ %SetMicrotaskPending(true);
+}
diff --git a/deps/v8/src/variables.cc b/deps/v8/src/variables.cc
index 488da42ce6..6c4ea527ca 100644
--- a/deps/v8/src/variables.cc
+++ b/deps/v8/src/variables.cc
@@ -40,9 +40,9 @@ namespace internal {
const char* Variable::Mode2String(VariableMode mode) {
switch (mode) {
case VAR: return "VAR";
- case CONST: return "CONST";
+ case CONST_LEGACY: return "CONST_LEGACY";
case LET: return "LET";
- case CONST_HARMONY: return "CONST_HARMONY";
+ case CONST: return "CONST";
case MODULE: return "MODULE";
case DYNAMIC: return "DYNAMIC";
case DYNAMIC_GLOBAL: return "DYNAMIC_GLOBAL";
diff --git a/deps/v8/src/variables.h b/deps/v8/src/variables.h
index 39451d5dfb..401d044463 100644
--- a/deps/v8/src/variables.h
+++ b/deps/v8/src/variables.h
@@ -168,7 +168,7 @@ class Variable: public ZoneObject {
// If this field is set, this variable references the stored locally bound
// variable, but it might be shadowed by variable bindings introduced by
- // non-strict 'eval' calls between the reference scope (inclusive) and the
+ // sloppy 'eval' calls between the reference scope (inclusive) and the
// binding scope (exclusive).
Variable* local_if_not_shadowed_;
diff --git a/deps/v8/src/version.cc b/deps/v8/src/version.cc
index 9ba044d5a6..904b067a6a 100644
--- a/deps/v8/src/version.cc
+++ b/deps/v8/src/version.cc
@@ -33,9 +33,9 @@
// NOTE these macros are used by some of the tool scripts and the build
// system so their names cannot be changed without changing the scripts.
#define MAJOR_VERSION 3
-#define MINOR_VERSION 24
-#define BUILD_NUMBER 35
-#define PATCH_LEVEL 22
+#define MINOR_VERSION 25
+#define BUILD_NUMBER 30
+#define PATCH_LEVEL 0
// Use 1 for candidates and 0 otherwise.
// (Boolean macro values are not supported by all preprocessors.)
#define IS_CANDIDATE_VERSION 0
diff --git a/deps/v8/src/vm-state-inl.h b/deps/v8/src/vm-state-inl.h
index 658773e6d6..5bee438b65 100644
--- a/deps/v8/src/vm-state-inl.h
+++ b/deps/v8/src/vm-state-inl.h
@@ -85,8 +85,7 @@ ExternalCallbackScope::ExternalCallbackScope(Isolate* isolate, Address callback)
callback_(callback),
previous_scope_(isolate->external_callback_scope()) {
#ifdef USE_SIMULATOR
- int32_t sp = Simulator::current(isolate)->get_register(Simulator::sp);
- scope_address_ = reinterpret_cast<Address>(static_cast<intptr_t>(sp));
+ scope_address_ = Simulator::current(isolate)->get_sp();
#endif
isolate_->set_external_callback_scope(this);
}
diff --git a/deps/v8/src/weak_collection.js b/deps/v8/src/weak_collection.js
new file mode 100644
index 0000000000..81d4ab536e
--- /dev/null
+++ b/deps/v8/src/weak_collection.js
@@ -0,0 +1,206 @@
+// Copyright 2012 the V8 project authors. All rights reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following
+// disclaimer in the documentation and/or other materials provided
+// with the distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived
+// from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+"use strict";
+
+// This file relies on the fact that the following declaration has been made
+// in runtime.js:
+// var $Array = global.Array;
+
+var $WeakMap = global.WeakMap;
+var $WeakSet = global.WeakSet;
+
+
+// -------------------------------------------------------------------
+// Harmony WeakMap
+
+function WeakMapConstructor() {
+ if (%_IsConstructCall()) {
+ %WeakCollectionInitialize(this);
+ } else {
+ throw MakeTypeError('constructor_not_function', ['WeakMap']);
+ }
+}
+
+
+function WeakMapGet(key) {
+ if (!IS_WEAKMAP(this)) {
+ throw MakeTypeError('incompatible_method_receiver',
+ ['WeakMap.prototype.get', this]);
+ }
+ if (!(IS_SPEC_OBJECT(key) || IS_SYMBOL(key))) {
+ throw %MakeTypeError('invalid_weakmap_key', [this, key]);
+ }
+ return %WeakCollectionGet(this, key);
+}
+
+
+function WeakMapSet(key, value) {
+ if (!IS_WEAKMAP(this)) {
+ throw MakeTypeError('incompatible_method_receiver',
+ ['WeakMap.prototype.set', this]);
+ }
+ if (!(IS_SPEC_OBJECT(key) || IS_SYMBOL(key))) {
+ throw %MakeTypeError('invalid_weakmap_key', [this, key]);
+ }
+ return %WeakCollectionSet(this, key, value);
+}
+
+
+function WeakMapHas(key) {
+ if (!IS_WEAKMAP(this)) {
+ throw MakeTypeError('incompatible_method_receiver',
+ ['WeakMap.prototype.has', this]);
+ }
+ if (!(IS_SPEC_OBJECT(key) || IS_SYMBOL(key))) {
+ throw %MakeTypeError('invalid_weakmap_key', [this, key]);
+ }
+ return %WeakCollectionHas(this, key);
+}
+
+
+function WeakMapDelete(key) {
+ if (!IS_WEAKMAP(this)) {
+ throw MakeTypeError('incompatible_method_receiver',
+ ['WeakMap.prototype.delete', this]);
+ }
+ if (!(IS_SPEC_OBJECT(key) || IS_SYMBOL(key))) {
+ throw %MakeTypeError('invalid_weakmap_key', [this, key]);
+ }
+ return %WeakCollectionDelete(this, key);
+}
+
+
+function WeakMapClear() {
+ if (!IS_WEAKMAP(this)) {
+ throw MakeTypeError('incompatible_method_receiver',
+ ['WeakMap.prototype.clear', this]);
+ }
+ // Replace the internal table with a new empty table.
+ %WeakCollectionInitialize(this);
+}
+
+
+// -------------------------------------------------------------------
+
+function SetUpWeakMap() {
+ %CheckIsBootstrapping();
+
+ %SetCode($WeakMap, WeakMapConstructor);
+ %FunctionSetPrototype($WeakMap, new $Object());
+ %SetProperty($WeakMap.prototype, "constructor", $WeakMap, DONT_ENUM);
+
+ // Set up the non-enumerable functions on the WeakMap prototype object.
+ InstallFunctions($WeakMap.prototype, DONT_ENUM, $Array(
+ "get", WeakMapGet,
+ "set", WeakMapSet,
+ "has", WeakMapHas,
+ "delete", WeakMapDelete,
+ "clear", WeakMapClear
+ ));
+}
+
+SetUpWeakMap();
+
+
+// -------------------------------------------------------------------
+// Harmony WeakSet
+
+function WeakSetConstructor() {
+ if (%_IsConstructCall()) {
+ %WeakCollectionInitialize(this);
+ } else {
+ throw MakeTypeError('constructor_not_function', ['WeakSet']);
+ }
+}
+
+
+function WeakSetAdd(value) {
+ if (!IS_WEAKSET(this)) {
+ throw MakeTypeError('incompatible_method_receiver',
+ ['WeakSet.prototype.add', this]);
+ }
+ if (!(IS_SPEC_OBJECT(value) || IS_SYMBOL(value))) {
+ throw %MakeTypeError('invalid_weakset_value', [this, value]);
+ }
+ return %WeakCollectionSet(this, value, true);
+}
+
+
+function WeakSetHas(value) {
+ if (!IS_WEAKSET(this)) {
+ throw MakeTypeError('incompatible_method_receiver',
+ ['WeakSet.prototype.has', this]);
+ }
+ if (!(IS_SPEC_OBJECT(value) || IS_SYMBOL(value))) {
+ throw %MakeTypeError('invalid_weakset_value', [this, value]);
+ }
+ return %WeakCollectionHas(this, value);
+}
+
+
+function WeakSetDelete(value) {
+ if (!IS_WEAKSET(this)) {
+ throw MakeTypeError('incompatible_method_receiver',
+ ['WeakSet.prototype.delete', this]);
+ }
+ if (!(IS_SPEC_OBJECT(value) || IS_SYMBOL(value))) {
+ throw %MakeTypeError('invalid_weakset_value', [this, value]);
+ }
+ return %WeakCollectionDelete(this, value);
+}
+
+
+function WeakSetClear() {
+ if (!IS_WEAKSET(this)) {
+ throw MakeTypeError('incompatible_method_receiver',
+ ['WeakSet.prototype.clear', this]);
+ }
+ // Replace the internal table with a new empty table.
+ %WeakCollectionInitialize(this);
+}
+
+
+// -------------------------------------------------------------------
+
+function SetUpWeakSet() {
+ %CheckIsBootstrapping();
+
+ %SetCode($WeakSet, WeakSetConstructor);
+ %FunctionSetPrototype($WeakSet, new $Object());
+ %SetProperty($WeakSet.prototype, "constructor", $WeakSet, DONT_ENUM);
+
+ // Set up the non-enumerable functions on the WeakSet prototype object.
+ InstallFunctions($WeakSet.prototype, DONT_ENUM, $Array(
+ "add", WeakSetAdd,
+ "has", WeakSetHas,
+ "delete", WeakSetDelete,
+ "clear", WeakSetClear
+ ));
+}
+
+SetUpWeakSet();
diff --git a/deps/v8/src/win32-headers.h b/deps/v8/src/win32-headers.h
index 98b0120ea1..ba595b97d4 100644
--- a/deps/v8/src/win32-headers.h
+++ b/deps/v8/src/win32-headers.h
@@ -94,6 +94,7 @@
#undef NONE
#undef ANY
#undef IGNORE
+#undef STRICT
#undef GetObject
#undef CreateSemaphore
#undef Yield
diff --git a/deps/v8/src/x64/assembler-x64-inl.h b/deps/v8/src/x64/assembler-x64-inl.h
index 073fcbe8e9..a559b62758 100644
--- a/deps/v8/src/x64/assembler-x64-inl.h
+++ b/deps/v8/src/x64/assembler-x64-inl.h
@@ -205,12 +205,15 @@ void Assembler::emit_optional_rex_32(const Operand& op) {
}
-Address Assembler::target_address_at(Address pc) {
+Address Assembler::target_address_at(Address pc,
+ ConstantPoolArray* constant_pool) {
return Memory::int32_at(pc) + pc + 4;
}
-void Assembler::set_target_address_at(Address pc, Address target) {
+void Assembler::set_target_address_at(Address pc,
+ ConstantPoolArray* constant_pool,
+ Address target) {
Memory::int32_at(pc) = static_cast<int32_t>(target - pc - 4);
CPU::FlushICache(pc, sizeof(int32_t));
}
@@ -255,7 +258,7 @@ void RelocInfo::apply(intptr_t delta) {
Address RelocInfo::target_address() {
ASSERT(IsCodeTarget(rmode_) || IsRuntimeEntry(rmode_));
- return Assembler::target_address_at(pc_);
+ return Assembler::target_address_at(pc_, host_);
}
@@ -267,6 +270,12 @@ Address RelocInfo::target_address_address() {
}
+Address RelocInfo::constant_pool_entry_address() {
+ UNREACHABLE();
+ return NULL;
+}
+
+
int RelocInfo::target_address_size() {
if (IsCodedSpecially()) {
return Assembler::kSpecialTargetSize;
@@ -278,7 +287,7 @@ int RelocInfo::target_address_size() {
void RelocInfo::set_target_address(Address target, WriteBarrierMode mode) {
ASSERT(IsCodeTarget(rmode_) || IsRuntimeEntry(rmode_));
- Assembler::set_target_address_at(pc_, target);
+ Assembler::set_target_address_at(pc_, host_, target);
if (mode == UPDATE_WRITE_BARRIER && host() != NULL && IsCodeTarget(rmode_)) {
Object* target_code = Code::GetCodeFromTargetAddress(target);
host()->GetHeap()->incremental_marking()->RecordWriteIntoCode(
@@ -369,7 +378,7 @@ void RelocInfo::WipeOut() {
Memory::Address_at(pc_) = NULL;
} else if (IsCodeTarget(rmode_) || IsRuntimeEntry(rmode_)) {
// Effectively write zero into the relocation.
- Assembler::set_target_address_at(pc_, pc_ + sizeof(int32_t));
+ Assembler::set_target_address_at(pc_, host_, pc_ + sizeof(int32_t));
} else {
UNREACHABLE();
}
@@ -408,14 +417,14 @@ Code* RelocInfo::code_age_stub() {
ASSERT(rmode_ == RelocInfo::CODE_AGE_SEQUENCE);
ASSERT(*pc_ == kCallOpcode);
return Code::GetCodeFromTargetAddress(
- Assembler::target_address_at(pc_ + 1));
+ Assembler::target_address_at(pc_ + 1, host_));
}
void RelocInfo::set_code_age_stub(Code* stub) {
ASSERT(*pc_ == kCallOpcode);
ASSERT(rmode_ == RelocInfo::CODE_AGE_SEQUENCE);
- Assembler::set_target_address_at(pc_ + 1, stub->instruction_start());
+ Assembler::set_target_address_at(pc_ + 1, host_, stub->instruction_start());
}
diff --git a/deps/v8/src/x64/assembler-x64.cc b/deps/v8/src/x64/assembler-x64.cc
index e7c20bb150..60383da015 100644
--- a/deps/v8/src/x64/assembler-x64.cc
+++ b/deps/v8/src/x64/assembler-x64.cc
@@ -110,7 +110,8 @@ void RelocInfo::PatchCodeWithCall(Address target, int guard_bytes) {
#endif
// Patch the code.
- patcher.masm()->movp(kScratchRegister, target, Assembler::RelocInfoNone());
+ patcher.masm()->movp(kScratchRegister, reinterpret_cast<void*>(target),
+ Assembler::RelocInfoNone());
patcher.masm()->call(kScratchRegister);
// Check that the size of the code generated is as expected.
@@ -750,6 +751,15 @@ void Assembler::bts(const Operand& dst, Register src) {
}
+void Assembler::bsrl(Register dst, Register src) {
+ EnsureSpace ensure_space(this);
+ emit_optional_rex_32(dst, src);
+ emit(0x0F);
+ emit(0xBD);
+ emit_modrm(dst, src);
+}
+
+
void Assembler::call(Label* L) {
positions_recorder()->WriteRecordedPositions();
EnsureSpace ensure_space(this);
@@ -934,33 +944,17 @@ void Assembler::cqo() {
}
-void Assembler::decq(Register dst) {
- EnsureSpace ensure_space(this);
- emit_rex_64(dst);
- emit(0xFF);
- emit_modrm(0x1, dst);
-}
-
-
-void Assembler::decq(const Operand& dst) {
+void Assembler::emit_dec(Register dst, int size) {
EnsureSpace ensure_space(this);
- emit_rex_64(dst);
- emit(0xFF);
- emit_operand(1, dst);
-}
-
-
-void Assembler::decl(Register dst) {
- EnsureSpace ensure_space(this);
- emit_optional_rex_32(dst);
+ emit_rex(dst, size);
emit(0xFF);
emit_modrm(0x1, dst);
}
-void Assembler::decl(const Operand& dst) {
+void Assembler::emit_dec(const Operand& dst, int size) {
EnsureSpace ensure_space(this);
- emit_optional_rex_32(dst);
+ emit_rex(dst, size);
emit(0xFF);
emit_operand(1, dst);
}
@@ -999,84 +993,43 @@ void Assembler::hlt() {
}
-void Assembler::idivq(Register src) {
- EnsureSpace ensure_space(this);
- emit_rex_64(src);
- emit(0xF7);
- emit_modrm(0x7, src);
-}
-
-
-void Assembler::idivl(Register src) {
+void Assembler::emit_idiv(Register src, int size) {
EnsureSpace ensure_space(this);
- emit_optional_rex_32(src);
+ emit_rex(src, size);
emit(0xF7);
emit_modrm(0x7, src);
}
-void Assembler::imul(Register src) {
+void Assembler::emit_imul(Register src, int size) {
EnsureSpace ensure_space(this);
- emit_rex_64(src);
+ emit_rex(src, size);
emit(0xF7);
emit_modrm(0x5, src);
}
-void Assembler::imul(Register dst, Register src) {
- EnsureSpace ensure_space(this);
- emit_rex_64(dst, src);
- emit(0x0F);
- emit(0xAF);
- emit_modrm(dst, src);
-}
-
-
-void Assembler::imul(Register dst, const Operand& src) {
- EnsureSpace ensure_space(this);
- emit_rex_64(dst, src);
- emit(0x0F);
- emit(0xAF);
- emit_operand(dst, src);
-}
-
-
-void Assembler::imul(Register dst, Register src, Immediate imm) {
- EnsureSpace ensure_space(this);
- emit_rex_64(dst, src);
- if (is_int8(imm.value_)) {
- emit(0x6B);
- emit_modrm(dst, src);
- emit(imm.value_);
- } else {
- emit(0x69);
- emit_modrm(dst, src);
- emitl(imm.value_);
- }
-}
-
-
-void Assembler::imull(Register dst, Register src) {
+void Assembler::emit_imul(Register dst, Register src, int size) {
EnsureSpace ensure_space(this);
- emit_optional_rex_32(dst, src);
+ emit_rex(dst, src, size);
emit(0x0F);
emit(0xAF);
emit_modrm(dst, src);
}
-void Assembler::imull(Register dst, const Operand& src) {
+void Assembler::emit_imul(Register dst, const Operand& src, int size) {
EnsureSpace ensure_space(this);
- emit_optional_rex_32(dst, src);
+ emit_rex(dst, src, size);
emit(0x0F);
emit(0xAF);
emit_operand(dst, src);
}
-void Assembler::imull(Register dst, Register src, Immediate imm) {
+void Assembler::emit_imul(Register dst, Register src, Immediate imm, int size) {
EnsureSpace ensure_space(this);
- emit_optional_rex_32(dst, src);
+ emit_rex(dst, src, size);
if (is_int8(imm.value_)) {
emit(0x6B);
emit_modrm(dst, src);
@@ -1089,38 +1042,22 @@ void Assembler::imull(Register dst, Register src, Immediate imm) {
}
-void Assembler::incq(Register dst) {
+void Assembler::emit_inc(Register dst, int size) {
EnsureSpace ensure_space(this);
- emit_rex_64(dst);
+ emit_rex(dst, size);
emit(0xFF);
emit_modrm(0x0, dst);
}
-void Assembler::incq(const Operand& dst) {
+void Assembler::emit_inc(const Operand& dst, int size) {
EnsureSpace ensure_space(this);
- emit_rex_64(dst);
- emit(0xFF);
- emit_operand(0, dst);
-}
-
-
-void Assembler::incl(const Operand& dst) {
- EnsureSpace ensure_space(this);
- emit_optional_rex_32(dst);
+ emit_rex(dst, size);
emit(0xFF);
emit_operand(0, dst);
}
-void Assembler::incl(Register dst) {
- EnsureSpace ensure_space(this);
- emit_optional_rex_32(dst);
- emit(0xFF);
- emit_modrm(0, dst);
-}
-
-
void Assembler::int3() {
EnsureSpace ensure_space(this);
emit(0xCC);
@@ -1287,17 +1224,9 @@ void Assembler::jmp(const Operand& src) {
}
-void Assembler::lea(Register dst, const Operand& src) {
- EnsureSpace ensure_space(this);
- emit_rex_64(dst, src);
- emit(0x8D);
- emit_operand(dst, src);
-}
-
-
-void Assembler::leal(Register dst, const Operand& src) {
+void Assembler::emit_lea(Register dst, const Operand& src, int size) {
EnsureSpace ensure_space(this);
- emit_optional_rex_32(dst, src);
+ emit_rex(dst, src, size);
emit(0x8D);
emit_operand(dst, src);
}
@@ -1536,7 +1465,7 @@ void Assembler::movsxlq(Register dst, const Operand& src) {
}
-void Assembler::movzxbq(Register dst, const Operand& src) {
+void Assembler::emit_movzxb(Register dst, const Operand& src, int size) {
EnsureSpace ensure_space(this);
// 32 bit operations zero the top 32 bits of 64 bit registers. Therefore
// there is no need to make this a 64 bit operation.
@@ -1547,26 +1476,10 @@ void Assembler::movzxbq(Register dst, const Operand& src) {
}
-void Assembler::movzxbl(Register dst, const Operand& src) {
- EnsureSpace ensure_space(this);
- emit_optional_rex_32(dst, src);
- emit(0x0F);
- emit(0xB6);
- emit_operand(dst, src);
-}
-
-
-void Assembler::movzxwq(Register dst, const Operand& src) {
- EnsureSpace ensure_space(this);
- emit_optional_rex_32(dst, src);
- emit(0x0F);
- emit(0xB7);
- emit_operand(dst, src);
-}
-
-
-void Assembler::movzxwl(Register dst, const Operand& src) {
+void Assembler::emit_movzxw(Register dst, const Operand& src, int size) {
EnsureSpace ensure_space(this);
+ // 32 bit operations zero the top 32 bits of 64 bit registers. Therefore
+ // there is no need to make this a 64 bit operation.
emit_optional_rex_32(dst, src);
emit(0x0F);
emit(0xB7);
@@ -1574,8 +1487,10 @@ void Assembler::movzxwl(Register dst, const Operand& src) {
}
-void Assembler::movzxwl(Register dst, Register src) {
+void Assembler::emit_movzxw(Register dst, Register src, int size) {
EnsureSpace ensure_space(this);
+ // 32 bit operations zero the top 32 bits of 64 bit registers. Therefore
+ // there is no need to make this a 64 bit operation.
emit_optional_rex_32(dst, src);
emit(0x0F);
emit(0xB7);
@@ -1598,17 +1513,10 @@ void Assembler::repmovsw() {
}
-void Assembler::repmovsl() {
+void Assembler::emit_repmovs(int size) {
EnsureSpace ensure_space(this);
emit(0xF3);
- emit(0xA5);
-}
-
-
-void Assembler::repmovsq() {
- EnsureSpace ensure_space(this);
- emit(0xF3);
- emit_rex_64();
+ emit_rex(size);
emit(0xA5);
}
@@ -1621,23 +1529,15 @@ void Assembler::mul(Register src) {
}
-void Assembler::neg(Register dst) {
+void Assembler::emit_neg(Register dst, int size) {
EnsureSpace ensure_space(this);
- emit_rex_64(dst);
- emit(0xF7);
- emit_modrm(0x3, dst);
-}
-
-
-void Assembler::negl(Register dst) {
- EnsureSpace ensure_space(this);
- emit_optional_rex_32(dst);
+ emit_rex(dst, size);
emit(0xF7);
emit_modrm(0x3, dst);
}
-void Assembler::neg(const Operand& dst) {
+void Assembler::emit_neg(const Operand& dst, int size) {
EnsureSpace ensure_space(this);
emit_rex_64(dst);
emit(0xF7);
@@ -1651,30 +1551,22 @@ void Assembler::nop() {
}
-void Assembler::not_(Register dst) {
+void Assembler::emit_not(Register dst, int size) {
EnsureSpace ensure_space(this);
- emit_rex_64(dst);
+ emit_rex(dst, size);
emit(0xF7);
emit_modrm(0x2, dst);
}
-void Assembler::not_(const Operand& dst) {
+void Assembler::emit_not(const Operand& dst, int size) {
EnsureSpace ensure_space(this);
- emit_rex_64(dst);
+ emit_rex(dst, size);
emit(0xF7);
emit_operand(2, dst);
}
-void Assembler::notl(Register dst) {
- EnsureSpace ensure_space(this);
- emit_optional_rex_32(dst);
- emit(0xF7);
- emit_modrm(0x2, dst);
-}
-
-
void Assembler::Nop(int n) {
// The recommended multi-byte sequences of NOP instructions from the Intel 64
// and IA-32 Architectures Software Developer's Manual.
@@ -1752,14 +1644,14 @@ void Assembler::Nop(int n) {
}
-void Assembler::pop(Register dst) {
+void Assembler::popq(Register dst) {
EnsureSpace ensure_space(this);
emit_optional_rex_32(dst);
emit(0x58 | dst.low_bits());
}
-void Assembler::pop(const Operand& dst) {
+void Assembler::popq(const Operand& dst) {
EnsureSpace ensure_space(this);
emit_optional_rex_32(dst);
emit(0x8F);
@@ -1773,14 +1665,14 @@ void Assembler::popfq() {
}
-void Assembler::push(Register src) {
+void Assembler::pushq(Register src) {
EnsureSpace ensure_space(this);
emit_optional_rex_32(src);
emit(0x50 | src.low_bits());
}
-void Assembler::push(const Operand& src) {
+void Assembler::pushq(const Operand& src) {
EnsureSpace ensure_space(this);
emit_optional_rex_32(src);
emit(0xFF);
@@ -1788,7 +1680,7 @@ void Assembler::push(const Operand& src) {
}
-void Assembler::push(Immediate value) {
+void Assembler::pushq(Immediate value) {
EnsureSpace ensure_space(this);
if (is_int8(value.value_)) {
emit(0x6A);
@@ -1800,7 +1692,7 @@ void Assembler::push(Immediate value) {
}
-void Assembler::push_imm32(int32_t imm32) {
+void Assembler::pushq_imm32(int32_t imm32) {
EnsureSpace ensure_space(this);
emit(0x68);
emitl(imm32);
@@ -1860,36 +1752,18 @@ void Assembler::shrd(Register dst, Register src) {
}
-void Assembler::xchgq(Register dst, Register src) {
+void Assembler::emit_xchg(Register dst, Register src, int size) {
EnsureSpace ensure_space(this);
if (src.is(rax) || dst.is(rax)) { // Single-byte encoding
Register other = src.is(rax) ? dst : src;
- emit_rex_64(other);
+ emit_rex(other, size);
emit(0x90 | other.low_bits());
} else if (dst.low_bits() == 4) {
- emit_rex_64(dst, src);
- emit(0x87);
- emit_modrm(dst, src);
- } else {
- emit_rex_64(src, dst);
- emit(0x87);
- emit_modrm(src, dst);
- }
-}
-
-
-void Assembler::xchgl(Register dst, Register src) {
- EnsureSpace ensure_space(this);
- if (src.is(rax) || dst.is(rax)) { // Single-byte encoding
- Register other = src.is(rax) ? dst : src;
- emit_optional_rex_32(other);
- emit(0x90 | other.low_bits());
- } else if (dst.low_bits() == 4) {
- emit_optional_rex_32(dst, src);
+ emit_rex(dst, src, size);
emit(0x87);
emit_modrm(dst, src);
} else {
- emit_optional_rex_32(src, dst);
+ emit_rex(src, dst, size);
emit(0x87);
emit_modrm(src, dst);
}
@@ -1977,21 +1851,21 @@ void Assembler::testb(const Operand& op, Register reg) {
}
-void Assembler::testl(Register dst, Register src) {
+void Assembler::emit_test(Register dst, Register src, int size) {
EnsureSpace ensure_space(this);
if (src.low_bits() == 4) {
- emit_optional_rex_32(src, dst);
+ emit_rex(src, dst, size);
emit(0x85);
emit_modrm(src, dst);
} else {
- emit_optional_rex_32(dst, src);
+ emit_rex(dst, src, size);
emit(0x85);
emit_modrm(dst, src);
}
}
-void Assembler::testl(Register reg, Immediate mask) {
+void Assembler::emit_test(Register reg, Immediate mask, int size) {
// testl with a mask that fits in the low byte is exactly testb.
if (is_uint8(mask.value_)) {
testb(reg, mask);
@@ -1999,10 +1873,11 @@ void Assembler::testl(Register reg, Immediate mask) {
}
EnsureSpace ensure_space(this);
if (reg.is(rax)) {
+ emit_rex(rax, size);
emit(0xA9);
emit(mask);
} else {
- emit_optional_rex_32(rax, reg);
+ emit_rex(reg, size);
emit(0xF7);
emit_modrm(0x0, reg);
emit(mask);
@@ -2010,69 +1885,28 @@ void Assembler::testl(Register reg, Immediate mask) {
}
-void Assembler::testl(const Operand& op, Immediate mask) {
+void Assembler::emit_test(const Operand& op, Immediate mask, int size) {
// testl with a mask that fits in the low byte is exactly testb.
if (is_uint8(mask.value_)) {
testb(op, mask);
return;
}
EnsureSpace ensure_space(this);
- emit_optional_rex_32(rax, op);
+ emit_rex(rax, op, size);
emit(0xF7);
emit_operand(rax, op); // Operation code 0
emit(mask);
}
-void Assembler::testl(const Operand& op, Register reg) {
- EnsureSpace ensure_space(this);
- emit_optional_rex_32(reg, op);
- emit(0x85);
- emit_operand(reg, op);
-}
-
-
-void Assembler::testq(const Operand& op, Register reg) {
+void Assembler::emit_test(const Operand& op, Register reg, int size) {
EnsureSpace ensure_space(this);
- emit_rex_64(reg, op);
+ emit_rex(reg, op, size);
emit(0x85);
emit_operand(reg, op);
}
-void Assembler::testq(Register dst, Register src) {
- EnsureSpace ensure_space(this);
- if (src.low_bits() == 4) {
- emit_rex_64(src, dst);
- emit(0x85);
- emit_modrm(src, dst);
- } else {
- emit_rex_64(dst, src);
- emit(0x85);
- emit_modrm(dst, src);
- }
-}
-
-
-void Assembler::testq(Register dst, Immediate mask) {
- if (is_uint8(mask.value_)) {
- testb(dst, mask);
- return;
- }
- EnsureSpace ensure_space(this);
- if (dst.is(rax)) {
- emit_rex_64();
- emit(0xA9);
- emit(mask);
- } else {
- emit_rex_64(dst);
- emit(0xF7);
- emit_modrm(0, dst);
- emit(mask);
- }
-}
-
-
// FPU instructions.
@@ -2789,6 +2623,16 @@ void Assembler::movss(const Operand& src, XMMRegister dst) {
}
+void Assembler::psllq(XMMRegister reg, byte imm8) {
+ EnsureSpace ensure_space(this);
+ emit(0x66);
+ emit(0x0F);
+ emit(0x73);
+ emit_sse_operand(rsi, reg); // rsi == 6
+ emit(imm8);
+}
+
+
void Assembler::cvttss2si(Register dst, const Operand& src) {
EnsureSpace ensure_space(this);
emit(0xF3);
@@ -3172,6 +3016,19 @@ void Assembler::RecordComment(const char* msg, bool force) {
}
+MaybeObject* Assembler::AllocateConstantPool(Heap* heap) {
+ // No out-of-line constant pool support.
+ UNREACHABLE();
+ return NULL;
+}
+
+
+void Assembler::PopulateConstantPool(ConstantPoolArray* constant_pool) {
+ // No out-of-line constant pool support.
+ UNREACHABLE();
+}
+
+
const int RelocInfo::kApplyMask = RelocInfo::kCodeTargetMask |
1 << RelocInfo::RUNTIME_ENTRY |
1 << RelocInfo::INTERNAL_REFERENCE |
@@ -3185,6 +3042,12 @@ bool RelocInfo::IsCodedSpecially() {
return (1 << rmode_) & kApplyMask;
}
+
+bool RelocInfo::IsInConstantPool() {
+ return false;
+}
+
+
} } // namespace v8::internal
#endif // V8_TARGET_ARCH_X64
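The dominant pattern in this assembler rewrite: each decl/decq, incl/incq, notl/not_, testl/testq, ... pair collapses into a single emit_* routine taking an operand size, and emit_rex picks REX.W from that size. A stripped-down imitation of the shape (a simplified byte buffer stands in for the real Assembler; only the size dispatch mirrors the patch):

    #include <cstdint>
    #include <vector>

    // Hypothetical byte buffer standing in for the Assembler's code buffer.
    struct SketchAssembler {
      std::vector<uint8_t> buf;
      void emit(uint8_t b) { buf.push_back(b); }

      // emit_rex(reg, size): REX.W (0x48) for 64-bit operands, nothing for
      // 32-bit operands on the low registers -- the choice the real
      // emit_dec/emit_inc/emit_not now make from their 'size' argument.
      void emit_rex(int /*reg*/, int size) {
        if (size == 8) emit(0x48);    // kInt64Size
        // kInt32Size: the optional REX prefix is omitted in this sketch
      }

      // One emitter replaces the old decl/decq pair.
      void emit_dec(int reg, int size) {
        emit_rex(reg, size);
        emit(0xFF);
        emit(0xC8 | (reg & 7));       // ModRM for /1 with a register operand
      }
    };

    int main() {
      SketchAssembler a;
      a.emit_dec(0 /*rax*/, 4);       // decl eax -> FF C8
      a.emit_dec(0 /*rax*/, 8);       // decq rax -> 48 FF C8
      return a.buf.size() == 5 ? 0 : 1;
    }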
diff --git a/deps/v8/src/x64/assembler-x64.h b/deps/v8/src/x64/assembler-x64.h
index ef513d1e53..d47ca32e0d 100644
--- a/deps/v8/src/x64/assembler-x64.h
+++ b/deps/v8/src/x64/assembler-x64.h
@@ -44,27 +44,6 @@ namespace internal {
// Utility functions
-// Test whether a 64-bit value is in a specific range.
-inline bool is_uint32(int64_t x) {
- static const uint64_t kMaxUInt32 = V8_UINT64_C(0xffffffff);
- return static_cast<uint64_t>(x) <= kMaxUInt32;
-}
-
-inline bool is_int32(int64_t x) {
- static const int64_t kMinInt32 = -V8_INT64_C(0x80000000);
- return is_uint32(x - kMinInt32);
-}
-
-inline bool uint_is_int32(uint64_t x) {
- static const uint64_t kMaxInt32 = V8_UINT64_C(0x7fffffff);
- return x <= kMaxInt32;
-}
-
-inline bool is_uint32(uint64_t x) {
- static const uint64_t kMaxUInt32 = V8_UINT64_C(0xffffffff);
- return x <= kMaxUInt32;
-}
-
// CPU Registers.
//
// 1) We would prefer to use an enum, but enum values are assignment-
@@ -530,8 +509,27 @@ class CpuFeatures : public AllStatic {
};
-#define ASSEMBLER_INSTRUCTION_LIST(V) \
- V(mov)
+#define ASSEMBLER_INSTRUCTION_LIST(V) \
+ V(add) \
+ V(and) \
+ V(cmp) \
+ V(dec) \
+ V(idiv) \
+ V(imul) \
+ V(inc) \
+ V(lea) \
+ V(mov) \
+ V(movzxb) \
+ V(movzxw) \
+ V(neg) \
+ V(not) \
+ V(or) \
+ V(repmovs) \
+ V(sbb) \
+ V(sub) \
+ V(test) \
+ V(xchg) \
+ V(xor)
class Assembler : public AssemblerBase {
@@ -576,8 +574,21 @@ class Assembler : public AssemblerBase {
// the absolute address of the target.
// These functions convert between absolute Addresses of Code objects and
// the relative displacements stored in the code.
- static inline Address target_address_at(Address pc);
- static inline void set_target_address_at(Address pc, Address target);
+ static inline Address target_address_at(Address pc,
+ ConstantPoolArray* constant_pool);
+ static inline void set_target_address_at(Address pc,
+ ConstantPoolArray* constant_pool,
+ Address target);
+ static inline Address target_address_at(Address pc, Code* code) {
+ ConstantPoolArray* constant_pool = code ? code->constant_pool() : NULL;
+ return target_address_at(pc, constant_pool);
+ }
+ static inline void set_target_address_at(Address pc,
+ Code* code,
+ Address target) {
+ ConstantPoolArray* constant_pool = code ? code->constant_pool() : NULL;
+ set_target_address_at(pc, constant_pool, target);
+ }
// Return the code target address at a call site from the return address
// of that call in the instruction stream.
@@ -586,8 +597,8 @@ class Assembler : public AssemblerBase {
// This sets the branch destination (which is in the instruction on x64).
// This is for calls and branches within generated code.
inline static void deserialization_set_special_target_at(
- Address instruction_payload, Address target) {
- set_target_address_at(instruction_payload, target);
+ Address instruction_payload, Code* code, Address target) {
+ set_target_address_at(instruction_payload, code, target);
}
static inline RelocInfo::Mode RelocInfoNone() {
@@ -667,11 +678,24 @@ class Assembler : public AssemblerBase {
// - Instructions on 16-bit (word) operands/registers have a trailing 'w'.
// - Instructions on 32-bit (doubleword) operands/registers use 'l'.
// - Instructions on 64-bit (quadword) operands/registers use 'q'.
- //
- // Some mnemonics, such as "and", are the same as C++ keywords.
- // Naming conflicts with C++ keywords are resolved by adding a trailing '_'.
+ // - Instructions on operands/registers with pointer size use 'p'.
#define DECLARE_INSTRUCTION(instruction) \
+ template<class P1> \
+ void instruction##p(P1 p1) { \
+ emit_##instruction(p1, kPointerSize); \
+ } \
+ \
+ template<class P1> \
+ void instruction##l(P1 p1) { \
+ emit_##instruction(p1, kInt32Size); \
+ } \
+ \
+ template<class P1> \
+ void instruction##q(P1 p1) { \
+ emit_##instruction(p1, kInt64Size); \
+ } \
+ \
template<class P1, class P2> \
void instruction##p(P1 p1, P2 p2) { \
emit_##instruction(p1, p2, kPointerSize); \
@@ -685,6 +709,21 @@ class Assembler : public AssemblerBase {
template<class P1, class P2> \
void instruction##q(P1 p1, P2 p2) { \
emit_##instruction(p1, p2, kInt64Size); \
+ } \
+ \
+ template<class P1, class P2, class P3> \
+ void instruction##p(P1 p1, P2 p2, P3 p3) { \
+ emit_##instruction(p1, p2, p3, kPointerSize); \
+ } \
+ \
+ template<class P1, class P2, class P3> \
+ void instruction##l(P1 p1, P2 p2, P3 p3) { \
+ emit_##instruction(p1, p2, p3, kInt32Size); \
+ } \
+ \
+ template<class P1, class P2, class P3> \
+ void instruction##q(P1 p1, P2 p2, P3 p3) { \
+ emit_##instruction(p1, p2, p3, kInt64Size); \
}
ASSEMBLER_INSTRUCTION_LIST(DECLARE_INSTRUCTION)
#undef DECLARE_INSTRUCTION
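On the header side, ASSEMBLER_INSTRUCTION_LIST now covers most of the integer ALU ops, and DECLARE_INSTRUCTION stamps out p/l/q-suffixed template wrappers that forward to those size-parameterized emitters ('p' meaning pointer size). A reduced imitation of that expansion for a single one-operand instruction, with a hypothetical emit_demo in place of the real emitters:

    #include <iostream>

    // Hypothetical emitter; in the real assembler this is emit_dec/emit_inc/etc.
    static void emit_demo(int operand, int size) {
      std::cout << "emit(operand=" << operand << ", size=" << size << ")\n";
    }

    static const int kInt32Size = 4;
    static const int kInt64Size = 8;
    static const int kPointerSize = sizeof(void*);  // 8 on x64

    // Same trick as DECLARE_INSTRUCTION, reduced to the one-argument form:
    // each listed instruction gets <name>p/<name>l/<name>q wrappers.
    #define DECLARE_DEMO_INSTRUCTION(name)                                       \
      template <class P1> void name##p(P1 p1) { emit_##name(p1, kPointerSize); } \
      template <class P1> void name##l(P1 p1) { emit_##name(p1, kInt32Size); }   \
      template <class P1> void name##q(P1 p1) { emit_##name(p1, kInt64Size); }

    DECLARE_DEMO_INSTRUCTION(demo)
    #undef DECLARE_DEMO_INSTRUCTION

    int main() {
      demol(1);  // 32-bit form
      demoq(2);  // 64-bit form
      demop(3);  // pointer-sized form (same as q on x64)
      return 0;
    }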
@@ -701,15 +740,15 @@ class Assembler : public AssemblerBase {
void pushfq();
void popfq();
- void push(Immediate value);
+ void pushq(Immediate value);
// Push a 32 bit integer, and guarantee that it is actually pushed as a
// 32 bit value, the normal push will optimize the 8 bit case.
- void push_imm32(int32_t imm32);
- void push(Register src);
- void push(const Operand& src);
+ void pushq_imm32(int32_t imm32);
+ void pushq(Register src);
+ void pushq(const Operand& src);
- void pop(Register dst);
- void pop(const Operand& dst);
+ void popq(Register dst);
+ void popq(const Operand& dst);
void enter(Immediate size);
void leave();
@@ -741,18 +780,14 @@ class Assembler : public AssemblerBase {
void movsxwq(Register dst, const Operand& src);
void movsxlq(Register dst, Register src);
void movsxlq(Register dst, const Operand& src);
- void movzxbq(Register dst, const Operand& src);
- void movzxbl(Register dst, const Operand& src);
- void movzxwq(Register dst, const Operand& src);
- void movzxwl(Register dst, const Operand& src);
- void movzxwl(Register dst, Register src);
// Repeated moves.
void repmovsb();
void repmovsw();
- void repmovsl();
- void repmovsq();
+ void repmovsp() { emit_repmovs(kPointerSize); }
+ void repmovsl() { emit_repmovs(kInt32Size); }
+ void repmovsq() { emit_repmovs(kInt64Size); }
// Instruction to load from an immediate 64-bit pointer into RAX.
void load_rax(void* ptr, RelocInfo::Mode rmode);
@@ -764,59 +799,6 @@ class Assembler : public AssemblerBase {
void cmovl(Condition cc, Register dst, Register src);
void cmovl(Condition cc, Register dst, const Operand& src);
- // Exchange two registers
- void xchgq(Register dst, Register src);
- void xchgl(Register dst, Register src);
-
- // Arithmetics
- void addl(Register dst, Register src) {
- arithmetic_op_32(0x03, dst, src);
- }
-
- void addl(Register dst, Immediate src) {
- immediate_arithmetic_op_32(0x0, dst, src);
- }
-
- void addl(Register dst, const Operand& src) {
- arithmetic_op_32(0x03, dst, src);
- }
-
- void addl(const Operand& dst, Immediate src) {
- immediate_arithmetic_op_32(0x0, dst, src);
- }
-
- void addl(const Operand& dst, Register src) {
- arithmetic_op_32(0x01, src, dst);
- }
-
- void addq(Register dst, Register src) {
- arithmetic_op(0x03, dst, src);
- }
-
- void addq(Register dst, const Operand& src) {
- arithmetic_op(0x03, dst, src);
- }
-
- void addq(const Operand& dst, Register src) {
- arithmetic_op(0x01, src, dst);
- }
-
- void addq(Register dst, Immediate src) {
- immediate_arithmetic_op(0x0, dst, src);
- }
-
- void addq(const Operand& dst, Immediate src) {
- immediate_arithmetic_op(0x0, dst, src);
- }
-
- void sbbl(Register dst, Register src) {
- arithmetic_op_32(0x1b, dst, src);
- }
-
- void sbbq(Register dst, Register src) {
- arithmetic_op(0x1b, dst, src);
- }
-
void cmpb(Register dst, Immediate src) {
immediate_arithmetic_op_8(0x7, dst, src);
}
@@ -859,86 +841,10 @@ class Assembler : public AssemblerBase {
arithmetic_op_16(0x39, src, dst);
}
- void cmpl(Register dst, Register src) {
- arithmetic_op_32(0x3B, dst, src);
- }
-
- void cmpl(Register dst, const Operand& src) {
- arithmetic_op_32(0x3B, dst, src);
- }
-
- void cmpl(const Operand& dst, Register src) {
- arithmetic_op_32(0x39, src, dst);
- }
-
- void cmpl(Register dst, Immediate src) {
- immediate_arithmetic_op_32(0x7, dst, src);
- }
-
- void cmpl(const Operand& dst, Immediate src) {
- immediate_arithmetic_op_32(0x7, dst, src);
- }
-
- void cmpq(Register dst, Register src) {
- arithmetic_op(0x3B, dst, src);
- }
-
- void cmpq(Register dst, const Operand& src) {
- arithmetic_op(0x3B, dst, src);
- }
-
- void cmpq(const Operand& dst, Register src) {
- arithmetic_op(0x39, src, dst);
- }
-
- void cmpq(Register dst, Immediate src) {
- immediate_arithmetic_op(0x7, dst, src);
- }
-
- void cmpq(const Operand& dst, Immediate src) {
- immediate_arithmetic_op(0x7, dst, src);
- }
-
- void and_(Register dst, Register src) {
- arithmetic_op(0x23, dst, src);
- }
-
- void and_(Register dst, const Operand& src) {
- arithmetic_op(0x23, dst, src);
- }
-
- void and_(const Operand& dst, Register src) {
- arithmetic_op(0x21, src, dst);
- }
-
- void and_(Register dst, Immediate src) {
- immediate_arithmetic_op(0x4, dst, src);
- }
-
- void and_(const Operand& dst, Immediate src) {
- immediate_arithmetic_op(0x4, dst, src);
- }
-
- void andl(Register dst, Immediate src) {
- immediate_arithmetic_op_32(0x4, dst, src);
- }
-
- void andl(Register dst, Register src) {
- arithmetic_op_32(0x23, dst, src);
- }
-
- void andl(Register dst, const Operand& src) {
- arithmetic_op_32(0x23, dst, src);
- }
-
void andb(Register dst, Immediate src) {
immediate_arithmetic_op_8(0x4, dst, src);
}
- void decq(Register dst);
- void decq(const Operand& dst);
- void decl(Register dst);
- void decl(const Operand& dst);
void decb(Register dst);
void decb(const Operand& dst);
@@ -947,80 +853,9 @@ class Assembler : public AssemblerBase {
// Sign-extends eax into edx:eax.
void cdq();
- // Divide rdx:rax by src. Quotient in rax, remainder in rdx.
- void idivq(Register src);
- // Divide edx:eax by lower 32 bits of src. Quotient in eax, rem. in edx.
- void idivl(Register src);
-
- // Signed multiply instructions.
- void imul(Register src); // rdx:rax = rax * src.
- void imul(Register dst, Register src); // dst = dst * src.
- void imul(Register dst, const Operand& src); // dst = dst * src.
- void imul(Register dst, Register src, Immediate imm); // dst = src * imm.
- // Signed 32-bit multiply instructions.
- void imull(Register dst, Register src); // dst = dst * src.
- void imull(Register dst, const Operand& src); // dst = dst * src.
- void imull(Register dst, Register src, Immediate imm); // dst = src * imm.
-
- void incq(Register dst);
- void incq(const Operand& dst);
- void incl(Register dst);
- void incl(const Operand& dst);
-
- void lea(Register dst, const Operand& src);
- void leal(Register dst, const Operand& src);
-
// Multiply rax by src, put the result in rdx:rax.
void mul(Register src);
- void neg(Register dst);
- void neg(const Operand& dst);
- void negl(Register dst);
-
- void not_(Register dst);
- void not_(const Operand& dst);
- void notl(Register dst);
-
- void or_(Register dst, Register src) {
- arithmetic_op(0x0B, dst, src);
- }
-
- void orl(Register dst, Register src) {
- arithmetic_op_32(0x0B, dst, src);
- }
-
- void or_(Register dst, const Operand& src) {
- arithmetic_op(0x0B, dst, src);
- }
-
- void orl(Register dst, const Operand& src) {
- arithmetic_op_32(0x0B, dst, src);
- }
-
- void or_(const Operand& dst, Register src) {
- arithmetic_op(0x09, src, dst);
- }
-
- void orl(const Operand& dst, Register src) {
- arithmetic_op_32(0x09, src, dst);
- }
-
- void or_(Register dst, Immediate src) {
- immediate_arithmetic_op(0x1, dst, src);
- }
-
- void orl(Register dst, Immediate src) {
- immediate_arithmetic_op_32(0x1, dst, src);
- }
-
- void or_(const Operand& dst, Immediate src) {
- immediate_arithmetic_op(0x1, dst, src);
- }
-
- void orl(const Operand& dst, Immediate src) {
- immediate_arithmetic_op_32(0x1, dst, src);
- }
-
void rcl(Register dst, Immediate imm8) {
shift(dst, imm8, 0x2);
}
@@ -1112,46 +947,6 @@ class Assembler : public AssemblerBase {
void store_rax(void* dst, RelocInfo::Mode mode);
void store_rax(ExternalReference ref);
- void subq(Register dst, Register src) {
- arithmetic_op(0x2B, dst, src);
- }
-
- void subq(Register dst, const Operand& src) {
- arithmetic_op(0x2B, dst, src);
- }
-
- void subq(const Operand& dst, Register src) {
- arithmetic_op(0x29, src, dst);
- }
-
- void subq(Register dst, Immediate src) {
- immediate_arithmetic_op(0x5, dst, src);
- }
-
- void subq(const Operand& dst, Immediate src) {
- immediate_arithmetic_op(0x5, dst, src);
- }
-
- void subl(Register dst, Register src) {
- arithmetic_op_32(0x2B, dst, src);
- }
-
- void subl(Register dst, const Operand& src) {
- arithmetic_op_32(0x2B, dst, src);
- }
-
- void subl(const Operand& dst, Register src) {
- arithmetic_op_32(0x29, src, dst);
- }
-
- void subl(const Operand& dst, Immediate src) {
- immediate_arithmetic_op_32(0x5, dst, src);
- }
-
- void subl(Register dst, Immediate src) {
- immediate_arithmetic_op_32(0x5, dst, src);
- }
-
void subb(Register dst, Immediate src) {
immediate_arithmetic_op_8(0x5, dst, src);
}
@@ -1160,61 +955,11 @@ class Assembler : public AssemblerBase {
void testb(Register reg, Immediate mask);
void testb(const Operand& op, Immediate mask);
void testb(const Operand& op, Register reg);
- void testl(Register dst, Register src);
- void testl(Register reg, Immediate mask);
- void testl(const Operand& op, Register reg);
- void testl(const Operand& op, Immediate mask);
- void testq(const Operand& op, Register reg);
- void testq(Register dst, Register src);
- void testq(Register dst, Immediate mask);
-
- void xor_(Register dst, Register src) {
- if (dst.code() == src.code()) {
- arithmetic_op_32(0x33, dst, src);
- } else {
- arithmetic_op(0x33, dst, src);
- }
- }
-
- void xorl(Register dst, Register src) {
- arithmetic_op_32(0x33, dst, src);
- }
-
- void xorl(Register dst, const Operand& src) {
- arithmetic_op_32(0x33, dst, src);
- }
-
- void xorl(Register dst, Immediate src) {
- immediate_arithmetic_op_32(0x6, dst, src);
- }
-
- void xorl(const Operand& dst, Register src) {
- arithmetic_op_32(0x31, src, dst);
- }
-
- void xorl(const Operand& dst, Immediate src) {
- immediate_arithmetic_op_32(0x6, dst, src);
- }
-
- void xor_(Register dst, const Operand& src) {
- arithmetic_op(0x33, dst, src);
- }
-
- void xor_(const Operand& dst, Register src) {
- arithmetic_op(0x31, src, dst);
- }
-
- void xor_(Register dst, Immediate src) {
- immediate_arithmetic_op(0x6, dst, src);
- }
-
- void xor_(const Operand& dst, Immediate src) {
- immediate_arithmetic_op(0x6, dst, src);
- }
// Bit operations.
void bt(const Operand& dst, Register src);
void bts(const Operand& dst, Register src);
+ void bsrl(Register dst, Register src);
// Miscellaneous
void clc();
@@ -1260,9 +1005,6 @@ class Assembler : public AssemblerBase {
// Call near absolute indirect, address in register
void call(Register adr);
- // Call near indirect
- void call(const Operand& operand);
-
// Jumps
// Jump short or near relative.
// Use a 32-bit signed displacement.
@@ -1274,9 +1016,6 @@ class Assembler : public AssemblerBase {
// Jump near absolute indirect (r64)
void jmp(Register adr);
- // Jump near absolute indirect (m64)
- void jmp(const Operand& src);
-
// Conditional jumps
void j(Condition cc,
Label* L,
@@ -1407,6 +1146,8 @@ class Assembler : public AssemblerBase {
void movapd(XMMRegister dst, XMMRegister src);
+ void psllq(XMMRegister reg, byte imm8);
+
void cvttsd2si(Register dst, const Operand& src);
void cvttsd2si(Register dst, XMMRegister src);
void cvttsd2siq(Register dst, XMMRegister src);
@@ -1472,6 +1213,12 @@ class Assembler : public AssemblerBase {
// Use --code-comments to enable.
void RecordComment(const char* msg, bool force = false);
+ // Allocate a constant pool of the correct size for the generated code.
+ MaybeObject* AllocateConstantPool(Heap* heap);
+
+ // Generate the constant pool for the generated code.
+ void PopulateConstantPool(ConstantPoolArray* constant_pool);
+
// Writes a single word of data in the code stream.
// Used for inline tables, e.g., jump-tables.
void db(uint8_t data);
@@ -1499,6 +1246,13 @@ class Assembler : public AssemblerBase {
byte byte_at(int pos) { return buffer_[pos]; }
void set_byte_at(int pos, byte value) { buffer_[pos] = value; }
+ protected:
+ // Call near indirect
+ void call(const Operand& operand);
+
+ // Jump near absolute indirect (m64)
+ void jmp(const Operand& src);
+
private:
byte* addr_at(int pos) { return buffer_ + pos; }
uint32_t long_at(int pos) {
@@ -1605,6 +1359,14 @@ class Assembler : public AssemblerBase {
// numbers have a high bit set.
inline void emit_optional_rex_32(const Operand& op);
+ void emit_rex(int size) {
+ if (size == kInt64Size) {
+ emit_rex_64();
+ } else {
+ ASSERT(size == kInt32Size);
+ }
+ }
+
template<class P1>
void emit_rex(P1 p1, int size) {
if (size == kInt64Size) {
@@ -1709,12 +1471,331 @@ class Assembler : public AssemblerBase {
// record reloc info for current pc_
void RecordRelocInfo(RelocInfo::Mode rmode, intptr_t data = 0);
+ // Arithmetics
+ void emit_add(Register dst, Register src, int size) {
+ if (size == kInt64Size) {
+ arithmetic_op(0x03, dst, src);
+ } else {
+ ASSERT(size == kInt32Size);
+ arithmetic_op_32(0x03, dst, src);
+ }
+ }
+
+ void emit_add(Register dst, Immediate src, int size) {
+ if (size == kInt64Size) {
+ immediate_arithmetic_op(0x0, dst, src);
+ } else {
+ ASSERT(size == kInt32Size);
+ immediate_arithmetic_op_32(0x0, dst, src);
+ }
+ }
+
+ void emit_add(Register dst, const Operand& src, int size) {
+ if (size == kInt64Size) {
+ arithmetic_op(0x03, dst, src);
+ } else {
+ ASSERT(size == kInt32Size);
+ arithmetic_op_32(0x03, dst, src);
+ }
+ }
+
+ void emit_add(const Operand& dst, Register src, int size) {
+ if (size == kInt64Size) {
+ arithmetic_op(0x1, src, dst);
+ } else {
+ ASSERT(size == kInt32Size);
+ arithmetic_op_32(0x1, src, dst);
+ }
+ }
+
+ void emit_add(const Operand& dst, Immediate src, int size) {
+ if (size == kInt64Size) {
+ immediate_arithmetic_op(0x0, dst, src);
+ } else {
+ ASSERT(size == kInt32Size);
+ immediate_arithmetic_op_32(0x0, dst, src);
+ }
+ }
+
+ void emit_and(Register dst, Register src, int size) {
+ if (size == kInt64Size) {
+ arithmetic_op(0x23, dst, src);
+ } else {
+ ASSERT(size == kInt32Size);
+ arithmetic_op_32(0x23, dst, src);
+ }
+ }
+
+ void emit_and(Register dst, const Operand& src, int size) {
+ if (size == kInt64Size) {
+ arithmetic_op(0x23, dst, src);
+ } else {
+ ASSERT(size == kInt32Size);
+ arithmetic_op_32(0x23, dst, src);
+ }
+ }
+
+ void emit_and(const Operand& dst, Register src, int size) {
+ if (size == kInt64Size) {
+ arithmetic_op(0x21, src, dst);
+ } else {
+ ASSERT(size == kInt32Size);
+ arithmetic_op_32(0x21, src, dst);
+ }
+ }
+
+ void emit_and(Register dst, Immediate src, int size) {
+ if (size == kInt64Size) {
+ immediate_arithmetic_op(0x4, dst, src);
+ } else {
+ ASSERT(size == kInt32Size);
+ immediate_arithmetic_op_32(0x4, dst, src);
+ }
+ }
+
+ void emit_and(const Operand& dst, Immediate src, int size) {
+ if (size == kInt64Size) {
+ immediate_arithmetic_op(0x4, dst, src);
+ } else {
+ ASSERT(size == kInt32Size);
+ immediate_arithmetic_op_32(0x4, dst, src);
+ }
+ }
+
+ void emit_cmp(Register dst, Register src, int size) {
+ if (size == kInt64Size) {
+ arithmetic_op(0x3B, dst, src);
+ } else {
+ ASSERT(size == kInt32Size);
+ arithmetic_op_32(0x3B, dst, src);
+ }
+ }
+
+ void emit_cmp(Register dst, const Operand& src, int size) {
+ if (size == kInt64Size) {
+ arithmetic_op(0x3B, dst, src);
+ } else {
+ ASSERT(size == kInt32Size);
+ arithmetic_op_32(0x3B, dst, src);
+ }
+ }
+
+ void emit_cmp(const Operand& dst, Register src, int size) {
+ if (size == kInt64Size) {
+ arithmetic_op(0x39, src, dst);
+ } else {
+ ASSERT(size == kInt32Size);
+ arithmetic_op_32(0x39, src, dst);
+ }
+ }
+
+ void emit_cmp(Register dst, Immediate src, int size) {
+ if (size == kInt64Size) {
+ immediate_arithmetic_op(0x7, dst, src);
+ } else {
+ ASSERT(size == kInt32Size);
+ immediate_arithmetic_op_32(0x7, dst, src);
+ }
+ }
+
+ void emit_cmp(const Operand& dst, Immediate src, int size) {
+ if (size == kInt64Size) {
+ immediate_arithmetic_op(0x7, dst, src);
+ } else {
+ ASSERT(size == kInt32Size);
+ immediate_arithmetic_op_32(0x7, dst, src);
+ }
+ }
+
+ void emit_dec(Register dst, int size);
+ void emit_dec(const Operand& dst, int size);
+
+ // Divide rdx:rax by src. Quotient in rax, remainder in rdx when size is 64.
+ // Divide edx:eax by lower 32 bits of src. Quotient in eax, remainder in edx
+ // when size is 32.
+ void emit_idiv(Register src, int size);
+
+ // Signed multiply instructions.
+ // rdx:rax = rax * src when size is 64 or edx:eax = eax * src when size is 32.
+ void emit_imul(Register src, int size);
+ void emit_imul(Register dst, Register src, int size);
+ void emit_imul(Register dst, const Operand& src, int size);
+ void emit_imul(Register dst, Register src, Immediate imm, int size);
+
+ void emit_inc(Register dst, int size);
+ void emit_inc(const Operand& dst, int size);
+
+ void emit_lea(Register dst, const Operand& src, int size);
+
void emit_mov(Register dst, const Operand& src, int size);
void emit_mov(Register dst, Register src, int size);
void emit_mov(const Operand& dst, Register src, int size);
void emit_mov(Register dst, Immediate value, int size);
void emit_mov(const Operand& dst, Immediate value, int size);
+ void emit_movzxb(Register dst, const Operand& src, int size);
+ void emit_movzxw(Register dst, const Operand& src, int size);
+ void emit_movzxw(Register dst, Register src, int size);
+
+ void emit_neg(Register dst, int size);
+ void emit_neg(const Operand& dst, int size);
+
+ void emit_not(Register dst, int size);
+ void emit_not(const Operand& dst, int size);
+
+ void emit_or(Register dst, Register src, int size) {
+ if (size == kInt64Size) {
+ arithmetic_op(0x0B, dst, src);
+ } else {
+ arithmetic_op_32(0x0B, dst, src);
+ }
+ }
+
+ void emit_or(Register dst, const Operand& src, int size) {
+ if (size == kInt64Size) {
+ arithmetic_op(0x0B, dst, src);
+ } else {
+ arithmetic_op_32(0x0B, dst, src);
+ }
+ }
+
+ void emit_or(const Operand& dst, Register src, int size) {
+ if (size == kInt64Size) {
+ arithmetic_op(0x9, src, dst);
+ } else {
+ arithmetic_op_32(0x9, src, dst);
+ }
+ }
+
+ void emit_or(Register dst, Immediate src, int size) {
+ if (size == kInt64Size) {
+ immediate_arithmetic_op(0x1, dst, src);
+ } else {
+ immediate_arithmetic_op_32(0x1, dst, src);
+ }
+ }
+
+ void emit_or(const Operand& dst, Immediate src, int size) {
+ if (size == kInt64Size) {
+ immediate_arithmetic_op(0x1, dst, src);
+ } else {
+ immediate_arithmetic_op_32(0x1, dst, src);
+ }
+ }
+
+ void emit_repmovs(int size);
+
+ void emit_sbb(Register dst, Register src, int size) {
+ if (size == kInt64Size) {
+ arithmetic_op(0x1b, dst, src);
+ } else {
+ ASSERT(size == kInt32Size);
+ arithmetic_op_32(0x1b, dst, src);
+ }
+ }
+
+ void emit_sub(Register dst, Register src, int size) {
+ if (size == kInt64Size) {
+ arithmetic_op(0x2B, dst, src);
+ } else {
+ ASSERT(size == kInt32Size);
+ arithmetic_op_32(0x2B, dst, src);
+ }
+ }
+
+ void emit_sub(Register dst, Immediate src, int size) {
+ if (size == kInt64Size) {
+ immediate_arithmetic_op(0x5, dst, src);
+ } else {
+ ASSERT(size == kInt32Size);
+ immediate_arithmetic_op_32(0x5, dst, src);
+ }
+ }
+
+ void emit_sub(Register dst, const Operand& src, int size) {
+ if (size == kInt64Size) {
+ arithmetic_op(0x2B, dst, src);
+ } else {
+ ASSERT(size == kInt32Size);
+ arithmetic_op_32(0x2B, dst, src);
+ }
+ }
+
+ void emit_sub(const Operand& dst, Register src, int size) {
+ if (size == kInt64Size) {
+ arithmetic_op(0x29, src, dst);
+ } else {
+ ASSERT(size == kInt32Size);
+ arithmetic_op_32(0x29, src, dst);
+ }
+ }
+
+ void emit_sub(const Operand& dst, Immediate src, int size) {
+ if (size == kInt64Size) {
+ immediate_arithmetic_op(0x5, dst, src);
+ } else {
+ ASSERT(size == kInt32Size);
+ immediate_arithmetic_op_32(0x5, dst, src);
+ }
+ }
+
+ void emit_test(Register dst, Register src, int size);
+ void emit_test(Register reg, Immediate mask, int size);
+ void emit_test(const Operand& op, Register reg, int size);
+ void emit_test(const Operand& op, Immediate mask, int size);
+
+ // Exchange two registers
+ void emit_xchg(Register dst, Register src, int size);
+
+ void emit_xor(Register dst, Register src, int size) {
+ if (size == kInt64Size) {
+ if (dst.code() == src.code()) {
+ arithmetic_op_32(0x33, dst, src);
+ } else {
+ arithmetic_op(0x33, dst, src);
+ }
+ } else {
+ ASSERT(size == kInt32Size);
+ arithmetic_op_32(0x33, dst, src);
+ }
+ }
+
+ void emit_xor(Register dst, const Operand& src, int size) {
+ if (size == kInt64Size) {
+ arithmetic_op(0x33, dst, src);
+ } else {
+ ASSERT(size == kInt32Size);
+ arithmetic_op_32(0x33, dst, src);
+ }
+ }
+
+ void emit_xor(Register dst, Immediate src, int size) {
+ if (size == kInt64Size) {
+ immediate_arithmetic_op(0x6, dst, src);
+ } else {
+ ASSERT(size == kInt32Size);
+ immediate_arithmetic_op_32(0x6, dst, src);
+ }
+ }
+
+ void emit_xor(const Operand& dst, Immediate src, int size) {
+ if (size == kInt64Size) {
+ immediate_arithmetic_op(0x6, dst, src);
+ } else {
+ ASSERT(size == kInt32Size);
+ immediate_arithmetic_op_32(0x6, dst, src);
+ }
+ }
+
+ void emit_xor(const Operand& dst, Register src, int size) {
+ if (size == kInt64Size) {
+ arithmetic_op(0x31, src, dst);
+ } else {
+ ASSERT(size == kInt32Size);
+ arithmetic_op_32(0x31, src, dst);
+ }
+ }
+
friend class CodePatcher;
friend class EnsureSpace;
friend class RegExpMacroAssemblerX64;
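// Illustrative sketch, not part of the upstream diff: how the renamed
// primitives read at a call site in the builtins and stubs below. Raw 64-bit
// stack traffic now uses the explicit q-suffixed instructions, abstracted
// pushes go through the MacroAssembler helpers, and pointer-sized arithmetic
// uses the new p suffix ("__" is the usual ACCESS_MASM(masm) shorthand in
// these files):
//
//   __ pushq(rbp);                           // raw 64-bit push (frame setup)
//   __ Push(rdi);                            // MacroAssembler::Push helper
//   __ addp(rax, Immediate(1));              // pointer-size add; emit_add maps it
//                                            // to addq or addl via kPointerSize
//   __ leap(rsp, Operand(rsp, kPointerSize));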
diff --git a/deps/v8/src/x64/builtins-x64.cc b/deps/v8/src/x64/builtins-x64.cc
index 6717dd5d6d..d5b1a73868 100644
--- a/deps/v8/src/x64/builtins-x64.cc
+++ b/deps/v8/src/x64/builtins-x64.cc
@@ -61,7 +61,7 @@ void Builtins::Generate_Adaptor(MacroAssembler* masm,
if (extra_args == NEEDS_CALLED_FUNCTION) {
num_extra_args = 1;
__ PopReturnAddressTo(kScratchRegister);
- __ push(rdi);
+ __ Push(rdi);
__ PushReturnAddressFrom(kScratchRegister);
} else {
ASSERT(extra_args == NO_EXTRA_ARGUMENTS);
@@ -69,7 +69,7 @@ void Builtins::Generate_Adaptor(MacroAssembler* masm,
// JumpToExternalReference expects rax to contain the number of arguments
// including the receiver and the extra arguments.
- __ addq(rax, Immediate(num_extra_args + 1));
+ __ addp(rax, Immediate(num_extra_args + 1));
__ JumpToExternalReference(ExternalReference(id, masm->isolate()), 1);
}
@@ -78,13 +78,13 @@ static void CallRuntimePassFunction(
MacroAssembler* masm, Runtime::FunctionId function_id) {
FrameScope scope(masm, StackFrame::INTERNAL);
// Push a copy of the function onto the stack.
- __ push(rdi);
+ __ Push(rdi);
// Function is also the parameter to the runtime call.
- __ push(rdi);
+ __ Push(rdi);
__ CallRuntime(function_id, 1);
// Restore receiver.
- __ pop(rdi);
+ __ Pop(rdi);
}
@@ -93,13 +93,13 @@ static void GenerateTailCallToSharedCode(MacroAssembler* masm) {
FieldOperand(rdi, JSFunction::kSharedFunctionInfoOffset));
__ movp(kScratchRegister,
FieldOperand(kScratchRegister, SharedFunctionInfo::kCodeOffset));
- __ lea(kScratchRegister, FieldOperand(kScratchRegister, Code::kHeaderSize));
+ __ leap(kScratchRegister, FieldOperand(kScratchRegister, Code::kHeaderSize));
__ jmp(kScratchRegister);
}
static void GenerateTailCallToReturnedCode(MacroAssembler* masm) {
- __ lea(rax, FieldOperand(rax, Code::kHeaderSize));
+ __ leap(rax, FieldOperand(rax, Code::kHeaderSize));
__ jmp(rax);
}
@@ -114,7 +114,7 @@ void Builtins::Generate_InOptimizationQueue(MacroAssembler* masm) {
__ CompareRoot(rsp, Heap::kStackLimitRootIndex);
__ j(above_equal, &ok);
- CallRuntimePassFunction(masm, Runtime::kTryInstallOptimizedCode);
+ CallRuntimePassFunction(masm, Runtime::kHiddenTryInstallOptimizedCode);
GenerateTailCallToReturnedCode(masm);
__ bind(&ok);
@@ -124,25 +124,38 @@ void Builtins::Generate_InOptimizationQueue(MacroAssembler* masm) {
static void Generate_JSConstructStubHelper(MacroAssembler* masm,
bool is_api_function,
- bool count_constructions) {
+ bool count_constructions,
+ bool create_memento) {
// ----------- S t a t e -------------
// -- rax: number of arguments
// -- rdi: constructor function
+ // -- rbx: allocation site or undefined
// -----------------------------------
// Should never count constructions for api objects.
ASSERT(!is_api_function || !count_constructions);
+
+ // Should never create mementos for api functions.
+ ASSERT(!is_api_function || !create_memento);
+
+ // Should never create mementos before slack tracking is finished.
+ ASSERT(!count_constructions || !create_memento);
// Enter a construct frame.
{
FrameScope scope(masm, StackFrame::CONSTRUCT);
+ if (create_memento) {
+ __ AssertUndefinedOrAllocationSite(rbx);
+ __ Push(rbx);
+ }
+
// Store a smi-tagged arguments count on the stack.
__ Integer32ToSmi(rax, rax);
- __ push(rax);
+ __ Push(rax);
// Push the function to invoke on the stack.
- __ push(rdi);
+ __ Push(rdi);
// Try to allocate the object without transitioning into C code. If any of
// the preconditions is not met, the code bails out to the runtime call.
@@ -154,7 +167,7 @@ static void Generate_JSConstructStubHelper(MacroAssembler* masm,
ExternalReference debug_step_in_fp =
ExternalReference::debug_step_in_fp_address(masm->isolate());
__ Move(kScratchRegister, debug_step_in_fp);
- __ cmpq(Operand(kScratchRegister, 0), Immediate(0));
+ __ cmpp(Operand(kScratchRegister, 0), Immediate(0));
__ j(not_equal, &rt_call);
#endif
@@ -186,22 +199,25 @@ static void Generate_JSConstructStubHelper(MacroAssembler* masm,
SharedFunctionInfo::kConstructionCountOffset));
__ j(not_zero, &allocate);
- __ push(rax);
- __ push(rdi);
+ __ Push(rax);
+ __ Push(rdi);
- __ push(rdi); // constructor
+ __ Push(rdi); // constructor
// The call will replace the stub, so the countdown is only done once.
- __ CallRuntime(Runtime::kFinalizeInstanceSize, 1);
+ __ CallRuntime(Runtime::kHiddenFinalizeInstanceSize, 1);
- __ pop(rdi);
- __ pop(rax);
+ __ Pop(rdi);
+ __ Pop(rax);
__ bind(&allocate);
}
// Now allocate the JSObject on the heap.
- __ movzxbq(rdi, FieldOperand(rax, Map::kInstanceSizeOffset));
+ __ movzxbp(rdi, FieldOperand(rax, Map::kInstanceSizeOffset));
__ shl(rdi, Immediate(kPointerSizeLog2));
+ if (create_memento) {
+ __ addp(rdi, Immediate(AllocationMemento::kSize));
+ }
// rdi: size of new object
__ Allocate(rdi,
rbx,
@@ -209,10 +225,11 @@ static void Generate_JSConstructStubHelper(MacroAssembler* masm,
no_reg,
&rt_call,
NO_ALLOCATION_FLAGS);
+ Factory* factory = masm->isolate()->factory();
// Allocated the JSObject, now initialize the fields.
// rax: initial map
// rbx: JSObject (not HeapObject tagged - the actual address).
- // rdi: start of next object
+ // rdi: start of next object (including memento if create_memento)
__ movp(Operand(rbx, JSObject::kMapOffset), rax);
__ LoadRoot(rcx, Heap::kEmptyFixedArrayRootIndex);
__ movp(Operand(rbx, JSObject::kPropertiesOffset), rcx);
@@ -220,24 +237,39 @@ static void Generate_JSConstructStubHelper(MacroAssembler* masm,
// Set extra fields in the newly allocated object.
// rax: initial map
// rbx: JSObject
- // rdi: start of next object
- __ lea(rcx, Operand(rbx, JSObject::kHeaderSize));
+ // rdi: start of next object (including memento if create_memento)
+ __ leap(rcx, Operand(rbx, JSObject::kHeaderSize));
__ LoadRoot(rdx, Heap::kUndefinedValueRootIndex);
if (count_constructions) {
- __ movzxbq(rsi,
+ __ movzxbp(rsi,
FieldOperand(rax, Map::kPreAllocatedPropertyFieldsOffset));
- __ lea(rsi,
+ __ leap(rsi,
Operand(rbx, rsi, times_pointer_size, JSObject::kHeaderSize));
// rsi: offset of first field after pre-allocated fields
if (FLAG_debug_code) {
- __ cmpq(rsi, rdi);
+ __ cmpp(rsi, rdi);
__ Assert(less_equal,
kUnexpectedNumberOfPreAllocatedPropertyFields);
}
__ InitializeFieldsWithFiller(rcx, rsi, rdx);
__ LoadRoot(rdx, Heap::kOnePointerFillerMapRootIndex);
+ __ InitializeFieldsWithFiller(rcx, rdi, rdx);
+ } else if (create_memento) {
+ __ leap(rsi, Operand(rdi, -AllocationMemento::kSize));
+ __ InitializeFieldsWithFiller(rcx, rsi, rdx);
+
+ // Fill in memento fields if necessary.
+ // rsi: points to the allocated but uninitialized memento.
+ Handle<Map> allocation_memento_map = factory->allocation_memento_map();
+ __ Move(Operand(rsi, AllocationMemento::kMapOffset),
+ allocation_memento_map);
+ // Get the cell or undefined.
+ __ movp(rdx, Operand(rsp, kPointerSize*2));
+ __ movp(Operand(rsi, AllocationMemento::kAllocationSiteOffset),
+ rdx);
+ } else {
+ __ InitializeFieldsWithFiller(rcx, rdi, rdx);
}
- __ InitializeFieldsWithFiller(rcx, rdi, rdx);
// Add the object tag to make the JSObject real, so that we can continue
// and jump into the continuation code at any time from now on. Any
@@ -246,7 +278,7 @@ static void Generate_JSConstructStubHelper(MacroAssembler* masm,
// rax: initial map
// rbx: JSObject
// rdi: start of next object
- __ or_(rbx, Immediate(kHeapObjectTag));
+ __ orp(rbx, Immediate(kHeapObjectTag));
// Check if a non-empty properties array is needed.
// Allocate and initialize a FixedArray if it is.
@@ -254,13 +286,13 @@ static void Generate_JSConstructStubHelper(MacroAssembler* masm,
// rbx: JSObject
// rdi: start of next object
// Calculate total properties described map.
- __ movzxbq(rdx, FieldOperand(rax, Map::kUnusedPropertyFieldsOffset));
- __ movzxbq(rcx,
+ __ movzxbp(rdx, FieldOperand(rax, Map::kUnusedPropertyFieldsOffset));
+ __ movzxbp(rcx,
FieldOperand(rax, Map::kPreAllocatedPropertyFieldsOffset));
- __ addq(rdx, rcx);
+ __ addp(rdx, rcx);
// Calculate unused properties past the end of the in-object properties.
- __ movzxbq(rcx, FieldOperand(rax, Map::kInObjectPropertiesOffset));
- __ subq(rdx, rcx);
+ __ movzxbp(rcx, FieldOperand(rax, Map::kInObjectPropertiesOffset));
+ __ subp(rdx, rcx);
// Done if no extra properties are to be allocated.
__ j(zero, &allocated);
__ Assert(positive, kPropertyAllocationCountFailed);
@@ -296,13 +328,13 @@ static void Generate_JSConstructStubHelper(MacroAssembler* masm,
// rdx: number of elements
{ Label loop, entry;
__ LoadRoot(rdx, Heap::kUndefinedValueRootIndex);
- __ lea(rcx, Operand(rdi, FixedArray::kHeaderSize));
+ __ leap(rcx, Operand(rdi, FixedArray::kHeaderSize));
__ jmp(&entry);
__ bind(&loop);
__ movp(Operand(rcx, 0), rdx);
- __ addq(rcx, Immediate(kPointerSize));
+ __ addp(rcx, Immediate(kPointerSize));
__ bind(&entry);
- __ cmpq(rcx, rax);
+ __ cmpp(rcx, rax);
__ j(below, &loop);
}
@@ -310,7 +342,7 @@ static void Generate_JSConstructStubHelper(MacroAssembler* masm,
// the JSObject
// rbx: JSObject
// rdi: FixedArray
- __ or_(rdi, Immediate(kHeapObjectTag)); // add the heap tag
+ __ orp(rdi, Immediate(kHeapObjectTag)); // add the heap tag
__ movp(FieldOperand(rbx, JSObject::kPropertiesOffset), rdi);
@@ -329,17 +361,50 @@ static void Generate_JSConstructStubHelper(MacroAssembler* masm,
// Allocate the new receiver object using the runtime call.
// rdi: function (constructor)
__ bind(&rt_call);
+ int offset = 0;
+ if (create_memento) {
+ // Get the cell or allocation site.
+ __ movp(rdi, Operand(rsp, kPointerSize*2));
+ __ Push(rdi);
+ offset = kPointerSize;
+ }
+
// Must restore rdi (constructor) before calling runtime.
- __ movp(rdi, Operand(rsp, 0));
- __ push(rdi);
- __ CallRuntime(Runtime::kNewObject, 1);
+ __ movp(rdi, Operand(rsp, offset));
+ __ Push(rdi);
+ if (create_memento) {
+ __ CallRuntime(Runtime::kHiddenNewObjectWithAllocationSite, 2);
+ } else {
+ __ CallRuntime(Runtime::kHiddenNewObject, 1);
+ }
__ movp(rbx, rax); // store result in rbx
+ // If we ended up using the runtime, and we want a memento, then the
+ // runtime call made it for us, and we shouldn't do create count
+ // increment.
+ Label count_incremented;
+ if (create_memento) {
+ __ jmp(&count_incremented);
+ }
+
// New object allocated.
// rbx: newly allocated object
__ bind(&allocated);
+
+ if (create_memento) {
+ __ movp(rcx, Operand(rsp, kPointerSize*2));
+ __ Cmp(rcx, masm->isolate()->factory()->undefined_value());
+ __ j(equal, &count_incremented);
+ // rcx is an AllocationSite. We are creating a memento from it, so we
+ // need to increment the memento create count.
+ __ SmiAddConstant(
+ FieldOperand(rcx, AllocationSite::kPretenureCreateCountOffset),
+ Smi::FromInt(1));
+ __ bind(&count_incremented);
+ }
+
// Retrieve the function from the stack.
- __ pop(rdi);
+ __ Pop(rdi);
// Retrieve smi-tagged arguments count from the stack.
__ movp(rax, Operand(rsp, 0));
@@ -348,20 +413,20 @@ static void Generate_JSConstructStubHelper(MacroAssembler* masm,
// Push the allocated receiver to the stack. We need two copies
// because we may have to return the original one and the calling
// conventions dictate that the called function pops the receiver.
- __ push(rbx);
- __ push(rbx);
+ __ Push(rbx);
+ __ Push(rbx);
// Set up pointer to last argument.
- __ lea(rbx, Operand(rbp, StandardFrameConstants::kCallerSPOffset));
+ __ leap(rbx, Operand(rbp, StandardFrameConstants::kCallerSPOffset));
// Copy arguments and receiver to the expression stack.
Label loop, entry;
__ movp(rcx, rax);
__ jmp(&entry);
__ bind(&loop);
- __ push(Operand(rbx, rcx, times_pointer_size, 0));
+ __ Push(Operand(rbx, rcx, times_pointer_size, 0));
__ bind(&entry);
- __ decq(rcx);
+ __ decp(rcx);
__ j(greater_equal, &loop);
// Call the function.
@@ -411,7 +476,7 @@ static void Generate_JSConstructStubHelper(MacroAssembler* masm,
// Remove caller arguments from the stack and return.
__ PopReturnAddressTo(rcx);
SmiIndex index = masm->SmiToIndex(rbx, rbx, kPointerSizeLog2);
- __ lea(rsp, Operand(rsp, index.reg, index.scale, 1 * kPointerSize));
+ __ leap(rsp, Operand(rsp, index.reg, index.scale, 1 * kPointerSize));
__ PushReturnAddressFrom(rcx);
Counters* counters = masm->isolate()->counters();
__ IncrementCounter(counters->constructed_objects(), 1);
@@ -420,17 +485,17 @@ static void Generate_JSConstructStubHelper(MacroAssembler* masm,
void Builtins::Generate_JSConstructStubCountdown(MacroAssembler* masm) {
- Generate_JSConstructStubHelper(masm, false, true);
+ Generate_JSConstructStubHelper(masm, false, true, false);
}
void Builtins::Generate_JSConstructStubGeneric(MacroAssembler* masm) {
- Generate_JSConstructStubHelper(masm, false, false);
+ Generate_JSConstructStubHelper(masm, false, false, FLAG_pretenuring_call_new);
}
void Builtins::Generate_JSConstructStubApi(MacroAssembler* masm) {
- Generate_JSConstructStubHelper(masm, true, false);
+ Generate_JSConstructStubHelper(masm, true, false, false);
}
@@ -470,8 +535,8 @@ static void Generate_JSEntryTrampolineHelper(MacroAssembler* masm,
__ movp(rsi, FieldOperand(rdx, JSFunction::kContextOffset));
// Push the function and the receiver onto the stack.
- __ push(rdx);
- __ push(r8);
+ __ Push(rdx);
+ __ Push(r8);
// Load the number of arguments and setup pointer to the arguments.
__ movp(rax, r9);
@@ -497,8 +562,8 @@ static void Generate_JSEntryTrampolineHelper(MacroAssembler* masm,
FrameScope scope(masm, StackFrame::INTERNAL);
// Push the function and receiver and setup the context.
- __ push(rdi);
- __ push(rdx);
+ __ Push(rdi);
+ __ Push(rdx);
__ movp(rsi, FieldOperand(rdi, JSFunction::kContextOffset));
// Load the number of arguments and setup pointer to the arguments.
@@ -524,18 +589,16 @@ static void Generate_JSEntryTrampolineHelper(MacroAssembler* masm,
__ jmp(&entry);
__ bind(&loop);
__ movp(kScratchRegister, Operand(rbx, rcx, times_pointer_size, 0));
- __ push(Operand(kScratchRegister, 0)); // dereference handle
- __ addq(rcx, Immediate(1));
+ __ Push(Operand(kScratchRegister, 0)); // dereference handle
+ __ addp(rcx, Immediate(1));
__ bind(&entry);
- __ cmpq(rcx, rax);
+ __ cmpp(rcx, rax);
__ j(not_equal, &loop);
// Invoke the code.
if (is_construct) {
// No type feedback cell is available
- Handle<Object> undefined_sentinel(
- masm->isolate()->factory()->undefined_value());
- __ Move(rbx, undefined_sentinel);
+ __ LoadRoot(rbx, Heap::kUndefinedValueRootIndex);
// Expects rdi to hold function pointer.
CallConstructStub stub(NO_CALL_FUNCTION_FLAGS);
__ CallStub(&stub);
@@ -565,7 +628,7 @@ void Builtins::Generate_JSConstructEntryTrampoline(MacroAssembler* masm) {
void Builtins::Generate_CompileUnoptimized(MacroAssembler* masm) {
- CallRuntimePassFunction(masm, Runtime::kCompileUnoptimized);
+ CallRuntimePassFunction(masm, Runtime::kHiddenCompileUnoptimized);
GenerateTailCallToReturnedCode(masm);
}
@@ -574,15 +637,15 @@ static void CallCompileOptimized(MacroAssembler* masm,
bool concurrent) {
FrameScope scope(masm, StackFrame::INTERNAL);
// Push a copy of the function onto the stack.
- __ push(rdi);
+ __ Push(rdi);
// Function is also the parameter to the runtime call.
- __ push(rdi);
+ __ Push(rdi);
// Whether to compile in a background thread.
__ Push(masm->isolate()->factory()->ToBoolean(concurrent));
- __ CallRuntime(Runtime::kCompileOptimized, 2);
+ __ CallRuntime(Runtime::kHiddenCompileOptimized, 2);
// Restore receiver.
- __ pop(rdi);
+ __ Pop(rdi);
}
@@ -607,7 +670,7 @@ static void GenerateMakeCodeYoungAgainCommon(MacroAssembler* masm) {
// Re-execute the code that was patched back to the young age when
// the stub returns.
- __ subq(Operand(rsp, 0), Immediate(5));
+ __ subp(Operand(rsp, 0), Immediate(5));
__ Pushad();
__ Move(arg_reg_2, ExternalReference::isolate_address(masm->isolate()));
__ movp(arg_reg_1, Operand(rsp, kNumSafepointRegisters * kPointerSize));
@@ -643,7 +706,7 @@ void Builtins::Generate_MarkCodeAsExecutedOnce(MacroAssembler* masm) {
__ Pushad();
__ Move(arg_reg_2, ExternalReference::isolate_address(masm->isolate()));
__ movp(arg_reg_1, Operand(rsp, kNumSafepointRegisters * kPointerSize));
- __ subq(arg_reg_1, Immediate(Assembler::kShortCallInstructionLength));
+ __ subp(arg_reg_1, Immediate(Assembler::kShortCallInstructionLength));
{ // NOLINT
FrameScope scope(masm, StackFrame::MANUAL);
__ PrepareCallCFunction(2);
@@ -655,10 +718,10 @@ void Builtins::Generate_MarkCodeAsExecutedOnce(MacroAssembler* masm) {
// Perform prologue operations usually performed by the young code stub.
__ PopReturnAddressTo(kScratchRegister);
- __ push(rbp); // Caller's frame pointer.
+ __ pushq(rbp); // Caller's frame pointer.
__ movp(rbp, rsp);
- __ push(rsi); // Callee's context.
- __ push(rdi); // Callee's JS Function.
+ __ Push(rsi); // Callee's context.
+ __ Push(rdi); // Callee's JS Function.
__ PushReturnAddressFrom(kScratchRegister);
// Jump to point after the code-age stub.
@@ -681,12 +744,12 @@ static void Generate_NotifyStubFailureHelper(MacroAssembler* masm,
// stubs that tail call the runtime on deopts passing their parameters in
// registers.
__ Pushad();
- __ CallRuntime(Runtime::kNotifyStubFailure, 0, save_doubles);
+ __ CallRuntime(Runtime::kHiddenNotifyStubFailure, 0, save_doubles);
__ Popad();
// Tear down internal frame.
}
- __ pop(MemOperand(rsp, 0)); // Ignore state offset
+ __ Pop(MemOperand(rsp, 0)); // Ignore state offset
__ ret(0); // Return to IC Miss stub, continuation still on stack.
}
@@ -710,7 +773,7 @@ static void Generate_NotifyDeoptimizedHelper(MacroAssembler* masm,
// Pass the deoptimization type to the runtime system.
__ Push(Smi::FromInt(static_cast<int>(type)));
- __ CallRuntime(Runtime::kNotifyDeoptimized, 1);
+ __ CallRuntime(Runtime::kHiddenNotifyDeoptimized, 1);
// Tear down internal frame.
}
@@ -719,13 +782,13 @@ static void Generate_NotifyDeoptimizedHelper(MacroAssembler* masm,
// Switch on the state.
Label not_no_registers, not_tos_rax;
- __ cmpq(kScratchRegister, Immediate(FullCodeGenerator::NO_REGISTERS));
+ __ cmpp(kScratchRegister, Immediate(FullCodeGenerator::NO_REGISTERS));
__ j(not_equal, &not_no_registers, Label::kNear);
__ ret(1 * kPointerSize); // Remove state.
__ bind(&not_no_registers);
__ movp(rax, Operand(rsp, kPCOnStackSize + kPointerSize));
- __ cmpq(kScratchRegister, Immediate(FullCodeGenerator::TOS_REG));
+ __ cmpp(kScratchRegister, Immediate(FullCodeGenerator::TOS_REG));
__ j(not_equal, &not_tos_rax, Label::kNear);
__ ret(2 * kPointerSize); // Remove state, rax.
@@ -762,12 +825,12 @@ void Builtins::Generate_FunctionCall(MacroAssembler* masm) {
//
// 1. Make sure we have at least one argument.
{ Label done;
- __ testq(rax, rax);
+ __ testp(rax, rax);
__ j(not_zero, &done);
__ PopReturnAddressTo(rbx);
__ Push(masm->isolate()->factory()->undefined_value());
__ PushReturnAddressFrom(rbx);
- __ incq(rax);
+ __ incp(rax);
__ bind(&done);
}
@@ -799,7 +862,7 @@ void Builtins::Generate_FunctionCall(MacroAssembler* masm) {
Immediate(1 << SharedFunctionInfo::kNativeBitWithinByte));
__ j(not_zero, &shift_arguments);
- // Compute the receiver in non-strict mode.
+ // Compute the receiver in sloppy mode.
__ movp(rbx, args.GetArgumentOperand(1));
__ JumpIfSmi(rbx, &convert_to_object, Label::kNear);
@@ -817,14 +880,14 @@ void Builtins::Generate_FunctionCall(MacroAssembler* masm) {
// Enter an internal frame in order to preserve argument count.
FrameScope scope(masm, StackFrame::INTERNAL);
__ Integer32ToSmi(rax, rax);
- __ push(rax);
+ __ Push(rax);
- __ push(rbx);
+ __ Push(rbx);
__ InvokeBuiltin(Builtins::TO_OBJECT, CALL_FUNCTION);
__ movp(rbx, rax);
__ Set(rdx, 0); // indicate regular JS_FUNCTION
- __ pop(rax);
+ __ Pop(rax);
__ SmiToInteger32(rax, rax);
}
@@ -866,25 +929,25 @@ void Builtins::Generate_FunctionCall(MacroAssembler* masm) {
__ bind(&loop);
__ movp(rbx, Operand(rsp, rcx, times_pointer_size, 0));
__ movp(Operand(rsp, rcx, times_pointer_size, 1 * kPointerSize), rbx);
- __ decq(rcx);
+ __ decp(rcx);
__ j(not_sign, &loop); // While non-negative (to copy return address).
- __ pop(rbx); // Discard copy of return address.
- __ decq(rax); // One fewer argument (first argument is new receiver).
+ __ popq(rbx); // Discard copy of return address.
+ __ decp(rax); // One fewer argument (first argument is new receiver).
}
// 5a. Call non-function via tail call to CALL_NON_FUNCTION builtin,
// or a function proxy via CALL_FUNCTION_PROXY.
{ Label function, non_proxy;
- __ testq(rdx, rdx);
+ __ testp(rdx, rdx);
__ j(zero, &function);
__ Set(rbx, 0);
- __ cmpq(rdx, Immediate(1));
+ __ cmpp(rdx, Immediate(1));
__ j(not_equal, &non_proxy);
__ PopReturnAddressTo(rdx);
- __ push(rdi); // re-add proxy object as additional argument
+ __ Push(rdi); // re-add proxy object as additional argument
__ PushReturnAddressFrom(rdx);
- __ incq(rax);
+ __ incp(rax);
__ GetBuiltinEntry(rdx, Builtins::CALL_FUNCTION_PROXY);
__ jmp(masm->isolate()->builtins()->ArgumentsAdaptorTrampoline(),
RelocInfo::CODE_TARGET);
@@ -904,7 +967,7 @@ void Builtins::Generate_FunctionCall(MacroAssembler* masm) {
FieldOperand(rdx,
SharedFunctionInfo::kFormalParameterCountOffset));
__ movp(rdx, FieldOperand(rdi, JSFunction::kCodeEntryOffset));
- __ cmpq(rax, rbx);
+ __ cmpp(rax, rbx);
__ j(not_equal,
masm->isolate()->builtins()->ArgumentsAdaptorTrampoline(),
RelocInfo::CODE_TARGET);
@@ -932,8 +995,8 @@ void Builtins::Generate_FunctionApply(MacroAssembler* masm) {
static const int kReceiverOffset = kArgumentsOffset + kPointerSize;
static const int kFunctionOffset = kReceiverOffset + kPointerSize;
- __ push(Operand(rbp, kFunctionOffset));
- __ push(Operand(rbp, kArgumentsOffset));
+ __ Push(Operand(rbp, kFunctionOffset));
+ __ Push(Operand(rbp, kArgumentsOffset));
__ InvokeBuiltin(Builtins::APPLY_PREPARE, CALL_FUNCTION);
// Check the stack for overflow. We are not trying to catch
@@ -944,17 +1007,17 @@ void Builtins::Generate_FunctionApply(MacroAssembler* masm) {
__ movp(rcx, rsp);
// Make rcx the space we have left. The stack might already be overflowed
// here which will cause rcx to become negative.
- __ subq(rcx, kScratchRegister);
+ __ subp(rcx, kScratchRegister);
// Make rdx the space we need for the array when it is unrolled onto the
// stack.
__ PositiveSmiTimesPowerOfTwoToInteger64(rdx, rax, kPointerSizeLog2);
// Check if the arguments will overflow the stack.
- __ cmpq(rcx, rdx);
+ __ cmpp(rcx, rdx);
__ j(greater, &okay); // Signed comparison.
// Out of stack space.
- __ push(Operand(rbp, kFunctionOffset));
- __ push(rax);
+ __ Push(Operand(rbp, kFunctionOffset));
+ __ Push(rax);
__ InvokeBuiltin(Builtins::APPLY_OVERFLOW, CALL_FUNCTION);
__ bind(&okay);
// End of stack check.
@@ -963,8 +1026,8 @@ void Builtins::Generate_FunctionApply(MacroAssembler* masm) {
const int kLimitOffset =
StandardFrameConstants::kExpressionsOffset - 1 * kPointerSize;
const int kIndexOffset = kLimitOffset - 1 * kPointerSize;
- __ push(rax); // limit
- __ push(Immediate(0)); // index
+ __ Push(rax); // limit
+ __ Push(Immediate(0)); // index
// Get the receiver.
__ movp(rbx, Operand(rbp, kReceiverOffset));
@@ -990,7 +1053,7 @@ void Builtins::Generate_FunctionApply(MacroAssembler* masm) {
Immediate(1 << SharedFunctionInfo::kNativeBitWithinByte));
__ j(not_equal, &push_receiver);
- // Compute the receiver in non-strict mode.
+ // Compute the receiver in sloppy mode.
__ JumpIfSmi(rbx, &call_to_object, Label::kNear);
__ CompareRoot(rbx, Heap::kNullValueRootIndex);
__ j(equal, &use_global_receiver);
@@ -1005,7 +1068,7 @@ void Builtins::Generate_FunctionApply(MacroAssembler* masm) {
// Convert the receiver to an object.
__ bind(&call_to_object);
- __ push(rbx);
+ __ Push(rbx);
__ InvokeBuiltin(Builtins::TO_OBJECT, CALL_FUNCTION);
__ movp(rbx, rax);
__ jmp(&push_receiver, Label::kNear);
@@ -1017,7 +1080,7 @@ void Builtins::Generate_FunctionApply(MacroAssembler* masm) {
// Push the receiver.
__ bind(&push_receiver);
- __ push(rbx);
+ __ Push(rbx);
// Copy all arguments from the array to the stack.
Label entry, loop;
@@ -1036,7 +1099,7 @@ void Builtins::Generate_FunctionApply(MacroAssembler* masm) {
// case, we know that we are not generating a test instruction next.
// Push the nth argument.
- __ push(rax);
+ __ Push(rax);
// Update the index on the stack and in register rax.
__ movp(rax, Operand(rbp, kIndexOffset));
@@ -1044,7 +1107,7 @@ void Builtins::Generate_FunctionApply(MacroAssembler* masm) {
__ movp(Operand(rbp, kIndexOffset), rax);
__ bind(&entry);
- __ cmpq(rax, Operand(rbp, kLimitOffset));
+ __ cmpp(rax, Operand(rbp, kLimitOffset));
__ j(not_equal, &loop);
// Call the function.
@@ -1061,8 +1124,8 @@ void Builtins::Generate_FunctionApply(MacroAssembler* masm) {
// Call the function proxy.
__ bind(&call_proxy);
- __ push(rdi); // add function proxy as last argument
- __ incq(rax);
+ __ Push(rdi); // add function proxy as last argument
+ __ incp(rax);
__ Set(rbx, 0);
__ GetBuiltinEntry(rdx, Builtins::CALL_FUNCTION_PROXY);
__ call(masm->isolate()->builtins()->ArgumentsAdaptorTrampoline(),
@@ -1128,10 +1191,7 @@ void Builtins::Generate_ArrayCode(MacroAssembler* masm) {
// Run the native code for the Array function called as a normal function.
// tail call a stub
- Handle<Object> undefined_sentinel(
- masm->isolate()->heap()->undefined_value(),
- masm->isolate());
- __ Move(rbx, undefined_sentinel);
+ __ LoadRoot(rbx, Heap::kUndefinedValueRootIndex);
ArrayConstructorStub stub(masm->isolate());
__ TailCallStub(&stub);
}
@@ -1150,7 +1210,7 @@ void Builtins::Generate_StringConstructCode(MacroAssembler* masm) {
if (FLAG_debug_code) {
__ LoadGlobalFunction(Context::STRING_FUNCTION_INDEX, rcx);
- __ cmpq(rdi, rcx);
+ __ cmpp(rdi, rcx);
__ Assert(equal, kUnexpectedStringFunction);
}
@@ -1158,11 +1218,11 @@ void Builtins::Generate_StringConstructCode(MacroAssembler* masm) {
// (including the receiver).
StackArgumentsAccessor args(rsp, rax);
Label no_arguments;
- __ testq(rax, rax);
+ __ testp(rax, rax);
__ j(zero, &no_arguments);
__ movp(rbx, args.GetArgumentOperand(1));
__ PopReturnAddressTo(rcx);
- __ lea(rsp, Operand(rsp, rax, times_pointer_size, kPointerSize));
+ __ leap(rsp, Operand(rsp, rax, times_pointer_size, kPointerSize));
__ PushReturnAddressFrom(rcx);
__ movp(rax, rbx);
@@ -1233,10 +1293,10 @@ void Builtins::Generate_StringConstructCode(MacroAssembler* masm) {
__ IncrementCounter(counters->string_ctor_conversions(), 1);
{
FrameScope scope(masm, StackFrame::INTERNAL);
- __ push(rdi); // Preserve the function.
- __ push(rax);
+ __ Push(rdi); // Preserve the function.
+ __ Push(rax);
__ InvokeBuiltin(Builtins::TO_STRING, CALL_FUNCTION);
- __ pop(rdi);
+ __ Pop(rdi);
}
__ movp(rbx, rax);
__ jmp(&argument_is_string);
@@ -1246,7 +1306,7 @@ void Builtins::Generate_StringConstructCode(MacroAssembler* masm) {
__ bind(&no_arguments);
__ LoadRoot(rbx, Heap::kempty_stringRootIndex);
__ PopReturnAddressTo(rcx);
- __ lea(rsp, Operand(rsp, kPointerSize));
+ __ leap(rsp, Operand(rsp, kPointerSize));
__ PushReturnAddressFrom(rcx);
__ jmp(&argument_is_string);
@@ -1256,7 +1316,7 @@ void Builtins::Generate_StringConstructCode(MacroAssembler* masm) {
__ IncrementCounter(counters->string_ctor_gc_required(), 1);
{
FrameScope scope(masm, StackFrame::INTERNAL);
- __ push(rbx);
+ __ Push(rbx);
__ CallRuntime(Runtime::kNewStringWrapper, 1);
}
__ ret(0);
@@ -1264,20 +1324,20 @@ void Builtins::Generate_StringConstructCode(MacroAssembler* masm) {
static void EnterArgumentsAdaptorFrame(MacroAssembler* masm) {
- __ push(rbp);
+ __ pushq(rbp);
__ movp(rbp, rsp);
// Store the arguments adaptor context sentinel.
__ Push(Smi::FromInt(StackFrame::ARGUMENTS_ADAPTOR));
// Push the function on the stack.
- __ push(rdi);
+ __ Push(rdi);
// Preserve the number of arguments on the stack. Must preserve rax,
// rbx and rcx because these registers are used when copying the
// arguments and the receiver.
__ Integer32ToSmi(r8, rax);
- __ push(r8);
+ __ Push(r8);
}
@@ -1287,12 +1347,12 @@ static void LeaveArgumentsAdaptorFrame(MacroAssembler* masm) {
// Leave the frame.
__ movp(rsp, rbp);
- __ pop(rbp);
+ __ popq(rbp);
// Remove caller arguments from the stack.
__ PopReturnAddressTo(rcx);
SmiIndex index = masm->SmiToIndex(rbx, rbx, kPointerSizeLog2);
- __ lea(rsp, Operand(rsp, index.reg, index.scale, 1 * kPointerSize));
+ __ leap(rsp, Operand(rsp, index.reg, index.scale, 1 * kPointerSize));
__ PushReturnAddressFrom(rcx);
}
@@ -1310,9 +1370,9 @@ void Builtins::Generate_ArgumentsAdaptorTrampoline(MacroAssembler* masm) {
Label enough, too_few;
__ movp(rdx, FieldOperand(rdi, JSFunction::kCodeEntryOffset));
- __ cmpq(rax, rbx);
+ __ cmpp(rax, rbx);
__ j(less, &too_few);
- __ cmpq(rbx, Immediate(SharedFunctionInfo::kDontAdaptArgumentsSentinel));
+ __ cmpp(rbx, Immediate(SharedFunctionInfo::kDontAdaptArgumentsSentinel));
__ j(equal, &dont_adapt_arguments);
{ // Enough parameters: Actual >= expected.
@@ -1321,15 +1381,15 @@ void Builtins::Generate_ArgumentsAdaptorTrampoline(MacroAssembler* masm) {
// Copy receiver and all expected arguments.
const int offset = StandardFrameConstants::kCallerSPOffset;
- __ lea(rax, Operand(rbp, rax, times_pointer_size, offset));
+ __ leap(rax, Operand(rbp, rax, times_pointer_size, offset));
__ Set(r8, -1); // account for receiver
Label copy;
__ bind(&copy);
- __ incq(r8);
- __ push(Operand(rax, 0));
- __ subq(rax, Immediate(kPointerSize));
- __ cmpq(r8, rbx);
+ __ incp(r8);
+ __ Push(Operand(rax, 0));
+ __ subp(rax, Immediate(kPointerSize));
+ __ cmpp(r8, rbx);
__ j(less, &copy);
__ jmp(&invoke);
}
@@ -1340,24 +1400,24 @@ void Builtins::Generate_ArgumentsAdaptorTrampoline(MacroAssembler* masm) {
// Copy receiver and all actual arguments.
const int offset = StandardFrameConstants::kCallerSPOffset;
- __ lea(rdi, Operand(rbp, rax, times_pointer_size, offset));
+ __ leap(rdi, Operand(rbp, rax, times_pointer_size, offset));
__ Set(r8, -1); // account for receiver
Label copy;
__ bind(&copy);
- __ incq(r8);
- __ push(Operand(rdi, 0));
- __ subq(rdi, Immediate(kPointerSize));
- __ cmpq(r8, rax);
+ __ incp(r8);
+ __ Push(Operand(rdi, 0));
+ __ subp(rdi, Immediate(kPointerSize));
+ __ cmpp(r8, rax);
__ j(less, &copy);
// Fill remaining expected arguments with undefined values.
Label fill;
__ LoadRoot(kScratchRegister, Heap::kUndefinedValueRootIndex);
__ bind(&fill);
- __ incq(r8);
- __ push(kScratchRegister);
- __ cmpq(r8, rbx);
+ __ incp(r8);
+ __ Push(kScratchRegister);
+ __ cmpp(r8, rbx);
__ j(less, &fill);
// Restore function pointer.
@@ -1389,13 +1449,13 @@ void Builtins::Generate_OnStackReplacement(MacroAssembler* masm) {
{
FrameScope scope(masm, StackFrame::INTERNAL);
// Pass function as argument.
- __ push(rax);
+ __ Push(rax);
__ CallRuntime(Runtime::kCompileForOnStackReplacement, 1);
}
Label skip;
// If the code object is null, just return to the unoptimized code.
- __ cmpq(rax, Immediate(0));
+ __ cmpp(rax, Immediate(0));
__ j(not_equal, &skip, Label::kNear);
__ ret(0);
@@ -1409,7 +1469,7 @@ void Builtins::Generate_OnStackReplacement(MacroAssembler* masm) {
DeoptimizationInputData::kOsrPcOffsetIndex) - kHeapObjectTag));
// Compute the target address = code_obj + header_size + osr_offset
- __ lea(rax, Operand(rax, rbx, times_1, Code::kHeaderSize - kHeapObjectTag));
+ __ leap(rax, Operand(rax, rbx, times_1, Code::kHeaderSize - kHeapObjectTag));
// Overwrite the return address on the stack.
__ movq(StackOperandForReturnAddress(0), rax);
@@ -1426,7 +1486,7 @@ void Builtins::Generate_OsrAfterStackCheck(MacroAssembler* masm) {
__ j(above_equal, &ok);
{
FrameScope scope(masm, StackFrame::INTERNAL);
- __ CallRuntime(Runtime::kStackGuard, 0);
+ __ CallRuntime(Runtime::kHiddenStackGuard, 0);
}
__ jmp(masm->isolate()->builtins()->OnStackReplacement(),
RelocInfo::CODE_TARGET);
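// Illustrative sketch, not part of the upstream diff: the memento-aware
// allocation performed by Generate_JSConstructStubHelper above, using the
// register roles from that code (rbx = untagged object start, rdi = end of
// the allocation, rsi = memento start when create_memento is set):
//
//   rbx                                            rsi                 rdi
//    | JSObject: map, properties, elements, fields | AllocationMemento |
//      memento.map  -> allocation_memento_map
//      memento.site -> value loaded from rsp[2 * kPointerSize]
//
// When the slow path is taken instead, kHiddenNewObjectWithAllocationSite
// creates the memento in the runtime, and the stub skips its own
// pretenure-create-count increment by jumping to the count_incremented label.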
diff --git a/deps/v8/src/x64/code-stubs-x64.cc b/deps/v8/src/x64/code-stubs-x64.cc
index 075964bcee..c949a423a2 100644
--- a/deps/v8/src/x64/code-stubs-x64.cc
+++ b/deps/v8/src/x64/code-stubs-x64.cc
@@ -46,7 +46,7 @@ void FastNewClosureStub::InitializeInterfaceDescriptor(
descriptor->register_param_count_ = 1;
descriptor->register_params_ = registers;
descriptor->deoptimization_handler_ =
- Runtime::FunctionForId(Runtime::kNewClosureFromStubFailure)->entry;
+ Runtime::FunctionForId(Runtime::kHiddenNewClosureFromStubFailure)->entry;
}
@@ -77,7 +77,7 @@ void NumberToStringStub::InitializeInterfaceDescriptor(
descriptor->register_param_count_ = 1;
descriptor->register_params_ = registers;
descriptor->deoptimization_handler_ =
- Runtime::FunctionForId(Runtime::kNumberToString)->entry;
+ Runtime::FunctionForId(Runtime::kHiddenNumberToString)->entry;
}
@@ -88,7 +88,8 @@ void FastCloneShallowArrayStub::InitializeInterfaceDescriptor(
descriptor->register_param_count_ = 3;
descriptor->register_params_ = registers;
descriptor->deoptimization_handler_ =
- Runtime::FunctionForId(Runtime::kCreateArrayLiteralStubBailout)->entry;
+ Runtime::FunctionForId(
+ Runtime::kHiddenCreateArrayLiteralStubBailout)->entry;
}
@@ -99,15 +100,15 @@ void FastCloneShallowObjectStub::InitializeInterfaceDescriptor(
descriptor->register_param_count_ = 4;
descriptor->register_params_ = registers;
descriptor->deoptimization_handler_ =
- Runtime::FunctionForId(Runtime::kCreateObjectLiteral)->entry;
+ Runtime::FunctionForId(Runtime::kHiddenCreateObjectLiteral)->entry;
}
void CreateAllocationSiteStub::InitializeInterfaceDescriptor(
Isolate* isolate,
CodeStubInterfaceDescriptor* descriptor) {
- static Register registers[] = { rbx };
- descriptor->register_param_count_ = 1;
+ static Register registers[] = { rbx, rdx };
+ descriptor->register_param_count_ = 2;
descriptor->register_params_ = registers;
descriptor->deoptimization_handler_ = NULL;
}
@@ -142,7 +143,7 @@ void RegExpConstructResultStub::InitializeInterfaceDescriptor(
descriptor->register_param_count_ = 3;
descriptor->register_params_ = registers;
descriptor->deoptimization_handler_ =
- Runtime::FunctionForId(Runtime::kRegExpConstructResult)->entry;
+ Runtime::FunctionForId(Runtime::kHiddenRegExpConstructResult)->entry;
}
@@ -166,6 +167,26 @@ void KeyedLoadFieldStub::InitializeInterfaceDescriptor(
}
+void StringLengthStub::InitializeInterfaceDescriptor(
+ Isolate* isolate,
+ CodeStubInterfaceDescriptor* descriptor) {
+ static Register registers[] = { rax, rcx };
+ descriptor->register_param_count_ = 2;
+ descriptor->register_params_ = registers;
+ descriptor->deoptimization_handler_ = NULL;
+}
+
+
+void KeyedStringLengthStub::InitializeInterfaceDescriptor(
+ Isolate* isolate,
+ CodeStubInterfaceDescriptor* descriptor) {
+ static Register registers[] = { rdx, rax };
+ descriptor->register_param_count_ = 2;
+ descriptor->register_params_ = registers;
+ descriptor->deoptimization_handler_ = NULL;
+}
+
+
void KeyedStoreFastElementStub::InitializeInterfaceDescriptor(
Isolate* isolate,
CodeStubInterfaceDescriptor* descriptor) {
@@ -213,7 +234,7 @@ static void InitializeArrayConstructorDescriptor(
descriptor->hint_stack_parameter_count_ = constant_stack_parameter_count;
descriptor->function_mode_ = JS_FUNCTION_STUB_MODE;
descriptor->deoptimization_handler_ =
- Runtime::FunctionForId(Runtime::kArrayConstructor)->entry;
+ Runtime::FunctionForId(Runtime::kHiddenArrayConstructor)->entry;
}
@@ -241,7 +262,7 @@ static void InitializeInternalArrayConstructorDescriptor(
descriptor->hint_stack_parameter_count_ = constant_stack_parameter_count;
descriptor->function_mode_ = JS_FUNCTION_STUB_MODE;
descriptor->deoptimization_handler_ =
- Runtime::FunctionForId(Runtime::kInternalArrayConstructor)->entry;
+ Runtime::FunctionForId(Runtime::kHiddenInternalArrayConstructor)->entry;
}
@@ -365,7 +386,7 @@ void StringAddStub::InitializeInterfaceDescriptor(
descriptor->register_param_count_ = 2;
descriptor->register_params_ = registers;
descriptor->deoptimization_handler_ =
- Runtime::FunctionForId(Runtime::kStringAdd)->entry;
+ Runtime::FunctionForId(Runtime::kHiddenStringAdd)->entry;
}
@@ -470,7 +491,7 @@ void HydrogenCodeStub::GenerateLightweightMiss(MacroAssembler* masm) {
rax.is(descriptor->register_params_[param_count - 1]));
// Push arguments
for (int i = 0; i < param_count; ++i) {
- __ push(descriptor->register_params_[i]);
+ __ Push(descriptor->register_params_[i]);
}
ExternalReference miss = descriptor->miss_handler();
__ CallExternalReference(miss, descriptor->register_param_count_);
@@ -521,7 +542,7 @@ void DoubleToIStub::Generate(MacroAssembler* masm) {
int double_offset = offset();
// Account for return address and saved regs if input is rsp.
- if (input_reg.is(rsp)) double_offset += 3 * kPointerSize;
+ if (input_reg.is(rsp)) double_offset += 3 * kRegisterSize;
MemOperand mantissa_operand(MemOperand(input_reg, double_offset));
MemOperand exponent_operand(MemOperand(input_reg,
@@ -541,14 +562,14 @@ void DoubleToIStub::Generate(MacroAssembler* masm) {
// is the return register, then save the temp register we use in its stead
// for the result.
Register save_reg = final_result_reg.is(rcx) ? rax : rcx;
- __ push(scratch1);
- __ push(save_reg);
+ __ pushq(scratch1);
+ __ pushq(save_reg);
bool stash_exponent_copy = !input_reg.is(rsp);
__ movl(scratch1, mantissa_operand);
__ movsd(xmm0, mantissa_operand);
__ movl(rcx, exponent_operand);
- if (stash_exponent_copy) __ push(rcx);
+ if (stash_exponent_copy) __ pushq(rcx);
__ andl(rcx, Immediate(HeapNumber::kExponentMask));
__ shrl(rcx, Immediate(HeapNumber::kExponentShift));
@@ -583,14 +604,14 @@ void DoubleToIStub::Generate(MacroAssembler* masm) {
// Restore registers
__ bind(&done);
if (stash_exponent_copy) {
- __ addq(rsp, Immediate(kDoubleSize));
+ __ addp(rsp, Immediate(kDoubleSize));
}
if (!final_result_reg.is(result_reg)) {
ASSERT(final_result_reg.is(rcx));
__ movl(final_result_reg, result_reg);
}
- __ pop(save_reg);
- __ pop(scratch1);
+ __ popq(save_reg);
+ __ popq(scratch1);
__ ret(0);
}
@@ -601,14 +622,14 @@ void FloatingPointHelper::LoadSSE2UnknownOperands(MacroAssembler* masm,
// Load operand in rdx into xmm0, or branch to not_numbers.
__ LoadRoot(rcx, Heap::kHeapNumberMapRootIndex);
__ JumpIfSmi(rdx, &load_smi_rdx);
- __ cmpq(FieldOperand(rdx, HeapObject::kMapOffset), rcx);
+ __ cmpp(FieldOperand(rdx, HeapObject::kMapOffset), rcx);
__ j(not_equal, not_numbers); // Argument in rdx is not a number.
__ movsd(xmm0, FieldOperand(rdx, HeapNumber::kValueOffset));
// Load operand in rax into xmm1, or branch to not_numbers.
__ JumpIfSmi(rax, &load_smi_rax);
__ bind(&load_nonsmi_rax);
- __ cmpq(FieldOperand(rax, HeapObject::kMapOffset), rcx);
+ __ cmpp(FieldOperand(rax, HeapObject::kMapOffset), rcx);
__ j(not_equal, not_numbers);
__ movsd(xmm1, FieldOperand(rax, HeapNumber::kValueOffset));
__ jmp(&done);
@@ -689,8 +710,8 @@ void MathPowStub::Generate(MacroAssembler* masm) {
__ bind(&try_arithmetic_simplification);
__ cvttsd2si(exponent, double_exponent);
// Skip to runtime if possibly NaN (indicated by the indefinite integer).
- __ cmpl(exponent, Immediate(0x80000000u));
- __ j(equal, &call_runtime);
+ __ cmpl(exponent, Immediate(0x1));
+ __ j(overflow, &call_runtime);
if (exponent_type_ == ON_STACK) {
// Detect square root case. Crankshaft detects constant +/-0.5 at
@@ -767,7 +788,7 @@ void MathPowStub::Generate(MacroAssembler* masm) {
__ bind(&fast_power);
__ fnclex(); // Clear flags to catch exceptions later.
// Transfer (B)ase and (E)xponent onto the FPU register stack.
- __ subq(rsp, Immediate(kDoubleSize));
+ __ subp(rsp, Immediate(kDoubleSize));
__ movsd(Operand(rsp, 0), double_exponent);
__ fld_d(Operand(rsp, 0)); // E
__ movsd(Operand(rsp, 0), double_base);
@@ -794,12 +815,12 @@ void MathPowStub::Generate(MacroAssembler* masm) {
__ j(not_zero, &fast_power_failed, Label::kNear);
__ fstp_d(Operand(rsp, 0));
__ movsd(double_result, Operand(rsp, 0));
- __ addq(rsp, Immediate(kDoubleSize));
+ __ addp(rsp, Immediate(kDoubleSize));
__ jmp(&done);
__ bind(&fast_power_failed);
__ fninit();
- __ addq(rsp, Immediate(kDoubleSize));
+ __ addp(rsp, Immediate(kDoubleSize));
__ jmp(&call_runtime);
}
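The earlier cmpl/j(overflow) change in this stub deserves a note: cvttsd2si reports an invalid conversion (NaN, or a value outside int32 range) by producing the "integer indefinite" 0x80000000, and comparing that value against 1 is the only case in which the subtraction overflows, so the overflow flag becomes a one-instruction test for it. In C terms (a sketch of the check, not of the stub):

    #include <cstdint>
    #include <limits>

    // True exactly when cvttsd2si produced the x86 "integer indefinite".
    bool TruncationFailed(int32_t truncated) {
      // cmpl(truncated, Immediate(1)) overflows only for INT32_MIN - 1.
      return truncated == std::numeric_limits<int32_t>::min();
    }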
@@ -913,99 +934,6 @@ void FunctionPrototypeStub::Generate(MacroAssembler* masm) {
}
-void StringLengthStub::Generate(MacroAssembler* masm) {
- Label miss;
- Register receiver;
- if (kind() == Code::KEYED_LOAD_IC) {
- // ----------- S t a t e -------------
- // -- rax : key
- // -- rdx : receiver
- // -- rsp[0] : return address
- // -----------------------------------
- __ Cmp(rax, masm->isolate()->factory()->length_string());
- __ j(not_equal, &miss);
- receiver = rdx;
- } else {
- ASSERT(kind() == Code::LOAD_IC);
- // ----------- S t a t e -------------
- // -- rax : receiver
- // -- rcx : name
- // -- rsp[0] : return address
- // -----------------------------------
- receiver = rax;
- }
-
- StubCompiler::GenerateLoadStringLength(masm, receiver, r8, r9, &miss);
- __ bind(&miss);
- StubCompiler::TailCallBuiltin(
- masm, BaseLoadStoreStubCompiler::MissBuiltin(kind()));
-}
-
-
-void StoreArrayLengthStub::Generate(MacroAssembler* masm) {
- // ----------- S t a t e -------------
- // -- rax : value
- // -- rcx : key
- // -- rdx : receiver
- // -- rsp[0] : return address
- // -----------------------------------
- //
- // This accepts as a receiver anything JSArray::SetElementsLength accepts
- // (currently anything except for external arrays which means anything with
- // elements of FixedArray type). Value must be a number, but only smis are
- // accepted as the most common case.
-
- Label miss;
-
- Register receiver = rdx;
- Register value = rax;
- Register scratch = rbx;
- if (kind() == Code::KEYED_STORE_IC) {
- __ Cmp(rcx, masm->isolate()->factory()->length_string());
- __ j(not_equal, &miss);
- }
-
- // Check that the receiver isn't a smi.
- __ JumpIfSmi(receiver, &miss);
-
- // Check that the object is a JS array.
- __ CmpObjectType(receiver, JS_ARRAY_TYPE, scratch);
- __ j(not_equal, &miss);
-
- // Check that elements are FixedArray.
- // We rely on StoreIC_ArrayLength below to deal with all types of
- // fast elements (including COW).
- __ movp(scratch, FieldOperand(receiver, JSArray::kElementsOffset));
- __ CmpObjectType(scratch, FIXED_ARRAY_TYPE, scratch);
- __ j(not_equal, &miss);
-
- // Check that the array has fast properties, otherwise the length
- // property might have been redefined.
- __ movp(scratch, FieldOperand(receiver, JSArray::kPropertiesOffset));
- __ CompareRoot(FieldOperand(scratch, FixedArray::kMapOffset),
- Heap::kHashTableMapRootIndex);
- __ j(equal, &miss);
-
- // Check that value is a smi.
- __ JumpIfNotSmi(value, &miss);
-
- // Prepare tail call to StoreIC_ArrayLength.
- __ PopReturnAddressTo(scratch);
- __ push(receiver);
- __ push(value);
- __ PushReturnAddressFrom(scratch);
-
- ExternalReference ref =
- ExternalReference(IC_Utility(IC::kStoreIC_ArrayLength), masm->isolate());
- __ TailCallExternalReference(ref, 2, 1);
-
- __ bind(&miss);
-
- StubCompiler::TailCallBuiltin(
- masm, BaseLoadStoreStubCompiler::MissBuiltin(kind()));
-}
-
-
void ArgumentsAccessStub::GenerateReadElement(MacroAssembler* masm) {
// The key is in rdx and the parameter count is in rax.
@@ -1026,7 +954,7 @@ void ArgumentsAccessStub::GenerateReadElement(MacroAssembler* masm) {
// Check index against formal parameters count limit passed in
// through register rax. Use unsigned comparison to get negative
// check for free.
- __ cmpq(rdx, rax);
+ __ cmpp(rdx, rax);
__ j(above_equal, &slow);
// Read the argument from the stack and return it.
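The "negative check for free" comment above relies on a standard trick: reinterpreting a possibly negative index as unsigned turns any negative value into something far above the parameter count, so a single above_equal branch rejects both out-of-range and negative keys. As plain C++ (a sketch, assuming untagged values):

    #include <cstdint>

    bool KeyInRange(int64_t key, uint64_t parameter_count) {
      // Negative keys wrap to huge unsigned values and fail the same test.
      return static_cast<uint64_t>(key) < parameter_count;
    }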
@@ -1041,7 +969,7 @@ void ArgumentsAccessStub::GenerateReadElement(MacroAssembler* masm) {
// comparison to get negative check for free.
__ bind(&adaptor);
__ movp(rcx, Operand(rbx, ArgumentsAdaptorFrameConstants::kLengthOffset));
- __ cmpq(rdx, rcx);
+ __ cmpp(rdx, rcx);
__ j(above_equal, &slow);
// Read the argument from the stack and return it.
@@ -1056,13 +984,13 @@ void ArgumentsAccessStub::GenerateReadElement(MacroAssembler* masm) {
// by calling the runtime system.
__ bind(&slow);
__ PopReturnAddressTo(rbx);
- __ push(rdx);
+ __ Push(rdx);
__ PushReturnAddressFrom(rbx);
__ TailCallRuntime(Runtime::kGetArgumentsProperty, 1, 1);
}
-void ArgumentsAccessStub::GenerateNewNonStrictFast(MacroAssembler* masm) {
+void ArgumentsAccessStub::GenerateNewSloppyFast(MacroAssembler* masm) {
// Stack layout:
// rsp[0] : return address
// rsp[8] : number of parameters (tagged)
@@ -1095,14 +1023,14 @@ void ArgumentsAccessStub::GenerateNewNonStrictFast(MacroAssembler* masm) {
__ SmiToInteger64(rcx,
Operand(rdx,
ArgumentsAdaptorFrameConstants::kLengthOffset));
- __ lea(rdx, Operand(rdx, rcx, times_pointer_size,
+ __ leap(rdx, Operand(rdx, rcx, times_pointer_size,
StandardFrameConstants::kCallerSPOffset));
__ movp(args.GetArgumentOperand(1), rdx);
// rbx = parameter count (untagged)
// rcx = argument count (untagged)
// Compute the mapped parameter count = min(rbx, rcx) in rbx.
- __ cmpq(rbx, rcx);
+ __ cmpp(rbx, rcx);
__ j(less_equal, &try_allocate, Label::kNear);
__ movp(rbx, rcx);
@@ -1113,17 +1041,17 @@ void ArgumentsAccessStub::GenerateNewNonStrictFast(MacroAssembler* masm) {
const int kParameterMapHeaderSize =
FixedArray::kHeaderSize + 2 * kPointerSize;
Label no_parameter_map;
- __ xor_(r8, r8);
- __ testq(rbx, rbx);
+ __ xorp(r8, r8);
+ __ testp(rbx, rbx);
__ j(zero, &no_parameter_map, Label::kNear);
- __ lea(r8, Operand(rbx, times_pointer_size, kParameterMapHeaderSize));
+ __ leap(r8, Operand(rbx, times_pointer_size, kParameterMapHeaderSize));
__ bind(&no_parameter_map);
// 2. Backing store.
- __ lea(r8, Operand(r8, rcx, times_pointer_size, FixedArray::kHeaderSize));
+ __ leap(r8, Operand(r8, rcx, times_pointer_size, FixedArray::kHeaderSize));
// 3. Arguments object.
- __ addq(r8, Immediate(Heap::kArgumentsObjectSize));
+ __ addp(r8, Immediate(Heap::kSloppyArgumentsObjectSize));
// Do the allocation of all three objects in one go.
__ Allocate(r8, rax, rdx, rdi, &runtime, TAG_OBJECT);
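The three-step size computation above adds up everything the sloppy arguments object needs so that a single Allocate call covers it. The same arithmetic written out (a sketch using the constants named in the hunk; rbx is the mapped parameter count, rcx the argument count):

    int SloppyArgumentsAllocationSize(int mapped_count, int argument_count) {
      const int kParameterMapHeaderSize =
          FixedArray::kHeaderSize + 2 * kPointerSize;
      int size = 0;
      if (mapped_count > 0) {  // 1. Parameter map, only if parameters are mapped.
        size += kParameterMapHeaderSize + mapped_count * kPointerSize;
      }
      // 2. Backing store for the elements.
      size += FixedArray::kHeaderSize + argument_count * kPointerSize;
      // 3. The arguments object itself.
      size += Heap::kSloppyArgumentsObjectSize;
      return size;
    }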
@@ -1134,10 +1062,10 @@ void ArgumentsAccessStub::GenerateNewNonStrictFast(MacroAssembler* masm) {
Label has_mapped_parameters, copy;
__ movp(rdi, Operand(rsi, Context::SlotOffset(Context::GLOBAL_OBJECT_INDEX)));
__ movp(rdi, FieldOperand(rdi, GlobalObject::kNativeContextOffset));
- __ testq(rbx, rbx);
+ __ testp(rbx, rbx);
__ j(not_zero, &has_mapped_parameters, Label::kNear);
- const int kIndex = Context::ARGUMENTS_BOILERPLATE_INDEX;
+ const int kIndex = Context::SLOPPY_ARGUMENTS_BOILERPLATE_INDEX;
__ movp(rdi, Operand(rdi, Context::SlotOffset(kIndex)));
__ jmp(&copy, Label::kNear);
@@ -1174,7 +1102,7 @@ void ArgumentsAccessStub::GenerateNewNonStrictFast(MacroAssembler* masm) {
// Set up the elements pointer in the allocated arguments object.
// If we allocated a parameter map, edi will point there, otherwise to the
// backing store.
- __ lea(rdi, Operand(rax, Heap::kArgumentsObjectSize));
+ __ leap(rdi, Operand(rax, Heap::kSloppyArgumentsObjectSize));
__ movp(FieldOperand(rax, JSObject::kElementsOffset), rdi);
// rax = address of new object (tagged)
@@ -1184,16 +1112,16 @@ void ArgumentsAccessStub::GenerateNewNonStrictFast(MacroAssembler* masm) {
// Initialize parameter map. If there are no mapped arguments, we're done.
Label skip_parameter_map;
- __ testq(rbx, rbx);
+ __ testp(rbx, rbx);
__ j(zero, &skip_parameter_map);
- __ LoadRoot(kScratchRegister, Heap::kNonStrictArgumentsElementsMapRootIndex);
+ __ LoadRoot(kScratchRegister, Heap::kSloppyArgumentsElementsMapRootIndex);
// rbx contains the untagged argument count. Add 2 and tag to write.
__ movp(FieldOperand(rdi, FixedArray::kMapOffset), kScratchRegister);
__ Integer64PlusConstantToSmi(r9, rbx, 2);
__ movp(FieldOperand(rdi, FixedArray::kLengthOffset), r9);
__ movp(FieldOperand(rdi, FixedArray::kHeaderSize + 0 * kPointerSize), rsi);
- __ lea(r9, Operand(rdi, rbx, times_pointer_size, kParameterMapHeaderSize));
+ __ leap(r9, Operand(rdi, rbx, times_pointer_size, kParameterMapHeaderSize));
__ movp(FieldOperand(rdi, FixedArray::kHeaderSize + 1 * kPointerSize), r9);
// Copy the parameter slots and the holes in the arguments.
@@ -1209,11 +1137,11 @@ void ArgumentsAccessStub::GenerateNewNonStrictFast(MacroAssembler* masm) {
// Load tagged parameter count into r9.
__ Integer32ToSmi(r9, rbx);
__ Move(r8, Smi::FromInt(Context::MIN_CONTEXT_SLOTS));
- __ addq(r8, args.GetArgumentOperand(2));
- __ subq(r8, r9);
+ __ addp(r8, args.GetArgumentOperand(2));
+ __ subp(r8, r9);
__ Move(r11, factory->the_hole_value());
__ movp(rdx, rdi);
- __ lea(rdi, Operand(rdi, rbx, times_pointer_size, kParameterMapHeaderSize));
+ __ leap(rdi, Operand(rdi, rbx, times_pointer_size, kParameterMapHeaderSize));
// r9 = loop variable (tagged)
// r8 = mapping index (tagged)
// r11 = the hole value
@@ -1251,21 +1179,21 @@ void ArgumentsAccessStub::GenerateNewNonStrictFast(MacroAssembler* masm) {
__ movp(rdx, args.GetArgumentOperand(1));
// Untag rcx for the loop below.
__ SmiToInteger64(rcx, rcx);
- __ lea(kScratchRegister, Operand(r8, times_pointer_size, 0));
- __ subq(rdx, kScratchRegister);
+ __ leap(kScratchRegister, Operand(r8, times_pointer_size, 0));
+ __ subp(rdx, kScratchRegister);
__ jmp(&arguments_test, Label::kNear);
__ bind(&arguments_loop);
- __ subq(rdx, Immediate(kPointerSize));
+ __ subp(rdx, Immediate(kPointerSize));
__ movp(r9, Operand(rdx, 0));
__ movp(FieldOperand(rdi, r8,
times_pointer_size,
FixedArray::kHeaderSize),
r9);
- __ addq(r8, Immediate(1));
+ __ addp(r8, Immediate(1));
__ bind(&arguments_test);
- __ cmpq(r8, rcx);
+ __ cmpp(r8, rcx);
__ j(less, &arguments_loop, Label::kNear);
// Return and remove the on-stack parameters.
@@ -1276,11 +1204,11 @@ void ArgumentsAccessStub::GenerateNewNonStrictFast(MacroAssembler* masm) {
__ bind(&runtime);
__ Integer32ToSmi(rcx, rcx);
__ movp(args.GetArgumentOperand(2), rcx); // Patch argument count.
- __ TailCallRuntime(Runtime::kNewArgumentsFast, 3, 1);
+ __ TailCallRuntime(Runtime::kHiddenNewArgumentsFast, 3, 1);
}
-void ArgumentsAccessStub::GenerateNewNonStrictSlow(MacroAssembler* masm) {
+void ArgumentsAccessStub::GenerateNewSloppySlow(MacroAssembler* masm) {
// rsp[0] : return address
// rsp[8] : number of parameters
// rsp[16] : receiver displacement
@@ -1298,12 +1226,12 @@ void ArgumentsAccessStub::GenerateNewNonStrictSlow(MacroAssembler* masm) {
__ movp(rcx, Operand(rdx, ArgumentsAdaptorFrameConstants::kLengthOffset));
__ movp(args.GetArgumentOperand(2), rcx);
__ SmiToInteger64(rcx, rcx);
- __ lea(rdx, Operand(rdx, rcx, times_pointer_size,
+ __ leap(rdx, Operand(rdx, rcx, times_pointer_size,
StandardFrameConstants::kCallerSPOffset));
__ movp(args.GetArgumentOperand(1), rdx);
__ bind(&runtime);
- __ TailCallRuntime(Runtime::kNewArgumentsFast, 3, 1);
+ __ TailCallRuntime(Runtime::kHiddenNewArgumentsFast, 3, 1);
}
@@ -1331,7 +1259,7 @@ void ArgumentsAccessStub::GenerateNewStrict(MacroAssembler* masm) {
__ movp(rcx, Operand(rdx, ArgumentsAdaptorFrameConstants::kLengthOffset));
__ movp(args.GetArgumentOperand(2), rcx);
__ SmiToInteger64(rcx, rcx);
- __ lea(rdx, Operand(rdx, rcx, times_pointer_size,
+ __ leap(rdx, Operand(rdx, rcx, times_pointer_size,
StandardFrameConstants::kCallerSPOffset));
__ movp(args.GetArgumentOperand(1), rdx);
@@ -1339,11 +1267,11 @@ void ArgumentsAccessStub::GenerateNewStrict(MacroAssembler* masm) {
// the arguments object and the elements array.
Label add_arguments_object;
__ bind(&try_allocate);
- __ testq(rcx, rcx);
+ __ testp(rcx, rcx);
__ j(zero, &add_arguments_object, Label::kNear);
- __ lea(rcx, Operand(rcx, times_pointer_size, FixedArray::kHeaderSize));
+ __ leap(rcx, Operand(rcx, times_pointer_size, FixedArray::kHeaderSize));
__ bind(&add_arguments_object);
- __ addq(rcx, Immediate(Heap::kArgumentsObjectSizeStrict));
+ __ addp(rcx, Immediate(Heap::kStrictArgumentsObjectSize));
// Do the allocation of both objects in one go.
__ Allocate(rcx, rax, rdx, rbx, &runtime, TAG_OBJECT);
@@ -1352,7 +1280,7 @@ void ArgumentsAccessStub::GenerateNewStrict(MacroAssembler* masm) {
__ movp(rdi, Operand(rsi, Context::SlotOffset(Context::GLOBAL_OBJECT_INDEX)));
__ movp(rdi, FieldOperand(rdi, GlobalObject::kNativeContextOffset));
const int offset =
- Context::SlotOffset(Context::STRICT_MODE_ARGUMENTS_BOILERPLATE_INDEX);
+ Context::SlotOffset(Context::STRICT_ARGUMENTS_BOILERPLATE_INDEX);
__ movp(rdi, Operand(rdi, offset));
// Copy the JS object part.
@@ -1370,7 +1298,7 @@ void ArgumentsAccessStub::GenerateNewStrict(MacroAssembler* masm) {
// If there are no actual arguments, we're done.
Label done;
- __ testq(rcx, rcx);
+ __ testp(rcx, rcx);
__ j(zero, &done);
// Get the parameters pointer from the stack.
@@ -1378,7 +1306,7 @@ void ArgumentsAccessStub::GenerateNewStrict(MacroAssembler* masm) {
// Set up the elements pointer in the allocated arguments object and
// initialize the header in the elements fixed array.
- __ lea(rdi, Operand(rax, Heap::kArgumentsObjectSizeStrict));
+ __ leap(rdi, Operand(rax, Heap::kStrictArgumentsObjectSize));
__ movp(FieldOperand(rax, JSObject::kElementsOffset), rdi);
__ LoadRoot(kScratchRegister, Heap::kFixedArrayMapRootIndex);
__ movp(FieldOperand(rdi, FixedArray::kMapOffset), kScratchRegister);
@@ -1393,9 +1321,9 @@ void ArgumentsAccessStub::GenerateNewStrict(MacroAssembler* masm) {
__ bind(&loop);
__ movp(rbx, Operand(rdx, -1 * kPointerSize)); // Skip receiver.
__ movp(FieldOperand(rdi, FixedArray::kHeaderSize), rbx);
- __ addq(rdi, Immediate(kPointerSize));
- __ subq(rdx, Immediate(kPointerSize));
- __ decq(rcx);
+ __ addp(rdi, Immediate(kPointerSize));
+ __ subp(rdx, Immediate(kPointerSize));
+ __ decp(rcx);
__ j(not_zero, &loop);
// Return and remove the on-stack parameters.
@@ -1404,7 +1332,7 @@ void ArgumentsAccessStub::GenerateNewStrict(MacroAssembler* masm) {
// Do the runtime call to allocate the arguments object.
__ bind(&runtime);
- __ TailCallRuntime(Runtime::kNewStrictArgumentsFast, 3, 1);
+ __ TailCallRuntime(Runtime::kHiddenNewStrictArgumentsFast, 3, 1);
}
@@ -1413,7 +1341,7 @@ void RegExpExecStub::Generate(MacroAssembler* masm) {
// time or if regexp entry in generated code is turned off runtime switch or
// at compilation.
#ifdef V8_INTERPRETED_REGEXP
- __ TailCallRuntime(Runtime::kRegExpExec, 4, 1);
+ __ TailCallRuntime(Runtime::kHiddenRegExpExec, 4, 1);
#else // V8_INTERPRETED_REGEXP
// Stack frame on entry.
@@ -1441,7 +1369,7 @@ void RegExpExecStub::Generate(MacroAssembler* masm) {
ExternalReference address_of_regexp_stack_memory_size =
ExternalReference::address_of_regexp_stack_memory_size(isolate);
__ Load(kScratchRegister, address_of_regexp_stack_memory_size);
- __ testq(kScratchRegister, kScratchRegister);
+ __ testp(kScratchRegister, kScratchRegister);
__ j(zero, &runtime);
// Check that the first argument is a JSRegExp object.
@@ -1533,7 +1461,7 @@ void RegExpExecStub::Generate(MacroAssembler* masm) {
STATIC_ASSERT(kSlicedStringTag > kExternalStringTag);
STATIC_ASSERT(kIsNotStringMask > kExternalStringTag);
STATIC_ASSERT(kShortExternalStringTag > kExternalStringTag);
- __ cmpq(rbx, Immediate(kExternalStringTag));
+ __ cmpp(rbx, Immediate(kExternalStringTag));
__ j(greater_equal, &not_seq_nor_cons); // Go to (7).
// (4) Cons string. Check that it's flat.
@@ -1614,7 +1542,7 @@ void RegExpExecStub::Generate(MacroAssembler* masm) {
__ Move(kScratchRegister, address_of_regexp_stack_memory_address);
__ movp(r9, Operand(kScratchRegister, 0));
__ Move(kScratchRegister, address_of_regexp_stack_memory_size);
- __ addq(r9, Operand(kScratchRegister, 0));
+ __ addp(r9, Operand(kScratchRegister, 0));
__ movq(Operand(rsp, (argument_slots_on_stack - 3) * kRegisterSize), r9);
// Argument 6: Set the number of capture registers to zero to force global
@@ -1650,24 +1578,24 @@ void RegExpExecStub::Generate(MacroAssembler* masm) {
Label setup_two_byte, setup_rest, got_length, length_not_from_slice;
// Prepare start and end index of the input.
// Load the length from the original sliced string if that is the case.
- __ addq(rbx, r14);
+ __ addp(rbx, r14);
__ SmiToInteger32(arg_reg_3, FieldOperand(r15, String::kLengthOffset));
- __ addq(r14, arg_reg_3); // Using arg3 as scratch.
+ __ addp(r14, arg_reg_3); // Using arg3 as scratch.
// rbx: start index of the input
// r14: end index of the input
// r15: original subject string
__ testb(rcx, rcx); // Last use of rcx as encoding of subject string.
__ j(zero, &setup_two_byte, Label::kNear);
- __ lea(arg_reg_4,
+ __ leap(arg_reg_4,
FieldOperand(rdi, r14, times_1, SeqOneByteString::kHeaderSize));
- __ lea(arg_reg_3,
+ __ leap(arg_reg_3,
FieldOperand(rdi, rbx, times_1, SeqOneByteString::kHeaderSize));
__ jmp(&setup_rest, Label::kNear);
__ bind(&setup_two_byte);
- __ lea(arg_reg_4,
+ __ leap(arg_reg_4,
FieldOperand(rdi, r14, times_2, SeqTwoByteString::kHeaderSize));
- __ lea(arg_reg_3,
+ __ leap(arg_reg_3,
FieldOperand(rdi, rbx, times_2, SeqTwoByteString::kHeaderSize));
__ bind(&setup_rest);
@@ -1679,7 +1607,7 @@ void RegExpExecStub::Generate(MacroAssembler* masm) {
__ movp(arg_reg_1, r15);
// Locate the code entry and call it.
- __ addq(r11, Immediate(Code::kHeaderSize - kHeapObjectTag));
+ __ addp(r11, Immediate(Code::kHeaderSize - kHeapObjectTag));
__ call(r11);
__ LeaveApiExitFrame(true);
@@ -1764,7 +1692,7 @@ void RegExpExecStub::Generate(MacroAssembler* masm) {
// Capture register counter starts from number of capture registers and
  // counts down until wrapping after zero.
__ bind(&next_capture);
- __ subq(rdx, Immediate(1));
+ __ subp(rdx, Immediate(1));
__ j(negative, &done, Label::kNear);
// Read the value from the static offsets vector buffer and make it a smi.
__ movl(rdi, Operand(rcx, rdx, times_int_size, 0));
@@ -1793,7 +1721,7 @@ void RegExpExecStub::Generate(MacroAssembler* masm) {
masm->ExternalOperand(pending_exception_address, rbx);
__ movp(rax, pending_exception_operand);
__ LoadRoot(rdx, Heap::kTheHoleValueRootIndex);
- __ cmpq(rax, rdx);
+ __ cmpp(rax, rdx);
__ j(equal, &runtime);
__ movp(pending_exception_operand, rdx);
@@ -1807,7 +1735,7 @@ void RegExpExecStub::Generate(MacroAssembler* masm) {
// Do the runtime call to execute the regexp.
__ bind(&runtime);
- __ TailCallRuntime(Runtime::kRegExpExec, 4, 1);
+ __ TailCallRuntime(Runtime::kHiddenRegExpExec, 4, 1);
// Deferred code for string handling.
// (7) Not a long external string? If yes, go to (10).
@@ -1828,7 +1756,7 @@ void RegExpExecStub::Generate(MacroAssembler* masm) {
__ movp(rdi, FieldOperand(rdi, ExternalString::kResourceDataOffset));
// Move the pointer so that offset-wise, it looks like a sequential string.
STATIC_ASSERT(SeqTwoByteString::kHeaderSize == SeqOneByteString::kHeaderSize);
- __ subq(rdi, Immediate(SeqTwoByteString::kHeaderSize - kHeapObjectTag));
+ __ subp(rdi, Immediate(SeqTwoByteString::kHeaderSize - kHeapObjectTag));
STATIC_ASSERT(kTwoByteStringTag == 0);
// (8a) Is the external string one byte? If yes, go to (6).
__ testb(rbx, Immediate(kStringEncodingMask));
@@ -1890,7 +1818,7 @@ static void BranchIfNotInternalizedString(MacroAssembler* masm,
Register scratch) {
__ JumpIfSmi(object, label);
__ movp(scratch, FieldOperand(object, HeapObject::kMapOffset));
- __ movzxbq(scratch,
+ __ movzxbp(scratch,
FieldOperand(scratch, Map::kInstanceTypeOffset));
STATIC_ASSERT(kInternalizedTag == 0 && kStringTag == 0);
__ testb(scratch, Immediate(kIsNotStringMask | kIsNotInternalizedMask));
@@ -1910,9 +1838,9 @@ void ICCompareStub::GenerateGeneric(MacroAssembler* masm) {
// Compare two smis.
Label non_smi, smi_done;
__ JumpIfNotBothSmi(rax, rdx, &non_smi);
- __ subq(rdx, rax);
+ __ subp(rdx, rax);
__ j(no_overflow, &smi_done);
- __ not_(rdx); // Correct sign in case of overflow. rdx cannot be 0 here.
+ __ notp(rdx); // Correct sign in case of overflow. rdx cannot be 0 here.
__ bind(&smi_done);
__ movp(rax, rdx);
__ ret(0);
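The subp/notp pair above works because the comparison result only needs the right sign, and must be zero only on equality. On x64 a Smi keeps its 32-bit payload in the upper half of the word, so when the tagged subtraction overflows, bitwise NOT restores the sign while the all-ones low half keeps the result non-zero. A sketch with explicit tagging (GCC/Clang overflow builtin assumed):

    #include <cstdint>

    int64_t CompareSmis(int32_t a, int32_t b) {
      const int64_t tagged_a = static_cast<int64_t>(a) << 32;  // Smi-tag
      const int64_t tagged_b = static_cast<int64_t>(b) << 32;
      int64_t diff;
      if (!__builtin_sub_overflow(tagged_a, tagged_b, &diff)) return diff;
      // Overflow: the sign is flipped; NOT corrects it, and because the low
      // 32 bits become all ones the result cannot be mistaken for "equal".
      return ~diff;
    }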
@@ -1926,7 +1854,7 @@ void ICCompareStub::GenerateGeneric(MacroAssembler* masm) {
// Two identical objects are equal unless they are both NaN or undefined.
{
Label not_identical;
- __ cmpq(rax, rdx);
+ __ cmpp(rax, rdx);
__ j(not_equal, &not_identical, Label::kNear);
if (cc != equal) {
@@ -1966,7 +1894,7 @@ void ICCompareStub::GenerateGeneric(MacroAssembler* masm) {
__ setcc(parity_even, rax);
// rax is 0 for equal non-NaN heapnumbers, 1 for NaNs.
if (cc == greater_equal || cc == greater) {
- __ neg(rax);
+ __ negp(rax);
}
__ ret(0);
@@ -2044,7 +1972,7 @@ void ICCompareStub::GenerateGeneric(MacroAssembler* masm) {
// Return a result of -1, 0, or 1, based on EFLAGS.
__ setcc(above, rax);
__ setcc(below, rcx);
- __ subq(rax, rcx);
+ __ subp(rax, rcx);
__ ret(0);
// If one of the numbers was NaN, then the result is always false.
@@ -2112,7 +2040,7 @@ void ICCompareStub::GenerateGeneric(MacroAssembler* masm) {
// a heap object has the low bit clear.
STATIC_ASSERT(kSmiTag == 0);
STATIC_ASSERT(kSmiTagMask == 1);
- __ lea(rcx, Operand(rax, rdx, times_1, 0));
+ __ leap(rcx, Operand(rax, rdx, times_1, 0));
__ testb(rcx, Immediate(kSmiTagMask));
__ j(not_zero, &not_both_objects, Label::kNear);
__ CmpObjectType(rax, FIRST_SPEC_OBJECT_TYPE, rbx);
@@ -2137,8 +2065,8 @@ void ICCompareStub::GenerateGeneric(MacroAssembler* masm) {
// Push arguments below the return address to prepare jump to builtin.
__ PopReturnAddressTo(rcx);
- __ push(rdx);
- __ push(rax);
+ __ Push(rdx);
+ __ Push(rax);
// Figure out which native to call and setup the arguments.
Builtins::JavaScript builtin;
@@ -2161,92 +2089,118 @@ void ICCompareStub::GenerateGeneric(MacroAssembler* masm) {
static void GenerateRecordCallTarget(MacroAssembler* masm) {
- // Cache the called function in a global property cell. Cache states
+ // Cache the called function in a feedback vector slot. Cache states
// are uninitialized, monomorphic (indicated by a JSFunction), and
// megamorphic.
// rax : number of arguments to the construct function
- // rbx : cache cell for call target
+ // rbx : Feedback vector
+ // rdx : slot in feedback vector (Smi)
// rdi : the function to call
Isolate* isolate = masm->isolate();
- Label initialize, done, miss, megamorphic, not_array_function;
+ Label initialize, done, miss, megamorphic, not_array_function,
+ done_no_smi_convert;
// Load the cache state into rcx.
- __ movp(rcx, FieldOperand(rbx, Cell::kValueOffset));
+ __ SmiToInteger32(rdx, rdx);
+ __ movp(rcx, FieldOperand(rbx, rdx, times_pointer_size,
+ FixedArray::kHeaderSize));
// A monomorphic cache hit or an already megamorphic state: invoke the
// function without changing the state.
- __ cmpq(rcx, rdi);
+ __ cmpp(rcx, rdi);
__ j(equal, &done);
- __ Cmp(rcx, TypeFeedbackCells::MegamorphicSentinel(isolate));
+ __ Cmp(rcx, TypeFeedbackInfo::MegamorphicSentinel(isolate));
__ j(equal, &done);
- // If we came here, we need to see if we are the array function.
- // If we didn't have a matching function, and we didn't find the megamorph
- // sentinel, then we have in the cell either some other function or an
- // AllocationSite. Do a map check on the object in rcx.
- Handle<Map> allocation_site_map =
- masm->isolate()->factory()->allocation_site_map();
- __ Cmp(FieldOperand(rcx, 0), allocation_site_map);
- __ j(not_equal, &miss);
-
- // Make sure the function is the Array() function
- __ LoadArrayFunction(rcx);
- __ cmpq(rdi, rcx);
- __ j(not_equal, &megamorphic);
- __ jmp(&done);
+ if (!FLAG_pretenuring_call_new) {
+ // If we came here, we need to see if we are the array function.
+ // If we didn't have a matching function, and we didn't find the megamorph
+ // sentinel, then we have in the slot either some other function or an
+ // AllocationSite. Do a map check on the object in rcx.
+ Handle<Map> allocation_site_map =
+ masm->isolate()->factory()->allocation_site_map();
+ __ Cmp(FieldOperand(rcx, 0), allocation_site_map);
+ __ j(not_equal, &miss);
+
+ // Make sure the function is the Array() function
+ __ LoadGlobalFunction(Context::ARRAY_FUNCTION_INDEX, rcx);
+ __ cmpp(rdi, rcx);
+ __ j(not_equal, &megamorphic);
+ __ jmp(&done);
+ }
__ bind(&miss);
// A monomorphic miss (i.e, here the cache is not uninitialized) goes
// megamorphic.
- __ Cmp(rcx, TypeFeedbackCells::UninitializedSentinel(isolate));
+ __ Cmp(rcx, TypeFeedbackInfo::UninitializedSentinel(isolate));
__ j(equal, &initialize);
// MegamorphicSentinel is an immortal immovable object (undefined) so no
// write-barrier is needed.
__ bind(&megamorphic);
- __ Move(FieldOperand(rbx, Cell::kValueOffset),
- TypeFeedbackCells::MegamorphicSentinel(isolate));
+ __ Move(FieldOperand(rbx, rdx, times_pointer_size, FixedArray::kHeaderSize),
+ TypeFeedbackInfo::MegamorphicSentinel(isolate));
__ jmp(&done);
// An uninitialized cache is patched with the function or sentinel to
// indicate the ElementsKind if function is the Array constructor.
__ bind(&initialize);
- // Make sure the function is the Array() function
- __ LoadArrayFunction(rcx);
- __ cmpq(rdi, rcx);
- __ j(not_equal, &not_array_function);
-
- // The target function is the Array constructor,
- // Create an AllocationSite if we don't already have it, store it in the cell
- {
- FrameScope scope(masm, StackFrame::INTERNAL);
- // Arguments register must be smi-tagged to call out.
- __ Integer32ToSmi(rax, rax);
- __ push(rax);
- __ push(rdi);
- __ push(rbx);
+ if (!FLAG_pretenuring_call_new) {
+ // Make sure the function is the Array() function
+ __ LoadGlobalFunction(Context::ARRAY_FUNCTION_INDEX, rcx);
+ __ cmpp(rdi, rcx);
+ __ j(not_equal, &not_array_function);
- CreateAllocationSiteStub create_stub;
- __ CallStub(&create_stub);
+ {
+ FrameScope scope(masm, StackFrame::INTERNAL);
+
+ // Arguments register must be smi-tagged to call out.
+ __ Integer32ToSmi(rax, rax);
+ __ Push(rax);
+ __ Push(rdi);
+ __ Integer32ToSmi(rdx, rdx);
+ __ Push(rdx);
+ __ Push(rbx);
+
+ CreateAllocationSiteStub create_stub;
+ __ CallStub(&create_stub);
+
+ __ Pop(rbx);
+ __ Pop(rdx);
+ __ Pop(rdi);
+ __ Pop(rax);
+ __ SmiToInteger32(rax, rax);
+ }
+ __ jmp(&done_no_smi_convert);
- __ pop(rbx);
- __ pop(rdi);
- __ pop(rax);
- __ SmiToInteger32(rax, rax);
+ __ bind(&not_array_function);
}
- __ jmp(&done);
- __ bind(&not_array_function);
- __ movp(FieldOperand(rbx, Cell::kValueOffset), rdi);
- // No need for a write barrier here - cells are rescanned.
+ __ movp(FieldOperand(rbx, rdx, times_pointer_size, FixedArray::kHeaderSize),
+ rdi);
+
+ // We won't need rdx or rbx anymore, just save rdi
+ __ Push(rdi);
+ __ Push(rbx);
+ __ Push(rdx);
+ __ RecordWriteArray(rbx, rdi, rdx, kDontSaveFPRegs,
+ EMIT_REMEMBERED_SET, OMIT_SMI_CHECK);
+ __ Pop(rdx);
+ __ Pop(rbx);
+ __ Pop(rdi);
__ bind(&done);
+ __ Integer32ToSmi(rdx, rdx);
+
+ __ bind(&done_no_smi_convert);
}
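Condensed, the state machine GenerateRecordCallTarget now implements on the feedback slot looks like this (pseudo-C++ sketch; it leaves out the Array()/AllocationSite special case and the pretenuring flag handled above):

    // Slot states: UninitializedSentinel -> target function (monomorphic)
    //                                    -> MegamorphicSentinel (megamorphic)
    Object* NextSlotState(Object* slot, Object* target, Isolate* isolate) {
      if (slot == target) return slot;                    // monomorphic hit
      if (slot == *TypeFeedbackInfo::MegamorphicSentinel(isolate)) {
        return slot;                                      // stays megamorphic
      }
      if (slot != *TypeFeedbackInfo::UninitializedSentinel(isolate)) {
        return *TypeFeedbackInfo::MegamorphicSentinel(isolate);  // miss
      }
      return target;                                      // first call: record it
    }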
void CallFunctionStub::Generate(MacroAssembler* masm) {
- // rbx : cache cell for call target
+ // rbx : feedback vector
+ // rdx : (only if rbx is not the megamorphic symbol) slot in feedback
+ // vector (Smi)
// rdi : the function to call
Isolate* isolate = masm->isolate();
Label slow, non_function, wrap, cont;
@@ -2262,6 +2216,10 @@ void CallFunctionStub::Generate(MacroAssembler* masm) {
if (RecordCallTarget()) {
GenerateRecordCallTarget(masm);
+ // Type information was updated. Because we may call Array, which
+  // expects either undefined or an AllocationSite in rbx, we need
+ // to set rbx to undefined.
+ __ LoadRoot(rbx, Heap::kUndefinedValueRootIndex);
}
}
@@ -2283,6 +2241,7 @@ void CallFunctionStub::Generate(MacroAssembler* masm) {
__ j(not_equal, &cont);
}
+
// Load the receiver from the stack.
__ movp(rax, args.GetReceiverOperand());
@@ -2305,15 +2264,18 @@ void CallFunctionStub::Generate(MacroAssembler* masm) {
if (RecordCallTarget()) {
// If there is a call target cache, mark it megamorphic in the
// non-function case. MegamorphicSentinel is an immortal immovable
- // object (undefined) so no write barrier is needed.
- __ Move(FieldOperand(rbx, Cell::kValueOffset),
- TypeFeedbackCells::MegamorphicSentinel(isolate));
+ // object (megamorphic symbol) so no write barrier is needed.
+ __ SmiToInteger32(rdx, rdx);
+ __ Move(FieldOperand(rbx, rdx, times_pointer_size,
+ FixedArray::kHeaderSize),
+ TypeFeedbackInfo::MegamorphicSentinel(isolate));
+ __ Integer32ToSmi(rdx, rdx);
}
// Check for function proxy.
__ CmpInstanceType(rcx, JS_FUNCTION_PROXY_TYPE);
__ j(not_equal, &non_function);
__ PopReturnAddressTo(rcx);
- __ push(rdi); // put proxy as additional argument under return address
+ __ Push(rdi); // put proxy as additional argument under return address
__ PushReturnAddressFrom(rcx);
__ Set(rax, argc_ + 1);
__ Set(rbx, 0);
@@ -2340,10 +2302,10 @@ void CallFunctionStub::Generate(MacroAssembler* masm) {
__ bind(&wrap);
// Wrap the receiver and patch it back onto the stack.
{ FrameScope frame_scope(masm, StackFrame::INTERNAL);
- __ push(rdi);
- __ push(rax);
+ __ Push(rdi);
+ __ Push(rax);
__ InvokeBuiltin(Builtins::TO_OBJECT, CALL_FUNCTION);
- __ pop(rdi);
+ __ Pop(rdi);
}
__ movp(args.GetReceiverOperand(), rax);
__ jmp(&cont);
@@ -2353,7 +2315,9 @@ void CallFunctionStub::Generate(MacroAssembler* masm) {
void CallConstructStub::Generate(MacroAssembler* masm) {
// rax : number of arguments
- // rbx : cache cell for call target
+ // rbx : feedback vector
+ // rdx : (only if rbx is not the megamorphic symbol) slot in feedback
+ // vector (Smi)
// rdi : constructor function
Label slow, non_function_call;
@@ -2365,6 +2329,26 @@ void CallConstructStub::Generate(MacroAssembler* masm) {
if (RecordCallTarget()) {
GenerateRecordCallTarget(masm);
+
+ __ SmiToInteger32(rdx, rdx);
+ if (FLAG_pretenuring_call_new) {
+      // Put the AllocationSite from the feedback vector into rbx.
+ // By adding kPointerSize we encode that we know the AllocationSite
+ // entry is at the feedback vector slot given by rdx + 1.
+ __ movp(rbx, FieldOperand(rbx, rdx, times_pointer_size,
+ FixedArray::kHeaderSize + kPointerSize));
+ } else {
+ Label feedback_register_initialized;
+ // Put the AllocationSite from the feedback vector into rbx, or undefined.
+ __ movp(rbx, FieldOperand(rbx, rdx, times_pointer_size,
+ FixedArray::kHeaderSize));
+ __ CompareRoot(FieldOperand(rbx, 0), Heap::kAllocationSiteMapRootIndex);
+ __ j(equal, &feedback_register_initialized);
+ __ LoadRoot(rbx, Heap::kUndefinedValueRootIndex);
+ __ bind(&feedback_register_initialized);
+ }
+
+ __ AssertUndefinedOrAllocationSite(rbx);
}
// Jump to the function-specific construct stub.
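The slot layout assumed by the pretenuring branch above, in short: slot rdx of the feedback vector holds the call feedback itself, and slot rdx + 1 holds the AllocationSite, hence the extra kPointerSize in the FieldOperand. A sketch (hypothetical helper, not V8 API):

    // vector: [ ... | slot: call feedback | slot + 1: AllocationSite | ... ]
    Object* LoadAllocationSiteForCallNew(FixedArray* feedback_vector, int slot) {
      return feedback_vector->get(slot + 1);
    }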
@@ -2372,7 +2356,7 @@ void CallConstructStub::Generate(MacroAssembler* masm) {
__ movp(jmp_reg, FieldOperand(rdi, JSFunction::kSharedFunctionInfoOffset));
__ movp(jmp_reg, FieldOperand(jmp_reg,
SharedFunctionInfo::kConstructStubOffset));
- __ lea(jmp_reg, FieldOperand(jmp_reg, Code::kHeaderSize));
+ __ leap(jmp_reg, FieldOperand(jmp_reg, Code::kHeaderSize));
__ jmp(jmp_reg);
// rdi: called object
@@ -2424,23 +2408,9 @@ void CEntryStub::GenerateAheadOfTime(Isolate* isolate) {
}
-static void JumpIfOOM(MacroAssembler* masm,
- Register value,
- Register scratch,
- Label* oom_label) {
- __ movp(scratch, value);
- STATIC_ASSERT(Failure::OUT_OF_MEMORY_EXCEPTION == 3);
- STATIC_ASSERT(kFailureTag == 3);
- __ and_(scratch, Immediate(0xf));
- __ cmpq(scratch, Immediate(0xf));
- __ j(equal, oom_label);
-}
-
-
void CEntryStub::GenerateCore(MacroAssembler* masm,
Label* throw_normal_exception,
Label* throw_termination_exception,
- Label* throw_out_of_memory_exception,
bool do_gc,
bool always_allocate_scope) {
// rax: result parameter for PerformGC, if any.
@@ -2494,7 +2464,7 @@ void CEntryStub::GenerateCore(MacroAssembler* masm,
} else {
ASSERT_EQ(2, result_size_);
// Pass a pointer to the result location as the first argument.
- __ lea(rcx, StackSpaceOperand(2));
+ __ leap(rcx, StackSpaceOperand(2));
// Pass a pointer to the Arguments object as the second argument.
__ movp(rdx, r14); // argc.
__ movp(r8, r15); // argv.
@@ -2529,7 +2499,7 @@ void CEntryStub::GenerateCore(MacroAssembler* masm,
__ movq(rdx, Operand(rsp, 7 * kRegisterSize));
}
#endif
- __ lea(rcx, Operand(rax, 1));
+ __ leap(rcx, Operand(rax, 1));
// Lower 2 bits of rcx are 0 iff rax has failure tag.
__ testl(rcx, Immediate(kFailureTagMask));
__ j(zero, &failure_returned);
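The leap/testl pair above is the failure check the comment describes: Failure objects carry tag 0b11 (kFailureTag == 3, as the removed JumpIfOOM asserted), so adding 1 clears the low two bits exactly when the result is a Failure. Equivalently (a sketch):

    #include <cstdint>

    bool IsFailureTagged(uintptr_t result) {
      const uintptr_t kFailureTagMask = 3;  // low two bits
      return ((result + 1) & kFailureTagMask) == 0;
    }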
@@ -2547,9 +2517,6 @@ void CEntryStub::GenerateCore(MacroAssembler* masm,
__ testl(rax, Immediate(((1 << kFailureTypeTagSize) - 1) << kFailureTagSize));
__ j(zero, &retry, Label::kNear);
- // Special handling of out of memory exceptions.
- JumpIfOOM(masm, rax, kScratchRegister, throw_out_of_memory_exception);
-
// Retrieve the pending exception.
ExternalReference pending_exception_address(
Isolate::kPendingExceptionAddress, masm->isolate());
@@ -2557,9 +2524,6 @@ void CEntryStub::GenerateCore(MacroAssembler* masm,
masm->ExternalOperand(pending_exception_address);
__ movp(rax, pending_exception_operand);
- // See if we just retrieved an OOM exception.
- JumpIfOOM(masm, rax, kScratchRegister, throw_out_of_memory_exception);
-
// Clear the pending exception.
pending_exception_operand =
masm->ExternalOperand(pending_exception_address);
@@ -2615,13 +2579,11 @@ void CEntryStub::Generate(MacroAssembler* masm) {
Label throw_normal_exception;
Label throw_termination_exception;
- Label throw_out_of_memory_exception;
// Call into the runtime system.
GenerateCore(masm,
&throw_normal_exception,
&throw_termination_exception,
- &throw_out_of_memory_exception,
false,
false);
@@ -2629,7 +2591,6 @@ void CEntryStub::Generate(MacroAssembler* masm) {
GenerateCore(masm,
&throw_normal_exception,
&throw_termination_exception,
- &throw_out_of_memory_exception,
true,
false);
@@ -2639,27 +2600,14 @@ void CEntryStub::Generate(MacroAssembler* masm) {
GenerateCore(masm,
&throw_normal_exception,
&throw_termination_exception,
- &throw_out_of_memory_exception,
true,
true);
- __ bind(&throw_out_of_memory_exception);
- // Set external caught exception to false.
- Isolate* isolate = masm->isolate();
- ExternalReference external_caught(Isolate::kExternalCaughtExceptionAddress,
- isolate);
- __ Set(rax, static_cast<int64_t>(false));
- __ Store(external_caught, rax);
-
- // Set pending exception and rax to out of memory exception.
- ExternalReference pending_exception(Isolate::kPendingExceptionAddress,
- isolate);
- Label already_have_failure;
- JumpIfOOM(masm, rax, kScratchRegister, &already_have_failure);
- __ Move(rax, Failure::OutOfMemoryException(0x1), Assembler::RelocInfoNone());
- __ bind(&already_have_failure);
- __ Store(pending_exception, rax);
- // Fall through to the next label.
+ { FrameScope scope(masm, StackFrame::MANUAL);
+ __ PrepareCallCFunction(0);
+ __ CallCFunction(
+ ExternalReference::out_of_memory_function(masm->isolate()), 0);
+ }
__ bind(&throw_termination_exception);
__ ThrowUncatchable(rax);
@@ -2678,7 +2626,7 @@ void JSEntryStub::GenerateBody(MacroAssembler* masm, bool is_construct) {
{ // NOLINT. Scope block confuses linter.
MacroAssembler::NoRootArrayScope uninitialized_root_register(masm);
// Set up frame.
- __ push(rbp);
+ __ pushq(rbp);
__ movp(rbp, rsp);
// Push the stack frame type marker twice.
@@ -2687,22 +2635,22 @@ void JSEntryStub::GenerateBody(MacroAssembler* masm, bool is_construct) {
// platform. It's free to use at this point.
// Cannot use smi-register for loading yet.
__ Move(kScratchRegister, Smi::FromInt(marker), Assembler::RelocInfoNone());
- __ push(kScratchRegister); // context slot
- __ push(kScratchRegister); // function slot
- // Save callee-saved registers (X64/Win64 calling conventions).
- __ push(r12);
- __ push(r13);
- __ push(r14);
- __ push(r15);
+ __ Push(kScratchRegister); // context slot
+ __ Push(kScratchRegister); // function slot
+ // Save callee-saved registers (X64/X32/Win64 calling conventions).
+ __ pushq(r12);
+ __ pushq(r13);
+ __ pushq(r14);
+ __ pushq(r15);
#ifdef _WIN64
- __ push(rdi); // Only callee save in Win64 ABI, argument in AMD64 ABI.
- __ push(rsi); // Only callee save in Win64 ABI, argument in AMD64 ABI.
+ __ pushq(rdi); // Only callee save in Win64 ABI, argument in AMD64 ABI.
+ __ pushq(rsi); // Only callee save in Win64 ABI, argument in AMD64 ABI.
#endif
- __ push(rbx);
+ __ pushq(rbx);
#ifdef _WIN64
// On Win64 XMM6-XMM15 are callee-save
- __ subq(rsp, Immediate(EntryFrameConstants::kXMMRegistersBlockSize));
+ __ subp(rsp, Immediate(EntryFrameConstants::kXMMRegistersBlockSize));
__ movdqu(Operand(rsp, EntryFrameConstants::kXMMRegisterSize * 0), xmm6);
__ movdqu(Operand(rsp, EntryFrameConstants::kXMMRegisterSize * 1), xmm7);
__ movdqu(Operand(rsp, EntryFrameConstants::kXMMRegisterSize * 2), xmm8);
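The block spilled here covers the ten XMM registers the Win64 ABI treats as callee-saved (xmm6 through xmm15, 16 bytes each), which pins down the two constants used above. Values shown for illustration; the real definitions live with the other x64 frame constants:

    // Sketch of the constants implied by the code above.
    const int kXMMRegisterSize = 16;                           // one SSE register
    const int kXMMRegistersBlockSize = kXMMRegisterSize * 10;  // xmm6..xmm15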
@@ -2727,13 +2675,13 @@ void JSEntryStub::GenerateBody(MacroAssembler* masm, bool is_construct) {
ExternalReference c_entry_fp(Isolate::kCEntryFPAddress, isolate);
{
Operand c_entry_fp_operand = masm->ExternalOperand(c_entry_fp);
- __ push(c_entry_fp_operand);
+ __ Push(c_entry_fp_operand);
}
// If this is the outermost JS call, set js_entry_sp value.
ExternalReference js_entry_sp(Isolate::kJSEntrySPAddress, isolate);
__ Load(rax, js_entry_sp);
- __ testq(rax, rax);
+ __ testp(rax, rax);
__ j(not_zero, &not_outermost_js);
__ Push(Smi::FromInt(StackFrame::OUTERMOST_JSENTRY_FRAME));
__ movp(rax, rbp);
@@ -2767,7 +2715,7 @@ void JSEntryStub::GenerateBody(MacroAssembler* masm, bool is_construct) {
__ Store(pending_exception, rax);
// Fake a receiver (NULL).
- __ push(Immediate(0)); // receiver
+ __ Push(Immediate(0)); // receiver
// Invoke the function by calling through JS entry trampoline builtin and
// pop the faked function when we return. We load the address from an
@@ -2782,7 +2730,7 @@ void JSEntryStub::GenerateBody(MacroAssembler* masm, bool is_construct) {
ExternalReference entry(Builtins::kJSEntryTrampoline, isolate);
__ Load(rax, entry);
}
- __ lea(kScratchRegister, FieldOperand(rax, Code::kHeaderSize));
+ __ leap(kScratchRegister, FieldOperand(rax, Code::kHeaderSize));
__ call(kScratchRegister);
// Unlink this frame from the handler chain.
@@ -2790,7 +2738,7 @@ void JSEntryStub::GenerateBody(MacroAssembler* masm, bool is_construct) {
__ bind(&exit);
// Check if the current stack frame is marked as the outermost JS frame.
- __ pop(rbx);
+ __ Pop(rbx);
__ Cmp(rbx, Smi::FromInt(StackFrame::OUTERMOST_JSENTRY_FRAME));
__ j(not_equal, &not_outermost_js_2);
__ Move(kScratchRegister, js_entry_sp);
@@ -2799,7 +2747,7 @@ void JSEntryStub::GenerateBody(MacroAssembler* masm, bool is_construct) {
// Restore the top frame descriptor from the stack.
{ Operand c_entry_fp_operand = masm->ExternalOperand(c_entry_fp);
- __ pop(c_entry_fp_operand);
+ __ Pop(c_entry_fp_operand);
}
// Restore callee-saved registers (X64 conventions).
@@ -2815,23 +2763,23 @@ void JSEntryStub::GenerateBody(MacroAssembler* masm, bool is_construct) {
__ movdqu(xmm13, Operand(rsp, EntryFrameConstants::kXMMRegisterSize * 7));
__ movdqu(xmm14, Operand(rsp, EntryFrameConstants::kXMMRegisterSize * 8));
__ movdqu(xmm15, Operand(rsp, EntryFrameConstants::kXMMRegisterSize * 9));
- __ addq(rsp, Immediate(EntryFrameConstants::kXMMRegistersBlockSize));
+ __ addp(rsp, Immediate(EntryFrameConstants::kXMMRegistersBlockSize));
#endif
- __ pop(rbx);
+ __ popq(rbx);
#ifdef _WIN64
  // Callee saved in Win64 ABI, arguments/volatile in AMD64 ABI.
- __ pop(rsi);
- __ pop(rdi);
+ __ popq(rsi);
+ __ popq(rdi);
#endif
- __ pop(r15);
- __ pop(r14);
- __ pop(r13);
- __ pop(r12);
- __ addq(rsp, Immediate(2 * kPointerSize)); // remove markers
+ __ popq(r15);
+ __ popq(r14);
+ __ popq(r13);
+ __ popq(r12);
+ __ addp(rsp, Immediate(2 * kPointerSize)); // remove markers
// Restore frame pointer and return.
- __ pop(rbp);
+ __ popq(rbp);
__ ret(0);
}
@@ -2917,7 +2865,7 @@ void InstanceofStub::Generate(MacroAssembler* masm) {
} else {
// Get return address and delta to inlined map check.
__ movq(kScratchRegister, StackOperandForReturnAddress(0));
- __ subq(kScratchRegister, args.GetArgumentOperand(2));
+ __ subp(kScratchRegister, args.GetArgumentOperand(2));
if (FLAG_debug_code) {
__ movl(rdi, Immediate(kWordBeforeMapCheckValue));
__ cmpl(Operand(kScratchRegister, kOffsetToMapCheckValue - 4), rdi);
@@ -2934,9 +2882,9 @@ void InstanceofStub::Generate(MacroAssembler* masm) {
Label loop, is_instance, is_not_instance;
__ LoadRoot(kScratchRegister, Heap::kNullValueRootIndex);
__ bind(&loop);
- __ cmpq(rcx, rbx);
+ __ cmpp(rcx, rbx);
__ j(equal, &is_instance, Label::kNear);
- __ cmpq(rcx, kScratchRegister);
+ __ cmpp(rcx, kScratchRegister);
// The code at is_not_instance assumes that kScratchRegister contains a
// non-zero GCable value (the null object in this case).
__ j(equal, &is_not_instance, Label::kNear);
@@ -2958,7 +2906,7 @@ void InstanceofStub::Generate(MacroAssembler* masm) {
ASSERT(true_offset >= 0 && true_offset < 0x100);
__ movl(rax, Immediate(true_offset));
__ movq(kScratchRegister, StackOperandForReturnAddress(0));
- __ subq(kScratchRegister, args.GetArgumentOperand(2));
+ __ subp(kScratchRegister, args.GetArgumentOperand(2));
__ movb(Operand(kScratchRegister, kOffsetToResultValue), rax);
if (FLAG_debug_code) {
__ movl(rax, Immediate(kWordBeforeResultValue));
@@ -2981,7 +2929,7 @@ void InstanceofStub::Generate(MacroAssembler* masm) {
ASSERT(false_offset >= 0 && false_offset < 0x100);
__ movl(rax, Immediate(false_offset));
__ movq(kScratchRegister, StackOperandForReturnAddress(0));
- __ subq(kScratchRegister, args.GetArgumentOperand(2));
+ __ subp(kScratchRegister, args.GetArgumentOperand(2));
__ movb(Operand(kScratchRegister, kOffsetToResultValue), rax);
if (FLAG_debug_code) {
__ movl(rax, Immediate(kWordBeforeResultValue));
@@ -2996,7 +2944,7 @@ void InstanceofStub::Generate(MacroAssembler* masm) {
if (HasCallSiteInlineCheck()) {
// Remove extra value from the stack.
__ PopReturnAddressTo(rcx);
- __ pop(rax);
+ __ Pop(rax);
__ PushReturnAddressFrom(rcx);
}
__ InvokeBuiltin(Builtins::INSTANCE_OF, JUMP_FUNCTION);
@@ -3061,21 +3009,21 @@ void StringCharCodeAtGenerator::GenerateSlow(
index_not_number_,
DONT_DO_SMI_CHECK);
call_helper.BeforeCall(masm);
- __ push(object_);
- __ push(index_); // Consumed by runtime conversion function.
+ __ Push(object_);
+ __ Push(index_); // Consumed by runtime conversion function.
if (index_flags_ == STRING_INDEX_IS_NUMBER) {
__ CallRuntime(Runtime::kNumberToIntegerMapMinusZero, 1);
} else {
ASSERT(index_flags_ == STRING_INDEX_IS_ARRAY_INDEX);
// NumberToSmi discards numbers that are not exact integers.
- __ CallRuntime(Runtime::kNumberToSmi, 1);
+ __ CallRuntime(Runtime::kHiddenNumberToSmi, 1);
}
if (!index_.is(rax)) {
// Save the conversion result before the pop instructions below
// have a chance to overwrite it.
__ movp(index_, rax);
}
- __ pop(object_);
+ __ Pop(object_);
// Reload the instance type.
__ movp(result_, FieldOperand(object_, HeapObject::kMapOffset));
__ movzxbl(result_, FieldOperand(result_, Map::kInstanceTypeOffset));
@@ -3090,10 +3038,10 @@ void StringCharCodeAtGenerator::GenerateSlow(
// is too complex (e.g., when the string needs to be flattened).
__ bind(&call_runtime_);
call_helper.BeforeCall(masm);
- __ push(object_);
+ __ Push(object_);
__ Integer32ToSmi(index_, index_);
- __ push(index_);
- __ CallRuntime(Runtime::kStringCharCodeAt, 2);
+ __ Push(index_);
+ __ CallRuntime(Runtime::kHiddenStringCharCodeAt, 2);
if (!result_.is(rax)) {
__ movp(result_, rax);
}
@@ -3130,7 +3078,7 @@ void StringCharFromCodeGenerator::GenerateSlow(
__ bind(&slow_case_);
call_helper.BeforeCall(masm);
- __ push(code_);
+ __ Push(code_);
__ CallRuntime(Runtime::kCharFromCode, 1);
if (!result_.is(rax)) {
__ movp(result_, rax);
@@ -3174,11 +3122,11 @@ void StringHelper::GenerateCopyCharactersREP(MacroAssembler* masm,
// Copy from edi to esi using rep movs instruction.
__ movl(kScratchRegister, count);
__ shr(count, Immediate(kPointerSizeLog2)); // Number of doublewords to copy.
- __ repmovsq();
+ __ repmovsp();
// Find number of bytes left.
__ movl(count, kScratchRegister);
- __ and_(count, Immediate(kPointerSize - 1));
+ __ andp(count, Immediate(kPointerSize - 1));
// Check if there are more bytes to copy.
__ bind(&last_bytes);
@@ -3190,8 +3138,8 @@ void StringHelper::GenerateCopyCharactersREP(MacroAssembler* masm,
__ bind(&loop);
__ movb(kScratchRegister, Operand(src, 0));
__ movb(Operand(dest, 0), kScratchRegister);
- __ incq(src);
- __ incq(dest);
+ __ incp(src);
+ __ incp(dest);
__ decl(count);
__ j(not_zero, &loop);
@@ -3293,7 +3241,7 @@ void SubStringStub::Generate(MacroAssembler* masm) {
__ JumpUnlessBothNonNegativeSmi(rcx, rdx, &runtime);
__ SmiSub(rcx, rcx, rdx); // Overflow doesn't happen.
- __ cmpq(rcx, FieldOperand(rax, String::kLengthOffset));
+ __ cmpp(rcx, FieldOperand(rax, String::kLengthOffset));
Label not_original_string;
// Shorter than original string's length: an actual substring.
__ j(below, &not_original_string, Label::kNear);
@@ -3339,7 +3287,7 @@ void SubStringStub::Generate(MacroAssembler* masm) {
__ bind(&sliced_string);
// Sliced string. Fetch parent and correct start index by offset.
- __ addq(rdx, FieldOperand(rax, SlicedString::kOffsetOffset));
+ __ addp(rdx, FieldOperand(rax, SlicedString::kOffsetOffset));
__ movp(rdi, FieldOperand(rax, SlicedString::kParentOffset));
// Update instance type.
__ movp(rbx, FieldOperand(rdi, HeapObject::kMapOffset));
@@ -3360,7 +3308,7 @@ void SubStringStub::Generate(MacroAssembler* masm) {
// rcx: length
// If coming from the make_two_character_string path, the string
// is too short to be sliced anyways.
- __ cmpq(rcx, Immediate(SlicedString::kMinLength));
+ __ cmpp(rcx, Immediate(SlicedString::kMinLength));
// Short slice. Copy instead of slicing.
__ j(less, &copy_routine);
// Allocate new sliced string. At this point we do not reload the instance
@@ -3410,7 +3358,7 @@ void SubStringStub::Generate(MacroAssembler* masm) {
__ movp(rdi, FieldOperand(rdi, ExternalString::kResourceDataOffset));
// Move the pointer so that offset-wise, it looks like a sequential string.
STATIC_ASSERT(SeqTwoByteString::kHeaderSize == SeqOneByteString::kHeaderSize);
- __ subq(rdi, Immediate(SeqTwoByteString::kHeaderSize - kHeapObjectTag));
+ __ subp(rdi, Immediate(SeqTwoByteString::kHeaderSize - kHeapObjectTag));
__ bind(&sequential_string);
STATIC_ASSERT((kOneByteStringTag & kStringEncodingMask) != 0);
@@ -3425,11 +3373,11 @@ void SubStringStub::Generate(MacroAssembler* masm) {
__ movp(r14, rsi); // esi used by following code.
{ // Locate character of sub string start.
SmiIndex smi_as_index = masm->SmiToIndex(rdx, rdx, times_1);
- __ lea(rsi, Operand(rdi, smi_as_index.reg, smi_as_index.scale,
+ __ leap(rsi, Operand(rdi, smi_as_index.reg, smi_as_index.scale,
SeqOneByteString::kHeaderSize - kHeapObjectTag));
}
// Locate first character of result.
- __ lea(rdi, FieldOperand(rax, SeqOneByteString::kHeaderSize));
+ __ leap(rdi, FieldOperand(rax, SeqOneByteString::kHeaderSize));
// rax: result string
// rcx: result length
@@ -3450,11 +3398,11 @@ void SubStringStub::Generate(MacroAssembler* masm) {
__ movp(r14, rsi); // esi used by following code.
{ // Locate character of sub string start.
SmiIndex smi_as_index = masm->SmiToIndex(rdx, rdx, times_2);
- __ lea(rsi, Operand(rdi, smi_as_index.reg, smi_as_index.scale,
+ __ leap(rsi, Operand(rdi, smi_as_index.reg, smi_as_index.scale,
SeqOneByteString::kHeaderSize - kHeapObjectTag));
}
// Locate first character of result.
- __ lea(rdi, FieldOperand(rax, SeqTwoByteString::kHeaderSize));
+ __ leap(rdi, FieldOperand(rax, SeqTwoByteString::kHeaderSize));
// rax: result string
// rcx: result length
@@ -3468,7 +3416,7 @@ void SubStringStub::Generate(MacroAssembler* masm) {
// Just jump to runtime to create the sub string.
__ bind(&runtime);
- __ TailCallRuntime(Runtime::kSubString, 3, 1);
+ __ TailCallRuntime(Runtime::kHiddenSubString, 3, 1);
__ bind(&single_char);
// rax: string
@@ -3610,11 +3558,11 @@ void StringCompareStub::GenerateAsciiCharsCompareLoop(
// start. This means that loop ends when index reaches zero, which
// doesn't need an additional compare.
__ SmiToInteger32(length, length);
- __ lea(left,
+ __ leap(left,
FieldOperand(left, length, times_1, SeqOneByteString::kHeaderSize));
- __ lea(right,
+ __ leap(right,
FieldOperand(right, length, times_1, SeqOneByteString::kHeaderSize));
- __ neg(length);
+ __ negq(length);
Register index = length; // index = -length;
// Compare loop.
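The negated-length indexing set up above is a common codegen idiom: point both strings just past the region to compare, make the index negative, and let it count up to zero so the loop needs no separate bounds check. In plain C++ (a sketch of the idiom for one-byte strings with an untagged length):

    #include <cstdint>

    int CompareChars(const uint8_t* left, const uint8_t* right, int length) {
      left += length;   // point just past the end
      right += length;
      for (int index = -length; index != 0; ++index) {  // ends exactly at zero
        if (left[index] != right[index]) {
          return left[index] < right[index] ? -1 : 1;
        }
      }
      return 0;
    }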
@@ -3642,7 +3590,7 @@ void StringCompareStub::Generate(MacroAssembler* masm) {
// Check for identity.
Label not_same;
- __ cmpq(rdx, rax);
+ __ cmpp(rdx, rax);
__ j(not_equal, &not_same, Label::kNear);
__ Move(rax, Smi::FromInt(EQUAL));
Counters* counters = masm->isolate()->counters();
@@ -3658,14 +3606,14 @@ void StringCompareStub::Generate(MacroAssembler* masm) {
__ IncrementCounter(counters->string_compare_native(), 1);
// Drop arguments from the stack
__ PopReturnAddressTo(rcx);
- __ addq(rsp, Immediate(2 * kPointerSize));
+ __ addp(rsp, Immediate(2 * kPointerSize));
__ PushReturnAddressFrom(rcx);
GenerateCompareFlatAsciiStrings(masm, rdx, rax, rcx, rbx, rdi, r8);
// Call the runtime; it returns -1 (less), 0 (equal), or 1 (greater)
// tagged as a small integer.
__ bind(&runtime);
- __ TailCallRuntime(Runtime::kStringCompare, 2, 1);
+ __ TailCallRuntime(Runtime::kHiddenStringCompare, 2, 1);
}
@@ -3763,7 +3711,7 @@ void ArrayPushStub::Generate(MacroAssembler* masm) {
// Verify that the object can be transitioned in place.
const int origin_offset = header_size + elements_kind() * kPointerSize;
__ movp(rdi, FieldOperand(rbx, origin_offset));
- __ cmpq(rdi, FieldOperand(rdx, HeapObject::kMapOffset));
+ __ cmpp(rdi, FieldOperand(rdx, HeapObject::kMapOffset));
__ j(not_equal, &call_builtin);
const int target_offset = header_size + target_kind * kPointerSize;
@@ -3777,7 +3725,7 @@ void ArrayPushStub::Generate(MacroAssembler* masm) {
__ Integer32ToSmiField(FieldOperand(rdx, JSArray::kLengthOffset), rax);
// Store the value.
- __ lea(rdx, FieldOperand(rdi,
+ __ leap(rdx, FieldOperand(rdi,
rax, times_pointer_size,
FixedArray::kHeaderSize - argc * kPointerSize));
__ movp(Operand(rdx, 0), rcx);
@@ -3816,14 +3764,14 @@ void ArrayPushStub::Generate(MacroAssembler* masm) {
__ Load(rcx, new_space_allocation_top);
// Check if it's the end of elements.
- __ lea(rdx, FieldOperand(rdi,
+ __ leap(rdx, FieldOperand(rdi,
rax, times_pointer_size,
FixedArray::kHeaderSize - argc * kPointerSize));
- __ cmpq(rdx, rcx);
+ __ cmpp(rdx, rcx);
__ j(not_equal, &call_builtin);
- __ addq(rcx, Immediate(kAllocationDelta * kPointerSize));
+ __ addp(rcx, Immediate(kAllocationDelta * kPointerSize));
Operand limit_operand = masm->ExternalOperand(new_space_allocation_limit);
- __ cmpq(rcx, limit_operand);
+ __ cmpp(rcx, limit_operand);
__ j(above, &call_builtin);
// We fit and could grow elements.
@@ -3901,13 +3849,13 @@ void ICCompareStub::GenerateSmis(MacroAssembler* masm) {
if (GetCondition() == equal) {
// For equality we do not care about the sign of the result.
- __ subq(rax, rdx);
+ __ subp(rax, rdx);
} else {
Label done;
- __ subq(rdx, rax);
+ __ subp(rdx, rax);
__ j(no_overflow, &done, Label::kNear);
// Correct sign of result in case of overflow.
- __ not_(rdx);
+ __ notp(rdx);
__ bind(&done);
__ movp(rax, rdx);
}
@@ -3965,7 +3913,7 @@ void ICCompareStub::GenerateNumbers(MacroAssembler* masm) {
__ movl(rax, Immediate(0));
__ movl(rcx, Immediate(0));
__ setcc(above, rax); // Add one to zero if carry clear and not equal.
- __ sbbq(rax, rcx); // Subtract one if below (aka. carry set).
+ __ sbbp(rax, rcx); // Subtract one if below (aka. carry set).
__ ret(0);
__ bind(&unordered);
@@ -4013,16 +3961,16 @@ void ICCompareStub::GenerateInternalizedStrings(MacroAssembler* masm) {
// Check that both operands are internalized strings.
__ movp(tmp1, FieldOperand(left, HeapObject::kMapOffset));
__ movp(tmp2, FieldOperand(right, HeapObject::kMapOffset));
- __ movzxbq(tmp1, FieldOperand(tmp1, Map::kInstanceTypeOffset));
- __ movzxbq(tmp2, FieldOperand(tmp2, Map::kInstanceTypeOffset));
+ __ movzxbp(tmp1, FieldOperand(tmp1, Map::kInstanceTypeOffset));
+ __ movzxbp(tmp2, FieldOperand(tmp2, Map::kInstanceTypeOffset));
STATIC_ASSERT(kInternalizedTag == 0 && kStringTag == 0);
- __ or_(tmp1, tmp2);
+ __ orp(tmp1, tmp2);
__ testb(tmp1, Immediate(kIsNotStringMask | kIsNotInternalizedMask));
__ j(not_zero, &miss, Label::kNear);
// Internalized strings are compared by identity.
Label done;
- __ cmpq(left, right);
+ __ cmpp(left, right);
// Make sure rax is non-zero. At this point input operands are
// guaranteed to be non-zero.
ASSERT(right.is(rax));
@@ -4057,15 +4005,15 @@ void ICCompareStub::GenerateUniqueNames(MacroAssembler* masm) {
// types loaded in tmp1 and tmp2.
__ movp(tmp1, FieldOperand(left, HeapObject::kMapOffset));
__ movp(tmp2, FieldOperand(right, HeapObject::kMapOffset));
- __ movzxbq(tmp1, FieldOperand(tmp1, Map::kInstanceTypeOffset));
- __ movzxbq(tmp2, FieldOperand(tmp2, Map::kInstanceTypeOffset));
+ __ movzxbp(tmp1, FieldOperand(tmp1, Map::kInstanceTypeOffset));
+ __ movzxbp(tmp2, FieldOperand(tmp2, Map::kInstanceTypeOffset));
__ JumpIfNotUniqueName(tmp1, &miss, Label::kNear);
__ JumpIfNotUniqueName(tmp2, &miss, Label::kNear);
// Unique names are compared by identity.
Label done;
- __ cmpq(left, right);
+ __ cmpp(left, right);
// Make sure rax is non-zero. At this point input operands are
// guaranteed to be non-zero.
ASSERT(right.is(rax));
@@ -4102,17 +4050,17 @@ void ICCompareStub::GenerateStrings(MacroAssembler* masm) {
// types loaded in tmp1 and tmp2.
__ movp(tmp1, FieldOperand(left, HeapObject::kMapOffset));
__ movp(tmp2, FieldOperand(right, HeapObject::kMapOffset));
- __ movzxbq(tmp1, FieldOperand(tmp1, Map::kInstanceTypeOffset));
- __ movzxbq(tmp2, FieldOperand(tmp2, Map::kInstanceTypeOffset));
+ __ movzxbp(tmp1, FieldOperand(tmp1, Map::kInstanceTypeOffset));
+ __ movzxbp(tmp2, FieldOperand(tmp2, Map::kInstanceTypeOffset));
__ movp(tmp3, tmp1);
STATIC_ASSERT(kNotStringTag != 0);
- __ or_(tmp3, tmp2);
+ __ orp(tmp3, tmp2);
__ testb(tmp3, Immediate(kIsNotStringMask));
__ j(not_zero, &miss);
// Fast check for identical strings.
Label not_same;
- __ cmpq(left, right);
+ __ cmpp(left, right);
__ j(not_equal, &not_same, Label::kNear);
STATIC_ASSERT(EQUAL == 0);
STATIC_ASSERT(kSmiTag == 0);
@@ -4128,7 +4076,7 @@ void ICCompareStub::GenerateStrings(MacroAssembler* masm) {
if (equality) {
Label do_compare;
STATIC_ASSERT(kInternalizedTag == 0);
- __ or_(tmp1, tmp2);
+ __ orp(tmp1, tmp2);
__ testb(tmp1, Immediate(kIsNotInternalizedMask));
__ j(not_zero, &do_compare, Label::kNear);
// Make sure rax is non-zero. At this point input operands are
@@ -4154,13 +4102,13 @@ void ICCompareStub::GenerateStrings(MacroAssembler* masm) {
// Handle more complex cases in runtime.
__ bind(&runtime);
__ PopReturnAddressTo(tmp1);
- __ push(left);
- __ push(right);
+ __ Push(left);
+ __ Push(right);
__ PushReturnAddressFrom(tmp1);
if (equality) {
__ TailCallRuntime(Runtime::kStringEquals, 2, 1);
} else {
- __ TailCallRuntime(Runtime::kStringCompare, 2, 1);
+ __ TailCallRuntime(Runtime::kHiddenStringCompare, 2, 1);
}
__ bind(&miss);
@@ -4180,7 +4128,7 @@ void ICCompareStub::GenerateObjects(MacroAssembler* masm) {
__ j(not_equal, &miss, Label::kNear);
ASSERT(GetCondition() == equal);
- __ subq(rax, rdx);
+ __ subp(rax, rdx);
__ ret(0);
__ bind(&miss);
@@ -4200,7 +4148,7 @@ void ICCompareStub::GenerateKnownObjects(MacroAssembler* masm) {
__ Cmp(rbx, known_map_);
__ j(not_equal, &miss, Label::kNear);
- __ subq(rax, rdx);
+ __ subp(rax, rdx);
__ ret(0);
__ bind(&miss);
@@ -4215,17 +4163,17 @@ void ICCompareStub::GenerateMiss(MacroAssembler* masm) {
ExternalReference(IC_Utility(IC::kCompareIC_Miss), masm->isolate());
FrameScope scope(masm, StackFrame::INTERNAL);
- __ push(rdx);
- __ push(rax);
- __ push(rdx);
- __ push(rax);
+ __ Push(rdx);
+ __ Push(rax);
+ __ Push(rdx);
+ __ Push(rax);
__ Push(Smi::FromInt(op_));
__ CallExternalReference(miss, 3);
// Compute the entry point of the rewritten stub.
- __ lea(rdi, FieldOperand(rax, Code::kHeaderSize));
- __ pop(rax);
- __ pop(rdx);
+ __ leap(rdi, FieldOperand(rax, Code::kHeaderSize));
+ __ Pop(rax);
+ __ Pop(rdx);
}
// Do a tail call to the rewritten stub.
@@ -4252,12 +4200,12 @@ void NameDictionaryLookupStub::GenerateNegativeLookup(MacroAssembler* masm,
// Capacity is smi 2^n.
__ SmiToInteger32(index, FieldOperand(properties, kCapacityOffset));
__ decl(index);
- __ and_(index,
+ __ andp(index,
Immediate(name->Hash() + NameDictionary::GetProbeOffset(i)));
// Scale the index by multiplying by the entry size.
ASSERT(NameDictionary::kEntrySize == 3);
- __ lea(index, Operand(index, index, times_2, 0)); // index *= 3.
+ __ leap(index, Operand(index, index, times_2, 0)); // index *= 3.
Register entity_name = r0;
// Having undefined at this place means the name is not contained.
@@ -4287,9 +4235,9 @@ void NameDictionaryLookupStub::GenerateNegativeLookup(MacroAssembler* masm,
NameDictionaryLookupStub stub(properties, r0, r0, NEGATIVE_LOOKUP);
__ Push(Handle<Object>(name));
- __ push(Immediate(name->Hash()));
+ __ Push(Immediate(name->Hash()));
__ CallStub(&stub);
- __ testq(r0, r0);
+ __ testp(r0, r0);
__ j(not_zero, miss);
__ jmp(done);
}
@@ -4323,26 +4271,26 @@ void NameDictionaryLookupStub::GeneratePositiveLookup(MacroAssembler* masm,
if (i > 0) {
__ addl(r1, Immediate(NameDictionary::GetProbeOffset(i)));
}
- __ and_(r1, r0);
+ __ andp(r1, r0);
// Scale the index by multiplying by the entry size.
ASSERT(NameDictionary::kEntrySize == 3);
- __ lea(r1, Operand(r1, r1, times_2, 0)); // r1 = r1 * 3
+ __ leap(r1, Operand(r1, r1, times_2, 0)); // r1 = r1 * 3
// Check if the key is identical to the name.
- __ cmpq(name, Operand(elements, r1, times_pointer_size,
+ __ cmpp(name, Operand(elements, r1, times_pointer_size,
kElementsStartOffset - kHeapObjectTag));
__ j(equal, done);
}
NameDictionaryLookupStub stub(elements, r0, r1, POSITIVE_LOOKUP);
- __ push(name);
+ __ Push(name);
__ movl(r0, FieldOperand(name, Name::kHashFieldOffset));
__ shrl(r0, Immediate(Name::kHashShift));
- __ push(r0);
+ __ Push(r0);
__ CallStub(&stub);
- __ testq(r0, r0);
+ __ testp(r0, r0);
__ j(zero, miss);
__ jmp(done);
}
@@ -4369,7 +4317,7 @@ void NameDictionaryLookupStub::Generate(MacroAssembler* masm) {
__ SmiToInteger32(scratch, FieldOperand(dictionary_, kCapacityOffset));
__ decl(scratch);
- __ push(scratch);
+ __ Push(scratch);
// If names of slots in range from 1 to kProbes - 1 for the hash value are
// not equal to the name and kProbes-th slot is not used (its name is the
@@ -4384,11 +4332,11 @@ void NameDictionaryLookupStub::Generate(MacroAssembler* masm) {
if (i > 0) {
__ addl(scratch, Immediate(NameDictionary::GetProbeOffset(i)));
}
- __ and_(scratch, Operand(rsp, 0));
+ __ andp(scratch, Operand(rsp, 0));
// Scale the index by multiplying by the entry size.
ASSERT(NameDictionary::kEntrySize == 3);
- __ lea(index_, Operand(scratch, scratch, times_2, 0)); // index *= 3.
+ __ leap(index_, Operand(scratch, scratch, times_2, 0)); // index *= 3.
// Having undefined at this place means the name is not contained.
__ movp(scratch, Operand(dictionary_,
@@ -4400,7 +4348,7 @@ void NameDictionaryLookupStub::Generate(MacroAssembler* masm) {
__ j(equal, &not_in_dictionary);
// Stop if found the property.
- __ cmpq(scratch, args.GetArgumentOperand(0));
+ __ cmpp(scratch, args.GetArgumentOperand(0));
__ j(equal, &in_dictionary);
if (i != kTotalProbes - 1 && mode_ == NEGATIVE_LOOKUP) {
@@ -4511,7 +4459,7 @@ void RecordWriteStub::GenerateIncremental(MacroAssembler* masm, Mode mode) {
// remembered set.
CheckNeedsToInformIncrementalMarker(
masm, kUpdateRememberedSetOnNoNeedToInformIncrementalMarker, mode);
- InformIncrementalMarker(masm, mode);
+ InformIncrementalMarker(masm);
regs_.Restore(masm);
__ RememberedSetHelper(object_,
address_,
@@ -4524,13 +4472,13 @@ void RecordWriteStub::GenerateIncremental(MacroAssembler* masm, Mode mode) {
CheckNeedsToInformIncrementalMarker(
masm, kReturnOnNoNeedToInformIncrementalMarker, mode);
- InformIncrementalMarker(masm, mode);
+ InformIncrementalMarker(masm);
regs_.Restore(masm);
__ ret(0);
}
-void RecordWriteStub::InformIncrementalMarker(MacroAssembler* masm, Mode mode) {
+void RecordWriteStub::InformIncrementalMarker(MacroAssembler* masm) {
regs_.SaveCallerSaveRegisters(masm, save_fp_regs_mode_);
Register address =
arg_reg_1.is(regs_.address()) ? kScratchRegister : regs_.address();
@@ -4546,18 +4494,10 @@ void RecordWriteStub::InformIncrementalMarker(MacroAssembler* masm, Mode mode) {
AllowExternalCallThatCantCauseGC scope(masm);
__ PrepareCallCFunction(argument_count);
- if (mode == INCREMENTAL_COMPACTION) {
- __ CallCFunction(
- ExternalReference::incremental_evacuation_record_write_function(
- masm->isolate()),
- argument_count);
- } else {
- ASSERT(mode == INCREMENTAL);
- __ CallCFunction(
- ExternalReference::incremental_marking_record_write_function(
- masm->isolate()),
- argument_count);
- }
+ __ CallCFunction(
+ ExternalReference::incremental_marking_record_write_function(
+ masm->isolate()),
+ argument_count);
regs_.RestoreCallerSaveRegisters(masm, save_fp_regs_mode_);
}
@@ -4571,11 +4511,11 @@ void RecordWriteStub::CheckNeedsToInformIncrementalMarker(
Label need_incremental_pop_object;
__ movp(regs_.scratch0(), Immediate(~Page::kPageAlignmentMask));
- __ and_(regs_.scratch0(), regs_.object());
+ __ andp(regs_.scratch0(), regs_.object());
__ movp(regs_.scratch1(),
Operand(regs_.scratch0(),
MemoryChunk::kWriteBarrierCounterOffset));
- __ subq(regs_.scratch1(), Immediate(1));
+ __ subp(regs_.scratch1(), Immediate(1));
__ movp(Operand(regs_.scratch0(),
MemoryChunk::kWriteBarrierCounterOffset),
regs_.scratch1());
@@ -4626,13 +4566,13 @@ void RecordWriteStub::CheckNeedsToInformIncrementalMarker(
// We need an extra register for this, so we push the object register
// temporarily.
- __ push(regs_.object());
+ __ Push(regs_.object());
__ EnsureNotWhite(regs_.scratch0(), // The value.
regs_.scratch1(), // Scratch.
regs_.object(), // Scratch.
&need_incremental_pop_object,
Label::kNear);
- __ pop(regs_.object());
+ __ Pop(regs_.object());
regs_.Restore(masm);
if (on_no_need == kUpdateRememberedSetOnNoNeedToInformIncrementalMarker) {
@@ -4646,7 +4586,7 @@ void RecordWriteStub::CheckNeedsToInformIncrementalMarker(
}
__ bind(&need_incremental_pop_object);
- __ pop(regs_.object());
+ __ Pop(regs_.object());
__ bind(&need_incremental);
@@ -4687,12 +4627,12 @@ void StoreArrayLiteralElementStub::Generate(MacroAssembler* masm) {
__ bind(&slow_elements);
__ PopReturnAddressTo(rdi);
- __ push(rbx);
- __ push(rcx);
- __ push(rax);
+ __ Push(rbx);
+ __ Push(rcx);
+ __ Push(rax);
__ movp(rbx, Operand(rbp, JavaScriptFrameConstants::kFunctionOffset));
- __ push(FieldOperand(rbx, JSFunction::kLiteralsOffset));
- __ push(rdx);
+ __ Push(FieldOperand(rbx, JSFunction::kLiteralsOffset));
+ __ Push(rdx);
__ PushReturnAddressFrom(rdi);
__ TailCallRuntime(Runtime::kStoreArrayLiteralElement, 5, 1);
@@ -4700,7 +4640,7 @@ void StoreArrayLiteralElementStub::Generate(MacroAssembler* masm) {
__ bind(&fast_elements);
__ SmiToInteger32(kScratchRegister, rcx);
__ movp(rbx, FieldOperand(rbx, JSObject::kElementsOffset));
- __ lea(rcx, FieldOperand(rbx, kScratchRegister, times_pointer_size,
+ __ leap(rcx, FieldOperand(rbx, kScratchRegister, times_pointer_size,
FixedArrayBase::kHeaderSize));
__ movp(Operand(rcx, 0), rax);
// Update the write barrier for the array store.
@@ -4744,7 +4684,7 @@ void StubFailureTrampolineStub::Generate(MacroAssembler* masm) {
int additional_offset = function_mode_ == JS_FUNCTION_STUB_MODE
? kPointerSize
: 0;
- __ lea(rsp, MemOperand(rsp, rbx, times_pointer_size, additional_offset));
+ __ leap(rsp, MemOperand(rsp, rbx, times_pointer_size, additional_offset));
__ jmp(rcx); // Return to IC Miss stub, continuation still on stack.
}
@@ -4761,16 +4701,16 @@ void ProfileEntryHookStub::Generate(MacroAssembler* masm) {
// This stub can be called from essentially anywhere, so it needs to save
// all volatile and callee-save registers.
const size_t kNumSavedRegisters = 2;
- __ push(arg_reg_1);
- __ push(arg_reg_2);
+ __ pushq(arg_reg_1);
+ __ pushq(arg_reg_2);
// Calculate the original stack pointer and store it in the second arg.
- __ lea(arg_reg_2,
+ __ leap(arg_reg_2,
Operand(rsp, kNumSavedRegisters * kRegisterSize + kPCOnStackSize));
// Calculate the function address to the first arg.
__ movp(arg_reg_1, Operand(rsp, kNumSavedRegisters * kRegisterSize));
- __ subq(arg_reg_1, Immediate(Assembler::kShortCallInstructionLength));
+ __ subp(arg_reg_1, Immediate(Assembler::kShortCallInstructionLength));
// Save the remainder of the volatile registers.
masm->PushCallerSaved(kSaveFPRegs, arg_reg_1, arg_reg_2);
@@ -4787,8 +4727,8 @@ void ProfileEntryHookStub::Generate(MacroAssembler* masm) {
// Restore volatile regs.
masm->PopCallerSaved(kSaveFPRegs, arg_reg_1, arg_reg_2);
- __ pop(arg_reg_2);
- __ pop(arg_reg_1);
+ __ popq(arg_reg_2);
+ __ popq(arg_reg_1);
__ Ret();
}
@@ -4850,7 +4790,7 @@ static void CreateArrayDispatchOneArgument(MacroAssembler* masm,
// look at the first argument
StackArgumentsAccessor args(rsp, 1, ARGUMENTS_DONT_CONTAIN_RECEIVER);
__ movp(rcx, args.GetArgumentOperand(0));
- __ testq(rcx, rcx);
+ __ testp(rcx, rcx);
__ j(zero, &normal_sequence);
if (mode == DISABLE_ALLOCATION_SITES) {
@@ -4867,7 +4807,7 @@ static void CreateArrayDispatchOneArgument(MacroAssembler* masm,
__ TailCallStub(&stub);
} else if (mode == DONT_OVERRIDE) {
// We are going to create a holey array, but our kind is non-holey.
- // Fix kind and retry (only if we have an allocation site in the cell).
+ // Fix kind and retry (only if we have an allocation site in the slot).
__ incl(rdx);
if (FLAG_debug_code) {
@@ -4951,7 +4891,7 @@ void ArrayConstructorStub::GenerateDispatchToArrayStub(
AllocationSiteOverrideMode mode) {
if (argument_count_ == ANY) {
Label not_zero_case, not_one_case;
- __ testq(rax, rax);
+ __ testp(rax, rax);
__ j(not_zero, &not_zero_case);
CreateArrayDispatch<ArrayNoArgumentConstructorStub>(masm, mode);
@@ -4977,15 +4917,11 @@ void ArrayConstructorStub::GenerateDispatchToArrayStub(
void ArrayConstructorStub::Generate(MacroAssembler* masm) {
// ----------- S t a t e -------------
// -- rax : argc
- // -- rbx : type info cell
+ // -- rbx : AllocationSite or undefined
// -- rdi : constructor
// -- rsp[0] : return address
// -- rsp[8] : last argument
// -----------------------------------
- Handle<Object> undefined_sentinel(
- masm->isolate()->heap()->undefined_value(),
- masm->isolate());
-
if (FLAG_debug_code) {
// The array construct code is only set for the global and natives
// builtin Array functions which always have maps.
@@ -4999,31 +4935,21 @@ void ArrayConstructorStub::Generate(MacroAssembler* masm) {
__ CmpObjectType(rcx, MAP_TYPE, rcx);
__ Check(equal, kUnexpectedInitialMapForArrayFunction);
- // We should either have undefined in rbx or a valid cell
- Label okay_here;
- Handle<Map> cell_map = masm->isolate()->factory()->cell_map();
- __ Cmp(rbx, undefined_sentinel);
- __ j(equal, &okay_here);
- __ Cmp(FieldOperand(rbx, 0), cell_map);
- __ Assert(equal, kExpectedPropertyCellInRegisterRbx);
- __ bind(&okay_here);
+ // We should either have undefined in rbx or a valid AllocationSite
+ __ AssertUndefinedOrAllocationSite(rbx);
}
Label no_info;
- // If the type cell is undefined, or contains anything other than an
- // AllocationSite, call an array constructor that doesn't use AllocationSites.
- __ Cmp(rbx, undefined_sentinel);
+ // If the feedback vector is the undefined value call an array constructor
+ // that doesn't use AllocationSites.
+ __ CompareRoot(rbx, Heap::kUndefinedValueRootIndex);
__ j(equal, &no_info);
- __ movp(rbx, FieldOperand(rbx, Cell::kValueOffset));
- __ Cmp(FieldOperand(rbx, 0),
- masm->isolate()->factory()->allocation_site_map());
- __ j(not_equal, &no_info);
// Only look at the lower 16 bits of the transition info.
__ movp(rdx, FieldOperand(rbx, AllocationSite::kTransitionInfoOffset));
__ SmiToInteger32(rdx, rdx);
STATIC_ASSERT(AllocationSite::ElementsKindBits::kShift == 0);
- __ and_(rdx, Immediate(AllocationSite::ElementsKindBits::kMask));
+ __ andp(rdx, Immediate(AllocationSite::ElementsKindBits::kMask));
GenerateDispatchToArrayStub(masm, DONT_OVERRIDE);
__ bind(&no_info);
@@ -5036,7 +4962,7 @@ void InternalArrayConstructorStub::GenerateCase(
Label not_zero_case, not_one_case;
Label normal_sequence;
- __ testq(rax, rax);
+ __ testp(rax, rax);
__ j(not_zero, &not_zero_case);
InternalArrayNoArgumentConstructorStub stub0(kind);
__ TailCallStub(&stub0);
@@ -5050,7 +4976,7 @@ void InternalArrayConstructorStub::GenerateCase(
// look at the first argument
StackArgumentsAccessor args(rsp, 1, ARGUMENTS_DONT_CONTAIN_RECEIVER);
__ movp(rcx, args.GetArgumentOperand(0));
- __ testq(rcx, rcx);
+ __ testp(rcx, rcx);
__ j(zero, &normal_sequence);
InternalArraySingleArgumentConstructorStub
@@ -5071,7 +4997,6 @@ void InternalArrayConstructorStub::GenerateCase(
void InternalArrayConstructorStub::Generate(MacroAssembler* masm) {
// ----------- S t a t e -------------
// -- rax : argc
- // -- rbx : type info cell
// -- rdi : constructor
// -- rsp[0] : return address
// -- rsp[8] : last argument
@@ -5096,9 +5021,9 @@ void InternalArrayConstructorStub::Generate(MacroAssembler* masm) {
// Load the map's "bit field 2" into |result|. We only need the first byte,
// but the following masking takes care of that anyway.
- __ movzxbq(rcx, FieldOperand(rcx, Map::kBitField2Offset));
+ __ movzxbp(rcx, FieldOperand(rcx, Map::kBitField2Offset));
// Retrieve elements_kind from bit field 2.
- __ and_(rcx, Immediate(Map::kElementsKindMask));
+ __ andp(rcx, Immediate(Map::kElementsKindMask));
__ shr(rcx, Immediate(Map::kElementsKindShift));
if (FLAG_debug_code) {
@@ -5144,7 +5069,7 @@ void CallApiFunctionStub::Generate(MacroAssembler* masm) {
Register context = rsi;
int argc = ArgumentBits::decode(bit_field_);
- bool restore_context = RestoreContextBits::decode(bit_field_);
+ bool is_store = IsStoreBits::decode(bit_field_);
bool call_data_undefined = CallDataUndefinedBits::decode(bit_field_);
typedef FunctionCallbackArguments FCA;
@@ -5161,29 +5086,29 @@ void CallApiFunctionStub::Generate(MacroAssembler* masm) {
__ PopReturnAddressTo(return_address);
// context save
- __ push(context);
+ __ Push(context);
// load context from callee
__ movp(context, FieldOperand(callee, JSFunction::kContextOffset));
// callee
- __ push(callee);
+ __ Push(callee);
// call data
- __ push(call_data);
+ __ Push(call_data);
Register scratch = call_data;
if (!call_data_undefined) {
__ LoadRoot(scratch, Heap::kUndefinedValueRootIndex);
}
// return value
- __ push(scratch);
+ __ Push(scratch);
// return value default
- __ push(scratch);
+ __ Push(scratch);
// isolate
__ Move(scratch,
ExternalReference::isolate_address(masm->isolate()));
- __ push(scratch);
+ __ Push(scratch);
// holder
- __ push(holder);
+ __ Push(holder);
__ movp(scratch, rsp);
// Push return address back on stack.
@@ -5197,7 +5122,7 @@ void CallApiFunctionStub::Generate(MacroAssembler* masm) {
// FunctionCallbackInfo::implicit_args_.
__ movp(StackSpaceOperand(0), scratch);
- __ addq(scratch, Immediate((argc + FCA::kArgsLength - 1) * kPointerSize));
+ __ addp(scratch, Immediate((argc + FCA::kArgsLength - 1) * kPointerSize));
__ movp(StackSpaceOperand(1), scratch); // FunctionCallbackInfo::values_.
__ Set(StackSpaceOperand(2), argc); // FunctionCallbackInfo::length_.
// FunctionCallbackInfo::is_construct_call_.
@@ -5216,23 +5141,25 @@ void CallApiFunctionStub::Generate(MacroAssembler* masm) {
ASSERT(!api_function_address.is(arguments_arg));
// v8::InvocationCallback's argument.
- __ lea(arguments_arg, StackSpaceOperand(0));
+ __ leap(arguments_arg, StackSpaceOperand(0));
Address thunk_address = FUNCTION_ADDR(&InvokeFunctionCallback);
- StackArgumentsAccessor args_from_rbp(rbp, FCA::kArgsLength,
+ // Accessor for FunctionCallbackInfo and first js arg.
+ StackArgumentsAccessor args_from_rbp(rbp, FCA::kArgsLength + 1,
ARGUMENTS_DONT_CONTAIN_RECEIVER);
Operand context_restore_operand = args_from_rbp.GetArgumentOperand(
- FCA::kArgsLength - 1 - FCA::kContextSaveIndex);
+ FCA::kArgsLength - FCA::kContextSaveIndex);
+ // Stores return the first js argument
Operand return_value_operand = args_from_rbp.GetArgumentOperand(
- FCA::kArgsLength - 1 - FCA::kReturnValueOffset);
+ is_store ? 0 : FCA::kArgsLength - FCA::kReturnValueOffset);
__ CallApiFunctionAndReturn(
api_function_address,
thunk_address,
callback_arg,
argc + FCA::kArgsLength + 1,
return_value_operand,
- restore_context ? &context_restore_operand : NULL);
+ &context_restore_operand);
}
@@ -5263,17 +5190,17 @@ void CallApiGetterStub::Generate(MacroAssembler* masm) {
// Allocate v8::AccessorInfo in non-GCed stack space.
const int kArgStackSpace = 1;
- __ lea(name_arg, Operand(rsp, kPCOnStackSize));
+ __ leap(name_arg, Operand(rsp, kPCOnStackSize));
__ PrepareCallApiFunction(kArgStackSpace);
- __ lea(scratch, Operand(name_arg, 1 * kPointerSize));
+ __ leap(scratch, Operand(name_arg, 1 * kPointerSize));
// v8::PropertyAccessorInfo::args_.
__ movp(StackSpaceOperand(0), scratch);
// The context register (rsi) has been saved in PrepareCallApiFunction and
// could be used to pass arguments.
- __ lea(accessor_info_arg, StackSpaceOperand(0));
+ __ leap(accessor_info_arg, StackSpaceOperand(0));
Address thunk_address = FUNCTION_ADDR(&InvokeAccessorGetterCallback);
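
Note: most of the mechanical churn in the code-stubs-x64.cc hunks above is the switch from the always-64-bit "q" forms (cmpq, testq, and_, lea) to pointer-width "p" forms (cmpp, testp, andp, leap). The following is only a minimal standalone sketch of that idea, with invented names (cmpp_sketch, kPointerSizeSketch) rather than V8's real MacroAssembler interface:

    #include <cstdint>
    #include <cstdio>

    // Hypothetical stand-ins for the two fixed-width forms an assembler already has.
    static void cmpl_sketch(uint32_t a, uint32_t b) { std::printf("cmpl %u,%u\n", a, b); }
    static void cmpq_sketch(uint64_t a, uint64_t b) {
      std::printf("cmpq %llu,%llu\n", (unsigned long long)a, (unsigned long long)b);
    }

    // The "p" form derives its operand width from the target's pointer size, so
    // stub code written once keeps working if pointers are narrower than 64 bits.
    static const int kPointerSizeSketch = sizeof(void*);

    static void cmpp_sketch(uint64_t a, uint64_t b) {
      if (kPointerSizeSketch == 8) {
        cmpq_sketch(a, b);                                              // 64-bit pointers
      } else {
        cmpl_sketch(static_cast<uint32_t>(a), static_cast<uint32_t>(b));  // 32-bit pointers
      }
    }

    int main() {
      cmpp_sketch(1, 2);  // Emits the 64-bit form on an ordinary LP64 build.
      return 0;
    }

On this 64-bit port the rename is behaviour-neutral; the point is that operand width now follows the pointer size instead of being hard-coded.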
diff --git a/deps/v8/src/x64/code-stubs-x64.h b/deps/v8/src/x64/code-stubs-x64.h
index c65307a74e..8c8ab691ac 100644
--- a/deps/v8/src/x64/code-stubs-x64.h
+++ b/deps/v8/src/x64/code-stubs-x64.h
@@ -305,19 +305,19 @@ class RecordWriteStub: public PlatformCodeStub {
// We don't have to save scratch0_orig_ because it was given to us as
// a scratch register. But if we had to switch to a different reg then
// we should save the new scratch0_.
- if (!scratch0_.is(scratch0_orig_)) masm->push(scratch0_);
+ if (!scratch0_.is(scratch0_orig_)) masm->Push(scratch0_);
if (!rcx.is(scratch0_orig_) &&
!rcx.is(object_orig_) &&
!rcx.is(address_orig_)) {
- masm->push(rcx);
+ masm->Push(rcx);
}
- masm->push(scratch1_);
+ masm->Push(scratch1_);
if (!address_.is(address_orig_)) {
- masm->push(address_);
+ masm->Push(address_);
masm->movp(address_, address_orig_);
}
if (!object_.is(object_orig_)) {
- masm->push(object_);
+ masm->Push(object_);
masm->movp(object_, object_orig_);
}
}
@@ -328,19 +328,19 @@ class RecordWriteStub: public PlatformCodeStub {
// one, since only one of them can alias with rcx.
if (!object_.is(object_orig_)) {
masm->movp(object_orig_, object_);
- masm->pop(object_);
+ masm->Pop(object_);
}
if (!address_.is(address_orig_)) {
masm->movp(address_orig_, address_);
- masm->pop(address_);
+ masm->Pop(address_);
}
- masm->pop(scratch1_);
+ masm->Pop(scratch1_);
if (!rcx.is(scratch0_orig_) &&
!rcx.is(object_orig_) &&
!rcx.is(address_orig_)) {
- masm->pop(rcx);
+ masm->Pop(rcx);
}
- if (!scratch0_.is(scratch0_orig_)) masm->pop(scratch0_);
+ if (!scratch0_.is(scratch0_orig_)) masm->Pop(scratch0_);
}
// If we have to call into C then we need to save and restore all caller-
@@ -401,7 +401,7 @@ class RecordWriteStub: public PlatformCodeStub {
MacroAssembler* masm,
OnNoNeedToInformIncrementalMarker on_no_need,
Mode mode);
- void InformIncrementalMarker(MacroAssembler* masm, Mode mode);
+ void InformIncrementalMarker(MacroAssembler* masm);
Major MajorKey() { return RecordWrite; }
diff --git a/deps/v8/src/x64/codegen-x64.cc b/deps/v8/src/x64/codegen-x64.cc
index f292f7d251..9b92dc8673 100644
--- a/deps/v8/src/x64/codegen-x64.cc
+++ b/deps/v8/src/x64/codegen-x64.cc
@@ -66,13 +66,13 @@ UnaryMathFunction CreateExpFunction() {
// xmm0: raw double input.
XMMRegister input = xmm0;
XMMRegister result = xmm1;
- __ push(rax);
- __ push(rbx);
+ __ pushq(rax);
+ __ pushq(rbx);
MathExpGenerator::EmitMathExp(&masm, input, result, xmm2, rax, rbx);
- __ pop(rbx);
- __ pop(rax);
+ __ popq(rbx);
+ __ popq(rax);
__ movsd(xmm0, result);
__ Ret();
@@ -300,7 +300,7 @@ void ElementsTransitionGenerator::GenerateSmiToDouble(
// Allocate new backing store.
__ bind(&new_backing_store);
- __ lea(rdi, Operand(r9, times_8, FixedArray::kHeaderSize));
+ __ leap(rdi, Operand(r9, times_8, FixedArray::kHeaderSize));
__ Allocate(rdi, r14, r11, r15, fail, TAG_OBJECT);
// Set backing store's map
__ LoadRoot(rdi, Heap::kFixedDoubleArrayMapRootIndex);
@@ -353,7 +353,7 @@ void ElementsTransitionGenerator::GenerateSmiToDouble(
__ movq(FieldOperand(r14, r9, times_8, FixedDoubleArray::kHeaderSize), r15);
__ bind(&entry);
- __ decq(r9);
+ __ decp(r9);
__ j(not_sign, &loop);
__ bind(&done);
@@ -381,13 +381,13 @@ void ElementsTransitionGenerator::GenerateDoubleToObject(
__ CompareRoot(r8, Heap::kEmptyFixedArrayRootIndex);
__ j(equal, &only_change_map);
- __ push(rax);
+ __ Push(rax);
__ movp(r8, FieldOperand(rdx, JSObject::kElementsOffset));
__ SmiToInteger32(r9, FieldOperand(r8, FixedDoubleArray::kLengthOffset));
// r8 : source FixedDoubleArray
// r9 : number of elements
- __ lea(rdi, Operand(r9, times_pointer_size, FixedArray::kHeaderSize));
+ __ leap(rdi, Operand(r9, times_pointer_size, FixedArray::kHeaderSize));
__ Allocate(rdi, r11, r14, r15, &gc_required, TAG_OBJECT);
// r11: destination FixedArray
__ LoadRoot(rdi, Heap::kFixedArrayMapRootIndex);
@@ -404,7 +404,7 @@ void ElementsTransitionGenerator::GenerateDoubleToObject(
// Call into runtime if GC is required.
__ bind(&gc_required);
- __ pop(rax);
+ __ Pop(rax);
__ movp(rsi, Operand(rbp, StandardFrameConstants::kContextOffset));
__ jmp(fail);
@@ -446,7 +446,7 @@ void ElementsTransitionGenerator::GenerateDoubleToObject(
rdi);
__ bind(&entry);
- __ decq(r9);
+ __ decp(r9);
__ j(not_sign, &loop);
// Replace receiver's backing store with newly created and filled FixedArray.
@@ -458,7 +458,7 @@ void ElementsTransitionGenerator::GenerateDoubleToObject(
kDontSaveFPRegs,
EMIT_REMEMBERED_SET,
OMIT_SMI_CHECK);
- __ pop(rax);
+ __ Pop(rax);
__ movp(rsi, Operand(rbp, StandardFrameConstants::kContextOffset));
__ bind(&only_change_map);
@@ -496,7 +496,7 @@ void StringCharLoadGenerator::Generate(MacroAssembler* masm,
// Handle slices.
Label indirect_string_loaded;
__ SmiToInteger32(result, FieldOperand(string, SlicedString::kOffsetOffset));
- __ addq(index, result);
+ __ addp(index, result);
__ movp(string, FieldOperand(string, SlicedString::kParentOffset));
__ jmp(&indirect_string_loaded, Label::kNear);
@@ -606,13 +606,13 @@ void MathExpGenerator::EmitMathExp(MacroAssembler* masm,
__ movq(temp2, double_scratch);
__ subsd(double_scratch, result);
__ movsd(result, Operand(kScratchRegister, 6 * kDoubleSize));
- __ lea(temp1, Operand(temp2, 0x1ff800));
- __ and_(temp2, Immediate(0x7ff));
+ __ leaq(temp1, Operand(temp2, 0x1ff800));
+ __ andq(temp2, Immediate(0x7ff));
__ shr(temp1, Immediate(11));
__ mulsd(double_scratch, Operand(kScratchRegister, 5 * kDoubleSize));
__ Move(kScratchRegister, ExternalReference::math_exp_log_table());
__ shl(temp1, Immediate(52));
- __ or_(temp1, Operand(kScratchRegister, temp2, times_8, 0));
+ __ orq(temp1, Operand(kScratchRegister, temp2, times_8, 0));
__ Move(kScratchRegister, ExternalReference::math_exp_constants(0));
__ subsd(double_scratch, input);
__ movsd(input, double_scratch);
@@ -640,10 +640,10 @@ static byte* GetNoCodeAgeSequence(uint32_t* length) {
// following boilerplate stack-building prologue that is found both in
// FUNCTION and OPTIMIZED_FUNCTION code:
CodePatcher patcher(sequence, kNoCodeAgeSequenceLength);
- patcher.masm()->push(rbp);
+ patcher.masm()->pushq(rbp);
patcher.masm()->movp(rbp, rsp);
- patcher.masm()->push(rsi);
- patcher.masm()->push(rdi);
+ patcher.masm()->Push(rsi);
+ patcher.masm()->Push(rdi);
initialized = true;
}
return sequence;
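
Note: the codegen changes above keep two spellings on purpose. Lowercase pushq/popq is the raw 64-bit instruction, used where whole machine registers are saved and restored (around EmitMathExp and for the frame pointer), while capitalized Push/Pop is the macro-assembler helper that works in pointer-sized stack slots and also accepts immediates, Smis and handles. A hedged, self-contained sketch of the distinction; the names and the 32-bit branch are illustrative, not V8's actual implementation:

    #include <cstdio>

    static const int kPointerSizeSketch = sizeof(void*);

    // Raw instruction: always pushes a full 64-bit register.
    static void pushq_sketch(const char* reg) { std::printf("pushq %s\n", reg); }

    // Macro-assembler helper: keeps the stack layout in pointer-sized slots.
    static void Push_sketch(const char* reg) {
      if (kPointerSizeSketch == 8) {
        pushq_sketch(reg);  // On a plain x64 target the two coincide.
      } else {
        // On a 32-bit-pointer target the helper would reserve one pointer-sized
        // slot and store only the low half of the register.
        std::printf("sub rsp,%d ; mov dword [rsp],%s\n", kPointerSizeSketch, reg);
      }
    }

    int main() {
      pushq_sketch("rbp");  // Frame-pointer save: always the raw 64-bit push.
      Push_sketch("rsi");   // Tagged-value save: pointer-sized slot.
      return 0;
    }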
diff --git a/deps/v8/src/x64/debug-x64.cc b/deps/v8/src/x64/debug-x64.cc
index 8ae03deae3..36d5df678e 100644
--- a/deps/v8/src/x64/debug-x64.cc
+++ b/deps/v8/src/x64/debug-x64.cc
@@ -121,7 +121,7 @@ static void Generate_DebugBreakCallHelper(MacroAssembler* masm,
Register reg = { r };
ASSERT(!reg.is(kScratchRegister));
if ((object_regs & (1 << r)) != 0) {
- __ push(reg);
+ __ Push(reg);
}
if ((non_object_regs & (1 << r)) != 0) {
__ PushInt64AsTwoSmis(reg);
@@ -145,7 +145,7 @@ static void Generate_DebugBreakCallHelper(MacroAssembler* masm,
__ Set(reg, kDebugZapValue);
}
if ((object_regs & (1 << r)) != 0) {
- __ pop(reg);
+ __ Pop(reg);
}
// Reconstruct the 64-bit value from two smis.
if ((non_object_regs & (1 << r)) != 0) {
@@ -154,9 +154,9 @@ static void Generate_DebugBreakCallHelper(MacroAssembler* masm,
}
// Read current padding counter and skip corresponding number of words.
- __ pop(kScratchRegister);
+ __ Pop(kScratchRegister);
__ SmiToInteger32(kScratchRegister, kScratchRegister);
- __ lea(rsp, Operand(rsp, kScratchRegister, times_pointer_size, 0));
+ __ leap(rsp, Operand(rsp, kScratchRegister, times_pointer_size, 0));
// Get rid of the internal frame.
}
@@ -164,7 +164,7 @@ static void Generate_DebugBreakCallHelper(MacroAssembler* masm,
// If this call did not replace a call but patched other code then there will
// be an unwanted return address left on the stack. Here we get rid of that.
if (convert_call_to_jmp) {
- __ addq(rsp, Immediate(kPCOnStackSize));
+ __ addp(rsp, Immediate(kPCOnStackSize));
}
// Now that the break point has been handled, resume normal execution by
@@ -173,7 +173,7 @@ static void Generate_DebugBreakCallHelper(MacroAssembler* masm,
ExternalReference after_break_target =
ExternalReference(Debug_Address::AfterBreakTarget(), masm->isolate());
__ Move(kScratchRegister, after_break_target);
- __ jmp(Operand(kScratchRegister, 0));
+ __ Jump(Operand(kScratchRegister, 0));
}
@@ -261,9 +261,11 @@ void Debug::GenerateCallFunctionStubRecordDebugBreak(MacroAssembler* masm) {
// Register state for CallFunctionStub (from code-stubs-x64.cc).
// ----------- S t a t e -------------
// -- rdi : function
- // -- rbx: cache cell for call target
+ // -- rbx: feedback array
+ // -- rdx: slot in feedback array
// -----------------------------------
- Generate_DebugBreakCallHelper(masm, rbx.bit() | rdi.bit(), 0, false);
+ Generate_DebugBreakCallHelper(masm, rbx.bit() | rdx.bit() | rdi.bit(),
+ 0, false);
}
@@ -285,10 +287,12 @@ void Debug::GenerateCallConstructStubRecordDebugBreak(MacroAssembler* masm) {
// above IC call.
// ----------- S t a t e -------------
// -- rax: number of arguments
- // -- rbx: cache cell for call target
+ // -- rbx: feedback array
+ // -- rdx: feedback slot (smi)
// -----------------------------------
// The number of arguments in rax is not smi encoded.
- Generate_DebugBreakCallHelper(masm, rbx.bit() | rdi.bit(), rax.bit(), false);
+ Generate_DebugBreakCallHelper(masm, rbx.bit() | rdx.bit() | rdi.bit(),
+ rax.bit(), false);
}
@@ -323,10 +327,10 @@ void Debug::GenerateFrameDropperLiveEdit(MacroAssembler* masm) {
__ movp(Operand(rax, 0), Immediate(0));
// We do not know our frame height, but set rsp based on rbp.
- __ lea(rsp, Operand(rbp, -1 * kPointerSize));
+ __ leap(rsp, Operand(rbp, -1 * kPointerSize));
- __ pop(rdi); // Function.
- __ pop(rbp);
+ __ Pop(rdi); // Function.
+ __ popq(rbp);
// Load context from the function.
__ movp(rsi, FieldOperand(rdi, JSFunction::kContextOffset));
@@ -334,7 +338,7 @@ void Debug::GenerateFrameDropperLiveEdit(MacroAssembler* masm) {
// Get function code.
__ movp(rdx, FieldOperand(rdi, JSFunction::kSharedFunctionInfoOffset));
__ movp(rdx, FieldOperand(rdx, SharedFunctionInfo::kCodeOffset));
- __ lea(rdx, FieldOperand(rdx, Code::kHeaderSize));
+ __ leap(rdx, FieldOperand(rdx, Code::kHeaderSize));
// Re-run JSFunction, rdi is function, rsi is context.
__ jmp(rdx);
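
Note: the debug-break changes above add rdx (the feedback slot) to the saved-register masks. Those masks are plain bitsets keyed by register code; a small standalone sketch of how a value like rbx.bit() | rdx.bit() | rdi.bit() is consumed (register codes follow the usual x64 encoding, the loop itself is illustrative):

    #include <cstdint>
    #include <cstdio>

    int main() {
      // Standard x64 register codes: rax=0, rcx=1, rdx=2, rbx=3, ..., rdi=7.
      const int kRdx = 2, kRbx = 3, kRdi = 7;
      uint32_t object_regs = (1u << kRbx) | (1u << kRdx) | (1u << kRdi);

      // The helper walks every register code and saves the ones in the mask.
      for (int r = 0; r < 16; ++r) {
        if (object_regs & (1u << r)) {
          std::printf("push register with code %d\n", r);
        }
      }
      return 0;
    }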
diff --git a/deps/v8/src/x64/deoptimizer-x64.cc b/deps/v8/src/x64/deoptimizer-x64.cc
index aee8be6e1b..4bc644defe 100644
--- a/deps/v8/src/x64/deoptimizer-x64.cc
+++ b/deps/v8/src/x64/deoptimizer-x64.cc
@@ -51,6 +51,26 @@ void Deoptimizer::PatchCodeForDeoptimization(Isolate* isolate, Code* code) {
// code patching below, and is not needed any more.
code->InvalidateRelocation();
+ if (FLAG_zap_code_space) {
+ // Fail hard and early if we enter this code object again.
+ byte* pointer = code->FindCodeAgeSequence();
+ if (pointer != NULL) {
+ pointer += kNoCodeAgeSequenceLength;
+ } else {
+ pointer = code->instruction_start();
+ }
+ CodePatcher patcher(pointer, 1);
+ patcher.masm()->int3();
+
+ DeoptimizationInputData* data =
+ DeoptimizationInputData::cast(code->deoptimization_data());
+ int osr_offset = data->OsrPcOffset()->value();
+ if (osr_offset > 0) {
+ CodePatcher osr_patcher(code->instruction_start() + osr_offset, 1);
+ osr_patcher.masm()->int3();
+ }
+ }
+
  // For each LLazyBailout instruction insert an absolute call to the
// corresponding deoptimization entry, or a short call to an absolute
// jump if space is short. The absolute jumps are put in a table just
@@ -63,6 +83,12 @@ void Deoptimizer::PatchCodeForDeoptimization(Isolate* isolate, Code* code) {
#endif
DeoptimizationInputData* deopt_data =
DeoptimizationInputData::cast(code->deoptimization_data());
+ SharedFunctionInfo* shared =
+ SharedFunctionInfo::cast(deopt_data->SharedFunctionInfo());
+ shared->EvictFromOptimizedCodeMap(code, "deoptimized code");
+ deopt_data->SetSharedFunctionInfo(Smi::FromInt(0));
+ // For each LLazyBailout instruction insert a call to the corresponding
+ // deoptimization entry.
for (int i = 0; i < deopt_data->DeoptCount(); i++) {
if (deopt_data->Pc(i)->value() == -1) continue;
// Position where Call will be patched in.
@@ -141,7 +167,7 @@ void Deoptimizer::EntryGenerator::Generate() {
const int kDoubleRegsSize = kDoubleSize *
XMMRegister::NumAllocatableRegisters();
- __ subq(rsp, Immediate(kDoubleRegsSize));
+ __ subp(rsp, Immediate(kDoubleRegsSize));
for (int i = 0; i < XMMRegister::NumAllocatableRegisters(); ++i) {
XMMRegister xmm_reg = XMMRegister::FromAllocationIndex(i);
@@ -153,7 +179,7 @@ void Deoptimizer::EntryGenerator::Generate() {
// to restore all later.
for (int i = 0; i < kNumberOfRegisters; i++) {
Register r = Register::from_code(i);
- __ push(r);
+ __ pushq(r);
}
const int kSavedRegistersAreaSize = kNumberOfRegisters * kRegisterSize +
@@ -170,11 +196,11 @@ void Deoptimizer::EntryGenerator::Generate() {
// Get the address of the location in the code object
// and compute the fp-to-sp delta in register arg5.
__ movp(arg_reg_4, Operand(rsp, kSavedRegistersAreaSize + 1 * kRegisterSize));
- __ lea(arg5, Operand(rsp, kSavedRegistersAreaSize + 1 * kRegisterSize +
+ __ leap(arg5, Operand(rsp, kSavedRegistersAreaSize + 1 * kRegisterSize +
kPCOnStackSize));
- __ subq(arg5, rbp);
- __ neg(arg5);
+ __ subp(arg5, rbp);
+ __ negp(arg5);
// Allocate a new deoptimizer object.
__ PrepareCallCFunction(6);
@@ -204,40 +230,40 @@ void Deoptimizer::EntryGenerator::Generate() {
// Fill in the input registers.
for (int i = kNumberOfRegisters -1; i >= 0; i--) {
int offset = (i * kPointerSize) + FrameDescription::registers_offset();
- __ pop(Operand(rbx, offset));
+ __ Pop(Operand(rbx, offset));
}
// Fill in the double input registers.
int double_regs_offset = FrameDescription::double_registers_offset();
for (int i = 0; i < XMMRegister::NumAllocatableRegisters(); i++) {
int dst_offset = i * kDoubleSize + double_regs_offset;
- __ pop(Operand(rbx, dst_offset));
+ __ popq(Operand(rbx, dst_offset));
}
// Remove the bailout id and return address from the stack.
- __ addq(rsp, Immediate(1 * kRegisterSize + kPCOnStackSize));
+ __ addp(rsp, Immediate(1 * kRegisterSize + kPCOnStackSize));
// Compute a pointer to the unwinding limit in register rcx; that is
// the first stack slot not part of the input frame.
__ movp(rcx, Operand(rbx, FrameDescription::frame_size_offset()));
- __ addq(rcx, rsp);
+ __ addp(rcx, rsp);
// Unwind the stack down to - but not including - the unwinding
// limit and copy the contents of the activation frame to the input
// frame description.
- __ lea(rdx, Operand(rbx, FrameDescription::frame_content_offset()));
+ __ leap(rdx, Operand(rbx, FrameDescription::frame_content_offset()));
Label pop_loop_header;
__ jmp(&pop_loop_header);
Label pop_loop;
__ bind(&pop_loop);
- __ pop(Operand(rdx, 0));
- __ addq(rdx, Immediate(sizeof(intptr_t)));
+ __ Pop(Operand(rdx, 0));
+ __ addp(rdx, Immediate(sizeof(intptr_t)));
__ bind(&pop_loop_header);
- __ cmpq(rcx, rsp);
+ __ cmpp(rcx, rsp);
__ j(not_equal, &pop_loop);
// Compute the output frame in the deoptimizer.
- __ push(rax);
+ __ pushq(rax);
__ PrepareCallCFunction(2);
__ movp(arg_reg_1, rax);
__ LoadAddress(arg_reg_2, ExternalReference::isolate_address(isolate()));
@@ -246,7 +272,7 @@ void Deoptimizer::EntryGenerator::Generate() {
__ CallCFunction(
ExternalReference::compute_output_frames_function(isolate()), 2);
}
- __ pop(rax);
+ __ popq(rax);
// Replace the current frame with the output frames.
Label outer_push_loop, inner_push_loop,
@@ -255,7 +281,7 @@ void Deoptimizer::EntryGenerator::Generate() {
// last FrameDescription**.
__ movl(rdx, Operand(rax, Deoptimizer::output_count_offset()));
__ movp(rax, Operand(rax, Deoptimizer::output_offset()));
- __ lea(rdx, Operand(rax, rdx, times_pointer_size, 0));
+ __ leap(rdx, Operand(rax, rdx, times_pointer_size, 0));
__ jmp(&outer_loop_header);
__ bind(&outer_push_loop);
// Inner loop state: rbx = current FrameDescription*, rcx = loop index.
@@ -263,14 +289,14 @@ void Deoptimizer::EntryGenerator::Generate() {
__ movp(rcx, Operand(rbx, FrameDescription::frame_size_offset()));
__ jmp(&inner_loop_header);
__ bind(&inner_push_loop);
- __ subq(rcx, Immediate(sizeof(intptr_t)));
- __ push(Operand(rbx, rcx, times_1, FrameDescription::frame_content_offset()));
+ __ subp(rcx, Immediate(sizeof(intptr_t)));
+ __ Push(Operand(rbx, rcx, times_1, FrameDescription::frame_content_offset()));
__ bind(&inner_loop_header);
- __ testq(rcx, rcx);
+ __ testp(rcx, rcx);
__ j(not_zero, &inner_push_loop);
- __ addq(rax, Immediate(kPointerSize));
+ __ addp(rax, Immediate(kPointerSize));
__ bind(&outer_loop_header);
- __ cmpq(rax, rdx);
+ __ cmpp(rax, rdx);
__ j(below, &outer_push_loop);
for (int i = 0; i < XMMRegister::NumAllocatableRegisters(); ++i) {
@@ -280,14 +306,14 @@ void Deoptimizer::EntryGenerator::Generate() {
}
// Push state, pc, and continuation from the last output frame.
- __ push(Operand(rbx, FrameDescription::state_offset()));
- __ push(Operand(rbx, FrameDescription::pc_offset()));
- __ push(Operand(rbx, FrameDescription::continuation_offset()));
+ __ Push(Operand(rbx, FrameDescription::state_offset()));
+ __ Push(Operand(rbx, FrameDescription::pc_offset()));
+ __ Push(Operand(rbx, FrameDescription::continuation_offset()));
// Push the registers from the last output frame.
for (int i = 0; i < kNumberOfRegisters; i++) {
int offset = (i * kPointerSize) + FrameDescription::registers_offset();
- __ push(Operand(rbx, offset));
+ __ Push(Operand(rbx, offset));
}
// Restore the registers from the stack.
@@ -299,7 +325,7 @@ void Deoptimizer::EntryGenerator::Generate() {
ASSERT(i > 0);
r = Register::from_code(i - 1);
}
- __ pop(r);
+ __ popq(r);
}
// Set up the roots register.
@@ -317,7 +343,7 @@ void Deoptimizer::TableEntryGenerator::GeneratePrologue() {
for (int i = 0; i < count(); i++) {
int start = masm()->pc_offset();
USE(start);
- __ push_imm32(i);
+ __ pushq_imm32(i);
__ jmp(&done);
ASSERT(masm()->pc_offset() - start == table_entry_size_);
}
@@ -335,6 +361,12 @@ void FrameDescription::SetCallerFp(unsigned offset, intptr_t value) {
}
+void FrameDescription::SetCallerConstantPool(unsigned offset, intptr_t value) {
+ // No out-of-line constant pool support.
+ UNREACHABLE();
+}
+
+
#undef __
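
Note: for the new FLAG_zap_code_space block above, the essential idea is small: once a code object is deoptimized, its regular entry point (and its OSR entry, if the deopt data records one) is overwritten with a breakpoint so any stale jump into it traps immediately. A hypothetical byte-level sketch that ignores V8's CodePatcher, the code-age sequence offset, and write-protection handling:

    #include <cstdint>

    // 0xCC is the single-byte x64 int3 (breakpoint) instruction.
    static const uint8_t kInt3 = 0xCC;

    static void ZapDeoptimizedEntrySketch(uint8_t* instruction_start, int osr_pc_offset) {
      instruction_start[0] = kInt3;                // Trap at the normal entry point.
      if (osr_pc_offset > 0) {
        instruction_start[osr_pc_offset] = kInt3;  // Trap at the OSR entry, too.
      }
    }

    int main() {
      uint8_t fake_code[16] = {0x90};              // Stand-in buffer, not real code.
      ZapDeoptimizedEntrySketch(fake_code, 8);
      return 0;
    }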
diff --git a/deps/v8/src/x64/disasm-x64.cc b/deps/v8/src/x64/disasm-x64.cc
index 2d659cf0e7..b870eae854 100644
--- a/deps/v8/src/x64/disasm-x64.cc
+++ b/deps/v8/src/x64/disasm-x64.cc
@@ -485,9 +485,11 @@ int DisassemblerX64::PrintRightOperandHelper(
} else if (base == 5) {
// base == rbp means no base register (when mod == 0).
int32_t disp = *reinterpret_cast<int32_t*>(modrmp + 2);
- AppendToBuffer("[%s*%d+0x%x]",
+ AppendToBuffer("[%s*%d%s0x%x]",
NameOfCPURegister(index),
- 1 << scale, disp);
+ 1 << scale,
+ disp < 0 ? "-" : "+",
+ disp < 0 ? -disp : disp);
return 6;
} else if (index != 4 && base != 5) {
// [base+index*scale]
@@ -512,38 +514,29 @@ int DisassemblerX64::PrintRightOperandHelper(
int scale, index, base;
get_sib(sib, &scale, &index, &base);
int disp = (mod == 2) ? *reinterpret_cast<int32_t*>(modrmp + 2)
- : *reinterpret_cast<char*>(modrmp + 2);
+ : *reinterpret_cast<int8_t*>(modrmp + 2);
if (index == 4 && (base & 7) == 4 && scale == 0 /*times_1*/) {
- if (-disp > 0) {
- AppendToBuffer("[%s-0x%x]", NameOfCPURegister(base), -disp);
- } else {
- AppendToBuffer("[%s+0x%x]", NameOfCPURegister(base), disp);
- }
+ AppendToBuffer("[%s%s0x%x]",
+ NameOfCPURegister(base),
+ disp < 0 ? "-" : "+",
+ disp < 0 ? -disp : disp);
} else {
- if (-disp > 0) {
- AppendToBuffer("[%s+%s*%d-0x%x]",
- NameOfCPURegister(base),
- NameOfCPURegister(index),
- 1 << scale,
- -disp);
- } else {
- AppendToBuffer("[%s+%s*%d+0x%x]",
- NameOfCPURegister(base),
- NameOfCPURegister(index),
- 1 << scale,
- disp);
- }
+ AppendToBuffer("[%s+%s*%d%s0x%x]",
+ NameOfCPURegister(base),
+ NameOfCPURegister(index),
+ 1 << scale,
+ disp < 0 ? "-" : "+",
+ disp < 0 ? -disp : disp);
}
return mod == 2 ? 6 : 3;
} else {
// No sib.
int disp = (mod == 2) ? *reinterpret_cast<int32_t*>(modrmp + 1)
- : *reinterpret_cast<char*>(modrmp + 1);
- if (-disp > 0) {
- AppendToBuffer("[%s-0x%x]", NameOfCPURegister(rm), -disp);
- } else {
- AppendToBuffer("[%s+0x%x]", NameOfCPURegister(rm), disp);
- }
+ : *reinterpret_cast<int8_t*>(modrmp + 1);
+ AppendToBuffer("[%s%s0x%x]",
+ NameOfCPURegister(rm),
+ disp < 0 ? "-" : "+",
+ disp < 0 ? -disp : disp);
return (mod == 2) ? 5 : 2;
}
break;
@@ -1096,6 +1089,11 @@ int DisassemblerX64::TwoByteOpcodeInstruction(byte* data) {
} else if (opcode == 0x50) {
AppendToBuffer("movmskpd %s,", NameOfCPURegister(regop));
current += PrintRightXMMOperand(current);
+ } else if (opcode == 0x73) {
+ current += 1;
+ ASSERT(regop == 6);
+ AppendToBuffer("psllq,%s,%d", NameOfXMMRegister(rm), *current & 0x7f);
+ current += 1;
} else {
const char* mnemonic = "?";
if (opcode == 0x54) {
@@ -1326,6 +1324,12 @@ int DisassemblerX64::TwoByteOpcodeInstruction(byte* data) {
} else {
AppendToBuffer(",%s,cl", NameOfCPURegister(regop));
}
+ } else if (opcode == 0xBD) {
+ AppendToBuffer("%s%c ", mnemonic, operand_size_code());
+ int mod, regop, rm;
+ get_modrm(*current, &mod, &regop, &rm);
+ AppendToBuffer("%s,", NameOfCPURegister(regop));
+ current += PrintRightOperand(current);
} else {
UnimplementedInstruction();
}
@@ -1368,6 +1372,8 @@ const char* DisassemblerX64::TwoByteMnemonic(byte opcode) {
return "movzxb";
case 0xB7:
return "movzxw";
+ case 0xBD:
+ return "bsr";
case 0xBE:
return "movsxb";
case 0xBF:
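
Note: two things happen in the disassembler hunks above: displacements are now printed with an explicit sign, and byte displacements are read through int8_t rather than char so they are sign-extended even where plain char is unsigned. A self-contained sketch of that formatting (AppendDispSketch is an invented name, not the disassembler's API):

    #include <cstddef>
    #include <cstdint>
    #include <cstdio>

    static void AppendDispSketch(char* buf, std::size_t n,
                                 const char* reg, const uint8_t* p) {
      // Reading through int8_t guarantees 0xF8 becomes -8, never 248.
      int disp = *reinterpret_cast<const int8_t*>(p);
      std::snprintf(buf, n, "[%s%s0x%x]", reg,
                    disp < 0 ? "-" : "+",
                    disp < 0 ? -disp : disp);
    }

    int main() {
      char buf[32];
      const uint8_t byte_disp = 0xF8;        // An 8-bit displacement of -8.
      AppendDispSketch(buf, sizeof(buf), "rbp", &byte_disp);
      std::puts(buf);                        // Prints [rbp-0x8]
      return 0;
    }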
diff --git a/deps/v8/src/x64/frames-x64.h b/deps/v8/src/x64/frames-x64.h
index 6eb02a9179..1fb77ffa6c 100644
--- a/deps/v8/src/x64/frames-x64.h
+++ b/deps/v8/src/x64/frames-x64.h
@@ -77,6 +77,8 @@ class ExitFrameConstants : public AllStatic {
// FP-relative displacement of the caller's SP. It points just
// below the saved PC.
static const int kCallerSPDisplacement = kCallerPCOffset + kPCOnStackSize;
+
+ static const int kConstantPoolOffset = 0; // Not used
};
diff --git a/deps/v8/src/x64/full-codegen-x64.cc b/deps/v8/src/x64/full-codegen-x64.cc
index 621eacc708..f0b9438626 100644
--- a/deps/v8/src/x64/full-codegen-x64.cc
+++ b/deps/v8/src/x64/full-codegen-x64.cc
@@ -101,6 +101,23 @@ class JumpPatchSite BASE_EMBEDDED {
};
+static void EmitStackCheck(MacroAssembler* masm_,
+ int pointers = 0,
+ Register scratch = rsp) {
+ Isolate* isolate = masm_->isolate();
+ Label ok;
+ ASSERT(scratch.is(rsp) == (pointers == 0));
+ if (pointers != 0) {
+ __ movq(scratch, rsp);
+ __ subq(scratch, Immediate(pointers * kPointerSize));
+ }
+ __ CompareRoot(scratch, Heap::kStackLimitRootIndex);
+ __ j(above_equal, &ok, Label::kNear);
+ __ call(isolate->builtins()->StackCheck(), RelocInfo::CODE_TARGET);
+ __ bind(&ok);
+}
+
+
// Generate code for a JS function. On entry to the function the receiver
// and arguments have been pushed on the stack left to right, with the
// return address on top of them. The actual argument count matches the
@@ -118,6 +135,9 @@ void FullCodeGenerator::Generate() {
CompilationInfo* info = info_;
handler_table_ =
isolate()->factory()->NewFixedArray(function()->handler_count(), TENURED);
+
+ InitializeFeedbackVector();
+
profiling_counter_ = isolate()->factory()->NewCell(
Handle<Smi>(Smi::FromInt(FLAG_interrupt_budget), isolate()));
SetFunctionPosition(function());
@@ -132,10 +152,10 @@ void FullCodeGenerator::Generate() {
}
#endif
- // Classic mode functions and builtins need to replace the receiver with the
+ // Sloppy mode functions and builtins need to replace the receiver with the
// global proxy when called as functions (without an explicit receiver
// object).
- if (info->is_classic_mode() && !info->is_native()) {
+ if (info->strict_mode() == SLOPPY && !info->is_native()) {
Label ok;
// +1 for return address.
StackArgumentsAccessor args(rsp, info->scope()->num_parameters());
@@ -168,9 +188,28 @@ void FullCodeGenerator::Generate() {
if (locals_count == 1) {
__ PushRoot(Heap::kUndefinedValueRootIndex);
} else if (locals_count > 1) {
+ if (locals_count >= 128) {
+ EmitStackCheck(masm_, locals_count, rcx);
+ }
__ LoadRoot(rdx, Heap::kUndefinedValueRootIndex);
- for (int i = 0; i < locals_count; i++) {
- __ push(rdx);
+ const int kMaxPushes = 32;
+ if (locals_count >= kMaxPushes) {
+ int loop_iterations = locals_count / kMaxPushes;
+ __ movq(rcx, Immediate(loop_iterations));
+ Label loop_header;
+ __ bind(&loop_header);
+ // Do pushes.
+ for (int i = 0; i < kMaxPushes; i++) {
+ __ Push(rdx);
+ }
+ // Continue loop if not done.
+ __ decq(rcx);
+ __ j(not_zero, &loop_header, Label::kNear);
+ }
+ int remaining = locals_count % kMaxPushes;
+ // Emit the remaining pushes.
+ for (int i = 0; i < remaining; i++) {
+ __ Push(rdx);
}
}
}
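
Note: a quick check of the batching arithmetic in the new locals-initialization loop above, using an illustrative count of 100 locals (below the 128-local threshold that triggers the extra stack check) and the same kMaxPushes constant:

    #include <cassert>

    int main() {
      const int kMaxPushes = 32;                        // Batch size used above.
      const int locals_count = 100;                     // Hypothetical frame size.
      int loop_iterations = locals_count / kMaxPushes;  // 3 passes of 32 pushes
      int remaining = locals_count % kMaxPushes;        // plus 4 trailing pushes
      assert(loop_iterations * kMaxPushes + remaining == locals_count);  // 96 + 4
      return 0;
    }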
@@ -183,15 +222,15 @@ void FullCodeGenerator::Generate() {
Comment cmnt(masm_, "[ Allocate context");
// Argument to NewContext is the function, which is still in rdi.
if (FLAG_harmony_scoping && info->scope()->is_global_scope()) {
- __ push(rdi);
+ __ Push(rdi);
__ Push(info->scope()->GetScopeInfo());
- __ CallRuntime(Runtime::kNewGlobalContext, 2);
+ __ CallRuntime(Runtime::kHiddenNewGlobalContext, 2);
} else if (heap_slots <= FastNewContextStub::kMaximumSlots) {
FastNewContextStub stub(heap_slots);
__ CallStub(&stub);
} else {
- __ push(rdi);
- __ CallRuntime(Runtime::kNewFunctionContext, 1);
+ __ Push(rdi);
+ __ CallRuntime(Runtime::kHiddenNewFunctionContext, 1);
}
function_in_register = false;
// Context is returned in rax. It replaces the context passed to us.
@@ -225,28 +264,28 @@ void FullCodeGenerator::Generate() {
// case the "arguments" or ".arguments" variables are in the context.
Comment cmnt(masm_, "[ Allocate arguments object");
if (function_in_register) {
- __ push(rdi);
+ __ Push(rdi);
} else {
- __ push(Operand(rbp, JavaScriptFrameConstants::kFunctionOffset));
+ __ Push(Operand(rbp, JavaScriptFrameConstants::kFunctionOffset));
}
// The receiver is just before the parameters on the caller's stack.
int num_parameters = info->scope()->num_parameters();
int offset = num_parameters * kPointerSize;
- __ lea(rdx,
+ __ leap(rdx,
Operand(rbp, StandardFrameConstants::kCallerSPOffset + offset));
- __ push(rdx);
+ __ Push(rdx);
__ Push(Smi::FromInt(num_parameters));
// Arguments to ArgumentsAccessStub:
// function, receiver address, parameter count.
// The stub will rewrite receiver and parameter count if the previous
// stack frame was an arguments adapter frame.
ArgumentsAccessStub::Type type;
- if (!is_classic_mode()) {
+ if (strict_mode() == STRICT) {
type = ArgumentsAccessStub::NEW_STRICT;
} else if (function()->has_duplicate_parameters()) {
- type = ArgumentsAccessStub::NEW_NON_STRICT_SLOW;
+ type = ArgumentsAccessStub::NEW_SLOPPY_SLOW;
} else {
- type = ArgumentsAccessStub::NEW_NON_STRICT_FAST;
+ type = ArgumentsAccessStub::NEW_SLOPPY_FAST;
}
ArgumentsAccessStub stub(type);
__ CallStub(&stub);
@@ -272,7 +311,7 @@ void FullCodeGenerator::Generate() {
if (scope()->is_function_scope() && scope()->function() != NULL) {
VariableDeclaration* function = scope()->function();
ASSERT(function->proxy()->var()->mode() == CONST ||
- function->proxy()->var()->mode() == CONST_HARMONY);
+ function->proxy()->var()->mode() == CONST_LEGACY);
ASSERT(function->proxy()->var()->location() != Variable::UNALLOCATED);
VisitVariableDeclaration(function);
}
@@ -281,11 +320,7 @@ void FullCodeGenerator::Generate() {
{ Comment cmnt(masm_, "[ Stack check");
PrepareForBailoutForId(BailoutId::Declarations(), NO_REGISTERS);
- Label ok;
- __ CompareRoot(rsp, Heap::kStackLimitRootIndex);
- __ j(above_equal, &ok, Label::kNear);
- __ call(isolate()->builtins()->StackCheck(), RelocInfo::CODE_TARGET);
- __ bind(&ok);
+ EmitStackCheck(masm_);
}
{ Comment cmnt(masm_, "[ Body");
@@ -360,7 +395,7 @@ void FullCodeGenerator::EmitReturnSequence() {
} else {
__ bind(&return_label_);
if (FLAG_trace) {
- __ push(rax);
+ __ Push(rax);
__ CallRuntime(Runtime::kTraceExit, 1);
}
// Pretend that the exit is a backwards jump to the entry.
@@ -375,10 +410,10 @@ void FullCodeGenerator::EmitReturnSequence() {
EmitProfilingCounterDecrement(weight);
Label ok;
__ j(positive, &ok, Label::kNear);
- __ push(rax);
+ __ Push(rax);
__ call(isolate()->builtins()->InterruptCheck(),
RelocInfo::CODE_TARGET);
- __ pop(rax);
+ __ Pop(rax);
EmitProfilingCounterReset();
__ bind(&ok);
#ifdef DEBUG
@@ -391,7 +426,7 @@ void FullCodeGenerator::EmitReturnSequence() {
// Do not use the leave instruction here because it is too short to
// patch with the code required by the debugger.
__ movp(rsp, rbp);
- __ pop(rbp);
+ __ popq(rbp);
int no_frame_start = masm_->pc_offset();
int arguments_bytes = (info_->scope()->num_parameters() + 1) * kPointerSize;
@@ -429,7 +464,7 @@ void FullCodeGenerator::AccumulatorValueContext::Plug(Variable* var) const {
void FullCodeGenerator::StackValueContext::Plug(Variable* var) const {
ASSERT(var->IsStackAllocated() || var->IsContextSlot());
MemOperand operand = codegen()->VarOperand(var, result_register());
- __ push(operand);
+ __ Push(operand);
}
@@ -638,8 +673,8 @@ void FullCodeGenerator::DoTest(Expression* condition,
Label* if_false,
Label* fall_through) {
Handle<Code> ic = ToBooleanStub::GetUninitialized(isolate());
- CallIC(ic, NOT_CONTEXTUAL, condition->test_id());
- __ testq(result_register(), result_register());
+ CallIC(ic, condition->test_id());
+ __ testp(result_register(), result_register());
// The stub returns nonzero for true.
Split(not_zero, if_true, if_false, fall_through);
}
@@ -755,7 +790,7 @@ void FullCodeGenerator::VisitVariableDeclaration(
VariableProxy* proxy = declaration->proxy();
VariableMode mode = declaration->mode();
Variable* variable = proxy->var();
- bool hole_init = mode == CONST || mode == CONST_HARMONY || mode == LET;
+ bool hole_init = mode == LET || mode == CONST || mode == CONST_LEGACY;
switch (variable->location()) {
case Variable::UNALLOCATED:
globals_->Add(variable->name(), zone());
@@ -787,7 +822,7 @@ void FullCodeGenerator::VisitVariableDeclaration(
case Variable::LOOKUP: {
Comment cmnt(masm_, "[ VariableDeclaration");
- __ push(rsi);
+ __ Push(rsi);
__ Push(variable->name());
// Declaration nodes are always introduced in one of four modes.
ASSERT(IsDeclaredVariableMode(mode));
@@ -803,7 +838,7 @@ void FullCodeGenerator::VisitVariableDeclaration(
} else {
__ Push(Smi::FromInt(0)); // Indicates no initial value.
}
- __ CallRuntime(Runtime::kDeclareContextSlot, 4);
+ __ CallRuntime(Runtime::kHiddenDeclareContextSlot, 4);
break;
}
}
@@ -853,11 +888,11 @@ void FullCodeGenerator::VisitFunctionDeclaration(
case Variable::LOOKUP: {
Comment cmnt(masm_, "[ FunctionDeclaration");
- __ push(rsi);
+ __ Push(rsi);
__ Push(variable->name());
__ Push(Smi::FromInt(NONE));
VisitForStackValue(declaration->fun());
- __ CallRuntime(Runtime::kDeclareContextSlot, 4);
+ __ CallRuntime(Runtime::kHiddenDeclareContextSlot, 4);
break;
}
}
@@ -924,10 +959,10 @@ void FullCodeGenerator::VisitExportDeclaration(ExportDeclaration* declaration) {
void FullCodeGenerator::DeclareGlobals(Handle<FixedArray> pairs) {
// Call the runtime to declare the globals.
- __ push(rsi); // The context is the first argument.
+ __ Push(rsi); // The context is the first argument.
__ Push(pairs);
__ Push(Smi::FromInt(DeclareGlobalsFlags()));
- __ CallRuntime(Runtime::kDeclareGlobals, 3);
+ __ CallRuntime(Runtime::kHiddenDeclareGlobals, 3);
// Return value is ignored.
}
@@ -935,7 +970,7 @@ void FullCodeGenerator::DeclareGlobals(Handle<FixedArray> pairs) {
void FullCodeGenerator::DeclareModules(Handle<FixedArray> descriptions) {
// Call the runtime to declare the modules.
__ Push(descriptions);
- __ CallRuntime(Runtime::kDeclareModules, 1);
+ __ CallRuntime(Runtime::kHiddenDeclareModules, 1);
// Return value is ignored.
}
@@ -978,10 +1013,10 @@ void FullCodeGenerator::VisitSwitchStatement(SwitchStatement* stmt) {
if (inline_smi_code) {
Label slow_case;
__ movp(rcx, rdx);
- __ or_(rcx, rax);
+ __ orp(rcx, rax);
patch_site.EmitJumpIfNotSmi(rcx, &slow_case, Label::kNear);
- __ cmpq(rdx, rax);
+ __ cmpp(rdx, rax);
__ j(not_equal, &next_test);
__ Drop(1); // Switch value is no longer needed.
__ jmp(clause->body_target());
@@ -991,7 +1026,7 @@ void FullCodeGenerator::VisitSwitchStatement(SwitchStatement* stmt) {
// Record position before stub call for type feedback.
SetSourcePosition(clause->position());
Handle<Code> ic = CompareIC::GetUninitialized(isolate(), Token::EQ_STRICT);
- CallIC(ic, NOT_CONTEXTUAL, clause->CompareId());
+ CallIC(ic, clause->CompareId());
patch_site.EmitPatchInfo();
Label skip;
@@ -1003,7 +1038,7 @@ void FullCodeGenerator::VisitSwitchStatement(SwitchStatement* stmt) {
__ jmp(clause->body_target());
__ bind(&skip);
- __ testq(rax, rax);
+ __ testp(rax, rax);
__ j(not_equal, &next_test);
__ Drop(1); // Switch value is no longer needed.
__ jmp(clause->body_target());
@@ -1035,6 +1070,7 @@ void FullCodeGenerator::VisitSwitchStatement(SwitchStatement* stmt) {
void FullCodeGenerator::VisitForInStatement(ForInStatement* stmt) {
Comment cmnt(masm_, "[ ForInStatement");
+ int slot = stmt->ForInFeedbackSlot();
SetStatementPosition(stmt);
Label loop, exit;
@@ -1048,7 +1084,7 @@ void FullCodeGenerator::VisitForInStatement(ForInStatement* stmt) {
__ j(equal, &exit);
Register null_value = rdi;
__ LoadRoot(null_value, Heap::kNullValueRootIndex);
- __ cmpq(rax, null_value);
+ __ cmpp(rax, null_value);
__ j(equal, &exit);
PrepareForBailoutForId(stmt->PrepareId(), TOS_REG);
@@ -1059,10 +1095,10 @@ void FullCodeGenerator::VisitForInStatement(ForInStatement* stmt) {
__ CmpObjectType(rax, FIRST_SPEC_OBJECT_TYPE, rcx);
__ j(above_equal, &done_convert);
__ bind(&convert);
- __ push(rax);
+ __ Push(rax);
__ InvokeBuiltin(Builtins::TO_OBJECT, CALL_FUNCTION);
__ bind(&done_convert);
- __ push(rax);
+ __ Push(rax);
// Check for proxies.
Label call_runtime;
@@ -1084,7 +1120,7 @@ void FullCodeGenerator::VisitForInStatement(ForInStatement* stmt) {
// Get the set of properties to enumerate.
__ bind(&call_runtime);
- __ push(rax); // Duplicate the enumerable object on the stack.
+ __ Push(rax); // Duplicate the enumerable object on the stack.
__ CallRuntime(Runtime::kGetPropertyNamesFast, 1);
// If we got a map from the runtime call, we can do a fast
@@ -1109,28 +1145,29 @@ void FullCodeGenerator::VisitForInStatement(ForInStatement* stmt) {
__ movp(rcx, FieldOperand(rcx, DescriptorArray::kEnumCacheBridgeCacheOffset));
// Set up the four remaining stack slots.
- __ push(rax); // Map.
- __ push(rcx); // Enumeration cache.
- __ push(rdx); // Number of valid entries for the map in the enum cache.
+ __ Push(rax); // Map.
+ __ Push(rcx); // Enumeration cache.
+ __ Push(rdx); // Number of valid entries for the map in the enum cache.
__ Push(Smi::FromInt(0)); // Initial index.
__ jmp(&loop);
__ bind(&no_descriptors);
- __ addq(rsp, Immediate(kPointerSize));
+ __ addp(rsp, Immediate(kPointerSize));
__ jmp(&exit);
// We got a fixed array in register rax. Iterate through that.
Label non_proxy;
__ bind(&fixed_array);
- Handle<Cell> cell = isolate()->factory()->NewCell(
- Handle<Object>(Smi::FromInt(TypeFeedbackCells::kForInFastCaseMarker),
- isolate()));
- RecordTypeFeedbackCell(stmt->ForInFeedbackId(), cell);
- __ Move(rbx, cell);
- __ Move(FieldOperand(rbx, Cell::kValueOffset),
- Smi::FromInt(TypeFeedbackCells::kForInSlowCaseMarker));
+ Handle<Object> feedback = Handle<Object>(
+ Smi::FromInt(TypeFeedbackInfo::kForInFastCaseMarker),
+ isolate());
+ StoreFeedbackVectorSlot(slot, feedback);
+ // No need for a write barrier, we are storing a Smi in the feedback vector.
+ __ Move(rbx, FeedbackVector());
+ __ Move(FieldOperand(rbx, FixedArray::OffsetOfElementAt(slot)),
+ Smi::FromInt(TypeFeedbackInfo::kForInSlowCaseMarker));
__ Move(rbx, Smi::FromInt(1)); // Smi indicates slow check
__ movp(rcx, Operand(rsp, 0 * kPointerSize)); // Get enumerated object
STATIC_ASSERT(FIRST_JS_PROXY_TYPE == FIRST_SPEC_OBJECT_TYPE);
@@ -1138,17 +1175,17 @@ void FullCodeGenerator::VisitForInStatement(ForInStatement* stmt) {
__ j(above, &non_proxy);
__ Move(rbx, Smi::FromInt(0)); // Zero indicates proxy
__ bind(&non_proxy);
- __ push(rbx); // Smi
- __ push(rax); // Array
+ __ Push(rbx); // Smi
+ __ Push(rax); // Array
__ movp(rax, FieldOperand(rax, FixedArray::kLengthOffset));
- __ push(rax); // Fixed array length (as smi).
+ __ Push(rax); // Fixed array length (as smi).
__ Push(Smi::FromInt(0)); // Initial index.
// Generate code for doing the condition check.
PrepareForBailoutForId(stmt->BodyId(), NO_REGISTERS);
__ bind(&loop);
__ movp(rax, Operand(rsp, 0 * kPointerSize)); // Get the current index.
- __ cmpq(rax, Operand(rsp, 1 * kPointerSize)); // Compare to the array length.
+ __ cmpp(rax, Operand(rsp, 1 * kPointerSize)); // Compare to the array length.
__ j(above_equal, loop_statement.break_label());
// Get the current entry of the array into register rbx.
@@ -1167,7 +1204,7 @@ void FullCodeGenerator::VisitForInStatement(ForInStatement* stmt) {
// If not, we may have to filter the key.
Label update_each;
__ movp(rcx, Operand(rsp, 4 * kPointerSize));
- __ cmpq(rdx, FieldOperand(rcx, HeapObject::kMapOffset));
+ __ cmpp(rdx, FieldOperand(rcx, HeapObject::kMapOffset));
__ j(equal, &update_each, Label::kNear);
// For proxies, no filtering is done.
@@ -1178,8 +1215,8 @@ void FullCodeGenerator::VisitForInStatement(ForInStatement* stmt) {
// Convert the entry to a string or null if it isn't a property
// anymore. If the property has been removed while iterating, we
// just skip it.
- __ push(rcx); // Enumerable.
- __ push(rbx); // Current entry.
+ __ Push(rcx); // Enumerable.
+ __ Push(rbx); // Current entry.
__ InvokeBuiltin(Builtins::FILTER_KEY, CALL_FUNCTION);
__ Cmp(rax, Smi::FromInt(0));
__ j(equal, loop_statement.continue_label());
@@ -1207,7 +1244,7 @@ void FullCodeGenerator::VisitForInStatement(ForInStatement* stmt) {
// Remove the pointers stored on the stack.
__ bind(loop_statement.break_label());
- __ addq(rsp, Immediate(5 * kPointerSize));
+ __ addp(rsp, Immediate(5 * kPointerSize));
// Exit and decrement the loop depth.
PrepareForBailoutForId(stmt->ExitId(), NO_REGISTERS);
@@ -1238,7 +1275,7 @@ void FullCodeGenerator::VisitForOfStatement(ForOfStatement* stmt) {
__ CmpObjectType(rax, FIRST_SPEC_OBJECT_TYPE, rcx);
__ j(above_equal, &done_convert);
__ bind(&convert);
- __ push(rax);
+ __ Push(rax);
__ InvokeBuiltin(Builtins::TO_OBJECT, CALL_FUNCTION);
__ bind(&done_convert);
@@ -1287,16 +1324,16 @@ void FullCodeGenerator::EmitNewClosure(Handle<SharedFunctionInfo> info,
!pretenure &&
scope()->is_function_scope() &&
info->num_literals() == 0) {
- FastNewClosureStub stub(info->language_mode(), info->is_generator());
+ FastNewClosureStub stub(info->strict_mode(), info->is_generator());
__ Move(rbx, info);
__ CallStub(&stub);
} else {
- __ push(rsi);
+ __ Push(rsi);
__ Push(info);
__ Push(pretenure
? isolate()->factory()->true_value()
: isolate()->factory()->false_value());
- __ CallRuntime(Runtime::kNewClosure, 3);
+ __ CallRuntime(Runtime::kHiddenNewClosure, 3);
}
context()->Plug(rax);
}
@@ -1317,9 +1354,9 @@ void FullCodeGenerator::EmitLoadGlobalCheckExtensions(Variable* var,
Scope* s = scope();
while (s != NULL) {
if (s->num_heap_slots() > 0) {
- if (s->calls_non_strict_eval()) {
+ if (s->calls_sloppy_eval()) {
// Check that extension is NULL.
- __ cmpq(ContextOperand(context, Context::EXTENSION_INDEX),
+ __ cmpp(ContextOperand(context, Context::EXTENSION_INDEX),
Immediate(0));
__ j(not_equal, slow);
}
@@ -1331,7 +1368,7 @@ void FullCodeGenerator::EmitLoadGlobalCheckExtensions(Variable* var,
// If no outer scope calls eval, we do not need to check more
// context extensions. If we have reached an eval scope, we check
// all extensions from this point.
- if (!s->outer_scope_calls_non_strict_eval() || s->is_eval_scope()) break;
+ if (!s->outer_scope_calls_sloppy_eval() || s->is_eval_scope()) break;
s = s->outer_scope();
}
@@ -1346,10 +1383,10 @@ void FullCodeGenerator::EmitLoadGlobalCheckExtensions(Variable* var,
__ LoadRoot(kScratchRegister, Heap::kNativeContextMapRootIndex);
__ bind(&next);
// Terminate at native context.
- __ cmpq(kScratchRegister, FieldOperand(temp, HeapObject::kMapOffset));
+ __ cmpp(kScratchRegister, FieldOperand(temp, HeapObject::kMapOffset));
__ j(equal, &fast, Label::kNear);
// Check that extension is NULL.
- __ cmpq(ContextOperand(temp, Context::EXTENSION_INDEX), Immediate(0));
+ __ cmpp(ContextOperand(temp, Context::EXTENSION_INDEX), Immediate(0));
__ j(not_equal, slow);
// Load next context in chain.
__ movp(temp, ContextOperand(temp, Context::PREVIOUS_INDEX));
@@ -1376,9 +1413,9 @@ MemOperand FullCodeGenerator::ContextSlotOperandCheckExtensions(Variable* var,
for (Scope* s = scope(); s != var->scope(); s = s->outer_scope()) {
if (s->num_heap_slots() > 0) {
- if (s->calls_non_strict_eval()) {
+ if (s->calls_sloppy_eval()) {
// Check that extension is NULL.
- __ cmpq(ContextOperand(context, Context::EXTENSION_INDEX),
+ __ cmpp(ContextOperand(context, Context::EXTENSION_INDEX),
Immediate(0));
__ j(not_equal, slow);
}
@@ -1388,7 +1425,7 @@ MemOperand FullCodeGenerator::ContextSlotOperandCheckExtensions(Variable* var,
}
}
// Check that last extension is NULL.
- __ cmpq(ContextOperand(context, Context::EXTENSION_INDEX), Immediate(0));
+ __ cmpp(ContextOperand(context, Context::EXTENSION_INDEX), Immediate(0));
__ j(not_equal, slow);
// This function is used only for loads, not stores, so it's safe to
@@ -1413,16 +1450,15 @@ void FullCodeGenerator::EmitDynamicLookupFastCase(Variable* var,
} else if (var->mode() == DYNAMIC_LOCAL) {
Variable* local = var->local_if_not_shadowed();
__ movp(rax, ContextSlotOperandCheckExtensions(local, slow));
- if (local->mode() == LET ||
- local->mode() == CONST ||
- local->mode() == CONST_HARMONY) {
+ if (local->mode() == LET || local->mode() == CONST ||
+ local->mode() == CONST_LEGACY) {
__ CompareRoot(rax, Heap::kTheHoleValueRootIndex);
__ j(not_equal, done);
- if (local->mode() == CONST) {
+ if (local->mode() == CONST_LEGACY) {
__ LoadRoot(rax, Heap::kUndefinedValueRootIndex);
- } else { // LET || CONST_HARMONY
+ } else { // LET || CONST
__ Push(var->name());
- __ CallRuntime(Runtime::kThrowReferenceError, 1);
+ __ CallRuntime(Runtime::kHiddenThrowReferenceError, 1);
}
}
__ jmp(done);
@@ -1439,7 +1475,7 @@ void FullCodeGenerator::EmitVariableLoad(VariableProxy* proxy) {
// variables.
switch (var->location()) {
case Variable::UNALLOCATED: {
- Comment cmnt(masm_, "Global variable");
+ Comment cmnt(masm_, "[ Global variable");
// Use inline caching. Variable name is passed in rcx and the global
// object on the stack.
__ Move(rcx, var->name());
@@ -1452,7 +1488,8 @@ void FullCodeGenerator::EmitVariableLoad(VariableProxy* proxy) {
case Variable::PARAMETER:
case Variable::LOCAL:
case Variable::CONTEXT: {
- Comment cmnt(masm_, var->IsContextSlot() ? "Context slot" : "Stack slot");
+ Comment cmnt(masm_, var->IsContextSlot() ? "[ Context slot"
+ : "[ Stack slot");
if (var->binding_needs_init()) {
// var->scope() may be NULL when the proxy is located in eval code and
// refers to a potential outside binding. Currently those bindings are
@@ -1484,7 +1521,7 @@ void FullCodeGenerator::EmitVariableLoad(VariableProxy* proxy) {
// Check that we always have valid source position.
ASSERT(var->initializer_position() != RelocInfo::kNoPosition);
ASSERT(proxy->position() != RelocInfo::kNoPosition);
- skip_init_check = var->mode() != CONST &&
+ skip_init_check = var->mode() != CONST_LEGACY &&
var->initializer_position() < proxy->position();
}
@@ -1494,14 +1531,14 @@ void FullCodeGenerator::EmitVariableLoad(VariableProxy* proxy) {
GetVar(rax, var);
__ CompareRoot(rax, Heap::kTheHoleValueRootIndex);
__ j(not_equal, &done, Label::kNear);
- if (var->mode() == LET || var->mode() == CONST_HARMONY) {
+ if (var->mode() == LET || var->mode() == CONST) {
// Throw a reference error when using an uninitialized let/const
// binding in harmony mode.
__ Push(var->name());
- __ CallRuntime(Runtime::kThrowReferenceError, 1);
+ __ CallRuntime(Runtime::kHiddenThrowReferenceError, 1);
} else {
// Uninitialized const bindings outside of harmony mode are unholed.
- ASSERT(var->mode() == CONST);
+ ASSERT(var->mode() == CONST_LEGACY);
__ LoadRoot(rax, Heap::kUndefinedValueRootIndex);
}
__ bind(&done);
@@ -1514,15 +1551,15 @@ void FullCodeGenerator::EmitVariableLoad(VariableProxy* proxy) {
}
case Variable::LOOKUP: {
+ Comment cmnt(masm_, "[ Lookup slot");
Label done, slow;
// Generate code for loading from variables potentially shadowed
// by eval-introduced variables.
EmitDynamicLookupFastCase(var, NOT_INSIDE_TYPEOF, &slow, &done);
__ bind(&slow);
- Comment cmnt(masm_, "Lookup slot");
- __ push(rsi); // Context.
+ __ Push(rsi); // Context.
__ Push(var->name());
- __ CallRuntime(Runtime::kLoadContextSlot, 2);
+ __ CallRuntime(Runtime::kHiddenLoadContextSlot, 2);
__ bind(&done);
context()->Plug(rax);
break;
@@ -1549,11 +1586,11 @@ void FullCodeGenerator::VisitRegExpLiteral(RegExpLiteral* expr) {
// Create regexp literal using runtime function
// Result will be in rax.
- __ push(rcx);
+ __ Push(rcx);
__ Push(Smi::FromInt(expr->literal_index()));
__ Push(expr->pattern());
__ Push(expr->flags());
- __ CallRuntime(Runtime::kMaterializeRegExpLiteral, 4);
+ __ CallRuntime(Runtime::kHiddenMaterializeRegExpLiteral, 4);
__ movp(rbx, rax);
__ bind(&materialized);
@@ -1563,10 +1600,10 @@ void FullCodeGenerator::VisitRegExpLiteral(RegExpLiteral* expr) {
__ jmp(&allocated);
__ bind(&runtime_allocate);
- __ push(rbx);
+ __ Push(rbx);
__ Push(Smi::FromInt(size));
- __ CallRuntime(Runtime::kAllocateInNewSpace, 1);
- __ pop(rbx);
+ __ CallRuntime(Runtime::kHiddenAllocateInNewSpace, 1);
+ __ Pop(rbx);
__ bind(&allocated);
// Copy the content into the newly allocated memory.
@@ -1606,16 +1643,15 @@ void FullCodeGenerator::VisitObjectLiteral(ObjectLiteral* expr) {
? ObjectLiteral::kHasFunction
: ObjectLiteral::kNoFlags;
int properties_count = constant_properties->length() / 2;
- if ((FLAG_track_double_fields && expr->may_store_doubles()) ||
- expr->depth() > 1 || Serializer::enabled() ||
+ if (expr->may_store_doubles() || expr->depth() > 1 || Serializer::enabled() ||
flags != ObjectLiteral::kFastElements ||
properties_count > FastCloneShallowObjectStub::kMaximumClonedProperties) {
__ movp(rdi, Operand(rbp, JavaScriptFrameConstants::kFunctionOffset));
- __ push(FieldOperand(rdi, JSFunction::kLiteralsOffset));
+ __ Push(FieldOperand(rdi, JSFunction::kLiteralsOffset));
__ Push(Smi::FromInt(expr->literal_index()));
__ Push(constant_properties);
__ Push(Smi::FromInt(flags));
- __ CallRuntime(Runtime::kCreateObjectLiteral, 4);
+ __ CallRuntime(Runtime::kHiddenCreateObjectLiteral, 4);
} else {
__ movp(rdi, Operand(rbp, JavaScriptFrameConstants::kFunctionOffset));
__ movp(rax, FieldOperand(rdi, JSFunction::kLiteralsOffset));
@@ -1643,7 +1679,7 @@ void FullCodeGenerator::VisitObjectLiteral(ObjectLiteral* expr) {
Literal* key = property->key();
Expression* value = property->value();
if (!result_saved) {
- __ push(rax); // Save result on the stack
+ __ Push(rax); // Save result on the stack
result_saved = true;
}
switch (property->kind()) {
@@ -1658,14 +1694,14 @@ void FullCodeGenerator::VisitObjectLiteral(ObjectLiteral* expr) {
VisitForAccumulatorValue(value);
__ Move(rcx, key->value());
__ movp(rdx, Operand(rsp, 0));
- CallStoreIC(NOT_CONTEXTUAL, key->LiteralFeedbackId());
+ CallStoreIC(key->LiteralFeedbackId());
PrepareForBailoutForId(key->id(), NO_REGISTERS);
} else {
VisitForEffect(value);
}
break;
}
- __ push(Operand(rsp, 0)); // Duplicate receiver.
+ __ Push(Operand(rsp, 0)); // Duplicate receiver.
VisitForStackValue(key);
VisitForStackValue(value);
if (property->emit_store()) {
@@ -1676,7 +1712,7 @@ void FullCodeGenerator::VisitObjectLiteral(ObjectLiteral* expr) {
}
break;
case ObjectLiteral::Property::PROTOTYPE:
- __ push(Operand(rsp, 0)); // Duplicate receiver.
+ __ Push(Operand(rsp, 0)); // Duplicate receiver.
VisitForStackValue(value);
if (property->emit_store()) {
__ CallRuntime(Runtime::kSetPrototype, 2);
@@ -1698,7 +1734,7 @@ void FullCodeGenerator::VisitObjectLiteral(ObjectLiteral* expr) {
for (AccessorTable::Iterator it = accessor_table.begin();
it != accessor_table.end();
++it) {
- __ push(Operand(rsp, 0)); // Duplicate receiver.
+ __ Push(Operand(rsp, 0)); // Duplicate receiver.
VisitForStackValue(it->first);
EmitAccessor(it->second->getter);
EmitAccessor(it->second->setter);
@@ -1708,7 +1744,7 @@ void FullCodeGenerator::VisitObjectLiteral(ObjectLiteral* expr) {
if (expr->has_function()) {
ASSERT(result_saved);
- __ push(Operand(rsp, 0));
+ __ Push(Operand(rsp, 0));
__ CallRuntime(Runtime::kToFastProperties, 1);
}
@@ -1764,11 +1800,11 @@ void FullCodeGenerator::VisitArrayLiteral(ArrayLiteral* expr) {
} else if (expr->depth() > 1 || Serializer::enabled() ||
length > FastCloneShallowArrayStub::kMaximumClonedLength) {
__ movp(rbx, Operand(rbp, JavaScriptFrameConstants::kFunctionOffset));
- __ push(FieldOperand(rbx, JSFunction::kLiteralsOffset));
+ __ Push(FieldOperand(rbx, JSFunction::kLiteralsOffset));
__ Push(Smi::FromInt(expr->literal_index()));
__ Push(constant_elements);
__ Push(Smi::FromInt(flags));
- __ CallRuntime(Runtime::kCreateArrayLiteral, 4);
+ __ CallRuntime(Runtime::kHiddenCreateArrayLiteral, 4);
} else {
ASSERT(IsFastSmiOrObjectElementsKind(constant_elements_kind) ||
FLAG_smi_only_arrays);
@@ -1800,7 +1836,7 @@ void FullCodeGenerator::VisitArrayLiteral(ArrayLiteral* expr) {
if (CompileTimeValue::IsCompileTimeValue(subexpr)) continue;
if (!result_saved) {
- __ push(rax); // array literal
+ __ Push(rax); // array literal
__ Push(Smi::FromInt(expr->literal_index()));
result_saved = true;
}
@@ -1830,7 +1866,7 @@ void FullCodeGenerator::VisitArrayLiteral(ArrayLiteral* expr) {
}
if (result_saved) {
- __ addq(rsp, Immediate(kPointerSize)); // literal index
+ __ addp(rsp, Immediate(kPointerSize)); // literal index
context()->PlugTOS();
} else {
context()->Plug(rax);
@@ -1839,13 +1875,9 @@ void FullCodeGenerator::VisitArrayLiteral(ArrayLiteral* expr) {
void FullCodeGenerator::VisitAssignment(Assignment* expr) {
+ ASSERT(expr->target()->IsValidLeftHandSide());
+
Comment cmnt(masm_, "[ Assignment");
- // Invalid left-hand sides are rewritten to have a 'throw ReferenceError'
- // on the left-hand side.
- if (!expr->target()->IsValidLeftHandSide()) {
- VisitForEffect(expr->target());
- return;
- }
// Left-hand side can only be a property, a global or a (parameter or local)
// slot.
@@ -1867,7 +1899,7 @@ void FullCodeGenerator::VisitAssignment(Assignment* expr) {
if (expr->is_compound()) {
// We need the receiver both on the stack and in the accumulator.
VisitForAccumulatorValue(property->obj());
- __ push(result_register());
+ __ Push(result_register());
} else {
VisitForStackValue(property->obj());
}
@@ -1877,7 +1909,7 @@ void FullCodeGenerator::VisitAssignment(Assignment* expr) {
VisitForStackValue(property->obj());
VisitForAccumulatorValue(property->key());
__ movp(rdx, Operand(rsp, 0));
- __ push(rax);
+ __ Push(rax);
} else {
VisitForStackValue(property->obj());
VisitForStackValue(property->key());
@@ -1907,7 +1939,7 @@ void FullCodeGenerator::VisitAssignment(Assignment* expr) {
}
Token::Value op = expr->binary_op();
- __ push(rax); // Left operand goes on the stack.
+ __ Push(rax); // Left operand goes on the stack.
VisitForAccumulatorValue(expr->value());
OverwriteMode mode = expr->value()->ResultOverwriteAllowed()
@@ -1961,7 +1993,7 @@ void FullCodeGenerator::VisitYield(Yield* expr) {
case Yield::SUSPEND:
// Pop value from top-of-stack slot; box result into result register.
EmitCreateIteratorResult(false);
- __ push(result_register());
+ __ Push(result_register());
// Fall through.
case Yield::INITIAL: {
Label suspend, continuation, post_runtime, resume;
@@ -1980,16 +2012,16 @@ void FullCodeGenerator::VisitYield(Yield* expr) {
__ movp(rcx, rsi);
__ RecordWriteField(rax, JSGeneratorObject::kContextOffset, rcx, rdx,
kDontSaveFPRegs);
- __ lea(rbx, Operand(rbp, StandardFrameConstants::kExpressionsOffset));
- __ cmpq(rsp, rbx);
+ __ leap(rbx, Operand(rbp, StandardFrameConstants::kExpressionsOffset));
+ __ cmpp(rsp, rbx);
__ j(equal, &post_runtime);
- __ push(rax); // generator object
- __ CallRuntime(Runtime::kSuspendJSGeneratorObject, 1);
+ __ Push(rax); // generator object
+ __ CallRuntime(Runtime::kHiddenSuspendJSGeneratorObject, 1);
__ movp(context_register(),
Operand(rbp, StandardFrameConstants::kContextOffset));
__ bind(&post_runtime);
- __ pop(result_register());
+ __ Pop(result_register());
EmitReturnSequence();
__ bind(&resume);
@@ -2026,26 +2058,26 @@ void FullCodeGenerator::VisitYield(Yield* expr) {
__ bind(&l_catch);
handler_table()->set(expr->index(), Smi::FromInt(l_catch.pos()));
__ LoadRoot(rcx, Heap::kthrow_stringRootIndex); // "throw"
- __ push(rcx);
- __ push(Operand(rsp, 2 * kPointerSize)); // iter
- __ push(rax); // exception
+ __ Push(rcx);
+ __ Push(Operand(rsp, 2 * kPointerSize)); // iter
+ __ Push(rax); // exception
__ jmp(&l_call);
// try { received = %yield result }
// Shuffle the received result above a try handler and yield it without
// re-boxing.
__ bind(&l_try);
- __ pop(rax); // result
+ __ Pop(rax); // result
__ PushTryHandler(StackHandler::CATCH, expr->index());
const int handler_size = StackHandlerConstants::kSize;
- __ push(rax); // result
+ __ Push(rax); // result
__ jmp(&l_suspend);
__ bind(&l_continuation);
__ jmp(&l_resume);
__ bind(&l_suspend);
const int generator_object_depth = kPointerSize + handler_size;
__ movp(rax, Operand(rsp, generator_object_depth));
- __ push(rax); // g
+ __ Push(rax); // g
ASSERT(l_continuation.pos() > 0 && Smi::IsValid(l_continuation.pos()));
__ Move(FieldOperand(rax, JSGeneratorObject::kContinuationOffset),
Smi::FromInt(l_continuation.pos()));
@@ -2053,10 +2085,10 @@ void FullCodeGenerator::VisitYield(Yield* expr) {
__ movp(rcx, rsi);
__ RecordWriteField(rax, JSGeneratorObject::kContextOffset, rcx, rdx,
kDontSaveFPRegs);
- __ CallRuntime(Runtime::kSuspendJSGeneratorObject, 1);
+ __ CallRuntime(Runtime::kHiddenSuspendJSGeneratorObject, 1);
__ movp(context_register(),
Operand(rbp, StandardFrameConstants::kContextOffset));
- __ pop(rax); // result
+ __ Pop(rax); // result
EmitReturnSequence();
__ bind(&l_resume); // received in rax
__ PopTryHandler();
@@ -2064,16 +2096,16 @@ void FullCodeGenerator::VisitYield(Yield* expr) {
// receiver = iter; f = 'next'; arg = received;
__ bind(&l_next);
__ LoadRoot(rcx, Heap::knext_stringRootIndex); // "next"
- __ push(rcx);
- __ push(Operand(rsp, 2 * kPointerSize)); // iter
- __ push(rax); // received
+ __ Push(rcx);
+ __ Push(Operand(rsp, 2 * kPointerSize)); // iter
+ __ Push(rax); // received
// result = receiver[f](arg);
__ bind(&l_call);
__ movp(rdx, Operand(rsp, kPointerSize));
__ movp(rax, Operand(rsp, 2 * kPointerSize));
Handle<Code> ic = isolate()->builtins()->KeyedLoadIC_Initialize();
- CallIC(ic, NOT_CONTEXTUAL, TypeFeedbackId::None());
+ CallIC(ic, TypeFeedbackId::None());
__ movp(rdi, rax);
__ movp(Operand(rsp, 2 * kPointerSize), rdi);
CallFunctionStub stub(1, CALL_AS_METHOD);
@@ -2084,16 +2116,16 @@ void FullCodeGenerator::VisitYield(Yield* expr) {
// if (!result.done) goto l_try;
__ bind(&l_loop);
- __ push(rax); // save result
+ __ Push(rax); // save result
__ LoadRoot(rcx, Heap::kdone_stringRootIndex); // "done"
CallLoadIC(NOT_CONTEXTUAL); // result.done in rax
Handle<Code> bool_ic = ToBooleanStub::GetUninitialized(isolate());
CallIC(bool_ic);
- __ testq(result_register(), result_register());
+ __ testp(result_register(), result_register());
__ j(zero, &l_try);
// result.value
- __ pop(rax); // result
+ __ Pop(rax); // result
__ LoadRoot(rcx, Heap::kvalue_stringRootIndex); // "value"
CallLoadIC(NOT_CONTEXTUAL); // result.value in rax
context()->DropAndPlug(2, rax); // drop iter and g
@@ -2107,12 +2139,12 @@ void FullCodeGenerator::EmitGeneratorResume(Expression *generator,
Expression *value,
JSGeneratorObject::ResumeMode resume_mode) {
// The value stays in rax, and is ultimately read by the resumed generator, as
- // if the CallRuntime(Runtime::kSuspendJSGeneratorObject) returned it. Or it
+ // if CallRuntime(Runtime::kHiddenSuspendJSGeneratorObject) returned it. Or it
// is read to throw the value when the resumed generator is already closed.
// rbx will hold the generator object until the activation has been resumed.
VisitForStackValue(generator);
VisitForAccumulatorValue(value);
- __ pop(rbx);
+ __ Pop(rbx);
// Check generator state.
Label wrong_state, closed_state, done;
@@ -2128,7 +2160,7 @@ void FullCodeGenerator::EmitGeneratorResume(Expression *generator,
__ movp(rdi, FieldOperand(rbx, JSGeneratorObject::kFunctionOffset));
// Push receiver.
- __ push(FieldOperand(rbx, JSGeneratorObject::kReceiverOffset));
+ __ Push(FieldOperand(rbx, JSGeneratorObject::kReceiverOffset));
// Push holes for arguments to generator function.
__ movp(rdx, FieldOperand(rdi, JSFunction::kSharedFunctionInfoOffset));
@@ -2138,9 +2170,9 @@ void FullCodeGenerator::EmitGeneratorResume(Expression *generator,
__ LoadRoot(rcx, Heap::kTheHoleValueRootIndex);
Label push_argument_holes, push_frame;
__ bind(&push_argument_holes);
- __ subq(rdx, Immediate(1));
+ __ subp(rdx, Immediate(1));
__ j(carry, &push_frame);
- __ push(rcx);
+ __ Push(rcx);
__ jmp(&push_argument_holes);
// Enter a new JavaScript frame, and initialize its slots as they were when
@@ -2150,10 +2182,10 @@ void FullCodeGenerator::EmitGeneratorResume(Expression *generator,
__ call(&resume_frame);
__ jmp(&done);
__ bind(&resume_frame);
- __ push(rbp); // Caller's frame pointer.
+ __ pushq(rbp); // Caller's frame pointer.
__ movp(rbp, rsp);
- __ push(rsi); // Callee's context.
- __ push(rdi); // Callee's JS Function.
+ __ Push(rsi); // Callee's context.
+ __ Push(rdi); // Callee's JS Function.
// Load the operand stack size.
__ movp(rdx, FieldOperand(rbx, JSGeneratorObject::kOperandStackOffset));
@@ -2164,12 +2196,12 @@ void FullCodeGenerator::EmitGeneratorResume(Expression *generator,
// in directly.
if (resume_mode == JSGeneratorObject::NEXT) {
Label slow_resume;
- __ cmpq(rdx, Immediate(0));
+ __ cmpp(rdx, Immediate(0));
__ j(not_zero, &slow_resume);
__ movp(rdx, FieldOperand(rdi, JSFunction::kCodeEntryOffset));
__ SmiToInteger64(rcx,
FieldOperand(rbx, JSGeneratorObject::kContinuationOffset));
- __ addq(rdx, rcx);
+ __ addp(rdx, rcx);
__ Move(FieldOperand(rbx, JSGeneratorObject::kContinuationOffset),
Smi::FromInt(JSGeneratorObject::kGeneratorExecuting));
__ jmp(rdx);
@@ -2180,15 +2212,15 @@ void FullCodeGenerator::EmitGeneratorResume(Expression *generator,
// up the stack and the handlers.
Label push_operand_holes, call_resume;
__ bind(&push_operand_holes);
- __ subq(rdx, Immediate(1));
+ __ subp(rdx, Immediate(1));
__ j(carry, &call_resume);
- __ push(rcx);
+ __ Push(rcx);
__ jmp(&push_operand_holes);
__ bind(&call_resume);
- __ push(rbx);
- __ push(result_register());
+ __ Push(rbx);
+ __ Push(result_register());
__ Push(Smi::FromInt(resume_mode));
- __ CallRuntime(Runtime::kResumeJSGeneratorObject, 3);
+ __ CallRuntime(Runtime::kHiddenResumeJSGeneratorObject, 3);
// Not reached: the runtime call returns elsewhere.
__ Abort(kGeneratorFailedToResume);
@@ -2201,15 +2233,15 @@ void FullCodeGenerator::EmitGeneratorResume(Expression *generator,
EmitCreateIteratorResult(true);
} else {
// Throw the provided value.
- __ push(rax);
- __ CallRuntime(Runtime::kThrow, 1);
+ __ Push(rax);
+ __ CallRuntime(Runtime::kHiddenThrow, 1);
}
__ jmp(&done);
// Throw error if we attempt to operate on a running generator.
__ bind(&wrong_state);
- __ push(rbx);
- __ CallRuntime(Runtime::kThrowGeneratorStateError, 1);
+ __ Push(rbx);
+ __ CallRuntime(Runtime::kHiddenThrowGeneratorStateError, 1);
__ bind(&done);
context()->Plug(result_register());
@@ -2227,13 +2259,13 @@ void FullCodeGenerator::EmitCreateIteratorResult(bool done) {
__ bind(&gc_required);
__ Push(Smi::FromInt(map->instance_size()));
- __ CallRuntime(Runtime::kAllocateInNewSpace, 1);
+ __ CallRuntime(Runtime::kHiddenAllocateInNewSpace, 1);
__ movp(context_register(),
Operand(rbp, StandardFrameConstants::kContextOffset));
__ bind(&allocated);
__ Move(rbx, map);
- __ pop(rcx);
+ __ Pop(rcx);
__ Move(rdx, isolate()->factory()->ToBoolean(done));
ASSERT_EQ(map->instance_size(), 5 * kPointerSize);
__ movp(FieldOperand(rax, HeapObject::kMapOffset), rbx);
@@ -2264,7 +2296,7 @@ void FullCodeGenerator::EmitNamedPropertyLoad(Property* prop) {
void FullCodeGenerator::EmitKeyedPropertyLoad(Property* prop) {
SetSourcePosition(prop->position());
Handle<Code> ic = isolate()->builtins()->KeyedLoadIC_Initialize();
- CallIC(ic, NOT_CONTEXTUAL, prop->PropertyFeedbackId());
+ CallIC(ic, prop->PropertyFeedbackId());
}
@@ -2277,17 +2309,16 @@ void FullCodeGenerator::EmitInlineSmiBinaryOp(BinaryOperation* expr,
// stack (popped into rdx). Right operand is in rax but moved into
// rcx to make the shifts easier.
Label done, stub_call, smi_case;
- __ pop(rdx);
+ __ Pop(rdx);
__ movp(rcx, rax);
- __ or_(rax, rdx);
+ __ orp(rax, rdx);
JumpPatchSite patch_site(masm_);
patch_site.EmitJumpIfSmi(rax, &smi_case, Label::kNear);
__ bind(&stub_call);
__ movp(rax, rcx);
BinaryOpICStub stub(op, mode);
- CallIC(stub.GetCode(isolate()), NOT_CONTEXTUAL,
- expr->BinaryOperationFeedbackId());
+ CallIC(stub.GetCode(isolate()), expr->BinaryOperationFeedbackId());
patch_site.EmitPatchInfo();
__ jmp(&done, Label::kNear);
@@ -2333,23 +2364,17 @@ void FullCodeGenerator::EmitInlineSmiBinaryOp(BinaryOperation* expr,
void FullCodeGenerator::EmitBinaryOp(BinaryOperation* expr,
Token::Value op,
OverwriteMode mode) {
- __ pop(rdx);
+ __ Pop(rdx);
BinaryOpICStub stub(op, mode);
JumpPatchSite patch_site(masm_); // unbound, signals no inlined smi code.
- CallIC(stub.GetCode(isolate()), NOT_CONTEXTUAL,
- expr->BinaryOperationFeedbackId());
+ CallIC(stub.GetCode(isolate()), expr->BinaryOperationFeedbackId());
patch_site.EmitPatchInfo();
context()->Plug(rax);
}
void FullCodeGenerator::EmitAssignment(Expression* expr) {
- // Invalid left-hand sides are rewritten by the parser to have a 'throw
- // ReferenceError' on the left-hand side.
- if (!expr->IsValidLeftHandSide()) {
- VisitForEffect(expr);
- return;
- }
+ ASSERT(expr->IsValidLeftHandSide());
// Left-hand side can only be a property, a global or a (parameter or local)
// slot.
@@ -2370,22 +2395,22 @@ void FullCodeGenerator::EmitAssignment(Expression* expr) {
break;
}
case NAMED_PROPERTY: {
- __ push(rax); // Preserve value.
+ __ Push(rax); // Preserve value.
VisitForAccumulatorValue(prop->obj());
__ movp(rdx, rax);
- __ pop(rax); // Restore value.
+ __ Pop(rax); // Restore value.
__ Move(rcx, prop->key()->AsLiteral()->value());
- CallStoreIC(NOT_CONTEXTUAL);
+ CallStoreIC();
break;
}
case KEYED_PROPERTY: {
- __ push(rax); // Preserve value.
+ __ Push(rax); // Preserve value.
VisitForStackValue(prop->obj());
VisitForAccumulatorValue(prop->key());
__ movp(rcx, rax);
- __ pop(rdx);
- __ pop(rax); // Restore value.
- Handle<Code> ic = is_classic_mode()
+ __ Pop(rdx);
+ __ Pop(rax); // Restore value.
+ Handle<Code> ic = strict_mode() == SLOPPY
? isolate()->builtins()->KeyedStoreIC_Initialize()
: isolate()->builtins()->KeyedStoreIC_Initialize_Strict();
CallIC(ic);
@@ -2396,44 +2421,58 @@ void FullCodeGenerator::EmitAssignment(Expression* expr) {
}
+void FullCodeGenerator::EmitStoreToStackLocalOrContextSlot(
+ Variable* var, MemOperand location) {
+ __ movp(location, rax);
+ if (var->IsContextSlot()) {
+ __ movp(rdx, rax);
+ __ RecordWriteContextSlot(
+ rcx, Context::SlotOffset(var->index()), rdx, rbx, kDontSaveFPRegs);
+ }
+}
+
+
+void FullCodeGenerator::EmitCallStoreContextSlot(
+ Handle<String> name, StrictMode strict_mode) {
+ __ Push(rax); // Value.
+ __ Push(rsi); // Context.
+ __ Push(name);
+ __ Push(Smi::FromInt(strict_mode));
+ __ CallRuntime(Runtime::kHiddenStoreContextSlot, 4);
+}
+
+
void FullCodeGenerator::EmitVariableAssignment(Variable* var,
Token::Value op) {
if (var->IsUnallocated()) {
// Global var, const, or let.
__ Move(rcx, var->name());
__ movp(rdx, GlobalObjectOperand());
- CallStoreIC(CONTEXTUAL);
- } else if (op == Token::INIT_CONST) {
+ CallStoreIC();
+
+ } else if (op == Token::INIT_CONST_LEGACY) {
// Const initializers need a write barrier.
ASSERT(!var->IsParameter()); // No const parameters.
- if (var->IsStackLocal()) {
+ if (var->IsLookupSlot()) {
+ __ Push(rax);
+ __ Push(rsi);
+ __ Push(var->name());
+ __ CallRuntime(Runtime::kHiddenInitializeConstContextSlot, 3);
+ } else {
+ ASSERT(var->IsStackLocal() || var->IsContextSlot());
Label skip;
- __ movp(rdx, StackOperand(var));
+ MemOperand location = VarOperand(var, rcx);
+ __ movp(rdx, location);
__ CompareRoot(rdx, Heap::kTheHoleValueRootIndex);
__ j(not_equal, &skip);
- __ movp(StackOperand(var), rax);
+ EmitStoreToStackLocalOrContextSlot(var, location);
__ bind(&skip);
- } else {
- ASSERT(var->IsContextSlot() || var->IsLookupSlot());
- // Like var declarations, const declarations are hoisted to function
- // scope. However, unlike var initializers, const initializers are
- // able to drill a hole to that function context, even from inside a
- // 'with' context. We thus bypass the normal static scope lookup for
- // var->IsContextSlot().
- __ push(rax);
- __ push(rsi);
- __ Push(var->name());
- __ CallRuntime(Runtime::kInitializeConstContextSlot, 3);
}
} else if (var->mode() == LET && op != Token::INIT_LET) {
// Non-initializing assignment to let variable needs a write barrier.
if (var->IsLookupSlot()) {
- __ push(rax); // Value.
- __ push(rsi); // Context.
- __ Push(var->name());
- __ Push(Smi::FromInt(language_mode()));
- __ CallRuntime(Runtime::kStoreContextSlot, 4);
+ EmitCallStoreContextSlot(var->name(), strict_mode());
} else {
ASSERT(var->IsStackAllocated() || var->IsContextSlot());
Label assign;
@@ -2442,20 +2481,18 @@ void FullCodeGenerator::EmitVariableAssignment(Variable* var,
__ CompareRoot(rdx, Heap::kTheHoleValueRootIndex);
__ j(not_equal, &assign, Label::kNear);
__ Push(var->name());
- __ CallRuntime(Runtime::kThrowReferenceError, 1);
+ __ CallRuntime(Runtime::kHiddenThrowReferenceError, 1);
__ bind(&assign);
- __ movp(location, rax);
- if (var->IsContextSlot()) {
- __ movp(rdx, rax);
- __ RecordWriteContextSlot(
- rcx, Context::SlotOffset(var->index()), rdx, rbx, kDontSaveFPRegs);
- }
+ EmitStoreToStackLocalOrContextSlot(var, location);
}
- } else if (!var->is_const_mode() || op == Token::INIT_CONST_HARMONY) {
+ } else if (!var->is_const_mode() || op == Token::INIT_CONST) {
// Assignment to var or initializing assignment to let/const
// in harmony mode.
- if (var->IsStackAllocated() || var->IsContextSlot()) {
+ if (var->IsLookupSlot()) {
+ EmitCallStoreContextSlot(var->name(), strict_mode());
+ } else {
+ ASSERT(var->IsStackAllocated() || var->IsContextSlot());
MemOperand location = VarOperand(var, rcx);
if (generate_debug_code_ && op == Token::INIT_LET) {
// Check for an uninitialized let binding.
@@ -2463,20 +2500,7 @@ void FullCodeGenerator::EmitVariableAssignment(Variable* var,
__ CompareRoot(rdx, Heap::kTheHoleValueRootIndex);
__ Check(equal, kLetBindingReInitialization);
}
- // Perform the assignment.
- __ movp(location, rax);
- if (var->IsContextSlot()) {
- __ movp(rdx, rax);
- __ RecordWriteContextSlot(
- rcx, Context::SlotOffset(var->index()), rdx, rbx, kDontSaveFPRegs);
- }
- } else {
- ASSERT(var->IsLookupSlot());
- __ push(rax); // Value.
- __ push(rsi); // Context.
- __ Push(var->name());
- __ Push(Smi::FromInt(language_mode()));
- __ CallRuntime(Runtime::kStoreContextSlot, 4);
+ EmitStoreToStackLocalOrContextSlot(var, location);
}
}
// Non-initializing assignments to consts are ignored.
@@ -2492,8 +2516,8 @@ void FullCodeGenerator::EmitNamedPropertyAssignment(Assignment* expr) {
// Record source code position before IC call.
SetSourcePosition(expr->position());
__ Move(rcx, prop->key()->AsLiteral()->value());
- __ pop(rdx);
- CallStoreIC(NOT_CONTEXTUAL, expr->AssignmentFeedbackId());
+ __ Pop(rdx);
+ CallStoreIC(expr->AssignmentFeedbackId());
PrepareForBailoutForId(expr->AssignmentId(), TOS_REG);
context()->Plug(rax);
@@ -2503,14 +2527,14 @@ void FullCodeGenerator::EmitNamedPropertyAssignment(Assignment* expr) {
void FullCodeGenerator::EmitKeyedPropertyAssignment(Assignment* expr) {
// Assignment to a property, using a keyed store IC.
- __ pop(rcx);
- __ pop(rdx);
+ __ Pop(rcx);
+ __ Pop(rdx);
// Record source code position before IC call.
SetSourcePosition(expr->position());
- Handle<Code> ic = is_classic_mode()
+ Handle<Code> ic = strict_mode() == SLOPPY
? isolate()->builtins()->KeyedStoreIC_Initialize()
: isolate()->builtins()->KeyedStoreIC_Initialize_Strict();
- CallIC(ic, NOT_CONTEXTUAL, expr->AssignmentFeedbackId());
+ CallIC(ic, expr->AssignmentFeedbackId());
PrepareForBailoutForId(expr->AssignmentId(), TOS_REG);
context()->Plug(rax);
@@ -2529,7 +2553,7 @@ void FullCodeGenerator::VisitProperty(Property* expr) {
} else {
VisitForStackValue(expr->obj());
VisitForAccumulatorValue(expr->key());
- __ pop(rdx);
+ __ Pop(rdx);
EmitKeyedPropertyLoad(expr);
context()->Plug(rax);
}
@@ -2537,10 +2561,8 @@ void FullCodeGenerator::VisitProperty(Property* expr) {
void FullCodeGenerator::CallIC(Handle<Code> code,
- ContextualMode mode,
TypeFeedbackId ast_id) {
ic_total_count_++;
- ASSERT(mode != CONTEXTUAL || ast_id.IsNone());
__ call(code, RelocInfo::CODE_TARGET, ast_id);
}
@@ -2559,7 +2581,7 @@ void FullCodeGenerator::EmitCallWithIC(Call* expr) {
PrepareForBailout(callee, NO_REGISTERS);
}
// Push undefined as receiver. This is patched in the method prologue if it
- // is a classic mode method.
+ // is a sloppy mode method.
__ Push(isolate()->factory()->undefined_value());
flags = NO_CALL_FUNCTION_FLAGS;
} else {
@@ -2569,7 +2591,7 @@ void FullCodeGenerator::EmitCallWithIC(Call* expr) {
EmitNamedPropertyLoad(callee->AsProperty());
PrepareForBailoutForId(callee->AsProperty()->LoadId(), TOS_REG);
// Push the target function under the receiver.
- __ push(Operand(rsp, 0));
+ __ Push(Operand(rsp, 0));
__ movp(Operand(rsp, kPointerSize), rax);
flags = CALL_AS_METHOD;
}
@@ -2613,7 +2635,7 @@ void FullCodeGenerator::EmitKeyedCallWithIC(Call* expr,
PrepareForBailoutForId(callee->AsProperty()->LoadId(), TOS_REG);
// Push the target function under the receiver.
- __ push(Operand(rsp, 0));
+ __ Push(Operand(rsp, 0));
__ movp(Operand(rsp, kPointerSize), rax);
// Load the arguments.
@@ -2650,15 +2672,15 @@ void FullCodeGenerator::EmitCallWithStub(Call* expr) {
SetSourcePosition(expr->position());
Handle<Object> uninitialized =
- TypeFeedbackCells::UninitializedSentinel(isolate());
- Handle<Cell> cell = isolate()->factory()->NewCell(uninitialized);
- RecordTypeFeedbackCell(expr->CallFeedbackId(), cell);
- __ Move(rbx, cell);
+ TypeFeedbackInfo::UninitializedSentinel(isolate());
+ StoreFeedbackVectorSlot(expr->CallFeedbackSlot(), uninitialized);
+ __ Move(rbx, FeedbackVector());
+ __ Move(rdx, Smi::FromInt(expr->CallFeedbackSlot()));
// Record call targets in unoptimized code.
CallFunctionStub stub(arg_count, RECORD_CALL_TARGET);
__ movp(rdi, Operand(rsp, (arg_count + 1) * kPointerSize));
- __ CallStub(&stub, expr->CallFeedbackId());
+ __ CallStub(&stub);
RecordJSReturnSite(expr);
// Restore context register.
__ movp(rsi, Operand(rbp, StandardFrameConstants::kContextOffset));
@@ -2670,23 +2692,23 @@ void FullCodeGenerator::EmitCallWithStub(Call* expr) {
void FullCodeGenerator::EmitResolvePossiblyDirectEval(int arg_count) {
// Push copy of the first argument or undefined if it doesn't exist.
if (arg_count > 0) {
- __ push(Operand(rsp, arg_count * kPointerSize));
+ __ Push(Operand(rsp, arg_count * kPointerSize));
} else {
__ PushRoot(Heap::kUndefinedValueRootIndex);
}
// Push the receiver of the enclosing function and do runtime call.
StackArgumentsAccessor args(rbp, info_->scope()->num_parameters());
- __ push(args.GetReceiverOperand());
+ __ Push(args.GetReceiverOperand());
// Push the language mode.
- __ Push(Smi::FromInt(language_mode()));
+ __ Push(Smi::FromInt(strict_mode()));
// Push the start position of the scope the call resides in.
__ Push(Smi::FromInt(scope()->start_position()));
// Do the runtime call.
- __ CallRuntime(Runtime::kResolvePossiblyDirectEval, 5);
+ __ CallRuntime(Runtime::kHiddenResolvePossiblyDirectEval, 5);
}
@@ -2702,8 +2724,8 @@ void FullCodeGenerator::VisitCall(Call* expr) {
Call::CallType call_type = expr->GetCallType(isolate());
if (call_type == Call::POSSIBLY_EVAL_CALL) {
- // In a call to eval, we first call %ResolvePossiblyDirectEval to
- // resolve the function we need to call and the receiver of the call.
+ // In a call to eval, we first call RuntimeHidden_ResolvePossiblyDirectEval
+ // to resolve the function we need to call and the receiver of the call.
// Then we call the resolved function using the given arguments.
ZoneList<Expression*>* args = expr->arguments();
int arg_count = args->length();
@@ -2718,7 +2740,7 @@ void FullCodeGenerator::VisitCall(Call* expr) {
// Push a copy of the function (found below the arguments) and resolve
// eval.
- __ push(Operand(rsp, (arg_count + 1) * kPointerSize));
+ __ Push(Operand(rsp, (arg_count + 1) * kPointerSize));
EmitResolvePossiblyDirectEval(arg_count);
// The runtime call returns a pair of values in rax (function) and
@@ -2751,11 +2773,11 @@ void FullCodeGenerator::VisitCall(Call* expr) {
__ bind(&slow);
// Call the runtime to find the function to call (returned in rax) and
// the object holding it (returned in rdx).
- __ push(context_register());
+ __ Push(context_register());
__ Push(proxy->name());
- __ CallRuntime(Runtime::kLoadContextSlot, 2);
- __ push(rax); // Function.
- __ push(rdx); // Receiver.
+ __ CallRuntime(Runtime::kHiddenLoadContextSlot, 2);
+ __ Push(rax); // Function.
+ __ Push(rdx); // Receiver.
// If fast case code has been generated, emit code to push the function
// and receiver and have the slow path jump around this code.
@@ -2764,7 +2786,7 @@ void FullCodeGenerator::VisitCall(Call* expr) {
__ jmp(&call, Label::kNear);
__ bind(&done);
// Push function.
- __ push(rax);
+ __ Push(rax);
// The receiver is implicitly the global receiver. Indicate this by
// passing the hole to the call function stub.
__ PushRoot(Heap::kUndefinedValueRootIndex);
@@ -2830,10 +2852,17 @@ void FullCodeGenerator::VisitCallNew(CallNew* expr) {
// Record call targets in unoptimized code, but not in the snapshot.
Handle<Object> uninitialized =
- TypeFeedbackCells::UninitializedSentinel(isolate());
- Handle<Cell> cell = isolate()->factory()->NewCell(uninitialized);
- RecordTypeFeedbackCell(expr->CallNewFeedbackId(), cell);
- __ Move(rbx, cell);
+ TypeFeedbackInfo::UninitializedSentinel(isolate());
+ StoreFeedbackVectorSlot(expr->CallNewFeedbackSlot(), uninitialized);
+ if (FLAG_pretenuring_call_new) {
+ StoreFeedbackVectorSlot(expr->AllocationSiteFeedbackSlot(),
+ isolate()->factory()->NewAllocationSite());
+ ASSERT(expr->AllocationSiteFeedbackSlot() ==
+ expr->CallNewFeedbackSlot() + 1);
+ }
+
+ __ Move(rbx, FeedbackVector());
+ __ Move(rdx, Smi::FromInt(expr->CallNewFeedbackSlot()));
CallConstructStub stub(RECORD_CALL_TARGET);
__ Call(stub.GetCode(isolate()), RelocInfo::CONSTRUCT_CALL);
@@ -2905,10 +2934,10 @@ void FullCodeGenerator::EmitIsObject(CallRuntime* expr) {
__ testb(FieldOperand(rbx, Map::kBitFieldOffset),
Immediate(1 << Map::kIsUndetectable));
__ j(not_zero, if_false);
- __ movzxbq(rbx, FieldOperand(rbx, Map::kInstanceTypeOffset));
- __ cmpq(rbx, Immediate(FIRST_NONCALLABLE_SPEC_OBJECT_TYPE));
+ __ movzxbp(rbx, FieldOperand(rbx, Map::kInstanceTypeOffset));
+ __ cmpp(rbx, Immediate(FIRST_NONCALLABLE_SPEC_OBJECT_TYPE));
__ j(below, if_false);
- __ cmpq(rbx, Immediate(LAST_NONCALLABLE_SPEC_OBJECT_TYPE));
+ __ cmpp(rbx, Immediate(LAST_NONCALLABLE_SPEC_OBJECT_TYPE));
PrepareForBailoutBeforeSplit(expr, true, if_true, if_false);
Split(below_equal, if_true, if_false, fall_through);
@@ -2998,20 +3027,20 @@ void FullCodeGenerator::EmitIsStringWrapperSafeForDefaultValueOf(
// Skip loop if no descriptors are valid.
__ NumberOfOwnDescriptors(rcx, rbx);
- __ cmpq(rcx, Immediate(0));
+ __ cmpp(rcx, Immediate(0));
__ j(equal, &done);
__ LoadInstanceDescriptors(rbx, r8);
// rbx: descriptor array.
// rcx: valid entries in the descriptor array.
// Calculate the end of the descriptor array.
- __ imul(rcx, rcx, Immediate(DescriptorArray::kDescriptorSize));
+ __ imulp(rcx, rcx, Immediate(DescriptorArray::kDescriptorSize));
SmiIndex index = masm_->SmiToIndex(rdx, rcx, kPointerSizeLog2);
- __ lea(rcx,
+ __ leap(rcx,
Operand(
r8, index.reg, index.scale, DescriptorArray::kFirstOffset));
// Calculate location of the first key name.
- __ addq(r8, Immediate(DescriptorArray::kFirstOffset));
+ __ addp(r8, Immediate(DescriptorArray::kFirstOffset));
// Loop through all the keys in the descriptor array. If one of these is the
// internalized string "valueOf" the result is false.
__ jmp(&entry);
@@ -3019,15 +3048,15 @@ void FullCodeGenerator::EmitIsStringWrapperSafeForDefaultValueOf(
__ movp(rdx, FieldOperand(r8, 0));
__ Cmp(rdx, isolate()->factory()->value_of_string());
__ j(equal, if_false);
- __ addq(r8, Immediate(DescriptorArray::kDescriptorSize * kPointerSize));
+ __ addp(r8, Immediate(DescriptorArray::kDescriptorSize * kPointerSize));
__ bind(&entry);
- __ cmpq(r8, rcx);
+ __ cmpp(r8, rcx);
__ j(not_equal, &loop);
__ bind(&done);
// Set the bit in the map to indicate that there is no local valueOf field.
- __ or_(FieldOperand(rbx, Map::kBitField2Offset),
+ __ orp(FieldOperand(rbx, Map::kBitField2Offset),
Immediate(1 << Map::kStringWrapperSafeForDefaultValueOf));
__ bind(&skip_lookup);
@@ -3035,12 +3064,12 @@ void FullCodeGenerator::EmitIsStringWrapperSafeForDefaultValueOf(
// If a valueOf property is not found on the object check that its
// prototype is the un-modified String prototype. If not, the result is false.
__ movp(rcx, FieldOperand(rbx, Map::kPrototypeOffset));
- __ testq(rcx, Immediate(kSmiTagMask));
+ __ testp(rcx, Immediate(kSmiTagMask));
__ j(zero, if_false);
__ movp(rcx, FieldOperand(rcx, HeapObject::kMapOffset));
__ movp(rdx, Operand(rsi, Context::SlotOffset(Context::GLOBAL_OBJECT_INDEX)));
__ movp(rdx, FieldOperand(rdx, GlobalObject::kNativeContextOffset));
- __ cmpq(rcx,
+ __ cmpp(rcx,
ContextOperand(rdx, Context::STRING_FUNCTION_PROTOTYPE_MAP_INDEX));
PrepareForBailoutBeforeSplit(expr, true, if_true, if_false);
Split(equal, if_true, if_false, fall_through);
@@ -3087,8 +3116,8 @@ void FullCodeGenerator::EmitIsMinusZero(CallRuntime* expr) {
Handle<Map> map = masm()->isolate()->factory()->heap_number_map();
__ CheckMap(rax, map, if_false, DO_SMI_CHECK);
__ cmpl(FieldOperand(rax, HeapNumber::kExponentOffset),
- Immediate(0x80000000));
- __ j(not_equal, if_false);
+ Immediate(0x1));
+ __ j(no_overflow, if_false);
__ cmpl(FieldOperand(rax, HeapNumber::kMantissaOffset),
Immediate(0x00000000));
PrepareForBailoutBeforeSplit(expr, true, if_true, if_false);
@@ -3189,8 +3218,8 @@ void FullCodeGenerator::EmitObjectEquals(CallRuntime* expr) {
context()->PrepareTest(&materialize_true, &materialize_false,
&if_true, &if_false, &fall_through);
- __ pop(rbx);
- __ cmpq(rax, rbx);
+ __ Pop(rbx);
+ __ cmpp(rax, rbx);
PrepareForBailoutBeforeSplit(expr, true, if_true, if_false);
Split(equal, if_true, if_false, fall_through);
@@ -3310,7 +3339,7 @@ void FullCodeGenerator::EmitLog(CallRuntime* expr) {
if (CodeGenerator::ShouldGenerateLog(isolate(), args->at(0))) {
VisitForStackValue(args->at(1));
VisitForStackValue(args->at(2));
- __ CallRuntime(Runtime::kLog, 2);
+ __ CallRuntime(Runtime::kHiddenLog, 2);
}
// Finally, we're expected to leave a value on the top of the stack.
__ LoadRoot(rax, Heap::kUndefinedValueRootIndex);
@@ -3389,7 +3418,7 @@ void FullCodeGenerator::EmitDateField(CallRuntime* expr) {
ExternalReference stamp = ExternalReference::date_cache_stamp(isolate());
Operand stamp_operand = __ ExternalOperand(stamp);
__ movp(scratch, stamp_operand);
- __ cmpq(scratch, FieldOperand(object, JSDate::kCacheStampOffset));
+ __ cmpp(scratch, FieldOperand(object, JSDate::kCacheStampOffset));
__ j(not_equal, &runtime, Label::kNear);
__ movp(result, FieldOperand(object, JSDate::kValueOffset +
kPointerSize * index->value()));
@@ -3405,7 +3434,7 @@ void FullCodeGenerator::EmitDateField(CallRuntime* expr) {
}
__ bind(&not_date_object);
- __ CallRuntime(Runtime::kThrowNotDateError, 0);
+ __ CallRuntime(Runtime::kHiddenThrowNotDateError, 0);
__ bind(&done);
context()->Plug(rax);
}
@@ -3422,8 +3451,8 @@ void FullCodeGenerator::EmitOneByteSeqStringSetChar(CallRuntime* expr) {
VisitForStackValue(args->at(1)); // index
VisitForStackValue(args->at(2)); // value
VisitForAccumulatorValue(args->at(0)); // string
- __ pop(value);
- __ pop(index);
+ __ Pop(value);
+ __ Pop(index);
if (FLAG_debug_code) {
__ Check(__ CheckSmi(value), kNonSmiValue);
@@ -3455,8 +3484,8 @@ void FullCodeGenerator::EmitTwoByteSeqStringSetChar(CallRuntime* expr) {
VisitForStackValue(args->at(1)); // index
VisitForStackValue(args->at(2)); // value
VisitForAccumulatorValue(args->at(0)); // string
- __ pop(value);
- __ pop(index);
+ __ Pop(value);
+ __ Pop(index);
if (FLAG_debug_code) {
__ Check(__ CheckSmi(value), kNonSmiValue);
@@ -3495,7 +3524,7 @@ void FullCodeGenerator::EmitSetValueOf(CallRuntime* expr) {
VisitForStackValue(args->at(0)); // Load the object.
VisitForAccumulatorValue(args->at(1)); // Load the value.
- __ pop(rbx); // rax = value. rbx = object.
+ __ Pop(rbx); // rax = value. rbx = object.
Label done;
// If the object is a smi, return the value.
@@ -3560,7 +3589,7 @@ void FullCodeGenerator::EmitStringCharCodeAt(CallRuntime* expr) {
Register index = rax;
Register result = rdx;
- __ pop(object);
+ __ Pop(object);
Label need_conversion;
Label index_out_of_range;
@@ -3607,7 +3636,7 @@ void FullCodeGenerator::EmitStringCharAt(CallRuntime* expr) {
Register scratch = rdx;
Register result = rax;
- __ pop(object);
+ __ Pop(object);
Label need_conversion;
Label index_out_of_range;
@@ -3649,7 +3678,7 @@ void FullCodeGenerator::EmitStringAdd(CallRuntime* expr) {
VisitForStackValue(args->at(0));
VisitForAccumulatorValue(args->at(1));
- __ pop(rdx);
+ __ Pop(rdx);
StringAddStub stub(STRING_ADD_CHECK_BOTH, NOT_TENURED);
__ CallStub(&stub);
context()->Plug(rax);
@@ -3713,7 +3742,7 @@ void FullCodeGenerator::EmitCallFunction(CallRuntime* expr) {
__ jmp(&done);
__ bind(&runtime);
- __ push(rax);
+ __ Push(rax);
__ CallRuntime(Runtime::kCall, args->length());
__ bind(&done);
@@ -3728,8 +3757,8 @@ void FullCodeGenerator::EmitRegExpConstructResult(CallRuntime* expr) {
VisitForStackValue(args->at(0));
VisitForStackValue(args->at(1));
VisitForAccumulatorValue(args->at(2));
- __ pop(rbx);
- __ pop(rcx);
+ __ Pop(rbx);
+ __ Pop(rcx);
__ CallStub(&stub);
context()->Plug(rax);
}
@@ -3770,7 +3799,7 @@ void FullCodeGenerator::EmitGetFromCache(CallRuntime* expr) {
// tmp now holds finger offset as a smi.
SmiIndex index =
__ SmiToIndex(kScratchRegister, tmp, kPointerSizeLog2);
- __ cmpq(key, FieldOperand(cache,
+ __ cmpp(key, FieldOperand(cache,
index.reg,
index.scale,
FixedArray::kHeaderSize));
@@ -3783,9 +3812,9 @@ void FullCodeGenerator::EmitGetFromCache(CallRuntime* expr) {
__ bind(&not_found);
// Call runtime to perform the lookup.
- __ push(cache);
- __ push(key);
- __ CallRuntime(Runtime::kGetFromCache, 2);
+ __ Push(cache);
+ __ Push(key);
+ __ CallRuntime(Runtime::kHiddenGetFromCache, 2);
__ bind(&done);
context()->Plug(rax);
@@ -3861,7 +3890,7 @@ void FullCodeGenerator::EmitFastAsciiArrayJoin(CallRuntime* expr) {
// Separator operand is already pushed. Make room for the two
// other stack fields, and clear the direction flag in anticipation
// of calling CopyBytes.
- __ subq(rsp, Immediate(2 * kPointerSize));
+ __ subp(rsp, Immediate(2 * kPointerSize));
__ cld();
// Check that the array is a JSArray
__ JumpIfSmi(array, &bailout);
@@ -3899,7 +3928,7 @@ void FullCodeGenerator::EmitFastAsciiArrayJoin(CallRuntime* expr) {
// Live loop registers: index(int32), array_length(int32), string(String*),
// scratch, string_length(int32), elements(FixedArray*).
if (generate_debug_code_) {
- __ cmpq(index, array_length);
+ __ cmpp(index, array_length);
__ Assert(below, kNoEmptyArraysHereInEmitFastAsciiArrayJoin);
}
__ bind(&loop);
@@ -3975,7 +4004,7 @@ void FullCodeGenerator::EmitFastAsciiArrayJoin(CallRuntime* expr) {
__ AllocateAsciiString(result_pos, string_length, scratch,
index, string, &bailout);
__ movp(result_operand, result_pos);
- __ lea(result_pos, FieldOperand(result_pos, SeqOneByteString::kHeaderSize));
+ __ leap(result_pos, FieldOperand(result_pos, SeqOneByteString::kHeaderSize));
__ movp(string, separator_operand);
__ SmiCompare(FieldOperand(string, SeqOneByteString::kLengthOffset),
@@ -4003,7 +4032,7 @@ void FullCodeGenerator::EmitFastAsciiArrayJoin(CallRuntime* expr) {
FixedArray::kHeaderSize));
__ SmiToInteger32(string_length,
FieldOperand(string, String::kLengthOffset));
- __ lea(string,
+ __ leap(string,
FieldOperand(string, SeqOneByteString::kHeaderSize));
__ CopyBytes(result_pos, string, string_length);
__ incl(index);
@@ -4038,7 +4067,7 @@ void FullCodeGenerator::EmitFastAsciiArrayJoin(CallRuntime* expr) {
// Copy the separator character to the result.
__ movb(Operand(result_pos, 0), scratch);
- __ incq(result_pos);
+ __ incp(result_pos);
__ bind(&loop_2_entry);
// Get string = array[index].
@@ -4047,7 +4076,7 @@ void FullCodeGenerator::EmitFastAsciiArrayJoin(CallRuntime* expr) {
FixedArray::kHeaderSize));
__ SmiToInteger32(string_length,
FieldOperand(string, String::kLengthOffset));
- __ lea(string,
+ __ leap(string,
FieldOperand(string, SeqOneByteString::kHeaderSize));
__ CopyBytes(result_pos, string, string_length);
__ incl(index);
@@ -4063,16 +4092,16 @@ void FullCodeGenerator::EmitFastAsciiArrayJoin(CallRuntime* expr) {
// count from -array_length to zero, so we don't need to maintain
// a loop limit.
__ movl(index, array_length_operand);
- __ lea(elements, FieldOperand(elements, index, times_pointer_size,
+ __ leap(elements, FieldOperand(elements, index, times_pointer_size,
FixedArray::kHeaderSize));
- __ neg(index);
+ __ negq(index);
// Replace separator string with pointer to its first character, and
// make scratch be its length.
__ movp(string, separator_operand);
__ SmiToInteger32(scratch,
FieldOperand(string, String::kLengthOffset));
- __ lea(string,
+ __ leap(string,
FieldOperand(string, SeqOneByteString::kHeaderSize));
__ movp(separator_operand, string);
@@ -4098,7 +4127,7 @@ void FullCodeGenerator::EmitFastAsciiArrayJoin(CallRuntime* expr) {
__ movp(string, Operand(elements, index, times_pointer_size, 0));
__ SmiToInteger32(string_length,
FieldOperand(string, String::kLengthOffset));
- __ lea(string,
+ __ leap(string,
FieldOperand(string, SeqOneByteString::kHeaderSize));
__ CopyBytes(result_pos, string, string_length);
__ incq(index);
@@ -4109,15 +4138,15 @@ void FullCodeGenerator::EmitFastAsciiArrayJoin(CallRuntime* expr) {
__ bind(&return_result);
// Drop temp values from the stack, and restore context register.
- __ addq(rsp, Immediate(3 * kPointerSize));
+ __ addp(rsp, Immediate(3 * kPointerSize));
__ movp(rsi, Operand(rbp, StandardFrameConstants::kContextOffset));
context()->Plug(rax);
}
void FullCodeGenerator::VisitCallRuntime(CallRuntime* expr) {
- Handle<String> name = expr->name();
- if (name->length() > 0 && name->Get(0) == '_') {
+ if (expr->function() != NULL &&
+ expr->function()->intrinsic_type == Runtime::INLINE) {
Comment cmnt(masm_, "[ InlineRuntimeCall");
EmitInlineRuntimeCall(expr);
return;
@@ -4130,7 +4159,7 @@ void FullCodeGenerator::VisitCallRuntime(CallRuntime* expr) {
if (expr->is_jsruntime()) {
// Push the builtins object as receiver.
__ movp(rax, GlobalObjectOperand());
- __ push(FieldOperand(rax, GlobalObject::kBuiltinsOffset));
+ __ Push(FieldOperand(rax, GlobalObject::kBuiltinsOffset));
// Load the function from the receiver.
__ movp(rax, Operand(rsp, 0));
@@ -4138,7 +4167,7 @@ void FullCodeGenerator::VisitCallRuntime(CallRuntime* expr) {
CallLoadIC(NOT_CONTEXTUAL, expr->CallRuntimeFeedbackId());
// Push the target function under the receiver.
- __ push(Operand(rsp, 0));
+ __ Push(Operand(rsp, 0));
__ movp(Operand(rsp, kPointerSize), rax);
// Push the arguments ("left-to-right").
@@ -4179,20 +4208,18 @@ void FullCodeGenerator::VisitUnaryOperation(UnaryOperation* expr) {
if (property != NULL) {
VisitForStackValue(property->obj());
VisitForStackValue(property->key());
- StrictModeFlag strict_mode_flag = (language_mode() == CLASSIC_MODE)
- ? kNonStrictMode : kStrictMode;
- __ Push(Smi::FromInt(strict_mode_flag));
+ __ Push(Smi::FromInt(strict_mode()));
__ InvokeBuiltin(Builtins::DELETE, CALL_FUNCTION);
context()->Plug(rax);
} else if (proxy != NULL) {
Variable* var = proxy->var();
// Delete of an unqualified identifier is disallowed in strict mode
// but "delete this" is allowed.
- ASSERT(language_mode() == CLASSIC_MODE || var->is_this());
+ ASSERT(strict_mode() == SLOPPY || var->is_this());
if (var->IsUnallocated()) {
- __ push(GlobalObjectOperand());
+ __ Push(GlobalObjectOperand());
__ Push(var->name());
- __ Push(Smi::FromInt(kNonStrictMode));
+ __ Push(Smi::FromInt(SLOPPY));
__ InvokeBuiltin(Builtins::DELETE, CALL_FUNCTION);
context()->Plug(rax);
} else if (var->IsStackAllocated() || var->IsContextSlot()) {
@@ -4203,9 +4230,9 @@ void FullCodeGenerator::VisitUnaryOperation(UnaryOperation* expr) {
} else {
// Non-global variable. Call the runtime to try to delete from the
// context where the variable was introduced.
- __ push(context_register());
+ __ Push(context_register());
__ Push(var->name());
- __ CallRuntime(Runtime::kDeleteContextSlot, 2);
+ __ CallRuntime(Runtime::kHiddenDeleteContextSlot, 2);
context()->Plug(rax);
}
} else {
@@ -4286,16 +4313,11 @@ void FullCodeGenerator::VisitUnaryOperation(UnaryOperation* expr) {
void FullCodeGenerator::VisitCountOperation(CountOperation* expr) {
+ ASSERT(expr->expression()->IsValidLeftHandSide());
+
Comment cmnt(masm_, "[ CountOperation");
SetSourcePosition(expr->position());
- // Invalid left-hand-sides are rewritten to have a 'throw
- // ReferenceError' as the left-hand side.
- if (!expr->expression()->IsValidLeftHandSide()) {
- VisitForEffect(expr->expression());
- return;
- }
-
// Expression can only be a property, a global or a (parameter or local)
// slot.
enum LhsKind { VARIABLE, NAMED_PROPERTY, KEYED_PROPERTY };
@@ -4320,13 +4342,13 @@ void FullCodeGenerator::VisitCountOperation(CountOperation* expr) {
}
if (assign_type == NAMED_PROPERTY) {
VisitForAccumulatorValue(prop->obj());
- __ push(rax); // Copy of receiver, needed for later store.
+ __ Push(rax); // Copy of receiver, needed for later store.
EmitNamedPropertyLoad(prop);
} else {
VisitForStackValue(prop->obj());
VisitForAccumulatorValue(prop->key());
__ movp(rdx, Operand(rsp, 0)); // Leave receiver on stack
- __ push(rax); // Copy of key, needed for later store.
+ __ Push(rax); // Copy of key, needed for later store.
EmitKeyedPropertyLoad(prop);
}
}
@@ -4354,7 +4376,7 @@ void FullCodeGenerator::VisitCountOperation(CountOperation* expr) {
// of the stack.
switch (assign_type) {
case VARIABLE:
- __ push(rax);
+ __ Push(rax);
break;
case NAMED_PROPERTY:
__ movp(Operand(rsp, kPointerSize), rax);
@@ -4389,7 +4411,7 @@ void FullCodeGenerator::VisitCountOperation(CountOperation* expr) {
// of the stack.
switch (assign_type) {
case VARIABLE:
- __ push(rax);
+ __ Push(rax);
break;
case NAMED_PROPERTY:
__ movp(Operand(rsp, kPointerSize), rax);
@@ -4409,9 +4431,7 @@ void FullCodeGenerator::VisitCountOperation(CountOperation* expr) {
__ movp(rdx, rax);
__ Move(rax, Smi::FromInt(1));
BinaryOpICStub stub(expr->binary_op(), NO_OVERWRITE);
- CallIC(stub.GetCode(isolate()),
- NOT_CONTEXTUAL,
- expr->CountBinOpFeedbackId());
+ CallIC(stub.GetCode(isolate()), expr->CountBinOpFeedbackId());
patch_site.EmitPatchInfo();
__ bind(&done);
@@ -4441,8 +4461,8 @@ void FullCodeGenerator::VisitCountOperation(CountOperation* expr) {
break;
case NAMED_PROPERTY: {
__ Move(rcx, prop->key()->AsLiteral()->value());
- __ pop(rdx);
- CallStoreIC(NOT_CONTEXTUAL, expr->CountStoreFeedbackId());
+ __ Pop(rdx);
+ CallStoreIC(expr->CountStoreFeedbackId());
PrepareForBailoutForId(expr->AssignmentId(), TOS_REG);
if (expr->is_postfix()) {
if (!context()->IsEffect()) {
@@ -4454,12 +4474,12 @@ void FullCodeGenerator::VisitCountOperation(CountOperation* expr) {
break;
}
case KEYED_PROPERTY: {
- __ pop(rcx);
- __ pop(rdx);
- Handle<Code> ic = is_classic_mode()
+ __ Pop(rcx);
+ __ Pop(rdx);
+ Handle<Code> ic = strict_mode() == SLOPPY
? isolate()->builtins()->KeyedStoreIC_Initialize()
: isolate()->builtins()->KeyedStoreIC_Initialize_Strict();
- CallIC(ic, NOT_CONTEXTUAL, expr->CountStoreFeedbackId());
+ CallIC(ic, expr->CountStoreFeedbackId());
PrepareForBailoutForId(expr->AssignmentId(), TOS_REG);
if (expr->is_postfix()) {
if (!context()->IsEffect()) {
@@ -4480,7 +4500,7 @@ void FullCodeGenerator::VisitForTypeofValue(Expression* expr) {
ASSERT(!context()->IsTest());
if (proxy != NULL && proxy->var()->IsUnallocated()) {
- Comment cmnt(masm_, "Global variable");
+ Comment cmnt(masm_, "[ Global variable");
__ Move(rcx, proxy->name());
__ movp(rax, GlobalObjectOperand());
// Use a regular load, not a contextual load, to avoid a reference
@@ -4489,6 +4509,7 @@ void FullCodeGenerator::VisitForTypeofValue(Expression* expr) {
PrepareForBailout(expr, TOS_REG);
context()->Plug(rax);
} else if (proxy != NULL && proxy->var()->IsLookupSlot()) {
+ Comment cmnt(masm_, "[ Lookup slot");
Label done, slow;
// Generate code for loading from variables potentially shadowed
@@ -4496,9 +4517,9 @@ void FullCodeGenerator::VisitForTypeofValue(Expression* expr) {
EmitDynamicLookupFastCase(proxy->var(), INSIDE_TYPEOF, &slow, &done);
__ bind(&slow);
- __ push(rsi);
+ __ Push(rsi);
__ Push(proxy->name());
- __ CallRuntime(Runtime::kLoadContextSlotNoReferenceError, 2);
+ __ CallRuntime(Runtime::kHiddenLoadContextSlotNoReferenceError, 2);
PrepareForBailout(expr, TOS_REG);
__ bind(&done);
@@ -4621,7 +4642,7 @@ void FullCodeGenerator::VisitCompareOperation(CompareOperation* expr) {
InstanceofStub stub(InstanceofStub::kNoFlags);
__ CallStub(&stub);
PrepareForBailoutBeforeSplit(expr, true, if_true, if_false);
- __ testq(rax, rax);
+ __ testp(rax, rax);
// The stub returns 0 for true.
Split(zero, if_true, if_false, fall_through);
break;
@@ -4630,16 +4651,16 @@ void FullCodeGenerator::VisitCompareOperation(CompareOperation* expr) {
default: {
VisitForAccumulatorValue(expr->right());
Condition cc = CompareIC::ComputeCondition(op);
- __ pop(rdx);
+ __ Pop(rdx);
bool inline_smi_code = ShouldInlineSmiCase(op);
JumpPatchSite patch_site(masm_);
if (inline_smi_code) {
Label slow_case;
__ movp(rcx, rdx);
- __ or_(rcx, rax);
+ __ orp(rcx, rax);
patch_site.EmitJumpIfNotSmi(rcx, &slow_case, Label::kNear);
- __ cmpq(rdx, rax);
+ __ cmpp(rdx, rax);
Split(cc, if_true, if_false, NULL);
__ bind(&slow_case);
}
@@ -4647,11 +4668,11 @@ void FullCodeGenerator::VisitCompareOperation(CompareOperation* expr) {
// Record position and call the compare IC.
SetSourcePosition(expr->position());
Handle<Code> ic = CompareIC::GetUninitialized(isolate(), op);
- CallIC(ic, NOT_CONTEXTUAL, expr->CompareOperationFeedbackId());
+ CallIC(ic, expr->CompareOperationFeedbackId());
patch_site.EmitPatchInfo();
PrepareForBailoutBeforeSplit(expr, true, if_true, if_false);
- __ testq(rax, rax);
+ __ testp(rax, rax);
Split(cc, if_true, if_false, fall_through);
}
}
@@ -4682,8 +4703,8 @@ void FullCodeGenerator::EmitLiteralCompareNil(CompareOperation* expr,
Split(equal, if_true, if_false, fall_through);
} else {
Handle<Code> ic = CompareNilICStub::GetUninitialized(isolate(), nil);
- CallIC(ic, NOT_CONTEXTUAL, expr->CompareOperationFeedbackId());
- __ testq(rax, rax);
+ CallIC(ic, expr->CompareOperationFeedbackId());
+ __ testp(rax, rax);
Split(not_zero, if_true, if_false, fall_through);
}
context()->Plug(if_true, if_false);
@@ -4730,10 +4751,10 @@ void FullCodeGenerator::PushFunctionArgumentForContextAllocation() {
// Contexts created by a call to eval have the same closure as the
// context calling eval, not the anonymous closure containing the eval
// code. Fetch it from the context.
- __ push(ContextOperand(rsi, Context::CLOSURE_INDEX));
+ __ Push(ContextOperand(rsi, Context::CLOSURE_INDEX));
} else {
ASSERT(declaration_scope->is_function_scope());
- __ push(Operand(rbp, JavaScriptFrameConstants::kFunctionOffset));
+ __ Push(Operand(rbp, JavaScriptFrameConstants::kFunctionOffset));
}
}
@@ -4748,29 +4769,29 @@ void FullCodeGenerator::EnterFinallyBlock() {
// Cook return address on top of stack (smi encoded Code* delta)
__ PopReturnAddressTo(rdx);
__ Move(rcx, masm_->CodeObject());
- __ subq(rdx, rcx);
+ __ subp(rdx, rcx);
__ Integer32ToSmi(rdx, rdx);
- __ push(rdx);
+ __ Push(rdx);
// Store result register while executing finally block.
- __ push(result_register());
+ __ Push(result_register());
// Store pending message while executing finally block.
ExternalReference pending_message_obj =
ExternalReference::address_of_pending_message_obj(isolate());
__ Load(rdx, pending_message_obj);
- __ push(rdx);
+ __ Push(rdx);
ExternalReference has_pending_message =
ExternalReference::address_of_has_pending_message(isolate());
__ Load(rdx, has_pending_message);
__ Integer32ToSmi(rdx, rdx);
- __ push(rdx);
+ __ Push(rdx);
ExternalReference pending_message_script =
ExternalReference::address_of_pending_message_script(isolate());
__ Load(rdx, pending_message_script);
- __ push(rdx);
+ __ Push(rdx);
}
@@ -4778,30 +4799,30 @@ void FullCodeGenerator::ExitFinallyBlock() {
ASSERT(!result_register().is(rdx));
ASSERT(!result_register().is(rcx));
// Restore pending message from stack.
- __ pop(rdx);
+ __ Pop(rdx);
ExternalReference pending_message_script =
ExternalReference::address_of_pending_message_script(isolate());
__ Store(pending_message_script, rdx);
- __ pop(rdx);
+ __ Pop(rdx);
__ SmiToInteger32(rdx, rdx);
ExternalReference has_pending_message =
ExternalReference::address_of_has_pending_message(isolate());
__ Store(has_pending_message, rdx);
- __ pop(rdx);
+ __ Pop(rdx);
ExternalReference pending_message_obj =
ExternalReference::address_of_pending_message_obj(isolate());
__ Store(pending_message_obj, rdx);
// Restore result register from stack.
- __ pop(result_register());
+ __ Pop(result_register());
// Uncook return address.
- __ pop(rdx);
+ __ Pop(rdx);
__ SmiToInteger32(rdx, rdx);
__ Move(rcx, masm_->CodeObject());
- __ addq(rdx, rcx);
+ __ addp(rdx, rcx);
__ jmp(rdx);
}
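
The EnterFinallyBlock/ExitFinallyBlock hunks above "cook" the return address: rather than leaving a raw code pointer on the stack across the finally block, where a GC could move the Code object out from under it, they store the smi-encoded distance from the start of the current code object and rebuild the pointer on the way out. A minimal sketch of that arithmetic, with invented helper names:

    // Cook: the delta is stored as a smi, so the GC never sees a raw code pointer.
    intptr_t Cook(intptr_t return_address, intptr_t code_start) {
      return (return_address - code_start) << 32;   // subp, then Integer32ToSmi (x64 smi shift)
    }
    // Uncook: the Code object may have moved; rebuild against its current start.
    intptr_t Uncook(intptr_t cooked, intptr_t code_start) {
      return (cooked >> 32) + code_start;           // SmiToInteger32, then addp
    }
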
@@ -4876,6 +4897,7 @@ void BackEdgeTable::PatchAt(Code* unoptimized_code,
}
Assembler::set_target_address_at(call_target_address,
+ unoptimized_code,
replacement_code->entry());
unoptimized_code->GetHeap()->incremental_marking()->RecordCodeTargetPatch(
unoptimized_code, call_target_address, replacement_code);
@@ -4893,20 +4915,23 @@ BackEdgeTable::BackEdgeState BackEdgeTable::GetBackEdgeState(
if (*jns_instr_address == kJnsInstruction) {
ASSERT_EQ(kJnsOffset, *(call_target_address - 2));
ASSERT_EQ(isolate->builtins()->InterruptCheck()->entry(),
- Assembler::target_address_at(call_target_address));
+ Assembler::target_address_at(call_target_address,
+ unoptimized_code));
return INTERRUPT;
}
ASSERT_EQ(kNopByteOne, *jns_instr_address);
ASSERT_EQ(kNopByteTwo, *(call_target_address - 2));
- if (Assembler::target_address_at(call_target_address) ==
+ if (Assembler::target_address_at(call_target_address,
+ unoptimized_code) ==
isolate->builtins()->OnStackReplacement()->entry()) {
return ON_STACK_REPLACEMENT;
}
ASSERT_EQ(isolate->builtins()->OsrAfterStackCheck()->entry(),
- Assembler::target_address_at(call_target_address));
+ Assembler::target_address_at(call_target_address,
+ unoptimized_code));
return OSR_AFTER_STACK_CHECK;
}
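
Assembler::target_address_at and set_target_address_at now take the owning Code object as an extra argument. The x64 port has no use for it, but the shared interface presumably does; the likely reason is that other ports in this upgrade can place call targets in a per-code constant pool that can only be located through the code object, and that reason is an inference, not something this hunk states. What GetBackEdgeState itself decides is unchanged and easy to restate:

    if (*jns_instr_address == kJnsInstruction) return INTERRUPT;   // back edge still unpatched
    Address target = Assembler::target_address_at(call_target_address, unoptimized_code);
    if (target == isolate->builtins()->OnStackReplacement()->entry())
      return ON_STACK_REPLACEMENT;
    return OSR_AFTER_STACK_CHECK;                                  // the only remaining patched state
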
diff --git a/deps/v8/src/x64/ic-x64.cc b/deps/v8/src/x64/ic-x64.cc
index c76eca04d8..ea118d0763 100644
--- a/deps/v8/src/x64/ic-x64.cc
+++ b/deps/v8/src/x64/ic-x64.cc
@@ -212,7 +212,7 @@ static void GenerateDictionaryStore(MacroAssembler* masm,
// Store the value at the masked, scaled index.
const int kValueOffset = kElementsStartOffset + kPointerSize;
- __ lea(scratch1, Operand(elements,
+ __ leap(scratch1, Operand(elements,
scratch1,
times_pointer_size,
kValueOffset - kHeapObjectTag));
@@ -424,9 +424,9 @@ void KeyedLoadIC::GenerateGeneric(MacroAssembler* masm) {
__ shr(rcx, Immediate(KeyedLookupCache::kMapHashShift));
__ movl(rdi, FieldOperand(rax, String::kHashFieldOffset));
__ shr(rdi, Immediate(String::kHashShift));
- __ xor_(rcx, rdi);
+ __ xorp(rcx, rdi);
int mask = (KeyedLookupCache::kCapacityMask & KeyedLookupCache::kHashMask);
- __ and_(rcx, Immediate(mask));
+ __ andp(rcx, Immediate(mask));
// Load the key (consisting of map and internalized string) from the cache and
// check for match.
@@ -442,17 +442,17 @@ void KeyedLoadIC::GenerateGeneric(MacroAssembler* masm) {
__ shl(rdi, Immediate(kPointerSizeLog2 + 1));
__ LoadAddress(kScratchRegister, cache_keys);
int off = kPointerSize * i * 2;
- __ cmpq(rbx, Operand(kScratchRegister, rdi, times_1, off));
+ __ cmpp(rbx, Operand(kScratchRegister, rdi, times_1, off));
__ j(not_equal, &try_next_entry);
- __ cmpq(rax, Operand(kScratchRegister, rdi, times_1, off + kPointerSize));
+ __ cmpp(rax, Operand(kScratchRegister, rdi, times_1, off + kPointerSize));
__ j(equal, &hit_on_nth_entry[i]);
__ bind(&try_next_entry);
}
int off = kPointerSize * (kEntriesPerBucket - 1) * 2;
- __ cmpq(rbx, Operand(kScratchRegister, rdi, times_1, off));
+ __ cmpp(rbx, Operand(kScratchRegister, rdi, times_1, off));
__ j(not_equal, &slow);
- __ cmpq(rax, Operand(kScratchRegister, rdi, times_1, off + kPointerSize));
+ __ cmpp(rax, Operand(kScratchRegister, rdi, times_1, off + kPointerSize));
__ j(not_equal, &slow);
// Get field offset, which is a 32-bit integer.
@@ -467,8 +467,8 @@ void KeyedLoadIC::GenerateGeneric(MacroAssembler* masm) {
}
__ LoadAddress(kScratchRegister, cache_field_offsets);
__ movl(rdi, Operand(kScratchRegister, rcx, times_4, 0));
- __ movzxbq(rcx, FieldOperand(rbx, Map::kInObjectPropertiesOffset));
- __ subq(rdi, rcx);
+ __ movzxbp(rcx, FieldOperand(rbx, Map::kInObjectPropertiesOffset));
+ __ subp(rdi, rcx);
__ j(above_equal, &property_array_property);
if (i != 0) {
__ jmp(&load_in_object_property);
@@ -477,8 +477,8 @@ void KeyedLoadIC::GenerateGeneric(MacroAssembler* masm) {
// Load in-object property.
__ bind(&load_in_object_property);
- __ movzxbq(rcx, FieldOperand(rbx, Map::kInstanceSizeOffset));
- __ addq(rcx, rdi);
+ __ movzxbp(rcx, FieldOperand(rbx, Map::kInstanceSizeOffset));
+ __ addp(rcx, rdi);
__ movp(rax, FieldOperand(rdx, rcx, times_pointer_size, 0));
__ IncrementCounter(counters->keyed_load_generic_lookup_cache(), 1);
__ ret(0);
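
The KeyedLoadIC fast path above probes the keyed lookup cache with an index mixed from the receiver's map and the internalized name. A compressed restatement of the index computation and of what a hit buys, using the same constants; map_hash and name_hash are stand-in names for the values the code keeps in rcx and rdi:

    uint32_t LookupCacheIndex(uint32_t map_hash, uint32_t name_hash) {
      return ((map_hash >> KeyedLookupCache::kMapHashShift) ^
              (name_hash >> String::kHashShift)) &
             (KeyedLookupCache::kCapacityMask & KeyedLookupCache::kHashMask);
    }
    // Each bucket holds kEntriesPerBucket (map, name) key pairs. On a hit, the cached
    // field offset is fetched; offsets below the in-object property count are read
    // straight out of the object, the rest fall through to the property-array path.
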
@@ -571,8 +571,8 @@ void KeyedLoadIC::GenerateIndexedInterceptor(MacroAssembler* masm) {
// Everything is fine, call runtime.
__ PopReturnAddressTo(rcx);
- __ push(rdx); // receiver
- __ push(rax); // key
+ __ Push(rdx); // receiver
+ __ Push(rax); // key
__ PushReturnAddressFrom(rcx);
// Perform tail call to the entry.
@@ -734,7 +734,7 @@ static void KeyedStoreGenerateGenericHelper(
void KeyedStoreIC::GenerateGeneric(MacroAssembler* masm,
- StrictModeFlag strict_mode) {
+ StrictMode strict_mode) {
// ----------- S t a t e -------------
// -- rax : value
// -- rcx : key
@@ -852,14 +852,14 @@ static Operand GenerateMappedArgumentsLookup(MacroAssembler* masm,
// Load the elements into scratch1 and check its map. If not, jump
// to the unmapped lookup with the parameter map in scratch1.
- Handle<Map> arguments_map(heap->non_strict_arguments_elements_map());
+ Handle<Map> arguments_map(heap->sloppy_arguments_elements_map());
__ movp(scratch1, FieldOperand(object, JSObject::kElementsOffset));
__ CheckMap(scratch1, arguments_map, slow_case, DONT_DO_SMI_CHECK);
// Check if element is in the range of mapped arguments.
__ movp(scratch2, FieldOperand(scratch1, FixedArray::kLengthOffset));
__ SmiSubConstant(scratch2, scratch2, Smi::FromInt(2));
- __ cmpq(key, scratch2);
+ __ cmpp(key, scratch2);
__ j(greater_equal, unmapped_case);
// Load element index and check whether it is the hole.
@@ -899,7 +899,7 @@ static Operand GenerateUnmappedArgumentsLookup(MacroAssembler* masm,
Handle<Map> fixed_array_map(masm->isolate()->heap()->fixed_array_map());
__ CheckMap(backing_store, fixed_array_map, slow_case, DONT_DO_SMI_CHECK);
__ movp(scratch, FieldOperand(backing_store, FixedArray::kLengthOffset));
- __ cmpq(key, scratch);
+ __ cmpp(key, scratch);
__ j(greater_equal, slow_case);
__ SmiToInteger64(scratch, key);
return FieldOperand(backing_store,
@@ -909,7 +909,7 @@ static Operand GenerateUnmappedArgumentsLookup(MacroAssembler* masm,
}
-void KeyedLoadIC::GenerateNonStrictArguments(MacroAssembler* masm) {
+void KeyedLoadIC::GenerateSloppyArguments(MacroAssembler* masm) {
// ----------- S t a t e -------------
// -- rax : key
// -- rdx : receiver
@@ -934,7 +934,7 @@ void KeyedLoadIC::GenerateNonStrictArguments(MacroAssembler* masm) {
}
-void KeyedStoreIC::GenerateNonStrictArguments(MacroAssembler* masm) {
+void KeyedStoreIC::GenerateSloppyArguments(MacroAssembler* masm) {
// ----------- S t a t e -------------
// -- rax : value
// -- rcx : key
@@ -945,7 +945,7 @@ void KeyedStoreIC::GenerateNonStrictArguments(MacroAssembler* masm) {
Operand mapped_location = GenerateMappedArgumentsLookup(
masm, rdx, rcx, rbx, rdi, r8, &notin, &slow);
__ movp(mapped_location, rax);
- __ lea(r9, mapped_location);
+ __ leap(r9, mapped_location);
__ movp(r8, rax);
__ RecordWrite(rbx,
r9,
@@ -959,7 +959,7 @@ void KeyedStoreIC::GenerateNonStrictArguments(MacroAssembler* masm) {
Operand unmapped_location =
GenerateUnmappedArgumentsLookup(masm, rcx, rbx, rdi, &slow);
__ movp(unmapped_location, rax);
- __ lea(r9, unmapped_location);
+ __ leap(r9, unmapped_location);
__ movp(r8, rax);
__ RecordWrite(rbx,
r9,
@@ -973,8 +973,7 @@ void KeyedStoreIC::GenerateNonStrictArguments(MacroAssembler* masm) {
}
-void LoadIC::GenerateMegamorphic(MacroAssembler* masm,
- ExtraICState extra_state) {
+void LoadIC::GenerateMegamorphic(MacroAssembler* masm) {
// ----------- S t a t e -------------
// -- rax : receiver
// -- rcx : name
@@ -982,9 +981,7 @@ void LoadIC::GenerateMegamorphic(MacroAssembler* masm,
// -----------------------------------
// Probe the stub cache.
- Code::Flags flags = Code::ComputeFlags(
- Code::HANDLER, MONOMORPHIC, extra_state,
- Code::NORMAL, Code::LOAD_IC);
+ Code::Flags flags = Code::ComputeHandlerFlags(Code::LOAD_IC);
masm->isolate()->stub_cache()->GenerateProbe(
masm, flags, rax, rcx, rbx, rdx);
@@ -1024,8 +1021,8 @@ void LoadIC::GenerateMiss(MacroAssembler* masm) {
__ IncrementCounter(counters->load_miss(), 1);
__ PopReturnAddressTo(rbx);
- __ push(rax); // receiver
- __ push(rcx); // name
+ __ Push(rax); // receiver
+ __ Push(rcx); // name
__ PushReturnAddressFrom(rbx);
// Perform tail call to the entry.
@@ -1043,8 +1040,8 @@ void LoadIC::GenerateRuntimeGetProperty(MacroAssembler* masm) {
// -----------------------------------
__ PopReturnAddressTo(rbx);
- __ push(rax); // receiver
- __ push(rcx); // name
+ __ Push(rax); // receiver
+ __ Push(rcx); // name
__ PushReturnAddressFrom(rbx);
// Perform tail call to the entry.
@@ -1063,8 +1060,8 @@ void KeyedLoadIC::GenerateMiss(MacroAssembler* masm) {
__ IncrementCounter(counters->keyed_load_miss(), 1);
__ PopReturnAddressTo(rbx);
- __ push(rdx); // receiver
- __ push(rax); // name
+ __ Push(rdx); // receiver
+ __ Push(rax); // name
__ PushReturnAddressFrom(rbx);
// Perform tail call to the entry.
@@ -1082,8 +1079,8 @@ void KeyedLoadIC::GenerateRuntimeGetProperty(MacroAssembler* masm) {
// -----------------------------------
__ PopReturnAddressTo(rbx);
- __ push(rdx); // receiver
- __ push(rax); // name
+ __ Push(rdx); // receiver
+ __ Push(rax); // name
__ PushReturnAddressFrom(rbx);
// Perform tail call to the entry.
@@ -1091,8 +1088,7 @@ void KeyedLoadIC::GenerateRuntimeGetProperty(MacroAssembler* masm) {
}
-void StoreIC::GenerateMegamorphic(MacroAssembler* masm,
- ExtraICState extra_ic_state) {
+void StoreIC::GenerateMegamorphic(MacroAssembler* masm) {
// ----------- S t a t e -------------
// -- rax : value
// -- rcx : name
@@ -1101,9 +1097,7 @@ void StoreIC::GenerateMegamorphic(MacroAssembler* masm,
// -----------------------------------
// Get the receiver from the stack and probe the stub cache.
- Code::Flags flags = Code::ComputeFlags(
- Code::HANDLER, MONOMORPHIC, extra_ic_state,
- Code::NORMAL, Code::STORE_IC);
+ Code::Flags flags = Code::ComputeHandlerFlags(Code::STORE_IC);
masm->isolate()->stub_cache()->GenerateProbe(
masm, flags, rdx, rcx, rbx, no_reg);
@@ -1121,9 +1115,9 @@ void StoreIC::GenerateMiss(MacroAssembler* masm) {
// -----------------------------------
__ PopReturnAddressTo(rbx);
- __ push(rdx); // receiver
- __ push(rcx); // name
- __ push(rax); // value
+ __ Push(rdx); // receiver
+ __ Push(rcx); // name
+ __ Push(rax); // value
__ PushReturnAddressFrom(rbx);
// Perform tail call to the entry.
@@ -1157,7 +1151,7 @@ void StoreIC::GenerateNormal(MacroAssembler* masm) {
void StoreIC::GenerateRuntimeSetProperty(MacroAssembler* masm,
- StrictModeFlag strict_mode) {
+ StrictMode strict_mode) {
// ----------- S t a t e -------------
// -- rax : value
// -- rcx : name
@@ -1165,9 +1159,9 @@ void StoreIC::GenerateRuntimeSetProperty(MacroAssembler* masm,
// -- rsp[0] : return address
// -----------------------------------
__ PopReturnAddressTo(rbx);
- __ push(rdx);
- __ push(rcx);
- __ push(rax);
+ __ Push(rdx);
+ __ Push(rcx);
+ __ Push(rax);
__ Push(Smi::FromInt(NONE)); // PropertyAttributes
__ Push(Smi::FromInt(strict_mode));
__ PushReturnAddressFrom(rbx);
@@ -1178,7 +1172,7 @@ void StoreIC::GenerateRuntimeSetProperty(MacroAssembler* masm,
void KeyedStoreIC::GenerateRuntimeSetProperty(MacroAssembler* masm,
- StrictModeFlag strict_mode) {
+ StrictMode strict_mode) {
// ----------- S t a t e -------------
// -- rax : value
// -- rcx : key
@@ -1187,9 +1181,9 @@ void KeyedStoreIC::GenerateRuntimeSetProperty(MacroAssembler* masm,
// -----------------------------------
__ PopReturnAddressTo(rbx);
- __ push(rdx); // receiver
- __ push(rcx); // key
- __ push(rax); // value
+ __ Push(rdx); // receiver
+ __ Push(rcx); // key
+ __ Push(rax); // value
__ Push(Smi::FromInt(NONE)); // PropertyAttributes
__ Push(Smi::FromInt(strict_mode)); // Strict mode.
__ PushReturnAddressFrom(rbx);
@@ -1208,9 +1202,9 @@ void StoreIC::GenerateSlow(MacroAssembler* masm) {
// -----------------------------------
__ PopReturnAddressTo(rbx);
- __ push(rdx); // receiver
- __ push(rcx); // key
- __ push(rax); // value
+ __ Push(rdx); // receiver
+ __ Push(rcx); // key
+ __ Push(rax); // value
__ PushReturnAddressFrom(rbx);
// Do tail-call to runtime routine.
@@ -1228,9 +1222,9 @@ void KeyedStoreIC::GenerateSlow(MacroAssembler* masm) {
// -----------------------------------
__ PopReturnAddressTo(rbx);
- __ push(rdx); // receiver
- __ push(rcx); // key
- __ push(rax); // value
+ __ Push(rdx); // receiver
+ __ Push(rcx); // key
+ __ Push(rax); // value
__ PushReturnAddressFrom(rbx);
// Do tail-call to runtime routine.
@@ -1248,9 +1242,9 @@ void KeyedStoreIC::GenerateMiss(MacroAssembler* masm) {
// -----------------------------------
__ PopReturnAddressTo(rbx);
- __ push(rdx); // receiver
- __ push(rcx); // key
- __ push(rax); // value
+ __ Push(rdx); // receiver
+ __ Push(rcx); // key
+ __ Push(rax); // value
__ PushReturnAddressFrom(rbx);
// Do tail-call to runtime routine.
diff --git a/deps/v8/src/x64/lithium-codegen-x64.cc b/deps/v8/src/x64/lithium-codegen-x64.cc
index 2cb09325fd..894a4dd3a7 100644
--- a/deps/v8/src/x64/lithium-codegen-x64.cc
+++ b/deps/v8/src/x64/lithium-codegen-x64.cc
@@ -87,7 +87,7 @@ void LCodeGen::FinishCode(Handle<Code> code) {
ASSERT(is_done());
code->set_stack_slots(GetStackSlotCount());
code->set_safepoint_table_offset(safepoints_.GetCodeOffset());
- RegisterDependentCodeForEmbeddedMaps(code);
+ if (code->is_optimized_code()) RegisterWeakObjectsInOptimizedCode(code);
PopulateDeoptimizationData(code);
info()->CommitDependencies(code);
}
@@ -154,10 +154,10 @@ bool LCodeGen::GeneratePrologue() {
}
#endif
- // Classic mode functions need to replace the receiver with the global proxy
+ // Sloppy mode functions need to replace the receiver with the global proxy
// when called as functions (without an explicit receiver object).
if (info_->this_has_uses() &&
- info_->is_classic_mode() &&
+ info_->strict_mode() == SLOPPY &&
!info_->is_native()) {
Label ok;
StackArgumentsAccessor args(rsp, scope()->num_parameters());
@@ -187,11 +187,11 @@ bool LCodeGen::GeneratePrologue() {
int slots = GetStackSlotCount();
if (slots > 0) {
if (FLAG_debug_code) {
- __ subq(rsp, Immediate(slots * kPointerSize));
+ __ subp(rsp, Immediate(slots * kPointerSize));
#ifdef _MSC_VER
MakeSureStackPagesMapped(slots * kPointerSize);
#endif
- __ push(rax);
+ __ Push(rax);
__ Set(rax, slots);
__ movq(kScratchRegister, kSlotsZapValue);
Label loop;
@@ -200,9 +200,9 @@ bool LCodeGen::GeneratePrologue() {
kScratchRegister);
__ decl(rax);
__ j(not_zero, &loop);
- __ pop(rax);
+ __ Pop(rax);
} else {
- __ subq(rsp, Immediate(slots * kPointerSize));
+ __ subp(rsp, Immediate(slots * kPointerSize));
#ifdef _MSC_VER
MakeSureStackPagesMapped(slots * kPointerSize);
#endif
@@ -222,8 +222,8 @@ bool LCodeGen::GeneratePrologue() {
FastNewContextStub stub(heap_slots);
__ CallStub(&stub);
} else {
- __ push(rdi);
- __ CallRuntime(Runtime::kNewFunctionContext, 1);
+ __ Push(rdi);
+ __ CallRuntime(Runtime::kHiddenNewFunctionContext, 1);
}
RecordSafepoint(Safepoint::kNoLazyDeopt);
// Context is returned in rax. It replaces the context passed to us.
@@ -269,17 +269,36 @@ void LCodeGen::GenerateOsrPrologue() {
// optimized frame.
int slots = GetStackSlotCount() - graph()->osr()->UnoptimizedFrameSlots();
ASSERT(slots >= 0);
- __ subq(rsp, Immediate(slots * kPointerSize));
+ __ subp(rsp, Immediate(slots * kPointerSize));
}
void LCodeGen::GenerateBodyInstructionPre(LInstruction* instr) {
+ if (instr->IsCall()) {
+ EnsureSpaceForLazyDeopt(Deoptimizer::patch_size());
+ }
if (!instr->IsLazyBailout() && !instr->IsGap()) {
safepoints_.BumpLastLazySafepointIndex();
}
}
+void LCodeGen::GenerateBodyInstructionPost(LInstruction* instr) {
+ if (instr->HasResult() && instr->MustSignExtendResult(chunk())) {
+ if (instr->result()->IsRegister()) {
+ Register result_reg = ToRegister(instr->result());
+ __ movsxlq(result_reg, result_reg);
+ } else {
+ // Sign extend the 32bit result in the stack slots.
+ ASSERT(instr->result()->IsStackSlot());
+ Operand src = ToOperand(instr->result());
+ __ movsxlq(kScratchRegister, src);
+ __ movq(src, kScratchRegister);
+ }
+ }
+}
+
+
bool LCodeGen::GenerateJumpTable() {
Label needs_frame;
if (jump_table_.length() > 0) {
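
The new GenerateBodyInstructionPost hook is the counterpart of the code deleted further down that used to movsxlq dehoisted keys at every keyed load and store: an instruction whose result is produced with 32-bit operations but later consumed in 64-bit address arithmetic is now sign-extended once, right after it executes, whether the result sits in a register or a stack slot. The extension matters because 32-bit x64 operations zero the upper half of the destination, so a negative int32 would otherwise become a huge positive offset:

    int32_t key = -1;
    uint64_t zero_extended = static_cast<uint32_t>(key);  // 0x00000000FFFFFFFF, wrong as an offset
    int64_t  sign_extended = static_cast<int64_t>(key);   // 0xFFFFFFFFFFFFFFFF, what movsxlq yields

The Pre hook now also reserves lazy-deopt patch space ahead of every call-like instruction in one place.
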
@@ -303,15 +322,15 @@ bool LCodeGen::GenerateJumpTable() {
} else {
__ bind(&needs_frame);
__ movp(rsi, MemOperand(rbp, StandardFrameConstants::kContextOffset));
- __ push(rbp);
+ __ pushq(rbp);
__ movp(rbp, rsp);
- __ push(rsi);
+ __ Push(rsi);
// This variant of deopt can only be used with stubs. Since we don't
// have a function pointer to install in the stack frame that we're
// building, install a special marker there instead.
ASSERT(info()->IsStub());
__ Move(rsi, Smi::FromInt(StackFrame::STUB));
- __ push(rsi);
+ __ Push(rsi);
__ movp(rsi, MemOperand(rsp, kPointerSize));
__ call(kScratchRegister);
}
@@ -335,7 +354,8 @@ bool LCodeGen::GenerateDeferredCode() {
HValue* value =
instructions_->at(code->instruction_index())->hydrogen_value();
- RecordAndWritePosition(value->position());
+ RecordAndWritePosition(
+ chunk()->graph()->SourcePositionToScriptPosition(value->position()));
Comment(";;; <@%d,#%d> "
"-------------------- Deferred %s --------------------",
@@ -349,10 +369,10 @@ bool LCodeGen::GenerateDeferredCode() {
ASSERT(info()->IsStub());
frame_is_built_ = true;
// Build the frame in such a way that esi isn't trashed.
- __ push(rbp); // Caller's frame pointer.
- __ push(Operand(rbp, StandardFrameConstants::kContextOffset));
+ __ pushq(rbp); // Caller's frame pointer.
+ __ Push(Operand(rbp, StandardFrameConstants::kContextOffset));
__ Push(Smi::FromInt(StackFrame::STUB));
- __ lea(rbp, Operand(rsp, 2 * kPointerSize));
+ __ leap(rbp, Operand(rsp, 2 * kPointerSize));
Comment(";;; Deferred code");
}
code->Generate();
@@ -362,7 +382,7 @@ bool LCodeGen::GenerateDeferredCode() {
ASSERT(frame_is_built_);
frame_is_built_ = false;
__ movp(rsp, rbp);
- __ pop(rbp);
+ __ popq(rbp);
}
__ jmp(code->exit());
}
@@ -405,20 +425,18 @@ XMMRegister LCodeGen::ToDoubleRegister(LOperand* op) const {
bool LCodeGen::IsInteger32Constant(LConstantOperand* op) const {
- return op->IsConstantOperand() &&
- chunk_->LookupLiteralRepresentation(op).IsSmiOrInteger32();
+ return chunk_->LookupLiteralRepresentation(op).IsSmiOrInteger32();
}
-bool LCodeGen::IsSmiConstant(LConstantOperand* op) const {
+bool LCodeGen::IsDehoistedKeyConstant(LConstantOperand* op) const {
return op->IsConstantOperand() &&
- chunk_->LookupLiteralRepresentation(op).IsSmi();
+ chunk_->IsDehoistedKey(chunk_->LookupConstant(op));
}
-bool LCodeGen::IsTaggedConstant(LConstantOperand* op) const {
- return op->IsConstantOperand() &&
- chunk_->LookupLiteralRepresentation(op).IsTagged();
+bool LCodeGen::IsSmiConstant(LConstantOperand* op) const {
+ return chunk_->LookupLiteralRepresentation(op).IsSmi();
}
@@ -577,10 +595,6 @@ void LCodeGen::AddToTranslation(LEnvironment* environment,
}
} else if (op->IsDoubleStackSlot()) {
translation->StoreDoubleStackSlot(op->index());
- } else if (op->IsArgument()) {
- ASSERT(is_tagged);
- int src_index = GetStackSlotCount() + op->index();
- translation->StoreStackSlot(src_index);
} else if (op->IsRegister()) {
Register reg = ToRegister(op);
if (is_tagged) {
@@ -725,7 +739,7 @@ void LCodeGen::DeoptimizeIf(Condition cc,
ExternalReference count = ExternalReference::stress_deopt_count(isolate());
Label no_deopt;
__ pushfq();
- __ push(rax);
+ __ Push(rax);
Operand count_operand = masm()->ExternalOperand(count, kScratchRegister);
__ movl(rax, count_operand);
__ subl(rax, Immediate(1));
@@ -733,13 +747,13 @@ void LCodeGen::DeoptimizeIf(Condition cc,
if (FLAG_trap_on_deopt) __ int3();
__ movl(rax, Immediate(FLAG_deopt_every_n_times));
__ movl(count_operand, rax);
- __ pop(rax);
+ __ Pop(rax);
__ popfq();
ASSERT(frame_is_built_);
__ call(entry, RelocInfo::RUNTIME_ENTRY);
__ bind(&no_deopt);
__ movl(count_operand, rax);
- __ pop(rax);
+ __ Pop(rax);
__ popfq();
}
@@ -798,6 +812,14 @@ void LCodeGen::PopulateDeoptimizationData(Handle<Code> code) {
translations_.CreateByteArray(isolate()->factory());
data->SetTranslationByteArray(*translations);
data->SetInlinedFunctionCount(Smi::FromInt(inlined_function_count_));
+ data->SetOptimizationId(Smi::FromInt(info_->optimization_id()));
+ if (info_->IsOptimizing()) {
+ // Reference to shared function info does not change between phases.
+ AllowDeferredHandleDereference allow_handle_dereference;
+ data->SetSharedFunctionInfo(*info_->shared_info());
+ } else {
+ data->SetSharedFunctionInfo(Smi::FromInt(0));
+ }
Handle<FixedArray> literals =
factory()->NewFixedArray(deoptimization_literals_.length(), TENURED);
@@ -985,281 +1007,324 @@ void LCodeGen::DoUnknownOSRValue(LUnknownOSRValue* instr) {
}
-void LCodeGen::DoModI(LModI* instr) {
+void LCodeGen::DoModByPowerOf2I(LModByPowerOf2I* instr) {
+ Register dividend = ToRegister(instr->dividend());
+ int32_t divisor = instr->divisor();
+ ASSERT(dividend.is(ToRegister(instr->result())));
+
+ // Theoretically, a variation of the branch-free code for integer division by
+ // a power of 2 (calculating the remainder via an additional multiplication
+ // (which gets simplified to an 'and') and subtraction) should be faster, and
+ // this is exactly what GCC and clang emit. Nevertheless, benchmarks seem to
+ // indicate that positive dividends are heavily favored, so the branching
+ // version performs better.
HMod* hmod = instr->hydrogen();
- HValue* left = hmod->left();
- HValue* right = hmod->right();
- if (hmod->RightIsPowerOf2()) {
- // TODO(svenpanne) We should really do the strength reduction on the
- // Hydrogen level.
- Register left_reg = ToRegister(instr->left());
- ASSERT(left_reg.is(ToRegister(instr->result())));
-
- // Note: The code below even works when right contains kMinInt.
- int32_t divisor = Abs(right->GetInteger32Constant());
-
- Label left_is_not_negative, done;
- if (left->CanBeNegative()) {
- __ testl(left_reg, left_reg);
- __ j(not_sign, &left_is_not_negative, Label::kNear);
- __ negl(left_reg);
- __ andl(left_reg, Immediate(divisor - 1));
- __ negl(left_reg);
- if (hmod->CheckFlag(HValue::kBailoutOnMinusZero)) {
- DeoptimizeIf(zero, instr->environment());
- }
- __ jmp(&done, Label::kNear);
+ int32_t mask = divisor < 0 ? -(divisor + 1) : (divisor - 1);
+ Label dividend_is_not_negative, done;
+ if (hmod->CheckFlag(HValue::kLeftCanBeNegative)) {
+ __ testl(dividend, dividend);
+ __ j(not_sign, &dividend_is_not_negative, Label::kNear);
+ // Note that this is correct even for kMinInt operands.
+ __ negl(dividend);
+ __ andl(dividend, Immediate(mask));
+ __ negl(dividend);
+ if (hmod->CheckFlag(HValue::kBailoutOnMinusZero)) {
+ DeoptimizeIf(zero, instr->environment());
}
+ __ jmp(&done, Label::kNear);
+ }
- __ bind(&left_is_not_negative);
- __ andl(left_reg, Immediate(divisor - 1));
- __ bind(&done);
- } else {
- Register left_reg = ToRegister(instr->left());
- ASSERT(left_reg.is(rax));
- Register right_reg = ToRegister(instr->right());
- ASSERT(!right_reg.is(rax));
- ASSERT(!right_reg.is(rdx));
- Register result_reg = ToRegister(instr->result());
- ASSERT(result_reg.is(rdx));
+ __ bind(&dividend_is_not_negative);
+ __ andl(dividend, Immediate(mask));
+ __ bind(&done);
+}
- Label done;
- // Check for x % 0, idiv would signal a divide error. We have to
- // deopt in this case because we can't return a NaN.
- if (right->CanBeZero()) {
- __ testl(right_reg, right_reg);
- DeoptimizeIf(zero, instr->environment());
- }
- // Check for kMinInt % -1, idiv would signal a divide error. We
- // have to deopt if we care about -0, because we can't return that.
- if (left->RangeCanInclude(kMinInt) && right->RangeCanInclude(-1)) {
- Label no_overflow_possible;
- __ cmpl(left_reg, Immediate(kMinInt));
- __ j(not_zero, &no_overflow_possible, Label::kNear);
- __ cmpl(right_reg, Immediate(-1));
- if (hmod->CheckFlag(HValue::kBailoutOnMinusZero)) {
- DeoptimizeIf(equal, instr->environment());
- } else {
- __ j(not_equal, &no_overflow_possible, Label::kNear);
- __ Set(result_reg, 0);
- __ jmp(&done, Label::kNear);
- }
- __ bind(&no_overflow_possible);
- }
+void LCodeGen::DoModByConstI(LModByConstI* instr) {
+ Register dividend = ToRegister(instr->dividend());
+ int32_t divisor = instr->divisor();
+ ASSERT(ToRegister(instr->result()).is(rax));
- // Sign extend dividend in eax into edx:eax, since we are using only the low
- // 32 bits of the values.
- __ cdq();
-
- // If we care about -0, test if the dividend is <0 and the result is 0.
- if (left->CanBeNegative() &&
- hmod->CanBeZero() &&
- hmod->CheckFlag(HValue::kBailoutOnMinusZero)) {
- Label positive_left;
- __ testl(left_reg, left_reg);
- __ j(not_sign, &positive_left, Label::kNear);
- __ idivl(right_reg);
- __ testl(result_reg, result_reg);
- DeoptimizeIf(zero, instr->environment());
+ if (divisor == 0) {
+ DeoptimizeIf(no_condition, instr->environment());
+ return;
+ }
+
+ __ TruncatingDiv(dividend, Abs(divisor));
+ __ imull(rdx, rdx, Immediate(Abs(divisor)));
+ __ movl(rax, dividend);
+ __ subl(rax, rdx);
+
+ // Check for negative zero.
+ HMod* hmod = instr->hydrogen();
+ if (hmod->CheckFlag(HValue::kBailoutOnMinusZero)) {
+ Label remainder_not_zero;
+ __ j(not_zero, &remainder_not_zero, Label::kNear);
+ __ cmpl(dividend, Immediate(0));
+ DeoptimizeIf(less, instr->environment());
+ __ bind(&remainder_not_zero);
+ }
+}
+
+
+void LCodeGen::DoModI(LModI* instr) {
+ HMod* hmod = instr->hydrogen();
+
+ Register left_reg = ToRegister(instr->left());
+ ASSERT(left_reg.is(rax));
+ Register right_reg = ToRegister(instr->right());
+ ASSERT(!right_reg.is(rax));
+ ASSERT(!right_reg.is(rdx));
+ Register result_reg = ToRegister(instr->result());
+ ASSERT(result_reg.is(rdx));
+
+ Label done;
+ // Check for x % 0, idiv would signal a divide error. We have to
+ // deopt in this case because we can't return a NaN.
+ if (hmod->CheckFlag(HValue::kCanBeDivByZero)) {
+ __ testl(right_reg, right_reg);
+ DeoptimizeIf(zero, instr->environment());
+ }
+
+ // Check for kMinInt % -1, idiv would signal a divide error. We
+ // have to deopt if we care about -0, because we can't return that.
+ if (hmod->CheckFlag(HValue::kCanOverflow)) {
+ Label no_overflow_possible;
+ __ cmpl(left_reg, Immediate(kMinInt));
+ __ j(not_zero, &no_overflow_possible, Label::kNear);
+ __ cmpl(right_reg, Immediate(-1));
+ if (hmod->CheckFlag(HValue::kBailoutOnMinusZero)) {
+ DeoptimizeIf(equal, instr->environment());
+ } else {
+ __ j(not_equal, &no_overflow_possible, Label::kNear);
+ __ Set(result_reg, 0);
__ jmp(&done, Label::kNear);
- __ bind(&positive_left);
}
+ __ bind(&no_overflow_possible);
+ }
+
+ // Sign extend dividend in eax into edx:eax, since we are using only the low
+ // 32 bits of the values.
+ __ cdq();
+
+ // If we care about -0, test if the dividend is <0 and the result is 0.
+ if (hmod->CheckFlag(HValue::kBailoutOnMinusZero)) {
+ Label positive_left;
+ __ testl(left_reg, left_reg);
+ __ j(not_sign, &positive_left, Label::kNear);
__ idivl(right_reg);
- __ bind(&done);
+ __ testl(result_reg, result_reg);
+ DeoptimizeIf(zero, instr->environment());
+ __ jmp(&done, Label::kNear);
+ __ bind(&positive_left);
}
+ __ idivl(right_reg);
+ __ bind(&done);
}
-void LCodeGen::DoMathFloorOfDiv(LMathFloorOfDiv* instr) {
- ASSERT(instr->right()->IsConstantOperand());
-
- const Register dividend = ToRegister(instr->left());
- int32_t divisor = ToInteger32(LConstantOperand::cast(instr->right()));
- const Register result = ToRegister(instr->result());
+void LCodeGen::DoFlooringDivByPowerOf2I(LFlooringDivByPowerOf2I* instr) {
+ Register dividend = ToRegister(instr->dividend());
+ int32_t divisor = instr->divisor();
+ ASSERT(dividend.is(ToRegister(instr->result())));
- switch (divisor) {
- case 0:
- DeoptimizeIf(no_condition, instr->environment());
+ // If the divisor is positive, things are easy: There can be no deopts and we
+ // can simply do an arithmetic right shift.
+ if (divisor == 1) return;
+ int32_t shift = WhichPowerOf2Abs(divisor);
+ if (divisor > 1) {
+ __ sarl(dividend, Immediate(shift));
return;
+ }
- case 1:
- if (!result.is(dividend)) {
- __ movl(result, dividend);
+ // If the divisor is negative, we have to negate and handle edge cases.
+ Label not_kmin_int, done;
+ __ negl(dividend);
+ if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) {
+ DeoptimizeIf(zero, instr->environment());
+ }
+ if (instr->hydrogen()->CheckFlag(HValue::kLeftCanBeMinInt)) {
+ // Note that we could emit branch-free code, but that would need one more
+ // register.
+ __ j(no_overflow, &not_kmin_int, Label::kNear);
+ if (divisor == -1) {
+ DeoptimizeIf(no_condition, instr->environment());
+ } else {
+ __ movl(dividend, Immediate(kMinInt / divisor));
+ __ jmp(&done, Label::kNear);
}
- return;
+ }
+ __ bind(&not_kmin_int);
+ __ sarl(dividend, Immediate(shift));
+ __ bind(&done);
+}
- case -1:
- if (!result.is(dividend)) {
- __ movl(result, dividend);
- }
- __ negl(result);
- if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) {
- DeoptimizeIf(zero, instr->environment());
- }
- if (instr->hydrogen()->CheckFlag(HValue::kCanOverflow)) {
- DeoptimizeIf(overflow, instr->environment());
- }
+
+void LCodeGen::DoFlooringDivByConstI(LFlooringDivByConstI* instr) {
+ Register dividend = ToRegister(instr->dividend());
+ int32_t divisor = instr->divisor();
+ ASSERT(ToRegister(instr->result()).is(rdx));
+
+ if (divisor == 0) {
+ DeoptimizeIf(no_condition, instr->environment());
return;
}
- uint32_t divisor_abs = abs(divisor);
- if (IsPowerOf2(divisor_abs)) {
- int32_t power = WhichPowerOf2(divisor_abs);
- if (divisor < 0) {
- __ movsxlq(result, dividend);
- __ neg(result);
- if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) {
- DeoptimizeIf(zero, instr->environment());
- }
- __ sar(result, Immediate(power));
- } else {
- if (!result.is(dividend)) {
- __ movl(result, dividend);
- }
- __ sarl(result, Immediate(power));
- }
- } else {
- Register reg1 = ToRegister(instr->temp());
- Register reg2 = ToRegister(instr->result());
-
- // Find b which: 2^b < divisor_abs < 2^(b+1).
- unsigned b = 31 - CompilerIntrinsics::CountLeadingZeros(divisor_abs);
- unsigned shift = 32 + b; // Precision +1bit (effectively).
- double multiplier_f =
- static_cast<double>(static_cast<uint64_t>(1) << shift) / divisor_abs;
- int64_t multiplier;
- if (multiplier_f - std::floor(multiplier_f) < 0.5) {
- multiplier = static_cast<int64_t>(std::floor(multiplier_f));
- } else {
- multiplier = static_cast<int64_t>(std::floor(multiplier_f)) + 1;
- }
- // The multiplier is a uint32.
- ASSERT(multiplier > 0 &&
- multiplier < (static_cast<int64_t>(1) << 32));
- // The multiply is int64, so sign-extend to r64.
- __ movsxlq(reg1, dividend);
- if (divisor < 0 &&
- instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) {
- __ neg(reg1);
- DeoptimizeIf(zero, instr->environment());
- }
- __ Set(reg2, multiplier);
- // Result just fit in r64, because it's int32 * uint32.
- __ imul(reg2, reg1);
+ // Check for (0 / -x) that will produce negative zero.
+ HMathFloorOfDiv* hdiv = instr->hydrogen();
+ if (hdiv->CheckFlag(HValue::kBailoutOnMinusZero) && divisor < 0) {
+ __ testl(dividend, dividend);
+ DeoptimizeIf(zero, instr->environment());
+ }
- __ addq(reg2, Immediate(1 << 30));
- __ sar(reg2, Immediate(shift));
+ // Easy case: We need no dynamic check for the dividend and the flooring
+ // division is the same as the truncating division.
+ if ((divisor > 0 && !hdiv->CheckFlag(HValue::kLeftCanBeNegative)) ||
+ (divisor < 0 && !hdiv->CheckFlag(HValue::kLeftCanBePositive))) {
+ __ TruncatingDiv(dividend, Abs(divisor));
+ if (divisor < 0) __ negl(rdx);
+ return;
}
+
+ // In the general case we may need to adjust before and after the truncating
+ // division to get a flooring division.
+ Register temp = ToRegister(instr->temp3());
+ ASSERT(!temp.is(dividend) && !temp.is(rax) && !temp.is(rdx));
+ Label needs_adjustment, done;
+ __ cmpl(dividend, Immediate(0));
+ __ j(divisor > 0 ? less : greater, &needs_adjustment, Label::kNear);
+ __ TruncatingDiv(dividend, Abs(divisor));
+ if (divisor < 0) __ negl(rdx);
+ __ jmp(&done, Label::kNear);
+ __ bind(&needs_adjustment);
+ __ leal(temp, Operand(dividend, divisor > 0 ? 1 : -1));
+ __ TruncatingDiv(temp, Abs(divisor));
+ if (divisor < 0) __ negl(rdx);
+ __ decl(rdx);
+ __ bind(&done);
}
-void LCodeGen::DoDivI(LDivI* instr) {
- if (!instr->is_flooring() && instr->hydrogen()->RightIsPowerOf2()) {
- Register dividend = ToRegister(instr->left());
- int32_t divisor =
- HConstant::cast(instr->hydrogen()->right())->Integer32Value();
- int32_t test_value = 0;
- int32_t power = 0;
-
- if (divisor > 0) {
- test_value = divisor - 1;
- power = WhichPowerOf2(divisor);
- } else {
- // Check for (0 / -x) that will produce negative zero.
- if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) {
- __ testl(dividend, dividend);
- DeoptimizeIf(zero, instr->environment());
- }
- // Check for (kMinInt / -1).
- if (divisor == -1 && instr->hydrogen()->CheckFlag(HValue::kCanOverflow)) {
- __ cmpl(dividend, Immediate(kMinInt));
- DeoptimizeIf(zero, instr->environment());
- }
- test_value = - divisor - 1;
- power = WhichPowerOf2(-divisor);
- }
+void LCodeGen::DoDivByPowerOf2I(LDivByPowerOf2I* instr) {
+ Register dividend = ToRegister(instr->dividend());
+ int32_t divisor = instr->divisor();
+ Register result = ToRegister(instr->result());
+ ASSERT(divisor == kMinInt || (divisor != 0 && IsPowerOf2(Abs(divisor))));
+ ASSERT(!result.is(dividend));
+
+ // Check for (0 / -x) that will produce negative zero.
+ HDiv* hdiv = instr->hydrogen();
+ if (hdiv->CheckFlag(HValue::kBailoutOnMinusZero) && divisor < 0) {
+ __ testl(dividend, dividend);
+ DeoptimizeIf(zero, instr->environment());
+ }
+ // Check for (kMinInt / -1).
+ if (hdiv->CheckFlag(HValue::kCanOverflow) && divisor == -1) {
+ __ cmpl(dividend, Immediate(kMinInt));
+ DeoptimizeIf(zero, instr->environment());
+ }
+ // Deoptimize if remainder will not be 0.
+ if (!hdiv->CheckFlag(HInstruction::kAllUsesTruncatingToInt32) &&
+ divisor != 1 && divisor != -1) {
+ int32_t mask = divisor < 0 ? -(divisor + 1) : (divisor - 1);
+ __ testl(dividend, Immediate(mask));
+ DeoptimizeIf(not_zero, instr->environment());
+ }
+ __ Move(result, dividend);
+ int32_t shift = WhichPowerOf2Abs(divisor);
+ if (shift > 0) {
+ // The arithmetic shift is always OK, the 'if' is an optimization only.
+ if (shift > 1) __ sarl(result, Immediate(31));
+ __ shrl(result, Immediate(32 - shift));
+ __ addl(result, dividend);
+ __ sarl(result, Immediate(shift));
+ }
+ if (divisor < 0) __ negl(result);
+}
- if (test_value != 0) {
- if (instr->hydrogen()->CheckFlag(
- HInstruction::kAllUsesTruncatingToInt32)) {
- Label done, negative;
- __ cmpl(dividend, Immediate(0));
- __ j(less, &negative, Label::kNear);
- __ sarl(dividend, Immediate(power));
- if (divisor < 0) __ negl(dividend);
- __ jmp(&done, Label::kNear);
-
- __ bind(&negative);
- __ negl(dividend);
- __ sarl(dividend, Immediate(power));
- if (divisor > 0) __ negl(dividend);
- __ bind(&done);
- return; // Don't fall through to "__ neg" below.
- } else {
- // Deoptimize if remainder is not 0.
- __ testl(dividend, Immediate(test_value));
- DeoptimizeIf(not_zero, instr->environment());
- __ sarl(dividend, Immediate(power));
- }
- }
- if (divisor < 0) __ negl(dividend);
+void LCodeGen::DoDivByConstI(LDivByConstI* instr) {
+ Register dividend = ToRegister(instr->dividend());
+ int32_t divisor = instr->divisor();
+ ASSERT(ToRegister(instr->result()).is(rdx));
+ if (divisor == 0) {
+ DeoptimizeIf(no_condition, instr->environment());
return;
}
- LOperand* right = instr->right();
- ASSERT(ToRegister(instr->result()).is(rax));
- ASSERT(ToRegister(instr->left()).is(rax));
- ASSERT(!ToRegister(instr->right()).is(rax));
- ASSERT(!ToRegister(instr->right()).is(rdx));
+ // Check for (0 / -x) that will produce negative zero.
+ HDiv* hdiv = instr->hydrogen();
+ if (hdiv->CheckFlag(HValue::kBailoutOnMinusZero) && divisor < 0) {
+ __ testl(dividend, dividend);
+ DeoptimizeIf(zero, instr->environment());
+ }
+
+ __ TruncatingDiv(dividend, Abs(divisor));
+ if (divisor < 0) __ negp(rdx);
+
+ if (!hdiv->CheckFlag(HInstruction::kAllUsesTruncatingToInt32)) {
+ __ movl(rax, rdx);
+ __ imull(rax, rax, Immediate(divisor));
+ __ subl(rax, dividend);
+ DeoptimizeIf(not_equal, instr->environment());
+ }
+}
+
- Register left_reg = rax;
+void LCodeGen::DoDivI(LDivI* instr) {
+ HBinaryOperation* hdiv = instr->hydrogen();
+ Register dividend = ToRegister(instr->left());
+ Register divisor = ToRegister(instr->right());
+ Register remainder = ToRegister(instr->temp());
+ Register result = ToRegister(instr->result());
+ ASSERT(dividend.is(rax));
+ ASSERT(remainder.is(rdx));
+ ASSERT(result.is(rax));
+ ASSERT(!divisor.is(rax));
+ ASSERT(!divisor.is(rdx));
// Check for x / 0.
- Register right_reg = ToRegister(right);
- if (instr->hydrogen_value()->CheckFlag(HValue::kCanBeDivByZero)) {
- __ testl(right_reg, right_reg);
+ if (hdiv->CheckFlag(HValue::kCanBeDivByZero)) {
+ __ testl(divisor, divisor);
DeoptimizeIf(zero, instr->environment());
}
// Check for (0 / -x) that will produce negative zero.
- if (instr->hydrogen_value()->CheckFlag(HValue::kBailoutOnMinusZero)) {
- Label left_not_zero;
- __ testl(left_reg, left_reg);
- __ j(not_zero, &left_not_zero, Label::kNear);
- __ testl(right_reg, right_reg);
+ if (hdiv->CheckFlag(HValue::kBailoutOnMinusZero)) {
+ Label dividend_not_zero;
+ __ testl(dividend, dividend);
+ __ j(not_zero, &dividend_not_zero, Label::kNear);
+ __ testl(divisor, divisor);
DeoptimizeIf(sign, instr->environment());
- __ bind(&left_not_zero);
+ __ bind(&dividend_not_zero);
}
// Check for (kMinInt / -1).
- if (instr->hydrogen_value()->CheckFlag(HValue::kCanOverflow)) {
- Label left_not_min_int;
- __ cmpl(left_reg, Immediate(kMinInt));
- __ j(not_zero, &left_not_min_int, Label::kNear);
- __ cmpl(right_reg, Immediate(-1));
+ if (hdiv->CheckFlag(HValue::kCanOverflow)) {
+ Label dividend_not_min_int;
+ __ cmpl(dividend, Immediate(kMinInt));
+ __ j(not_zero, &dividend_not_min_int, Label::kNear);
+ __ cmpl(divisor, Immediate(-1));
DeoptimizeIf(zero, instr->environment());
- __ bind(&left_not_min_int);
+ __ bind(&dividend_not_min_int);
}
- // Sign extend to rdx.
+ // Sign extend to rdx (= remainder).
__ cdq();
- __ idivl(right_reg);
+ __ idivl(divisor);
- if (instr->is_flooring()) {
+ if (hdiv->IsMathFloorOfDiv()) {
Label done;
- __ testl(rdx, rdx);
+ __ testl(remainder, remainder);
__ j(zero, &done, Label::kNear);
- __ xorl(rdx, right_reg);
- __ sarl(rdx, Immediate(31));
- __ addl(rax, rdx);
+ __ xorl(remainder, divisor);
+ __ sarl(remainder, Immediate(31));
+ __ addl(result, remainder);
__ bind(&done);
- } else if (!instr->hydrogen()->CheckFlag(
- HInstruction::kAllUsesTruncatingToInt32)) {
+ } else if (!hdiv->CheckFlag(HValue::kAllUsesTruncatingToInt32)) {
// Deoptimize if remainder is not 0.
- __ testl(rdx, rdx);
+ __ testl(remainder, remainder);
DeoptimizeIf(not_zero, instr->environment());
}
}
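
The arithmetic rewrite above splits modulus and division by shape (ByPowerOf2I, ByConstI, and the generic idivl path) and routes the constant cases through a TruncatingDiv helper, i.e. the usual multiply-by-precomputed-magic-number replacement for division by a known constant. Two pieces of logic are easy to lose in the register choreography: modulus by a constant is rebuilt from the truncating quotient, and flooring division differs from truncating division only when the remainder is non-zero and its sign differs from the divisor's, which is exactly what the xorl/sarl/addl tail of DoDivI encodes. A scalar sketch, with truncating_div standing in for TruncatingDiv/idivl and no claim about the register assignment above:

    int32_t truncating_div(int32_t x, int32_t d);    // rounds toward zero, like idivl

    int32_t mod_by_const(int32_t x, int32_t d) {
      int32_t q = truncating_div(x, d);
      return x - q * d;                              // DoModByConstI: imull then subl
    }

    int32_t flooring_div(int32_t x, int32_t d) {
      int32_t q = truncating_div(x, d);
      int32_t r = x - q * d;
      if (r != 0 && ((r ^ d) < 0)) --q;              // emitted branch-free as xorl, sarl 31, addl
      return q;
    }

DoDivByPowerOf2I uses the related bias trick: add 2^shift - 1 to negative dividends before the arithmetic shift, so the shift rounds toward zero instead of toward minus infinity.
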
@@ -1323,14 +1388,14 @@ void LCodeGen::DoMulI(LMulI* instr) {
} else if (right->IsStackSlot()) {
if (instr->hydrogen_value()->representation().IsSmi()) {
__ SmiToInteger64(left, left);
- __ imul(left, ToOperand(right));
+ __ imulp(left, ToOperand(right));
} else {
__ imull(left, ToOperand(right));
}
} else {
if (instr->hydrogen_value()->representation().IsSmi()) {
__ SmiToInteger64(left, left);
- __ imul(left, ToRegister(right));
+ __ imulp(left, ToRegister(right));
} else {
__ imull(left, ToRegister(right));
}
@@ -1344,7 +1409,7 @@ void LCodeGen::DoMulI(LMulI* instr) {
// Bail out if the result is supposed to be negative zero.
Label done;
if (instr->hydrogen_value()->representation().IsSmi()) {
- __ testq(left, left);
+ __ testp(left, left);
} else {
__ testl(left, left);
}
@@ -1360,7 +1425,7 @@ void LCodeGen::DoMulI(LMulI* instr) {
}
} else if (right->IsStackSlot()) {
if (instr->hydrogen_value()->representation().IsSmi()) {
- __ or_(kScratchRegister, ToOperand(right));
+ __ orp(kScratchRegister, ToOperand(right));
} else {
__ orl(kScratchRegister, ToOperand(right));
}
@@ -1368,7 +1433,7 @@ void LCodeGen::DoMulI(LMulI* instr) {
} else {
// Test the non-zero operand for negative sign.
if (instr->hydrogen_value()->representation().IsSmi()) {
- __ or_(kScratchRegister, ToRegister(right));
+ __ orp(kScratchRegister, ToRegister(right));
} else {
__ orl(kScratchRegister, ToRegister(right));
}
@@ -1408,13 +1473,13 @@ void LCodeGen::DoBitI(LBitI* instr) {
} else if (right->IsStackSlot()) {
switch (instr->op()) {
case Token::BIT_AND:
- __ and_(ToRegister(left), ToOperand(right));
+ __ andp(ToRegister(left), ToOperand(right));
break;
case Token::BIT_OR:
- __ or_(ToRegister(left), ToOperand(right));
+ __ orp(ToRegister(left), ToOperand(right));
break;
case Token::BIT_XOR:
- __ xor_(ToRegister(left), ToOperand(right));
+ __ xorp(ToRegister(left), ToOperand(right));
break;
default:
UNREACHABLE();
@@ -1424,13 +1489,13 @@ void LCodeGen::DoBitI(LBitI* instr) {
ASSERT(right->IsRegister());
switch (instr->op()) {
case Token::BIT_AND:
- __ and_(ToRegister(left), ToRegister(right));
+ __ andp(ToRegister(left), ToRegister(right));
break;
case Token::BIT_OR:
- __ or_(ToRegister(left), ToRegister(right));
+ __ orp(ToRegister(left), ToRegister(right));
break;
case Token::BIT_XOR:
- __ xor_(ToRegister(left), ToRegister(right));
+ __ xorp(ToRegister(left), ToRegister(right));
break;
default:
UNREACHABLE();
@@ -1518,13 +1583,13 @@ void LCodeGen::DoSubI(LSubI* instr) {
Immediate(ToInteger32(LConstantOperand::cast(right))));
} else if (right->IsRegister()) {
if (instr->hydrogen_value()->representation().IsSmi()) {
- __ subq(ToRegister(left), ToRegister(right));
+ __ subp(ToRegister(left), ToRegister(right));
} else {
__ subl(ToRegister(left), ToRegister(right));
}
} else {
if (instr->hydrogen_value()->representation().IsSmi()) {
- __ subq(ToRegister(left), ToOperand(right));
+ __ subp(ToRegister(left), ToOperand(right));
} else {
__ subl(ToRegister(left), ToOperand(right));
}
@@ -1601,7 +1666,7 @@ void LCodeGen::DoDateField(LDateField* instr) {
ExternalReference stamp = ExternalReference::date_cache_stamp(isolate());
Operand stamp_operand = __ ExternalOperand(stamp);
__ movp(kScratchRegister, stamp_operand);
- __ cmpq(kScratchRegister, FieldOperand(object,
+ __ cmpp(kScratchRegister, FieldOperand(object,
JSDate::kCacheStampOffset));
__ j(not_equal, &runtime, Label::kNear);
__ movp(result, FieldOperand(object, JSDate::kValueOffset +
@@ -1642,17 +1707,17 @@ void LCodeGen::DoSeqStringGetChar(LSeqStringGetChar* instr) {
Register string = ToRegister(instr->string());
if (FLAG_debug_code) {
- __ push(string);
+ __ Push(string);
__ movp(string, FieldOperand(string, HeapObject::kMapOffset));
- __ movzxbq(string, FieldOperand(string, Map::kInstanceTypeOffset));
+ __ movzxbp(string, FieldOperand(string, Map::kInstanceTypeOffset));
__ andb(string, Immediate(kStringRepresentationMask | kStringEncodingMask));
static const uint32_t one_byte_seq_type = kSeqStringTag | kOneByteStringTag;
static const uint32_t two_byte_seq_type = kSeqStringTag | kTwoByteStringTag;
- __ cmpq(string, Immediate(encoding == String::ONE_BYTE_ENCODING
+ __ cmpp(string, Immediate(encoding == String::ONE_BYTE_ENCODING
? one_byte_seq_type : two_byte_seq_type));
__ Check(equal, kUnexpectedStringType);
- __ pop(string);
+ __ Pop(string);
}
Operand operand = BuildSeqStringOperand(string, instr->index(), encoding);
@@ -1706,44 +1771,44 @@ void LCodeGen::DoAddI(LAddI* instr) {
LOperand* right = instr->right();
Representation target_rep = instr->hydrogen()->representation();
- bool is_q = target_rep.IsSmi() || target_rep.IsExternal();
+ bool is_p = target_rep.IsSmi() || target_rep.IsExternal();
if (LAddI::UseLea(instr->hydrogen()) && !left->Equals(instr->result())) {
if (right->IsConstantOperand()) {
int32_t offset = ToInteger32(LConstantOperand::cast(right));
- if (is_q) {
- __ lea(ToRegister(instr->result()),
- MemOperand(ToRegister(left), offset));
+ if (is_p) {
+ __ leap(ToRegister(instr->result()),
+ MemOperand(ToRegister(left), offset));
} else {
__ leal(ToRegister(instr->result()),
MemOperand(ToRegister(left), offset));
}
} else {
Operand address(ToRegister(left), ToRegister(right), times_1, 0);
- if (is_q) {
- __ lea(ToRegister(instr->result()), address);
+ if (is_p) {
+ __ leap(ToRegister(instr->result()), address);
} else {
__ leal(ToRegister(instr->result()), address);
}
}
} else {
if (right->IsConstantOperand()) {
- if (is_q) {
- __ addq(ToRegister(left),
+ if (is_p) {
+ __ addp(ToRegister(left),
Immediate(ToInteger32(LConstantOperand::cast(right))));
} else {
__ addl(ToRegister(left),
Immediate(ToInteger32(LConstantOperand::cast(right))));
}
} else if (right->IsRegister()) {
- if (is_q) {
- __ addq(ToRegister(left), ToRegister(right));
+ if (is_p) {
+ __ addp(ToRegister(left), ToRegister(right));
} else {
__ addl(ToRegister(left), ToRegister(right));
}
} else {
- if (is_q) {
- __ addq(ToRegister(left), ToOperand(right));
+ if (is_p) {
+ __ addp(ToRegister(left), ToOperand(right));
} else {
__ addl(ToRegister(left), ToOperand(right));
}
@@ -1776,7 +1841,7 @@ void LCodeGen::DoMathMinMax(LMathMinMax* instr) {
} else if (right->IsRegister()) {
Register right_reg = ToRegister(right);
if (instr->hydrogen_value()->representation().IsSmi()) {
- __ cmpq(left_reg, right_reg);
+ __ cmpp(left_reg, right_reg);
} else {
__ cmpl(left_reg, right_reg);
}
@@ -1785,7 +1850,7 @@ void LCodeGen::DoMathMinMax(LMathMinMax* instr) {
} else {
Operand right_op = ToOperand(right);
if (instr->hydrogen_value()->representation().IsSmi()) {
- __ cmpq(left_reg, right_op);
+ __ cmpp(left_reg, right_op);
} else {
__ cmpl(left_reg, right_op);
}
@@ -1924,7 +1989,7 @@ void LCodeGen::DoBranch(LBranch* instr) {
} else if (r.IsSmi()) {
ASSERT(!info()->IsStub());
Register reg = ToRegister(instr->value());
- __ testq(reg, reg);
+ __ testp(reg, reg);
EmitBranch(instr, not_zero);
} else if (r.IsDouble()) {
ASSERT(!info()->IsStub());
@@ -1956,7 +2021,7 @@ void LCodeGen::DoBranch(LBranch* instr) {
EmitBranch(instr, not_equal);
} else if (type.IsString()) {
ASSERT(!info()->IsStub());
- __ cmpq(FieldOperand(reg, String::kLengthOffset), Immediate(0));
+ __ cmpp(FieldOperand(reg, String::kLengthOffset), Immediate(0));
EmitBranch(instr, not_equal);
} else {
ToBooleanStub::Types expected = instr->hydrogen()->expected_input_types();
@@ -2016,7 +2081,7 @@ void LCodeGen::DoBranch(LBranch* instr) {
Label not_string;
__ CmpInstanceType(map, FIRST_NONSTRING_TYPE);
__ j(above_equal, &not_string, Label::kNear);
- __ cmpq(FieldOperand(reg, String::kLengthOffset), Immediate(0));
+ __ cmpp(FieldOperand(reg, String::kLengthOffset), Immediate(0));
__ j(not_zero, instr->TrueLabel(chunk_));
__ jmp(instr->FalseLabel(chunk_));
__ bind(&not_string);
@@ -2139,9 +2204,9 @@ void LCodeGen::DoCompareNumericAndBranch(LCompareNumericAndBranch* instr) {
cc = ReverseCondition(cc);
} else if (instr->hydrogen_value()->representation().IsSmi()) {
if (right->IsRegister()) {
- __ cmpq(ToRegister(left), ToRegister(right));
+ __ cmpp(ToRegister(left), ToRegister(right));
} else {
- __ cmpq(ToRegister(left), ToOperand(right));
+ __ cmpp(ToRegister(left), ToOperand(right));
}
} else {
if (right->IsRegister()) {
@@ -2164,7 +2229,7 @@ void LCodeGen::DoCmpObjectEqAndBranch(LCmpObjectEqAndBranch* instr) {
__ Cmp(left, right);
} else {
Register right = ToRegister(instr->right());
- __ cmpq(left, right);
+ __ cmpp(left, right);
}
EmitBranch(instr, equal);
}
@@ -2182,9 +2247,9 @@ void LCodeGen::DoCmpHoleAndBranch(LCmpHoleAndBranch* instr) {
__ ucomisd(input_reg, input_reg);
EmitFalseBranch(instr, parity_odd);
- __ subq(rsp, Immediate(kDoubleSize));
+ __ subp(rsp, Immediate(kDoubleSize));
__ movsd(MemOperand(rsp, 0), input_reg);
- __ addq(rsp, Immediate(kDoubleSize));
+ __ addp(rsp, Immediate(kDoubleSize));
int offset = sizeof(kHoleNanUpper32);
__ cmpl(MemOperand(rsp, -offset), Immediate(kHoleNanUpper32));
@@ -2210,8 +2275,8 @@ void LCodeGen::DoCompareMinusZeroAndBranch(LCompareMinusZeroAndBranch* instr) {
Handle<Map> map = masm()->isolate()->factory()->heap_number_map();
__ CheckMap(value, map, instr->FalseLabel(chunk()), DO_SMI_CHECK);
__ cmpl(FieldOperand(value, HeapNumber::kExponentOffset),
- Immediate(0x80000000));
- EmitFalseBranch(instr, not_equal);
+ Immediate(0x1));
+ EmitFalseBranch(instr, no_overflow);
__ cmpl(FieldOperand(value, HeapNumber::kMantissaOffset),
Immediate(0x00000000));
EmitBranch(instr, equal);
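
The DoCompareMinusZeroAndBranch tweak above swaps an equality test of the heap number's upper word against 0x80000000 for cmpl(..., Immediate(0x1)) plus a branch on no_overflow. The two are equivalent because subtracting 1 sets the overflow flag for exactly one 32-bit value, INT_MIN (0x80000000), which is the upper word of -0.0; every other value takes the false branch via no_overflow. The payoff is encoding size, since 0x1 fits an 8-bit immediate while 0x80000000 needs the full 32-bit form. In scalar terms:

    // Returns true exactly when 'upper' is the high word of -0.0 (0x80000000).
    bool IsMinusZeroUpperWord(uint32_t upper) {
      // Old code: compare against 0x80000000 (32-bit immediate), branch on not_equal.
      // New code: cmpl(upper, 1) and branch on no_overflow; the subtraction overflows
      // only when upper == INT_MIN == 0x80000000. The mantissa word must still be 0.
      return upper == 0x80000000u;
    }
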
@@ -2318,7 +2383,7 @@ void LCodeGen::DoStringCompareAndBranch(LStringCompareAndBranch* instr) {
CallCode(ic, RelocInfo::CODE_TARGET, instr);
Condition condition = TokenToCondition(op, false);
- __ testq(rax, rax);
+ __ testp(rax, rax);
EmitBranch(instr, condition);
}
@@ -2411,8 +2476,8 @@ void LCodeGen::EmitClassOfTest(Label* is_true,
// actual type and do a signed compare with the width of the type range.
__ movp(temp, FieldOperand(input, HeapObject::kMapOffset));
__ movzxbl(temp2, FieldOperand(temp, Map::kInstanceTypeOffset));
- __ subq(temp2, Immediate(FIRST_NONCALLABLE_SPEC_OBJECT_TYPE));
- __ cmpq(temp2, Immediate(LAST_NONCALLABLE_SPEC_OBJECT_TYPE -
+ __ subp(temp2, Immediate(FIRST_NONCALLABLE_SPEC_OBJECT_TYPE));
+ __ cmpp(temp2, Immediate(LAST_NONCALLABLE_SPEC_OBJECT_TYPE -
FIRST_NONCALLABLE_SPEC_OBJECT_TYPE));
__ j(above, is_false);
}
@@ -2470,11 +2535,11 @@ void LCodeGen::DoCmpMapAndBranch(LCmpMapAndBranch* instr) {
void LCodeGen::DoInstanceOf(LInstanceOf* instr) {
ASSERT(ToRegister(instr->context()).is(rsi));
InstanceofStub stub(InstanceofStub::kNoFlags);
- __ push(ToRegister(instr->left()));
- __ push(ToRegister(instr->right()));
+ __ Push(ToRegister(instr->left()));
+ __ Push(ToRegister(instr->right()));
CallCode(stub.GetCode(isolate()), RelocInfo::CODE_TARGET, instr);
Label true_value, done;
- __ testq(rax, rax);
+ __ testp(rax, rax);
__ j(zero, &true_value, Label::kNear);
__ LoadRoot(ToRegister(instr->result()), Heap::kFalseValueRootIndex);
__ jmp(&done, Label::kNear);
@@ -2520,7 +2585,7 @@ void LCodeGen::DoInstanceOfKnownGlobal(LInstanceOfKnownGlobal* instr) {
__ bind(deferred->map_check()); // Label for calculating code patching.
Handle<Cell> cache_cell = factory()->NewCell(factory()->the_hole_value());
__ Move(kScratchRegister, cache_cell, RelocInfo::CELL);
- __ cmpq(map, Operand(kScratchRegister, 0));
+ __ cmpp(map, Operand(kScratchRegister, 0));
__ j(not_equal, &cache_miss, Label::kNear);
// Patched to load either true or false.
__ LoadRoot(ToRegister(instr->result()), Heap::kTheHoleValueRootIndex);
@@ -2557,14 +2622,14 @@ void LCodeGen::DoDeferredInstanceOfKnownGlobal(LInstanceOfKnownGlobal* instr,
InstanceofStub::kNoFlags | InstanceofStub::kCallSiteInlineCheck);
InstanceofStub stub(flags);
- __ push(ToRegister(instr->value()));
+ __ Push(ToRegister(instr->value()));
__ Push(instr->function());
static const int kAdditionalDelta = 10;
int delta =
masm_->SizeOfCodeGeneratedSince(map_check) + kAdditionalDelta;
ASSERT(delta >= 0);
- __ push_imm32(delta);
+ __ PushImm32(delta);
// We are pushing three values on the stack but recording a
// safepoint with two arguments because stub is going to
@@ -2582,7 +2647,7 @@ void LCodeGen::DoDeferredInstanceOfKnownGlobal(LInstanceOfKnownGlobal* instr,
// PushSafepointRegisterScope.
__ movp(kScratchRegister, rax);
}
- __ testq(kScratchRegister, kScratchRegister);
+ __ testp(kScratchRegister, kScratchRegister);
Label load_false;
Label done;
__ j(not_zero, &load_false, Label::kNear);
@@ -2603,7 +2668,7 @@ void LCodeGen::DoCmpT(LCmpT* instr) {
Condition condition = TokenToCondition(op, false);
Label true_value, done;
- __ testq(rax, rax);
+ __ testp(rax, rax);
__ j(condition, &true_value, Label::kNear);
__ LoadRoot(ToRegister(instr->result()), Heap::kFalseValueRootIndex);
__ jmp(&done, Label::kNear);
@@ -2619,7 +2684,7 @@ void LCodeGen::DoReturn(LReturn* instr) {
// to return the value in the same register. We're leaving the code
// managed by the register allocator and tearing down the frame, it's
// safe to write to the context register.
- __ push(rax);
+ __ Push(rax);
__ movp(rsi, Operand(rbp, StandardFrameConstants::kContextOffset));
__ CallRuntime(Runtime::kTraceExit, 1);
}
@@ -2629,7 +2694,7 @@ void LCodeGen::DoReturn(LReturn* instr) {
int no_frame_start = -1;
if (NeedsEagerFrame()) {
__ movp(rsp, rbp);
- __ pop(rbp);
+ __ popq(rbp);
no_frame_start = masm_->pc_offset();
}
if (instr->has_constant_parameter_count()) {
@@ -2642,7 +2707,7 @@ void LCodeGen::DoReturn(LReturn* instr) {
Register return_addr_reg = reg.is(rcx) ? rbx : rcx;
__ PopReturnAddressTo(return_addr_reg);
__ shl(reg, Immediate(kPointerSizeLog2));
- __ addq(rsp, reg);
+ __ addp(rsp, reg);
__ jmp(return_addr_reg);
}
if (no_frame_start != -1) {
@@ -2785,6 +2850,12 @@ void LCodeGen::DoLoadNamedField(LLoadNamedField* instr) {
Representation representation = access.representation();
if (representation.IsSmi() &&
instr->hydrogen()->representation().IsInteger32()) {
+#ifdef DEBUG
+ Register scratch = kScratchRegister;
+ __ Load(scratch, FieldOperand(object, offset), representation);
+ __ AssertSmi(scratch);
+#endif
+
// Read int value directly from upper half of the smi.
STATIC_ASSERT(kSmiTag == 0);
STATIC_ASSERT(kSmiTagSize + kSmiShiftSize == 32);
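
The DEBUG block added above asserts the assumption this fast path depends on: with kSmiTag == 0 and a 32-bit smi shift on x64, a smi field keeps its int32 payload in the upper half of the 64-bit word, so a load that only needs the Integer32 representation can read that upper half with a plain 32-bit move and skip untagging entirely. AssertSmi keeps the shortcut honest in debug builds. The encoding, spelled out:

    // x64 smi layout (kSmiTag == 0, kSmiTagSize + kSmiShiftSize == 32):
    //   bits 63..32  int32 value
    //   bits 31..0   zero
    int32_t value  = -7;
    int64_t tagged = static_cast<int64_t>(value) << 32;   // Integer32ToSmi
    int32_t loaded = static_cast<int32_t>(tagged >> 32);  // == value; the 32-bit high-half load
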
@@ -2861,9 +2932,13 @@ void LCodeGen::DoAccessArgumentsAt(LAccessArgumentsAt* instr) {
instr->index()->IsConstantOperand()) {
int32_t const_index = ToInteger32(LConstantOperand::cast(instr->index()));
int32_t const_length = ToInteger32(LConstantOperand::cast(instr->length()));
- StackArgumentsAccessor args(arguments, const_length,
- ARGUMENTS_DONT_CONTAIN_RECEIVER);
- __ movp(result, args.GetArgumentOperand(const_index));
+ if (const_index >= 0 && const_index < const_length) {
+ StackArgumentsAccessor args(arguments, const_length,
+ ARGUMENTS_DONT_CONTAIN_RECEIVER);
+ __ movp(result, args.GetArgumentOperand(const_index));
+ } else if (FLAG_debug_code) {
+ __ int3();
+ }
} else {
Register length = ToRegister(instr->length());
// There are two words between the frame pointer and the last argument.
@@ -2883,19 +2958,6 @@ void LCodeGen::DoAccessArgumentsAt(LAccessArgumentsAt* instr) {
void LCodeGen::DoLoadKeyedExternalArray(LLoadKeyed* instr) {
ElementsKind elements_kind = instr->elements_kind();
LOperand* key = instr->key();
- if (!key->IsConstantOperand()) {
- Register key_reg = ToRegister(key);
- // Even though the HLoad/StoreKeyed (in this case) instructions force
- // the input representation for the key to be an integer, the input
- // gets replaced during bound check elimination with the index argument
- // to the bounds check, which can be tagged, so that case must be
- // handled here, too.
- if (instr->hydrogen()->IsDehoisted()) {
- // Sign extend key because it could be a 32 bit negative value
- // and the dehoisted address computation happens in 64 bits
- __ movsxlq(key_reg, key_reg);
- }
- }
int base_offset = instr->is_fixed_typed_array()
? FixedTypedArrayBase::kDataOffset - kHeapObjectTag
: 0;
@@ -2925,7 +2987,7 @@ void LCodeGen::DoLoadKeyedExternalArray(LLoadKeyed* instr) {
case EXTERNAL_UINT8_CLAMPED_ELEMENTS:
case UINT8_ELEMENTS:
case UINT8_CLAMPED_ELEMENTS:
- __ movzxbq(result, operand);
+ __ movzxbp(result, operand);
break;
case EXTERNAL_INT16_ELEMENTS:
case INT16_ELEMENTS:
@@ -2933,7 +2995,7 @@ void LCodeGen::DoLoadKeyedExternalArray(LLoadKeyed* instr) {
break;
case EXTERNAL_UINT16_ELEMENTS:
case UINT16_ELEMENTS:
- __ movzxwq(result, operand);
+ __ movzxwp(result, operand);
break;
case EXTERNAL_INT32_ELEMENTS:
case INT32_ELEMENTS:
@@ -2958,7 +3020,7 @@ void LCodeGen::DoLoadKeyedExternalArray(LLoadKeyed* instr) {
case FAST_HOLEY_SMI_ELEMENTS:
case FAST_HOLEY_DOUBLE_ELEMENTS:
case DICTIONARY_ELEMENTS:
- case NON_STRICT_ARGUMENTS_ELEMENTS:
+ case SLOPPY_ARGUMENTS_ELEMENTS:
UNREACHABLE();
break;
}
@@ -2969,19 +3031,6 @@ void LCodeGen::DoLoadKeyedExternalArray(LLoadKeyed* instr) {
void LCodeGen::DoLoadKeyedFixedDoubleArray(LLoadKeyed* instr) {
XMMRegister result(ToDoubleRegister(instr->result()));
LOperand* key = instr->key();
- if (!key->IsConstantOperand()) {
- Register key_reg = ToRegister(key);
- // Even though the HLoad/StoreKeyed instructions force the input
- // representation for the key to be an integer, the input gets replaced
- // during bound check elimination with the index argument to the bounds
- // check, which can be tagged, so that case must be handled here, too.
- if (instr->hydrogen()->IsDehoisted()) {
- // Sign extend key because it could be a 32 bit negative value
- // and the dehoisted address computation happens in 64 bits
- __ movsxlq(key_reg, key_reg);
- }
- }
-
if (instr->hydrogen()->RequiresHoleCheck()) {
int offset = FixedDoubleArray::kHeaderSize - kHeapObjectTag +
sizeof(kHoleNanLower32);
@@ -3009,20 +3058,6 @@ void LCodeGen::DoLoadKeyedFixedArray(LLoadKeyed* instr) {
HLoadKeyed* hinstr = instr->hydrogen();
Register result = ToRegister(instr->result());
LOperand* key = instr->key();
- if (!key->IsConstantOperand()) {
- Register key_reg = ToRegister(key);
- // Even though the HLoad/StoreKeyedFastElement instructions force
- // the input representation for the key to be an integer, the input
- // gets replaced during bound check elimination with the index
- // argument to the bounds check, which can be tagged, so that
- // case must be handled here, too.
- if (hinstr->IsDehoisted()) {
- // Sign extend key because it could be a 32 bit negative value
- // and the dehoisted address computation happens in 64 bits
- __ movsxlq(key_reg, key_reg);
- }
- }
-
bool requires_hole_check = hinstr->RequiresHoleCheck();
int offset = FixedArray::kHeaderSize - kHeapObjectTag;
Representation representation = hinstr->representation();
@@ -3030,6 +3065,17 @@ void LCodeGen::DoLoadKeyedFixedArray(LLoadKeyed* instr) {
if (representation.IsInteger32() &&
hinstr->elements_kind() == FAST_SMI_ELEMENTS) {
ASSERT(!requires_hole_check);
+#ifdef DEBUG
+ Register scratch = kScratchRegister;
+ __ Load(scratch,
+ BuildFastArrayOperand(instr->elements(),
+ key,
+ FAST_ELEMENTS,
+ offset,
+ instr->additional_index()),
+ Representation::Smi());
+ __ AssertSmi(scratch);
+#endif
// Read int value directly from upper half of the smi.
STATIC_ASSERT(kSmiTag == 0);
STATIC_ASSERT(kSmiTagSize + kSmiShiftSize == 32);
@@ -3108,7 +3154,7 @@ void LCodeGen::DoArgumentsElements(LArgumentsElements* instr) {
Register result = ToRegister(instr->result());
if (instr->hydrogen()->from_inlined()) {
- __ lea(result, Operand(rsp, -kFPOnStackSize + -kPCOnStackSize));
+ __ leap(result, Operand(rsp, -kFPOnStackSize + -kPCOnStackSize));
} else {
// Check for arguments adapter frame.
Label done, adapted;
@@ -3139,9 +3185,9 @@ void LCodeGen::DoArgumentsLength(LArgumentsLength* instr) {
// If no arguments adaptor frame the number of arguments is fixed.
if (instr->elements()->IsRegister()) {
- __ cmpq(rbp, ToRegister(instr->elements()));
+ __ cmpp(rbp, ToRegister(instr->elements()));
} else {
- __ cmpq(rbp, ToOperand(instr->elements()));
+ __ cmpp(rbp, ToOperand(instr->elements()));
}
__ movl(result, Immediate(scope()->num_parameters()));
__ j(equal, &done, Label::kNear);
@@ -3221,10 +3267,10 @@ void LCodeGen::DoApplyArguments(LApplyArguments* instr) {
// Copy the arguments to this function possibly from the
// adaptor frame below it.
const uint32_t kArgumentsLimit = 1 * KB;
- __ cmpq(length, Immediate(kArgumentsLimit));
+ __ cmpp(length, Immediate(kArgumentsLimit));
DeoptimizeIf(above, instr->environment());
- __ push(receiver);
+ __ Push(receiver);
__ movp(receiver, length);
// Loop through the arguments pushing them onto the execution
@@ -3236,7 +3282,7 @@ void LCodeGen::DoApplyArguments(LApplyArguments* instr) {
__ bind(&loop);
StackArgumentsAccessor args(elements, length,
ARGUMENTS_DONT_CONTAIN_RECEIVER);
- __ push(args.GetArgumentOperand(0));
+ __ Push(args.GetArgumentOperand(0));
__ decl(length);
__ j(not_zero, &loop);
@@ -3281,10 +3327,10 @@ void LCodeGen::DoContext(LContext* instr) {
void LCodeGen::DoDeclareGlobals(LDeclareGlobals* instr) {
ASSERT(ToRegister(instr->context()).is(rsi));
- __ push(rsi); // The context is the first argument.
+ __ Push(rsi); // The context is the first argument.
__ Push(instr->hydrogen()->pairs());
__ Push(Smi::FromInt(instr->hydrogen()->flags()));
- CallRuntime(Runtime::kDeclareGlobals, 3, instr);
+ CallRuntime(Runtime::kHiddenDeclareGlobals, 3, instr);
}
@@ -3318,7 +3364,7 @@ void LCodeGen::CallKnownFunction(Handle<JSFunction> function,
if (function.is_identical_to(info()->closure())) {
__ CallSelf();
} else {
- __ call(FieldOperand(rdi, JSFunction::kCodeEntryOffset));
+ __ Call(FieldOperand(rdi, JSFunction::kCodeEntryOffset));
}
// Set up deoptimization.
@@ -3349,7 +3395,7 @@ void LCodeGen::DoCallWithDescriptor(LCallWithDescriptor* instr) {
ASSERT(instr->target()->IsRegister());
Register target = ToRegister(instr->target());
generator.BeforeCall(__ CallSize(target));
- __ addq(target, Immediate(Code::kHeaderSize - kHeapObjectTag));
+ __ addp(target, Immediate(Code::kHeaderSize - kHeapObjectTag));
__ call(target);
}
generator.AfterCall();
@@ -3383,7 +3429,7 @@ void LCodeGen::DoCallJSFunction(LCallJSFunction* instr) {
} else {
Operand target = FieldOperand(rdi, JSFunction::kCodeEntryOffset);
generator.BeforeCall(__ CallSize(target));
- __ call(target);
+ __ Call(target);
}
generator.AfterCall();
}
@@ -3416,7 +3462,7 @@ void LCodeGen::DoDeferredMathAbsTaggedHeapNumber(LMathAbs* instr) {
// Slow case: Call the runtime system to do the number allocation.
__ bind(&slow);
CallRuntimeFromDeferred(
- Runtime::kAllocateHeapNumber, 0, instr, instr->context());
+ Runtime::kHiddenAllocateHeapNumber, 0, instr, instr->context());
// Set the pointer to the new heap number in tmp.
if (!tmp.is(rax)) __ movp(tmp, rax);
// Restore input_reg after call to runtime.
@@ -3446,10 +3492,10 @@ void LCodeGen::EmitIntegerMathAbs(LMathAbs* instr) {
void LCodeGen::EmitSmiMathAbs(LMathAbs* instr) {
Register input_reg = ToRegister(instr->value());
- __ testq(input_reg, input_reg);
+ __ testp(input_reg, input_reg);
Label is_positive;
__ j(not_sign, &is_positive, Label::kNear);
- __ neg(input_reg); // Sets flags.
+ __ negp(input_reg); // Sets flags.
DeoptimizeIf(negative, instr->environment());
__ bind(&is_positive);
}
@@ -3509,8 +3555,8 @@ void LCodeGen::DoMathFloor(LMathFloor* instr) {
}
__ roundsd(xmm_scratch, input_reg, Assembler::kRoundDown);
__ cvttsd2si(output_reg, xmm_scratch);
- __ cmpl(output_reg, Immediate(0x80000000));
- DeoptimizeIf(equal, instr->environment());
+ __ cmpl(output_reg, Immediate(0x1));
+ DeoptimizeIf(overflow, instr->environment());
} else {
Label negative_sign, done;
// Deoptimize on unordered.
@@ -3534,8 +3580,8 @@ void LCodeGen::DoMathFloor(LMathFloor* instr) {
// Use truncating instruction (OK because input is positive).
__ cvttsd2si(output_reg, input_reg);
// Overflow is signalled with minint.
- __ cmpl(output_reg, Immediate(0x80000000));
- DeoptimizeIf(equal, instr->environment());
+ __ cmpl(output_reg, Immediate(0x1));
+ DeoptimizeIf(overflow, instr->environment());
__ jmp(&done, Label::kNear);
// Non-zero negative reaches here.
@@ -3572,9 +3618,9 @@ void LCodeGen::DoMathRound(LMathRound* instr) {
__ addsd(xmm_scratch, input_reg);
__ cvttsd2si(output_reg, xmm_scratch);
// Overflow is signalled with minint.
- __ cmpl(output_reg, Immediate(0x80000000));
+ __ cmpl(output_reg, Immediate(0x1));
__ RecordComment("D2I conversion overflow");
- DeoptimizeIf(equal, instr->environment());
+ DeoptimizeIf(overflow, instr->environment());
__ jmp(&done, dist);
__ bind(&below_one_half);
@@ -3589,9 +3635,9 @@ void LCodeGen::DoMathRound(LMathRound* instr) {
__ subsd(input_temp, xmm_scratch);
__ cvttsd2si(output_reg, input_temp);
// Catch minint due to overflow, and to prevent overflow when compensating.
- __ cmpl(output_reg, Immediate(0x80000000));
+ __ cmpl(output_reg, Immediate(0x1));
__ RecordComment("D2I conversion overflow");
- DeoptimizeIf(equal, instr->environment());
+ DeoptimizeIf(overflow, instr->environment());
__ Cvtlsi2sd(xmm_scratch, output_reg);
__ ucomisd(xmm_scratch, input_temp);
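Annotation, not part of the upstream patch: the D2I overflow checks above switch from comparing against 0x80000000 with DeoptimizeIf(equal) to cmpl(reg, Immediate(0x1)) with DeoptimizeIf(overflow). cvttsd2si reports a failed conversion by producing INT32_MIN, and the signed subtraction reg - 1 performed by cmp overflows exactly for that value, so the overflow flag identifies the sentinel while the immediate fits in a single byte. A small C++ check of that equivalence:

#include <cassert>
#include <cstdint>
#include <limits>

// True iff the signed 32-bit subtraction x - 1 overflows, i.e. iff the CPU
// would set OF for "cmp x, 1".
bool SubOneOverflows(int32_t x) {
  int64_t wide = static_cast<int64_t>(x) - 1;
  return wide < std::numeric_limits<int32_t>::min();
}

int main() {
  const int32_t kSentinel = std::numeric_limits<int32_t>::min();  // 0x80000000
  assert(SubOneOverflows(kSentinel));   // only the cvttsd2si failure value
  assert(!SubOneOverflows(0));          // ordinary results keep OF clear
  assert(!SubOneOverflows(-1));
  assert(!SubOneOverflows(std::numeric_limits<int32_t>::max()));
  return 0;
}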
@@ -3721,17 +3767,31 @@ void LCodeGen::DoMathLog(LMathLog* instr) {
__ jmp(&done, Label::kNear);
__ bind(&positive);
__ fldln2();
- __ subq(rsp, Immediate(kDoubleSize));
+ __ subp(rsp, Immediate(kDoubleSize));
__ movsd(Operand(rsp, 0), input_reg);
__ fld_d(Operand(rsp, 0));
__ fyl2x();
__ fstp_d(Operand(rsp, 0));
__ movsd(input_reg, Operand(rsp, 0));
- __ addq(rsp, Immediate(kDoubleSize));
+ __ addp(rsp, Immediate(kDoubleSize));
__ bind(&done);
}
+void LCodeGen::DoMathClz32(LMathClz32* instr) {
+ Register input = ToRegister(instr->value());
+ Register result = ToRegister(instr->result());
+ Label not_zero_input;
+ __ bsrl(result, input);
+
+ __ j(not_zero, &not_zero_input);
+ __ Set(result, 63); // 63^31 == 32
+
+ __ bind(&not_zero_input);
+ __ xorl(result, Immediate(31)); // for x in [0..31], 31^x == 31-x.
+}
+
+
void LCodeGen::DoInvokeFunction(LInvokeFunction* instr) {
ASSERT(ToRegister(instr->context()).is(rsi));
ASSERT(ToRegister(instr->function()).is(rdi));
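Annotation, not part of the upstream patch: DoMathClz32 above counts leading zeros with bsrl plus an XOR, using the identity 31 ^ x == 31 - x for any bit index x in [0..31] (x is always a subset of 31's bits), and seeding result with 63 for a zero input so that 63 ^ 31 == 32 == Math.clz32(0). A stand-alone C++ model of the same lowering:

#include <cassert>
#include <cstdint>

uint32_t Clz32LikeCodegen(uint32_t input) {
  uint32_t result;
  if (input == 0) {
    result = 63;  // __ Set(result, 63); 63 ^ 31 == 32
  } else {
    result = 31;  // index of the most significant set bit, as bsrl computes
    while (!(input & (1u << result))) --result;
  }
  return result ^ 31;  // __ xorl(result, Immediate(31))
}

int main() {
  assert(Clz32LikeCodegen(0) == 32);
  assert(Clz32LikeCodegen(1) == 31);
  assert(Clz32LikeCodegen(0x80000000u) == 0);
  assert(Clz32LikeCodegen(0x00010000u) == 15);
  return 0;
}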
@@ -3771,8 +3831,7 @@ void LCodeGen::DoCallNew(LCallNew* instr) {
__ Set(rax, instr->arity());
// No cell in ebx for construct type feedback in optimized code
- Handle<Object> undefined_value(isolate()->factory()->undefined_value());
- __ Move(rbx, undefined_value);
+ __ LoadRoot(rbx, Heap::kUndefinedValueRootIndex);
CallConstructStub stub(NO_CALL_FUNCTION_FLAGS);
CallCode(stub.GetCode(isolate()), RelocInfo::CONSTRUCT_CALL, instr);
}
@@ -3784,7 +3843,7 @@ void LCodeGen::DoCallNewArray(LCallNewArray* instr) {
ASSERT(ToRegister(instr->result()).is(rax));
__ Set(rax, instr->arity());
- __ Move(rbx, factory()->undefined_value());
+ __ LoadRoot(rbx, Heap::kUndefinedValueRootIndex);
ElementsKind kind = instr->hydrogen()->elements_kind();
AllocationSiteOverrideMode override_mode =
(AllocationSite::GetMode(kind) == TRACK_ALLOCATION_SITE)
@@ -3801,7 +3860,7 @@ void LCodeGen::DoCallNewArray(LCallNewArray* instr) {
// We might need a change here
// look at the first argument
__ movp(rcx, Operand(rsp, 0));
- __ testq(rcx, rcx);
+ __ testp(rcx, rcx);
__ j(zero, &packed_case, Label::kNear);
ElementsKind holey_kind = GetHoleyElementsKind(kind);
@@ -3830,7 +3889,7 @@ void LCodeGen::DoCallRuntime(LCallRuntime* instr) {
void LCodeGen::DoStoreCodeEntry(LStoreCodeEntry* instr) {
Register function = ToRegister(instr->function());
Register code_object = ToRegister(instr->code_object());
- __ lea(code_object, FieldOperand(code_object, Code::kHeaderSize));
+ __ leap(code_object, FieldOperand(code_object, Code::kHeaderSize));
__ movp(FieldOperand(function, JSFunction::kCodeEntryOffset), code_object);
}
@@ -3840,10 +3899,10 @@ void LCodeGen::DoInnerAllocatedObject(LInnerAllocatedObject* instr) {
Register base = ToRegister(instr->base_object());
if (instr->offset()->IsConstantOperand()) {
LConstantOperand* offset = LConstantOperand::cast(instr->offset());
- __ lea(result, Operand(base, ToInteger32(offset)));
+ __ leap(result, Operand(base, ToInteger32(offset)));
} else {
Register offset = ToRegister(instr->offset());
- __ lea(result, Operand(base, offset, times_1, 0));
+ __ leap(result, Operand(base, offset, times_1, 0));
}
}
@@ -3860,7 +3919,6 @@ void LCodeGen::DoStoreNamedField(LStoreNamedField* instr) {
Register value = ToRegister(instr->value());
if (instr->object()->IsConstantOperand()) {
ASSERT(value.is(rax));
- ASSERT(!access.representation().IsSpecialization());
LConstantOperand* object = LConstantOperand::cast(instr->object());
__ store_rax(ToExternalReference(object));
} else {
@@ -3872,19 +3930,16 @@ void LCodeGen::DoStoreNamedField(LStoreNamedField* instr) {
Register object = ToRegister(instr->object());
Handle<Map> transition = instr->transition();
+ SmiCheck check_needed = hinstr->value()->IsHeapObject()
+ ? OMIT_SMI_CHECK : INLINE_SMI_CHECK;
- if (FLAG_track_fields && representation.IsSmi()) {
+ ASSERT(!(representation.IsSmi() &&
+ instr->value()->IsConstantOperand() &&
+ !IsInteger32Constant(LConstantOperand::cast(instr->value()))));
+ if (representation.IsHeapObject()) {
if (instr->value()->IsConstantOperand()) {
LConstantOperand* operand_value = LConstantOperand::cast(instr->value());
- if (!IsInteger32Constant(operand_value) &&
- !IsSmiConstant(operand_value)) {
- DeoptimizeIf(no_condition, instr->environment());
- }
- }
- } else if (FLAG_track_heap_object_fields && representation.IsHeapObject()) {
- if (instr->value()->IsConstantOperand()) {
- LConstantOperand* operand_value = LConstantOperand::cast(instr->value());
- if (IsInteger32Constant(operand_value)) {
+ if (chunk_->LookupConstant(operand_value)->HasSmiValue()) {
DeoptimizeIf(no_condition, instr->environment());
}
} else {
@@ -3892,6 +3947,9 @@ void LCodeGen::DoStoreNamedField(LStoreNamedField* instr) {
Register value = ToRegister(instr->value());
Condition cc = masm()->CheckSmi(value);
DeoptimizeIf(cc, instr->environment());
+
+ // We know that value is a smi now, so we can omit the check below.
+ check_needed = OMIT_SMI_CHECK;
}
}
} else if (representation.IsDouble()) {
@@ -3922,9 +3980,6 @@ void LCodeGen::DoStoreNamedField(LStoreNamedField* instr) {
}
// Do the store.
- SmiCheck check_needed = hinstr->value()->IsHeapObject()
- ? OMIT_SMI_CHECK : INLINE_SMI_CHECK;
-
Register write_register = object;
if (!access.IsInobject()) {
write_register = ToRegister(instr->temp());
@@ -3934,6 +3989,11 @@ void LCodeGen::DoStoreNamedField(LStoreNamedField* instr) {
if (representation.IsSmi() &&
hinstr->value()->representation().IsInteger32()) {
ASSERT(hinstr->store_mode() == STORE_TO_INITIALIZED_ENTRY);
+#ifdef DEBUG
+ Register scratch = kScratchRegister;
+ __ Load(scratch, FieldOperand(write_register, offset), representation);
+ __ AssertSmi(scratch);
+#endif
// Store int value directly to upper half of the smi.
STATIC_ASSERT(kSmiTag == 0);
STATIC_ASSERT(kSmiTagSize + kSmiShiftSize == 32);
@@ -3986,8 +4046,7 @@ void LCodeGen::DoStoreNamedGeneric(LStoreNamedGeneric* instr) {
ASSERT(ToRegister(instr->value()).is(rax));
__ Move(rcx, instr->hydrogen()->name());
- Handle<Code> ic = StoreIC::initialize_stub(isolate(),
- instr->strict_mode_flag());
+ Handle<Code> ic = StoreIC::initialize_stub(isolate(), instr->strict_mode());
CallCode(ic, RelocInfo::CODE_TARGET, instr);
}
@@ -4026,7 +4085,7 @@ void LCodeGen::DoBoundsCheck(LBoundsCheck* instr) {
} else {
Register reg2 = ToRegister(instr->index());
if (representation.IsSmi()) {
- __ cmpq(reg, reg2);
+ __ cmpp(reg, reg2);
} else {
__ cmpl(reg, reg2);
}
@@ -4043,7 +4102,7 @@ void LCodeGen::DoBoundsCheck(LBoundsCheck* instr) {
}
} else {
if (representation.IsSmi()) {
- __ cmpq(length, ToRegister(instr->index()));
+ __ cmpp(length, ToRegister(instr->index()));
} else {
__ cmpl(length, ToRegister(instr->index()));
}
@@ -4057,19 +4116,6 @@ void LCodeGen::DoBoundsCheck(LBoundsCheck* instr) {
void LCodeGen::DoStoreKeyedExternalArray(LStoreKeyed* instr) {
ElementsKind elements_kind = instr->elements_kind();
LOperand* key = instr->key();
- if (!key->IsConstantOperand()) {
- Register key_reg = ToRegister(key);
- // Even though the HLoad/StoreKeyedFastElement instructions force
- // the input representation for the key to be an integer, the input
- // gets replaced during bound check elimination with the index
- // argument to the bounds check, which can be tagged, so that case
- // must be handled here, too.
- if (instr->hydrogen()->IsDehoisted()) {
- // Sign extend key because it could be a 32 bit negative value
- // and the dehoisted address computation happens in 64 bits
- __ movsxlq(key_reg, key_reg);
- }
- }
int base_offset = instr->is_fixed_typed_array()
? FixedTypedArrayBase::kDataOffset - kHeapObjectTag
: 0;
@@ -4122,7 +4168,7 @@ void LCodeGen::DoStoreKeyedExternalArray(LStoreKeyed* instr) {
case FAST_HOLEY_SMI_ELEMENTS:
case FAST_HOLEY_DOUBLE_ELEMENTS:
case DICTIONARY_ELEMENTS:
- case NON_STRICT_ARGUMENTS_ELEMENTS:
+ case SLOPPY_ARGUMENTS_ELEMENTS:
UNREACHABLE();
break;
}
@@ -4133,20 +4179,6 @@ void LCodeGen::DoStoreKeyedExternalArray(LStoreKeyed* instr) {
void LCodeGen::DoStoreKeyedFixedDoubleArray(LStoreKeyed* instr) {
XMMRegister value = ToDoubleRegister(instr->value());
LOperand* key = instr->key();
- if (!key->IsConstantOperand()) {
- Register key_reg = ToRegister(key);
- // Even though the HLoad/StoreKeyedFastElement instructions force
- // the input representation for the key to be an integer, the
- // input gets replaced during bound check elimination with the index
- // argument to the bounds check, which can be tagged, so that case
- // must be handled here, too.
- if (instr->hydrogen()->IsDehoisted()) {
- // Sign extend key because it could be a 32 bit negative value
- // and the dehoisted address computation happens in 64 bits
- __ movsxlq(key_reg, key_reg);
- }
- }
-
if (instr->NeedsCanonicalization()) {
Label have_value;
@@ -4174,26 +4206,23 @@ void LCodeGen::DoStoreKeyedFixedDoubleArray(LStoreKeyed* instr) {
void LCodeGen::DoStoreKeyedFixedArray(LStoreKeyed* instr) {
HStoreKeyed* hinstr = instr->hydrogen();
LOperand* key = instr->key();
- if (!key->IsConstantOperand()) {
- Register key_reg = ToRegister(key);
- // Even though the HLoad/StoreKeyedFastElement instructions force
- // the input representation for the key to be an integer, the
- // input gets replaced during bound check elimination with the index
- // argument to the bounds check, which can be tagged, so that case
- // must be handled here, too.
- if (hinstr->IsDehoisted()) {
- // Sign extend key because it could be a 32 bit negative value
- // and the dehoisted address computation happens in 64 bits
- __ movsxlq(key_reg, key_reg);
- }
- }
-
int offset = FixedArray::kHeaderSize - kHeapObjectTag;
Representation representation = hinstr->value()->representation();
if (representation.IsInteger32()) {
ASSERT(hinstr->store_mode() == STORE_TO_INITIALIZED_ENTRY);
ASSERT(hinstr->elements_kind() == FAST_SMI_ELEMENTS);
+#ifdef DEBUG
+ Register scratch = kScratchRegister;
+ __ Load(scratch,
+ BuildFastArrayOperand(instr->elements(),
+ key,
+ FAST_ELEMENTS,
+ offset,
+ instr->additional_index()),
+ Representation::Smi());
+ __ AssertSmi(scratch);
+#endif
// Store int value directly to upper half of the smi.
STATIC_ASSERT(kSmiTag == 0);
STATIC_ASSERT(kSmiTagSize + kSmiShiftSize == 32);
@@ -4234,7 +4263,7 @@ void LCodeGen::DoStoreKeyedFixedArray(LStoreKeyed* instr) {
? OMIT_SMI_CHECK : INLINE_SMI_CHECK;
// Compute address of modified element and store it into key register.
Register key_reg(ToRegister(key));
- __ lea(key_reg, operand);
+ __ leap(key_reg, operand);
__ RecordWrite(elements,
key_reg,
value,
@@ -4262,7 +4291,7 @@ void LCodeGen::DoStoreKeyedGeneric(LStoreKeyedGeneric* instr) {
ASSERT(ToRegister(instr->key()).is(rcx));
ASSERT(ToRegister(instr->value()).is(rax));
- Handle<Code> ic = (instr->strict_mode_flag() == kStrictMode)
+ Handle<Code> ic = instr->strict_mode() == STRICT
? isolate()->builtins()->KeyedStoreIC_Initialize_Strict()
: isolate()->builtins()->KeyedStoreIC_Initialize();
CallCode(ic, RelocInfo::CODE_TARGET, instr);
@@ -4360,7 +4389,7 @@ void LCodeGen::DoDeferredStringCharCodeAt(LStringCharCodeAt* instr) {
__ Set(result, 0);
PushSafepointRegistersScope scope(this);
- __ push(string);
+ __ Push(string);
// Push the index as a smi. This is safe because of the checks in
// DoStringCharCodeAt above.
STATIC_ASSERT(String::kMaxLength <= Smi::kMaxValue);
@@ -4370,10 +4399,10 @@ void LCodeGen::DoDeferredStringCharCodeAt(LStringCharCodeAt* instr) {
} else {
Register index = ToRegister(instr->index());
__ Integer32ToSmi(index, index);
- __ push(index);
+ __ Push(index);
}
CallRuntimeFromDeferred(
- Runtime::kStringCharCodeAt, 2, instr, instr->context());
+ Runtime::kHiddenStringCharCodeAt, 2, instr, instr->context());
__ AssertSmi(rax);
__ SmiToInteger32(rax, rax);
__ StoreToSafepointRegisterSlot(result, rax);
@@ -4425,7 +4454,7 @@ void LCodeGen::DoDeferredStringCharFromCode(LStringCharFromCode* instr) {
PushSafepointRegistersScope scope(this);
__ Integer32ToSmi(char_code, char_code);
- __ push(char_code);
+ __ Push(char_code);
CallRuntimeFromDeferred(Runtime::kCharFromCode, 1, instr, instr->context());
__ StoreToSafepointRegisterSlot(result, rax);
}
@@ -4444,18 +4473,6 @@ void LCodeGen::DoInteger32ToDouble(LInteger32ToDouble* instr) {
}
-void LCodeGen::DoInteger32ToSmi(LInteger32ToSmi* instr) {
- LOperand* input = instr->value();
- ASSERT(input->IsRegister());
- LOperand* output = instr->result();
- __ Integer32ToSmi(ToRegister(output), ToRegister(input));
- if (!instr->hydrogen()->value()->HasRange() ||
- !instr->hydrogen()->value()->range()->IsInSmiRange()) {
- DeoptimizeIf(overflow, instr->environment());
- }
-}
-
-
void LCodeGen::DoUint32ToDouble(LUint32ToDouble* instr) {
LOperand* input = instr->value();
LOperand* output = instr->result();
@@ -4467,22 +4484,6 @@ void LCodeGen::DoUint32ToDouble(LUint32ToDouble* instr) {
}
-void LCodeGen::DoUint32ToSmi(LUint32ToSmi* instr) {
- LOperand* input = instr->value();
- ASSERT(input->IsRegister());
- LOperand* output = instr->result();
- if (!instr->hydrogen()->value()->HasRange() ||
- !instr->hydrogen()->value()->range()->IsInSmiRange() ||
- instr->hydrogen()->value()->range()->upper() == kMaxInt) {
- // The Range class can't express upper bounds in the (kMaxInt, kMaxUint32]
- // interval, so we treat kMaxInt as a sentinel for this entire interval.
- __ testl(ToRegister(input), Immediate(0x80000000));
- DeoptimizeIf(not_zero, instr->environment());
- }
- __ Integer32ToSmi(ToRegister(output), ToRegister(input));
-}
-
-
void LCodeGen::DoNumberTagI(LNumberTagI* instr) {
LOperand* input = instr->value();
ASSERT(input->IsRegister() && input->Equals(instr->result()));
@@ -4518,15 +4519,11 @@ void LCodeGen::DoNumberTagU(LNumberTagU* instr) {
void LCodeGen::DoDeferredNumberTagU(LNumberTagU* instr) {
- Label slow;
+ Label done, slow;
Register reg = ToRegister(instr->value());
- Register tmp = reg.is(rax) ? rcx : rax;
- XMMRegister temp_xmm = ToDoubleRegister(instr->temp());
-
- // Preserve the value of all registers.
- PushSafepointRegistersScope scope(this);
+ Register tmp = ToRegister(instr->temp1());
+ XMMRegister temp_xmm = ToDoubleRegister(instr->temp2());
- Label done;
// Load value into temp_xmm which will be preserved across potential call to
// runtime (MacroAssembler::EnterExitFrameEpilogue preserves only allocatable
// XMM registers on x64).
@@ -4540,29 +4537,31 @@ void LCodeGen::DoDeferredNumberTagU(LNumberTagU* instr) {
// Slow case: Call the runtime system to do the number allocation.
__ bind(&slow);
+ {
+ // Put a valid pointer value in the stack slot where the result
+ // register is stored, as this register is in the pointer map, but contains
+ // an integer value.
+ __ Set(reg, 0);
- // Put a valid pointer value in the stack slot where the result
- // register is stored, as this register is in the pointer map, but contains an
- // integer value.
- __ StoreToSafepointRegisterSlot(reg, Immediate(0));
-
- // NumberTagU uses the context from the frame, rather than
- // the environment's HContext or HInlinedContext value.
- // They only call Runtime::kAllocateHeapNumber.
- // The corresponding HChange instructions are added in a phase that does
- // not have easy access to the local context.
- __ movp(rsi, Operand(rbp, StandardFrameConstants::kContextOffset));
- __ CallRuntimeSaveDoubles(Runtime::kAllocateHeapNumber);
- RecordSafepointWithRegisters(
- instr->pointer_map(), 0, Safepoint::kNoLazyDeopt);
+ // Preserve the value of all registers.
+ PushSafepointRegistersScope scope(this);
- if (!reg.is(rax)) __ movp(reg, rax);
+ // NumberTagU uses the context from the frame, rather than
+ // the environment's HContext or HInlinedContext value.
+ // They only call Runtime::kHiddenAllocateHeapNumber.
+ // The corresponding HChange instructions are added in a phase that does
+ // not have easy access to the local context.
+ __ movp(rsi, Operand(rbp, StandardFrameConstants::kContextOffset));
+ __ CallRuntimeSaveDoubles(Runtime::kHiddenAllocateHeapNumber);
+ RecordSafepointWithRegisters(
+ instr->pointer_map(), 0, Safepoint::kNoLazyDeopt);
+ __ StoreToSafepointRegisterSlot(reg, rax);
+ }
// Done. Put the value in temp_xmm into the value of the allocated heap
// number.
__ bind(&done);
__ movsd(FieldOperand(reg, HeapNumber::kValueOffset), temp_xmm);
- __ StoreToSafepointRegisterSlot(reg, reg);
}
@@ -4605,11 +4604,11 @@ void LCodeGen::DoDeferredNumberTagD(LNumberTagD* instr) {
PushSafepointRegistersScope scope(this);
// NumberTagD uses the context from the frame, rather than
// the environment's HContext or HInlinedContext value.
- // They only call Runtime::kAllocateHeapNumber.
+ // They only call Runtime::kHiddenAllocateHeapNumber.
// The corresponding HChange instructions are added in a phase that does
// not have easy access to the local context.
__ movp(rsi, Operand(rbp, StandardFrameConstants::kContextOffset));
- __ CallRuntimeSaveDoubles(Runtime::kAllocateHeapNumber);
+ __ CallRuntimeSaveDoubles(Runtime::kHiddenAllocateHeapNumber);
RecordSafepointWithRegisters(
instr->pointer_map(), 0, Safepoint::kNoLazyDeopt);
__ movp(kScratchRegister, rax);
@@ -4619,10 +4618,19 @@ void LCodeGen::DoDeferredNumberTagD(LNumberTagD* instr) {
void LCodeGen::DoSmiTag(LSmiTag* instr) {
- ASSERT(instr->value()->Equals(instr->result()));
+ HChange* hchange = instr->hydrogen();
Register input = ToRegister(instr->value());
- ASSERT(!instr->hydrogen_value()->CheckFlag(HValue::kCanOverflow));
- __ Integer32ToSmi(input, input);
+ Register output = ToRegister(instr->result());
+ if (hchange->CheckFlag(HValue::kCanOverflow) &&
+ hchange->value()->CheckFlag(HValue::kUint32)) {
+ __ testl(input, input);
+ DeoptimizeIf(sign, instr->environment());
+ }
+ __ Integer32ToSmi(output, input);
+ if (hchange->CheckFlag(HValue::kCanOverflow) &&
+ !hchange->value()->CheckFlag(HValue::kUint32)) {
+ DeoptimizeIf(overflow, instr->environment());
+ }
}
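Annotation, not part of the upstream patch: in the rewritten DoSmiTag above, the uint32 path tests the sign bit before tagging because a uint32 value of 2^31 or more has no int32 smi encoding; tagging it anyway would make it read back negative. A tiny C++ sketch of that admissibility check:

#include <cassert>
#include <cstdint>

bool CanSmiTagUint32(uint32_t value) {
  // testl(input, input) + DeoptimizeIf(sign): reject values whose sign bit
  // is set once they are reinterpreted as a 32-bit signed integer.
  return static_cast<int32_t>(value) >= 0;
}

int main() {
  assert(CanSmiTagUint32(0));
  assert(CanSmiTagUint32(0x7FFFFFFFu));   // INT32_MAX still fits
  assert(!CanSmiTagUint32(0x80000000u));  // 2^31 would read back as -2^31
  assert(!CanSmiTagUint32(0xFFFFFFFFu));
  return 0;
}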
@@ -4916,13 +4924,13 @@ void LCodeGen::DoCheckValue(LCheckValue* instr) {
void LCodeGen::DoDeferredInstanceMigration(LCheckMaps* instr, Register object) {
{
PushSafepointRegistersScope scope(this);
- __ push(object);
+ __ Push(object);
__ Set(rsi, 0);
__ CallRuntimeSaveDoubles(Runtime::kTryMigrateInstance);
RecordSafepointWithRegisters(
instr->pointer_map(), 1, Safepoint::kNoLazyDeopt);
- __ testq(rax, Immediate(kSmiTagMask));
+ __ testp(rax, Immediate(kSmiTagMask));
}
DeoptimizeIf(zero, instr->environment());
}
@@ -5011,7 +5019,7 @@ void LCodeGen::DoClampTToUint8(LClampTToUint8* instr) {
// conversions.
__ Cmp(input_reg, factory()->undefined_value());
DeoptimizeIf(not_equal, instr->environment());
- __ movp(input_reg, Immediate(0));
+ __ xorl(input_reg, input_reg);
__ jmp(&done, Label::kNear);
// Heap number
@@ -5029,6 +5037,30 @@ void LCodeGen::DoClampTToUint8(LClampTToUint8* instr) {
}
+void LCodeGen::DoDoubleBits(LDoubleBits* instr) {
+ XMMRegister value_reg = ToDoubleRegister(instr->value());
+ Register result_reg = ToRegister(instr->result());
+ if (instr->hydrogen()->bits() == HDoubleBits::HIGH) {
+ __ movq(result_reg, value_reg);
+ __ shr(result_reg, Immediate(32));
+ } else {
+ __ movd(result_reg, value_reg);
+ }
+}
+
+
+void LCodeGen::DoConstructDouble(LConstructDouble* instr) {
+ Register hi_reg = ToRegister(instr->hi());
+ Register lo_reg = ToRegister(instr->lo());
+ XMMRegister result_reg = ToDoubleRegister(instr->result());
+ XMMRegister xmm_scratch = double_scratch0();
+ __ movd(result_reg, hi_reg);
+ __ psllq(result_reg, 32);
+ __ movd(xmm_scratch, lo_reg);
+ __ orps(result_reg, xmm_scratch);
+}
+
+
void LCodeGen::DoAllocate(LAllocate* instr) {
class DeferredAllocate V8_FINAL : public LDeferredCode {
public:
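Annotation, not part of the upstream patch: the new DoDoubleBits and DoConstructDouble handlers above split a double into the high and low 32 bits of its IEEE-754 encoding and reassemble one from two halves. The same computation in portable C++, assuming only that doubles are 64-bit IEEE-754:

#include <cassert>
#include <cstdint>
#include <cstring>

uint32_t DoubleHighBits(double value) {
  uint64_t bits;
  std::memcpy(&bits, &value, sizeof(bits));
  return static_cast<uint32_t>(bits >> 32);  // movq result, value; shr 32
}

uint32_t DoubleLowBits(double value) {
  uint64_t bits;
  std::memcpy(&bits, &value, sizeof(bits));
  return static_cast<uint32_t>(bits);        // movd keeps only the low half
}

double ConstructDouble(uint32_t hi, uint32_t lo) {
  uint64_t bits = (static_cast<uint64_t>(hi) << 32) | lo;  // psllq 32 + orps
  double value;
  std::memcpy(&value, &bits, sizeof(value));
  return value;
}

int main() {
  const double d = -123.456;
  assert(ConstructDouble(DoubleHighBits(d), DoubleLowBits(d)) == d);
  assert(DoubleHighBits(1.0) == 0x3FF00000u);
  assert(DoubleLowBits(1.0) == 0u);
  return 0;
}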
@@ -5108,7 +5140,7 @@ void LCodeGen::DoDeferredAllocate(LAllocate* instr) {
Register size = ToRegister(instr->size());
ASSERT(!size.is(result));
__ Integer32ToSmi(size, size);
- __ push(size);
+ __ Push(size);
} else {
int32_t size = ToInteger32(LConstantOperand::cast(instr->size()));
__ Push(Smi::FromInt(size));
@@ -5128,14 +5160,14 @@ void LCodeGen::DoDeferredAllocate(LAllocate* instr) {
__ Push(Smi::FromInt(flags));
CallRuntimeFromDeferred(
- Runtime::kAllocateInTargetSpace, 2, instr, instr->context());
+ Runtime::kHiddenAllocateInTargetSpace, 2, instr, instr->context());
__ StoreToSafepointRegisterSlot(result, rax);
}
void LCodeGen::DoToFastProperties(LToFastProperties* instr) {
ASSERT(ToRegister(instr->value()).is(rax));
- __ push(rax);
+ __ Push(rax);
CallRuntime(Runtime::kToFastProperties, 1, instr);
}
@@ -5156,11 +5188,11 @@ void LCodeGen::DoRegExpLiteral(LRegExpLiteral* instr) {
// Create regexp literal using runtime function
// Result will be in rax.
- __ push(rcx);
+ __ Push(rcx);
__ Push(Smi::FromInt(instr->hydrogen()->literal_index()));
__ Push(instr->hydrogen()->pattern());
__ Push(instr->hydrogen()->flags());
- CallRuntime(Runtime::kMaterializeRegExpLiteral, 4, instr);
+ CallRuntime(Runtime::kHiddenMaterializeRegExpLiteral, 4, instr);
__ movp(rbx, rax);
__ bind(&materialized);
@@ -5170,10 +5202,10 @@ void LCodeGen::DoRegExpLiteral(LRegExpLiteral* instr) {
__ jmp(&allocated, Label::kNear);
__ bind(&runtime_allocate);
- __ push(rbx);
+ __ Push(rbx);
__ Push(Smi::FromInt(size));
- CallRuntime(Runtime::kAllocateInNewSpace, 1, instr);
- __ pop(rbx);
+ CallRuntime(Runtime::kHiddenAllocateInNewSpace, 1, instr);
+ __ Pop(rbx);
__ bind(&allocated);
// Copy the content into the newly allocated memory.
@@ -5197,16 +5229,16 @@ void LCodeGen::DoFunctionLiteral(LFunctionLiteral* instr) {
// space for nested functions that don't need literals cloning.
bool pretenure = instr->hydrogen()->pretenure();
if (!pretenure && instr->hydrogen()->has_no_literals()) {
- FastNewClosureStub stub(instr->hydrogen()->language_mode(),
+ FastNewClosureStub stub(instr->hydrogen()->strict_mode(),
instr->hydrogen()->is_generator());
__ Move(rbx, instr->hydrogen()->shared_info());
CallCode(stub.GetCode(isolate()), RelocInfo::CODE_TARGET, instr);
} else {
- __ push(rsi);
+ __ Push(rsi);
__ Push(instr->hydrogen()->shared_info());
__ PushRoot(pretenure ? Heap::kTrueValueRootIndex :
Heap::kFalseValueRootIndex);
- CallRuntime(Runtime::kNewClosure, 3, instr);
+ CallRuntime(Runtime::kHiddenNewClosure, 3, instr);
}
}
@@ -5224,9 +5256,9 @@ void LCodeGen::EmitPushTaggedOperand(LOperand* operand) {
if (operand->IsConstantOperand()) {
__ Push(ToHandle(LConstantOperand::cast(operand)));
} else if (operand->IsRegister()) {
- __ push(ToRegister(operand));
+ __ Push(ToRegister(operand));
} else {
- __ push(ToOperand(operand));
+ __ Push(ToOperand(operand));
}
}
@@ -5365,7 +5397,7 @@ void LCodeGen::EnsureSpaceForLazyDeopt(int space_needed) {
void LCodeGen::DoLazyBailout(LLazyBailout* instr) {
- EnsureSpaceForLazyDeopt(Deoptimizer::patch_size());
+ last_lazy_deopt_pc_ = masm()->pc_offset();
ASSERT(instr->HasEnvironment());
LEnvironment* env = instr->environment();
RegisterEnvironmentForDeoptimization(env, Safepoint::kLazyDeopt);
@@ -5401,7 +5433,7 @@ void LCodeGen::DoDummyUse(LDummyUse* instr) {
void LCodeGen::DoDeferredStackCheck(LStackCheck* instr) {
PushSafepointRegistersScope scope(this);
__ movp(rsi, Operand(rbp, StandardFrameConstants::kContextOffset));
- __ CallRuntimeSaveDoubles(Runtime::kStackGuard);
+ __ CallRuntimeSaveDoubles(Runtime::kHiddenStackGuard);
RecordSafepointWithLazyDeopt(instr, RECORD_SAFEPOINT_WITH_REGISTERS, 0);
ASSERT(instr->HasEnvironment());
LEnvironment* env = instr->environment();
@@ -5437,10 +5469,7 @@ void LCodeGen::DoStackCheck(LStackCheck* instr) {
CallCode(isolate()->builtins()->StackCheck(),
RelocInfo::CODE_TARGET,
instr);
- EnsureSpaceForLazyDeopt(Deoptimizer::patch_size());
__ bind(&done);
- RegisterEnvironmentForDeoptimization(env, Safepoint::kLazyDeopt);
- safepoints_.RecordLazyDeoptimizationIndex(env->deoptimization_index());
} else {
ASSERT(instr->hydrogen()->is_backwards_branch());
// Perform stack overflow check if this goto needs it before jumping.
@@ -5481,7 +5510,7 @@ void LCodeGen::DoForInPrepareMap(LForInPrepareMap* instr) {
Register null_value = rdi;
__ LoadRoot(null_value, Heap::kNullValueRootIndex);
- __ cmpq(rax, null_value);
+ __ cmpp(rax, null_value);
DeoptimizeIf(equal, instr->environment());
Condition cc = masm()->CheckSmi(rax);
@@ -5499,7 +5528,7 @@ void LCodeGen::DoForInPrepareMap(LForInPrepareMap* instr) {
// Get the set of properties to enumerate.
__ bind(&call_runtime);
- __ push(rax);
+ __ Push(rax);
CallRuntime(Runtime::kGetPropertyNamesFast, 1, instr);
__ CompareRoot(FieldOperand(rax, HeapObject::kMapOffset),
@@ -5532,7 +5561,7 @@ void LCodeGen::DoForInCacheArray(LForInCacheArray* instr) {
void LCodeGen::DoCheckMapValue(LCheckMapValue* instr) {
Register object = ToRegister(instr->value());
- __ cmpq(ToRegister(instr->map()),
+ __ cmpp(ToRegister(instr->map()),
FieldOperand(object, HeapObject::kMapOffset));
DeoptimizeIf(not_equal, instr->environment());
}
diff --git a/deps/v8/src/x64/lithium-codegen-x64.h b/deps/v8/src/x64/lithium-codegen-x64.h
index 431f77b234..37807ede0d 100644
--- a/deps/v8/src/x64/lithium-codegen-x64.h
+++ b/deps/v8/src/x64/lithium-codegen-x64.h
@@ -86,12 +86,12 @@ class LCodeGen: public LCodeGenBase {
Register ToRegister(LOperand* op) const;
XMMRegister ToDoubleRegister(LOperand* op) const;
bool IsInteger32Constant(LConstantOperand* op) const;
+ bool IsDehoistedKeyConstant(LConstantOperand* op) const;
bool IsSmiConstant(LConstantOperand* op) const;
int32_t ToInteger32(LConstantOperand* op) const;
Smi* ToSmi(LConstantOperand* op) const;
double ToDouble(LConstantOperand* op) const;
ExternalReference ToExternalReference(LConstantOperand* op) const;
- bool IsTaggedConstant(LConstantOperand* op) const;
Handle<Object> ToHandle(LConstantOperand* op) const;
Operand ToOperand(LOperand* op) const;
@@ -130,9 +130,7 @@ class LCodeGen: public LCodeGenBase {
#undef DECLARE_DO
private:
- StrictModeFlag strict_mode_flag() const {
- return info()->is_classic_mode() ? kNonStrictMode : kStrictMode;
- }
+ StrictMode strict_mode() const { return info()->strict_mode(); }
LPlatformChunk* chunk() const { return chunk_; }
Scope* scope() const { return scope_; }
@@ -160,6 +158,7 @@ class LCodeGen: public LCodeGenBase {
// Code generation passes. Returns true if code generation should
// continue.
void GenerateBodyInstructionPre(LInstruction* instr) V8_OVERRIDE;
+ void GenerateBodyInstructionPost(LInstruction* instr) V8_OVERRIDE;
bool GeneratePrologue();
bool GenerateDeferredCode();
bool GenerateJumpTable();
diff --git a/deps/v8/src/x64/lithium-gap-resolver-x64.cc b/deps/v8/src/x64/lithium-gap-resolver-x64.cc
index c3bfd9e612..7c7fc29e03 100644
--- a/deps/v8/src/x64/lithium-gap-resolver-x64.cc
+++ b/deps/v8/src/x64/lithium-gap-resolver-x64.cc
@@ -198,7 +198,14 @@ void LGapResolver::EmitMove(int index) {
if (cgen_->IsSmiConstant(constant_source)) {
__ Move(dst, cgen_->ToSmi(constant_source));
} else if (cgen_->IsInteger32Constant(constant_source)) {
- __ Set(dst, static_cast<uint32_t>(cgen_->ToInteger32(constant_source)));
+ int32_t constant = cgen_->ToInteger32(constant_source);
+ // Do sign extension only for constant used as de-hoisted array key.
+ // Others only need zero extension, which saves 2 bytes.
+ if (cgen_->IsDehoistedKeyConstant(constant_source)) {
+ __ Set(dst, constant);
+ } else {
+ __ Set(dst, static_cast<uint32_t>(constant));
+ }
} else {
__ Move(dst, cgen_->ToHandle(constant_source));
}
@@ -218,8 +225,7 @@ void LGapResolver::EmitMove(int index) {
if (cgen_->IsSmiConstant(constant_source)) {
__ Move(dst, cgen_->ToSmi(constant_source));
} else if (cgen_->IsInteger32Constant(constant_source)) {
- // Zero top 32 bits of a 64 bit spill slot that holds a 32 bit untagged
- // value.
+ // Do sign extension to 64 bits when stored into stack slot.
__ movp(dst, Immediate(cgen_->ToInteger32(constant_source)));
} else {
__ Move(kScratchRegister, cgen_->ToHandle(constant_source));
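Annotation, not part of the upstream patch: the gap-resolver changes above single out constants that serve as de-hoisted array keys, because those take part in 64-bit address arithmetic and therefore need the full sign-extended 64-bit value, while everything else can use the two-bytes-shorter zero-extending form. A small C++ illustration of why the distinction matters for a negative key:

#include <cassert>
#include <cstdint>

int main() {
  const int32_t key = -1;  // a de-hoisted index can be negative

  // movl-style zero extension: shorter encoding, fine for values only ever
  // used as 32-bit integers, but wrong as a 64-bit address component.
  uint64_t zero_extended = static_cast<uint32_t>(key);
  // movq-style sign extension: what IsDehoistedKeyConstant now asks for.
  uint64_t sign_extended = static_cast<uint64_t>(static_cast<int64_t>(key));

  const uint64_t base = 0x100000000ull;  // some 64-bit base address
  assert(base + sign_extended == base - 1);               // intended element
  assert(base + zero_extended == base + 0xFFFFFFFFull);   // off by 2^32
  return 0;
}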
diff --git a/deps/v8/src/x64/lithium-x64.cc b/deps/v8/src/x64/lithium-x64.cc
index 1f2b1e98e0..8c4f24e8fb 100644
--- a/deps/v8/src/x64/lithium-x64.cc
+++ b/deps/v8/src/x64/lithium-x64.cc
@@ -175,6 +175,19 @@ bool LGoto::HasInterestingComment(LCodeGen* gen) const {
}
+template<int R>
+bool LTemplateResultInstruction<R>::MustSignExtendResult(
+ LPlatformChunk* chunk) const {
+ HValue* hvalue = this->hydrogen_value();
+
+ if (hvalue == NULL) return false;
+ if (!hvalue->representation().IsInteger32()) return false;
+ if (hvalue->HasRange() && !hvalue->range()->CanBeNegative()) return false;
+
+ return chunk->GetDehoistedKeyIds()->Contains(hvalue->id());
+}
+
+
void LGoto::PrintDataTo(StringStream* stream) {
stream->Add("B%d", block_id());
}
@@ -947,18 +960,20 @@ LInstruction* LChunkBuilder::DoBranch(HBranch* instr) {
if (goto_instr != NULL) return goto_instr;
HValue* value = instr->value();
- LBranch* result = new(zone()) LBranch(UseRegister(value));
- // Tagged values that are not known smis or booleans require a
- // deoptimization environment. If the instruction is generic no
- // environment is needed since all cases are handled.
- ToBooleanStub::Types expected = instr->expected_input_types();
- Representation rep = value->representation();
+ Representation r = value->representation();
HType type = value->type();
- if (rep.IsTagged() && !type.IsSmi() && !type.IsBoolean() &&
- !expected.IsGeneric()) {
- return AssignEnvironment(result);
+ ToBooleanStub::Types expected = instr->expected_input_types();
+ if (expected.IsEmpty()) expected = ToBooleanStub::Types::Generic();
+
+ bool easy_case = !r.IsTagged() || type.IsBoolean() || type.IsSmi() ||
+ type.IsJSArray() || type.IsHeapNumber() || type.IsString();
+ LInstruction* branch = new(zone()) LBranch(UseRegister(value));
+ if (!easy_case &&
+ ((!expected.Contains(ToBooleanStub::SMI) && expected.NeedsMap()) ||
+ !expected.IsGeneric())) {
+ branch = AssignEnvironment(branch);
}
- return result;
+ return branch;
}
@@ -1117,6 +1132,7 @@ LInstruction* LChunkBuilder::DoUnaryMathOperation(HUnaryMathOperation* instr) {
case kMathExp: return DoMathExp(instr);
case kMathSqrt: return DoMathSqrt(instr);
case kMathPowHalf: return DoMathPowHalf(instr);
+ case kMathClz32: return DoMathClz32(instr);
default:
UNREACHABLE();
return NULL;
@@ -1142,8 +1158,12 @@ LInstruction* LChunkBuilder::DoMathRound(HUnaryMathOperation* instr) {
LInstruction* LChunkBuilder::DoMathAbs(HUnaryMathOperation* instr) {
LOperand* context = UseAny(instr->context());
LOperand* input = UseRegisterAtStart(instr->value());
- LMathAbs* result = new(zone()) LMathAbs(context, input);
- return AssignEnvironment(AssignPointerMap(DefineSameAsFirst(result)));
+ LInstruction* result =
+ DefineSameAsFirst(new(zone()) LMathAbs(context, input));
+ Representation r = instr->value()->representation();
+ if (!r.IsDouble() && !r.IsSmiOrInteger32()) result = AssignPointerMap(result);
+ if (!r.IsDouble()) result = AssignEnvironment(result);
+ return result;
}
@@ -1155,6 +1175,13 @@ LInstruction* LChunkBuilder::DoMathLog(HUnaryMathOperation* instr) {
}
+LInstruction* LChunkBuilder::DoMathClz32(HUnaryMathOperation* instr) {
+ LOperand* input = UseRegisterAtStart(instr->value());
+ LMathClz32* result = new(zone()) LMathClz32(input);
+ return DefineAsRegister(result);
+}
+
+
LInstruction* LChunkBuilder::DoMathExp(HUnaryMathOperation* instr) {
ASSERT(instr->representation().IsDouble());
ASSERT(instr->value()->representation().IsDouble());
@@ -1246,24 +1273,72 @@ LInstruction* LChunkBuilder::DoBitwise(HBitwise* instr) {
}
+LInstruction* LChunkBuilder::DoDivByPowerOf2I(HDiv* instr) {
+ ASSERT(instr->representation().IsSmiOrInteger32());
+ ASSERT(instr->left()->representation().Equals(instr->representation()));
+ ASSERT(instr->right()->representation().Equals(instr->representation()));
+ LOperand* dividend = UseRegister(instr->left());
+ int32_t divisor = instr->right()->GetInteger32Constant();
+ LInstruction* result = DefineAsRegister(new(zone()) LDivByPowerOf2I(
+ dividend, divisor));
+ if ((instr->CheckFlag(HValue::kBailoutOnMinusZero) && divisor < 0) ||
+ (instr->CheckFlag(HValue::kCanOverflow) && divisor == -1) ||
+ (!instr->CheckFlag(HInstruction::kAllUsesTruncatingToInt32) &&
+ divisor != 1 && divisor != -1)) {
+ result = AssignEnvironment(result);
+ }
+ return result;
+}
+
+
+LInstruction* LChunkBuilder::DoDivByConstI(HDiv* instr) {
+ ASSERT(instr->representation().IsInteger32());
+ ASSERT(instr->left()->representation().Equals(instr->representation()));
+ ASSERT(instr->right()->representation().Equals(instr->representation()));
+ LOperand* dividend = UseRegister(instr->left());
+ int32_t divisor = instr->right()->GetInteger32Constant();
+ LOperand* temp1 = FixedTemp(rax);
+ LOperand* temp2 = FixedTemp(rdx);
+ LInstruction* result = DefineFixed(new(zone()) LDivByConstI(
+ dividend, divisor, temp1, temp2), rdx);
+ if (divisor == 0 ||
+ (instr->CheckFlag(HValue::kBailoutOnMinusZero) && divisor < 0) ||
+ !instr->CheckFlag(HInstruction::kAllUsesTruncatingToInt32)) {
+ result = AssignEnvironment(result);
+ }
+ return result;
+}
+
+
+LInstruction* LChunkBuilder::DoDivI(HBinaryOperation* instr) {
+ ASSERT(instr->representation().IsSmiOrInteger32());
+ ASSERT(instr->left()->representation().Equals(instr->representation()));
+ ASSERT(instr->right()->representation().Equals(instr->representation()));
+ LOperand* dividend = UseFixed(instr->left(), rax);
+ LOperand* divisor = UseRegister(instr->right());
+ LOperand* temp = FixedTemp(rdx);
+ LInstruction* result = DefineFixed(new(zone()) LDivI(
+ dividend, divisor, temp), rax);
+ if (instr->CheckFlag(HValue::kCanBeDivByZero) ||
+ instr->CheckFlag(HValue::kBailoutOnMinusZero) ||
+ instr->CheckFlag(HValue::kCanOverflow) ||
+ (!instr->IsMathFloorOfDiv() &&
+ !instr->CheckFlag(HValue::kAllUsesTruncatingToInt32))) {
+ result = AssignEnvironment(result);
+ }
+ return result;
+}
+
+
LInstruction* LChunkBuilder::DoDiv(HDiv* instr) {
if (instr->representation().IsSmiOrInteger32()) {
- ASSERT(instr->left()->representation().Equals(instr->representation()));
- ASSERT(instr->right()->representation().Equals(instr->representation()));
if (instr->RightIsPowerOf2()) {
- ASSERT(!instr->CheckFlag(HValue::kCanBeDivByZero));
- LOperand* value = UseRegisterAtStart(instr->left());
- LDivI* div =
- new(zone()) LDivI(value, UseOrConstant(instr->right()), NULL);
- return AssignEnvironment(DefineSameAsFirst(div));
+ return DoDivByPowerOf2I(instr);
+ } else if (instr->right()->IsConstant()) {
+ return DoDivByConstI(instr);
+ } else {
+ return DoDivI(instr);
}
- // The temporary operand is necessary to ensure that right is not allocated
- // into rdx.
- LOperand* temp = FixedTemp(rdx);
- LOperand* dividend = UseFixed(instr->left(), rax);
- LOperand* divisor = UseRegister(instr->right());
- LDivI* result = new(zone()) LDivI(dividend, divisor, temp);
- return AssignEnvironment(DefineFixed(result, rax));
} else if (instr->representation().IsDouble()) {
return DoArithmeticD(Token::DIV, instr);
} else {
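Annotation, not part of the upstream patch: the new DoDivByConstI path pins rax and rdx as temporaries and fixes the result in rdx, which is the usual shape of strength-reducing division by a known constant into a widening multiply plus shift. The exact constants V8 emits are not visible in this hunk; the sketch below only demonstrates the general technique, using the well-known multiplier for unsigned division by 3:

#include <cassert>
#include <cstdint>

uint32_t DivideBy3(uint32_t n) {
  // 0xAAAAAAAB == ceil(2^33 / 3); the upper bits of the 64-bit product hold
  // the quotient, mirroring mul (rdx:rax) followed by a shift on x64.
  return static_cast<uint32_t>(
      (static_cast<uint64_t>(n) * 0xAAAAAAABull) >> 33);
}

int main() {
  const uint32_t samples[] = {0u, 1u, 2u, 3u, 299u, 0x7FFFFFFFu,
                              0x80000000u, 0xFFFFFFFEu, 0xFFFFFFFFu};
  for (uint32_t n : samples) assert(DivideBy3(n) == n / 3);
  for (uint32_t n = 0; n < 1000000u; ++n) assert(DivideBy3(n) == n / 3);
  return 0;
}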
@@ -1272,74 +1347,114 @@ LInstruction* LChunkBuilder::DoDiv(HDiv* instr) {
}
+LInstruction* LChunkBuilder::DoFlooringDivByPowerOf2I(HMathFloorOfDiv* instr) {
+ LOperand* dividend = UseRegisterAtStart(instr->left());
+ int32_t divisor = instr->right()->GetInteger32Constant();
+ LInstruction* result = DefineSameAsFirst(new(zone()) LFlooringDivByPowerOf2I(
+ dividend, divisor));
+ if ((instr->CheckFlag(HValue::kBailoutOnMinusZero) && divisor < 0) ||
+ (instr->CheckFlag(HValue::kLeftCanBeMinInt) && divisor == -1)) {
+ result = AssignEnvironment(result);
+ }
+ return result;
+}
+
+
+LInstruction* LChunkBuilder::DoFlooringDivByConstI(HMathFloorOfDiv* instr) {
+ ASSERT(instr->representation().IsInteger32());
+ ASSERT(instr->left()->representation().Equals(instr->representation()));
+ ASSERT(instr->right()->representation().Equals(instr->representation()));
+ LOperand* dividend = UseRegister(instr->left());
+ int32_t divisor = instr->right()->GetInteger32Constant();
+ LOperand* temp1 = FixedTemp(rax);
+ LOperand* temp2 = FixedTemp(rdx);
+ LOperand* temp3 =
+ ((divisor > 0 && !instr->CheckFlag(HValue::kLeftCanBeNegative)) ||
+ (divisor < 0 && !instr->CheckFlag(HValue::kLeftCanBePositive))) ?
+ NULL : TempRegister();
+ LInstruction* result =
+ DefineFixed(new(zone()) LFlooringDivByConstI(dividend,
+ divisor,
+ temp1,
+ temp2,
+ temp3),
+ rdx);
+ if (divisor == 0 ||
+ (instr->CheckFlag(HValue::kBailoutOnMinusZero) && divisor < 0)) {
+ result = AssignEnvironment(result);
+ }
+ return result;
+}
+
+
LInstruction* LChunkBuilder::DoMathFloorOfDiv(HMathFloorOfDiv* instr) {
- HValue* right = instr->right();
- if (!right->IsConstant()) {
- ASSERT(right->representation().IsInteger32());
- // The temporary operand is necessary to ensure that right is not allocated
- // into rdx.
- LOperand* temp = FixedTemp(rdx);
- LOperand* dividend = UseFixed(instr->left(), rax);
- LOperand* divisor = UseRegister(instr->right());
- LDivI* flooring_div = new(zone()) LDivI(dividend, divisor, temp);
- return AssignEnvironment(DefineFixed(flooring_div, rax));
- }
-
- ASSERT(right->IsConstant() && HConstant::cast(right)->HasInteger32Value());
- LOperand* divisor = chunk_->DefineConstantOperand(HConstant::cast(right));
- int32_t divisor_si = HConstant::cast(right)->Integer32Value();
- if (divisor_si == 0) {
- LOperand* dividend = UseRegister(instr->left());
- return AssignEnvironment(DefineAsRegister(
- new(zone()) LMathFloorOfDiv(dividend, divisor, NULL)));
- } else if (IsPowerOf2(abs(divisor_si))) {
- LOperand* dividend = UseRegisterAtStart(instr->left());
- LInstruction* result = DefineAsRegister(
- new(zone()) LMathFloorOfDiv(dividend, divisor, NULL));
- return divisor_si < 0 ? AssignEnvironment(result) : result;
+ if (instr->RightIsPowerOf2()) {
+ return DoFlooringDivByPowerOf2I(instr);
+ } else if (instr->right()->IsConstant()) {
+ return DoFlooringDivByConstI(instr);
} else {
- // use two r64
- LOperand* dividend = UseRegisterAtStart(instr->left());
- LOperand* temp = TempRegister();
- LInstruction* result = DefineAsRegister(
- new(zone()) LMathFloorOfDiv(dividend, divisor, temp));
- return divisor_si < 0 ? AssignEnvironment(result) : result;
+ return DoDivI(instr);
}
}
+LInstruction* LChunkBuilder::DoModByPowerOf2I(HMod* instr) {
+ ASSERT(instr->representation().IsSmiOrInteger32());
+ ASSERT(instr->left()->representation().Equals(instr->representation()));
+ ASSERT(instr->right()->representation().Equals(instr->representation()));
+ LOperand* dividend = UseRegisterAtStart(instr->left());
+ int32_t divisor = instr->right()->GetInteger32Constant();
+ LInstruction* result = DefineSameAsFirst(new(zone()) LModByPowerOf2I(
+ dividend, divisor));
+ if (instr->CheckFlag(HValue::kBailoutOnMinusZero)) {
+ result = AssignEnvironment(result);
+ }
+ return result;
+}
+
+
+LInstruction* LChunkBuilder::DoModByConstI(HMod* instr) {
+ ASSERT(instr->representation().IsSmiOrInteger32());
+ ASSERT(instr->left()->representation().Equals(instr->representation()));
+ ASSERT(instr->right()->representation().Equals(instr->representation()));
+ LOperand* dividend = UseRegister(instr->left());
+ int32_t divisor = instr->right()->GetInteger32Constant();
+ LOperand* temp1 = FixedTemp(rax);
+ LOperand* temp2 = FixedTemp(rdx);
+ LInstruction* result = DefineFixed(new(zone()) LModByConstI(
+ dividend, divisor, temp1, temp2), rax);
+ if (divisor == 0 || instr->CheckFlag(HValue::kBailoutOnMinusZero)) {
+ result = AssignEnvironment(result);
+ }
+ return result;
+}
+
+
+LInstruction* LChunkBuilder::DoModI(HMod* instr) {
+ ASSERT(instr->representation().IsSmiOrInteger32());
+ ASSERT(instr->left()->representation().Equals(instr->representation()));
+ ASSERT(instr->right()->representation().Equals(instr->representation()));
+ LOperand* dividend = UseFixed(instr->left(), rax);
+ LOperand* divisor = UseRegister(instr->right());
+ LOperand* temp = FixedTemp(rdx);
+ LInstruction* result = DefineFixed(new(zone()) LModI(
+ dividend, divisor, temp), rdx);
+ if (instr->CheckFlag(HValue::kCanBeDivByZero) ||
+ instr->CheckFlag(HValue::kBailoutOnMinusZero)) {
+ result = AssignEnvironment(result);
+ }
+ return result;
+}
+
+
LInstruction* LChunkBuilder::DoMod(HMod* instr) {
- HValue* left = instr->left();
- HValue* right = instr->right();
if (instr->representation().IsSmiOrInteger32()) {
- ASSERT(left->representation().Equals(instr->representation()));
- ASSERT(right->representation().Equals(instr->representation()));
if (instr->RightIsPowerOf2()) {
- ASSERT(!right->CanBeZero());
- LModI* mod = new(zone()) LModI(UseRegisterAtStart(left),
- UseOrConstant(right),
- NULL);
- LInstruction* result = DefineSameAsFirst(mod);
- return (left->CanBeNegative() &&
- instr->CheckFlag(HValue::kBailoutOnMinusZero))
- ? AssignEnvironment(result)
- : result;
+ return DoModByPowerOf2I(instr);
+ } else if (instr->right()->IsConstant()) {
+ return DoModByConstI(instr);
} else {
- // The temporary operand is necessary to ensure that right is not
- // allocated into edx.
- LModI* mod = new(zone()) LModI(UseFixed(left, rax),
- UseRegister(right),
- FixedTemp(rdx));
- LInstruction* result = DefineFixed(mod, rdx);
- return (right->CanBeZero() ||
- (left->RangeCanInclude(kMinInt) &&
- right->RangeCanInclude(-1) &&
- instr->CheckFlag(HValue::kBailoutOnMinusZero)) ||
- (left->CanBeNegative() &&
- instr->CanBeZero() &&
- instr->CheckFlag(HValue::kBailoutOnMinusZero)))
- ? AssignEnvironment(result)
- : result;
+ return DoModI(instr);
}
} else if (instr->representation().IsDouble()) {
return DoArithmeticD(Token::MOD, instr);
@@ -1702,8 +1817,11 @@ LInstruction* LChunkBuilder::DoChange(HChange* instr) {
if (from.IsTagged()) {
if (to.IsDouble()) {
LOperand* value = UseRegister(instr->value());
- LNumberUntagD* res = new(zone()) LNumberUntagD(value);
- return AssignEnvironment(DefineAsRegister(res));
+ LInstruction* res = DefineAsRegister(new(zone()) LNumberUntagD(value));
+ if (!instr->value()->representation().IsSmi()) {
+ res = AssignEnvironment(res);
+ }
+ return res;
} else if (to.IsSmi()) {
HValue* val = instr->value();
LOperand* value = UseRegister(val);
@@ -1720,8 +1838,13 @@ LInstruction* LChunkBuilder::DoChange(HChange* instr) {
} else {
bool truncating = instr->CanTruncateToInt32();
LOperand* xmm_temp = truncating ? NULL : FixedTemp(xmm1);
- LTaggedToI* res = new(zone()) LTaggedToI(value, xmm_temp);
- return AssignEnvironment(DefineSameAsFirst(res));
+ LInstruction* res =
+ DefineSameAsFirst(new(zone()) LTaggedToI(value, xmm_temp));
+ if (!instr->value()->representation().IsSmi()) {
+ // Note: Only deopts in deferred code.
+ res = AssignEnvironment(res);
+ }
+ return res;
}
}
} else if (from.IsDouble()) {
@@ -1741,41 +1864,37 @@ LInstruction* LChunkBuilder::DoChange(HChange* instr) {
} else {
ASSERT(to.IsInteger32());
LOperand* value = UseRegister(instr->value());
- return AssignEnvironment(
- DefineAsRegister(new(zone()) LDoubleToI(value)));
+ LInstruction* result = DefineAsRegister(new(zone()) LDoubleToI(value));
+ if (!instr->CanTruncateToInt32()) {
+ result = AssignEnvironment(result);
+ }
+ return result;
}
} else if (from.IsInteger32()) {
info()->MarkAsDeferredCalling();
if (to.IsTagged()) {
HValue* val = instr->value();
LOperand* value = UseRegister(val);
- if (val->CheckFlag(HInstruction::kUint32)) {
- LOperand* temp = FixedTemp(xmm1);
- LNumberTagU* result = new(zone()) LNumberTagU(value, temp);
- return AssignEnvironment(AssignPointerMap(DefineSameAsFirst(result)));
- } else if (val->HasRange() && val->range()->IsInSmiRange()) {
- return DefineSameAsFirst(new(zone()) LSmiTag(value));
+ if (!instr->CheckFlag(HValue::kCanOverflow)) {
+ return DefineAsRegister(new(zone()) LSmiTag(value));
+ } else if (val->CheckFlag(HInstruction::kUint32)) {
+ LOperand* temp1 = TempRegister();
+ LOperand* temp2 = FixedTemp(xmm1);
+ LNumberTagU* result = new(zone()) LNumberTagU(value, temp1, temp2);
+ return AssignPointerMap(DefineSameAsFirst(result));
} else {
LNumberTagI* result = new(zone()) LNumberTagI(value);
- return AssignEnvironment(AssignPointerMap(DefineSameAsFirst(result)));
+ return AssignPointerMap(DefineSameAsFirst(result));
}
} else if (to.IsSmi()) {
HValue* val = instr->value();
LOperand* value = UseRegister(val);
- LInstruction* result = NULL;
- if (val->CheckFlag(HInstruction::kUint32)) {
- result = DefineAsRegister(new(zone()) LUint32ToSmi(value));
- if (val->HasRange() && val->range()->IsInSmiRange() &&
- val->range()->upper() != kMaxInt) {
- return result;
- }
- } else {
- result = DefineAsRegister(new(zone()) LInteger32ToSmi(value));
- if (val->HasRange() && val->range()->IsInSmiRange()) {
- return result;
- }
+ LInstruction* result = DefineAsRegister(new(zone()) LSmiTag(value));
+ if (instr->CheckFlag(HValue::kCanOverflow)) {
+ ASSERT(val->CheckFlag(HValue::kUint32));
+ result = AssignEnvironment(result);
}
- return AssignEnvironment(result);
+ return result;
} else {
if (instr->value()->CheckFlag(HInstruction::kUint32)) {
LOperand* temp = FixedTemp(xmm1);
@@ -1826,6 +1945,7 @@ LInstruction* LChunkBuilder::DoCheckMaps(HCheckMaps* instr) {
}
LCheckMaps* result = new(zone()) LCheckMaps(value);
if (!instr->CanOmitMapChecks()) {
+ // Note: Only deopts in deferred code.
AssignEnvironment(result);
if (instr->has_migration_target()) return AssignPointerMap(result);
}
@@ -1852,6 +1972,20 @@ LInstruction* LChunkBuilder::DoClampToUint8(HClampToUint8* instr) {
}
+LInstruction* LChunkBuilder::DoDoubleBits(HDoubleBits* instr) {
+ HValue* value = instr->value();
+ ASSERT(value->representation().IsDouble());
+ return DefineAsRegister(new(zone()) LDoubleBits(UseRegister(value)));
+}
+
+
+LInstruction* LChunkBuilder::DoConstructDouble(HConstructDouble* instr) {
+ LOperand* lo = UseRegister(instr->lo());
+ LOperand* hi = UseRegister(instr->hi());
+ return DefineAsRegister(new(zone()) LConstructDouble(hi, lo));
+}
+
+
LInstruction* LChunkBuilder::DoReturn(HReturn* instr) {
LOperand* context = info()->IsStub() ? UseFixed(instr->context(), rsi) : NULL;
LOperand* parameter_count = UseRegisterOrConstant(instr->parameter_count());
@@ -1911,7 +2045,10 @@ LInstruction* LChunkBuilder::DoLoadContextSlot(HLoadContextSlot* instr) {
LOperand* context = UseRegisterAtStart(instr->value());
LInstruction* result =
DefineAsRegister(new(zone()) LLoadContextSlot(context));
- return instr->RequiresHoleCheck() ? AssignEnvironment(result) : result;
+ if (instr->RequiresHoleCheck() && instr->DeoptimizesOnHole()) {
+ result = AssignEnvironment(result);
+ }
+ return result;
}
@@ -1928,7 +2065,10 @@ LInstruction* LChunkBuilder::DoStoreContextSlot(HStoreContextSlot* instr) {
temp = NULL;
}
LInstruction* result = new(zone()) LStoreContextSlot(context, value, temp);
- return instr->RequiresHoleCheck() ? AssignEnvironment(result) : result;
+ if (instr->RequiresHoleCheck() && instr->DeoptimizesOnHole()) {
+ result = AssignEnvironment(result);
+ }
+ return result;
}
@@ -1969,32 +2109,51 @@ LInstruction* LChunkBuilder::DoLoadRoot(HLoadRoot* instr) {
}
+void LChunkBuilder::FindDehoistedKeyDefinitions(HValue* candidate) {
+ BitVector* dehoisted_key_ids = chunk_->GetDehoistedKeyIds();
+ if (dehoisted_key_ids->Contains(candidate->id())) return;
+ dehoisted_key_ids->Add(candidate->id());
+ if (!candidate->IsPhi()) return;
+ for (int i = 0; i < candidate->OperandCount(); ++i) {
+ FindDehoistedKeyDefinitions(candidate->OperandAt(i));
+ }
+}
+
+
LInstruction* LChunkBuilder::DoLoadKeyed(HLoadKeyed* instr) {
ASSERT(instr->key()->representation().IsInteger32());
ElementsKind elements_kind = instr->elements_kind();
LOperand* key = UseRegisterOrConstantAtStart(instr->key());
- LLoadKeyed* result = NULL;
+ LInstruction* result = NULL;
+
+ if (instr->IsDehoisted()) {
+ FindDehoistedKeyDefinitions(instr->key());
+ }
if (!instr->is_typed_elements()) {
LOperand* obj = UseRegisterAtStart(instr->elements());
- result = new(zone()) LLoadKeyed(obj, key);
+ result = DefineAsRegister(new(zone()) LLoadKeyed(obj, key));
} else {
ASSERT(
(instr->representation().IsInteger32() &&
- !(IsDoubleOrFloatElementsKind(instr->elements_kind()))) ||
+ !(IsDoubleOrFloatElementsKind(elements_kind))) ||
(instr->representation().IsDouble() &&
- (IsDoubleOrFloatElementsKind(instr->elements_kind()))));
+ (IsDoubleOrFloatElementsKind(elements_kind))));
LOperand* backing_store = UseRegister(instr->elements());
- result = new(zone()) LLoadKeyed(backing_store, key);
+ result = DefineAsRegister(new(zone()) LLoadKeyed(backing_store, key));
}
- DefineAsRegister(result);
- bool can_deoptimize = instr->RequiresHoleCheck() ||
- (elements_kind == EXTERNAL_UINT32_ELEMENTS) ||
- (elements_kind == UINT32_ELEMENTS);
- // An unsigned int array load might overflow and cause a deopt, make sure it
- // has an environment.
- return can_deoptimize ? AssignEnvironment(result) : result;
+ if ((instr->is_external() || instr->is_fixed_typed_array()) ?
+ // see LCodeGen::DoLoadKeyedExternalArray
+ ((elements_kind == EXTERNAL_UINT32_ELEMENTS ||
+ elements_kind == UINT32_ELEMENTS) &&
+ !instr->CheckFlag(HInstruction::kUint32)) :
+ // see LCodeGen::DoLoadKeyedFixedDoubleArray and
+ // LCodeGen::DoLoadKeyedFixedArray
+ instr->RequiresHoleCheck()) {
+ result = AssignEnvironment(result);
+ }
+ return result;
}
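
The rewritten DoLoadKeyed only requests an environment when the load can actually deoptimize; for EXTERNAL_UINT32_ELEMENTS / UINT32_ELEMENTS the risk is a loaded value above INT32_MAX that cannot be treated as a signed int32 unless the kUint32 flag says the uses accept it. A minimal standalone sketch of that representability check (illustrative only, not V8 code):

    #include <cstdint>
    #include <limits>
    #include <optional>

    // Returns the loaded element as an int32 when it is representable,
    // or nothing when the runtime would have to bail out (deoptimize).
    std::optional<int32_t> LoadUint32AsInt32(uint32_t raw_element) {
      if (raw_element > static_cast<uint32_t>(std::numeric_limits<int32_t>::max())) {
        return std::nullopt;  // value needs a heap number / deopt path
      }
      return static_cast<int32_t>(raw_element);
    }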
@@ -2012,6 +2171,10 @@ LInstruction* LChunkBuilder::DoLoadKeyedGeneric(HLoadKeyedGeneric* instr) {
LInstruction* LChunkBuilder::DoStoreKeyed(HStoreKeyed* instr) {
ElementsKind elements_kind = instr->elements_kind();
+ if (instr->IsDehoisted()) {
+ FindDehoistedKeyDefinitions(instr->key());
+ }
+
if (!instr->is_typed_elements()) {
ASSERT(instr->elements()->representation().IsTagged());
bool needs_write_barrier = instr->NeedsWriteBarrier();
@@ -2022,7 +2185,7 @@ LInstruction* LChunkBuilder::DoStoreKeyed(HStoreKeyed* instr) {
Representation value_representation = instr->value()->representation();
if (value_representation.IsDouble()) {
object = UseRegisterAtStart(instr->elements());
- val = UseTempRegister(instr->value());
+ val = UseRegisterAtStart(instr->value());
key = UseRegisterOrConstantAtStart(instr->key());
} else {
ASSERT(value_representation.IsSmiOrTagged() ||
@@ -2133,7 +2296,7 @@ LInstruction* LChunkBuilder::DoStoreNamedField(HStoreNamedField* instr) {
bool can_be_constant = instr->value()->IsConstant() &&
HConstant::cast(instr->value())->NotInNewSpace() &&
- !(FLAG_track_double_fields && instr->field_representation().IsDouble());
+ !instr->field_representation().IsDouble();
LOperand* val;
if (needs_write_barrier) {
@@ -2142,10 +2305,9 @@ LInstruction* LChunkBuilder::DoStoreNamedField(HStoreNamedField* instr) {
val = UseFixed(instr->value(), rax);
} else if (can_be_constant) {
val = UseRegisterOrConstant(instr->value());
- } else if (FLAG_track_fields && instr->field_representation().IsSmi()) {
+ } else if (instr->field_representation().IsSmi()) {
val = UseRegister(instr->value());
- } else if (FLAG_track_double_fields &&
- instr->field_representation().IsDouble()) {
+ } else if (instr->field_representation().IsDouble()) {
val = UseRegisterAtStart(instr->value());
} else {
val = UseRegister(instr->value());
@@ -2156,12 +2318,13 @@ LInstruction* LChunkBuilder::DoStoreNamedField(HStoreNamedField* instr) {
LOperand* temp = (!is_in_object || needs_write_barrier ||
needs_write_barrier_for_map) ? TempRegister() : NULL;
- LStoreNamedField* result = new(zone()) LStoreNamedField(obj, val, temp);
- if (FLAG_track_heap_object_fields &&
- instr->field_representation().IsHeapObject()) {
- if (!instr->value()->type().IsHeapObject()) {
- return AssignEnvironment(result);
- }
+ LInstruction* result = new(zone()) LStoreNamedField(obj, val, temp);
+ if (!instr->access().IsExternalMemory() &&
+ instr->field_representation().IsHeapObject() &&
+ (val->IsConstantOperand()
+ ? HConstant::cast(instr->value())->HasSmiValue()
+ : !instr->value()->type().IsHeapObject())) {
+ result = AssignEnvironment(result);
}
return result;
}
@@ -2193,7 +2356,7 @@ LInstruction* LChunkBuilder::DoStringCharCodeAt(HStringCharCodeAt* instr) {
LOperand* context = UseAny(instr->context());
LStringCharCodeAt* result =
new(zone()) LStringCharCodeAt(context, string, index);
- return AssignEnvironment(AssignPointerMap(DefineAsRegister(result)));
+ return AssignPointerMap(DefineAsRegister(result));
}
diff --git a/deps/v8/src/x64/lithium-x64.h b/deps/v8/src/x64/lithium-x64.h
index 36b2744401..9d9ac1ea17 100644
--- a/deps/v8/src/x64/lithium-x64.h
+++ b/deps/v8/src/x64/lithium-x64.h
@@ -80,17 +80,23 @@ class LCodeGen;
V(ConstantI) \
V(ConstantS) \
V(ConstantT) \
+ V(ConstructDouble) \
V(Context) \
V(DateField) \
V(DebugBreak) \
V(DeclareGlobals) \
V(Deoptimize) \
+ V(DivByConstI) \
+ V(DivByPowerOf2I) \
V(DivI) \
+ V(DoubleBits) \
V(DoubleToI) \
V(DoubleToSmi) \
V(Drop) \
V(DummyUse) \
V(Dummy) \
+ V(FlooringDivByConstI) \
+ V(FlooringDivByPowerOf2I) \
V(ForInCacheArray) \
V(ForInPrepareMap) \
V(FunctionLiteral) \
@@ -103,7 +109,6 @@ class LCodeGen;
V(InstanceOfKnownGlobal) \
V(InstructionGap) \
V(Integer32ToDouble) \
- V(Integer32ToSmi) \
V(InvokeFunction) \
V(IsConstructCallAndBranch) \
V(IsObjectAndBranch) \
@@ -124,14 +129,16 @@ class LCodeGen;
V(LoadNamedGeneric) \
V(MapEnumLength) \
V(MathAbs) \
+ V(MathClz32) \
V(MathExp) \
V(MathFloor) \
- V(MathFloorOfDiv) \
V(MathLog) \
V(MathMinMax) \
V(MathPowHalf) \
V(MathRound) \
V(MathSqrt) \
+ V(ModByConstI) \
+ V(ModByPowerOf2I) \
V(ModI) \
V(MulI) \
V(NumberTagD) \
@@ -170,7 +177,6 @@ class LCodeGen;
V(Typeof) \
V(TypeofIsAndBranch) \
V(Uint32ToDouble) \
- V(Uint32ToSmi) \
V(UnknownOSRValue) \
V(WrapReceiver)
@@ -265,6 +271,10 @@ class LInstruction : public ZoneObject {
virtual bool HasInterestingComment(LCodeGen* gen) const { return true; }
+ virtual bool MustSignExtendResult(LPlatformChunk* chunk) const {
+ return false;
+ }
+
#ifdef DEBUG
void VerifyCall();
#endif
@@ -300,6 +310,9 @@ class LTemplateResultInstruction : public LInstruction {
void set_result(LOperand* operand) { results_[0] = operand; }
LOperand* result() const { return results_[0]; }
+ virtual bool MustSignExtendResult(
+ LPlatformChunk* chunk) const V8_FINAL V8_OVERRIDE;
+
protected:
EmbeddedContainer<LOperand*, R> results_;
};
@@ -614,6 +627,49 @@ class LArgumentsElements V8_FINAL : public LTemplateInstruction<1, 0, 0> {
};
+class LModByPowerOf2I V8_FINAL : public LTemplateInstruction<1, 1, 0> {
+ public:
+ LModByPowerOf2I(LOperand* dividend, int32_t divisor) {
+ inputs_[0] = dividend;
+ divisor_ = divisor;
+ }
+
+ LOperand* dividend() { return inputs_[0]; }
+ int32_t divisor() const { return divisor_; }
+
+ DECLARE_CONCRETE_INSTRUCTION(ModByPowerOf2I, "mod-by-power-of-2-i")
+ DECLARE_HYDROGEN_ACCESSOR(Mod)
+
+ private:
+ int32_t divisor_;
+};
+
+
+class LModByConstI V8_FINAL : public LTemplateInstruction<1, 1, 2> {
+ public:
+ LModByConstI(LOperand* dividend,
+ int32_t divisor,
+ LOperand* temp1,
+ LOperand* temp2) {
+ inputs_[0] = dividend;
+ divisor_ = divisor;
+ temps_[0] = temp1;
+ temps_[1] = temp2;
+ }
+
+ LOperand* dividend() { return inputs_[0]; }
+ int32_t divisor() const { return divisor_; }
+ LOperand* temp1() { return temps_[0]; }
+ LOperand* temp2() { return temps_[1]; }
+
+ DECLARE_CONCRETE_INSTRUCTION(ModByConstI, "mod-by-const-i")
+ DECLARE_HYDROGEN_ACCESSOR(Mod)
+
+ private:
+ int32_t divisor_;
+};
+
+
class LModI V8_FINAL : public LTemplateInstruction<1, 2, 1> {
public:
LModI(LOperand* left, LOperand* right, LOperand* temp) {
@@ -631,6 +687,49 @@ class LModI V8_FINAL : public LTemplateInstruction<1, 2, 1> {
};
+class LDivByPowerOf2I V8_FINAL : public LTemplateInstruction<1, 1, 0> {
+ public:
+ LDivByPowerOf2I(LOperand* dividend, int32_t divisor) {
+ inputs_[0] = dividend;
+ divisor_ = divisor;
+ }
+
+ LOperand* dividend() { return inputs_[0]; }
+ int32_t divisor() const { return divisor_; }
+
+ DECLARE_CONCRETE_INSTRUCTION(DivByPowerOf2I, "div-by-power-of-2-i")
+ DECLARE_HYDROGEN_ACCESSOR(Div)
+
+ private:
+ int32_t divisor_;
+};
+
+
+class LDivByConstI V8_FINAL : public LTemplateInstruction<1, 1, 2> {
+ public:
+ LDivByConstI(LOperand* dividend,
+ int32_t divisor,
+ LOperand* temp1,
+ LOperand* temp2) {
+ inputs_[0] = dividend;
+ divisor_ = divisor;
+ temps_[0] = temp1;
+ temps_[1] = temp2;
+ }
+
+ LOperand* dividend() { return inputs_[0]; }
+ int32_t divisor() const { return divisor_; }
+ LOperand* temp1() { return temps_[0]; }
+ LOperand* temp2() { return temps_[1]; }
+
+ DECLARE_CONCRETE_INSTRUCTION(DivByConstI, "div-by-const-i")
+ DECLARE_HYDROGEN_ACCESSOR(Div)
+
+ private:
+ int32_t divisor_;
+};
+
+
class LDivI V8_FINAL : public LTemplateInstruction<1, 2, 1> {
public:
LDivI(LOperand* left, LOperand* right, LOperand* temp) {
@@ -643,29 +742,55 @@ class LDivI V8_FINAL : public LTemplateInstruction<1, 2, 1> {
LOperand* right() { return inputs_[1]; }
LOperand* temp() { return temps_[0]; }
- bool is_flooring() { return hydrogen_value()->IsMathFloorOfDiv(); }
-
DECLARE_CONCRETE_INSTRUCTION(DivI, "div-i")
- DECLARE_HYDROGEN_ACCESSOR(Div)
+ DECLARE_HYDROGEN_ACCESSOR(BinaryOperation)
};
-class LMathFloorOfDiv V8_FINAL : public LTemplateInstruction<1, 2, 1> {
+class LFlooringDivByPowerOf2I V8_FINAL : public LTemplateInstruction<1, 1, 0> {
public:
- LMathFloorOfDiv(LOperand* left,
- LOperand* right,
- LOperand* temp = NULL) {
- inputs_[0] = left;
- inputs_[1] = right;
- temps_[0] = temp;
+ LFlooringDivByPowerOf2I(LOperand* dividend, int32_t divisor) {
+ inputs_[0] = dividend;
+ divisor_ = divisor;
}
- LOperand* left() { return inputs_[0]; }
- LOperand* right() { return inputs_[1]; }
- LOperand* temp() { return temps_[0]; }
+ LOperand* dividend() { return inputs_[0]; }
+ int32_t divisor() const { return divisor_; }
- DECLARE_CONCRETE_INSTRUCTION(MathFloorOfDiv, "math-floor-of-div")
+ DECLARE_CONCRETE_INSTRUCTION(FlooringDivByPowerOf2I,
+ "flooring-div-by-power-of-2-i")
DECLARE_HYDROGEN_ACCESSOR(MathFloorOfDiv)
+
+ private:
+ int32_t divisor_;
+};
+
+
+class LFlooringDivByConstI V8_FINAL : public LTemplateInstruction<1, 1, 3> {
+ public:
+ LFlooringDivByConstI(LOperand* dividend,
+ int32_t divisor,
+ LOperand* temp1,
+ LOperand* temp2,
+ LOperand* temp3) {
+ inputs_[0] = dividend;
+ divisor_ = divisor;
+ temps_[0] = temp1;
+ temps_[1] = temp2;
+ temps_[2] = temp3;
+ }
+
+ LOperand* dividend() { return inputs_[0]; }
+ int32_t divisor() const { return divisor_; }
+ LOperand* temp1() { return temps_[0]; }
+ LOperand* temp2() { return temps_[1]; }
+ LOperand* temp3() { return temps_[2]; }
+
+ DECLARE_CONCRETE_INSTRUCTION(FlooringDivByConstI, "flooring-div-by-const-i")
+ DECLARE_HYDROGEN_ACCESSOR(MathFloorOfDiv)
+
+ private:
+ int32_t divisor_;
};
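
The new ModByPowerOf2I / DivByPowerOf2I / FlooringDivByPowerOf2I instructions exist because a constant power-of-two divisor needs no idiv at all: a flooring divide is a plain arithmetic shift, and a truncating divide only needs a sign-dependent bias first. The *ByConstI variants instead multiply by a precomputed fixed-point reciprocal (hence their extra temp registers), which is not shown here. A short sketch of the power-of-two arithmetic these lowerings rely on, assuming arithmetic right shift on signed values (guaranteed since C++20) and 0 <= k <= 30; function names are illustrative:

    #include <cstdint>

    // floor(x / 2^k): arithmetic right shift already rounds toward -infinity.
    int32_t FlooringDivByPowerOf2(int32_t x, int k) {
      return x >> k;
    }

    // trunc(x / 2^k): add 2^k - 1 first when x is negative so the shift
    // rounds toward zero instead of toward -infinity.
    int32_t TruncatingDivByPowerOf2(int32_t x, int k) {
      int32_t bias = (x >> 31) & ((1 << k) - 1);  // 0 for x >= 0, 2^k - 1 for x < 0
      return (x + bias) >> k;
    }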
@@ -762,6 +887,18 @@ class LMathLog V8_FINAL : public LTemplateInstruction<1, 1, 0> {
};
+class LMathClz32 V8_FINAL : public LTemplateInstruction<1, 1, 0> {
+ public:
+ explicit LMathClz32(LOperand* value) {
+ inputs_[0] = value;
+ }
+
+ LOperand* value() { return inputs_[0]; }
+
+ DECLARE_CONCRETE_INSTRUCTION(MathClz32, "math-clz32")
+};
+
+
class LMathExp V8_FINAL : public LTemplateInstruction<1, 1, 2> {
public:
LMathExp(LOperand* value, LOperand* temp1, LOperand* temp2) {
@@ -1829,19 +1966,6 @@ class LInteger32ToDouble V8_FINAL : public LTemplateInstruction<1, 1, 0> {
};
-class LInteger32ToSmi V8_FINAL : public LTemplateInstruction<1, 1, 0> {
- public:
- explicit LInteger32ToSmi(LOperand* value) {
- inputs_[0] = value;
- }
-
- LOperand* value() { return inputs_[0]; }
-
- DECLARE_CONCRETE_INSTRUCTION(Integer32ToSmi, "int32-to-smi")
- DECLARE_HYDROGEN_ACCESSOR(Change)
-};
-
-
class LUint32ToDouble V8_FINAL : public LTemplateInstruction<1, 1, 1> {
public:
explicit LUint32ToDouble(LOperand* value, LOperand* temp) {
@@ -1856,19 +1980,6 @@ class LUint32ToDouble V8_FINAL : public LTemplateInstruction<1, 1, 1> {
};
-class LUint32ToSmi V8_FINAL : public LTemplateInstruction<1, 1, 0> {
- public:
- explicit LUint32ToSmi(LOperand* value) {
- inputs_[0] = value;
- }
-
- LOperand* value() { return inputs_[0]; }
-
- DECLARE_CONCRETE_INSTRUCTION(Uint32ToSmi, "uint32-to-smi")
- DECLARE_HYDROGEN_ACCESSOR(Change)
-};
-
-
class LNumberTagI V8_FINAL : public LTemplateInstruction<1, 1, 0> {
public:
explicit LNumberTagI(LOperand* value) {
@@ -1881,15 +1992,17 @@ class LNumberTagI V8_FINAL : public LTemplateInstruction<1, 1, 0> {
};
-class LNumberTagU V8_FINAL : public LTemplateInstruction<1, 1, 1> {
+class LNumberTagU V8_FINAL : public LTemplateInstruction<1, 1, 2> {
public:
- explicit LNumberTagU(LOperand* value, LOperand* temp) {
+ LNumberTagU(LOperand* value, LOperand* temp1, LOperand* temp2) {
inputs_[0] = value;
- temps_[0] = temp;
+ temps_[0] = temp1;
+ temps_[1] = temp2;
}
LOperand* value() { return inputs_[0]; }
- LOperand* temp() { return temps_[0]; }
+ LOperand* temp1() { return temps_[0]; }
+ LOperand* temp2() { return temps_[1]; }
DECLARE_CONCRETE_INSTRUCTION(NumberTagU, "number-tag-u")
};
@@ -1966,6 +2079,7 @@ class LSmiTag V8_FINAL : public LTemplateInstruction<1, 1, 0> {
LOperand* value() { return inputs_[0]; }
DECLARE_CONCRETE_INSTRUCTION(SmiTag, "smi-tag")
+ DECLARE_HYDROGEN_ACCESSOR(Change)
};
@@ -2041,7 +2155,7 @@ class LStoreNamedGeneric V8_FINAL : public LTemplateInstruction<0, 3, 0> {
virtual void PrintDataTo(StringStream* stream) V8_OVERRIDE;
Handle<Object> name() const { return hydrogen()->name(); }
- StrictModeFlag strict_mode_flag() { return hydrogen()->strict_mode_flag(); }
+ StrictMode strict_mode() { return hydrogen()->strict_mode(); }
};
@@ -2096,7 +2210,7 @@ class LStoreKeyedGeneric V8_FINAL : public LTemplateInstruction<0, 4, 0> {
virtual void PrintDataTo(StringStream* stream) V8_OVERRIDE;
- StrictModeFlag strict_mode_flag() { return hydrogen()->strict_mode_flag(); }
+ StrictMode strict_mode() { return hydrogen()->strict_mode(); }
};
@@ -2300,6 +2414,33 @@ class LCheckNonSmi V8_FINAL : public LTemplateInstruction<0, 1, 0> {
};
+class LDoubleBits V8_FINAL : public LTemplateInstruction<1, 1, 0> {
+ public:
+ explicit LDoubleBits(LOperand* value) {
+ inputs_[0] = value;
+ }
+
+ LOperand* value() { return inputs_[0]; }
+
+ DECLARE_CONCRETE_INSTRUCTION(DoubleBits, "double-bits")
+ DECLARE_HYDROGEN_ACCESSOR(DoubleBits)
+};
+
+
+class LConstructDouble V8_FINAL : public LTemplateInstruction<1, 2, 0> {
+ public:
+ LConstructDouble(LOperand* hi, LOperand* lo) {
+ inputs_[0] = hi;
+ inputs_[1] = lo;
+ }
+
+ LOperand* hi() { return inputs_[0]; }
+ LOperand* lo() { return inputs_[1]; }
+
+ DECLARE_CONCRETE_INSTRUCTION(ConstructDouble, "construct-double")
+};
+
+
class LAllocate V8_FINAL : public LTemplateInstruction<1, 2, 1> {
public:
LAllocate(LOperand* context, LOperand* size, LOperand* temp) {
@@ -2492,10 +2633,18 @@ class LChunkBuilder;
class LPlatformChunk V8_FINAL : public LChunk {
public:
LPlatformChunk(CompilationInfo* info, HGraph* graph)
- : LChunk(info, graph) { }
+ : LChunk(info, graph),
+ dehoisted_key_ids_(graph->GetMaximumValueID(), graph->zone()) { }
int GetNextSpillIndex(RegisterKind kind);
LOperand* GetNextSpillSlot(RegisterKind kind);
+ BitVector* GetDehoistedKeyIds() { return &dehoisted_key_ids_; }
+ bool IsDehoistedKey(HValue* value) {
+ return dehoisted_key_ids_.Contains(value->id());
+ }
+
+ private:
+ BitVector dehoisted_key_ids_;
};
@@ -2529,6 +2678,15 @@ class LChunkBuilder V8_FINAL : public LChunkBuilderBase {
LInstruction* DoMathExp(HUnaryMathOperation* instr);
LInstruction* DoMathSqrt(HUnaryMathOperation* instr);
LInstruction* DoMathPowHalf(HUnaryMathOperation* instr);
+ LInstruction* DoMathClz32(HUnaryMathOperation* instr);
+ LInstruction* DoDivByPowerOf2I(HDiv* instr);
+ LInstruction* DoDivByConstI(HDiv* instr);
+ LInstruction* DoDivI(HBinaryOperation* instr);
+ LInstruction* DoModByPowerOf2I(HMod* instr);
+ LInstruction* DoModByConstI(HMod* instr);
+ LInstruction* DoModI(HMod* instr);
+ LInstruction* DoFlooringDivByPowerOf2I(HMathFloorOfDiv* instr);
+ LInstruction* DoFlooringDivByConstI(HMathFloorOfDiv* instr);
private:
enum Status {
@@ -2637,6 +2795,7 @@ class LChunkBuilder V8_FINAL : public LChunkBuilderBase {
HArithmeticBinaryOperation* instr);
LInstruction* DoArithmeticT(Token::Value op,
HBinaryOperation* instr);
+ void FindDehoistedKeyDefinitions(HValue* candidate);
LPlatformChunk* chunk_;
CompilationInfo* info_;
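
The dehoisted_key_ids_ bit set added to LPlatformChunk backs the new MustSignExtendResult() hook: a value that feeds a dehoisted array index is later folded into 64-bit address arithmetic, so its upper 32 bits must hold a proper sign extension rather than garbage. A rough model of the bookkeeping, using plain STL containers as stand-ins for V8's BitVector and HValue (names are illustrative, not V8's):

    #include <unordered_set>
    #include <vector>

    struct Value {
      int id;
      bool is_phi;
      std::vector<Value*> operands;
    };

    // Mirrors FindDehoistedKeyDefinitions: record the key itself and, for
    // phis, every definition that can flow into it.
    void CollectDehoistedKeyIds(const Value* key, std::unordered_set<int>* ids) {
      if (!ids->insert(key->id).second) return;  // already recorded
      if (!key->is_phi) return;
      for (const Value* op : key->operands) CollectDehoistedKeyIds(op, ids);
    }

    bool IsDehoistedKey(const std::unordered_set<int>& ids, const Value* v) {
      return ids.count(v->id) != 0;
    }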
diff --git a/deps/v8/src/x64/macro-assembler-x64.cc b/deps/v8/src/x64/macro-assembler-x64.cc
index 4c19fced69..6f313f7a66 100644
--- a/deps/v8/src/x64/macro-assembler-x64.cc
+++ b/deps/v8/src/x64/macro-assembler-x64.cc
@@ -128,7 +128,7 @@ void MacroAssembler::LoadAddress(Register destination,
intptr_t delta = RootRegisterDelta(source);
if (delta != kInvalidRootRegisterDelta && is_int32(delta)) {
Serializer::TooLateToEnableNow();
- lea(destination, Operand(kRootRegister, static_cast<int32_t>(delta)));
+ leap(destination, Operand(kRootRegister, static_cast<int32_t>(delta)));
return;
}
}
@@ -145,7 +145,7 @@ int MacroAssembler::LoadAddressSize(ExternalReference source) {
intptr_t delta = RootRegisterDelta(source);
if (delta != kInvalidRootRegisterDelta && is_int32(delta)) {
Serializer::TooLateToEnableNow();
- // Operand is lea(scratch, Operand(kRootRegister, delta));
+ // Operand is leap(scratch, Operand(kRootRegister, delta));
// Opcodes : REX.W 8D ModRM Disp8/Disp32 - 4 or 7.
int size = 4;
if (!is_int8(static_cast<int32_t>(delta))) {
@@ -165,11 +165,11 @@ void MacroAssembler::PushAddress(ExternalReference source) {
if (emit_debug_code()) {
Move(kScratchRegister, kZapValue, Assembler::RelocInfoNone());
}
- push(Immediate(static_cast<int32_t>(address)));
+ Push(Immediate(static_cast<int32_t>(address)));
return;
}
LoadAddress(kScratchRegister, source);
- push(kScratchRegister);
+ Push(kScratchRegister);
}
@@ -200,13 +200,13 @@ void MacroAssembler::StoreRoot(Register source, Heap::RootListIndex index) {
void MacroAssembler::PushRoot(Heap::RootListIndex index) {
ASSERT(root_array_available_);
- push(Operand(kRootRegister, (index << kPointerSizeLog2) - kRootRegisterBias));
+ Push(Operand(kRootRegister, (index << kPointerSizeLog2) - kRootRegisterBias));
}
void MacroAssembler::CompareRoot(Register with, Heap::RootListIndex index) {
ASSERT(root_array_available_);
- cmpq(with, Operand(kRootRegister,
+ cmpp(with, Operand(kRootRegister,
(index << kPointerSizeLog2) - kRootRegisterBias));
}
@@ -216,7 +216,7 @@ void MacroAssembler::CompareRoot(const Operand& with,
ASSERT(root_array_available_);
ASSERT(!with.AddressUsesRegister(kScratchRegister));
LoadRoot(kScratchRegister, index);
- cmpq(with, kScratchRegister);
+ cmpp(with, kScratchRegister);
}
@@ -236,13 +236,13 @@ void MacroAssembler::RememberedSetHelper(Register object, // For debug tests.
// Store pointer to buffer.
movp(Operand(scratch, 0), addr);
// Increment buffer top.
- addq(scratch, Immediate(kPointerSize));
+ addp(scratch, Immediate(kPointerSize));
// Write back new top of buffer.
StoreRoot(scratch, Heap::kStoreBufferTopRootIndex);
// Call stub on end of buffer.
Label done;
// Check for end of buffer.
- testq(scratch, Immediate(StoreBuffer::kStoreBufferOverflowBit));
+ testp(scratch, Immediate(StoreBuffer::kStoreBufferOverflowBit));
if (and_then == kReturnAtEnd) {
Label buffer_overflowed;
j(not_equal, &buffer_overflowed, Label::kNear);
@@ -276,13 +276,13 @@ void MacroAssembler::InNewSpace(Register object,
// and the running system.
if (scratch.is(object)) {
Move(kScratchRegister, ExternalReference::new_space_mask(isolate()));
- and_(scratch, kScratchRegister);
+ andp(scratch, kScratchRegister);
} else {
Move(scratch, ExternalReference::new_space_mask(isolate()));
- and_(scratch, object);
+ andp(scratch, object);
}
Move(kScratchRegister, ExternalReference::new_space_start(isolate()));
- cmpq(scratch, kScratchRegister);
+ cmpp(scratch, kScratchRegister);
j(cc, branch, distance);
} else {
ASSERT(is_int32(static_cast<int64_t>(isolate()->heap()->NewSpaceMask())));
@@ -291,11 +291,11 @@ void MacroAssembler::InNewSpace(Register object,
Move(kScratchRegister, reinterpret_cast<Address>(-new_space_start),
Assembler::RelocInfoNone());
if (scratch.is(object)) {
- addq(scratch, kScratchRegister);
+ addp(scratch, kScratchRegister);
} else {
- lea(scratch, Operand(object, kScratchRegister, times_1, 0));
+ leap(scratch, Operand(object, kScratchRegister, times_1, 0));
}
- and_(scratch,
+ andp(scratch,
Immediate(static_cast<int32_t>(isolate()->heap()->NewSpaceMask())));
j(cc, branch, distance);
}
@@ -323,7 +323,7 @@ void MacroAssembler::RecordWriteField(
  // of the object, so the offset must be a multiple of kPointerSize.
ASSERT(IsAligned(offset, kPointerSize));
- lea(dst, FieldOperand(object, offset));
+ leap(dst, FieldOperand(object, offset));
if (emit_debug_code()) {
Label ok;
testb(dst, Immediate((1 << kPointerSizeLog2) - 1));
@@ -363,7 +363,7 @@ void MacroAssembler::RecordWriteArray(Register object,
// Array access: calculate the destination address. Index is not a smi.
Register dst = index;
- lea(dst, Operand(object, index, times_pointer_size,
+ leap(dst, Operand(object, index, times_pointer_size,
FixedArray::kHeaderSize - kHeapObjectTag));
RecordWrite(
@@ -398,7 +398,7 @@ void MacroAssembler::RecordWrite(Register object,
if (emit_debug_code()) {
Label ok;
- cmpq(value, Operand(address, 0));
+ cmpp(value, Operand(address, 0));
j(equal, &ok, Label::kNear);
int3();
bind(&ok);
@@ -483,7 +483,7 @@ void MacroAssembler::CheckStackAlignment() {
if (frame_alignment > kPointerSize) {
ASSERT(IsPowerOf2(frame_alignment));
Label alignment_as_expected;
- testq(rsp, Immediate(frame_alignment_mask));
+ testp(rsp, Immediate(frame_alignment_mask));
j(zero, &alignment_as_expected, Label::kNear);
// Abort if stack is not aligned.
int3();
@@ -505,17 +505,8 @@ void MacroAssembler::NegativeZeroTest(Register result,
void MacroAssembler::Abort(BailoutReason reason) {
- // We want to pass the msg string like a smi to avoid GC
- // problems, however msg is not guaranteed to be aligned
- // properly. Instead, we pass an aligned pointer that is
- // a proper v8 smi, but also pass the alignment difference
- // from the real pointer as a smi.
- const char* msg = GetBailoutReason(reason);
- intptr_t p1 = reinterpret_cast<intptr_t>(msg);
- intptr_t p0 = (p1 & ~kSmiTagMask) + kSmiTag;
- // Note: p0 might not be a valid Smi _value_, but it has a valid Smi tag.
- ASSERT(reinterpret_cast<Object*>(p0)->IsSmi());
#ifdef DEBUG
+ const char* msg = GetBailoutReason(reason);
if (msg != NULL) {
RecordComment("Abort message: ");
RecordComment(msg);
@@ -527,21 +518,18 @@ void MacroAssembler::Abort(BailoutReason reason) {
}
#endif
- push(rax);
- Move(kScratchRegister, reinterpret_cast<Smi*>(p0),
- Assembler::RelocInfoNone());
- push(kScratchRegister);
- Move(kScratchRegister, Smi::FromInt(static_cast<int>(p1 - p0)),
+ Push(rax);
+ Move(kScratchRegister, Smi::FromInt(static_cast<int>(reason)),
Assembler::RelocInfoNone());
- push(kScratchRegister);
+ Push(kScratchRegister);
if (!has_frame_) {
// We don't actually want to generate a pile of code for this, so just
// claim there is a stack frame, without generating one.
FrameScope scope(this, StackFrame::NONE);
- CallRuntime(Runtime::kAbort, 2);
+ CallRuntime(Runtime::kAbort, 1);
} else {
- CallRuntime(Runtime::kAbort, 2);
+ CallRuntime(Runtime::kAbort, 1);
}
// Control will not return here.
int3();
@@ -572,7 +560,7 @@ bool MacroAssembler::AllowThisStubCall(CodeStub* stub) {
void MacroAssembler::IllegalOperation(int num_arguments) {
if (num_arguments > 0) {
- addq(rsp, Immediate(num_arguments * kPointerSize));
+ addp(rsp, Immediate(num_arguments * kPointerSize));
}
LoadRoot(rax, Heap::kUndefinedValueRootIndex);
}
@@ -588,7 +576,7 @@ void MacroAssembler::IndexFromHash(Register hash, Register index) {
// the slow case, converting the key to a smi is always valid.
// key: string key
// hash: key's hash field, including its array index value.
- and_(hash, Immediate(String::kArrayIndexValueMask));
+ andp(hash, Immediate(String::kArrayIndexValueMask));
shr(hash, Immediate(String::kHashShift));
// Here we actually clobber the key which will be used if calling into
// runtime later. However as the new key is the numeric value of a string key
@@ -757,7 +745,7 @@ void MacroAssembler::CallApiFunctionAndReturn(
// previous handle scope.
subl(Operand(base_reg, kLevelOffset), Immediate(1));
movp(Operand(base_reg, kNextOffset), prev_next_address_reg);
- cmpq(prev_limit_reg, Operand(base_reg, kLimitOffset));
+ cmpp(prev_limit_reg, Operand(base_reg, kLimitOffset));
j(not_equal, &delete_allocated_handles);
bind(&leave_exit_frame);
@@ -812,7 +800,7 @@ void MacroAssembler::CallApiFunctionAndReturn(
bind(&promote_scheduled_exception);
{
FrameScope frame(this, StackFrame::INTERNAL);
- CallRuntime(Runtime::kPromoteScheduledException, 0);
+ CallRuntime(Runtime::kHiddenPromoteScheduledException, 0);
}
jmp(&exception_handled);
@@ -893,12 +881,12 @@ void MacroAssembler::PushCallerSaved(SaveFPRegsMode fp_mode,
for (int i = 0; i < kNumberOfSavedRegs; i++) {
Register reg = saved_regs[i];
if (!reg.is(exclusion1) && !reg.is(exclusion2) && !reg.is(exclusion3)) {
- push(reg);
+ pushq(reg);
}
}
// R12 to r15 are callee save on all platforms.
if (fp_mode == kSaveFPRegs) {
- subq(rsp, Immediate(kDoubleSize * XMMRegister::kMaxNumRegisters));
+ subp(rsp, Immediate(kDoubleSize * XMMRegister::kMaxNumRegisters));
for (int i = 0; i < XMMRegister::kMaxNumRegisters; i++) {
XMMRegister reg = XMMRegister::from_code(i);
movsd(Operand(rsp, i * kDoubleSize), reg);
@@ -916,12 +904,12 @@ void MacroAssembler::PopCallerSaved(SaveFPRegsMode fp_mode,
XMMRegister reg = XMMRegister::from_code(i);
movsd(reg, Operand(rsp, i * kDoubleSize));
}
- addq(rsp, Immediate(kDoubleSize * XMMRegister::kMaxNumRegisters));
+ addp(rsp, Immediate(kDoubleSize * XMMRegister::kMaxNumRegisters));
}
for (int i = kNumberOfSavedRegs - 1; i >= 0; i--) {
Register reg = saved_regs[i];
if (!reg.is(exclusion1) && !reg.is(exclusion2) && !reg.is(exclusion3)) {
- pop(reg);
+ popq(reg);
}
}
}
@@ -984,12 +972,17 @@ void MacroAssembler::Set(Register dst, int64_t x) {
}
-void MacroAssembler::Set(const Operand& dst, int64_t x) {
- if (is_int32(x)) {
- movq(dst, Immediate(static_cast<int32_t>(x)));
+void MacroAssembler::Set(const Operand& dst, intptr_t x) {
+ if (kPointerSize == kInt64Size) {
+ if (is_int32(x)) {
+ movp(dst, Immediate(static_cast<int32_t>(x)));
+ } else {
+ Set(kScratchRegister, x);
+ movp(dst, kScratchRegister);
+ }
} else {
- Set(kScratchRegister, x);
- movq(dst, kScratchRegister);
+ ASSERT(kPointerSize == kInt32Size);
+ movp(dst, Immediate(static_cast<int32_t>(x)));
}
}
@@ -1009,7 +1002,7 @@ void MacroAssembler::SafeMove(Register dst, Smi* src) {
if (IsUnsafeInt(src->value()) && jit_cookie() != 0) {
Move(dst, Smi::FromInt(src->value() ^ jit_cookie()));
Move(kScratchRegister, Smi::FromInt(jit_cookie()));
- xor_(dst, kScratchRegister);
+ xorq(dst, kScratchRegister);
} else {
Move(dst, src);
}
@@ -1021,7 +1014,7 @@ void MacroAssembler::SafePush(Smi* src) {
if (IsUnsafeInt(src->value()) && jit_cookie() != 0) {
Push(Smi::FromInt(src->value() ^ jit_cookie()));
Move(kScratchRegister, Smi::FromInt(jit_cookie()));
- xor_(Operand(rsp, 0), kScratchRegister);
+ xorq(Operand(rsp, 0), kScratchRegister);
} else {
Push(src);
}
@@ -1059,24 +1052,28 @@ void MacroAssembler::LoadSmiConstant(Register dst, Smi* source) {
switch (uvalue) {
case 9:
- lea(dst, Operand(kSmiConstantRegister, kSmiConstantRegister, times_8, 0));
+ leap(dst,
+ Operand(kSmiConstantRegister, kSmiConstantRegister, times_8, 0));
break;
case 8:
xorl(dst, dst);
- lea(dst, Operand(dst, kSmiConstantRegister, times_8, 0));
+ leap(dst, Operand(dst, kSmiConstantRegister, times_8, 0));
break;
case 4:
xorl(dst, dst);
- lea(dst, Operand(dst, kSmiConstantRegister, times_4, 0));
+ leap(dst, Operand(dst, kSmiConstantRegister, times_4, 0));
break;
case 5:
- lea(dst, Operand(kSmiConstantRegister, kSmiConstantRegister, times_4, 0));
+ leap(dst,
+ Operand(kSmiConstantRegister, kSmiConstantRegister, times_4, 0));
break;
case 3:
- lea(dst, Operand(kSmiConstantRegister, kSmiConstantRegister, times_2, 0));
+ leap(dst,
+ Operand(kSmiConstantRegister, kSmiConstantRegister, times_2, 0));
break;
case 2:
- lea(dst, Operand(kSmiConstantRegister, kSmiConstantRegister, times_1, 0));
+ leap(dst,
+ Operand(kSmiConstantRegister, kSmiConstantRegister, times_1, 0));
break;
case 1:
movp(dst, kSmiConstantRegister);
@@ -1089,7 +1086,7 @@ void MacroAssembler::LoadSmiConstant(Register dst, Smi* source) {
return;
}
if (negative) {
- neg(dst);
+ negp(dst);
}
}
@@ -1158,14 +1155,14 @@ void MacroAssembler::SmiToInteger64(Register dst, const Operand& src) {
void MacroAssembler::SmiTest(Register src) {
AssertSmi(src);
- testq(src, src);
+ testp(src, src);
}
void MacroAssembler::SmiCompare(Register smi1, Register smi2) {
AssertSmi(smi1);
AssertSmi(smi2);
- cmpq(smi1, smi2);
+ cmpp(smi1, smi2);
}
@@ -1178,10 +1175,10 @@ void MacroAssembler::SmiCompare(Register dst, Smi* src) {
void MacroAssembler::Cmp(Register dst, Smi* src) {
ASSERT(!dst.is(kScratchRegister));
if (src->value() == 0) {
- testq(dst, dst);
+ testp(dst, dst);
} else {
Register constant_reg = GetSmiConstant(src);
- cmpq(dst, constant_reg);
+ cmpp(dst, constant_reg);
}
}
@@ -1189,14 +1186,14 @@ void MacroAssembler::Cmp(Register dst, Smi* src) {
void MacroAssembler::SmiCompare(Register dst, const Operand& src) {
AssertSmi(dst);
AssertSmi(src);
- cmpq(dst, src);
+ cmpp(dst, src);
}
void MacroAssembler::SmiCompare(const Operand& dst, Register src) {
AssertSmi(dst);
AssertSmi(src);
- cmpq(dst, src);
+ cmpp(dst, src);
}
@@ -1210,7 +1207,7 @@ void MacroAssembler::Cmp(const Operand& dst, Smi* src) {
// The Operand cannot use the smi register.
Register smi_reg = GetSmiConstant(src);
ASSERT(!dst.AddressUsesRegister(smi_reg));
- cmpq(dst, smi_reg);
+ cmpp(dst, smi_reg);
}
@@ -1258,12 +1255,12 @@ void MacroAssembler::SmiOrIfSmis(Register dst, Register src1, Register src2,
ASSERT(!src1.is(kScratchRegister));
ASSERT(!src2.is(kScratchRegister));
movp(kScratchRegister, src1);
- or_(kScratchRegister, src2);
+ orp(kScratchRegister, src2);
JumpIfNotSmi(kScratchRegister, on_not_smis, near_jump);
movp(dst, kScratchRegister);
} else {
movp(dst, src1);
- or_(dst, src2);
+ orp(dst, src2);
JumpIfNotSmi(dst, on_not_smis, near_jump);
}
}
@@ -1310,7 +1307,7 @@ Condition MacroAssembler::CheckBothNonNegativeSmi(Register first,
return CheckNonNegativeSmi(first);
}
movp(kScratchRegister, first);
- or_(kScratchRegister, second);
+ orp(kScratchRegister, second);
rol(kScratchRegister, Immediate(1));
testl(kScratchRegister, Immediate(3));
return zero;
@@ -1339,7 +1336,7 @@ Condition MacroAssembler::CheckEitherSmi(Register first,
Condition MacroAssembler::CheckIsMinSmi(Register src) {
ASSERT(!src.is(kScratchRegister));
// If we overflow by subtracting one, it's the minimal smi value.
- cmpq(src, kSmiConstantRegister);
+ cmpp(src, kSmiConstantRegister);
return overflow;
}
@@ -1456,39 +1453,39 @@ void MacroAssembler::SmiAddConstant(Register dst, Register src, Smi* constant) {
ASSERT(!dst.is(kScratchRegister));
switch (constant->value()) {
case 1:
- addq(dst, kSmiConstantRegister);
+ addp(dst, kSmiConstantRegister);
return;
case 2:
- lea(dst, Operand(src, kSmiConstantRegister, times_2, 0));
+ leap(dst, Operand(src, kSmiConstantRegister, times_2, 0));
return;
case 4:
- lea(dst, Operand(src, kSmiConstantRegister, times_4, 0));
+ leap(dst, Operand(src, kSmiConstantRegister, times_4, 0));
return;
case 8:
- lea(dst, Operand(src, kSmiConstantRegister, times_8, 0));
+ leap(dst, Operand(src, kSmiConstantRegister, times_8, 0));
return;
default:
Register constant_reg = GetSmiConstant(constant);
- addq(dst, constant_reg);
+ addp(dst, constant_reg);
return;
}
} else {
switch (constant->value()) {
case 1:
- lea(dst, Operand(src, kSmiConstantRegister, times_1, 0));
+ leap(dst, Operand(src, kSmiConstantRegister, times_1, 0));
return;
case 2:
- lea(dst, Operand(src, kSmiConstantRegister, times_2, 0));
+ leap(dst, Operand(src, kSmiConstantRegister, times_2, 0));
return;
case 4:
- lea(dst, Operand(src, kSmiConstantRegister, times_4, 0));
+ leap(dst, Operand(src, kSmiConstantRegister, times_4, 0));
return;
case 8:
- lea(dst, Operand(src, kSmiConstantRegister, times_8, 0));
+ leap(dst, Operand(src, kSmiConstantRegister, times_8, 0));
return;
default:
LoadSmiConstant(dst, constant);
- addq(dst, src);
+ addp(dst, src);
return;
}
}
@@ -1515,16 +1512,16 @@ void MacroAssembler::SmiAddConstant(Register dst,
} else if (dst.is(src)) {
ASSERT(!dst.is(kScratchRegister));
LoadSmiConstant(kScratchRegister, constant);
- addq(dst, kScratchRegister);
+ addp(dst, kScratchRegister);
if (mode.Contains(BAILOUT_ON_NO_OVERFLOW)) {
j(no_overflow, bailout_label, near_jump);
ASSERT(mode.Contains(PRESERVE_SOURCE_REGISTER));
- subq(dst, kScratchRegister);
+ subp(dst, kScratchRegister);
} else if (mode.Contains(BAILOUT_ON_OVERFLOW)) {
if (mode.Contains(PRESERVE_SOURCE_REGISTER)) {
Label done;
j(no_overflow, &done, Label::kNear);
- subq(dst, kScratchRegister);
+ subp(dst, kScratchRegister);
jmp(bailout_label, near_jump);
bind(&done);
} else {
@@ -1538,7 +1535,7 @@ void MacroAssembler::SmiAddConstant(Register dst,
ASSERT(mode.Contains(PRESERVE_SOURCE_REGISTER));
ASSERT(mode.Contains(BAILOUT_ON_OVERFLOW));
LoadSmiConstant(dst, constant);
- addq(dst, src);
+ addp(dst, src);
j(overflow, bailout_label, near_jump);
}
}
@@ -1552,17 +1549,17 @@ void MacroAssembler::SmiSubConstant(Register dst, Register src, Smi* constant) {
} else if (dst.is(src)) {
ASSERT(!dst.is(kScratchRegister));
Register constant_reg = GetSmiConstant(constant);
- subq(dst, constant_reg);
+ subp(dst, constant_reg);
} else {
if (constant->value() == Smi::kMinValue) {
LoadSmiConstant(dst, constant);
// Adding and subtracting the min-value gives the same result, it only
// differs on the overflow bit, which we don't check here.
- addq(dst, src);
+ addp(dst, src);
} else {
// Subtract by adding the negation.
LoadSmiConstant(dst, Smi::FromInt(-constant->value()));
- addq(dst, src);
+ addp(dst, src);
}
}
}
@@ -1581,16 +1578,16 @@ void MacroAssembler::SmiSubConstant(Register dst,
} else if (dst.is(src)) {
ASSERT(!dst.is(kScratchRegister));
LoadSmiConstant(kScratchRegister, constant);
- subq(dst, kScratchRegister);
+ subp(dst, kScratchRegister);
if (mode.Contains(BAILOUT_ON_NO_OVERFLOW)) {
j(no_overflow, bailout_label, near_jump);
ASSERT(mode.Contains(PRESERVE_SOURCE_REGISTER));
- addq(dst, kScratchRegister);
+ addp(dst, kScratchRegister);
} else if (mode.Contains(BAILOUT_ON_OVERFLOW)) {
if (mode.Contains(PRESERVE_SOURCE_REGISTER)) {
Label done;
j(no_overflow, &done, Label::kNear);
- addq(dst, kScratchRegister);
+ addp(dst, kScratchRegister);
jmp(bailout_label, near_jump);
bind(&done);
} else {
@@ -1607,12 +1604,12 @@ void MacroAssembler::SmiSubConstant(Register dst,
ASSERT(!dst.is(kScratchRegister));
movp(dst, src);
LoadSmiConstant(kScratchRegister, constant);
- subq(dst, kScratchRegister);
+ subp(dst, kScratchRegister);
j(overflow, bailout_label, near_jump);
} else {
// Subtract by adding the negation.
LoadSmiConstant(dst, Smi::FromInt(-(constant->value())));
- addq(dst, src);
+ addp(dst, src);
j(overflow, bailout_label, near_jump);
}
}
@@ -1626,15 +1623,15 @@ void MacroAssembler::SmiNeg(Register dst,
if (dst.is(src)) {
ASSERT(!dst.is(kScratchRegister));
movp(kScratchRegister, src);
- neg(dst); // Low 32 bits are retained as zero by negation.
+ negp(dst); // Low 32 bits are retained as zero by negation.
// Test if result is zero or Smi::kMinValue.
- cmpq(dst, kScratchRegister);
+ cmpp(dst, kScratchRegister);
j(not_equal, on_smi_result, near_jump);
movp(src, kScratchRegister);
} else {
movp(dst, src);
- neg(dst);
- cmpq(dst, src);
+ negp(dst);
+ cmpp(dst, src);
// If the result is zero or Smi::kMinValue, negation failed to create a smi.
j(not_equal, on_smi_result, near_jump);
}
@@ -1650,15 +1647,15 @@ static void SmiAddHelper(MacroAssembler* masm,
Label::Distance near_jump) {
if (dst.is(src1)) {
Label done;
- masm->addq(dst, src2);
+ masm->addp(dst, src2);
masm->j(no_overflow, &done, Label::kNear);
// Restore src1.
- masm->subq(dst, src2);
+ masm->subp(dst, src2);
masm->jmp(on_not_smi_result, near_jump);
masm->bind(&done);
} else {
masm->movp(dst, src1);
- masm->addq(dst, src2);
+ masm->addp(dst, src2);
masm->j(overflow, on_not_smi_result, near_jump);
}
}
@@ -1694,12 +1691,12 @@ void MacroAssembler::SmiAdd(Register dst,
if (!dst.is(src1)) {
if (emit_debug_code()) {
movp(kScratchRegister, src1);
- addq(kScratchRegister, src2);
+ addp(kScratchRegister, src2);
Check(no_overflow, kSmiAdditionOverflow);
}
- lea(dst, Operand(src1, src2, times_1, 0));
+ leap(dst, Operand(src1, src2, times_1, 0));
} else {
- addq(dst, src2);
+ addp(dst, src2);
Assert(no_overflow, kSmiAdditionOverflow);
}
}
@@ -1714,15 +1711,15 @@ static void SmiSubHelper(MacroAssembler* masm,
Label::Distance near_jump) {
if (dst.is(src1)) {
Label done;
- masm->subq(dst, src2);
+ masm->subp(dst, src2);
masm->j(no_overflow, &done, Label::kNear);
// Restore src1.
- masm->addq(dst, src2);
+ masm->addp(dst, src2);
masm->jmp(on_not_smi_result, near_jump);
masm->bind(&done);
} else {
masm->movp(dst, src1);
- masm->subq(dst, src2);
+ masm->subp(dst, src2);
masm->j(overflow, on_not_smi_result, near_jump);
}
}
@@ -1760,7 +1757,7 @@ static void SmiSubNoOverflowHelper(MacroAssembler* masm,
if (!dst.is(src1)) {
masm->movp(dst, src1);
}
- masm->subq(dst, src2);
+ masm->subp(dst, src2);
masm->Assert(no_overflow, kSmiSubtractionOverflow);
}
@@ -1792,17 +1789,17 @@ void MacroAssembler::SmiMul(Register dst,
Label failure, zero_correct_result;
movp(kScratchRegister, src1); // Create backup for later testing.
SmiToInteger64(dst, src1);
- imul(dst, src2);
+ imulp(dst, src2);
j(overflow, &failure, Label::kNear);
// Check for negative zero result. If product is zero, and one
// argument is negative, go to slow case.
Label correct_result;
- testq(dst, dst);
+ testp(dst, dst);
j(not_zero, &correct_result, Label::kNear);
movp(dst, kScratchRegister);
- xor_(dst, src2);
+ xorp(dst, src2);
// Result was positive zero.
j(positive, &zero_correct_result, Label::kNear);
@@ -1816,17 +1813,17 @@ void MacroAssembler::SmiMul(Register dst,
bind(&correct_result);
} else {
SmiToInteger64(dst, src1);
- imul(dst, src2);
+ imulp(dst, src2);
j(overflow, on_not_smi_result, near_jump);
// Check for negative zero result. If product is zero, and one
// argument is negative, go to slow case.
Label correct_result;
- testq(dst, dst);
+ testp(dst, dst);
j(not_zero, &correct_result, Label::kNear);
    // One of src1 and src2 is zero; check whether the other is
// negative.
movp(kScratchRegister, src1);
- xor_(kScratchRegister, src2);
+ xorp(kScratchRegister, src2);
j(negative, on_not_smi_result, near_jump);
bind(&correct_result);
}
@@ -1846,7 +1843,7 @@ void MacroAssembler::SmiDiv(Register dst,
ASSERT(!src1.is(rdx));
// Check for 0 divisor (result is +/-Infinity).
- testq(src2, src2);
+ testp(src2, src2);
j(zero, on_not_smi_result, near_jump);
if (src1.is(rax)) {
@@ -1863,7 +1860,7 @@ void MacroAssembler::SmiDiv(Register dst,
Label safe_div;
testl(rax, Immediate(0x7fffffff));
j(not_zero, &safe_div, Label::kNear);
- testq(src2, src2);
+ testp(src2, src2);
if (src1.is(rax)) {
j(positive, &safe_div, Label::kNear);
movp(src1, kScratchRegister);
@@ -1909,7 +1906,7 @@ void MacroAssembler::SmiMod(Register dst,
ASSERT(!src1.is(rdx));
ASSERT(!src1.is(src2));
- testq(src2, src2);
+ testp(src2, src2);
j(zero, on_not_smi_result, near_jump);
if (src1.is(rax)) {
@@ -1945,7 +1942,7 @@ void MacroAssembler::SmiMod(Register dst,
Label smi_result;
testl(rdx, rdx);
j(not_zero, &smi_result, Label::kNear);
- testq(src1, src1);
+ testp(src1, src1);
j(negative, on_not_smi_result, near_jump);
bind(&smi_result);
Integer32ToSmi(dst, rdx);
@@ -1958,11 +1955,11 @@ void MacroAssembler::SmiNot(Register dst, Register src) {
// Set tag and padding bits before negating, so that they are zero afterwards.
movl(kScratchRegister, Immediate(~0));
if (dst.is(src)) {
- xor_(dst, kScratchRegister);
+ xorp(dst, kScratchRegister);
} else {
- lea(dst, Operand(src, kScratchRegister, times_1, 0));
+ leap(dst, Operand(src, kScratchRegister, times_1, 0));
}
- not_(dst);
+ notp(dst);
}
@@ -1971,7 +1968,7 @@ void MacroAssembler::SmiAnd(Register dst, Register src1, Register src2) {
if (!dst.is(src1)) {
movp(dst, src1);
}
- and_(dst, src2);
+ andp(dst, src2);
}
@@ -1981,10 +1978,10 @@ void MacroAssembler::SmiAndConstant(Register dst, Register src, Smi* constant) {
} else if (dst.is(src)) {
ASSERT(!dst.is(kScratchRegister));
Register constant_reg = GetSmiConstant(constant);
- and_(dst, constant_reg);
+ andp(dst, constant_reg);
} else {
LoadSmiConstant(dst, constant);
- and_(dst, src);
+ andp(dst, src);
}
}
@@ -1994,7 +1991,7 @@ void MacroAssembler::SmiOr(Register dst, Register src1, Register src2) {
ASSERT(!src1.is(src2));
movp(dst, src1);
}
- or_(dst, src2);
+ orp(dst, src2);
}
@@ -2002,10 +1999,10 @@ void MacroAssembler::SmiOrConstant(Register dst, Register src, Smi* constant) {
if (dst.is(src)) {
ASSERT(!dst.is(kScratchRegister));
Register constant_reg = GetSmiConstant(constant);
- or_(dst, constant_reg);
+ orp(dst, constant_reg);
} else {
LoadSmiConstant(dst, constant);
- or_(dst, src);
+ orp(dst, src);
}
}
@@ -2015,7 +2012,7 @@ void MacroAssembler::SmiXor(Register dst, Register src1, Register src2) {
ASSERT(!src1.is(src2));
movp(dst, src1);
}
- xor_(dst, src2);
+ xorp(dst, src2);
}
@@ -2023,10 +2020,10 @@ void MacroAssembler::SmiXorConstant(Register dst, Register src, Smi* constant) {
if (dst.is(src)) {
ASSERT(!dst.is(kScratchRegister));
Register constant_reg = GetSmiConstant(constant);
- xor_(dst, constant_reg);
+ xorp(dst, constant_reg);
} else {
LoadSmiConstant(dst, constant);
- xor_(dst, src);
+ xorp(dst, src);
}
}
@@ -2067,7 +2064,7 @@ void MacroAssembler::SmiShiftLogicalRightConstant(
} else {
movp(dst, src);
if (shift_value == 0) {
- testq(dst, dst);
+ testp(dst, dst);
j(negative, on_not_smi_result, near_jump);
}
shr(dst, Immediate(shift_value + kSmiShift));
@@ -2086,7 +2083,7 @@ void MacroAssembler::SmiShiftLeft(Register dst,
}
SmiToInteger32(rcx, src2);
// Shift amount specified by lower 5 bits, not six as the shl opcode.
- and_(rcx, Immediate(0x1f));
+ andq(rcx, Immediate(0x1f));
shl_cl(dst);
}
@@ -2175,7 +2172,7 @@ void MacroAssembler::SelectNonSmi(Register dst,
STATIC_ASSERT(kSmiTag == 0);
ASSERT_EQ(0, Smi::FromInt(0));
movl(kScratchRegister, Immediate(kSmiTagMask));
- and_(kScratchRegister, src1);
+ andp(kScratchRegister, src1);
testl(kScratchRegister, src2);
// If non-zero then both are smis.
j(not_zero, on_not_smis, near_jump);
@@ -2183,13 +2180,13 @@ void MacroAssembler::SelectNonSmi(Register dst,
// Exactly one operand is a smi.
ASSERT_EQ(1, static_cast<int>(kSmiTagMask));
// kScratchRegister still holds src1 & kSmiTag, which is either zero or one.
- subq(kScratchRegister, Immediate(1));
+ subp(kScratchRegister, Immediate(1));
// If src1 is a smi, then scratch register all 1s, else it is all 0s.
movp(dst, src1);
- xor_(dst, src2);
- and_(dst, kScratchRegister);
+ xorp(dst, src2);
+ andp(dst, kScratchRegister);
// If src1 is a smi, dst holds src1 ^ src2, else it is zero.
- xor_(dst, src1);
+ xorp(dst, src1);
// If src1 is a smi, dst is src2, else it is src1, i.e., the non-smi.
}
@@ -2219,7 +2216,7 @@ SmiIndex MacroAssembler::SmiToNegativeIndex(Register dst,
if (!dst.is(src)) {
movq(dst, src);
}
- neg(dst);
+ negq(dst);
if (shift < kSmiShift) {
sar(dst, Immediate(kSmiShift - shift));
} else {
@@ -2238,10 +2235,10 @@ void MacroAssembler::AddSmiField(Register dst, const Operand& src) {
void MacroAssembler::Push(Smi* source) {
intptr_t smi = reinterpret_cast<intptr_t>(source);
if (is_int32(smi)) {
- push(Immediate(static_cast<int32_t>(smi)));
+ Push(Immediate(static_cast<int32_t>(smi)));
} else {
Register constant = GetSmiConstant(source);
- push(constant);
+ Push(constant);
}
}
@@ -2251,22 +2248,22 @@ void MacroAssembler::PushInt64AsTwoSmis(Register src, Register scratch) {
// High bits.
shr(src, Immediate(64 - kSmiShift));
shl(src, Immediate(kSmiShift));
- push(src);
+ Push(src);
// Low bits.
shl(scratch, Immediate(kSmiShift));
- push(scratch);
+ Push(scratch);
}
void MacroAssembler::PopInt64AsTwoSmis(Register dst, Register scratch) {
- pop(scratch);
+ Pop(scratch);
// Low bits.
shr(scratch, Immediate(kSmiShift));
- pop(dst);
+ Pop(dst);
shr(dst, Immediate(kSmiShift));
// High bits.
shl(dst, Immediate(64 - kSmiShift));
- or_(dst, scratch);
+ orp(dst, scratch);
}
@@ -2296,7 +2293,7 @@ void MacroAssembler::LookupNumberStringCache(Register object,
SmiToInteger32(
mask, FieldOperand(number_string_cache, FixedArray::kLengthOffset));
shrl(mask, Immediate(1));
- subq(mask, Immediate(1)); // Make mask.
+ subp(mask, Immediate(1)); // Make mask.
// Calculate the entry in the number string cache. The hash value in the
// number string cache for smis is just the smi value, and the hash for
@@ -2312,8 +2309,8 @@ void MacroAssembler::LookupNumberStringCache(Register object,
STATIC_ASSERT(8 == kDoubleSize);
movl(scratch, FieldOperand(object, HeapNumber::kValueOffset + 4));
- xor_(scratch, FieldOperand(object, HeapNumber::kValueOffset));
- and_(scratch, mask);
+ xorp(scratch, FieldOperand(object, HeapNumber::kValueOffset));
+ andp(scratch, mask);
// Each entry in string cache consists of two pointer sized fields,
// but times_twice_pointer_size (multiplication by 16) scale factor
// is not supported by addrmode on x64 platform.
@@ -2336,7 +2333,7 @@ void MacroAssembler::LookupNumberStringCache(Register object,
bind(&is_smi);
SmiToInteger32(scratch, object);
- and_(scratch, mask);
+ andp(scratch, mask);
// Each entry in string cache consists of two pointer sized fields,
// but times_twice_pointer_size (multiplication by 16) scale factor
// is not supported by addrmode on x64 platform.
@@ -2344,7 +2341,7 @@ void MacroAssembler::LookupNumberStringCache(Register object,
shl(scratch, Immediate(kPointerSizeLog2 + 1));
// Check if the entry is the smi we are looking for.
- cmpq(object,
+ cmpp(object,
FieldOperand(number_string_cache,
index,
times_1,
@@ -2401,7 +2398,7 @@ void MacroAssembler::JumpIfNotBothSequentialAsciiStrings(
andl(scratch2, Immediate(kFlatAsciiStringMask));
// Interleave the bits to check both scratch1 and scratch2 in one test.
ASSERT_EQ(0, kFlatAsciiStringMask & (kFlatAsciiStringMask << 3));
- lea(scratch1, Operand(scratch1, scratch2, times_8, 0));
+ leap(scratch1, Operand(scratch1, scratch2, times_8, 0));
cmpl(scratch1,
Immediate(kFlatAsciiStringTag + (kFlatAsciiStringTag << 3)));
j(not_equal, on_fail, near_jump);
@@ -2448,7 +2445,7 @@ void MacroAssembler::JumpIfBothInstanceTypesAreNotSequentialAscii(
andl(scratch2, Immediate(kFlatAsciiStringMask));
// Interleave the bits to check both scratch1 and scratch2 in one test.
ASSERT_EQ(0, kFlatAsciiStringMask & (kFlatAsciiStringMask << 3));
- lea(scratch1, Operand(scratch1, scratch2, times_8, 0));
+ leap(scratch1, Operand(scratch1, scratch2, times_8, 0));
cmpl(scratch1,
Immediate(kFlatAsciiStringTag + (kFlatAsciiStringTag << 3)));
j(not_equal, on_fail, near_jump);
@@ -2520,7 +2517,7 @@ void MacroAssembler::Cmp(Register dst, Handle<Object> source) {
Cmp(dst, Smi::cast(*source));
} else {
MoveHeapObject(kScratchRegister, source);
- cmpq(dst, kScratchRegister);
+ cmpp(dst, kScratchRegister);
}
}
@@ -2531,7 +2528,7 @@ void MacroAssembler::Cmp(const Operand& dst, Handle<Object> source) {
Cmp(dst, Smi::cast(*source));
} else {
MoveHeapObject(kScratchRegister, source);
- cmpq(dst, kScratchRegister);
+ cmpp(dst, kScratchRegister);
}
}
@@ -2542,7 +2539,7 @@ void MacroAssembler::Push(Handle<Object> source) {
Push(Smi::cast(*source));
} else {
MoveHeapObject(kScratchRegister, source);
- push(kScratchRegister);
+ Push(kScratchRegister);
}
}
@@ -2574,7 +2571,87 @@ void MacroAssembler::LoadGlobalCell(Register dst, Handle<Cell> cell) {
void MacroAssembler::Drop(int stack_elements) {
if (stack_elements > 0) {
- addq(rsp, Immediate(stack_elements * kPointerSize));
+ addp(rsp, Immediate(stack_elements * kPointerSize));
+ }
+}
+
+
+void MacroAssembler::Push(Register src) {
+ if (kPointerSize == kInt64Size) {
+ pushq(src);
+ } else {
+ ASSERT(kPointerSize == kInt32Size);
+ // x32 uses 64-bit push for rbp in the prologue.
+ ASSERT(src.code() != rbp.code());
+ leal(rsp, Operand(rsp, -4));
+ movp(Operand(rsp, 0), src);
+ }
+}
+
+
+void MacroAssembler::Push(const Operand& src) {
+ if (kPointerSize == kInt64Size) {
+ pushq(src);
+ } else {
+ ASSERT(kPointerSize == kInt32Size);
+ movp(kScratchRegister, src);
+ leal(rsp, Operand(rsp, -4));
+ movp(Operand(rsp, 0), kScratchRegister);
+ }
+}
+
+
+void MacroAssembler::Push(Immediate value) {
+ if (kPointerSize == kInt64Size) {
+ pushq(value);
+ } else {
+ ASSERT(kPointerSize == kInt32Size);
+ leal(rsp, Operand(rsp, -4));
+ movp(Operand(rsp, 0), value);
+ }
+}
+
+
+void MacroAssembler::PushImm32(int32_t imm32) {
+ if (kPointerSize == kInt64Size) {
+ pushq_imm32(imm32);
+ } else {
+ ASSERT(kPointerSize == kInt32Size);
+ leal(rsp, Operand(rsp, -4));
+ movp(Operand(rsp, 0), Immediate(imm32));
+ }
+}
+
+
+void MacroAssembler::Pop(Register dst) {
+ if (kPointerSize == kInt64Size) {
+ popq(dst);
+ } else {
+ ASSERT(kPointerSize == kInt32Size);
+ // x32 uses 64-bit pop for rbp in the epilogue.
+ ASSERT(dst.code() != rbp.code());
+ movp(dst, Operand(rsp, 0));
+ leal(rsp, Operand(rsp, 4));
+ }
+}
+
+
+void MacroAssembler::Pop(const Operand& dst) {
+ if (kPointerSize == kInt64Size) {
+ popq(dst);
+ } else {
+ ASSERT(kPointerSize == kInt32Size);
+ Register scratch = dst.AddressUsesRegister(kScratchRegister)
+ ? kSmiConstantRegister : kScratchRegister;
+ movp(scratch, Operand(rsp, 0));
+ movp(dst, scratch);
+ leal(rsp, Operand(rsp, 4));
+ if (scratch.is(kSmiConstantRegister)) {
+ // Restore kSmiConstantRegister.
+ movp(kSmiConstantRegister,
+ reinterpret_cast<void*>(Smi::FromInt(kSmiConstantRegisterValue)),
+ Assembler::RelocInfoNone());
+ }
}
}
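
The kPointerSize == kInt64Size branches make Push/Pop usable on the x32 ABI, where pointers are 4 bytes but the hardware pushq/popq always transfer 8; the slow path adjusts rsp by hand and does a pointer-sized store or load instead. A conceptual stand-in for what the x32 branch amounts to (ordinary C++ over a byte-addressed stack, not assembler output):

    #include <cstdint>
    #include <cstring>

    constexpr int kPointerSize = 4;  // x32: 32-bit pointers on x86-64

    // Emulated pointer-sized push: move the stack pointer down by
    // kPointerSize and store, instead of relying on pushq's 8-byte transfer.
    void PushPointer(uint8_t*& rsp, uint32_t value) {
      rsp -= kPointerSize;                      // leal(rsp, Operand(rsp, -4))
      std::memcpy(rsp, &value, kPointerSize);   // movp(Operand(rsp, 0), value)
    }

    void PopPointer(uint8_t*& rsp, uint32_t* value) {
      std::memcpy(value, rsp, kPointerSize);    // movp(dst, Operand(rsp, 0))
      rsp += kPointerSize;                      // leal(rsp, Operand(rsp, 4))
    }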
@@ -2592,6 +2669,17 @@ void MacroAssembler::Jump(ExternalReference ext) {
}
+void MacroAssembler::Jump(const Operand& op) {
+ if (kPointerSize == kInt64Size) {
+ jmp(op);
+ } else {
+ ASSERT(kPointerSize == kInt32Size);
+ movp(kScratchRegister, op);
+ jmp(kScratchRegister);
+ }
+}
+
+
void MacroAssembler::Jump(Address destination, RelocInfo::Mode rmode) {
Move(kScratchRegister, destination, rmode);
jmp(kScratchRegister);
@@ -2623,6 +2711,17 @@ void MacroAssembler::Call(ExternalReference ext) {
}
+void MacroAssembler::Call(const Operand& op) {
+ if (kPointerSize == kInt64Size) {
+ call(op);
+ } else {
+ ASSERT(kPointerSize == kInt32Size);
+ movp(kScratchRegister, op);
+ call(kScratchRegister);
+ }
+}
+
+
void MacroAssembler::Call(Address destination, RelocInfo::Mode rmode) {
#ifdef DEBUG
int end_position = pc_offset() + CallSize(destination);
@@ -2651,26 +2750,26 @@ void MacroAssembler::Call(Handle<Code> code_object,
void MacroAssembler::Pushad() {
- push(rax);
- push(rcx);
- push(rdx);
- push(rbx);
+ Push(rax);
+ Push(rcx);
+ Push(rdx);
+ Push(rbx);
// Not pushing rsp or rbp.
- push(rsi);
- push(rdi);
- push(r8);
- push(r9);
+ Push(rsi);
+ Push(rdi);
+ Push(r8);
+ Push(r9);
// r10 is kScratchRegister.
- push(r11);
+ Push(r11);
// r12 is kSmiConstantRegister.
// r13 is kRootRegister.
- push(r14);
- push(r15);
+ Push(r14);
+ Push(r15);
STATIC_ASSERT(11 == kNumSafepointSavedRegisters);
// Use lea for symmetry with Popad.
int sp_delta =
(kNumSafepointRegisters - kNumSafepointSavedRegisters) * kPointerSize;
- lea(rsp, Operand(rsp, -sp_delta));
+ leap(rsp, Operand(rsp, -sp_delta));
}
@@ -2678,23 +2777,23 @@ void MacroAssembler::Popad() {
// Popad must not change the flags, so use lea instead of addq.
int sp_delta =
(kNumSafepointRegisters - kNumSafepointSavedRegisters) * kPointerSize;
- lea(rsp, Operand(rsp, sp_delta));
- pop(r15);
- pop(r14);
- pop(r11);
- pop(r9);
- pop(r8);
- pop(rdi);
- pop(rsi);
- pop(rbx);
- pop(rdx);
- pop(rcx);
- pop(rax);
+ leap(rsp, Operand(rsp, sp_delta));
+ Pop(r15);
+ Pop(r14);
+ Pop(r11);
+ Pop(r9);
+ Pop(r8);
+ Pop(rdi);
+ Pop(rsi);
+ Pop(rbx);
+ Pop(rdx);
+ Pop(rcx);
+ Pop(rax);
}
void MacroAssembler::Dropad() {
- addq(rsp, Immediate(kNumSafepointRegisters * kPointerSize));
+ addp(rsp, Immediate(kNumSafepointRegisters * kPointerSize));
}
@@ -2759,23 +2858,23 @@ void MacroAssembler::PushTryHandler(StackHandler::Kind kind,
// The frame pointer does not point to a JS frame so we save NULL for
// rbp. We expect the code throwing an exception to check rbp before
// dereferencing it to restore the context.
- push(Immediate(0)); // NULL frame pointer.
+ pushq(Immediate(0)); // NULL frame pointer.
Push(Smi::FromInt(0)); // No context.
} else {
- push(rbp);
- push(rsi);
+ pushq(rbp);
+ Push(rsi);
}
// Push the state and the code object.
unsigned state =
StackHandler::IndexField::encode(handler_index) |
StackHandler::KindField::encode(kind);
- push(Immediate(state));
+ Push(Immediate(state));
Push(CodeObject());
// Link the current handler as the next handler.
ExternalReference handler_address(Isolate::kHandlerAddress, isolate());
- push(ExternalOperand(handler_address));
+ Push(ExternalOperand(handler_address));
// Set this new handler as the current one.
movp(ExternalOperand(handler_address), rsp);
}
@@ -2784,8 +2883,8 @@ void MacroAssembler::PushTryHandler(StackHandler::Kind kind,
void MacroAssembler::PopTryHandler() {
STATIC_ASSERT(StackHandlerConstants::kNextOffset == 0);
ExternalReference handler_address(Isolate::kHandlerAddress, isolate());
- pop(ExternalOperand(handler_address));
- addq(rsp, Immediate(StackHandlerConstants::kSize - kPointerSize));
+ Pop(ExternalOperand(handler_address));
+ addp(rsp, Immediate(StackHandlerConstants::kSize - kPointerSize));
}
@@ -2798,7 +2897,7 @@ void MacroAssembler::JumpToHandlerEntry() {
movp(rdx,
FieldOperand(rbx, rdx, times_pointer_size, FixedArray::kHeaderSize));
SmiToInteger64(rdx, rdx);
- lea(rdi, FieldOperand(rdi, rdx, times_1, Code::kHeaderSize));
+ leap(rdi, FieldOperand(rdi, rdx, times_1, Code::kHeaderSize));
jmp(rdi);
}
@@ -2821,21 +2920,21 @@ void MacroAssembler::Throw(Register value) {
ExternalReference handler_address(Isolate::kHandlerAddress, isolate());
movp(rsp, ExternalOperand(handler_address));
// Restore the next handler.
- pop(ExternalOperand(handler_address));
+ Pop(ExternalOperand(handler_address));
// Remove the code object and state, compute the handler address in rdi.
- pop(rdi); // Code object.
- pop(rdx); // Offset and state.
+ Pop(rdi); // Code object.
+ Pop(rdx); // Offset and state.
// Restore the context and frame pointer.
- pop(rsi); // Context.
- pop(rbp); // Frame pointer.
+ Pop(rsi); // Context.
+ popq(rbp); // Frame pointer.
// If the handler is a JS frame, restore the context to the frame.
// (kind == ENTRY) == (rbp == 0) == (rsi == 0), so we could test either
// rbp or rsi.
Label skip;
- testq(rsi, rsi);
+ testp(rsi, rsi);
j(zero, &skip, Label::kNear);
movp(Operand(rbp, StandardFrameConstants::kContextOffset), rsi);
bind(&skip);
@@ -2875,15 +2974,15 @@ void MacroAssembler::ThrowUncatchable(Register value) {
j(not_zero, &fetch_next);
// Set the top handler address to next handler past the top ENTRY handler.
- pop(ExternalOperand(handler_address));
+ Pop(ExternalOperand(handler_address));
// Remove the code object and state, compute the handler address in rdi.
- pop(rdi); // Code object.
- pop(rdx); // Offset and state.
+ Pop(rdi); // Code object.
+ Pop(rdx); // Offset and state.
// Clear the context pointer and frame pointer (0 was saved in the handler).
- pop(rsi);
- pop(rbp);
+ Pop(rsi);
+ popq(rbp);
JumpToHandlerEntry();
}
@@ -2899,7 +2998,7 @@ void MacroAssembler::Ret(int bytes_dropped, Register scratch) {
ret(bytes_dropped);
} else {
PopReturnAddressTo(scratch);
- addq(rsp, Immediate(bytes_dropped));
+ addp(rsp, Immediate(bytes_dropped));
PushReturnAddressFrom(scratch);
ret(0);
}
@@ -3059,10 +3158,10 @@ void MacroAssembler::ClampDoubleToUint8(XMMRegister input_reg,
cvtsd2si(result_reg, input_reg);
testl(result_reg, Immediate(0xFFFFFF00));
j(zero, &done, Label::kNear);
- cmpl(result_reg, Immediate(0x80000000));
- j(equal, &conv_failure, Label::kNear);
+ cmpl(result_reg, Immediate(1));
+ j(overflow, &conv_failure, Label::kNear);
movl(result_reg, Immediate(0));
- setcc(above, result_reg);
+ setcc(sign, result_reg);
subl(result_reg, Immediate(1));
andl(result_reg, Immediate(255));
jmp(&done, Label::kNear);
@@ -3099,16 +3198,15 @@ void MacroAssembler::TruncateHeapNumberToI(Register result_reg,
Label done;
movsd(xmm0, FieldOperand(input_reg, HeapNumber::kValueOffset));
cvttsd2siq(result_reg, xmm0);
- Set(kScratchRegister, V8_UINT64_C(0x8000000000000000));
- cmpq(result_reg, kScratchRegister);
- j(not_equal, &done, Label::kNear);
+ cmpq(result_reg, Immediate(1));
+ j(no_overflow, &done, Label::kNear);
// Slow case.
if (input_reg.is(result_reg)) {
- subq(rsp, Immediate(kDoubleSize));
+ subp(rsp, Immediate(kDoubleSize));
movsd(MemOperand(rsp, 0), xmm0);
SlowTruncateToI(result_reg, rsp, 0);
- addq(rsp, Immediate(kDoubleSize));
+ addp(rsp, Immediate(kDoubleSize));
} else {
SlowTruncateToI(result_reg, input_reg);
}
@@ -3121,14 +3219,13 @@ void MacroAssembler::TruncateDoubleToI(Register result_reg,
XMMRegister input_reg) {
Label done;
cvttsd2siq(result_reg, input_reg);
- movq(kScratchRegister, V8_INT64_C(0x8000000000000000));
- cmpq(result_reg, kScratchRegister);
- j(not_equal, &done, Label::kNear);
+ cmpq(result_reg, Immediate(1));
+ j(no_overflow, &done, Label::kNear);
- subq(rsp, Immediate(kDoubleSize));
+ subp(rsp, Immediate(kDoubleSize));
movsd(MemOperand(rsp, 0), input_reg);
SlowTruncateToI(result_reg, rsp, 0);
- addq(rsp, Immediate(kDoubleSize));
+ addp(rsp, Immediate(kDoubleSize));
bind(&done);
}
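
Both truncation helpers now detect an out-of-range cvttsd2siq result without materializing the 0x8000000000000000 constant: the instruction returns INT64_MIN (the "integer indefinite") for NaN or out-of-range doubles, and cmpq(result, Immediate(1)) sets the overflow flag only for that value, so a single j(no_overflow) covers the fast path. A portable statement of the check being encoded (illustrative helper, not V8 code):

    #include <cstdint>
    #include <limits>

    // True when cvttsd2siq produced a real conversion rather than the
    // INT64_MIN sentinel it returns for NaN or out-of-range inputs.
    bool TruncationSucceeded(int64_t cvttsd2siq_result) {
      return cvttsd2siq_result != std::numeric_limits<int64_t>::min();
    }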
@@ -3204,15 +3301,15 @@ void MacroAssembler::Throw(BailoutReason reason) {
}
#endif
- push(rax);
+ Push(rax);
Push(Smi::FromInt(reason));
if (!has_frame_) {
// We don't actually want to generate a pile of code for this, so just
// claim there is a stack frame, without generating one.
FrameScope scope(this, StackFrame::NONE);
- CallRuntime(Runtime::kThrowMessage, 1);
+ CallRuntime(Runtime::kHiddenThrowMessage, 1);
} else {
- CallRuntime(Runtime::kThrowMessage, 1);
+ CallRuntime(Runtime::kHiddenThrowMessage, 1);
}
// Control will not return here.
int3();
@@ -3244,7 +3341,7 @@ void MacroAssembler::EnumLength(Register dst, Register map) {
STATIC_ASSERT(Map::EnumLengthBits::kShift == 0);
movp(dst, FieldOperand(map, Map::kBitField3Offset));
Move(kScratchRegister, Smi::FromInt(Map::EnumLengthBits::kMask));
- and_(dst, kScratchRegister);
+ andp(dst, kScratchRegister);
}
@@ -3315,10 +3412,10 @@ void MacroAssembler::AssertString(Register object) {
if (emit_debug_code()) {
testb(object, Immediate(kSmiTagMask));
Check(not_equal, kOperandIsASmiAndNotAString);
- push(object);
+ Push(object);
movp(object, FieldOperand(object, HeapObject::kMapOffset));
CmpInstanceType(object, FIRST_NONSTRING_TYPE);
- pop(object);
+ Pop(object);
Check(below, kOperandIsNotAString);
}
}
@@ -3328,22 +3425,35 @@ void MacroAssembler::AssertName(Register object) {
if (emit_debug_code()) {
testb(object, Immediate(kSmiTagMask));
Check(not_equal, kOperandIsASmiAndNotAName);
- push(object);
+ Push(object);
movp(object, FieldOperand(object, HeapObject::kMapOffset));
CmpInstanceType(object, LAST_NAME_TYPE);
- pop(object);
+ Pop(object);
Check(below_equal, kOperandIsNotAName);
}
}
+void MacroAssembler::AssertUndefinedOrAllocationSite(Register object) {
+ if (emit_debug_code()) {
+ Label done_checking;
+ AssertNotSmi(object);
+ Cmp(object, isolate()->factory()->undefined_value());
+ j(equal, &done_checking);
+ Cmp(FieldOperand(object, 0), isolate()->factory()->allocation_site_map());
+ Assert(equal, kExpectedUndefinedOrCell);
+ bind(&done_checking);
+ }
+}
+
+
void MacroAssembler::AssertRootValue(Register src,
Heap::RootListIndex root_value_index,
BailoutReason reason) {
if (emit_debug_code()) {
ASSERT(!src.is(kScratchRegister));
LoadRoot(kScratchRegister, root_value_index);
- cmpq(src, kScratchRegister);
+ cmpp(src, kScratchRegister);
Check(equal, reason);
}
}
@@ -3591,14 +3701,14 @@ void MacroAssembler::InvokePrologue(const ParameterCount& expected,
// Expected is in register, actual is immediate. This is the
// case when we invoke function values without going through the
// IC mechanism.
- cmpq(expected.reg(), Immediate(actual.immediate()));
+ cmpp(expected.reg(), Immediate(actual.immediate()));
j(equal, &invoke, Label::kNear);
ASSERT(expected.reg().is(rbx));
Set(rax, actual.immediate());
} else if (!expected.reg().is(actual.reg())) {
// Both expected and actual are in (different) registers. This
// is the case when we invoke functions using call and apply.
- cmpq(expected.reg(), actual.reg());
+ cmpp(expected.reg(), actual.reg());
j(equal, &invoke, Label::kNear);
ASSERT(actual.reg().is(rax));
ASSERT(expected.reg().is(rbx));
@@ -3609,7 +3719,7 @@ void MacroAssembler::InvokePrologue(const ParameterCount& expected,
Handle<Code> adaptor = isolate()->builtins()->ArgumentsAdaptorTrampoline();
if (!code_constant.is_null()) {
Move(rdx, code_constant, RelocInfo::EMBEDDED_OBJECT);
- addq(rdx, Immediate(Code::kHeaderSize - kHeapObjectTag));
+ addp(rdx, Immediate(Code::kHeaderSize - kHeapObjectTag));
} else if (!code_register.is(rdx)) {
movp(rdx, code_register);
}
@@ -3631,9 +3741,9 @@ void MacroAssembler::InvokePrologue(const ParameterCount& expected,
void MacroAssembler::Prologue(PrologueFrameMode frame_mode) {
if (frame_mode == BUILD_STUB_FRAME) {
- push(rbp); // Caller's frame pointer.
+ pushq(rbp); // Caller's frame pointer.
movp(rbp, rsp);
- push(rsi); // Callee's context.
+ Push(rsi); // Callee's context.
Push(Smi::FromInt(StackFrame::STUB));
} else {
PredictableCodeSizeScope predictible_code_size_scope(this,
@@ -3644,27 +3754,27 @@ void MacroAssembler::Prologue(PrologueFrameMode frame_mode) {
RelocInfo::CODE_AGE_SEQUENCE);
Nop(kNoCodeAgeSequenceLength - Assembler::kShortCallInstructionLength);
} else {
- push(rbp); // Caller's frame pointer.
+ pushq(rbp); // Caller's frame pointer.
movp(rbp, rsp);
- push(rsi); // Callee's context.
- push(rdi); // Callee's JS function.
+ Push(rsi); // Callee's context.
+ Push(rdi); // Callee's JS function.
}
}
}
void MacroAssembler::EnterFrame(StackFrame::Type type) {
- push(rbp);
+ pushq(rbp);
movp(rbp, rsp);
- push(rsi); // Context.
+ Push(rsi); // Context.
Push(Smi::FromInt(type));
Move(kScratchRegister, CodeObject(), RelocInfo::EMBEDDED_OBJECT);
- push(kScratchRegister);
+ Push(kScratchRegister);
if (emit_debug_code()) {
Move(kScratchRegister,
isolate()->factory()->undefined_value(),
RelocInfo::EMBEDDED_OBJECT);
- cmpq(Operand(rsp, 0), kScratchRegister);
+ cmpp(Operand(rsp, 0), kScratchRegister);
Check(not_equal, kCodeObjectNotProperlyPatched);
}
}
@@ -3673,11 +3783,11 @@ void MacroAssembler::EnterFrame(StackFrame::Type type) {
void MacroAssembler::LeaveFrame(StackFrame::Type type) {
if (emit_debug_code()) {
Move(kScratchRegister, Smi::FromInt(type));
- cmpq(Operand(rbp, StandardFrameConstants::kMarkerOffset), kScratchRegister);
+ cmpp(Operand(rbp, StandardFrameConstants::kMarkerOffset), kScratchRegister);
Check(equal, kStackFrameTypesMustMatch);
}
movp(rsp, rbp);
- pop(rbp);
+ popq(rbp);
}
@@ -3688,14 +3798,14 @@ void MacroAssembler::EnterExitFramePrologue(bool save_rax) {
kFPOnStackSize + kPCOnStackSize);
ASSERT(ExitFrameConstants::kCallerPCOffset == kFPOnStackSize);
ASSERT(ExitFrameConstants::kCallerFPOffset == 0 * kPointerSize);
- push(rbp);
+ pushq(rbp);
movp(rbp, rsp);
// Reserve room for entry stack pointer and push the code object.
ASSERT(ExitFrameConstants::kSPOffset == -1 * kPointerSize);
- push(Immediate(0)); // Saved entry sp, patched before call.
+ Push(Immediate(0)); // Saved entry sp, patched before call.
Move(kScratchRegister, CodeObject(), RelocInfo::EMBEDDED_OBJECT);
- push(kScratchRegister); // Accessed from EditFrame::code_slot.
+ Push(kScratchRegister); // Accessed from EditFrame::code_slot.
// Save the frame pointer and the context in top.
if (save_rax) {
@@ -3717,14 +3827,14 @@ void MacroAssembler::EnterExitFrameEpilogue(int arg_stack_space,
if (save_doubles) {
int space = XMMRegister::kMaxNumAllocatableRegisters * kDoubleSize +
arg_stack_space * kRegisterSize;
- subq(rsp, Immediate(space));
+ subp(rsp, Immediate(space));
int offset = -2 * kPointerSize;
for (int i = 0; i < XMMRegister::NumAllocatableRegisters(); i++) {
XMMRegister reg = XMMRegister::FromAllocationIndex(i);
movsd(Operand(rbp, offset - ((i + 1) * kDoubleSize)), reg);
}
} else if (arg_stack_space > 0) {
- subq(rsp, Immediate(arg_stack_space * kRegisterSize));
+ subp(rsp, Immediate(arg_stack_space * kRegisterSize));
}
// Get the required frame alignment for the OS.
@@ -3732,7 +3842,7 @@ void MacroAssembler::EnterExitFrameEpilogue(int arg_stack_space,
if (kFrameAlignment > 0) {
ASSERT(IsPowerOf2(kFrameAlignment));
ASSERT(is_int8(kFrameAlignment));
- and_(rsp, Immediate(-kFrameAlignment));
+ andp(rsp, Immediate(-kFrameAlignment));
}
// Patch the saved entry sp.
@@ -3746,7 +3856,7 @@ void MacroAssembler::EnterExitFrame(int arg_stack_space, bool save_doubles) {
// Set up argv in callee-saved register r15. It is reused in LeaveExitFrame,
// so it must be retained across the C-call.
int offset = StandardFrameConstants::kCallerSPOffset - kPointerSize;
- lea(r15, Operand(rbp, r14, times_pointer_size, offset));
+ leap(r15, Operand(rbp, r14, times_pointer_size, offset));
EnterExitFrameEpilogue(arg_stack_space, save_doubles);
}
@@ -3774,7 +3884,7 @@ void MacroAssembler::LeaveExitFrame(bool save_doubles) {
// Drop everything up to and including the arguments and the receiver
// from the caller stack.
- lea(rsp, Operand(r15, 1 * kPointerSize));
+ leap(rsp, Operand(r15, 1 * kPointerSize));
PushReturnAddressFrom(rcx);
@@ -3784,7 +3894,7 @@ void MacroAssembler::LeaveExitFrame(bool save_doubles) {
void MacroAssembler::LeaveApiExitFrame(bool restore_context) {
movp(rsp, rbp);
- pop(rbp);
+ popq(rbp);
LeaveExitFrameEpilogue(restore_context);
}
@@ -3821,7 +3931,7 @@ void MacroAssembler::CheckAccessGlobalProxy(Register holder_reg,
// When generating debug code, make sure the lexical context is set.
if (emit_debug_code()) {
- cmpq(scratch, Immediate(0));
+ cmpp(scratch, Immediate(0));
Check(not_equal, kWeShouldNotHaveAnEmptyLexicalContext);
}
// Load the native context of the current context.
@@ -3838,7 +3948,7 @@ void MacroAssembler::CheckAccessGlobalProxy(Register holder_reg,
}
// Check if both contexts are the same.
- cmpq(scratch, FieldOperand(holder_reg, JSGlobalProxy::kNativeContextOffset));
+ cmpp(scratch, FieldOperand(holder_reg, JSGlobalProxy::kNativeContextOffset));
j(equal, &same_contexts);
// Compare security tokens.
@@ -3849,7 +3959,7 @@ void MacroAssembler::CheckAccessGlobalProxy(Register holder_reg,
// Check the context is a native context.
if (emit_debug_code()) {
// Preserve original value of holder_reg.
- push(holder_reg);
+ Push(holder_reg);
movp(holder_reg,
FieldOperand(holder_reg, JSGlobalProxy::kNativeContextOffset));
CompareRoot(holder_reg, Heap::kNullValueRootIndex);
@@ -3859,7 +3969,7 @@ void MacroAssembler::CheckAccessGlobalProxy(Register holder_reg,
movp(holder_reg, FieldOperand(holder_reg, HeapObject::kMapOffset));
CompareRoot(holder_reg, Heap::kNativeContextMapRootIndex);
Check(equal, kJSGlobalObjectNativeContextShouldBeANativeContext);
- pop(holder_reg);
+ Pop(holder_reg);
}
movp(kScratchRegister,
@@ -3867,7 +3977,7 @@ void MacroAssembler::CheckAccessGlobalProxy(Register holder_reg,
int token_offset =
Context::kHeaderSize + Context::SECURITY_TOKEN_INDEX * kPointerSize;
movp(scratch, FieldOperand(scratch, token_offset));
- cmpq(scratch, FieldOperand(kScratchRegister, token_offset));
+ cmpp(scratch, FieldOperand(kScratchRegister, token_offset));
j(not_equal, miss);
bind(&same_contexts);
@@ -3958,14 +4068,14 @@ void MacroAssembler::LoadFromNumberDictionary(Label* miss,
if (i > 0) {
addl(r2, Immediate(SeededNumberDictionary::GetProbeOffset(i)));
}
- and_(r2, r1);
+ andp(r2, r1);
// Scale the index by multiplying by the entry size.
ASSERT(SeededNumberDictionary::kEntrySize == 3);
- lea(r2, Operand(r2, r2, times_2, 0)); // r2 = r2 * 3
+ leap(r2, Operand(r2, r2, times_2, 0)); // r2 = r2 * 3
// Check if the key matches.
- cmpq(key, FieldOperand(elements,
+ cmpp(key, FieldOperand(elements,
r2,
times_pointer_size,
SeededNumberDictionary::kElementsStartOffset));
@@ -4005,7 +4115,7 @@ void MacroAssembler::LoadAllocationTopHelper(Register result,
#ifdef DEBUG
// Assert that result actually contains top on entry.
Operand top_operand = ExternalOperand(allocation_top);
- cmpq(result, top_operand);
+ cmpp(result, top_operand);
Check(equal, kUnexpectedAllocationTop);
#endif
return;
@@ -4026,7 +4136,7 @@ void MacroAssembler::UpdateAllocationTopHelper(Register result_end,
Register scratch,
AllocationFlags flags) {
if (emit_debug_code()) {
- testq(result_end, Immediate(kObjectAlignmentMask));
+ testp(result_end, Immediate(kObjectAlignmentMask));
Check(zero, kUnalignedAllocationInNewSpace);
}
@@ -4086,10 +4196,10 @@ void MacroAssembler::Allocate(int object_size,
if (!top_reg.is(result)) {
movp(top_reg, result);
}
- addq(top_reg, Immediate(object_size));
+ addp(top_reg, Immediate(object_size));
j(carry, gc_required);
Operand limit_operand = ExternalOperand(allocation_limit);
- cmpq(top_reg, limit_operand);
+ cmpp(top_reg, limit_operand);
j(above, gc_required);
// Update allocation top.
@@ -4098,14 +4208,14 @@ void MacroAssembler::Allocate(int object_size,
bool tag_result = (flags & TAG_OBJECT) != 0;
if (top_reg.is(result)) {
if (tag_result) {
- subq(result, Immediate(object_size - kHeapObjectTag));
+ subp(result, Immediate(object_size - kHeapObjectTag));
} else {
- subq(result, Immediate(object_size));
+ subp(result, Immediate(object_size));
}
} else if (tag_result) {
// Tag the result if requested.
ASSERT(kHeapObjectTag == 1);
- incq(result);
+ incp(result);
}
}
@@ -4119,7 +4229,7 @@ void MacroAssembler::Allocate(int header_size,
Label* gc_required,
AllocationFlags flags) {
ASSERT((flags & SIZE_IN_WORDS) == 0);
- lea(result_end, Operand(element_count, element_size, header_size));
+ leap(result_end, Operand(element_count, element_size, header_size));
Allocate(result_end, result, result_end, scratch, gc_required, flags);
}
@@ -4162,10 +4272,10 @@ void MacroAssembler::Allocate(Register object_size,
if (!object_size.is(result_end)) {
movp(result_end, object_size);
}
- addq(result_end, result);
+ addp(result_end, result);
j(carry, gc_required);
Operand limit_operand = ExternalOperand(allocation_limit);
- cmpq(result_end, limit_operand);
+ cmpp(result_end, limit_operand);
j(above, gc_required);
// Update allocation top.
@@ -4173,7 +4283,7 @@ void MacroAssembler::Allocate(Register object_size,
// Tag the result if requested.
if ((flags & TAG_OBJECT) != 0) {
- addq(result, Immediate(kHeapObjectTag));
+ addp(result, Immediate(kHeapObjectTag));
}
}
@@ -4183,10 +4293,10 @@ void MacroAssembler::UndoAllocationInNewSpace(Register object) {
ExternalReference::new_space_allocation_top_address(isolate());
// Make sure the object has no tag before resetting top.
- and_(object, Immediate(~kHeapObjectTagMask));
+ andp(object, Immediate(~kHeapObjectTagMask));
Operand top_operand = ExternalOperand(new_space_allocation_top);
#ifdef DEBUG
- cmpq(object, top_operand);
+ cmpp(object, top_operand);
Check(below, kUndoAllocationOfNonAllocatedMemory);
#endif
movp(top_operand, object);
@@ -4217,11 +4327,11 @@ void MacroAssembler::AllocateTwoByteString(Register result,
kObjectAlignmentMask;
ASSERT(kShortSize == 2);
// scratch1 = length * 2 + kObjectAlignmentMask.
- lea(scratch1, Operand(length, length, times_1, kObjectAlignmentMask +
+ leap(scratch1, Operand(length, length, times_1, kObjectAlignmentMask +
kHeaderAlignment));
- and_(scratch1, Immediate(~kObjectAlignmentMask));
+ andp(scratch1, Immediate(~kObjectAlignmentMask));
if (kHeaderAlignment > 0) {
- subq(scratch1, Immediate(kHeaderAlignment));
+ subp(scratch1, Immediate(kHeaderAlignment));
}
// Allocate two byte string in new space.
@@ -4256,10 +4366,10 @@ void MacroAssembler::AllocateAsciiString(Register result,
kObjectAlignmentMask;
movl(scratch1, length);
ASSERT(kCharSize == 1);
- addq(scratch1, Immediate(kObjectAlignmentMask + kHeaderAlignment));
- and_(scratch1, Immediate(~kObjectAlignmentMask));
+ addp(scratch1, Immediate(kObjectAlignmentMask + kHeaderAlignment));
+ andp(scratch1, Immediate(~kObjectAlignmentMask));
if (kHeaderAlignment > 0) {
- subq(scratch1, Immediate(kHeaderAlignment));
+ subp(scratch1, Immediate(kHeaderAlignment));
}
// Allocate ASCII string in new space.
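
Both string-size computations above are the usual align-up idiom: add the alignment mask (plus a header-alignment adjustment), then clear the low bits. A rough sketch of the two-byte case, assuming the constants keep their apparent meanings (header_alignment being the header size modulo the object alignment); this is not the real allocator code:

    #include <cstddef>

    // Mirrors the leap/andp/subp triple: round 2 * length + header_alignment up
    // to the object alignment, then drop header_alignment again so that
    // header + result stays aligned.
    size_t two_byte_string_data_size(size_t length, size_t align_mask,
                                     size_t header_alignment) {
      size_t size = 2 * length + align_mask + header_alignment;  // leap(...)
      size &= ~align_mask;                                       // andp(scratch1, ~mask)
      return size - header_alignment;                            // subp(..., kHeaderAlignment)
    }
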
@@ -4405,12 +4515,12 @@ void MacroAssembler::CopyBytes(Register destination,
// at the end of the ranges.
movp(scratch, length);
shrl(length, Immediate(kPointerSizeLog2));
- repmovsq();
+ repmovsp();
// Move remaining bytes of length.
andl(scratch, Immediate(kPointerSize - 1));
movp(length, Operand(source, scratch, times_1, -kPointerSize));
movp(Operand(destination, scratch, times_1, -kPointerSize), length);
- addq(destination, scratch);
+ addp(destination, scratch);
if (min_length <= kLongStringLimit) {
jmp(&done, Label::kNear);
@@ -4426,7 +4536,7 @@ void MacroAssembler::CopyBytes(Register destination,
// Move remaining bytes of length.
movp(scratch, Operand(source, length, times_1, -kPointerSize));
movp(Operand(destination, length, times_1, -kPointerSize), scratch);
- addq(destination, length);
+ addp(destination, length);
jmp(&done, Label::kNear);
bind(&short_string);
@@ -4438,8 +4548,8 @@ void MacroAssembler::CopyBytes(Register destination,
bind(&short_loop);
movb(scratch, Operand(source, 0));
movb(Operand(destination, 0), scratch);
- incq(source);
- incq(destination);
+ incp(source);
+ incp(destination);
decl(length);
j(not_zero, &short_loop);
}
@@ -4455,9 +4565,9 @@ void MacroAssembler::InitializeFieldsWithFiller(Register start_offset,
jmp(&entry);
bind(&loop);
movp(Operand(start_offset, 0), filler);
- addq(start_offset, Immediate(kPointerSize));
+ addp(start_offset, Immediate(kPointerSize));
bind(&entry);
- cmpq(start_offset, end_offset);
+ cmpp(start_offset, end_offset);
j(less, &loop);
}
@@ -4505,7 +4615,7 @@ void MacroAssembler::LoadTransitionedArrayMapConditional(
int offset = expected_kind * kPointerSize +
FixedArrayBase::kHeaderSize;
- cmpq(map_in_out, FieldOperand(scratch, offset));
+ cmpp(map_in_out, FieldOperand(scratch, offset));
j(not_equal, no_map_match);
// Use the transitioned cached map.
@@ -4515,30 +4625,6 @@ void MacroAssembler::LoadTransitionedArrayMapConditional(
}
-void MacroAssembler::LoadInitialArrayMap(
- Register function_in, Register scratch,
- Register map_out, bool can_have_holes) {
- ASSERT(!function_in.is(map_out));
- Label done;
- movp(map_out, FieldOperand(function_in,
- JSFunction::kPrototypeOrInitialMapOffset));
- if (!FLAG_smi_only_arrays) {
- ElementsKind kind = can_have_holes ? FAST_HOLEY_ELEMENTS : FAST_ELEMENTS;
- LoadTransitionedArrayMapConditional(FAST_SMI_ELEMENTS,
- kind,
- map_out,
- scratch,
- &done);
- } else if (can_have_holes) {
- LoadTransitionedArrayMapConditional(FAST_SMI_ELEMENTS,
- FAST_HOLEY_SMI_ELEMENTS,
- map_out,
- scratch,
- &done);
- }
- bind(&done);
-}
-
#ifdef _WIN64
static const int kRegisterPassedArguments = 4;
#else
@@ -4556,15 +4642,6 @@ void MacroAssembler::LoadGlobalFunction(int index, Register function) {
}
-void MacroAssembler::LoadArrayFunction(Register function) {
- movp(function,
- Operand(rsi, Context::SlotOffset(Context::GLOBAL_OBJECT_INDEX)));
- movp(function, FieldOperand(function, GlobalObject::kGlobalContextOffset));
- movp(function,
- Operand(function, Context::SlotOffset(Context::ARRAY_FUNCTION_INDEX)));
-}
-
-
void MacroAssembler::LoadGlobalFunctionInitialMap(Register function,
Register map) {
// Load the initial map. The global functions all have initial maps.
@@ -4608,13 +4685,13 @@ void MacroAssembler::EmitSeqStringSetCharCheck(Register string,
Abort(kNonObject);
bind(&is_object);
- push(value);
+ Push(value);
movp(value, FieldOperand(string, HeapObject::kMapOffset));
- movzxbq(value, FieldOperand(value, Map::kInstanceTypeOffset));
+ movzxbp(value, FieldOperand(value, Map::kInstanceTypeOffset));
andb(value, Immediate(kStringRepresentationMask | kStringEncodingMask));
- cmpq(value, Immediate(encoding_mask));
- pop(value);
+ cmpp(value, Immediate(encoding_mask));
+ Pop(value);
Check(equal, kUnexpectedStringType);
// The index is assumed to be untagged coming in, tag it to compare with the
@@ -4642,8 +4719,8 @@ void MacroAssembler::PrepareCallCFunction(int num_arguments) {
ASSERT(IsPowerOf2(frame_alignment));
int argument_slots_on_stack =
ArgumentStackSlotsForCFunctionCall(num_arguments);
- subq(rsp, Immediate((argument_slots_on_stack + 1) * kRegisterSize));
- and_(rsp, Immediate(-frame_alignment));
+ subp(rsp, Immediate((argument_slots_on_stack + 1) * kRegisterSize));
+ andp(rsp, Immediate(-frame_alignment));
movp(Operand(rsp, argument_slots_on_stack * kRegisterSize), kScratchRegister);
}
@@ -4712,10 +4789,10 @@ void MacroAssembler::CheckPageFlag(
Label::Distance condition_met_distance) {
ASSERT(cc == zero || cc == not_zero);
if (scratch.is(object)) {
- and_(scratch, Immediate(~Page::kPageAlignmentMask));
+ andp(scratch, Immediate(~Page::kPageAlignmentMask));
} else {
movp(scratch, Immediate(~Page::kPageAlignmentMask));
- and_(scratch, object);
+ andp(scratch, object);
}
if (mask < (1 << kBitsPerByte)) {
testb(Operand(scratch, MemoryChunk::kFlagsOffset),
@@ -4734,7 +4811,7 @@ void MacroAssembler::CheckMapDeprecated(Handle<Map> map,
Move(scratch, map);
movp(scratch, FieldOperand(scratch, Map::kBitField3Offset));
SmiToInteger32(scratch, scratch);
- and_(scratch, Immediate(Map::Deprecated::kMask));
+ andp(scratch, Immediate(Map::Deprecated::kMask));
j(not_zero, if_deprecated);
}
}
@@ -4754,10 +4831,10 @@ void MacroAssembler::JumpIfBlack(Register object,
movp(rcx, mask_scratch);
// Make rcx into a mask that covers both marking bits using the operation
// rcx = mask | (mask << 1).
- lea(rcx, Operand(mask_scratch, mask_scratch, times_2, 0));
+ leap(rcx, Operand(mask_scratch, mask_scratch, times_2, 0));
// Note that we are using a 4-byte aligned 8-byte load.
- and_(rcx, Operand(bitmap_scratch, MemoryChunk::kHeaderSize));
- cmpq(mask_scratch, rcx);
+ andp(rcx, Operand(bitmap_scratch, MemoryChunk::kHeaderSize));
+ cmpp(mask_scratch, rcx);
j(equal, on_black, on_black_distance);
}
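
The leap with scale times_2 in JumpIfBlack is just multiply-by-three; for a mask with a single bit set that equals mask | (mask << 1), the mask covering both marking bits mentioned in the comment. A one-line sketch (not V8 code):

    #include <cstdint>

    // For a one-bit mask, mask * 3 == mask | (mask << 1), so
    // leap(rcx, Operand(mask, mask, times_2, 0)) builds the cover mask
    // in a single instruction.
    uint64_t cover_both_marking_bits(uint64_t one_bit_mask) {
      return one_bit_mask + (one_bit_mask << 1);
    }
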
@@ -4791,19 +4868,19 @@ void MacroAssembler::GetMarkBits(Register addr_reg,
ASSERT(!AreAliased(addr_reg, bitmap_reg, mask_reg, rcx));
movp(bitmap_reg, addr_reg);
// Sign extended 32 bit immediate.
- and_(bitmap_reg, Immediate(~Page::kPageAlignmentMask));
+ andp(bitmap_reg, Immediate(~Page::kPageAlignmentMask));
movp(rcx, addr_reg);
int shift =
Bitmap::kBitsPerCellLog2 + kPointerSizeLog2 - Bitmap::kBytesPerCellLog2;
shrl(rcx, Immediate(shift));
- and_(rcx,
+ andp(rcx,
Immediate((Page::kPageAlignmentMask >> shift) &
~(Bitmap::kBytesPerCell - 1)));
- addq(bitmap_reg, rcx);
+ addp(bitmap_reg, rcx);
movp(rcx, addr_reg);
shrl(rcx, Immediate(kPointerSizeLog2));
- and_(rcx, Immediate((1 << Bitmap::kBitsPerCellLog2) - 1));
+ andp(rcx, Immediate((1 << Bitmap::kBitsPerCellLog2) - 1));
movl(mask_reg, Immediate(1));
shl_cl(mask_reg);
}
@@ -4828,20 +4905,20 @@ void MacroAssembler::EnsureNotWhite(
// Since both black and grey have a 1 in the first position and white does
// not have a 1 there we only need to check one bit.
- testq(Operand(bitmap_scratch, MemoryChunk::kHeaderSize), mask_scratch);
+ testp(Operand(bitmap_scratch, MemoryChunk::kHeaderSize), mask_scratch);
j(not_zero, &done, Label::kNear);
if (emit_debug_code()) {
// Check for impossible bit pattern.
Label ok;
- push(mask_scratch);
+ Push(mask_scratch);
// shl. May overflow making the check conservative.
- addq(mask_scratch, mask_scratch);
- testq(Operand(bitmap_scratch, MemoryChunk::kHeaderSize), mask_scratch);
+ addp(mask_scratch, mask_scratch);
+ testp(Operand(bitmap_scratch, MemoryChunk::kHeaderSize), mask_scratch);
j(zero, &ok, Label::kNear);
int3();
bind(&ok);
- pop(mask_scratch);
+ Pop(mask_scratch);
}
// Value is white. We check whether it is data that doesn't need scanning.
@@ -4884,21 +4961,21 @@ void MacroAssembler::EnsureNotWhite(
bind(&not_external);
// Sequential string, either ASCII or UC16.
ASSERT(kOneByteStringTag == 0x04);
- and_(length, Immediate(kStringEncodingMask));
- xor_(length, Immediate(kStringEncodingMask));
- addq(length, Immediate(0x04));
+ andp(length, Immediate(kStringEncodingMask));
+ xorp(length, Immediate(kStringEncodingMask));
+ addp(length, Immediate(0x04));
// Value now either 4 (if ASCII) or 8 (if UC16), i.e. char-size shifted by 2.
- imul(length, FieldOperand(value, String::kLengthOffset));
+ imulp(length, FieldOperand(value, String::kLengthOffset));
shr(length, Immediate(2 + kSmiTagSize + kSmiShiftSize));
- addq(length, Immediate(SeqString::kHeaderSize + kObjectAlignmentMask));
- and_(length, Immediate(~kObjectAlignmentMask));
+ addp(length, Immediate(SeqString::kHeaderSize + kObjectAlignmentMask));
+ andp(length, Immediate(~kObjectAlignmentMask));
bind(&is_data_object);
// Value is a data object, and it is white. Mark it black. Since we know
// that the object is white we can make it black by flipping one bit.
- or_(Operand(bitmap_scratch, MemoryChunk::kHeaderSize), mask_scratch);
+ orp(Operand(bitmap_scratch, MemoryChunk::kHeaderSize), mask_scratch);
- and_(bitmap_scratch, Immediate(~Page::kPageAlignmentMask));
+ andp(bitmap_scratch, Immediate(~Page::kPageAlignmentMask));
addl(Operand(bitmap_scratch, MemoryChunk::kLiveBytesOffset), length);
bind(&done);
@@ -4935,18 +5012,18 @@ void MacroAssembler::CheckEnumCache(Register null_value, Label* call_runtime) {
// Check that there are no elements. Register rcx contains the current JS
// object we've reached through the prototype chain.
Label no_elements;
- cmpq(empty_fixed_array_value,
+ cmpp(empty_fixed_array_value,
FieldOperand(rcx, JSObject::kElementsOffset));
j(equal, &no_elements);
// Second chance, the object may be using the empty slow element dictionary.
LoadRoot(kScratchRegister, Heap::kEmptySlowElementDictionaryRootIndex);
- cmpq(kScratchRegister, FieldOperand(rcx, JSObject::kElementsOffset));
+ cmpp(kScratchRegister, FieldOperand(rcx, JSObject::kElementsOffset));
j(not_equal, call_runtime);
bind(&no_elements);
movp(rcx, FieldOperand(rbx, Map::kPrototypeOffset));
- cmpq(rcx, null_value);
+ cmpp(rcx, null_value);
j(not_equal, &next);
}
@@ -4959,12 +5036,12 @@ void MacroAssembler::TestJSArrayForAllocationMemento(
ExternalReference new_space_allocation_top =
ExternalReference::new_space_allocation_top_address(isolate());
- lea(scratch_reg, Operand(receiver_reg,
+ leap(scratch_reg, Operand(receiver_reg,
JSArray::kSize + AllocationMemento::kSize - kHeapObjectTag));
Move(kScratchRegister, new_space_start);
- cmpq(scratch_reg, kScratchRegister);
+ cmpp(scratch_reg, kScratchRegister);
j(less, no_memento_found);
- cmpq(scratch_reg, ExternalOperand(new_space_allocation_top));
+ cmpp(scratch_reg, ExternalOperand(new_space_allocation_top));
j(greater, no_memento_found);
CompareRoot(MemOperand(scratch_reg, -AllocationMemento::kSize),
Heap::kAllocationMementoMapRootIndex);
@@ -4987,9 +5064,9 @@ void MacroAssembler::JumpIfDictionaryInPrototypeChain(
bind(&loop_again);
movp(current, FieldOperand(current, HeapObject::kMapOffset));
movp(scratch1, FieldOperand(current, Map::kBitField2Offset));
- and_(scratch1, Immediate(Map::kElementsKindMask));
+ andp(scratch1, Immediate(Map::kElementsKindMask));
shr(scratch1, Immediate(Map::kElementsKindShift));
- cmpq(scratch1, Immediate(DICTIONARY_ELEMENTS));
+ cmpp(scratch1, Immediate(DICTIONARY_ELEMENTS));
j(equal, found);
movp(current, FieldOperand(current, Map::kPrototypeOffset));
CompareRoot(current, Heap::kNullValueRootIndex);
@@ -4997,6 +5074,21 @@ void MacroAssembler::JumpIfDictionaryInPrototypeChain(
}
+void MacroAssembler::TruncatingDiv(Register dividend, int32_t divisor) {
+ ASSERT(!dividend.is(rax));
+ ASSERT(!dividend.is(rdx));
+ MultiplierAndShift ms(divisor);
+ movl(rax, Immediate(ms.multiplier()));
+ imull(dividend);
+ if (divisor > 0 && ms.multiplier() < 0) addl(rdx, dividend);
+ if (divisor < 0 && ms.multiplier() > 0) subl(rdx, dividend);
+ if (ms.shift() > 0) sarl(rdx, Immediate(ms.shift()));
+ movl(rax, dividend);
+ shrl(rax, Immediate(31));
+ addl(rdx, rax);
+}
+
+
} } // namespace v8::internal
#endif // V8_TARGET_ARCH_X64
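
The new TruncatingDiv helper is the classic multiply-and-shift replacement for a 32-bit division by a constant. A sketch of the arithmetic the emitted sequence performs, assuming a precomputed (multiplier, shift) pair; the real pair comes from MultiplierAndShift, which is not part of this hunk:

    #include <cstdint>

    // Mirrors the emitted sequence: imull leaves the high 32 bits of the signed
    // 64-bit product in rdx, the sign-dependent fix-ups and arithmetic shift
    // follow, and adding the dividend's sign bit rounds the quotient toward zero.
    int32_t truncating_div(int32_t dividend, int32_t divisor,
                           int32_t multiplier, int shift) {
      int64_t product = static_cast<int64_t>(multiplier) * dividend;  // imull(dividend)
      int32_t high = static_cast<int32_t>(product >> 32);             // result in rdx
      if (divisor > 0 && multiplier < 0) high += dividend;            // addl(rdx, dividend)
      if (divisor < 0 && multiplier > 0) high -= dividend;            // subl(rdx, dividend)
      if (shift > 0) high >>= shift;                                  // sarl(rdx, shift)
      return high + (static_cast<uint32_t>(dividend) >> 31);          // add sign bit
    }
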
diff --git a/deps/v8/src/x64/macro-assembler-x64.h b/deps/v8/src/x64/macro-assembler-x64.h
index 42245aa808..af65a65465 100644
--- a/deps/v8/src/x64/macro-assembler-x64.h
+++ b/deps/v8/src/x64/macro-assembler-x64.h
@@ -336,7 +336,7 @@ class MacroAssembler: public Assembler {
ExternalReference roots_array_start =
ExternalReference::roots_array_start(isolate());
Move(kRootRegister, roots_array_start);
- addq(kRootRegister, Immediate(kRootRegisterBias));
+ addp(kRootRegister, Immediate(kRootRegisterBias));
}
// ---------------------------------------------------------------------------
@@ -802,7 +802,7 @@ class MacroAssembler: public Assembler {
// Load a register with a long value as efficiently as possible.
void Set(Register dst, int64_t x);
- void Set(const Operand& dst, int64_t x);
+ void Set(const Operand& dst, intptr_t x);
// cvtsi2sd instruction only writes to the low 64-bit of dst register, which
// hinders register renaming and makes dependence chains longer. So we use
@@ -837,12 +837,16 @@ class MacroAssembler: public Assembler {
void Drop(int stack_elements);
void Call(Label* target) { call(target); }
- void Push(Register src) { push(src); }
- void Pop(Register dst) { pop(dst); }
- void PushReturnAddressFrom(Register src) { push(src); }
- void PopReturnAddressTo(Register dst) { pop(dst); }
+ void Push(Register src);
+ void Push(const Operand& src);
+ void Push(Immediate value);
+ void PushImm32(int32_t imm32);
+ void Pop(Register dst);
+ void Pop(const Operand& dst);
+ void PushReturnAddressFrom(Register src) { pushq(src); }
+ void PopReturnAddressTo(Register dst) { popq(dst); }
void Move(Register dst, ExternalReference ext) {
- movp(dst, reinterpret_cast<Address>(ext.address()),
+ movp(dst, reinterpret_cast<void*>(ext.address()),
RelocInfo::EXTERNAL_REFERENCE);
}
@@ -859,16 +863,18 @@ class MacroAssembler: public Assembler {
ASSERT(!RelocInfo::IsNone(rmode));
ASSERT(value->IsHeapObject());
ASSERT(!isolate()->heap()->InNewSpace(*value));
- movp(dst, value.location(), rmode);
+ movp(dst, reinterpret_cast<void*>(value.location()), rmode);
}
// Control Flow
void Jump(Address destination, RelocInfo::Mode rmode);
void Jump(ExternalReference ext);
+ void Jump(const Operand& op);
void Jump(Handle<Code> code_object, RelocInfo::Mode rmode);
void Call(Address destination, RelocInfo::Mode rmode);
void Call(ExternalReference ext);
+ void Call(const Operand& op);
void Call(Handle<Code> code_object,
RelocInfo::Mode rmode,
TypeFeedbackId ast_id = TypeFeedbackId::None());
@@ -1021,7 +1027,7 @@ class MacroAssembler: public Assembler {
static const int shift = Field::kShift + kSmiShift;
static const int mask = Field::kMask >> Field::kShift;
shr(reg, Immediate(shift));
- and_(reg, Immediate(mask));
+ andp(reg, Immediate(mask));
shl(reg, Immediate(kSmiShift));
}
@@ -1045,6 +1051,10 @@ class MacroAssembler: public Assembler {
// Abort execution if argument is not a name, enabled via --debug-code.
void AssertName(Register object);
+ // Abort execution if argument is not undefined or an AllocationSite, enabled
+ // via --debug-code.
+ void AssertUndefinedOrAllocationSite(Register object);
+
// Abort execution if argument is not the root value with the given index,
// enabled via --debug-code.
void AssertRootValue(Register src,
@@ -1232,15 +1242,8 @@ class MacroAssembler: public Assembler {
Register scratch,
Label* no_map_match);
- // Load the initial map for new Arrays from a JSFunction.
- void LoadInitialArrayMap(Register function_in,
- Register scratch,
- Register map_out,
- bool can_have_holes);
-
// Load the global function with the given index.
void LoadGlobalFunction(int index, Register function);
- void LoadArrayFunction(Register function);
// Load the initial map from the global function. The registers
// function and map can be the same.
@@ -1368,6 +1371,10 @@ class MacroAssembler: public Assembler {
Register filler);
+ // Emit code for a truncating division by a constant. The dividend register is
+ // unchanged, the result is in rdx, and rax gets clobbered.
+ void TruncatingDiv(Register dividend, int32_t divisor);
+
// ---------------------------------------------------------------------------
// StatsCounter support
@@ -1605,9 +1612,9 @@ extern void LogGeneratedCodeCoverage(const char* file_line);
Address x64_coverage_function = FUNCTION_ADDR(LogGeneratedCodeCoverage); \
masm->pushfq(); \
masm->Pushad(); \
- masm->push(Immediate(reinterpret_cast<int>(&__FILE_LINE__))); \
+ masm->Push(Immediate(reinterpret_cast<int>(&__FILE_LINE__))); \
masm->Call(x64_coverage_function, RelocInfo::EXTERNAL_REFERENCE); \
- masm->pop(rax); \
+ masm->Pop(rax); \
masm->Popad(); \
masm->popfq(); \
} \
diff --git a/deps/v8/src/x64/regexp-macro-assembler-x64.cc b/deps/v8/src/x64/regexp-macro-assembler-x64.cc
index 75e70c5975..c819c71cb9 100644
--- a/deps/v8/src/x64/regexp-macro-assembler-x64.cc
+++ b/deps/v8/src/x64/regexp-macro-assembler-x64.cc
@@ -166,7 +166,7 @@ void RegExpMacroAssemblerX64::AdvanceRegister(int reg, int by) {
ASSERT(reg >= 0);
ASSERT(reg < num_registers_);
if (by != 0) {
- __ addq(register_location(reg), Immediate(by));
+ __ addp(register_location(reg), Immediate(by));
}
}
@@ -175,7 +175,7 @@ void RegExpMacroAssemblerX64::Backtrack() {
CheckPreemption();
// Pop Code* offset from backtrack stack, add Code* and jump to location.
Pop(rbx);
- __ addq(rbx, code_object_pointer());
+ __ addp(rbx, code_object_pointer());
__ jmp(rbx);
}
@@ -203,8 +203,8 @@ void RegExpMacroAssemblerX64::CheckAtStart(Label* on_at_start) {
__ cmpl(Operand(rbp, kStartIndex), Immediate(0));
BranchOrBacktrack(not_equal, &not_at_start);
// If we did, are we still at the start of the input?
- __ lea(rax, Operand(rsi, rdi, times_1, 0));
- __ cmpq(rax, Operand(rbp, kInputStart));
+ __ leap(rax, Operand(rsi, rdi, times_1, 0));
+ __ cmpp(rax, Operand(rbp, kInputStart));
BranchOrBacktrack(equal, on_at_start);
__ bind(&not_at_start);
}
@@ -215,8 +215,8 @@ void RegExpMacroAssemblerX64::CheckNotAtStart(Label* on_not_at_start) {
__ cmpl(Operand(rbp, kStartIndex), Immediate(0));
BranchOrBacktrack(not_equal, on_not_at_start);
// If we did, are we still at the start of the input?
- __ lea(rax, Operand(rsi, rdi, times_1, 0));
- __ cmpq(rax, Operand(rbp, kInputStart));
+ __ leap(rax, Operand(rsi, rdi, times_1, 0));
+ __ cmpp(rax, Operand(rbp, kInputStart));
BranchOrBacktrack(not_equal, on_not_at_start);
}
@@ -243,7 +243,7 @@ void RegExpMacroAssemblerX64::CheckNotBackReferenceIgnoreCase(
Label fallthrough;
__ movq(rdx, register_location(start_reg)); // Offset of start of capture
__ movq(rbx, register_location(start_reg + 1)); // Offset of end of capture
- __ subq(rbx, rdx); // Length of capture.
+ __ subp(rbx, rdx); // Length of capture.
// -----------------------
// rdx = Start offset of capture.
@@ -273,9 +273,9 @@ void RegExpMacroAssemblerX64::CheckNotBackReferenceIgnoreCase(
on_no_match = &backtrack_label_;
}
- __ lea(r9, Operand(rsi, rdx, times_1, 0));
- __ lea(r11, Operand(rsi, rdi, times_1, 0));
- __ addq(rbx, r9); // End of capture
+ __ leap(r9, Operand(rsi, rdx, times_1, 0));
+ __ leap(r11, Operand(rsi, rdi, times_1, 0));
+ __ addp(rbx, r9); // End of capture
// ---------------------
// r11 - current input character address
// r9 - current capture character address
@@ -293,8 +293,8 @@ void RegExpMacroAssemblerX64::CheckNotBackReferenceIgnoreCase(
// Mismatch, try case-insensitive match (converting letters to lower-case).
// I.e., if or-ing with 0x20 makes values equal and in range 'a'-'z', it's
// a match.
- __ or_(rax, Immediate(0x20)); // Convert match character to lower-case.
- __ or_(rdx, Immediate(0x20)); // Convert capture character to lower-case.
+ __ orp(rax, Immediate(0x20)); // Convert match character to lower-case.
+ __ orp(rdx, Immediate(0x20)); // Convert capture character to lower-case.
__ cmpb(rax, rdx);
__ j(not_equal, on_no_match); // Definitely not equal.
__ subb(rax, Immediate('a'));
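
The lower-casing trick in the ignore-case loop above relies on ASCII upper- and lower-case letters differing only in bit 0x20: OR both characters with 0x20, and they match only if they were the same letter, with the follow-up 'a'..'z' range check rejecting non-letters that happen to collide. A small sketch of the check (not the V8 code path itself):

    #include <cstdint>

    // orp(rax, 0x20); orp(rdx, 0x20); cmpb; then subb(rax, 'a') plus range check.
    bool ascii_equal_ignore_case(uint8_t match_char, uint8_t capture_char) {
      match_char |= 0x20;
      capture_char |= 0x20;
      return match_char == capture_char &&
             match_char >= 'a' && match_char <= 'z';
    }
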
@@ -308,10 +308,10 @@ void RegExpMacroAssemblerX64::CheckNotBackReferenceIgnoreCase(
__ j(equal, on_no_match);
__ bind(&loop_increment);
// Increment pointers into match and capture strings.
- __ addq(r11, Immediate(1));
- __ addq(r9, Immediate(1));
+ __ addp(r11, Immediate(1));
+ __ addp(r9, Immediate(1));
// Compare to end of capture, and loop if not done.
- __ cmpq(r9, rbx);
+ __ cmpp(r9, rbx);
__ j(below, &loop);
// Compute new value of character position after the matched part.
@@ -322,10 +322,10 @@ void RegExpMacroAssemblerX64::CheckNotBackReferenceIgnoreCase(
// Save important/volatile registers before calling C function.
#ifndef _WIN64
// Caller save on Linux and callee save in Windows.
- __ push(rsi);
- __ push(rdi);
+ __ pushq(rsi);
+ __ pushq(rdi);
#endif
- __ push(backtrack_stackpointer());
+ __ pushq(backtrack_stackpointer());
static const int num_arguments = 4;
__ PrepareCallCFunction(num_arguments);
@@ -337,18 +337,18 @@ void RegExpMacroAssemblerX64::CheckNotBackReferenceIgnoreCase(
// Isolate* isolate
#ifdef _WIN64
// Compute and set byte_offset1 (start of capture).
- __ lea(rcx, Operand(rsi, rdx, times_1, 0));
+ __ leap(rcx, Operand(rsi, rdx, times_1, 0));
// Set byte_offset2.
- __ lea(rdx, Operand(rsi, rdi, times_1, 0));
+ __ leap(rdx, Operand(rsi, rdi, times_1, 0));
// Set byte_length.
__ movp(r8, rbx);
// Isolate.
__ LoadAddress(r9, ExternalReference::isolate_address(isolate()));
#else // AMD64 calling convention
// Compute byte_offset2 (current position = rsi+rdi).
- __ lea(rax, Operand(rsi, rdi, times_1, 0));
+ __ leap(rax, Operand(rsi, rdi, times_1, 0));
// Compute and set byte_offset1 (start of capture).
- __ lea(rdi, Operand(rsi, rdx, times_1, 0));
+ __ leap(rdi, Operand(rsi, rdx, times_1, 0));
// Set byte_offset2.
__ movp(rsi, rax);
// Set byte_length.
@@ -367,14 +367,14 @@ void RegExpMacroAssemblerX64::CheckNotBackReferenceIgnoreCase(
// Restore original values before reacting on result value.
__ Move(code_object_pointer(), masm_.CodeObject());
- __ pop(backtrack_stackpointer());
+ __ popq(backtrack_stackpointer());
#ifndef _WIN64
- __ pop(rdi);
- __ pop(rsi);
+ __ popq(rdi);
+ __ popq(rsi);
#endif
// Check if function returned non-zero for success or zero for failure.
- __ testq(rax, rax);
+ __ testp(rax, rax);
BranchOrBacktrack(zero, on_no_match);
// On success, increment position by length of capture.
// Requires that rbx is callee save (true for both Win64 and AMD64 ABIs).
@@ -392,7 +392,7 @@ void RegExpMacroAssemblerX64::CheckNotBackReference(
// Find length of back-referenced capture.
__ movq(rdx, register_location(start_reg));
__ movq(rax, register_location(start_reg + 1));
- __ subq(rax, rdx); // Length to check.
+ __ subp(rax, rdx); // Length to check.
// Fail on partial or illegal capture (start of capture after end of capture).
// This must not happen (no back-reference can reference a capture that wasn't
@@ -412,9 +412,9 @@ void RegExpMacroAssemblerX64::CheckNotBackReference(
BranchOrBacktrack(greater, on_no_match);
// Compute pointers to match string and capture string
- __ lea(rbx, Operand(rsi, rdi, times_1, 0)); // Start of match.
- __ addq(rdx, rsi); // Start of capture.
- __ lea(r9, Operand(rdx, rax, times_1, 0)); // End of capture
+ __ leap(rbx, Operand(rsi, rdi, times_1, 0)); // Start of match.
+ __ addp(rdx, rsi); // Start of capture.
+ __ leap(r9, Operand(rdx, rax, times_1, 0)); // End of capture
// -----------------------
// rbx - current capture character address.
@@ -433,10 +433,10 @@ void RegExpMacroAssemblerX64::CheckNotBackReference(
}
BranchOrBacktrack(not_equal, on_no_match);
// Increment pointers into capture and match string.
- __ addq(rbx, Immediate(char_size()));
- __ addq(rdx, Immediate(char_size()));
+ __ addp(rbx, Immediate(char_size()));
+ __ addp(rdx, Immediate(char_size()));
// Check if we have reached end of match area.
- __ cmpq(rdx, r9);
+ __ cmpp(rdx, r9);
__ j(below, &loop);
// Success.
@@ -462,7 +462,7 @@ void RegExpMacroAssemblerX64::CheckCharacterAfterAnd(uint32_t c,
__ testl(current_character(), Immediate(mask));
} else {
__ movl(rax, Immediate(mask));
- __ and_(rax, current_character());
+ __ andp(rax, current_character());
__ cmpl(rax, Immediate(c));
}
BranchOrBacktrack(equal, on_equal);
@@ -476,7 +476,7 @@ void RegExpMacroAssemblerX64::CheckNotCharacterAfterAnd(uint32_t c,
__ testl(current_character(), Immediate(mask));
} else {
__ movl(rax, Immediate(mask));
- __ and_(rax, current_character());
+ __ andp(rax, current_character());
__ cmpl(rax, Immediate(c));
}
BranchOrBacktrack(not_equal, on_not_equal);
@@ -489,8 +489,8 @@ void RegExpMacroAssemblerX64::CheckNotCharacterAfterMinusAnd(
uc16 mask,
Label* on_not_equal) {
ASSERT(minus < String::kMaxUtf16CodeUnit);
- __ lea(rax, Operand(current_character(), -minus));
- __ and_(rax, Immediate(mask));
+ __ leap(rax, Operand(current_character(), -minus));
+ __ andp(rax, Immediate(mask));
__ cmpl(rax, Immediate(c));
BranchOrBacktrack(not_equal, on_not_equal);
}
@@ -523,7 +523,7 @@ void RegExpMacroAssemblerX64::CheckBitInTable(
Register index = current_character();
if (mode_ != ASCII || kTableMask != String::kMaxOneByteCharCode) {
__ movp(rbx, current_character());
- __ and_(rbx, Immediate(kTableMask));
+ __ andp(rbx, Immediate(kTableMask));
index = rbx;
}
__ cmpb(FieldOperand(rax, index, times_1, ByteArray::kHeaderSize),
@@ -536,7 +536,7 @@ bool RegExpMacroAssemblerX64::CheckSpecialCharacterClass(uc16 type,
Label* on_no_match) {
// Range checks (c in min..max) are generally implemented by an unsigned
// (c - min) <= (max - min) check, using the sequence:
- // lea(rax, Operand(current_character(), -min)) or sub(rax, Immediate(min))
+ // leap(rax, Operand(current_character(), -min)) or sub(rax, Immediate(min))
// cmp(rax, Immediate(max - min))
switch (type) {
case 's':
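
The unsigned range-check idiom described in the comment above shows up directly in the whitespace and digit classes below: subtract the lower bound, then do one unsigned compare against the width of the range. A sketch in plain C++, assuming ASCII semantics only:

    #include <cstdint>

    // Mirrors leap(rax, Operand(current_character(), -'0'));
    //         cmpl(rax, Immediate('9' - '0')); BranchOrBacktrack(above, ...).
    bool is_ascii_digit(uint16_t c) {
      return static_cast<uint32_t>(c) - '0' <= static_cast<uint32_t>('9' - '0');
    }
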
@@ -547,7 +547,7 @@ bool RegExpMacroAssemblerX64::CheckSpecialCharacterClass(uc16 type,
__ cmpl(current_character(), Immediate(' '));
__ j(equal, &success, Label::kNear);
// Check range 0x09..0x0d
- __ lea(rax, Operand(current_character(), -'\t'));
+ __ leap(rax, Operand(current_character(), -'\t'));
__ cmpl(rax, Immediate('\r' - '\t'));
__ j(below_equal, &success, Label::kNear);
// \u00a0 (NBSP).
@@ -562,20 +562,20 @@ bool RegExpMacroAssemblerX64::CheckSpecialCharacterClass(uc16 type,
return false;
case 'd':
// Match ASCII digits ('0'..'9')
- __ lea(rax, Operand(current_character(), -'0'));
+ __ leap(rax, Operand(current_character(), -'0'));
__ cmpl(rax, Immediate('9' - '0'));
BranchOrBacktrack(above, on_no_match);
return true;
case 'D':
// Match non ASCII-digits
- __ lea(rax, Operand(current_character(), -'0'));
+ __ leap(rax, Operand(current_character(), -'0'));
__ cmpl(rax, Immediate('9' - '0'));
BranchOrBacktrack(below_equal, on_no_match);
return true;
case '.': {
// Match non-newlines (not 0x0a('\n'), 0x0d('\r'), 0x2028 and 0x2029)
__ movl(rax, current_character());
- __ xor_(rax, Immediate(0x01));
+ __ xorp(rax, Immediate(0x01));
// See if current character is '\n'^1 or '\r'^1, i.e., 0x0b or 0x0c
__ subl(rax, Immediate(0x0b));
__ cmpl(rax, Immediate(0x0c - 0x0b));
@@ -593,7 +593,7 @@ bool RegExpMacroAssemblerX64::CheckSpecialCharacterClass(uc16 type,
case 'n': {
// Match newlines (0x0a('\n'), 0x0d('\r'), 0x2028 and 0x2029)
__ movl(rax, current_character());
- __ xor_(rax, Immediate(0x01));
+ __ xorp(rax, Immediate(0x01));
// See if current character is '\n'^1 or '\r'^1, i.e., 0x0b or 0x0c
__ subl(rax, Immediate(0x0b));
__ cmpl(rax, Immediate(0x0c - 0x0b));
@@ -674,7 +674,7 @@ Handle<HeapObject> RegExpMacroAssemblerX64::GetCode(Handle<String> source) {
FrameScope scope(&masm_, StackFrame::MANUAL);
// Actually emit code to start a new stack frame.
- __ push(rbp);
+ __ pushq(rbp);
__ movp(rbp, rsp);
// Save parameters and callee-save registers. Order here should correspond
// to order of kBackup_ebx etc.
@@ -686,9 +686,9 @@ Handle<HeapObject> RegExpMacroAssemblerX64::GetCode(Handle<String> source) {
__ movq(Operand(rbp, kInputStart), r8);
__ movq(Operand(rbp, kInputEnd), r9);
// Callee-save on Win64.
- __ push(rsi);
- __ push(rdi);
- __ push(rbx);
+ __ pushq(rsi);
+ __ pushq(rdi);
+ __ pushq(rbx);
#else
// GCC passes arguments in rdi, rsi, rdx, rcx, r8, r9 (and then on stack).
// Push register parameters on stack for reference.
@@ -698,18 +698,18 @@ Handle<HeapObject> RegExpMacroAssemblerX64::GetCode(Handle<String> source) {
ASSERT_EQ(kInputEnd, -4 * kPointerSize);
ASSERT_EQ(kRegisterOutput, -5 * kPointerSize);
ASSERT_EQ(kNumOutputRegisters, -6 * kPointerSize);
- __ push(rdi);
- __ push(rsi);
- __ push(rdx);
- __ push(rcx);
- __ push(r8);
- __ push(r9);
-
- __ push(rbx); // Callee-save
+ __ pushq(rdi);
+ __ pushq(rsi);
+ __ pushq(rdx);
+ __ pushq(rcx);
+ __ pushq(r8);
+ __ pushq(r9);
+
+ __ pushq(rbx); // Callee-save
#endif
- __ push(Immediate(0)); // Number of successful matches in a global regexp.
- __ push(Immediate(0)); // Make room for "input start - 1" constant.
+ __ Push(Immediate(0)); // Number of successful matches in a global regexp.
+ __ Push(Immediate(0)); // Make room for "input start - 1" constant.
// Check if we have space on the stack for registers.
Label stack_limit_hit;
@@ -719,12 +719,12 @@ Handle<HeapObject> RegExpMacroAssemblerX64::GetCode(Handle<String> source) {
ExternalReference::address_of_stack_limit(isolate());
__ movp(rcx, rsp);
__ Move(kScratchRegister, stack_limit);
- __ subq(rcx, Operand(kScratchRegister, 0));
+ __ subp(rcx, Operand(kScratchRegister, 0));
// Handle it if the stack pointer is already below the stack limit.
__ j(below_equal, &stack_limit_hit);
// Check if there is room for the variable number of registers above
// the stack limit.
- __ cmpq(rcx, Immediate(num_registers_ * kPointerSize));
+ __ cmpp(rcx, Immediate(num_registers_ * kPointerSize));
__ j(above_equal, &stack_ok);
// Exit with OutOfMemory exception. There is not enough space on the stack
// for our working registers.
@@ -734,28 +734,28 @@ Handle<HeapObject> RegExpMacroAssemblerX64::GetCode(Handle<String> source) {
__ bind(&stack_limit_hit);
__ Move(code_object_pointer(), masm_.CodeObject());
CallCheckStackGuardState(); // Preserves no registers beside rbp and rsp.
- __ testq(rax, rax);
+ __ testp(rax, rax);
// If returned value is non-zero, we exit with the returned value as result.
__ j(not_zero, &return_rax);
__ bind(&stack_ok);
// Allocate space on stack for registers.
- __ subq(rsp, Immediate(num_registers_ * kPointerSize));
+ __ subp(rsp, Immediate(num_registers_ * kPointerSize));
// Load string length.
__ movp(rsi, Operand(rbp, kInputEnd));
// Load input position.
__ movp(rdi, Operand(rbp, kInputStart));
// Set up rdi to be negative offset from string end.
- __ subq(rdi, rsi);
+ __ subp(rdi, rsi);
// Set rax to address of char before start of the string
// (effectively string position -1).
__ movp(rbx, Operand(rbp, kStartIndex));
- __ neg(rbx);
+ __ negq(rbx);
if (mode_ == UC16) {
- __ lea(rax, Operand(rdi, rbx, times_2, -char_size()));
+ __ leap(rax, Operand(rdi, rbx, times_2, -char_size()));
} else {
- __ lea(rax, Operand(rdi, rbx, times_1, -char_size()));
+ __ leap(rax, Operand(rdi, rbx, times_1, -char_size()));
}
// Store this value in a local variable, for use when clearing
// position registers.
@@ -824,11 +824,11 @@ Handle<HeapObject> RegExpMacroAssemblerX64::GetCode(Handle<String> source) {
__ movp(rdx, Operand(rbp, kStartIndex));
__ movp(rbx, Operand(rbp, kRegisterOutput));
__ movp(rcx, Operand(rbp, kInputEnd));
- __ subq(rcx, Operand(rbp, kInputStart));
+ __ subp(rcx, Operand(rbp, kInputStart));
if (mode_ == UC16) {
- __ lea(rcx, Operand(rcx, rdx, times_2, 0));
+ __ leap(rcx, Operand(rcx, rdx, times_2, 0));
} else {
- __ addq(rcx, rdx);
+ __ addp(rcx, rdx);
}
for (int i = 0; i < num_saved_registers_; i++) {
__ movq(rax, register_location(i));
@@ -836,7 +836,7 @@ Handle<HeapObject> RegExpMacroAssemblerX64::GetCode(Handle<String> source) {
// Keep capture start in rdx for the zero-length check later.
__ movp(rdx, rax);
}
- __ addq(rax, rcx); // Convert to index from start, not end.
+ __ addp(rax, rcx); // Convert to index from start, not end.
if (mode_ == UC16) {
__ sar(rax, Immediate(1)); // Convert byte index to character index.
}
@@ -847,18 +847,18 @@ Handle<HeapObject> RegExpMacroAssemblerX64::GetCode(Handle<String> source) {
if (global()) {
// Restart matching if the regular expression is flagged as global.
// Increment success counter.
- __ incq(Operand(rbp, kSuccessfulCaptures));
+ __ incp(Operand(rbp, kSuccessfulCaptures));
// Capture results have been stored, so the number of remaining global
// output registers is reduced by the number of stored captures.
__ movsxlq(rcx, Operand(rbp, kNumOutputRegisters));
- __ subq(rcx, Immediate(num_saved_registers_));
+ __ subp(rcx, Immediate(num_saved_registers_));
// Check whether we have enough room for another set of capture results.
- __ cmpq(rcx, Immediate(num_saved_registers_));
+ __ cmpp(rcx, Immediate(num_saved_registers_));
__ j(less, &exit_label_);
__ movp(Operand(rbp, kNumOutputRegisters), rcx);
// Advance the location for output.
- __ addq(Operand(rbp, kRegisterOutput),
+ __ addp(Operand(rbp, kRegisterOutput),
Immediate(num_saved_registers_ * kIntSize));
// Prepare rax to initialize registers with its value in the next run.
@@ -867,11 +867,11 @@ Handle<HeapObject> RegExpMacroAssemblerX64::GetCode(Handle<String> source) {
if (global_with_zero_length_check()) {
// Special case for zero-length matches.
// rdx: capture start index
- __ cmpq(rdi, rdx);
+ __ cmpp(rdi, rdx);
// Not a zero-length match, restart.
__ j(not_equal, &load_char_start_regexp);
// rdi (offset from the end) is zero if we already reached the end.
- __ testq(rdi, rdi);
+ __ testp(rdi, rdi);
__ j(zero, &exit_label_, Label::kNear);
// Advance current position after a zero-length match.
if (mode_ == UC16) {
@@ -896,10 +896,10 @@ Handle<HeapObject> RegExpMacroAssemblerX64::GetCode(Handle<String> source) {
__ bind(&return_rax);
#ifdef _WIN64
// Restore callee save registers.
- __ lea(rsp, Operand(rbp, kLastCalleeSaveRegister));
- __ pop(rbx);
- __ pop(rdi);
- __ pop(rsi);
+ __ leap(rsp, Operand(rbp, kLastCalleeSaveRegister));
+ __ popq(rbx);
+ __ popq(rdi);
+ __ popq(rsi);
// Stack now at rbp.
#else
// Restore callee save register.
@@ -908,7 +908,7 @@ Handle<HeapObject> RegExpMacroAssemblerX64::GetCode(Handle<String> source) {
__ movp(rsp, rbp);
#endif
// Exit function frame, restore previous one.
- __ pop(rbp);
+ __ popq(rbp);
__ ret(0);
// Backtrack code (branch target for conditional backtracks).
@@ -923,19 +923,19 @@ Handle<HeapObject> RegExpMacroAssemblerX64::GetCode(Handle<String> source) {
if (check_preempt_label_.is_linked()) {
SafeCallTarget(&check_preempt_label_);
- __ push(backtrack_stackpointer());
- __ push(rdi);
+ __ pushq(backtrack_stackpointer());
+ __ pushq(rdi);
CallCheckStackGuardState();
- __ testq(rax, rax);
+ __ testp(rax, rax);
// If returning non-zero, we should end execution with the given
// result as return value.
__ j(not_zero, &return_rax);
// Restore registers.
__ Move(code_object_pointer(), masm_.CodeObject());
- __ pop(rdi);
- __ pop(backtrack_stackpointer());
+ __ popq(rdi);
+ __ popq(backtrack_stackpointer());
// String might have moved: Reload esi from frame.
__ movp(rsi, Operand(rbp, kInputEnd));
SafeReturn();
@@ -950,8 +950,8 @@ Handle<HeapObject> RegExpMacroAssemblerX64::GetCode(Handle<String> source) {
// Save registers before calling C function
#ifndef _WIN64
// Callee-save in Microsoft 64-bit ABI, but not in AMD64 ABI.
- __ push(rsi);
- __ push(rdi);
+ __ pushq(rsi);
+ __ pushq(rdi);
#endif
// Call GrowStack(backtrack_stackpointer())
@@ -960,12 +960,12 @@ Handle<HeapObject> RegExpMacroAssemblerX64::GetCode(Handle<String> source) {
#ifdef _WIN64
// Microsoft passes parameters in rcx, rdx, r8.
// First argument, backtrack stackpointer, is already in rcx.
- __ lea(rdx, Operand(rbp, kStackHighEnd)); // Second argument
+ __ leap(rdx, Operand(rbp, kStackHighEnd)); // Second argument
__ LoadAddress(r8, ExternalReference::isolate_address(isolate()));
#else
// AMD64 ABI passes parameters in rdi, rsi, rdx.
__ movp(rdi, backtrack_stackpointer()); // First argument.
- __ lea(rsi, Operand(rbp, kStackHighEnd)); // Second argument.
+ __ leap(rsi, Operand(rbp, kStackHighEnd)); // Second argument.
__ LoadAddress(rdx, ExternalReference::isolate_address(isolate()));
#endif
ExternalReference grow_stack =
@@ -973,15 +973,15 @@ Handle<HeapObject> RegExpMacroAssemblerX64::GetCode(Handle<String> source) {
__ CallCFunction(grow_stack, num_arguments);
// If return NULL, we have failed to grow the stack, and
// must exit with a stack-overflow exception.
- __ testq(rax, rax);
+ __ testp(rax, rax);
__ j(equal, &exit_with_exception);
// Otherwise use return value as new stack pointer.
__ movp(backtrack_stackpointer(), rax);
// Restore saved registers and continue.
__ Move(code_object_pointer(), masm_.CodeObject());
#ifndef _WIN64
- __ pop(rdi);
- __ pop(rsi);
+ __ popq(rdi);
+ __ popq(rsi);
#endif
SafeReturn();
}
@@ -1015,7 +1015,7 @@ void RegExpMacroAssemblerX64::GoTo(Label* to) {
void RegExpMacroAssemblerX64::IfRegisterGE(int reg,
int comparand,
Label* if_ge) {
- __ cmpq(register_location(reg), Immediate(comparand));
+ __ cmpp(register_location(reg), Immediate(comparand));
BranchOrBacktrack(greater_equal, if_ge);
}
@@ -1023,14 +1023,14 @@ void RegExpMacroAssemblerX64::IfRegisterGE(int reg,
void RegExpMacroAssemblerX64::IfRegisterLT(int reg,
int comparand,
Label* if_lt) {
- __ cmpq(register_location(reg), Immediate(comparand));
+ __ cmpp(register_location(reg), Immediate(comparand));
BranchOrBacktrack(less, if_lt);
}
void RegExpMacroAssemblerX64::IfRegisterEqPos(int reg,
Label* if_eq) {
- __ cmpq(rdi, register_location(reg));
+ __ cmpp(rdi, register_location(reg));
BranchOrBacktrack(equal, if_eq);
}
@@ -1091,13 +1091,13 @@ void RegExpMacroAssemblerX64::ReadCurrentPositionFromRegister(int reg) {
void RegExpMacroAssemblerX64::ReadStackPointerFromRegister(int reg) {
__ movq(backtrack_stackpointer(), register_location(reg));
- __ addq(backtrack_stackpointer(), Operand(rbp, kStackHighEnd));
+ __ addp(backtrack_stackpointer(), Operand(rbp, kStackHighEnd));
}
void RegExpMacroAssemblerX64::SetCurrentPositionFromEnd(int by) {
Label after_position;
- __ cmpq(rdi, Immediate(-by * char_size()));
+ __ cmpp(rdi, Immediate(-by * char_size()));
__ j(greater_equal, &after_position, Label::kNear);
__ movq(rdi, Immediate(-by * char_size()));
// On RegExp code entry (where this operation is used), the character before
@@ -1125,7 +1125,7 @@ void RegExpMacroAssemblerX64::WriteCurrentPositionToRegister(int reg,
if (cp_offset == 0) {
__ movp(register_location(reg), rdi);
} else {
- __ lea(rax, Operand(rdi, cp_offset * char_size()));
+ __ leap(rax, Operand(rdi, cp_offset * char_size()));
__ movp(register_location(reg), rax);
}
}
@@ -1142,7 +1142,7 @@ void RegExpMacroAssemblerX64::ClearRegisters(int reg_from, int reg_to) {
void RegExpMacroAssemblerX64::WriteStackPointerToRegister(int reg) {
__ movp(rax, backtrack_stackpointer());
- __ subq(rax, Operand(rbp, kStackHighEnd));
+ __ subp(rax, Operand(rbp, kStackHighEnd));
__ movp(register_location(reg), rax);
}
@@ -1161,7 +1161,7 @@ void RegExpMacroAssemblerX64::CallCheckStackGuardState() {
__ movp(r8, rbp);
// First argument: Next address on the stack (will be address of
// return address).
- __ lea(rcx, Operand(rsp, -kPointerSize));
+ __ leap(rcx, Operand(rsp, -kPointerSize));
#else
// Third argument: RegExp code frame pointer.
__ movp(rdx, rbp);
@@ -1169,7 +1169,7 @@ void RegExpMacroAssemblerX64::CallCheckStackGuardState() {
__ movp(rsi, code_object_pointer());
// First argument: Next address on the stack (will be address of
// return address).
- __ lea(rdi, Operand(rsp, -kPointerSize));
+ __ leap(rdi, Operand(rsp, -kRegisterSize));
#endif
ExternalReference stack_check =
ExternalReference::re_check_stack_guard_state(isolate());
@@ -1323,12 +1323,12 @@ void RegExpMacroAssemblerX64::SafeCall(Label* to) {
void RegExpMacroAssemblerX64::SafeCallTarget(Label* label) {
__ bind(label);
- __ subq(Operand(rsp, 0), code_object_pointer());
+ __ subp(Operand(rsp, 0), code_object_pointer());
}
void RegExpMacroAssemblerX64::SafeReturn() {
- __ addq(Operand(rsp, 0), code_object_pointer());
+ __ addp(Operand(rsp, 0), code_object_pointer());
__ ret(0);
}
@@ -1336,14 +1336,14 @@ void RegExpMacroAssemblerX64::SafeReturn() {
void RegExpMacroAssemblerX64::Push(Register source) {
ASSERT(!source.is(backtrack_stackpointer()));
// Notice: This updates flags, unlike normal Push.
- __ subq(backtrack_stackpointer(), Immediate(kIntSize));
+ __ subp(backtrack_stackpointer(), Immediate(kIntSize));
__ movl(Operand(backtrack_stackpointer(), 0), source);
}
void RegExpMacroAssemblerX64::Push(Immediate value) {
// Notice: This updates flags, unlike normal Push.
- __ subq(backtrack_stackpointer(), Immediate(kIntSize));
+ __ subp(backtrack_stackpointer(), Immediate(kIntSize));
__ movl(Operand(backtrack_stackpointer(), 0), value);
}
@@ -1367,7 +1367,7 @@ void RegExpMacroAssemblerX64::FixupCodeRelativePositions() {
void RegExpMacroAssemblerX64::Push(Label* backtrack_target) {
- __ subq(backtrack_stackpointer(), Immediate(kIntSize));
+ __ subp(backtrack_stackpointer(), Immediate(kIntSize));
__ movl(Operand(backtrack_stackpointer(), 0), backtrack_target);
MarkPositionForCodeRelativeFixup();
}
@@ -1377,12 +1377,12 @@ void RegExpMacroAssemblerX64::Pop(Register target) {
ASSERT(!target.is(backtrack_stackpointer()));
__ movsxlq(target, Operand(backtrack_stackpointer(), 0));
// Notice: This updates flags, unlike normal Pop.
- __ addq(backtrack_stackpointer(), Immediate(kIntSize));
+ __ addp(backtrack_stackpointer(), Immediate(kIntSize));
}
void RegExpMacroAssemblerX64::Drop() {
- __ addq(backtrack_stackpointer(), Immediate(kIntSize));
+ __ addp(backtrack_stackpointer(), Immediate(kIntSize));
}
@@ -1392,7 +1392,7 @@ void RegExpMacroAssemblerX64::CheckPreemption() {
ExternalReference stack_limit =
ExternalReference::address_of_stack_limit(isolate());
__ load_rax(stack_limit);
- __ cmpq(rsp, rax);
+ __ cmpp(rsp, rax);
__ j(above, &no_preempt);
SafeCall(&check_preempt_label_);
@@ -1406,7 +1406,7 @@ void RegExpMacroAssemblerX64::CheckStackLimit() {
ExternalReference stack_limit =
ExternalReference::address_of_regexp_stack_limit(isolate());
__ load_rax(stack_limit);
- __ cmpq(backtrack_stackpointer(), rax);
+ __ cmpp(backtrack_stackpointer(), rax);
__ j(above, &no_stack_overflow);
SafeCall(&stack_overflow_label_);
diff --git a/deps/v8/src/x64/stub-cache-x64.cc b/deps/v8/src/x64/stub-cache-x64.cc
index a43d709b17..13e822da2b 100644
--- a/deps/v8/src/x64/stub-cache-x64.cc
+++ b/deps/v8/src/x64/stub-cache-x64.cc
@@ -49,10 +49,12 @@ static void ProbeTable(Isolate* isolate,
// The offset is scaled by 4, based on
// kHeapObjectTagSize, which is two bits
Register offset) {
- // We need to scale up the pointer by 2 because the offset is scaled by less
+ // We need to scale up the pointer by 2 when the offset is scaled by less
// than the pointer size.
- ASSERT(kPointerSizeLog2 == kHeapObjectTagSize + 1);
- ScaleFactor scale_factor = times_2;
+ ASSERT(kPointerSize == kInt64Size
+ ? kPointerSizeLog2 == kHeapObjectTagSize + 1
+ : kPointerSizeLog2 == kHeapObjectTagSize);
+ ScaleFactor scale_factor = kPointerSize == kInt64Size ? times_2 : times_1;
ASSERT_EQ(3 * kPointerSize, sizeof(StubCache::Entry));
// The offset register holds the entry offset times four (due to masking
@@ -62,7 +64,7 @@ static void ProbeTable(Isolate* isolate,
Label miss;
// Multiply by 3 because there are 3 fields per entry (name, code, map).
- __ lea(offset, Operand(offset, offset, times_2, 0));
+ __ leap(offset, Operand(offset, offset, times_2, 0));
__ LoadAddress(kScratchRegister, key_offset);
@@ -77,7 +79,7 @@ static void ProbeTable(Isolate* isolate,
// Use key_offset + kPointerSize * 2, rather than loading map_offset.
__ movp(kScratchRegister,
Operand(kScratchRegister, offset, scale_factor, kPointerSize * 2));
- __ cmpq(kScratchRegister, FieldOperand(receiver, HeapObject::kMapOffset));
+ __ cmpp(kScratchRegister, FieldOperand(receiver, HeapObject::kMapOffset));
__ j(not_equal, &miss);
// Get the code entry from the cache.
@@ -87,7 +89,7 @@ static void ProbeTable(Isolate* isolate,
// Check that the flags match what we're looking for.
__ movl(offset, FieldOperand(kScratchRegister, Code::kFlagsOffset));
- __ and_(offset, Immediate(~Code::kFlagsNotUsedInLookup));
+ __ andp(offset, Immediate(~Code::kFlagsNotUsedInLookup));
__ cmpl(offset, Immediate(flags));
__ j(not_equal, &miss);
@@ -100,7 +102,7 @@ static void ProbeTable(Isolate* isolate,
#endif
// Jump to the first instruction in the code stub.
- __ addq(kScratchRegister, Immediate(Code::kHeaderSize - kHeapObjectTag));
+ __ addp(kScratchRegister, Immediate(Code::kHeaderSize - kHeapObjectTag));
__ jmp(kScratchRegister);
__ bind(&miss);
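The scale-factor change in ProbeTable above makes the entry addressing work for both pointer sizes: the incoming offset is the entry index already multiplied by 4 (2 to the power of kHeapObjectTagSize), the leap multiplies it by 3 for the three fields per entry, and the remaining factor (times_2 with 8-byte pointers, times_1 with 4-byte pointers) brings it up to pointer-size units. A standalone check of that arithmetic, with an illustrative entry index:

    #include <cstdio>

    static const int kHeapObjectTagSize = 2;            // offsets arrive scaled by 4
    static const int kPointerSize = sizeof(void*);      // 8 on x64, 4 on x32

    int main() {
      int entry_index = 7;                              // illustrative
      int offset = entry_index << kHeapObjectTagSize;   // "entry offset times four"
      offset += offset * 2;                             // leap offset, [offset+offset*2]: x3 fields
      int scale_factor = (kPointerSize == 8) ? 2 : 1;   // times_2 vs. times_1
      int byte_offset = offset * scale_factor;
      // Entry i of the table starts at i * 3 * kPointerSize bytes.
      std::printf("computed %d, expected %d\n",
                  byte_offset, entry_index * 3 * kPointerSize);
      return byte_offset == entry_index * 3 * kPointerSize ? 0 : 1;
    }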
@@ -193,10 +195,10 @@ void StubCache::GenerateProbe(MacroAssembler* masm,
__ movl(scratch, FieldOperand(name, Name::kHashFieldOffset));
// Use only the low 32 bits of the map pointer.
__ addl(scratch, FieldOperand(receiver, HeapObject::kMapOffset));
- __ xor_(scratch, Immediate(flags));
+ __ xorp(scratch, Immediate(flags));
// We mask out the last two bits because they are not part of the hash and
// they are always 01 for maps. Also in the two 'and' instructions below.
- __ and_(scratch, Immediate((kPrimaryTableSize - 1) << kHeapObjectTagSize));
+ __ andp(scratch, Immediate((kPrimaryTableSize - 1) << kHeapObjectTagSize));
// Probe the primary table.
ProbeTable(isolate, masm, flags, kPrimary, receiver, name, scratch);
@@ -204,11 +206,11 @@ void StubCache::GenerateProbe(MacroAssembler* masm,
// Primary miss: Compute hash for secondary probe.
__ movl(scratch, FieldOperand(name, Name::kHashFieldOffset));
__ addl(scratch, FieldOperand(receiver, HeapObject::kMapOffset));
- __ xor_(scratch, Immediate(flags));
- __ and_(scratch, Immediate((kPrimaryTableSize - 1) << kHeapObjectTagSize));
+ __ xorp(scratch, Immediate(flags));
+ __ andp(scratch, Immediate((kPrimaryTableSize - 1) << kHeapObjectTagSize));
__ subl(scratch, name);
__ addl(scratch, Immediate(flags));
- __ and_(scratch, Immediate((kSecondaryTableSize - 1) << kHeapObjectTagSize));
+ __ andp(scratch, Immediate((kSecondaryTableSize - 1) << kHeapObjectTagSize));
// Probe the secondary table.
ProbeTable(isolate, masm, flags, kSecondary, receiver, name, scratch);
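GenerateProbe above forms the primary offset as (name hash + low 32 bits of the map) xor flags, masked to the primary table, and on a miss derives the secondary offset by subtracting the name and re-adding the flags before masking to the smaller table; both results stay pre-scaled by 4, which is the form ProbeTable consumes. A simplified model of that hashing, with made-up table sizes and inputs standing in for the 32-bit values the generated code reads from the heap:

    #include <cstdint>
    #include <cstdio>

    static const int kHeapObjectTagSize = 2;
    static const uint32_t kPrimaryTableSize = 2048;    // illustrative power of two
    static const uint32_t kSecondaryTableSize = 512;   // illustrative power of two

    static uint32_t PrimaryOffset(uint32_t name_hash, uint32_t map_low32,
                                  uint32_t flags) {
      uint32_t scratch = name_hash + map_low32;        // addl
      scratch ^= flags;                                // xorp
      return scratch & ((kPrimaryTableSize - 1) << kHeapObjectTagSize);
    }

    static uint32_t SecondaryOffset(uint32_t primary_offset, uint32_t name_low32,
                                    uint32_t flags) {
      // Unsigned arithmetic wraps modulo 2^32, just like the 32-bit subl/addl.
      uint32_t scratch = primary_offset - name_low32 + flags;
      return scratch & ((kSecondaryTableSize - 1) << kHeapObjectTagSize);
    }

    int main() {
      uint32_t primary = PrimaryOffset(0x1234u, 0xdeadbeefu, 0x40u);
      uint32_t secondary = SecondaryOffset(primary, 0xcafe1000u, 0x40u);
      std::printf("primary 0x%x, secondary 0x%x\n", primary, secondary);
      return 0;
    }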
@@ -281,54 +283,6 @@ void StubCompiler::GenerateLoadArrayLength(MacroAssembler* masm,
}
-// Generate code to check if an object is a string. If the object is
-// a string, the map's instance type is left in the scratch register.
-static void GenerateStringCheck(MacroAssembler* masm,
- Register receiver,
- Register scratch,
- Label* smi,
- Label* non_string_object) {
- // Check that the object isn't a smi.
- __ JumpIfSmi(receiver, smi);
-
- // Check that the object is a string.
- __ movp(scratch, FieldOperand(receiver, HeapObject::kMapOffset));
- __ movzxbq(scratch, FieldOperand(scratch, Map::kInstanceTypeOffset));
- STATIC_ASSERT(kNotStringTag != 0);
- __ testl(scratch, Immediate(kNotStringTag));
- __ j(not_zero, non_string_object);
-}
-
-
-void StubCompiler::GenerateLoadStringLength(MacroAssembler* masm,
- Register receiver,
- Register scratch1,
- Register scratch2,
- Label* miss) {
- Label check_wrapper;
-
- // Check if the object is a string leaving the instance type in the
- // scratch register.
- GenerateStringCheck(masm, receiver, scratch1, miss, &check_wrapper);
-
- // Load length directly from the string.
- __ movp(rax, FieldOperand(receiver, String::kLengthOffset));
- __ ret(0);
-
- // Check if the object is a JSValue wrapper.
- __ bind(&check_wrapper);
- __ cmpl(scratch1, Immediate(JS_VALUE_TYPE));
- __ j(not_equal, miss);
-
- // Check if the wrapped value is a string and load the length
- // directly if it is.
- __ movp(scratch2, FieldOperand(receiver, JSValue::kValueOffset));
- GenerateStringCheck(masm, scratch2, scratch1, miss, miss);
- __ movp(rax, FieldOperand(scratch2, String::kLengthOffset));
- __ ret(0);
-}
-
-
void StubCompiler::GenerateLoadFunctionPrototype(MacroAssembler* masm,
Register receiver,
Register result,
@@ -346,7 +300,7 @@ void StubCompiler::GenerateFastPropertyLoad(MacroAssembler* masm,
bool inobject,
int index,
Representation representation) {
- ASSERT(!FLAG_track_double_fields || !representation.IsDouble());
+ ASSERT(!representation.IsDouble());
int offset = index * kPointerSize;
if (!inobject) {
// Calculate the offset into the properties array.
@@ -368,13 +322,13 @@ static void PushInterceptorArguments(MacroAssembler* masm,
STATIC_ASSERT(StubCache::kInterceptorArgsThisIndex == 2);
STATIC_ASSERT(StubCache::kInterceptorArgsHolderIndex == 3);
STATIC_ASSERT(StubCache::kInterceptorArgsLength == 4);
- __ push(name);
+ __ Push(name);
Handle<InterceptorInfo> interceptor(holder_obj->GetNamedInterceptor());
ASSERT(!masm->isolate()->heap()->InNewSpace(*interceptor));
__ Move(kScratchRegister, interceptor);
- __ push(kScratchRegister);
- __ push(receiver);
- __ push(holder);
+ __ Push(kScratchRegister);
+ __ Push(receiver);
+ __ Push(holder);
}
@@ -393,24 +347,25 @@ static void CompileCallLoadPropertyWithInterceptor(
// Generate call to api function.
-static void GenerateFastApiCall(MacroAssembler* masm,
- const CallOptimization& optimization,
- Handle<Map> receiver_map,
- Register receiver,
- Register scratch_in,
- int argc,
- Register* values) {
+void StubCompiler::GenerateFastApiCall(MacroAssembler* masm,
+ const CallOptimization& optimization,
+ Handle<Map> receiver_map,
+ Register receiver,
+ Register scratch_in,
+ bool is_store,
+ int argc,
+ Register* values) {
ASSERT(optimization.is_simple_api_call());
__ PopReturnAddressTo(scratch_in);
// receiver
- __ push(receiver);
+ __ Push(receiver);
// Write the arguments to stack frame.
for (int i = 0; i < argc; i++) {
Register arg = values[argc-1-i];
ASSERT(!receiver.is(arg));
ASSERT(!scratch_in.is(arg));
- __ push(arg);
+ __ Push(arg);
}
__ PushReturnAddressFrom(scratch_in);
// Stack now matches JSFunction abi.
@@ -465,7 +420,7 @@ static void GenerateFastApiCall(MacroAssembler* masm,
api_function_address, function_address, RelocInfo::EXTERNAL_REFERENCE);
// Jump to stub.
- CallApiFunctionStub stub(true, call_data_undefined, argc);
+ CallApiFunctionStub stub(is_store, call_data_undefined, argc);
__ TailCallStub(&stub);
}
@@ -536,11 +491,11 @@ void StoreStubCompiler::GenerateStoreTransition(MacroAssembler* masm,
Handle<Object> constant(descriptors->GetValue(descriptor), masm->isolate());
__ Cmp(value_reg, constant);
__ j(not_equal, miss_label);
- } else if (FLAG_track_fields && representation.IsSmi()) {
+ } else if (representation.IsSmi()) {
__ JumpIfNotSmi(value_reg, miss_label);
- } else if (FLAG_track_heap_object_fields && representation.IsHeapObject()) {
+ } else if (representation.IsHeapObject()) {
__ JumpIfSmi(value_reg, miss_label);
- } else if (FLAG_track_double_fields && representation.IsDouble()) {
+ } else if (representation.IsDouble()) {
Label do_store, heap_number;
__ AllocateHeapNumber(storage_reg, scratch1, slow);
@@ -568,9 +523,9 @@ void StoreStubCompiler::GenerateStoreTransition(MacroAssembler* masm,
// The properties must be extended before we can store the value.
// We jump to a runtime call that extends the properties array.
__ PopReturnAddressTo(scratch1);
- __ push(receiver_reg);
+ __ Push(receiver_reg);
__ Push(transition);
- __ push(value_reg);
+ __ Push(value_reg);
__ PushReturnAddressFrom(scratch1);
__ TailCallExternalReference(
ExternalReference(IC_Utility(IC::kSharedStoreIC_ExtendStorage),
@@ -613,15 +568,15 @@ void StoreStubCompiler::GenerateStoreTransition(MacroAssembler* masm,
if (index < 0) {
// Set the property straight into the object.
int offset = object->map()->instance_size() + (index * kPointerSize);
- if (FLAG_track_double_fields && representation.IsDouble()) {
+ if (representation.IsDouble()) {
__ movp(FieldOperand(receiver_reg, offset), storage_reg);
} else {
__ movp(FieldOperand(receiver_reg, offset), value_reg);
}
- if (!FLAG_track_fields || !representation.IsSmi()) {
+ if (!representation.IsSmi()) {
// Update the write barrier for the array address.
- if (!FLAG_track_double_fields || !representation.IsDouble()) {
+ if (!representation.IsDouble()) {
__ movp(storage_reg, value_reg);
}
__ RecordWriteField(
@@ -633,15 +588,15 @@ void StoreStubCompiler::GenerateStoreTransition(MacroAssembler* masm,
int offset = index * kPointerSize + FixedArray::kHeaderSize;
// Get the properties array (optimistically).
__ movp(scratch1, FieldOperand(receiver_reg, JSObject::kPropertiesOffset));
- if (FLAG_track_double_fields && representation.IsDouble()) {
+ if (representation.IsDouble()) {
__ movp(FieldOperand(scratch1, offset), storage_reg);
} else {
__ movp(FieldOperand(scratch1, offset), value_reg);
}
- if (!FLAG_track_fields || !representation.IsSmi()) {
+ if (!representation.IsSmi()) {
// Update the write barrier for the array address.
- if (!FLAG_track_double_fields || !representation.IsDouble()) {
+ if (!representation.IsDouble()) {
__ movp(storage_reg, value_reg);
}
__ RecordWriteField(
@@ -680,11 +635,11 @@ void StoreStubCompiler::GenerateStoreField(MacroAssembler* masm,
Representation representation = lookup->representation();
ASSERT(!representation.IsNone());
- if (FLAG_track_fields && representation.IsSmi()) {
+ if (representation.IsSmi()) {
__ JumpIfNotSmi(value_reg, miss_label);
- } else if (FLAG_track_heap_object_fields && representation.IsHeapObject()) {
+ } else if (representation.IsHeapObject()) {
__ JumpIfSmi(value_reg, miss_label);
- } else if (FLAG_track_double_fields && representation.IsDouble()) {
+ } else if (representation.IsDouble()) {
// Load the double storage.
if (index < 0) {
int offset = object->map()->instance_size() + (index * kPointerSize);
@@ -723,7 +678,7 @@ void StoreStubCompiler::GenerateStoreField(MacroAssembler* masm,
int offset = object->map()->instance_size() + (index * kPointerSize);
__ movp(FieldOperand(receiver_reg, offset), value_reg);
- if (!FLAG_track_fields || !representation.IsSmi()) {
+ if (!representation.IsSmi()) {
// Update the write barrier for the array address.
// Pass the value being stored in the now unused name_reg.
__ movp(name_reg, value_reg);
@@ -738,7 +693,7 @@ void StoreStubCompiler::GenerateStoreField(MacroAssembler* masm,
__ movp(scratch1, FieldOperand(receiver_reg, JSObject::kPropertiesOffset));
__ movp(FieldOperand(scratch1, offset), value_reg);
- if (!FLAG_track_fields || !representation.IsSmi()) {
+ if (!representation.IsSmi()) {
// Update the write barrier for the array address.
// Pass the value being stored in the now unused name_reg.
__ movp(name_reg, value_reg);
@@ -773,9 +728,6 @@ Register StubCompiler::CheckPrototypes(Handle<HeapType> type,
Label* miss,
PrototypeCheckType check) {
Handle<Map> receiver_map(IC::TypeToMap(*type, isolate()));
- // Make sure that the type feedback oracle harvests the receiver map.
- // TODO(svenpanne) Remove this hack when all ICs are reworked.
- __ Move(scratch1, receiver_map);
// Make sure there's no overlap between holder and object registers.
ASSERT(!scratch1.is(object_reg) && !scratch1.is(holder_reg));
@@ -941,7 +893,7 @@ Register LoadStubCompiler::CallbackHandlerFrontend(
Operand(dictionary, index, times_pointer_size,
kValueOffset - kHeapObjectTag));
__ Move(scratch3(), callback, RelocInfo::EMBEDDED_OBJECT);
- __ cmpq(scratch2(), scratch3());
+ __ cmpp(scratch2(), scratch3());
__ j(not_equal, &miss);
}
@@ -970,15 +922,6 @@ void LoadStubCompiler::GenerateLoadField(Register reg,
void LoadStubCompiler::GenerateLoadCallback(
- const CallOptimization& call_optimization,
- Handle<Map> receiver_map) {
- GenerateFastApiCall(
- masm(), call_optimization, receiver_map,
- receiver(), scratch1(), 0, NULL);
-}
-
-
-void LoadStubCompiler::GenerateLoadCallback(
Register reg,
Handle<ExecutableAccessorInfo> callback) {
// Insert additional parameters into the stack frame above return address.
@@ -992,22 +935,22 @@ void LoadStubCompiler::GenerateLoadCallback(
STATIC_ASSERT(PropertyCallbackArguments::kDataIndex == 4);
STATIC_ASSERT(PropertyCallbackArguments::kThisIndex == 5);
STATIC_ASSERT(PropertyCallbackArguments::kArgsLength == 6);
- __ push(receiver()); // receiver
+ __ Push(receiver()); // receiver
if (heap()->InNewSpace(callback->data())) {
ASSERT(!scratch2().is(reg));
__ Move(scratch2(), callback);
- __ push(FieldOperand(scratch2(),
+ __ Push(FieldOperand(scratch2(),
ExecutableAccessorInfo::kDataOffset)); // data
} else {
__ Push(Handle<Object>(callback->data(), isolate()));
}
ASSERT(!kScratchRegister.is(reg));
__ LoadRoot(kScratchRegister, Heap::kUndefinedValueRootIndex);
- __ push(kScratchRegister); // return value
- __ push(kScratchRegister); // return value default
+ __ Push(kScratchRegister); // return value
+ __ Push(kScratchRegister); // return value default
__ PushAddress(ExternalReference::isolate_address(isolate()));
- __ push(reg); // holder
- __ push(name()); // name
+ __ Push(reg); // holder
+ __ Push(name()); // name
// Save a pointer to where we pushed the arguments pointer. This will be
// passed as the const PropertyAccessorInfo& to the C++ callback.
@@ -1075,10 +1018,10 @@ void LoadStubCompiler::GenerateLoadInterceptor(
FrameScope frame_scope(masm(), StackFrame::INTERNAL);
if (must_preserve_receiver_reg) {
- __ push(receiver());
+ __ Push(receiver());
}
- __ push(holder_reg);
- __ push(this->name());
+ __ Push(holder_reg);
+ __ Push(this->name());
// Invoke an interceptor. Note: map checks from receiver to
// interceptor's holder has been compiled before (see a caller
@@ -1096,10 +1039,10 @@ void LoadStubCompiler::GenerateLoadInterceptor(
__ ret(0);
__ bind(&interceptor_failed);
- __ pop(this->name());
- __ pop(holder_reg);
+ __ Pop(this->name());
+ __ Pop(holder_reg);
if (must_preserve_receiver_reg) {
- __ pop(receiver());
+ __ Pop(receiver());
}
// Leave the internal frame.
@@ -1141,11 +1084,11 @@ Handle<Code> StoreStubCompiler::CompileStoreCallback(
IC::CurrentTypeOf(object, isolate()), receiver(), holder, name);
__ PopReturnAddressTo(scratch1());
- __ push(receiver());
- __ push(holder_reg);
+ __ Push(receiver());
+ __ Push(holder_reg);
__ Push(callback); // callback info
__ Push(name);
- __ push(value());
+ __ Push(value());
__ PushReturnAddressFrom(scratch1());
// Do tail-call to the runtime system.
@@ -1158,24 +1101,6 @@ Handle<Code> StoreStubCompiler::CompileStoreCallback(
}
-Handle<Code> StoreStubCompiler::CompileStoreCallback(
- Handle<JSObject> object,
- Handle<JSObject> holder,
- Handle<Name> name,
- const CallOptimization& call_optimization) {
- HandlerFrontend(IC::CurrentTypeOf(object, isolate()),
- receiver(), holder, name);
-
- Register values[] = { value() };
- GenerateFastApiCall(
- masm(), call_optimization, handle(object->map()),
- receiver(), scratch1(), 1, values);
-
- // Return the generated code.
- return GetCode(kind(), Code::FAST, name);
-}
-
-
#undef __
#define __ ACCESS_MASM(masm)
@@ -1183,20 +1108,16 @@ Handle<Code> StoreStubCompiler::CompileStoreCallback(
void StoreStubCompiler::GenerateStoreViaSetter(
MacroAssembler* masm,
Handle<HeapType> type,
+ Register receiver,
Handle<JSFunction> setter) {
// ----------- S t a t e -------------
- // -- rax : value
- // -- rcx : name
- // -- rdx : receiver
// -- rsp[0] : return address
// -----------------------------------
{
FrameScope scope(masm, StackFrame::INTERNAL);
- Register receiver = rdx;
- Register value = rax;
// Save value register, so we can restore it later.
- __ push(value);
+ __ Push(value());
if (!setter.is_null()) {
// Call the JavaScript setter with receiver and value on the stack.
@@ -1205,8 +1126,8 @@ void StoreStubCompiler::GenerateStoreViaSetter(
__ movp(receiver,
FieldOperand(receiver, JSGlobalObject::kGlobalReceiverOffset));
}
- __ push(receiver);
- __ push(value);
+ __ Push(receiver);
+ __ Push(value());
ParameterCount actual(1);
ParameterCount expected(setter);
__ InvokeFunction(setter, expected, actual,
@@ -1218,7 +1139,7 @@ void StoreStubCompiler::GenerateStoreViaSetter(
}
// We have to return the passed value, not the return value of the setter.
- __ pop(rax);
+ __ Pop(rax);
// Restore context register.
__ movp(rsi, Operand(rbp, StandardFrameConstants::kContextOffset));
@@ -1235,9 +1156,9 @@ Handle<Code> StoreStubCompiler::CompileStoreInterceptor(
Handle<JSObject> object,
Handle<Name> name) {
__ PopReturnAddressTo(scratch1());
- __ push(receiver());
- __ push(this->name());
- __ push(value());
+ __ Push(receiver());
+ __ Push(this->name());
+ __ Push(value());
__ PushReturnAddressFrom(scratch1());
// Do tail-call to the runtime system.
@@ -1250,6 +1171,20 @@ Handle<Code> StoreStubCompiler::CompileStoreInterceptor(
}
+void StoreStubCompiler::GenerateStoreArrayLength() {
+ // Prepare tail call to StoreIC_ArrayLength.
+ __ PopReturnAddressTo(scratch1());
+ __ Push(receiver());
+ __ Push(value());
+ __ PushReturnAddressFrom(scratch1());
+
+ ExternalReference ref =
+ ExternalReference(IC_Utility(IC::kStoreIC_ArrayLength),
+ masm()->isolate());
+ __ TailCallExternalReference(ref, 2, 1);
+}
+
+
Handle<Code> KeyedStoreStubCompiler::CompileStorePolymorphic(
MapHandleList* receiver_maps,
CodeHandleList* handler_stubs,
@@ -1314,16 +1249,21 @@ Register* KeyedLoadStubCompiler::registers() {
}
+Register StoreStubCompiler::value() {
+ return rax;
+}
+
+
Register* StoreStubCompiler::registers() {
- // receiver, name, value, scratch1, scratch2, scratch3.
- static Register registers[] = { rdx, rcx, rax, rbx, rdi, r8 };
+ // receiver, name, scratch1, scratch2, scratch3.
+ static Register registers[] = { rdx, rcx, rbx, rdi, r8 };
return registers;
}
Register* KeyedStoreStubCompiler::registers() {
- // receiver, name, value, scratch1, scratch2, scratch3.
- static Register registers[] = { rdx, rcx, rax, rbx, rdi, r8 };
+ // receiver, name, scratch1, scratch2, scratch3.
+ static Register registers[] = { rdx, rcx, rbx, rdi, r8 };
return registers;
}
@@ -1351,7 +1291,7 @@ void LoadStubCompiler::GenerateLoadViaGetter(MacroAssembler* masm,
__ movp(receiver,
FieldOperand(receiver, JSGlobalObject::kGlobalReceiverOffset));
}
- __ push(receiver);
+ __ Push(receiver);
ParameterCount actual(0);
ParameterCount expected(getter);
__ InvokeFunction(getter, expected, actual,
diff --git a/deps/v8/src/zone-allocator.h b/deps/v8/src/zone-allocator.h
index 5245c6b1bf..7ed1713904 100644
--- a/deps/v8/src/zone-allocator.h
+++ b/deps/v8/src/zone-allocator.h
@@ -50,7 +50,9 @@ class zone_allocator {
explicit zone_allocator(Zone* zone) throw() : zone_(zone) {}
explicit zone_allocator(const zone_allocator& other) throw()
: zone_(other.zone_) {}
- template<typename U> zone_allocator(const zone_allocator<U>&) throw() {}
+ template<typename U> zone_allocator(const zone_allocator<U>& other) throw()
+ : zone_(other.zone_) {}
+ template<typename U> friend class zone_allocator;
pointer address(reference x) const {return &x;}
const_pointer address(const_reference x) const {return &x;}
@@ -69,9 +71,17 @@ class zone_allocator {
void construct(pointer p, const T& val) {
new(static_cast<void*>(p)) T(val);
}
- void destroy(pointer p) { (static_cast<T*>(p))->~T(); }
+ void destroy(pointer p) { p->~T(); }
+
+ bool operator==(zone_allocator const& other) {
+ return zone_ == other.zone_;
+ }
+ bool operator!=(zone_allocator const& other) {
+ return zone_ != other.zone_;
+ }
private:
+ zone_allocator();
Zone* zone_;
};
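The zone-allocator.h change matters because standard containers rebind the supplied allocator to their internal node type: the old converting constructor default-initialized the rebound copy and left zone_ uninitialized, while the new one copies it (the friend declaration is what makes other.zone_ accessible across instantiations), and the added comparisons report two allocators equal exactly when they share a Zone. A self-contained sketch of the same pattern, using a toy arena in place of Zone:

    #include <cassert>
    #include <cstddef>
    #include <list>

    struct Arena {                       // toy stand-in for v8::internal::Zone
      alignas(8) char buffer[1 << 16];
      size_t used;
      Arena() : used(0) {}
      void* Allocate(size_t n) {
        void* result = buffer + used;
        used += (n + 7) & ~size_t(7);    // 8-byte align, like Zone::kAlignment
        return result;
      }
    };

    template <typename T>
    class arena_allocator {
     public:
      typedef T value_type;
      explicit arena_allocator(Arena* arena) : arena_(arena) {}
      // The converting constructor must copy the arena pointer; the friend
      // declaration lets it read other.arena_ across instantiations.
      template <typename U>
      arena_allocator(const arena_allocator<U>& other) : arena_(other.arena_) {}
      template <typename U> friend class arena_allocator;

      T* allocate(size_t n) {
        return static_cast<T*>(arena_->Allocate(n * sizeof(T)));
      }
      void deallocate(T*, size_t) {}     // arena memory is released all at once
      bool operator==(const arena_allocator& other) const {
        return arena_ == other.arena_;
      }
      bool operator!=(const arena_allocator& other) const {
        return arena_ != other.arena_;
      }

     private:
      Arena* arena_;
    };

    int main() {
      Arena arena;
      arena_allocator<int> alloc(&arena);
      // std::list rebinds arena_allocator<int> to its node type through the
      // converting constructor; if that constructor dropped arena_, the first
      // push_back would allocate through an uninitialized pointer.
      std::list<int, arena_allocator<int> > numbers(alloc);
      numbers.push_back(1);
      numbers.push_back(2);
      assert(numbers.back() == 2);
      return 0;
    }

std::list is used here only because it rebinds eagerly; any node-based container exercises the converting constructor the same way.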
diff --git a/deps/v8/src/zone-inl.h b/deps/v8/src/zone-inl.h
index f257382a2d..9b82c05408 100644
--- a/deps/v8/src/zone-inl.h
+++ b/deps/v8/src/zone-inl.h
@@ -30,6 +30,12 @@
#include "zone.h"
+#ifdef V8_USE_ADDRESS_SANITIZER
+ #include <sanitizer/asan_interface.h>
+#else
+ #define ASAN_UNPOISON_MEMORY_REGION(start, size) ((void) 0)
+#endif
+
#include "counters.h"
#include "isolate.h"
#include "utils.h"
@@ -39,6 +45,9 @@ namespace v8 {
namespace internal {
+static const int kASanRedzoneBytes = 24; // Must be a multiple of 8.
+
+
inline void* Zone::New(int size) {
// Round up the requested size to fit the alignment.
size = RoundUp(size, kAlignment);
@@ -54,12 +63,25 @@ inline void* Zone::New(int size) {
// Check if the requested size is available without expanding.
Address result = position_;
- if (size > limit_ - position_) {
- result = NewExpand(size);
+ int size_with_redzone =
+#ifdef V8_USE_ADDRESS_SANITIZER
+ size + kASanRedzoneBytes;
+#else
+ size;
+#endif
+
+ if (size_with_redzone > limit_ - position_) {
+ result = NewExpand(size_with_redzone);
} else {
- position_ += size;
+ position_ += size_with_redzone;
}
+#ifdef V8_USE_ADDRESS_SANITIZER
+ Address redzone_position = result + size;
+ ASSERT(redzone_position + kASanRedzoneBytes == position_);
+ ASAN_POISON_MEMORY_REGION(redzone_position, kASanRedzoneBytes);
+#endif
+
// Check that the result has the proper alignment and return it.
ASSERT(IsAddressAligned(result, kAlignment, 0));
allocation_size_ += size;
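Under AddressSanitizer, New() now reserves kASanRedzoneBytes of poisoned memory after every allocation, so a read or write just past the returned block is reported instead of silently bleeding into the next allocation. A minimal model of that layout, assuming the 24-byte redzone defined above; the poison/unpoison macros come from <sanitizer/asan_interface.h> and fall back to no-ops as in the patch, while MiniZone itself is only an illustration, not Zone.

    #include <cstddef>

    #if defined(__has_feature)
    # if __has_feature(address_sanitizer)
    #  include <sanitizer/asan_interface.h>
    #  define MINIZONE_ASAN 1
    # endif
    #endif
    #ifndef MINIZONE_ASAN
    # define ASAN_POISON_MEMORY_REGION(start, size) ((void) 0)
    # define ASAN_UNPOISON_MEMORY_REGION(start, size) ((void) 0)
    #endif

    static const int kASanRedzoneBytes = 24;   // matches the constant above

    struct MiniZone {                          // illustrative only
      alignas(8) char buffer[4096];
      size_t position;
      MiniZone() : position(0) {}
      void* New(size_t size) {
        size = (size + 7) & ~size_t(7);        // kAlignment is 8 under ASan
        char* result = buffer + position;
        position += size + kASanRedzoneBytes;  // reserve the redzone as well
        // Everything the caller did not ask for is poisoned; touching it is
        // reported as a use-after-poison.
        ASAN_POISON_MEMORY_REGION(result + size, kASanRedzoneBytes);
        return result;
      }
      ~MiniZone() {
        // Un-poison before the memory is reused (or zapped), as DeleteAll() does.
        ASAN_UNPOISON_MEMORY_REGION(buffer, sizeof(buffer));
      }
    };

    int main() {
      MiniZone zone;
      int* p = static_cast<int*>(zone.New(4 * sizeof(int)));
      p[3] = 1;      // last valid element
      // p[4] = 1;   // would land in the redzone and trip ASan when it is enabled
      return p[3] - 1;
    }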
@@ -69,6 +91,7 @@ inline void* Zone::New(int size) {
template <typename T>
T* Zone::NewArray(int length) {
+ CHECK(std::numeric_limits<int>::max() / static_cast<int>(sizeof(T)) > length);
return static_cast<T*>(New(length * sizeof(T)));
}
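NewArray() now guards the multiplication it is about to do: length * sizeof(T) is computed in int before being handed to New(), so the CHECK rejects any length large enough to overflow. A standalone illustration of where the limit falls for a 24-byte element type; the helper and test values are illustrative only.

    #include <climits>
    #include <cstdio>

    // Same expression as the CHECK, factored out so it can be probed directly.
    template <typename T>
    static bool ArrayLengthFits(int length) {
      return INT_MAX / static_cast<int>(sizeof(T)) > length;
    }

    int main() {
      struct Entry { void* a; void* b; void* c; };   // 24 bytes with 8-byte pointers
      // INT_MAX / 24 is 89,478,485, so 89 million entries pass and 90 million fail.
      std::printf("89000000 fits: %d\n", ArrayLengthFits<Entry>(89000000));
      std::printf("90000000 fits: %d\n", ArrayLengthFits<Entry>(90000000));
      return 0;
    }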
diff --git a/deps/v8/src/zone.cc b/deps/v8/src/zone.cc
index 417f895e5a..4f91371294 100644
--- a/deps/v8/src/zone.cc
+++ b/deps/v8/src/zone.cc
@@ -104,6 +104,8 @@ void Zone::DeleteAll() {
} else {
int size = current->size();
#ifdef DEBUG
+ // Un-poison first so the zapping doesn't trigger ASan complaints.
+ ASAN_UNPOISON_MEMORY_REGION(current, size);
// Zap the entire current segment (including the header).
memset(current, kZapDeadByte, size);
#endif
@@ -120,6 +122,8 @@ void Zone::DeleteAll() {
Address start = keep->start();
position_ = RoundUp(start, kAlignment);
limit_ = keep->end();
+ // Un-poison so we can re-use the segment later.
+ ASAN_UNPOISON_MEMORY_REGION(start, keep->capacity());
#ifdef DEBUG
// Zap the contents of the kept segment (but not the header).
memset(start, kZapDeadByte, keep->capacity());
@@ -143,6 +147,8 @@ void Zone::DeleteKeptSegment() {
if (segment_head_ != NULL) {
int size = segment_head_->size();
#ifdef DEBUG
+ // Un-poison first so the zapping doesn't trigger ASan complaints.
+ ASAN_UNPOISON_MEMORY_REGION(segment_head_, size);
// Zap the entire kept segment (including the header).
memset(segment_head_, kZapDeadByte, size);
#endif
diff --git a/deps/v8/src/zone.h b/deps/v8/src/zone.h
index bd7cc39b0c..83421b3963 100644
--- a/deps/v8/src/zone.h
+++ b/deps/v8/src/zone.h
@@ -38,6 +38,11 @@
namespace v8 {
namespace internal {
+#if defined(__has_feature)
+ #if __has_feature(address_sanitizer)
+ #define V8_USE_ADDRESS_SANITIZER
+ #endif
+#endif
class Segment;
class Isolate;
@@ -89,8 +94,13 @@ class Zone {
// All pointers returned from New() have this alignment. In addition, if the
// object being allocated has a size that is divisible by 8 then its alignment
- // will be 8.
+ // will be 8. ASan requires 8-byte alignment.
+#ifdef V8_USE_ADDRESS_SANITIZER
+ static const int kAlignment = 8;
+ STATIC_ASSERT(kPointerSize <= 8);
+#else
static const int kAlignment = kPointerSize;
+#endif
// Never allocate segments smaller than this size in bytes.
static const int kMinimumSegmentSize = 8 * KB;