author     Ben Noordhuis <info@bnoordhuis.nl>  2013-06-11 23:45:46 +0200
committer  Ben Noordhuis <info@bnoordhuis.nl>  2013-06-11 23:46:00 +0200
commit     6dd78074a3c0a7579ca5e919021587c22ff763ae (patch)
tree       e225460f8e76126f4e4b2e1809dbd4c9c2ba511b
parent     9ae1d182ba98629ac7c7b9100022ac93133494b7 (diff)
download   node-new-6dd78074a3c0a7579ca5e919021587c22ff763ae.tar.gz

v8: upgrade to v3.19.13
-rw-r--r--  deps/v8/ChangeLog | 102
-rw-r--r--  deps/v8/Makefile.nacl | 2
-rw-r--r--  deps/v8/build/common.gypi | 188
-rw-r--r--  deps/v8/include/v8-profiler.h | 24
-rw-r--r--  deps/v8/include/v8.h | 567
-rw-r--r--  deps/v8/samples/lineprocessor.cc | 27
-rw-r--r--  deps/v8/samples/process.cc | 110
-rw-r--r--  deps/v8/samples/shell.cc | 50
-rw-r--r--  deps/v8/src/accessors.cc | 16
-rw-r--r--  deps/v8/src/api.cc | 344
-rw-r--r--  deps/v8/src/api.h | 12
-rw-r--r--  deps/v8/src/arguments.h | 24
-rw-r--r--  deps/v8/src/arm/assembler-arm.cc | 3
-rw-r--r--  deps/v8/src/arm/builtins-arm.cc | 50
-rw-r--r--  deps/v8/src/arm/code-stubs-arm.cc | 361
-rw-r--r--  deps/v8/src/arm/codegen-arm.cc | 44
-rw-r--r--  deps/v8/src/arm/codegen-arm.h | 2
-rw-r--r--  deps/v8/src/arm/deoptimizer-arm.cc | 2
-rw-r--r--  deps/v8/src/arm/full-codegen-arm.cc | 174
-rw-r--r--  deps/v8/src/arm/ic-arm.cc | 23
-rw-r--r--  deps/v8/src/arm/lithium-arm.cc | 206
-rw-r--r--  deps/v8/src/arm/lithium-arm.h | 91
-rw-r--r--  deps/v8/src/arm/lithium-codegen-arm.cc | 698
-rw-r--r--  deps/v8/src/arm/lithium-codegen-arm.h | 12
-rw-r--r--  deps/v8/src/arm/lithium-gap-resolver-arm.cc | 8
-rw-r--r--  deps/v8/src/arm/macro-assembler-arm.cc | 35
-rw-r--r--  deps/v8/src/arm/macro-assembler-arm.h | 11
-rw-r--r--  deps/v8/src/arm/regexp-macro-assembler-arm.cc | 57
-rw-r--r--  deps/v8/src/arm/regexp-macro-assembler-arm.h | 4
-rw-r--r--  deps/v8/src/arm/stub-cache-arm.cc | 200
-rw-r--r--  deps/v8/src/array.js | 188
-rw-r--r--  deps/v8/src/assert-scope.h | 168
-rw-r--r--  deps/v8/src/ast.cc | 91
-rw-r--r--  deps/v8/src/ast.h | 392
-rw-r--r--  deps/v8/src/atomicops_internals_mips_gcc.h | 17
-rw-r--r--  deps/v8/src/bootstrapper.cc | 55
-rw-r--r--  deps/v8/src/bootstrapper.h | 9
-rw-r--r--  deps/v8/src/builtins.cc | 87
-rw-r--r--  deps/v8/src/builtins.h | 2
-rw-r--r--  deps/v8/src/checks.cc | 2
-rw-r--r--  deps/v8/src/code-stubs-hydrogen.cc | 344
-rw-r--r--  deps/v8/src/code-stubs.cc | 103
-rw-r--r--  deps/v8/src/code-stubs.h | 333
-rw-r--r--  deps/v8/src/codegen.cc | 12
-rw-r--r--  deps/v8/src/codegen.h | 12
-rw-r--r--  deps/v8/src/compiler.cc | 38
-rw-r--r--  deps/v8/src/compiler.h | 2
-rw-r--r--  deps/v8/src/contexts.h | 8
-rw-r--r--  deps/v8/src/cpu-profiler.cc | 3
-rw-r--r--  deps/v8/src/d8-posix.cc | 97
-rw-r--r--  deps/v8/src/d8.cc | 236
-rw-r--r--  deps/v8/src/d8.h | 94
-rw-r--r--  deps/v8/src/debug.cc | 49
-rw-r--r--  deps/v8/src/debug.h | 4
-rw-r--r--  deps/v8/src/deoptimizer.cc | 88
-rw-r--r--  deps/v8/src/deoptimizer.h | 39
-rw-r--r--  deps/v8/src/disassembler.cc | 4
-rw-r--r--  deps/v8/src/elements.cc | 8
-rw-r--r--  deps/v8/src/execution.cc | 5
-rw-r--r--  deps/v8/src/extensions/externalize-string-extension.cc | 32
-rw-r--r--  deps/v8/src/extensions/externalize-string-extension.h | 4
-rw-r--r--  deps/v8/src/extensions/gc-extension.cc | 3
-rw-r--r--  deps/v8/src/extensions/gc-extension.h | 2
-rw-r--r--  deps/v8/src/extensions/statistics-extension.cc | 6
-rw-r--r--  deps/v8/src/extensions/statistics-extension.h | 2
-rw-r--r--  deps/v8/src/factory.cc | 54
-rw-r--r--  deps/v8/src/factory.h | 23
-rw-r--r--  deps/v8/src/flag-definitions.h | 20
-rw-r--r--  deps/v8/src/frames.cc | 2
-rw-r--r--  deps/v8/src/full-codegen.cc | 15
-rw-r--r--  deps/v8/src/full-codegen.h | 5
-rw-r--r--  deps/v8/src/gdb-jit.cc | 2
-rw-r--r--  deps/v8/src/generator.js | 19
-rw-r--r--  deps/v8/src/global-handles.cc | 146
-rw-r--r--  deps/v8/src/global-handles.h | 15
-rw-r--r--  deps/v8/src/handles-inl.h | 113
-rw-r--r--  deps/v8/src/handles.cc | 21
-rw-r--r--  deps/v8/src/handles.h | 54
-rw-r--r--  deps/v8/src/heap-inl.h | 64
-rw-r--r--  deps/v8/src/heap-snapshot-generator.cc | 10
-rw-r--r--  deps/v8/src/heap.cc | 293
-rw-r--r--  deps/v8/src/heap.h | 139
-rw-r--r--  deps/v8/src/hydrogen-environment-liveness.cc | 267
-rw-r--r--  deps/v8/src/hydrogen-environment-liveness.h | 94
-rw-r--r--  deps/v8/src/hydrogen-gvn.cc | 855
-rw-r--r--  deps/v8/src/hydrogen-gvn.h | 123
-rw-r--r--  deps/v8/src/hydrogen-instructions.cc | 505
-rw-r--r--  deps/v8/src/hydrogen-instructions.h | 611
-rw-r--r--  deps/v8/src/hydrogen.cc | 2256
-rw-r--r--  deps/v8/src/hydrogen.h | 301
-rw-r--r--  deps/v8/src/ia32/assembler-ia32-inl.h | 7
-rw-r--r--  deps/v8/src/ia32/assembler-ia32.cc | 2
-rw-r--r--  deps/v8/src/ia32/assembler-ia32.h | 2
-rw-r--r--  deps/v8/src/ia32/builtins-ia32.cc | 52
-rw-r--r--  deps/v8/src/ia32/code-stubs-ia32.cc | 397
-rw-r--r--  deps/v8/src/ia32/codegen-ia32.cc | 44
-rw-r--r--  deps/v8/src/ia32/codegen-ia32.h | 2
-rw-r--r--  deps/v8/src/ia32/deoptimizer-ia32.cc | 2
-rw-r--r--  deps/v8/src/ia32/full-codegen-ia32.cc | 170
-rw-r--r--  deps/v8/src/ia32/ic-ia32.cc | 24
-rw-r--r--  deps/v8/src/ia32/lithium-codegen-ia32.cc | 594
-rw-r--r--  deps/v8/src/ia32/lithium-codegen-ia32.h | 12
-rw-r--r--  deps/v8/src/ia32/lithium-gap-resolver-ia32.cc | 8
-rw-r--r--  deps/v8/src/ia32/lithium-ia32.cc | 203
-rw-r--r--  deps/v8/src/ia32/lithium-ia32.h | 61
-rw-r--r--  deps/v8/src/ia32/macro-assembler-ia32.cc | 41
-rw-r--r--  deps/v8/src/ia32/macro-assembler-ia32.h | 18
-rw-r--r--  deps/v8/src/ia32/regexp-macro-assembler-ia32.cc | 82
-rw-r--r--  deps/v8/src/ia32/regexp-macro-assembler-ia32.h | 4
-rw-r--r--  deps/v8/src/ia32/stub-cache-ia32.cc | 195
-rw-r--r--  deps/v8/src/ic.cc | 180
-rw-r--r--  deps/v8/src/ic.h | 16
-rw-r--r--  deps/v8/src/incremental-marking.cc | 9
-rw-r--r--  deps/v8/src/incremental-marking.h | 6
-rw-r--r--  deps/v8/src/interpreter-irregexp.cc | 2
-rw-r--r--  deps/v8/src/isolate.cc | 44
-rw-r--r--  deps/v8/src/isolate.h | 9
-rw-r--r--  deps/v8/src/json-parser.h | 12
-rw-r--r--  deps/v8/src/json-stringifier.h | 33
-rw-r--r--  deps/v8/src/jsregexp.cc | 79
-rw-r--r--  deps/v8/src/jsregexp.h | 37
-rw-r--r--  deps/v8/src/lithium-allocator.cc | 5
-rw-r--r--  deps/v8/src/lithium.cc | 10
-rw-r--r--  deps/v8/src/lithium.h | 7
-rw-r--r--  deps/v8/src/liveedit.cc | 31
-rw-r--r--  deps/v8/src/log-utils.cc | 4
-rw-r--r--  deps/v8/src/log.cc | 104
-rw-r--r--  deps/v8/src/mark-compact.cc | 72
-rw-r--r--  deps/v8/src/mark-compact.h | 12
-rw-r--r--  deps/v8/src/marking-thread.cc | 3
-rw-r--r--  deps/v8/src/messages.cc | 18
-rw-r--r--  deps/v8/src/messages.h | 1
-rw-r--r--  deps/v8/src/messages.js | 14
-rw-r--r--  deps/v8/src/mips/assembler-mips.cc | 4
-rw-r--r--  deps/v8/src/mips/builtins-mips.cc | 66
-rw-r--r--  deps/v8/src/mips/code-stubs-mips.cc | 394
-rw-r--r--  deps/v8/src/mips/codegen-mips.cc | 44
-rw-r--r--  deps/v8/src/mips/codegen-mips.h | 2
-rw-r--r--  deps/v8/src/mips/deoptimizer-mips.cc | 2
-rw-r--r--  deps/v8/src/mips/full-codegen-mips.cc | 179
-rw-r--r--  deps/v8/src/mips/ic-mips.cc | 23
-rw-r--r--  deps/v8/src/mips/lithium-codegen-mips.cc | 446
-rw-r--r--  deps/v8/src/mips/lithium-codegen-mips.h | 8
-rw-r--r--  deps/v8/src/mips/lithium-gap-resolver-mips.cc | 8
-rw-r--r--  deps/v8/src/mips/lithium-mips.cc | 176
-rw-r--r--  deps/v8/src/mips/lithium-mips.h | 65
-rw-r--r--  deps/v8/src/mips/macro-assembler-mips.cc | 43
-rw-r--r--  deps/v8/src/mips/macro-assembler-mips.h | 11
-rw-r--r--  deps/v8/src/mips/regexp-macro-assembler-mips.cc | 56
-rw-r--r--  deps/v8/src/mips/regexp-macro-assembler-mips.h | 4
-rw-r--r--  deps/v8/src/mips/simulator-mips.cc | 43
-rw-r--r--  deps/v8/src/mips/stub-cache-mips.cc | 329
-rw-r--r--  deps/v8/src/mksnapshot.cc | 9
-rw-r--r--  deps/v8/src/object-observe.js | 17
-rw-r--r--  deps/v8/src/objects-debug.cc | 9
-rw-r--r--  deps/v8/src/objects-inl.h | 109
-rw-r--r--  deps/v8/src/objects-printer.cc | 17
-rw-r--r--  deps/v8/src/objects-visiting-inl.h | 80
-rw-r--r--  deps/v8/src/objects-visiting.cc | 8
-rw-r--r--  deps/v8/src/objects-visiting.h | 7
-rw-r--r--  deps/v8/src/objects.cc | 743
-rw-r--r--  deps/v8/src/objects.h | 220
-rw-r--r--  deps/v8/src/optimizing-compiler-thread.cc | 4
-rw-r--r--  deps/v8/src/parser.cc | 505
-rw-r--r--  deps/v8/src/parser.h | 44
-rw-r--r--  deps/v8/src/platform-linux.cc | 4
-rw-r--r--  deps/v8/src/platform-openbsd.cc | 4
-rw-r--r--  deps/v8/src/platform-posix.cc | 17
-rw-r--r--  deps/v8/src/preparser.cc | 18
-rw-r--r--  deps/v8/src/preparser.h | 6
-rw-r--r--  deps/v8/src/prettyprinter.cc | 19
-rw-r--r--  deps/v8/src/profile-generator.cc | 7
-rw-r--r--  deps/v8/src/profile-generator.h | 2
-rw-r--r--  deps/v8/src/property-details.h | 34
-rw-r--r--  deps/v8/src/property.h | 28
-rw-r--r--  deps/v8/src/regexp-macro-assembler-irregexp.cc | 28
-rw-r--r--  deps/v8/src/regexp-macro-assembler-irregexp.h | 6
-rw-r--r--  deps/v8/src/regexp-macro-assembler-tracer.cc | 15
-rw-r--r--  deps/v8/src/regexp-macro-assembler-tracer.h | 5
-rw-r--r--  deps/v8/src/regexp-macro-assembler.cc | 4
-rw-r--r--  deps/v8/src/regexp-macro-assembler.h | 11
-rw-r--r--  deps/v8/src/rewriter.cc | 5
-rw-r--r--  deps/v8/src/runtime.cc | 688
-rw-r--r--  deps/v8/src/runtime.h | 12
-rw-r--r--  deps/v8/src/safepoint-table.h | 2
-rw-r--r--  deps/v8/src/sampler.cc | 272
-rw-r--r--  deps/v8/src/scanner.h | 13
-rw-r--r--  deps/v8/src/scopeinfo.cc | 15
-rw-r--r--  deps/v8/src/scopes.cc | 22
-rw-r--r--  deps/v8/src/scopes.h | 20
-rw-r--r--  deps/v8/src/serialize.cc | 18
-rw-r--r--  deps/v8/src/serialize.h | 7
-rw-r--r--  deps/v8/src/store-buffer.cc | 4
-rw-r--r--  deps/v8/src/string-stream.cc | 11
-rw-r--r--  deps/v8/src/string-stream.h | 6
-rw-r--r--  deps/v8/src/stub-cache.cc | 21
-rw-r--r--  deps/v8/src/stub-cache.h | 11
-rw-r--r--  deps/v8/src/sweeper-thread.cc | 4
-rw-r--r--  deps/v8/src/type-info.cc | 120
-rw-r--r--  deps/v8/src/type-info.h | 32
-rw-r--r--  deps/v8/src/typedarray.js | 2
-rw-r--r--  deps/v8/src/types.cc | 289
-rw-r--r--  deps/v8/src/types.h | 199
-rw-r--r--  deps/v8/src/typing.cc | 518
-rw-r--r--  deps/v8/src/typing.h | 77
-rw-r--r--  deps/v8/src/uri.h | 12
-rw-r--r--  deps/v8/src/utils.h | 33
-rw-r--r--  deps/v8/src/v8.cc | 1
-rw-r--r--  deps/v8/src/v8.h | 16
-rw-r--r--  deps/v8/src/v8conversions.h | 2
-rw-r--r--  deps/v8/src/v8globals.h | 1
-rw-r--r--  deps/v8/src/v8natives.js | 83
-rw-r--r--  deps/v8/src/version.cc | 2
-rw-r--r--  deps/v8/src/x64/assembler-x64.cc | 2
-rw-r--r--  deps/v8/src/x64/builtins-x64.cc | 48
-rw-r--r--  deps/v8/src/x64/code-stubs-x64.cc | 351
-rw-r--r--  deps/v8/src/x64/codegen-x64.cc | 40
-rw-r--r--  deps/v8/src/x64/codegen-x64.h | 2
-rw-r--r--  deps/v8/src/x64/deoptimizer-x64.cc | 2
-rw-r--r--  deps/v8/src/x64/full-codegen-x64.cc | 165
-rw-r--r--  deps/v8/src/x64/ic-x64.cc | 17
-rw-r--r--  deps/v8/src/x64/lithium-codegen-x64.cc | 563
-rw-r--r--  deps/v8/src/x64/lithium-codegen-x64.h | 8
-rw-r--r--  deps/v8/src/x64/lithium-gap-resolver-x64.cc | 8
-rw-r--r--  deps/v8/src/x64/lithium-x64.cc | 191
-rw-r--r--  deps/v8/src/x64/lithium-x64.h | 57
-rw-r--r--  deps/v8/src/x64/macro-assembler-x64.cc | 57
-rw-r--r--  deps/v8/src/x64/macro-assembler-x64.h | 18
-rw-r--r--  deps/v8/src/x64/regexp-macro-assembler-x64.cc | 97
-rw-r--r--  deps/v8/src/x64/regexp-macro-assembler-x64.h | 4
-rw-r--r--  deps/v8/src/x64/stub-cache-x64.cc | 188
-rw-r--r--  deps/v8/test/cctest/cctest.cc | 25
-rw-r--r--  deps/v8/test/cctest/cctest.gyp | 4
-rw-r--r--  deps/v8/test/cctest/cctest.status | 24
-rw-r--r--  deps/v8/test/cctest/test-alloc.cc | 12
-rwxr-xr-x[-rw-r--r--]  deps/v8/test/cctest/test-api.cc | 1257
-rw-r--r--  deps/v8/test/cctest/test-assembler-arm.cc | 6
-rw-r--r--  deps/v8/test/cctest/test-assembler-ia32.cc | 6
-rw-r--r--  deps/v8/test/cctest/test-assembler-x64.cc | 4
-rw-r--r--  deps/v8/test/cctest/test-compare-nil-ic-stub.cc | 3
-rw-r--r--  deps/v8/test/cctest/test-compiler.cc | 34
-rw-r--r--  deps/v8/test/cctest/test-cpu-profiler.cc | 340
-rw-r--r--  deps/v8/test/cctest/test-debug.cc | 27
-rw-r--r--  deps/v8/test/cctest/test-declarative-accessors.cc | 6
-rw-r--r--  deps/v8/test/cctest/test-decls.cc | 26
-rw-r--r--  deps/v8/test/cctest/test-dictionary.cc | 32
-rw-r--r--  deps/v8/test/cctest/test-disasm-arm.cc | 4
-rw-r--r--  deps/v8/test/cctest/test-disasm-ia32.cc | 8
-rw-r--r--  deps/v8/test/cctest/test-func-name-inference.cc | 68
-rw-r--r--  deps/v8/test/cctest/test-hashing.cc | 3
-rw-r--r--  deps/v8/test/cctest/test-heap-profiler.cc | 57
-rw-r--r--  deps/v8/test/cctest/test-heap.cc | 259
-rw-r--r--  deps/v8/test/cctest/test-lockers.cc | 31
-rw-r--r--  deps/v8/test/cctest/test-log-stack-tracer.cc | 4
-rw-r--r--  deps/v8/test/cctest/test-log.cc | 21
-rw-r--r--  deps/v8/test/cctest/test-mark-compact.cc | 41
-rw-r--r--  deps/v8/test/cctest/test-object-observe.cc | 3
-rw-r--r--  deps/v8/test/cctest/test-parsing.cc | 58
-rw-r--r--  deps/v8/test/cctest/test-profile-generator.cc | 5
-rw-r--r--  deps/v8/test/cctest/test-random.cc | 8
-rw-r--r--  deps/v8/test/cctest/test-regexp.cc | 79
-rw-r--r--  deps/v8/test/cctest/test-serialize.cc | 45
-rw-r--r--  deps/v8/test/cctest/test-strings.cc | 79
-rw-r--r--  deps/v8/test/cctest/test-symbols.cc | 4
-rw-r--r--  deps/v8/test/cctest/test-thread-termination.cc | 4
-rw-r--r--  deps/v8/test/cctest/test-threads.cc | 4
-rw-r--r--  deps/v8/test/cctest/test-types.cc | 533
-rw-r--r--  deps/v8/test/cctest/test-weakmaps.cc | 3
-rw-r--r--  deps/v8/test/cctest/test-weaktypedarrays.cc | 380
-rw-r--r--  deps/v8/test/mjsunit/allocation-site-info.js | 19
-rw-r--r--  deps/v8/test/mjsunit/debug-compile-event.js | 4
-rw-r--r--  deps/v8/test/mjsunit/debug-evaluate-locals-optimized-double.js | 6
-rw-r--r--  deps/v8/test/mjsunit/debug-evaluate-locals-optimized.js | 5
-rw-r--r--  deps/v8/test/mjsunit/debug-set-script-source.js | 4
-rw-r--r--  deps/v8/test/mjsunit/debug-setbreakpoint.js | 2
-rw-r--r--  deps/v8/test/mjsunit/fuzz-natives-part1.js | 4
-rw-r--r--  deps/v8/test/mjsunit/fuzz-natives-part2.js | 6
-rw-r--r--  deps/v8/test/mjsunit/fuzz-natives-part3.js | 4
-rw-r--r--  deps/v8/test/mjsunit/fuzz-natives-part4.js | 4
-rw-r--r--  deps/v8/test/mjsunit/harmony/generators-iteration.js | 6
-rw-r--r--  deps/v8/test/mjsunit/harmony/generators-objects.js | 2
-rw-r--r--  deps/v8/test/mjsunit/harmony/generators-runtime.js | 2
-rw-r--r--  deps/v8/test/mjsunit/harmony/iteration-semantics.js | 327
-rw-r--r--  deps/v8/test/mjsunit/harmony/iteration-syntax.js | 65
-rw-r--r--  deps/v8/test/mjsunit/harmony/object-observe.js | 172
-rw-r--r--  deps/v8/test/mjsunit/mjsunit.status | 4
-rw-r--r--  deps/v8/test/mjsunit/object-define-property.js | 9
-rw-r--r--  deps/v8/test/mjsunit/object-freeze.js | 123
-rw-r--r--  deps/v8/test/mjsunit/regress/regress-1853.js | 4
-rw-r--r--  deps/v8/test/mjsunit/regress/regress-2132.js (renamed from deps/v8/test/mjsunit/bugs/bug-618.js) | 31
-rw-r--r--  deps/v8/test/mjsunit/regress/regress-237617.js (renamed from deps/v8/src/builtins-decls.h) | 21
-rw-r--r--  deps/v8/test/mjsunit/regress/regress-2690.js | 29
-rw-r--r--  deps/v8/test/mjsunit/regress/regress-2717.js | 51
-rw-r--r--  deps/v8/test/mjsunit/regress/regress-618.js (renamed from deps/v8/test/mjsunit/bugs/618.js) | 0
-rw-r--r--  deps/v8/test/mjsunit/regress/regress-convert-hole.js | 109
-rw-r--r--  deps/v8/test/mjsunit/regress/regress-copy-hole-to-field.js | 57
-rw-r--r--  deps/v8/test/mjsunit/regress/regress-crbug-240032.js | 48
-rw-r--r--  deps/v8/test/mjsunit/regress/regress-crbug-242924.js | 48
-rw-r--r--  deps/v8/test/mjsunit/regress/regress-crbug-243868.js | 46
-rw-r--r--  deps/v8/test/mjsunit/regress/regress-crbug-244461.js | 41
-rw-r--r--  deps/v8/test/mjsunit/regress/regress-crbug-245424.js | 41
-rw-r--r--  deps/v8/test/mjsunit/regress/regress-crbug-248025.js | 40
-rw-r--r--  deps/v8/test/mjsunit/regress/regress-int32-truncation.js | 61
-rw-r--r--  deps/v8/test/mjsunit/regress/regress-seqstrsetchar-ex3.js | 45
-rw-r--r--  deps/v8/test/mjsunit/smi-representation.js | 68
-rw-r--r--  deps/v8/test/mjsunit/stack-traces.js | 4
-rw-r--r--  deps/v8/test/mjsunit/string-fromcharcode.js | 4
-rw-r--r--  deps/v8/test/mjsunit/track-fields.js | 80
-rw-r--r--  deps/v8/tools/blink_tests/TestExpectations | 23
-rw-r--r--  deps/v8/tools/gyp/v8.gyp | 9
310 files changed, 17274 insertions, 10701 deletions
diff --git a/deps/v8/ChangeLog b/deps/v8/ChangeLog
index 39885e7831..6117e56b76 100644
--- a/deps/v8/ChangeLog
+++ b/deps/v8/ChangeLog
@@ -1,3 +1,105 @@
+2013-06-11: Version 3.19.13
+
+ Performance and stability improvements on all platforms.
+
+
+2013-06-10: Version 3.19.12
+
+ Fixed arguments array access. (Chromium issue 247303)
+
+ Fixed bug in LookupForWrite. (Chromium issue 242332)
+
+ Performance and stability improvements on all platforms.
+
+
+2013-06-07: Version 3.19.11
+
+ Performance and stability improvements on all platforms.
+
+
+2013-06-06: Version 3.19.10
+
+ Performance and stability improvements on all platforms.
+
+
+2013-06-05: Version 3.19.9
+
+ Implemented Load IC support for loading properties from primitive
+ values to avoid perpetual soft deopts. (Chromium issue 242512)
+
+ Implemented Freeing of PerThreadAssertData when possible to avoid
+ memory leak. (Chromium issue 246567)
+
+ Removed V8_USE_OLD_STYLE_PERSISTENT_HANDLE_VISITORS.
+
+ Performance and stability improvements on all platforms.
+
+
+2013-06-03: Version 3.19.8
+
+ Fixed bug with inlining 'Array' function. (Chromium issue 244461)
+
+ Fixed initialization of literal objects. (Chromium issue 245424)
+
+ Fixed function name inferred inside closures. (Chromium issue 224884)
+
+ Performance and stability improvements on all platforms.
+
+
+2013-05-31: Version 3.19.7
+
+ Added support for //# sourceURL similar to deprecated //@ sourceURL one.
+ (issue 2702)
+
+ Made sure IfBuilder::Return clears the current block.
+ (Chromium issue 243868)
+
+ Fixed two CPU profiler tests on ARM and MIPS simulators
+ (issue 2628).
+
+ Fixed idle incremental GC for large objects.
+ (Chromium issue 241815)
+
+ Disabled --optimize-constructed-arrays due to crashes
+ (Chromium issue 244461).
+
+ Performance and stability improvements on all platforms.
+
+
+2013-05-28: Version 3.19.6
+
+ Fixed IfBuilder::Deopt to clear the current block
+ (Chromium issue 243868).
+
+ Performance and stability improvements on all platforms.
+
+
+2013-05-27: Version 3.19.5
+
+ Reset regexp parser flag after scanning ahead for capture groups.
+ (issue 2690)
+
+ Removed flakiness in test-cpu-profiler/SampleWhenFrameIsNotSetup.
+ (issue 2628)
+
+ Performance and stability improvements on all platforms.
+
+
+2013-05-24: Version 3.19.4
+
+ Fixed edge case in stack trace formatting. (Chromium issue 237617)
+
+ Fixed embedded new-space pointer in LCmpObjectEqAndBranch. (Chromium
+ issue 240032)
+
+ Made Object.freeze fast. (issue 1858, Chromium issue 115960)
+
+ Fixed bogus deopt in BuildEmitDeepCopy for holey arrays. (Chromium issue
+ 242924)
+
+ Performance and stability improvements on all platforms.
+
+
2013-05-22: Version 3.19.3
Performance and stability improvements on all platforms.
diff --git a/deps/v8/Makefile.nacl b/deps/v8/Makefile.nacl
index e8fc3d252a..0c98021ed1 100644
--- a/deps/v8/Makefile.nacl
+++ b/deps/v8/Makefile.nacl
@@ -46,7 +46,7 @@ else
endif
endif
-TOOLCHAIN_PATH = ${NACL_SDK_ROOT}/toolchain
+TOOLCHAIN_PATH = $(realpath ${NACL_SDK_ROOT}/toolchain)
NACL_TOOLCHAIN ?= ${TOOLCHAIN_PATH}/${TOOLCHAIN_DIR}
ifeq ($(ARCH), nacl_ia32)
diff --git a/deps/v8/build/common.gypi b/deps/v8/build/common.gypi
index ad6ccdf973..127749a473 100644
--- a/deps/v8/build/common.gypi
+++ b/deps/v8/build/common.gypi
@@ -129,22 +129,13 @@
'defines': [
'V8_TARGET_ARCH_ARM',
],
- 'variables': {
- 'armsimulator': '<!($(echo <(CXX)) -v 2>&1 | grep -q "^Target: arm" && echo "no" || echo "yes")',
- },
- 'conditions': [
- [ 'v8_can_use_unaligned_accesses=="true"', {
- 'defines': [
- 'CAN_USE_UNALIGNED_ACCESSES=1',
- ],
- }, {
- 'defines': [
- 'CAN_USE_UNALIGNED_ACCESSES=0',
- ],
- }],
- ['armsimulator=="no"', {
- 'target_conditions': [
- ['_toolset=="target"', {
+ 'target_conditions': [
+ ['_toolset=="host"', {
+ 'variables': {
+ 'armcompiler': '<!($(echo ${CXX_host:-$(which g++)}) -v 2>&1 | grep -q "^Target: arm" && echo "yes" || echo "no")',
+ },
+ 'conditions': [
+ ['armcompiler=="yes"', {
'conditions': [
[ 'armv7==1', {
'cflags': ['-march=armv7-a',],
@@ -159,9 +150,9 @@
[ 'arm_fpu!="default"', {
'cflags': ['-mfpu=<(arm_fpu)',],
}],
- ]
+ ],
}],
- ]
+ ],
}],
[ 'arm_float_abi!="default"', {
'cflags': ['-mfloat-abi=<(arm_float_abi)',],
@@ -172,63 +163,149 @@
[ 'arm_thumb==0', {
'cflags': ['-marm',],
}],
+ [ 'arm_test=="on"', {
+ 'defines': [
+ 'ARM_TEST',
+ ],
+ }],
+ ],
+ }, {
+ # armcompiler=="no"
+ 'conditions': [
+ [ 'armv7==1 or armv7=="default"', {
+ 'defines': [
+ 'CAN_USE_ARMV7_INSTRUCTIONS=1',
+ ],
+ 'conditions': [
+ [ 'arm_fpu=="default"', {
+ 'defines': [
+ 'CAN_USE_VFP3_INSTRUCTIONS',
+ ],
+ }],
+ [ 'arm_fpu=="vfpv3-d16"', {
+ 'defines': [
+ 'CAN_USE_VFP3_INSTRUCTIONS',
+ ],
+ }],
+ [ 'arm_fpu=="vfpv3"', {
+ 'defines': [
+ 'CAN_USE_VFP3_INSTRUCTIONS',
+ 'CAN_USE_VFP32DREGS',
+ ],
+ }],
+ [ 'arm_fpu=="neon" or arm_neon==1', {
+ 'defines': [
+ 'CAN_USE_VFP3_INSTRUCTIONS',
+ 'CAN_USE_VFP32DREGS',
+ ],
+ }],
+ ],
+ }],
+ [ 'arm_float_abi=="hard"', {
+ 'defines': [
+ 'USE_EABI_HARDFLOAT=1',
+ ],
+ }],
+ [ 'arm_float_abi=="softfp" or arm_float_abi=="default"', {
+ 'defines': [
+ 'USE_EABI_HARDFLOAT=0',
+ ],
+ }],
],
- }],
- ],
- 'conditions': [
- [ 'arm_test=="on"', {
'defines': [
'ARM_TEST',
],
}],
],
- }],
- ['armsimulator=="yes"', {
- 'defines': [
- 'ARM_TEST',
- ],
+ }], # _toolset=="host"
+ ['_toolset=="target"', {
+ 'variables': {
+ 'armcompiler': '<!($(echo ${CXX_target:-<(CXX)}) -v 2>&1 | grep -q "^Target: arm" && echo "yes" || echo "no")',
+ },
'conditions': [
- [ 'armv7==1 or armv7=="default"', {
- 'defines': [
- 'CAN_USE_ARMV7_INSTRUCTIONS=1',
- ],
+ ['armcompiler=="yes"', {
'conditions': [
- [ 'arm_fpu=="default"', {
+ [ 'armv7==1', {
+ 'cflags': ['-march=armv7-a',],
+ }],
+ [ 'armv7==1 or armv7=="default"', {
+ 'conditions': [
+ [ 'arm_neon==1', {
+ 'cflags': ['-mfpu=neon',],
+ },
+ {
+ 'conditions': [
+ [ 'arm_fpu!="default"', {
+ 'cflags': ['-mfpu=<(arm_fpu)',],
+ }],
+ ],
+ }],
+ ],
+ }],
+ [ 'arm_float_abi!="default"', {
+ 'cflags': ['-mfloat-abi=<(arm_float_abi)',],
+ }],
+ [ 'arm_thumb==1', {
+ 'cflags': ['-mthumb',],
+ }],
+ [ 'arm_thumb==0', {
+ 'cflags': ['-marm',],
+ }],
+ [ 'arm_test=="on"', {
'defines': [
- 'CAN_USE_VFP3_INSTRUCTIONS',
+ 'ARM_TEST',
],
}],
- [ 'arm_fpu=="vfpv3-d16"', {
+ ],
+ }, {
+ # armcompiler=="no"
+ 'conditions': [
+ [ 'armv7==1 or armv7=="default"', {
'defines': [
- 'CAN_USE_VFP3_INSTRUCTIONS',
+ 'CAN_USE_ARMV7_INSTRUCTIONS=1',
+ ],
+ 'conditions': [
+ [ 'arm_fpu=="default"', {
+ 'defines': [
+ 'CAN_USE_VFP3_INSTRUCTIONS',
+ ],
+ }],
+ [ 'arm_fpu=="vfpv3-d16"', {
+ 'defines': [
+ 'CAN_USE_VFP3_INSTRUCTIONS',
+ ],
+ }],
+ [ 'arm_fpu=="vfpv3"', {
+ 'defines': [
+ 'CAN_USE_VFP3_INSTRUCTIONS',
+ 'CAN_USE_VFP32DREGS',
+ ],
+ }],
+ [ 'arm_fpu=="neon" or arm_neon==1', {
+ 'defines': [
+ 'CAN_USE_VFP3_INSTRUCTIONS',
+ 'CAN_USE_VFP32DREGS',
+ ],
+ }],
],
}],
- [ 'arm_fpu=="vfpv3"', {
+ [ 'arm_float_abi=="hard"', {
'defines': [
- 'CAN_USE_VFP3_INSTRUCTIONS',
- 'CAN_USE_VFP32DREGS',
+ 'USE_EABI_HARDFLOAT=1',
],
}],
- [ 'arm_fpu=="neon" or arm_neon==1', {
+ [ 'arm_float_abi=="softfp" or arm_float_abi=="default"', {
'defines': [
- 'CAN_USE_VFP3_INSTRUCTIONS',
- 'CAN_USE_VFP32DREGS',
+ 'USE_EABI_HARDFLOAT=0',
],
}],
],
- }],
- [ 'arm_float_abi=="hard"', {
- 'defines': [
- 'USE_EABI_HARDFLOAT=1',
- ],
- }],
- [ 'arm_float_abi=="softfp" or arm_float_abi=="default"', {
'defines': [
- 'USE_EABI_HARDFLOAT=0',
+ 'ARM_TEST',
],
}],
- ]
- }],
+ ],
+ }], # _toolset=="target"
],
}], # v8_target_arch=="arm"
['v8_target_arch=="ia32"', {
@@ -453,6 +530,15 @@
}],
['OS=="linux" or OS=="freebsd" or OS=="openbsd" or OS=="netbsd" \
or OS=="android"', {
+ 'cflags!': [
+ '-O2',
+ '-Os',
+ ],
+ 'cflags': [
+ '-fdata-sections',
+ '-ffunction-sections',
+ '-O3',
+ ],
'conditions': [
[ 'gcc_version==44 and clang==0', {
'cflags': [
diff --git a/deps/v8/include/v8-profiler.h b/deps/v8/include/v8-profiler.h
index bc50b6f42b..d740df3bb7 100644
--- a/deps/v8/include/v8-profiler.h
+++ b/deps/v8/include/v8-profiler.h
@@ -184,19 +184,21 @@ class V8EXPORT CpuProfiler {
V8_DEPRECATED(static const CpuProfile* GetProfile(
int index,
Handle<Value> security_token = Handle<Value>()));
- /** Returns a profile by index. */
- const CpuProfile* GetCpuProfile(
+ /** Deprecated. Use GetCpuProfile with single parameter. */
+ V8_DEPRECATED(const CpuProfile* GetCpuProfile(
int index,
- Handle<Value> security_token = Handle<Value>());
+ Handle<Value> security_token));
+ /** Returns a profile by index. */
+ const CpuProfile* GetCpuProfile(int index);
/** Deprecated. Use FindProfile instead. */
V8_DEPRECATED(static const CpuProfile* FindProfile(
unsigned uid,
Handle<Value> security_token = Handle<Value>()));
/** Returns a profile by uid. */
- const CpuProfile* FindCpuProfile(
+ V8_DEPRECATED(const CpuProfile* FindCpuProfile(
unsigned uid,
- Handle<Value> security_token = Handle<Value>());
+ Handle<Value> security_token = Handle<Value>()));
/** Deprecated. Use StartCpuProfiling instead. */
V8_DEPRECATED(static void StartProfiling(Handle<String> title,
@@ -219,12 +221,16 @@ class V8EXPORT CpuProfiler {
Handle<String> title,
Handle<Value> security_token = Handle<Value>()));
/**
+ * Deprecated. Use StopCpuProfiling with one parameter instead.
+ */
+ V8_DEPRECATED(const CpuProfile* StopCpuProfiling(
+ Handle<String> title,
+ Handle<Value> security_token));
+ /**
* Stops collecting CPU profile with a given title and returns it.
* If the title given is empty, finishes the last profile started.
*/
- const CpuProfile* StopCpuProfiling(
- Handle<String> title,
- Handle<Value> security_token = Handle<Value>());
+ const CpuProfile* StopCpuProfiling(Handle<String> title);
/** Deprecated. Use DeleteAllCpuProfiles instead. */
V8_DEPRECATED(static void DeleteAllProfiles());
@@ -438,7 +444,7 @@ class V8EXPORT HeapProfiler {
/** Deprecated. Use FindHeapSnapshot instead. */
V8_DEPRECATED(static const HeapSnapshot* FindSnapshot(unsigned uid));
/** Returns a profile by uid. */
- const HeapSnapshot* FindHeapSnapshot(unsigned uid);
+ V8_DEPRECATED(const HeapSnapshot* FindHeapSnapshot(unsigned uid));
/** Deprecated. Use GetObjectId instead. */
V8_DEPRECATED(static SnapshotObjectId GetSnapshotObjectId(
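
Read together, these v8-profiler.h hunks deprecate every overload that takes a security token and leave single-argument instance methods behind. A minimal sketch of the surviving surface; the title string and the use of Isolate::GetCpuProfiler() are illustrative assumptions, and error handling is omitted:

    #include <v8.h>

    void ProfileStartup() {  // assumes an entered isolate and context
      v8::Isolate* isolate = v8::Isolate::GetCurrent();
      v8::HandleScope scope(isolate);
      v8::CpuProfiler* profiler = isolate->GetCpuProfiler();
      v8::Handle<v8::String> title = v8::String::New("startup");
      profiler->StartCpuProfiling(title);
      // ... run the code of interest ...
      const v8::CpuProfile* profile = profiler->StopCpuProfiling(title);  // one-argument form
      const v8::CpuProfile* by_index = profiler->GetCpuProfile(0);        // by index, no token
    }
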
diff --git a/deps/v8/include/v8.h b/deps/v8/include/v8.h
index b3dff3fee1..c0bec79b01 100644
--- a/deps/v8/include/v8.h
+++ b/deps/v8/include/v8.h
@@ -214,11 +214,6 @@ class WeakReferenceCallbacks {
P* parameter);
};
-// TODO(svenpanne) Temporary definition until Chrome is in sync.
-typedef void (*NearDeathCallback)(Isolate* isolate,
- Persistent<Value> object,
- void* parameter);
-
// --- Handles ---
#define TYPE_CHECK(T, S) \
@@ -370,11 +365,11 @@ template <class T> class Handle {
#endif
private:
- template<class F>
- friend class Persistent;
- template<class F>
- friend class Local;
+ template<class F> friend class Persistent;
+ template<class F> friend class Local;
friend class Arguments;
+ template<class F> friend class FunctionCallbackInfo;
+ template<class F> friend class PropertyCallbackInfo;
friend class String;
friend class Object;
friend class AccessorInfo;
@@ -385,6 +380,7 @@ template <class T> class Handle {
friend class Context;
friend class InternalHandleHelper;
friend class LocalContext;
+ friend class HandleScope;
#ifndef V8_USE_UNSAFE_HANDLES
V8_INLINE(static Handle<T> New(Isolate* isolate, T* that));
@@ -458,17 +454,18 @@ template <class T> class Local : public Handle<T> {
#endif
private:
- template<class F>
- friend class Persistent;
- template<class F>
- friend class Handle;
+ template<class F> friend class Persistent;
+ template<class F> friend class Handle;
friend class Arguments;
+ template<class F> friend class FunctionCallbackInfo;
+ template<class F> friend class PropertyCallbackInfo;
friend class String;
friend class Object;
friend class AccessorInfo;
friend class Context;
friend class InternalHandleHelper;
friend class LocalContext;
+ friend class HandleScope;
V8_INLINE(static Local<T> New(Isolate* isolate, T* that));
};
@@ -516,6 +513,10 @@ template <class T> class Persistent // NOLINT
template <class S> V8_INLINE(Persistent(Isolate* isolate, Handle<S> that))
: val_(*New(isolate, that)) { }
+ template <class S> V8_INLINE(Persistent(Isolate* isolate,
+ Persistent<S>& that)) // NOLINT
+ : val_(*New(isolate, that)) { }
+
#else
/**
* Creates an empty persistent handle that doesn't point to any
@@ -563,6 +564,7 @@ template <class T> class Persistent // NOLINT
#endif
+#ifdef V8_USE_UNSAFE_HANDLES
template <class S> V8_INLINE(static Persistent<T> Cast(Persistent<S> that)) {
#ifdef V8_ENABLE_CHECKS
// If we're going to perform the type check then we have to check
@@ -576,6 +578,22 @@ template <class T> class Persistent // NOLINT
return Persistent<S>::Cast(*this);
}
+#else
+ template <class S>
+ V8_INLINE(static Persistent<T>& Cast(Persistent<S>& that)) { // NOLINT
+#ifdef V8_ENABLE_CHECKS
+ // If we're going to perform the type check then we have to check
+ // that the handle isn't empty before doing the checked cast.
+ if (!that.IsEmpty()) T::Cast(*that);
+#endif
+ return reinterpret_cast<Persistent<T>&>(that);
+ }
+
+ template <class S> V8_INLINE(Persistent<S>& As()) { // NOLINT
+ return Persistent<S>::Cast(*this);
+ }
+#endif
+
V8_DEPRECATED(static Persistent<T> New(Handle<T> that));
/**
@@ -615,38 +633,47 @@ template <class T> class Persistent // NOLINT
* This handle's reference, and any other references to the storage
* cell remain and IsEmpty will still return false.
*/
- // TODO(dcarney): remove before cutover
- V8_INLINE(void Dispose(Isolate* isolate));
+ // TODO(dcarney): deprecate
+ V8_INLINE(void Dispose(Isolate* isolate)) { Dispose(); }
+ /**
+ * Make the reference to this object weak. When only weak handles
+ * refer to the object, the garbage collector will perform a
+ * callback to the given V8::NearDeathCallback function, passing
+ * it the object reference and the given parameters.
+ */
template<typename S, typename P>
V8_INLINE(void MakeWeak(
- Isolate* isolate,
P* parameters,
typename WeakReferenceCallbacks<S, P>::Revivable callback));
template<typename P>
V8_INLINE(void MakeWeak(
- Isolate* isolate,
P* parameters,
typename WeakReferenceCallbacks<T, P>::Revivable callback));
- /**
- * Make the reference to this object weak. When only weak handles
- * refer to the object, the garbage collector will perform a
- * callback to the given V8::NearDeathCallback function, passing
- * it the object reference and the given parameters.
- */
- // TODO(dcarney): remove before cutover
- V8_INLINE(void MakeWeak(Isolate* isolate,
- void* parameters,
- NearDeathCallback callback));
+ // TODO(dcarney): deprecate
+ template<typename S, typename P>
+ V8_INLINE(void MakeWeak(
+ Isolate* isolate,
+ P* parameters,
+ typename WeakReferenceCallbacks<S, P>::Revivable callback)) {
+ MakeWeak<S, P>(parameters, callback);
+ }
- V8_INLINE(void ClearWeak());
+ // TODO(dcarney): deprecate
+ template<typename P>
+ V8_INLINE(void MakeWeak(
+ Isolate* isolate,
+ P* parameters,
+ typename WeakReferenceCallbacks<T, P>::Revivable callback)) {
+ MakeWeak<P>(parameters, callback);
+ }
- // TODO(dcarney): remove before cutover
- V8_INLINE(void ClearWeak(Isolate* isolate));
+ V8_INLINE(void ClearWeak());
- V8_INLINE(void MarkIndependent());
+ // TODO(dcarney): deprecate
+ V8_INLINE(void ClearWeak(Isolate* isolate)) { ClearWeak(); }
/**
* Marks the reference to this object independent. Garbage collector is free
@@ -654,10 +681,10 @@ template <class T> class Persistent // NOLINT
* independent handle should not assume that it will be preceded by a global
* GC prologue callback or followed by a global GC epilogue callback.
*/
- // TODO(dcarney): remove before cutover
- V8_INLINE(void MarkIndependent(Isolate* isolate));
+ V8_INLINE(void MarkIndependent());
- V8_INLINE(void MarkPartiallyDependent());
+ // TODO(dcarney): deprecate
+ V8_INLINE(void MarkIndependent(Isolate* isolate)) { MarkIndependent(); }
/**
* Marks the reference to this object partially dependent. Partially dependent
@@ -667,49 +694,63 @@ template <class T> class Persistent // NOLINT
* external dependencies. This mark is automatically cleared after each
* garbage collection.
*/
- // TODO(dcarney): remove before cutover
- V8_INLINE(void MarkPartiallyDependent(Isolate* isolate));
+ V8_INLINE(void MarkPartiallyDependent());
- V8_INLINE(bool IsIndependent() const);
+ // TODO(dcarney): deprecate
+ V8_INLINE(void MarkPartiallyDependent(Isolate* isolate)) {
+ MarkPartiallyDependent();
+ }
- // TODO(dcarney): remove before cutover
- V8_INLINE(bool IsIndependent(Isolate* isolate) const);
+ V8_INLINE(bool IsIndependent() const);
- V8_INLINE(bool IsNearDeath() const);
+ // TODO(dcarney): deprecate
+ V8_INLINE(bool IsIndependent(Isolate* isolate) const) {
+ return IsIndependent();
+ }
/** Checks if the handle holds the only reference to an object. */
- // TODO(dcarney): remove before cutover
- V8_INLINE(bool IsNearDeath(Isolate* isolate) const);
+ V8_INLINE(bool IsNearDeath() const);
- V8_INLINE(bool IsWeak() const);
+ // TODO(dcarney): deprecate
+ V8_INLINE(bool IsNearDeath(Isolate* isolate) const) { return IsNearDeath(); }
/** Returns true if the handle's reference is weak. */
- // TODO(dcarney): remove before cutover
- V8_INLINE(bool IsWeak(Isolate* isolate) const);
+ V8_INLINE(bool IsWeak() const);
- V8_INLINE(void SetWrapperClassId(uint16_t class_id));
+ // TODO(dcarney): deprecate
+ V8_INLINE(bool IsWeak(Isolate* isolate) const) { return IsWeak(); }
/**
* Assigns a wrapper class ID to the handle. See RetainedObjectInfo interface
* description in v8-profiler.h for details.
*/
- // TODO(dcarney): remove before cutover
- V8_INLINE(void SetWrapperClassId(Isolate* isolate, uint16_t class_id));
+ V8_INLINE(void SetWrapperClassId(uint16_t class_id));
- V8_INLINE(uint16_t WrapperClassId() const);
+ // TODO(dcarney): deprecate
+ V8_INLINE(void SetWrapperClassId(Isolate* isolate, uint16_t class_id)) {
+ SetWrapperClassId(class_id);
+ }
/**
* Returns the class ID previously assigned to this handle or 0 if no class ID
* was previously assigned.
*/
- // TODO(dcarney): remove before cutover
- V8_INLINE(uint16_t WrapperClassId(Isolate* isolate) const);
+ V8_INLINE(uint16_t WrapperClassId() const);
+
+ // TODO(dcarney): deprecate
+ V8_INLINE(uint16_t WrapperClassId(Isolate* isolate) const) {
+ return WrapperClassId();
+ }
/**
* Disposes the current contents of the handle and replaces it.
*/
V8_INLINE(void Reset(Isolate* isolate, const Handle<T>& other));
+#ifndef V8_USE_UNSAFE_HANDLES
+ V8_INLINE(void Reset(Isolate* isolate, const Persistent<T>& other));
+#endif
+
/**
* Returns the underlying raw pointer and clears the handle. The caller is
* responsible of eventually destroying the underlying object (by creating a
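
The pattern across the Persistent hunks above is uniform: each method that took an Isolate* gains an argument-free form, and the old form becomes a forwarding stub marked for deprecation. A sketch of the resulting style; the names OnNearDeath, Track, and the int* state parameter are illustrative, not from the patch:

    #include <v8.h>

    // Weak callback matching WeakReferenceCallbacks<Object, int>::Revivable.
    void OnNearDeath(v8::Isolate* isolate,
                     v8::Persistent<v8::Object>* object,
                     int* state) {
      // ... release native resources tied to *object ...
    }

    void Track(v8::Isolate* isolate, v8::Handle<v8::Object> obj, int* state) {
      v8::Persistent<v8::Object> handle(isolate, obj);  // create from a local
      handle.SetWrapperClassId(42);                     // no Isolate* argument anymore
      handle.MakeWeak(state, OnNearDeath);              // ditto
      if (handle.IsWeak()) handle.ClearWeak();
      handle.Dispose();                                 // Dispose(isolate) now just forwards here
    }
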
@@ -722,10 +763,7 @@ template <class T> class Persistent // NOLINT
#ifndef V8_USE_UNSAFE_HANDLES
-#ifndef V8_ALLOW_ACCESS_TO_PERSISTENT_IMPLICIT
-
private:
-#endif
// TODO(dcarney): make unlinkable before cutover
V8_INLINE(Persistent(const Persistent& that)) : val_(that.val_) {}
// TODO(dcarney): make unlinkable before cutover
@@ -748,21 +786,17 @@ template <class T> class Persistent // NOLINT
}
// TODO(dcarney): remove before cutover
V8_INLINE(T* operator*() const) { return val_; }
- public:
-#ifndef V8_ALLOW_ACCESS_TO_PERSISTENT_ARROW
private:
-#endif
// TODO(dcarney): remove before cutover
V8_INLINE(T* operator->() const) { return val_; }
public:
#endif
private:
- template<class F>
- friend class Handle;
- template<class F>
- friend class Local;
+ template<class F> friend class Handle;
+ template<class F> friend class Local;
+ template<class F> friend class ReturnValue;
friend class ImplementationUtilities;
friend class ObjectTemplate;
friend class Context;
@@ -1202,7 +1236,8 @@ class V8EXPORT StackFrame {
/**
* Returns the name of the resource that contains the script for the
* function for this StackFrame or sourceURL value if the script name
- * is undefined and its source ends with //@ sourceURL=... string.
+ * is undefined and its source ends with //# sourceURL=... string or
+ * deprecated //@ sourceURL=... string.
*/
Local<String> GetScriptNameOrSourceURL() const;
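
The comment change above documents behavior rather than adding it: a script whose source ends with a //# sourceURL=... magic comment (or the deprecated //@ form) reports that URL from stack frames even when it was compiled without a resource name. A hedged sketch, assuming an entered context:

    #include <v8.h>

    void DemoSourceURL() {
      v8::Isolate* isolate = v8::Isolate::GetCurrent();
      v8::HandleScope scope(isolate);
      v8::Handle<v8::String> source = v8::String::New(
          "function f() { return new Error().stack; }\n"
          "f();\n"
          "//# sourceURL=generated/demo.js\n");
      v8::Script::Compile(source)->Run();
      // Frames from this script now report "generated/demo.js" via
      // StackFrame::GetScriptNameOrSourceURL().
    }
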
@@ -1435,6 +1470,8 @@ class V8EXPORT Value : public Data {
bool Equals(Handle<Value> that) const;
bool StrictEquals(Handle<Value> that) const;
+ template <class T> V8_INLINE(static Value* Cast(T* value));
+
private:
V8_INLINE(bool QuickIsUndefined() const);
V8_INLINE(bool QuickIsNull() const);
@@ -1490,11 +1527,19 @@ class V8EXPORT String : public Primitive {
V8_DEPRECATED(V8_INLINE(bool MayContainNonAscii()) const) { return true; }
/**
- * Returns whether this string contains only one byte data.
+ * Returns whether this string is known to contain only one-byte data.
+ * Does not read the string.
+ * False negatives are possible.
*/
bool IsOneByte() const;
/**
+ * Returns whether this string contains only one-byte data.
+ * Will read the entire string in some cases.
+ */
+ bool ContainsOnlyOneByte() const;
+
+ /**
* Write the contents of the string to an external buffer.
* If no arguments are given, expects the buffer to be large
* enough to hold the entire string and NULL terminator. Copies
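
The split between these two predicates deserves emphasis: IsOneByte() is a cheap flag check whose false answers may be false negatives, while ContainsOnlyOneByte() is exact but may scan the whole string. A one-function sketch of the intended fast-path/slow-path use:

    #include <v8.h>

    bool CanUseOneBytePath(v8::Handle<v8::String> s) {
      if (s->IsOneByte()) return true;  // cheap flag check; true is reliable
      return s->ContainsOnlyOneByte();  // exact answer; may read the string
    }
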
@@ -2325,6 +2370,9 @@ class V8EXPORT Function : public Object {
static void CheckCast(Value* obj);
};
+#ifndef V8_ARRAY_BUFFER_INTERNAL_FIELD_COUNT
+#define V8_ARRAY_BUFFER_INTERNAL_FIELD_COUNT 2
+#endif
/**
* An instance of the built-in ArrayBuffer constructor (ES6 draft 15.13.5).
@@ -2333,30 +2381,102 @@ class V8EXPORT Function : public Object {
class V8EXPORT ArrayBuffer : public Object {
public:
/**
- * Data length in bytes.
+ * Allocator that V8 uses to allocate |ArrayBuffer|'s memory.
+ * The allocator is a global V8 setting. It should be set with
+ * V8::SetArrayBufferAllocator prior to creation of the first ArrayBuffer.
+ *
+ * This API is experimental and may change significantly.
*/
- size_t ByteLength() const;
+ class V8EXPORT Allocator { // NOLINT
+ public:
+ virtual ~Allocator() {}
+
+ /**
+ * Allocate |length| bytes. Return NULL if allocation is not successful.
+ */
+ virtual void* Allocate(size_t length) = 0;
+ /**
+ * Free the memory pointed to by |data|. That memory is guaranteed to be
+ * previously allocated by |Allocate|.
+ */
+ virtual void Free(void* data) = 0;
+ };
+
/**
- * Raw pointer to the array buffer data
+ * The contents of an |ArrayBuffer|. Externalization of |ArrayBuffer|
+ * returns an instance of this class, populated with a pointer to data
+ * and byte length.
+ *
+ * The Data pointer of ArrayBuffer::Contents is always allocated with
+ * Allocator::Allocate that is set with V8::SetArrayBufferAllocator.
+ *
+ * This API is experimental and may change significantly.
*/
- void* Data() const;
+ class V8EXPORT Contents { // NOLINT
+ public:
+ Contents() : data_(NULL), byte_length_(0) {}
+
+ void* Data() const { return data_; }
+ size_t ByteLength() const { return byte_length_; }
+
+ private:
+ void* data_;
+ size_t byte_length_;
+
+ friend class ArrayBuffer;
+ };
+
+
+ /**
+ * Data length in bytes.
+ */
+ size_t ByteLength() const;
/**
* Create a new ArrayBuffer. Allocate |byte_length| bytes.
* Allocated memory will be owned by a created ArrayBuffer and
- * will be deallocated when it is garbage-collected.
+ * will be deallocated when it is garbage-collected,
+ * unless the object is externalized.
*/
static Local<ArrayBuffer> New(size_t byte_length);
/**
* Create a new ArrayBuffer over an existing memory block.
+ * The created array buffer is immediately in externalized state.
* The memory block will not be reclaimed when a created ArrayBuffer
* is garbage-collected.
*/
static Local<ArrayBuffer> New(void* data, size_t byte_length);
+ /**
+ * Returns true if ArrayBuffer is externalized, that is, does not
+ * own its memory block.
+ */
+ bool IsExternal() const;
+
+ /**
+ * Neuters this ArrayBuffer and all its views (typed arrays).
+ * Neutering sets the byte length of the buffer and all typed arrays to zero,
+ * preventing JavaScript from ever accessing the underlying backing store.
+ * ArrayBuffer should have been externalized.
+ */
+ void Neuter();
+
+ /**
+ * Make this ArrayBuffer external. The pointer to underlying memory block
+ * and byte length are returned as |Contents| structure. After ArrayBuffer
+ * has been externalized, it no longer owns the memory block. The caller
+ * should take steps to free memory when it is no longer needed.
+ *
+ * The memory block is guaranteed to be allocated with |Allocator::Allocate|
+ * that has been set with V8::SetArrayBufferAllocator.
+ */
+ Contents Externalize();
+
V8_INLINE(static ArrayBuffer* Cast(Value* obj));
+ static const int kInternalFieldCount = V8_ARRAY_BUFFER_INTERNAL_FIELD_COUNT;
+
private:
ArrayBuffer();
static void CheckCast(Value* obj);
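
These pieces compose into one lifecycle: install a process-wide Allocator, create buffers, and optionally externalize a buffer to take ownership of its backing store. A sketch under the API above; MallocAllocator is a hypothetical name, and real code would check Allocate() for NULL:

    #include <stdlib.h>
    #include <v8.h>

    class MallocAllocator : public v8::ArrayBuffer::Allocator {
     public:
      virtual void* Allocate(size_t length) { return calloc(length, 1); }
      virtual void Free(void* data) { free(data); }
    };

    static MallocAllocator allocator;

    void Demo() {  // assumes an entered isolate and context
      // Global setting; must precede creation of the first ArrayBuffer.
      v8::V8::SetArrayBufferAllocator(&allocator);
      v8::Local<v8::ArrayBuffer> buf = v8::ArrayBuffer::New(1024);
      v8::ArrayBuffer::Contents contents = buf->Externalize();
      buf->Neuter();  // only valid once the buffer is externalized
      // The caller now owns the block; it came from Allocate(), so:
      allocator.Free(contents.Data());
    }
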
@@ -2735,23 +2855,33 @@ class V8EXPORT Template : public Data {
template<typename T>
-class V8EXPORT ReturnValue {
+class ReturnValue {
public:
- V8_INLINE(explicit ReturnValue(internal::Object** slot));
+ template <class S> V8_INLINE(ReturnValue(const ReturnValue<S>& that))
+ : value_(that.value_) {
+ TYPE_CHECK(T, S);
+ }
// Handle setters
- V8_INLINE(void Set(const Persistent<T>& handle));
- V8_INLINE(void Set(const Handle<T> handle));
+ template <typename S> V8_INLINE(void Set(const Persistent<S>& handle));
+ template <typename S> V8_INLINE(void Set(const Handle<S> handle));
// Fast primitive setters
- V8_INLINE(void Set(Isolate* isolate, bool value));
- V8_INLINE(void Set(Isolate* isolate, double i));
- V8_INLINE(void Set(Isolate* isolate, int32_t i));
- V8_INLINE(void Set(Isolate* isolate, uint32_t i));
+ V8_INLINE(void Set(bool value));
+ V8_INLINE(void Set(double i));
+ V8_INLINE(void Set(int32_t i));
+ V8_INLINE(void Set(uint32_t i));
// Fast JS primitive setters
- V8_INLINE(void SetNull(Isolate* isolate));
- V8_INLINE(void SetUndefined(Isolate* isolate));
+ V8_INLINE(void SetNull());
+ V8_INLINE(void SetUndefined());
+ V8_INLINE(void SetEmptyString());
+ // Convenience getter for Isolate
+ V8_INLINE(Isolate* GetIsolate());
+
private:
- V8_INLINE(void SetTrue(Isolate* isolate));
- V8_INLINE(void SetFalse(Isolate* isolate));
+ template<class F> friend class ReturnValue;
+ template<class F> friend class FunctionCallbackInfo;
+ template<class F> friend class PropertyCallbackInfo;
+ V8_INLINE(internal::Object* GetDefaultValue());
+ V8_INLINE(explicit ReturnValue(internal::Object** slot));
internal::Object** value_;
};
@@ -2763,7 +2893,7 @@ class V8EXPORT ReturnValue {
* the holder of the function.
*/
template<typename T>
-class V8EXPORT FunctionCallbackInfo {
+class FunctionCallbackInfo {
public:
V8_INLINE(int Length() const);
V8_INLINE(Local<Value> operator[](int i) const);
@@ -2775,16 +2905,17 @@ class V8EXPORT FunctionCallbackInfo {
V8_INLINE(Isolate* GetIsolate() const);
V8_INLINE(ReturnValue<T> GetReturnValue() const);
// This shouldn't be public, but the arm compiler needs it.
- static const int kArgsLength = 5;
+ static const int kArgsLength = 6;
protected:
friend class internal::FunctionCallbackArguments;
friend class internal::CustomArguments<FunctionCallbackInfo>;
static const int kReturnValueIndex = 0;
- static const int kIsolateIndex = -1;
- static const int kDataIndex = -2;
- static const int kCalleeIndex = -3;
- static const int kHolderIndex = -4;
+ static const int kReturnValueDefaultValueIndex = -1;
+ static const int kIsolateIndex = -2;
+ static const int kDataIndex = -3;
+ static const int kCalleeIndex = -4;
+ static const int kHolderIndex = -5;
V8_INLINE(FunctionCallbackInfo(internal::Object** implicit_args,
internal::Object** values,
@@ -2811,7 +2942,7 @@ class V8EXPORT Arguments : public FunctionCallbackInfo<Value> {
* of the property access.
*/
template<typename T>
-class V8EXPORT PropertyCallbackInfo {
+class PropertyCallbackInfo {
public:
V8_INLINE(Isolate* GetIsolate() const);
V8_INLINE(Local<Value> Data() const);
@@ -2819,7 +2950,7 @@ class V8EXPORT PropertyCallbackInfo {
V8_INLINE(Local<Object> Holder() const);
V8_INLINE(ReturnValue<T> GetReturnValue() const);
// This shouldn't be public, but the arm compiler needs it.
- static const int kArgsLength = 5;
+ static const int kArgsLength = 6;
protected:
friend class MacroAssembler;
@@ -2828,8 +2959,9 @@ class V8EXPORT PropertyCallbackInfo {
static const int kThisIndex = 0;
static const int kHolderIndex = -1;
static const int kDataIndex = -2;
- static const int kIsolateIndex = -3;
- static const int kReturnValueIndex = -4;
+ static const int kReturnValueIndex = -3;
+ static const int kReturnValueDefaultValueIndex = -4;
+ static const int kIsolateIndex = -5;
V8_INLINE(PropertyCallbackInfo(internal::Object** args))
: args_(args) { }
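
Both callback-info classes grow by one slot (kArgsLength goes from 5 to 6) to hold the return value's default, which is what lets ReturnValue locate its isolate and default value without extra arguments, per the ReturnValue hunks earlier. A sketch of a callback in the resulting style; StrLen is an illustrative name:

    #include <v8.h>

    void StrLen(const v8::FunctionCallbackInfo<v8::Value>& args) {
      v8::Isolate* isolate = args.GetIsolate();  // no Isolate::GetCurrent()
      v8::HandleScope scope(isolate);
      if (args.Length() < 1 || !args[0]->IsString()) {
        args.GetReturnValue().SetUndefined();    // was SetUndefined(isolate)
        return;
      }
      args.GetReturnValue().Set(args[0].As<v8::String>()->Length());
    }
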
@@ -4050,7 +4182,7 @@ class V8EXPORT ExternalResourceVisitor { // NOLINT
class V8EXPORT PersistentHandleVisitor { // NOLINT
public:
virtual ~PersistentHandleVisitor() {}
- virtual void VisitPersistentHandle(Persistent<Value> value,
+ virtual void VisitPersistentHandle(Persistent<Value>* value,
uint16_t class_id) {}
};
@@ -4062,13 +4194,13 @@ class V8EXPORT PersistentHandleVisitor { // NOLINT
*/
class V8EXPORT AssertNoGCScope {
#ifndef DEBUG
- V8_INLINE(AssertNoGCScope(Isolate* isolate)) {}
+ // TODO(yangguo): remove isolate argument.
+ V8_INLINE(AssertNoGCScope(Isolate* isolate)) { }
#else
AssertNoGCScope(Isolate* isolate);
~AssertNoGCScope();
private:
- Isolate* isolate_;
- bool last_state_;
+ void* disallow_heap_allocation_;
#endif
};
@@ -4089,6 +4221,14 @@ class V8EXPORT V8 {
AllowCodeGenerationFromStringsCallback that);
/**
+ * Set allocator to use for ArrayBuffer memory.
+ * The allocator should be set only once. The allocator should be set
+ * before any code that uses ArrayBuffers is executed.
+ * This allocator is used in all isolates.
+ */
+ static void SetArrayBufferAllocator(ArrayBuffer::Allocator* allocator);
+
+ /**
* Ignore out-of-memory exceptions.
*
* V8 running out of memory is treated as a fatal error by default.
@@ -4277,41 +4417,6 @@ class V8EXPORT V8 {
static void RemoveCallCompletedCallback(CallCompletedCallback callback);
/**
- * Allows the host application to group objects together. If one
- * object in the group is alive, all objects in the group are alive.
- * After each garbage collection, object groups are removed. It is
- * intended to be used in the before-garbage-collection callback
- * function, for instance to simulate DOM tree connections among JS
- * wrapper objects. Object groups for all dependent handles need to
- * be provided for kGCTypeMarkSweepCompact collections, for all other
- * garbage collection types it is sufficient to provide object groups
- * for partially dependent handles only.
- * See v8-profiler.h for RetainedObjectInfo interface description.
- */
- // TODO(marja): deprecate AddObjectGroup. Use Isolate::SetObjectGroupId and
- // HeapProfiler::SetRetainedObjectInfo instead.
- static void AddObjectGroup(Persistent<Value>* objects,
- size_t length,
- RetainedObjectInfo* info = NULL);
- static void AddObjectGroup(Isolate* isolate,
- Persistent<Value>* objects,
- size_t length,
- RetainedObjectInfo* info = NULL);
-
- /**
- * Allows the host application to declare implicit references between
- * the objects: if |parent| is alive, all |children| are alive too.
- * After each garbage collection, all implicit references
- * are removed. It is intended to be used in the before-garbage-collection
- * callback function.
- */
- // TODO(marja): Deprecate AddImplicitReferences. Use
- // Isolate::SetReferenceFromGroup instead.
- static void AddImplicitReferences(Persistent<Object> parent,
- Persistent<Value>* children,
- size_t length);
-
- /**
* Initializes from snapshot if possible. Otherwise, attempts to
* initialize from scratch. This function is called implicitly if
* you use the API without calling it first.
@@ -4541,16 +4646,12 @@ class V8EXPORT V8 {
static internal::Object** GlobalizeReference(internal::Isolate* isolate,
internal::Object** handle);
- static void DisposeGlobal(internal::Isolate* isolate,
- internal::Object** global_handle);
+ static void DisposeGlobal(internal::Object** global_handle);
typedef WeakReferenceCallbacks<Value, void>::Revivable RevivableCallback;
- static void MakeWeak(internal::Isolate* isolate,
- internal::Object** global_handle,
+ static void MakeWeak(internal::Object** global_handle,
void* data,
- RevivableCallback weak_reference_callback,
- NearDeathCallback near_death_callback);
- static void ClearWeak(internal::Isolate* isolate,
- internal::Object** global_handle);
+ RevivableCallback weak_reference_callback);
+ static void ClearWeak(internal::Object** global_handle);
template <class T> friend class Handle;
template <class T> friend class Local;
@@ -4890,6 +4991,7 @@ class V8EXPORT Context {
explicit V8_INLINE(Scope(Handle<Context> context)) : context_(context) {
context_->Enter();
}
+ // TODO(dcarney): deprecate
V8_INLINE(Scope(Isolate* isolate, Persistent<Context>& context)) // NOLINT
#ifndef V8_USE_UNSAFE_HANDLES
: context_(Handle<Context>::New(isolate, context)) {
@@ -5231,7 +5333,7 @@ class Internals {
static const int kNullValueRootIndex = 7;
static const int kTrueValueRootIndex = 8;
static const int kFalseValueRootIndex = 9;
- static const int kEmptyStringRootIndex = 127;
+ static const int kEmptyStringRootIndex = 130;
static const int kNodeClassIdOffset = 1 * kApiPointerSize;
static const int kNodeFlagsOffset = 1 * kApiPointerSize + 3;
@@ -5241,10 +5343,10 @@ class Internals {
static const int kNodeIsIndependentShift = 4;
static const int kNodeIsPartiallyDependentShift = 5;
- static const int kJSObjectType = 0xae;
+ static const int kJSObjectType = 0xaf;
static const int kFirstNonstringType = 0x80;
static const int kOddballType = 0x83;
- static const int kForeignType = 0x86;
+ static const int kForeignType = 0x87;
static const int kUndefinedOddballKind = 5;
static const int kNullOddballKind = 3;
@@ -5438,15 +5540,8 @@ Persistent<T> Persistent<T>::New(Isolate* isolate, T* that) {
template <class T>
bool Persistent<T>::IsIndependent() const {
- return IsIndependent(Isolate::GetCurrent());
-}
-
-
-template <class T>
-bool Persistent<T>::IsIndependent(Isolate* isolate) const {
typedef internal::Internals I;
if (this->IsEmpty()) return false;
- if (!I::IsInitialized(isolate)) return false;
return I::GetNodeFlag(reinterpret_cast<internal::Object**>(this->val_),
I::kNodeIsIndependentShift);
}
@@ -5454,15 +5549,8 @@ bool Persistent<T>::IsIndependent(Isolate* isolate) const {
template <class T>
bool Persistent<T>::IsNearDeath() const {
- return IsNearDeath(Isolate::GetCurrent());
-}
-
-
-template <class T>
-bool Persistent<T>::IsNearDeath(Isolate* isolate) const {
typedef internal::Internals I;
if (this->IsEmpty()) return false;
- if (!I::IsInitialized(isolate)) return false;
return I::GetNodeState(reinterpret_cast<internal::Object**>(this->val_)) ==
I::kNodeStateIsNearDeathValue;
}
@@ -5470,15 +5558,8 @@ bool Persistent<T>::IsNearDeath(Isolate* isolate) const {
template <class T>
bool Persistent<T>::IsWeak() const {
- return IsWeak(Isolate::GetCurrent());
-}
-
-
-template <class T>
-bool Persistent<T>::IsWeak(Isolate* isolate) const {
typedef internal::Internals I;
if (this->IsEmpty()) return false;
- if (!I::IsInitialized(isolate)) return false;
return I::GetNodeState(reinterpret_cast<internal::Object**>(this->val_)) ==
I::kNodeStateIsWeakValue;
}
@@ -5486,15 +5567,8 @@ bool Persistent<T>::IsWeak(Isolate* isolate) const {
template <class T>
void Persistent<T>::Dispose() {
- Dispose(Isolate::GetCurrent());
-}
-
-
-template <class T>
-void Persistent<T>::Dispose(Isolate* isolate) {
if (this->IsEmpty()) return;
- V8::DisposeGlobal(reinterpret_cast<internal::Isolate*>(isolate),
- reinterpret_cast<internal::Object**>(this->val_));
+ V8::DisposeGlobal(reinterpret_cast<internal::Object**>(this->val_));
#ifndef V8_USE_UNSAFE_HANDLES
val_ = 0;
#endif
@@ -5504,86 +5578,50 @@ void Persistent<T>::Dispose(Isolate* isolate) {
template <class T>
template <typename S, typename P>
void Persistent<T>::MakeWeak(
- Isolate* isolate,
P* parameters,
typename WeakReferenceCallbacks<S, P>::Revivable callback) {
TYPE_CHECK(S, T);
typedef typename WeakReferenceCallbacks<Value, void>::Revivable Revivable;
- V8::MakeWeak(reinterpret_cast<internal::Isolate*>(isolate),
- reinterpret_cast<internal::Object**>(this->val_),
+ V8::MakeWeak(reinterpret_cast<internal::Object**>(this->val_),
parameters,
- reinterpret_cast<Revivable>(callback),
- NULL);
+ reinterpret_cast<Revivable>(callback));
}
template <class T>
template <typename P>
void Persistent<T>::MakeWeak(
- Isolate* isolate,
P* parameters,
typename WeakReferenceCallbacks<T, P>::Revivable callback) {
- MakeWeak<T, P>(isolate, parameters, callback);
+ MakeWeak<T, P>(parameters, callback);
}
template <class T>
-void Persistent<T>::MakeWeak(Isolate* isolate,
- void* parameters,
- NearDeathCallback callback) {
- V8::MakeWeak(reinterpret_cast<internal::Isolate*>(isolate),
- reinterpret_cast<internal::Object**>(this->val_),
- parameters,
- NULL,
- callback);
-}
-
-template <class T>
void Persistent<T>::ClearWeak() {
- ClearWeak(Isolate::GetCurrent());
+ V8::ClearWeak(reinterpret_cast<internal::Object**>(this->val_));
}
-template <class T>
-void Persistent<T>::ClearWeak(Isolate* isolate) {
- V8::ClearWeak(reinterpret_cast<internal::Isolate*>(isolate),
- reinterpret_cast<internal::Object**>(this->val_));
-}
template <class T>
void Persistent<T>::MarkIndependent() {
- MarkIndependent(Isolate::GetCurrent());
-}
-
-template <class T>
-void Persistent<T>::MarkIndependent(Isolate* isolate) {
typedef internal::Internals I;
if (this->IsEmpty()) return;
- if (!I::IsInitialized(isolate)) return;
I::UpdateNodeFlag(reinterpret_cast<internal::Object**>(this->val_),
true,
I::kNodeIsIndependentShift);
}
-template <class T>
-void Persistent<T>::MarkPartiallyDependent() {
- MarkPartiallyDependent(Isolate::GetCurrent());
-}
template <class T>
-void Persistent<T>::MarkPartiallyDependent(Isolate* isolate) {
+void Persistent<T>::MarkPartiallyDependent() {
typedef internal::Internals I;
if (this->IsEmpty()) return;
- if (!I::IsInitialized(isolate)) return;
I::UpdateNodeFlag(reinterpret_cast<internal::Object**>(this->val_),
true,
I::kNodeIsPartiallyDependentShift);
}
-template <class T>
-void Persistent<T>::SetWrapperClassId(uint16_t class_id) {
- SetWrapperClassId(Isolate::GetCurrent(), class_id);
-}
-
template <class T>
void Persistent<T>::Reset(Isolate* isolate, const Handle<T>& other) {
@@ -5602,6 +5640,21 @@ void Persistent<T>::Reset(Isolate* isolate, const Handle<T>& other) {
}
+#ifndef V8_USE_UNSAFE_HANDLES
+template <class T>
+void Persistent<T>::Reset(Isolate* isolate, const Persistent<T>& other) {
+ Dispose(isolate);
+ if (other.IsEmpty()) {
+ this->val_ = NULL;
+ return;
+ }
+ internal::Object** p = reinterpret_cast<internal::Object**>(other.val_);
+ this->val_ = reinterpret_cast<T*>(
+ V8::GlobalizeReference(reinterpret_cast<internal::Isolate*>(isolate), p));
+}
+#endif
+
+
template <class T>
T* Persistent<T>::ClearAndLeak() {
T* old;
@@ -5617,25 +5670,19 @@ T* Persistent<T>::ClearAndLeak() {
template <class T>
-void Persistent<T>::SetWrapperClassId(Isolate* isolate, uint16_t class_id) {
+void Persistent<T>::SetWrapperClassId(uint16_t class_id) {
typedef internal::Internals I;
if (this->IsEmpty()) return;
- if (!I::IsInitialized(isolate)) return;
internal::Object** obj = reinterpret_cast<internal::Object**>(this->val_);
uint8_t* addr = reinterpret_cast<uint8_t*>(obj) + I::kNodeClassIdOffset;
*reinterpret_cast<uint16_t*>(addr) = class_id;
}
-template <class T>
-uint16_t Persistent<T>::WrapperClassId() const {
- return WrapperClassId(Isolate::GetCurrent());
-}
template <class T>
-uint16_t Persistent<T>::WrapperClassId(Isolate* isolate) const {
+uint16_t Persistent<T>::WrapperClassId() const {
typedef internal::Internals I;
if (this->IsEmpty()) return 0;
- if (!I::IsInitialized(isolate)) return 0;
internal::Object** obj = reinterpret_cast<internal::Object**>(this->val_);
uint8_t* addr = reinterpret_cast<uint8_t*>(obj) + I::kNodeClassIdOffset;
return *reinterpret_cast<uint16_t*>(addr);
@@ -5646,71 +5693,94 @@ template<typename T>
ReturnValue<T>::ReturnValue(internal::Object** slot) : value_(slot) {}
template<typename T>
-void ReturnValue<T>::Set(const Persistent<T>& handle) {
- *value_ = *reinterpret_cast<internal::Object**>(*handle);
+template<typename S>
+void ReturnValue<T>::Set(const Persistent<S>& handle) {
+ TYPE_CHECK(T, S);
+ if (V8_UNLIKELY(handle.IsEmpty())) {
+ *value_ = GetDefaultValue();
+ } else {
+ *value_ = *reinterpret_cast<internal::Object**>(*handle);
+ }
}
template<typename T>
-void ReturnValue<T>::Set(const Handle<T> handle) {
- *value_ = *reinterpret_cast<internal::Object**>(*handle);
+template<typename S>
+void ReturnValue<T>::Set(const Handle<S> handle) {
+ TYPE_CHECK(T, S);
+ if (V8_UNLIKELY(handle.IsEmpty())) {
+ *value_ = GetDefaultValue();
+ } else {
+ *value_ = *reinterpret_cast<internal::Object**>(*handle);
+ }
}
template<typename T>
-void ReturnValue<T>::Set(Isolate* isolate, double i) {
- Set(Number::New(isolate, i));
+void ReturnValue<T>::Set(double i) {
+ Set(Number::New(GetIsolate(), i));
}
template<typename T>
-void ReturnValue<T>::Set(Isolate* isolate, int32_t i) {
+void ReturnValue<T>::Set(int32_t i) {
typedef internal::Internals I;
if (V8_LIKELY(I::IsValidSmi(i))) {
*value_ = I::IntToSmi(i);
return;
}
- Set(Integer::New(i, isolate));
+ Set(Integer::New(i, GetIsolate()));
}
template<typename T>
-void ReturnValue<T>::Set(Isolate* isolate, uint32_t i) {
+void ReturnValue<T>::Set(uint32_t i) {
typedef internal::Internals I;
- if (V8_LIKELY(I::IsValidSmi(i))) {
- *value_ = I::IntToSmi(i);
+ // Can't simply use INT32_MAX here for whatever reason.
+ bool fits_into_int32_t = (i & (1 << 31)) == 0;
+ if (V8_LIKELY(fits_into_int32_t)) {
+ Set(static_cast<int32_t>(i));
return;
}
- Set(Integer::NewFromUnsigned(i, isolate));
+ Set(Integer::NewFromUnsigned(i, GetIsolate()));
}
template<typename T>
-void ReturnValue<T>::Set(Isolate* isolate, bool value) {
+void ReturnValue<T>::Set(bool value) {
+ typedef internal::Internals I;
+ int root_index;
if (value) {
- SetTrue(isolate);
+ root_index = I::kTrueValueRootIndex;
} else {
- SetFalse(isolate);
+ root_index = I::kFalseValueRootIndex;
}
+ *value_ = *I::GetRoot(GetIsolate(), root_index);
}
template<typename T>
-void ReturnValue<T>::SetTrue(Isolate* isolate) {
+void ReturnValue<T>::SetNull() {
typedef internal::Internals I;
- *value_ = *I::GetRoot(isolate, I::kTrueValueRootIndex);
+ *value_ = *I::GetRoot(GetIsolate(), I::kNullValueRootIndex);
}
template<typename T>
-void ReturnValue<T>::SetFalse(Isolate* isolate) {
+void ReturnValue<T>::SetUndefined() {
typedef internal::Internals I;
- *value_ = *I::GetRoot(isolate, I::kFalseValueRootIndex);
+ *value_ = *I::GetRoot(GetIsolate(), I::kUndefinedValueRootIndex);
}
template<typename T>
-void ReturnValue<T>::SetNull(Isolate* isolate) {
+void ReturnValue<T>::SetEmptyString() {
typedef internal::Internals I;
- *value_ = *I::GetRoot(isolate, I::kNullValueRootIndex);
+ *value_ = *I::GetRoot(GetIsolate(), I::kEmptyStringRootIndex);
}
template<typename T>
-void ReturnValue<T>::SetUndefined(Isolate* isolate) {
- typedef internal::Internals I;
- *value_ = *I::GetRoot(isolate, I::kUndefinedValueRootIndex);
+Isolate* ReturnValue<T>::GetIsolate() {
+ // Isolate is always the pointer below the default value on the stack.
+ return *reinterpret_cast<Isolate**>(&value_[-2]);
+}
+
+template<typename T>
+internal::Object* ReturnValue<T>::GetDefaultValue() {
+ // Default value is always the pointer below value_ on the stack.
+ return value_[-1];
}
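

For illustration, a minimal usage sketch (not part of this patch; the function name is hypothetical): a callback written against the reworked API no longer threads an Isolate* into ReturnValue, since the isolate is recovered from the slot layout documented in the two accessors above.

    // Illustrative callback, assuming it was installed via a FunctionTemplate.
    void Answer(const v8::FunctionCallbackInfo<v8::Value>& args) {
      // Smi fast path; falls back to Integer::New(i, GetIsolate()) otherwise.
      args.GetReturnValue().Set(42);
      // Root values likewise take no isolate argument any more:
      // args.GetReturnValue().SetUndefined();
    }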
@@ -5989,6 +6059,11 @@ bool Value::QuickIsString() const {
}
+template <class T> Value* Value::Cast(T* value) {
+ return static_cast<Value*>(value);
+}
+
+
Symbol* Symbol::Cast(v8::Value* value) {
#ifdef V8_ENABLE_CHECKS
CheckCast(value);
@@ -6157,6 +6232,14 @@ Float64Array* Float64Array::Cast(v8::Value* value) {
}
+Uint8ClampedArray* Uint8ClampedArray::Cast(v8::Value* value) {
+#ifdef V8_ENABLE_CHECKS
+ CheckCast(value);
+#endif
+ return static_cast<Uint8ClampedArray*>(value);
+}
+
+
Function* Function::Cast(v8::Value* value) {
#ifdef V8_ENABLE_CHECKS
CheckCast(value);
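A hedged example of the checked casts added here (the helper name is hypothetical): with V8_ENABLE_CHECKS defined the downcast is verified through CheckCast, otherwise it compiles down to a plain static_cast.

    // Hypothetical helper; assumes 'val' is already known to hold the right type.
    v8::Uint8ClampedArray* AsClamped(v8::Local<v8::Value> val) {
      return v8::Uint8ClampedArray::Cast(*val);
    }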
diff --git a/deps/v8/samples/lineprocessor.cc b/deps/v8/samples/lineprocessor.cc
index 2ce31b47c0..214af057db 100644
--- a/deps/v8/samples/lineprocessor.cc
+++ b/deps/v8/samples/lineprocessor.cc
@@ -25,10 +25,6 @@
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-// TODO(dcarney): remove
-#define V8_ALLOW_ACCESS_TO_RAW_HANDLE_CONSTRUCTOR
-#define V8_ALLOW_ACCESS_TO_PERSISTENT_IMPLICIT
-
#include <v8.h>
#ifdef ENABLE_DEBUGGER_SUPPORT
@@ -106,8 +102,8 @@ void ReportException(v8::Isolate* isolate, v8::TryCatch* handler);
v8::Handle<v8::String> ReadFile(const char* name);
v8::Handle<v8::String> ReadLine();
-v8::Handle<v8::Value> Print(const v8::Arguments& args);
-v8::Handle<v8::Value> ReadLine(const v8::Arguments& args);
+void Print(const v8::FunctionCallbackInfo<v8::Value>& args);
+void ReadLine(const v8::FunctionCallbackInfo<v8::Value>& args);
bool RunCppCycle(v8::Handle<v8::Script> script,
v8::Local<v8::Context> context,
bool report_exceptions);
@@ -130,7 +126,9 @@ void DispatchDebugMessages() {
// think about.
v8::Isolate* isolate = v8::Isolate::GetCurrent();
v8::HandleScope handle_scope(isolate);
- v8::Context::Scope scope(isolate, debug_message_context);
+ v8::Local<v8::Context> context =
+ v8::Local<v8::Context>::New(isolate, debug_message_context);
+ v8::Context::Scope scope(context);
v8::Debug::ProcessDebugMessages();
}
@@ -220,8 +218,7 @@ int RunMain(int argc, char* argv[]) {
v8::Context::Scope context_scope(context);
#ifdef ENABLE_DEBUGGER_SUPPORT
- debug_message_context =
- v8::Persistent<v8::Context>::New(isolate, context);
+ debug_message_context.Reset(isolate, context);
v8::Locker locker(isolate);
@@ -396,7 +393,7 @@ void ReportException(v8::Isolate* isolate, v8::TryCatch* try_catch) {
// The callback that is invoked by v8 whenever the JavaScript 'print'
// function is called. Prints its arguments on stdout separated by
// spaces and ending with a newline.
-v8::Handle<v8::Value> Print(const v8::Arguments& args) {
+void Print(const v8::FunctionCallbackInfo<v8::Value>& args) {
bool first = true;
for (int i = 0; i < args.Length(); i++) {
v8::HandleScope handle_scope(args.GetIsolate());
@@ -411,17 +408,17 @@ v8::Handle<v8::Value> Print(const v8::Arguments& args) {
}
printf("\n");
fflush(stdout);
- return v8::Undefined();
}
// The callback that is invoked by v8 whenever the JavaScript 'read_line'
// function is called. Reads a string from standard input and returns it.
-v8::Handle<v8::Value> ReadLine(const v8::Arguments& args) {
+void ReadLine(const v8::FunctionCallbackInfo<v8::Value>& args) {
if (args.Length() > 0) {
- return v8::ThrowException(v8::String::New("Unexpected arguments"));
+ v8::ThrowException(v8::String::New("Unexpected arguments"));
+ return;
}
- return ReadLine();
+ args.GetReturnValue().Set(ReadLine());
}
v8::Handle<v8::String> ReadLine() {
@@ -437,7 +434,7 @@ v8::Handle<v8::String> ReadLine() {
}
if (res == NULL) {
v8::Handle<v8::Primitive> t = v8::Undefined();
- return v8::Handle<v8::String>(v8::String::Cast(*t));
+ return v8::Handle<v8::String>::Cast(t);
}
// Remove newline char
for (char* pos = buffer; *pos != '\0'; pos++) {
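The same mechanical rewrite recurs throughout these samples; a minimal before/after sketch (function name illustrative):

    // Old style: return a handle; throw by returning the exception value.
    // v8::Handle<v8::Value> Hello(const v8::Arguments& args);

    // New style: void return; results and throws go through the callback info.
    void Hello(const v8::FunctionCallbackInfo<v8::Value>& args) {
      if (args.Length() != 0) {
        v8::ThrowException(v8::String::New("Unexpected arguments"));
        return;  // the scheduled exception becomes the result
      }
      args.GetReturnValue().Set(v8::String::New("hello"));
    }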
diff --git a/deps/v8/samples/process.cc b/deps/v8/samples/process.cc
index fd3a821639..97eec14dc3 100644
--- a/deps/v8/samples/process.cc
+++ b/deps/v8/samples/process.cc
@@ -25,11 +25,6 @@
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-// TODO(dcarney): remove this
-#define V8_ALLOW_ACCESS_TO_RAW_HANDLE_CONSTRUCTOR
-#define V8_ALLOW_ACCESS_TO_PERSISTENT_IMPLICIT
-#define V8_ALLOW_ACCESS_TO_PERSISTENT_ARROW
-
#include <v8.h>
#include <string>
@@ -107,18 +102,21 @@ class JsHttpRequestProcessor : public HttpRequestProcessor {
static Handle<ObjectTemplate> MakeMapTemplate(Isolate* isolate);
// Callbacks that access the individual fields of request objects.
- static Handle<Value> GetPath(Local<String> name, const AccessorInfo& info);
- static Handle<Value> GetReferrer(Local<String> name,
- const AccessorInfo& info);
- static Handle<Value> GetHost(Local<String> name, const AccessorInfo& info);
- static Handle<Value> GetUserAgent(Local<String> name,
- const AccessorInfo& info);
+ static void GetPath(Local<String> name,
+ const PropertyCallbackInfo<Value>& info);
+ static void GetReferrer(Local<String> name,
+ const PropertyCallbackInfo<Value>& info);
+ static void GetHost(Local<String> name,
+ const PropertyCallbackInfo<Value>& info);
+ static void GetUserAgent(Local<String> name,
+ const PropertyCallbackInfo<Value>& info);
// Callbacks that access maps
- static Handle<Value> MapGet(Local<String> name, const AccessorInfo& info);
- static Handle<Value> MapSet(Local<String> name,
- Local<Value> value,
- const AccessorInfo& info);
+ static void MapGet(Local<String> name,
+ const PropertyCallbackInfo<Value>& info);
+ static void MapSet(Local<String> name,
+ Local<Value> value,
+ const PropertyCallbackInfo<Value>& info);
// Utility methods for wrapping C++ objects as JavaScript objects,
// and going back again.
@@ -142,13 +140,12 @@ class JsHttpRequestProcessor : public HttpRequestProcessor {
// -------------------------
-static Handle<Value> LogCallback(const Arguments& args) {
- if (args.Length() < 1) return Undefined();
+static void LogCallback(const v8::FunctionCallbackInfo<v8::Value>& args) {
+ if (args.Length() < 1) return;
HandleScope scope(args.GetIsolate());
Handle<Value> arg = args[0];
String::Utf8Value value(arg);
HttpRequestProcessor::Log(*value);
- return Undefined();
}
@@ -168,11 +165,12 @@ bool JsHttpRequestProcessor::Initialize(map<string, string>* opts,
// is what we need for the reference to remain after we return from
// this method. That persistent handle has to be disposed in the
// destructor.
- context_.Reset(GetIsolate(), Context::New(GetIsolate(), NULL, global));
+ v8::Handle<v8::Context> context = Context::New(GetIsolate(), NULL, global);
+ context_.Reset(GetIsolate(), context);
// Enter the new context so all the following operations take place
// within it.
- Context::Scope context_scope(GetIsolate(), context_);
+ Context::Scope context_scope(context);
// Make the options mapping available within the context
if (!InstallMaps(opts, output))
@@ -185,7 +183,7 @@ bool JsHttpRequestProcessor::Initialize(map<string, string>* opts,
// The script compiled and ran correctly. Now we fetch out the
// Process function from the global object.
Handle<String> process_name = String::New("Process");
- Handle<Value> process_val = context_->Global()->Get(process_name);
+ Handle<Value> process_val = context->Global()->Get(process_name);
// If there is no Process function, or if it is not a function,
// bail out
@@ -196,7 +194,7 @@ bool JsHttpRequestProcessor::Initialize(map<string, string>* opts,
// Store the function in a Persistent handle, since we also want
// that to remain after this call returns
- process_ = Persistent<Function>::New(GetIsolate(), process_fun);
+ process_.Reset(GetIsolate(), process_fun);
// All done; all went well
return true;
@@ -239,11 +237,14 @@ bool JsHttpRequestProcessor::InstallMaps(map<string, string>* opts,
// Wrap the map object in a JavaScript wrapper
Handle<Object> opts_obj = WrapMap(opts);
+ v8::Local<v8::Context> context =
+ v8::Local<v8::Context>::New(GetIsolate(), context_);
+
// Set the options object as a property on the global object.
- context_->Global()->Set(String::New("options"), opts_obj);
+ context->Global()->Set(String::New("options"), opts_obj);
Handle<Object> output_obj = WrapMap(output);
- context_->Global()->Set(String::New("output"), output_obj);
+ context->Global()->Set(String::New("output"), output_obj);
return true;
}
@@ -253,9 +254,12 @@ bool JsHttpRequestProcessor::Process(HttpRequest* request) {
// Create a handle scope to keep the temporary object references.
HandleScope handle_scope(GetIsolate());
+ v8::Local<v8::Context> context =
+ v8::Local<v8::Context>::New(GetIsolate(), context_);
+
// Enter this processor's context so all the remaining operations
// take place there
- Context::Scope context_scope(GetIsolate(), context_);
+ Context::Scope context_scope(context);
// Wrap the C++ request object in a JavaScript wrapper
Handle<Object> request_obj = WrapRequest(request);
@@ -267,7 +271,9 @@ bool JsHttpRequestProcessor::Process(HttpRequest* request) {
// and one argument, the request.
const int argc = 1;
Handle<Value> argv[argc] = { request_obj };
- Handle<Value> result = process_->Call(context_->Global(), argc, argv);
+ v8::Local<v8::Function> process =
+ v8::Local<v8::Function>::New(GetIsolate(), process_);
+ Handle<Value> result = process->Call(context->Global(), argc, argv);
if (result.IsEmpty()) {
String::Utf8Value error(try_catch.Exception());
Log(*error);
@@ -306,7 +312,7 @@ Handle<Object> JsHttpRequestProcessor::WrapMap(map<string, string>* obj) {
// It only has to be created once, which we do on demand.
if (map_template_.IsEmpty()) {
Handle<ObjectTemplate> raw_template = MakeMapTemplate(GetIsolate());
- map_template_ = Persistent<ObjectTemplate>::New(GetIsolate(), raw_template);
+ map_template_.Reset(GetIsolate(), raw_template);
}
Handle<ObjectTemplate> templ =
Local<ObjectTemplate>::New(GetIsolate(), map_template_);
@@ -346,8 +352,8 @@ string ObjectToString(Local<Value> value) {
}
-Handle<Value> JsHttpRequestProcessor::MapGet(Local<String> name,
- const AccessorInfo& info) {
+void JsHttpRequestProcessor::MapGet(Local<String> name,
+ const PropertyCallbackInfo<Value>& info) {
// Fetch the map wrapped by this object.
map<string, string>* obj = UnwrapMap(info.Holder());
@@ -358,17 +364,18 @@ Handle<Value> JsHttpRequestProcessor::MapGet(Local<String> name,
map<string, string>::iterator iter = obj->find(key);
// If the key is not present return an empty handle as signal
- if (iter == obj->end()) return Handle<Value>();
+ if (iter == obj->end()) return;
// Otherwise fetch the value and wrap it in a JavaScript string
const string& value = (*iter).second;
- return String::New(value.c_str(), static_cast<int>(value.length()));
+ info.GetReturnValue().Set(
+ String::New(value.c_str(), static_cast<int>(value.length())));
}
-Handle<Value> JsHttpRequestProcessor::MapSet(Local<String> name,
- Local<Value> value_obj,
- const AccessorInfo& info) {
+void JsHttpRequestProcessor::MapSet(Local<String> name,
+ Local<Value> value_obj,
+ const PropertyCallbackInfo<Value>& info) {
// Fetch the map wrapped by this object.
map<string, string>* obj = UnwrapMap(info.Holder());
@@ -380,7 +387,7 @@ Handle<Value> JsHttpRequestProcessor::MapSet(Local<String> name,
(*obj)[key] = value;
// Return the value; any non-empty handle will work.
- return value_obj;
+ info.GetReturnValue().Set(value_obj);
}
@@ -413,8 +420,7 @@ Handle<Object> JsHttpRequestProcessor::WrapRequest(HttpRequest* request) {
// It only has to be created once, which we do on demand.
if (request_template_.IsEmpty()) {
Handle<ObjectTemplate> raw_template = MakeRequestTemplate(GetIsolate());
- request_template_ =
- Persistent<ObjectTemplate>::New(GetIsolate(), raw_template);
+ request_template_.Reset(GetIsolate(), raw_template);
}
Handle<ObjectTemplate> templ =
Local<ObjectTemplate>::New(GetIsolate(), request_template_);
@@ -448,8 +454,8 @@ HttpRequest* JsHttpRequestProcessor::UnwrapRequest(Handle<Object> obj) {
}
-Handle<Value> JsHttpRequestProcessor::GetPath(Local<String> name,
- const AccessorInfo& info) {
+void JsHttpRequestProcessor::GetPath(Local<String> name,
+ const PropertyCallbackInfo<Value>& info) {
// Extract the C++ request object from the JavaScript wrapper.
HttpRequest* request = UnwrapRequest(info.Holder());
@@ -457,31 +463,37 @@ Handle<Value> JsHttpRequestProcessor::GetPath(Local<String> name,
const string& path = request->Path();
// Wrap the result in a JavaScript string and return it.
- return String::New(path.c_str(), static_cast<int>(path.length()));
+ info.GetReturnValue().Set(
+ String::New(path.c_str(), static_cast<int>(path.length())));
}
-Handle<Value> JsHttpRequestProcessor::GetReferrer(Local<String> name,
- const AccessorInfo& info) {
+void JsHttpRequestProcessor::GetReferrer(
+ Local<String> name,
+ const PropertyCallbackInfo<Value>& info) {
HttpRequest* request = UnwrapRequest(info.Holder());
const string& path = request->Referrer();
- return String::New(path.c_str(), static_cast<int>(path.length()));
+ info.GetReturnValue().Set(
+ String::New(path.c_str(), static_cast<int>(path.length())));
}
-Handle<Value> JsHttpRequestProcessor::GetHost(Local<String> name,
- const AccessorInfo& info) {
+void JsHttpRequestProcessor::GetHost(Local<String> name,
+ const PropertyCallbackInfo<Value>& info) {
HttpRequest* request = UnwrapRequest(info.Holder());
const string& path = request->Host();
- return String::New(path.c_str(), static_cast<int>(path.length()));
+ info.GetReturnValue().Set(
+ String::New(path.c_str(), static_cast<int>(path.length())));
}
-Handle<Value> JsHttpRequestProcessor::GetUserAgent(Local<String> name,
- const AccessorInfo& info) {
+void JsHttpRequestProcessor::GetUserAgent(
+ Local<String> name,
+ const PropertyCallbackInfo<Value>& info) {
HttpRequest* request = UnwrapRequest(info.Holder());
const string& path = request->UserAgent();
- return String::New(path.c_str(), static_cast<int>(path.length()));
+ info.GetReturnValue().Set(
+ String::New(path.c_str(), static_cast<int>(path.length())));
}
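

Accessors follow the same shape; a sketch with illustrative names, assuming the accessor was registered on an ObjectTemplate via SetAccessor:

    void GetFoo(v8::Local<v8::String> name,
                const v8::PropertyCallbackInfo<v8::Value>& info) {
      // An absent property is now signalled by returning without setting a
      // return value, instead of returning an empty handle.
      info.GetReturnValue().Set(v8::String::New("foo"));
    }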
diff --git a/deps/v8/samples/shell.cc b/deps/v8/samples/shell.cc
index da18cc71d3..a0af931b23 100644
--- a/deps/v8/samples/shell.cc
+++ b/deps/v8/samples/shell.cc
@@ -25,11 +25,6 @@
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-// TODO(dcarney): remove this
-#define V8_ALLOW_ACCESS_TO_RAW_HANDLE_CONSTRUCTOR
-#define V8_ALLOW_ACCESS_TO_PERSISTENT_IMPLICIT
-#define V8_ALLOW_ACCESS_TO_PERSISTENT_ARROW
-
#include <v8.h>
#include <assert.h>
#include <fcntl.h>
@@ -58,11 +53,11 @@ bool ExecuteString(v8::Isolate* isolate,
v8::Handle<v8::Value> name,
bool print_result,
bool report_exceptions);
-v8::Handle<v8::Value> Print(const v8::Arguments& args);
-v8::Handle<v8::Value> Read(const v8::Arguments& args);
-v8::Handle<v8::Value> Load(const v8::Arguments& args);
-v8::Handle<v8::Value> Quit(const v8::Arguments& args);
-v8::Handle<v8::Value> Version(const v8::Arguments& args);
+void Print(const v8::FunctionCallbackInfo<v8::Value>& args);
+void Read(const v8::FunctionCallbackInfo<v8::Value>& args);
+void Load(const v8::FunctionCallbackInfo<v8::Value>& args);
+void Quit(const v8::FunctionCallbackInfo<v8::Value>& args);
+void Version(const v8::FunctionCallbackInfo<v8::Value>& args);
v8::Handle<v8::String> ReadFile(const char* name);
void ReportException(v8::Isolate* isolate, v8::TryCatch* handler);
@@ -121,7 +116,7 @@ v8::Handle<v8::Context> CreateShellContext(v8::Isolate* isolate) {
// The callback that is invoked by v8 whenever the JavaScript 'print'
// function is called. Prints its arguments on stdout separated by
// spaces and ending with a newline.
-v8::Handle<v8::Value> Print(const v8::Arguments& args) {
+void Print(const v8::FunctionCallbackInfo<v8::Value>& args) {
bool first = true;
for (int i = 0; i < args.Length(); i++) {
v8::HandleScope handle_scope(args.GetIsolate());
@@ -136,70 +131,73 @@ v8::Handle<v8::Value> Print(const v8::Arguments& args) {
}
printf("\n");
fflush(stdout);
- return v8::Undefined();
}
// The callback that is invoked by v8 whenever the JavaScript 'read'
// function is called. This function loads the content of the file named in
// the argument into a JavaScript string.
-v8::Handle<v8::Value> Read(const v8::Arguments& args) {
+void Read(const v8::FunctionCallbackInfo<v8::Value>& args) {
if (args.Length() != 1) {
- return v8::ThrowException(v8::String::New("Bad parameters"));
+ v8::ThrowException(v8::String::New("Bad parameters"));
+ return;
}
v8::String::Utf8Value file(args[0]);
if (*file == NULL) {
- return v8::ThrowException(v8::String::New("Error loading file"));
+ v8::ThrowException(v8::String::New("Error loading file"));
+ return;
}
v8::Handle<v8::String> source = ReadFile(*file);
if (source.IsEmpty()) {
- return v8::ThrowException(v8::String::New("Error loading file"));
+ v8::ThrowException(v8::String::New("Error loading file"));
+ return;
}
- return source;
+ args.GetReturnValue().Set(source);
}
// The callback that is invoked by v8 whenever the JavaScript 'load'
// function is called. Loads, compiles and executes its argument
// JavaScript file.
-v8::Handle<v8::Value> Load(const v8::Arguments& args) {
+void Load(const v8::FunctionCallbackInfo<v8::Value>& args) {
for (int i = 0; i < args.Length(); i++) {
v8::HandleScope handle_scope(args.GetIsolate());
v8::String::Utf8Value file(args[i]);
if (*file == NULL) {
- return v8::ThrowException(v8::String::New("Error loading file"));
+ v8::ThrowException(v8::String::New("Error loading file"));
+ return;
}
v8::Handle<v8::String> source = ReadFile(*file);
if (source.IsEmpty()) {
- return v8::ThrowException(v8::String::New("Error loading file"));
+ v8::ThrowException(v8::String::New("Error loading file"));
+ return;
}
if (!ExecuteString(args.GetIsolate(),
source,
v8::String::New(*file),
false,
false)) {
- return v8::ThrowException(v8::String::New("Error executing file"));
+ v8::ThrowException(v8::String::New("Error executing file"));
+ return;
}
}
- return v8::Undefined();
}
// The callback that is invoked by v8 whenever the JavaScript 'quit'
// function is called. Quits.
-v8::Handle<v8::Value> Quit(const v8::Arguments& args) {
+void Quit(const v8::FunctionCallbackInfo<v8::Value>& args) {
// If no arguments are given args[0] will yield undefined, which
// converts to the integer value 0.
int exit_code = args[0]->Int32Value();
fflush(stdout);
fflush(stderr);
exit(exit_code);
- return v8::Undefined();
}
-v8::Handle<v8::Value> Version(const v8::Arguments& args) {
- return v8::String::New(v8::V8::GetVersion());
+void Version(const v8::FunctionCallbackInfo<v8::Value>& args) {
+ args.GetReturnValue().Set(v8::String::New(v8::V8::GetVersion()));
}
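

The persistent-handle changes shared by all three samples, condensed into one hedged sketch (the struct is illustrative): Reset replaces Persistent::New, and a Persistent must be materialized into a Local before use now that the implicit access allowed by the removed V8_ALLOW_ACCESS_TO_PERSISTENT_* defines is gone.

    struct ContextKeeper {
      v8::Persistent<v8::Context> context_;
      void Remember(v8::Isolate* isolate, v8::Handle<v8::Context> ctx) {
        context_.Reset(isolate, ctx);  // replaces Persistent<Context>::New(...)
      }
      v8::Local<v8::Context> Recall(v8::Isolate* isolate) {
        // Explicit materialization; no more context_-> or implicit conversion.
        return v8::Local<v8::Context>::New(isolate, context_);
      }
    };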
diff --git a/deps/v8/src/accessors.cc b/deps/v8/src/accessors.cc
index 64047a2847..e441de47ee 100644
--- a/deps/v8/src/accessors.cc
+++ b/deps/v8/src/accessors.cc
@@ -687,7 +687,7 @@ const AccessorDescriptor Accessors::FunctionArguments = {
class FrameFunctionIterator {
public:
- FrameFunctionIterator(Isolate* isolate, const AssertNoAllocation& promise)
+ FrameFunctionIterator(Isolate* isolate, const DisallowHeapAllocation& promise)
: frame_iterator_(isolate),
functions_(2),
index_(0) {
@@ -734,13 +734,13 @@ class FrameFunctionIterator {
MaybeObject* Accessors::FunctionGetCaller(Object* object, void*) {
Isolate* isolate = Isolate::Current();
HandleScope scope(isolate);
- AssertNoAllocation no_alloc;
+ DisallowHeapAllocation no_allocation;
JSFunction* holder = FindInstanceOf<JSFunction>(isolate, object);
if (holder == NULL) return isolate->heap()->undefined_value();
if (holder->shared()->native()) return isolate->heap()->null_value();
Handle<JSFunction> function(holder, isolate);
- FrameFunctionIterator it(isolate, no_alloc);
+ FrameFunctionIterator it(isolate, no_allocation);
// Find the function from the frames.
if (!it.Find(*function)) {
@@ -793,9 +793,9 @@ const AccessorDescriptor Accessors::FunctionCaller = {
// Accessors::MakeModuleExport
//
-static v8::Handle<v8::Value> ModuleGetExport(
+static void ModuleGetExport(
v8::Local<v8::String> property,
- const v8::AccessorInfo& info) {
+ const v8::PropertyCallbackInfo<v8::Value>& info) {
JSModule* instance = JSModule::cast(*v8::Utils::OpenHandle(*info.Holder()));
Context* context = Context::cast(instance->context());
ASSERT(context->IsModuleContext());
@@ -807,16 +807,16 @@ static v8::Handle<v8::Value> ModuleGetExport(
isolate->ScheduleThrow(
*isolate->factory()->NewReferenceError("not_defined",
HandleVector(&name, 1)));
- return v8::Handle<v8::Value>();
+ return;
}
- return v8::Utils::ToLocal(Handle<Object>(value, isolate));
+ info.GetReturnValue().Set(v8::Utils::ToLocal(Handle<Object>(value, isolate)));
}
static void ModuleSetExport(
v8::Local<v8::String> property,
v8::Local<v8::Value> value,
- const v8::AccessorInfo& info) {
+ const v8::PropertyCallbackInfo<v8::Value>& info) {
JSModule* instance = JSModule::cast(*v8::Utils::OpenHandle(*info.Holder()));
Context* context = Context::cast(instance->context());
ASSERT(context->IsModuleContext());
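The AssertNoAllocation → DisallowHeapAllocation rename comes from the new assert-scope.h; a sketch of the idiom (internal code, so the namespace alias i:: is assumed):

    void WalkRawPointers(i::Isolate* isolate) {
      // In debug builds any allocation inside this scope hits an assert;
      // in release builds the scope object is empty and compiles away.
      i::DisallowHeapAllocation no_allocation;
      // ... raw-pointer work that must not trigger GC ...
    }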
diff --git a/deps/v8/src/api.cc b/deps/v8/src/api.cc
index 7099ca8ddd..20496fefde 100644
--- a/deps/v8/src/api.cc
+++ b/deps/v8/src/api.cc
@@ -25,9 +25,6 @@
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-// TODO(dcarney): remove
-#define V8_ALLOW_ACCESS_TO_PERSISTENT_IMPLICIT
-
#include "api.h"
#include <string.h> // For memcpy, strlen.
@@ -35,6 +32,7 @@
#include "../include/v8-debug.h"
#include "../include/v8-profiler.h"
#include "../include/v8-testing.h"
+#include "assert-scope.h"
#include "bootstrapper.h"
#include "code-stubs.h"
#include "compiler.h"
@@ -625,31 +623,22 @@ i::Object** V8::GlobalizeReference(i::Isolate* isolate, i::Object** obj) {
}
-void V8::MakeWeak(i::Isolate* isolate,
- i::Object** object,
+void V8::MakeWeak(i::Object** object,
void* parameters,
- RevivableCallback weak_reference_callback,
- NearDeathCallback near_death_callback) {
- ASSERT(isolate == i::Isolate::Current());
- LOG_API(isolate, "MakeWeak");
- isolate->global_handles()->MakeWeak(object,
- parameters,
- weak_reference_callback,
- near_death_callback);
+ RevivableCallback weak_reference_callback) {
+ i::GlobalHandles::MakeWeak(object,
+ parameters,
+ weak_reference_callback);
}
-void V8::ClearWeak(i::Isolate* isolate, i::Object** obj) {
- LOG_API(isolate, "ClearWeak");
- isolate->global_handles()->ClearWeakness(obj);
+void V8::ClearWeak(i::Object** obj) {
+ i::GlobalHandles::ClearWeakness(obj);
}
-void V8::DisposeGlobal(i::Isolate* isolate, i::Object** obj) {
- ASSERT(isolate == i::Isolate::Current());
- LOG_API(isolate, "DisposeGlobal");
- if (!isolate->IsInitialized()) return;
- isolate->global_handles()->Destroy(obj);
+void V8::DisposeGlobal(i::Object** obj) {
+ i::GlobalHandles::Destroy(obj);
}
// --- H a n d l e s ---
@@ -686,19 +675,7 @@ HandleScope::~HandleScope() {
void HandleScope::Leave() {
- v8::ImplementationUtilities::HandleScopeData* current =
- isolate_->handle_scope_data();
- current->level--;
- ASSERT(current->level >= 0);
- current->next = prev_next_;
- if (current->limit != prev_limit_) {
- current->limit = prev_limit_;
- i::HandleScope::DeleteExtensions(isolate_);
- }
-
-#ifdef ENABLE_EXTRA_CHECKS
- i::HandleScope::ZapRange(prev_next_, prev_limit_);
-#endif
+ return i::HandleScope::CloseScope(isolate_, prev_next_, prev_limit_);
}
@@ -909,7 +886,8 @@ void NeanderArray::add(i::Handle<i::Object> value) {
int length = this->length();
int size = obj_.size();
if (length == size - 1) {
- i::Handle<i::FixedArray> new_elms = FACTORY->NewFixedArray(2 * size);
+ i::Factory* factory = i::Isolate::Current()->factory();
+ i::Handle<i::FixedArray> new_elms = factory->NewFixedArray(2 * size);
for (int i = 0; i < length; i++)
new_elms->set(i + 1, get(i));
obj_.value()->set_elements(*new_elms);
@@ -985,7 +963,7 @@ void FunctionTemplate::Inherit(v8::Handle<FunctionTemplate> value) {
template<typename Callback>
static Local<FunctionTemplate> FunctionTemplateNew(
- Callback callback_in,
+ Callback callback,
v8::Handle<Value> data,
v8::Handle<Signature> signature,
int length) {
@@ -1001,10 +979,8 @@ static Local<FunctionTemplate> FunctionTemplateNew(
int next_serial_number = isolate->next_serial_number();
isolate->set_next_serial_number(next_serial_number + 1);
obj->set_serial_number(i::Smi::FromInt(next_serial_number));
- if (callback_in != 0) {
+ if (callback != 0) {
if (data.IsEmpty()) data = v8::Undefined();
- InvocationCallback callback =
- i::CallbackTable::Register(isolate, callback_in);
Utils::ToLocal(obj)->SetCallHandler(callback, data);
}
obj->set_length(length);
@@ -1228,7 +1204,7 @@ int TypeSwitch::match(v8::Handle<Value> value) {
template<typename Callback>
static void FunctionTemplateSetCallHandler(FunctionTemplate* function_template,
- Callback callback,
+ Callback callback_in,
v8::Handle<Value> data) {
i::Isolate* isolate = Utils::OpenHandle(function_template)->GetIsolate();
if (IsDeadCheck(isolate, "v8::FunctionTemplate::SetCallHandler()")) return;
@@ -1238,6 +1214,8 @@ static void FunctionTemplateSetCallHandler(FunctionTemplate* function_template,
isolate->factory()->NewStruct(i::CALL_HANDLER_INFO_TYPE);
i::Handle<i::CallHandlerInfo> obj =
i::Handle<i::CallHandlerInfo>::cast(struct_obj);
+ FunctionCallback callback =
+ i::CallbackTable::Register(isolate, callback_in);
SET_FIELD_WRAPPED(obj, set_callback, callback);
if (data.IsEmpty()) data = v8::Undefined();
obj->set_data(*Utils::OpenHandle(*data));
@@ -1284,9 +1262,11 @@ static i::Handle<i::AccessorInfo> MakeAccessorInfo(
i::Isolate* isolate = Utils::OpenHandle(*name)->GetIsolate();
i::Handle<i::ExecutableAccessorInfo> obj =
isolate->factory()->NewExecutableAccessorInfo();
- AccessorGetter getter = i::CallbackTable::Register(isolate, getter_in);
+ AccessorGetterCallback getter =
+ i::CallbackTable::Register(isolate, getter_in);
SET_FIELD_WRAPPED(obj, set_getter, getter);
- AccessorSetter setter = i::CallbackTable::Register(isolate, setter_in);
+ AccessorSetterCallback setter =
+ i::CallbackTable::Register(isolate, setter_in);
SET_FIELD_WRAPPED(obj, set_setter, setter);
if (data.IsEmpty()) data = v8::Undefined();
obj->set_data(*Utils::OpenHandle(*data));
@@ -1389,16 +1369,19 @@ static void SetNamedInstancePropertyHandler(
i::Handle<i::InterceptorInfo> obj =
i::Handle<i::InterceptorInfo>::cast(struct_obj);
- NamedPropertyGetter getter = i::CallbackTable::Register(isolate, getter_in);
+ NamedPropertyGetterCallback getter =
+ i::CallbackTable::Register(isolate, getter_in);
if (getter != 0) SET_FIELD_WRAPPED(obj, set_getter, getter);
- NamedPropertySetter setter = i::CallbackTable::Register(isolate, setter_in);
+ NamedPropertySetterCallback setter =
+ i::CallbackTable::Register(isolate, setter_in);
if (setter != 0) SET_FIELD_WRAPPED(obj, set_setter, setter);
- NamedPropertyQuery query = i::CallbackTable::Register(isolate, query_in);
+ NamedPropertyQueryCallback query =
+ i::CallbackTable::Register(isolate, query_in);
if (query != 0) SET_FIELD_WRAPPED(obj, set_query, query);
- NamedPropertyDeleter remover =
+ NamedPropertyDeleterCallback remover =
i::CallbackTable::Register(isolate, remover_in);
if (remover != 0) SET_FIELD_WRAPPED(obj, set_deleter, remover);
- NamedPropertyEnumerator enumerator =
+ NamedPropertyEnumeratorCallback enumerator =
i::CallbackTable::Register(isolate, enumerator_in);
if (enumerator != 0) SET_FIELD_WRAPPED(obj, set_enumerator, enumerator);
@@ -1434,18 +1417,19 @@ static void SetIndexedInstancePropertyHandler(
i::Handle<i::InterceptorInfo> obj =
i::Handle<i::InterceptorInfo>::cast(struct_obj);
- IndexedPropertyGetter getter =
+ IndexedPropertyGetterCallback getter =
i::CallbackTable::Register(isolate, getter_in);
if (getter != 0) SET_FIELD_WRAPPED(obj, set_getter, getter);
- IndexedPropertySetter setter =
+ IndexedPropertySetterCallback setter =
i::CallbackTable::Register(isolate, setter_in);
if (setter != 0) SET_FIELD_WRAPPED(obj, set_setter, setter);
- IndexedPropertyQuery query = i::CallbackTable::Register(isolate, query_in);
+ IndexedPropertyQueryCallback query =
+ i::CallbackTable::Register(isolate, query_in);
if (query != 0) SET_FIELD_WRAPPED(obj, set_query, query);
- IndexedPropertyDeleter remover =
+ IndexedPropertyDeleterCallback remover =
i::CallbackTable::Register(isolate, remover_in);
if (remover != 0) SET_FIELD_WRAPPED(obj, set_deleter, remover);
- IndexedPropertyEnumerator enumerator =
+ IndexedPropertyEnumeratorCallback enumerator =
i::CallbackTable::Register(isolate, enumerator_in);
if (enumerator != 0) SET_FIELD_WRAPPED(obj, set_enumerator, enumerator);
@@ -1471,7 +1455,7 @@ static void SetInstanceCallAsFunctionHandler(
isolate->factory()->NewStruct(i::CALL_HANDLER_INFO_TYPE);
i::Handle<i::CallHandlerInfo> obj =
i::Handle<i::CallHandlerInfo>::cast(struct_obj);
- InvocationCallback callback =
+ FunctionCallback callback =
i::CallbackTable::Register(isolate, callback_in);
SET_FIELD_WRAPPED(obj, set_callback, callback);
if (data.IsEmpty()) data = v8::Undefined();
@@ -3879,7 +3863,8 @@ v8::Local<v8::Value> v8::Object::GetHiddenValue(v8::Handle<v8::String> key) {
ENTER_V8(isolate);
i::Handle<i::JSObject> self = Utils::OpenHandle(this);
i::Handle<i::String> key_obj = Utils::OpenHandle(*key);
- i::Handle<i::String> key_string = FACTORY->InternalizeString(key_obj);
+ i::Handle<i::String> key_string =
+ isolate->factory()->InternalizeString(key_obj);
i::Handle<i::Object> result(self->GetHiddenProperty(*key_string), isolate);
if (result->IsUndefined()) return v8::Local<v8::Value>();
return Utils::ToLocal(result);
@@ -3893,7 +3878,8 @@ bool v8::Object::DeleteHiddenValue(v8::Handle<v8::String> key) {
i::HandleScope scope(isolate);
i::Handle<i::JSObject> self = Utils::OpenHandle(this);
i::Handle<i::String> key_obj = Utils::OpenHandle(*key);
- i::Handle<i::String> key_string = FACTORY->InternalizeString(key_obj);
+ i::Handle<i::String> key_string =
+ isolate->factory()->InternalizeString(key_obj);
self->DeleteHiddenProperty(*key_string);
return true;
}
@@ -4317,6 +4303,124 @@ bool String::IsOneByte() const {
return str->HasOnlyOneByteChars();
}
+// Helpers for ContainsOnlyOneByteHelper
+template<size_t size> struct OneByteMask;
+template<> struct OneByteMask<4> {
+ static const uint32_t value = 0xFF00FF00;
+};
+template<> struct OneByteMask<8> {
+ static const uint64_t value = V8_2PART_UINT64_C(0xFF00FF00, FF00FF00);
+};
+static const uintptr_t kOneByteMask = OneByteMask<sizeof(uintptr_t)>::value;
+static const uintptr_t kAlignmentMask = sizeof(uintptr_t) - 1;
+static inline bool Unaligned(const uint16_t* chars) {
+ return reinterpret_cast<const uintptr_t>(chars) & kAlignmentMask;
+}
+static inline const uint16_t* Align(const uint16_t* chars) {
+ return reinterpret_cast<uint16_t*>(
+ reinterpret_cast<uintptr_t>(chars) & ~kAlignmentMask);
+}
+
+class ContainsOnlyOneByteHelper {
+ public:
+ ContainsOnlyOneByteHelper() : is_one_byte_(true) {}
+ bool Check(i::String* string) {
+ i::ConsString* cons_string = i::String::VisitFlat(this, string, 0);
+ if (cons_string == NULL) return is_one_byte_;
+ return CheckCons(cons_string);
+ }
+ void VisitOneByteString(const uint8_t* chars, int length) {
+ // Nothing to do.
+ }
+ void VisitTwoByteString(const uint16_t* chars, int length) {
+ // Accumulated bits.
+ uintptr_t acc = 0;
+ // Align to uintptr_t.
+ const uint16_t* end = chars + length;
+ while (Unaligned(chars) && chars != end) {
+ acc |= *chars++;
+ }
+    // Read word-aligned in blocks, checking after each block whether an
+    // early return is possible.
+ const uint16_t* aligned_end = Align(end);
+ const int increment = sizeof(uintptr_t)/sizeof(uint16_t);
+ const int inner_loops = 16;
+ while (chars + inner_loops*increment < aligned_end) {
+ for (int i = 0; i < inner_loops; i++) {
+ acc |= *reinterpret_cast<const uintptr_t*>(chars);
+ chars += increment;
+ }
+ // Check for early return.
+ if ((acc & kOneByteMask) != 0) {
+ is_one_byte_ = false;
+ return;
+ }
+ }
+ // Read the rest.
+ while (chars != end) {
+ acc |= *chars++;
+ }
+ // Check result.
+ if ((acc & kOneByteMask) != 0) is_one_byte_ = false;
+ }
+
+ private:
+ bool CheckCons(i::ConsString* cons_string) {
+ while (true) {
+ // Check left side if flat.
+ i::String* left = cons_string->first();
+ i::ConsString* left_as_cons =
+ i::String::VisitFlat(this, left, 0);
+ if (!is_one_byte_) return false;
+ // Check right side if flat.
+ i::String* right = cons_string->second();
+ i::ConsString* right_as_cons =
+ i::String::VisitFlat(this, right, 0);
+ if (!is_one_byte_) return false;
+ // Standard recurse/iterate trick.
+ if (left_as_cons != NULL && right_as_cons != NULL) {
+ if (left->length() < right->length()) {
+ CheckCons(left_as_cons);
+ cons_string = right_as_cons;
+ } else {
+ CheckCons(right_as_cons);
+ cons_string = left_as_cons;
+ }
+ // Check fast return.
+ if (!is_one_byte_) return false;
+ continue;
+ }
+ // Descend left in place.
+ if (left_as_cons != NULL) {
+ cons_string = left_as_cons;
+ continue;
+ }
+ // Descend right in place.
+ if (right_as_cons != NULL) {
+ cons_string = right_as_cons;
+ continue;
+ }
+ // Terminate.
+ break;
+ }
+ return is_one_byte_;
+ }
+ bool is_one_byte_;
+ DISALLOW_COPY_AND_ASSIGN(ContainsOnlyOneByteHelper);
+};
+
+
+bool String::ContainsOnlyOneByte() const {
+ i::Handle<i::String> str = Utils::OpenHandle(this);
+ if (IsDeadCheck(str->GetIsolate(),
+ "v8::String::ContainsOnlyOneByte()")) {
+ return false;
+ }
+ if (str->HasOnlyOneByteChars()) return true;
+ ContainsOnlyOneByteHelper helper;
+ return helper.Check(*str);
+}
+
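To see why the mask works: packing two UTF-16 code units into a 32-bit word leaves each unit's high byte under 0xFF00FF00 regardless of endianness, so OR-ing every word into an accumulator and testing once against the mask reports whether any unit exceeded 0xFF. A standalone, alignment-agnostic sketch (not V8 code):

    #include <stdint.h>
    #include <string.h>

    bool AllOneByte(const uint16_t* chars, int length) {
      uint32_t acc = 0;
      int i = 0;
      for (; i + 1 < length; i += 2) {
        uint32_t word;
        memcpy(&word, chars + i, sizeof(word));  // sidesteps alignment/aliasing
        acc |= word;
      }
      if (i < length) acc |= chars[i];  // odd trailing unit
      return (acc & 0xFF00FF00u) == 0;  // a set high byte means a two-byte char
    }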
class Utf8LengthHelper : public i::AllStatic {
public:
@@ -5056,6 +5160,15 @@ void v8::V8::SetJitCodeEventHandler(
isolate->logger()->SetCodeEventHandler(options, event_handler);
}
+void v8::V8::SetArrayBufferAllocator(
+ ArrayBuffer::Allocator* allocator) {
+ if (!ApiCheck(i::V8::ArrayBufferAllocator() == NULL,
+ "v8::V8::SetArrayBufferAllocator",
+                "ArrayBufferAllocator may only be set once"))
+ return;
+ i::V8::SetArrayBufferAllocator(allocator);
+}
+
bool v8::V8::Dispose() {
i::Isolate* isolate = i::Isolate::Current();
@@ -5107,8 +5220,9 @@ class VisitorAdapter : public i::ObjectVisitor {
UNREACHABLE();
}
virtual void VisitEmbedderReference(i::Object** p, uint16_t class_id) {
- visitor_->VisitPersistentHandle(ToApi<Value>(i::Handle<i::Object>(p)),
- class_id);
+ Value* value = ToApi<Value>(i::Handle<i::Object>(p));
+ visitor_->VisitPersistentHandle(
+ reinterpret_cast<Persistent<Value>*>(&value), class_id);
}
private:
PersistentHandleVisitor* visitor_;
@@ -5119,7 +5233,7 @@ void v8::V8::VisitHandlesWithClassIds(PersistentHandleVisitor* visitor) {
i::Isolate* isolate = i::Isolate::Current();
IsDeadCheck(isolate, "v8::V8::VisitHandlesWithClassId");
- i::AssertNoAllocation no_allocation;
+ i::DisallowHeapAllocation no_allocation;
VisitorAdapter visitor_adapter(visitor);
isolate->global_handles()->IterateAllRootsWithClassIds(&visitor_adapter);
@@ -5132,7 +5246,7 @@ void v8::V8::VisitHandlesForPartialDependence(
ASSERT(isolate == i::Isolate::Current());
IsDeadCheck(isolate, "v8::V8::VisitHandlesForPartialDependence");
- i::AssertNoAllocation no_allocation;
+ i::DisallowHeapAllocation no_allocation;
VisitorAdapter visitor_adapter(visitor);
isolate->global_handles()->IterateAllRootsInNewSpaceWithClassIds(
@@ -5914,13 +6028,14 @@ void v8::Date::DateTimeConfigurationChangeNotification() {
static i::Handle<i::String> RegExpFlagsToString(RegExp::Flags flags) {
+ i::Isolate* isolate = i::Isolate::Current();
uint8_t flags_buf[3];
int num_flags = 0;
if ((flags & RegExp::kGlobal) != 0) flags_buf[num_flags++] = 'g';
if ((flags & RegExp::kMultiline) != 0) flags_buf[num_flags++] = 'm';
if ((flags & RegExp::kIgnoreCase) != 0) flags_buf[num_flags++] = 'i';
ASSERT(num_flags <= static_cast<int>(ARRAY_SIZE(flags_buf)));
- return FACTORY->InternalizeOneByteString(
+ return isolate->factory()->InternalizeOneByteString(
i::Vector<const uint8_t>(flags_buf, num_flags));
}
@@ -6019,19 +6134,48 @@ Local<Object> Array::CloneElementAt(uint32_t index) {
}
-size_t v8::ArrayBuffer::ByteLength() const {
- i::Isolate* isolate = Utils::OpenHandle(this)->GetIsolate();
- if (IsDeadCheck(isolate, "v8::ArrayBuffer::ByteLength()")) return 0;
+bool v8::ArrayBuffer::IsExternal() const {
+ return Utils::OpenHandle(this)->is_external();
+}
+
+v8::ArrayBuffer::Contents v8::ArrayBuffer::Externalize() {
i::Handle<i::JSArrayBuffer> obj = Utils::OpenHandle(this);
- return static_cast<size_t>(obj->byte_length()->Number());
+ ApiCheck(!obj->is_external(),
+ "v8::ArrayBuffer::Externalize",
+ "ArrayBuffer already externalized");
+ obj->set_is_external(true);
+ size_t byte_length = static_cast<size_t>(obj->byte_length()->Number());
+ Contents contents;
+ contents.data_ = obj->backing_store();
+ contents.byte_length_ = byte_length;
+ return contents;
}
-void* v8::ArrayBuffer::Data() const {
+void v8::ArrayBuffer::Neuter() {
+ i::Handle<i::JSArrayBuffer> obj = Utils::OpenHandle(this);
+ i::Isolate* isolate = obj->GetIsolate();
+ ApiCheck(obj->is_external(),
+ "v8::ArrayBuffer::Neuter",
+ "Only externalized ArrayBuffers can be neutered");
+ LOG_API(obj->GetIsolate(), "v8::ArrayBuffer::Neuter()");
+ ENTER_V8(isolate);
+
+ for (i::Handle<i::Object> array_obj(obj->weak_first_array(), isolate);
+ *array_obj != i::Smi::FromInt(0);) {
+ i::Handle<i::JSTypedArray> typed_array(i::JSTypedArray::cast(*array_obj));
+ typed_array->Neuter();
+ array_obj = i::handle(typed_array->weak_next(), isolate);
+ }
+ obj->Neuter();
+}
+
+
+size_t v8::ArrayBuffer::ByteLength() const {
i::Isolate* isolate = Utils::OpenHandle(this)->GetIsolate();
- if (IsDeadCheck(isolate, "v8::ArrayBuffer::Data()")) return 0;
+ if (IsDeadCheck(isolate, "v8::ArrayBuffer::ByteLength()")) return 0;
i::Handle<i::JSArrayBuffer> obj = Utils::OpenHandle(this);
- return obj->backing_store();
+ return static_cast<size_t>(obj->byte_length()->Number());
}
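

The intended lifecycle of the new calls, sketched with error handling elided (the Contents accessors are assumed to be Data()/ByteLength() as declared in the public header):

    v8::ArrayBuffer::Contents TakeOwnership(v8::Handle<v8::ArrayBuffer> buf) {
      // Externalize may be called once; afterwards the embedder owns the store.
      v8::ArrayBuffer::Contents contents = buf->Externalize();
      // Neuter detaches the buffer and every typed array viewing it
      // (walked via the weak_first_array/weak_next chain set up below).
      buf->Neuter();
      return contents;
    }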
@@ -6054,7 +6198,7 @@ Local<ArrayBuffer> v8::ArrayBuffer::New(void* data, size_t byte_length) {
ENTER_V8(isolate);
i::Handle<i::JSArrayBuffer> obj =
isolate->factory()->NewJSArrayBuffer();
- i::Runtime::SetupArrayBuffer(isolate, obj, data, byte_length);
+ i::Runtime::SetupArrayBuffer(isolate, obj, true, data, byte_length);
return Utils::ToLocal(obj);
}
@@ -6121,6 +6265,9 @@ i::Handle<i::JSTypedArray> NewTypedArray(
obj->set_buffer(*buffer);
+ obj->set_weak_next(buffer->weak_first_array());
+ buffer->set_weak_first_array(*obj);
+
i::Handle<i::Object> byte_offset_object = isolate->factory()->NewNumber(
static_cast<double>(byte_offset));
obj->set_byte_offset(*byte_offset_object);
@@ -6265,14 +6412,12 @@ Local<Integer> v8::Integer::NewFromUnsigned(uint32_t value, Isolate* isolate) {
#ifdef DEBUG
-v8::AssertNoGCScope::AssertNoGCScope(v8::Isolate* isolate)
- : isolate_(isolate),
- last_state_(i::EnterAllocationScope(
- reinterpret_cast<i::Isolate*>(isolate), false)) {
+v8::AssertNoGCScope::AssertNoGCScope(v8::Isolate* isolate) {
+ disallow_heap_allocation_ = new i::DisallowHeapAllocation();
}
v8::AssertNoGCScope::~AssertNoGCScope() {
- i::ExitAllocationScope(reinterpret_cast<i::Isolate*>(isolate_), last_state_);
+ delete static_cast<i::DisallowHeapAllocation*>(disallow_heap_allocation_);
}
#endif
@@ -6359,42 +6504,6 @@ void V8::SetFailedAccessCheckCallbackFunction(
}
-void V8::AddObjectGroup(Persistent<Value>* objects,
- size_t length,
- RetainedObjectInfo* info) {
- i::Isolate* isolate = i::Isolate::Current();
- if (IsDeadCheck(isolate, "v8::V8::AddObjectGroup()")) return;
- STATIC_ASSERT(sizeof(Persistent<Value>) == sizeof(i::Object**));
- isolate->global_handles()->AddObjectGroup(
- reinterpret_cast<i::Object***>(objects), length, info);
-}
-
-
-void V8::AddObjectGroup(Isolate* exported_isolate,
- Persistent<Value>* objects,
- size_t length,
- RetainedObjectInfo* info) {
- i::Isolate* isolate = reinterpret_cast<i::Isolate*>(exported_isolate);
- ASSERT(isolate == i::Isolate::Current());
- if (IsDeadCheck(isolate, "v8::V8::AddObjectGroup()")) return;
- STATIC_ASSERT(sizeof(Persistent<Value>) == sizeof(i::Object**));
- isolate->global_handles()->AddObjectGroup(
- reinterpret_cast<i::Object***>(objects), length, info);
-}
-
-
-void V8::AddImplicitReferences(Persistent<Object> parent,
- Persistent<Value>* children,
- size_t length) {
- i::Isolate* isolate = i::Isolate::Current();
- if (IsDeadCheck(isolate, "v8::V8::AddImplicitReferences()")) return;
- STATIC_ASSERT(sizeof(Persistent<Value>) == sizeof(i::Object**));
- isolate->global_handles()->AddImplicitReferences(
- i::Handle<i::HeapObject>::cast(Utils::OpenHandle(*parent)).location(),
- reinterpret_cast<i::Object***>(children), length);
-}
-
-
intptr_t Isolate::AdjustAmountOfExternalAllocatedMemory(
intptr_t change_in_bytes) {
i::Heap* heap = reinterpret_cast<i::Isolate*>(this)->heap();
@@ -7228,6 +7337,12 @@ const CpuProfile* CpuProfiler::GetCpuProfile(int index,
}
+const CpuProfile* CpuProfiler::GetCpuProfile(int index) {
+ return reinterpret_cast<const CpuProfile*>(
+ reinterpret_cast<i::CpuProfiler*>(this)->GetProfile(NULL, index));
+}
+
+
const CpuProfile* CpuProfiler::FindProfile(unsigned uid,
Handle<Value> security_token) {
i::Isolate* isolate = i::Isolate::Current();
@@ -7287,6 +7402,14 @@ const CpuProfile* CpuProfiler::StopCpuProfiling(Handle<String> title,
}
+const CpuProfile* CpuProfiler::StopCpuProfiling(Handle<String> title) {
+ return reinterpret_cast<const CpuProfile*>(
+ reinterpret_cast<i::CpuProfiler*>(this)->StopProfiling(
+ NULL,
+ *Utils::OpenHandle(*title)));
+}
+
+
void CpuProfiler::DeleteAllProfiles() {
i::Isolate* isolate = i::Isolate::Current();
IsDeadCheck(isolate, "v8::CpuProfiler::DeleteAllProfiles");
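A hedged usage sketch of the token-free overloads added here (how the profiler pointer is obtained, and StartCpuProfiling's exact signature, are assumptions of this era's API):

    void ProfileSection(v8::CpuProfiler* profiler) {
      v8::HandleScope scope;  // pre-isolate-scoped HandleScope of this API era
      v8::Local<v8::String> title = v8::String::New("section");
      profiler->StartCpuProfiling(title);  // assumed counterpart overload
      // ... execute the JavaScript of interest ...
      const v8::CpuProfile* profile = profiler->StopCpuProfiling(title);
      (void)profile;  // inspect or Delete() as needed
    }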
@@ -7826,8 +7949,7 @@ DeferredHandles* HandleScopeImplementer::Detach(Object** prev_limit) {
while (!blocks_.is_empty()) {
Object** block_start = blocks_.last();
Object** block_limit = &block_start[kHandleBlockSize];
- // We should not need to check for NoHandleAllocation here. Assert
- // this.
+ // We should not need to check for SealHandleScope here. Assert this.
ASSERT(prev_limit == block_limit ||
!(block_start <= prev_limit && prev_limit <= block_limit));
if (prev_limit == block_limit) break;
diff --git a/deps/v8/src/api.h b/deps/v8/src/api.h
index 12d6e3d082..3c141f7097 100644
--- a/deps/v8/src/api.h
+++ b/deps/v8/src/api.h
@@ -126,8 +126,9 @@ template <typename T> inline T ToCData(v8::internal::Object* obj) {
template <typename T>
inline v8::internal::Handle<v8::internal::Object> FromCData(T obj) {
+ v8::internal::Isolate* isolate = v8::internal::Isolate::Current();
STATIC_ASSERT(sizeof(T) == sizeof(v8::internal::Address));
- return FACTORY->NewForeign(
+ return isolate->factory()->NewForeign(
reinterpret_cast<v8::internal::Address>(reinterpret_cast<intptr_t>(obj)));
}
@@ -636,8 +637,13 @@ void HandleScopeImplementer::DeleteExtensions(internal::Object** prev_limit) {
internal::Object** block_start = blocks_.last();
internal::Object** block_limit = block_start + kHandleBlockSize;
#ifdef DEBUG
- // NoHandleAllocation may make the prev_limit to point inside the block.
- if (block_start <= prev_limit && prev_limit <= block_limit) break;
+  // SealHandleScope may make prev_limit point inside the block.
+ if (block_start <= prev_limit && prev_limit <= block_limit) {
+#ifdef ENABLE_EXTRA_CHECKS
+ internal::HandleScope::ZapRange(prev_limit, block_limit);
+#endif
+ break;
+ }
#else
if (prev_limit == block_limit) break;
#endif
diff --git a/deps/v8/src/arguments.h b/deps/v8/src/arguments.h
index a80b613615..f9dca110c3 100644
--- a/deps/v8/src/arguments.h
+++ b/deps/v8/src/arguments.h
@@ -52,7 +52,8 @@ class Arguments BASE_EMBEDDED {
Object*& operator[] (int index) {
ASSERT(0 <= index && index < length_);
- return arguments_[-index];
+ return *(reinterpret_cast<Object**>(reinterpret_cast<intptr_t>(arguments_) -
+ index * kPointerSize));
}
template <class S> Handle<S> at(int index) {
@@ -152,8 +153,7 @@ class Arguments BASE_EMBEDDED {
// TODO(dcarney): Remove this class when old callbacks are gone.
class CallbackTable {
public:
- // TODO(dcarney): Flip this when it makes sense for performance.
- static const bool kStoreVoidFunctions = true;
+ static const bool kStoreVoidFunctions = false;
static inline bool ReturnsVoid(Isolate* isolate, void* function) {
CallbackTable* table = isolate->callback_table();
bool contains =
@@ -171,13 +171,13 @@ class CallbackTable {
}
#define WRITE_REGISTER(OldFunction, NewFunction) \
- static OldFunction Register(Isolate* isolate, NewFunction f) { \
- InsertCallback(isolate, FunctionToVoidPtr(f), true); \
- return reinterpret_cast<OldFunction>(f); \
+ static NewFunction Register(Isolate* isolate, OldFunction f) { \
+ InsertCallback(isolate, FunctionToVoidPtr(f), false); \
+ return reinterpret_cast<NewFunction>(f); \
} \
\
- static OldFunction Register(Isolate* isolate, OldFunction f) { \
- InsertCallback(isolate, FunctionToVoidPtr(f), false); \
+ static NewFunction Register(Isolate* isolate, NewFunction f) { \
+ InsertCallback(isolate, FunctionToVoidPtr(f), true); \
return f; \
}
FOR_EACH_CALLBACK_TABLE_MAPPING(WRITE_REGISTER)
@@ -254,6 +254,10 @@ class PropertyCallbackArguments
values[T::kHolderIndex] = holder;
values[T::kDataIndex] = data;
values[T::kIsolateIndex] = reinterpret_cast<Object*>(isolate);
+    // The hole is stored here as the default return value.
+    // It cannot escape into JS because it is removed in Call below.
+ values[T::kReturnValueDefaultValueIndex] =
+ isolate->heap()->the_hole_value();
values[T::kReturnValueIndex] = isolate->heap()->the_hole_value();
ASSERT(values[T::kHolderIndex]->IsHeapObject());
ASSERT(values[T::kIsolateIndex]->IsSmi());
@@ -314,6 +318,10 @@ class FunctionCallbackArguments
values[T::kCalleeIndex] = callee;
values[T::kHolderIndex] = holder;
values[T::kIsolateIndex] = reinterpret_cast<internal::Object*>(isolate);
+    // The hole is stored here as the default return value.
+    // It cannot escape into JS because it is removed in Call below.
+ values[T::kReturnValueDefaultValueIndex] =
+ isolate->heap()->the_hole_value();
values[T::kReturnValueIndex] = isolate->heap()->the_hole_value();
ASSERT(values[T::kCalleeIndex]->IsJSFunction());
ASSERT(values[T::kHolderIndex]->IsHeapObject());
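Seeding kReturnValueDefaultValueIndex directly below kReturnValueIndex is what makes the earlier ReturnValue<T>::GetDefaultValue() and GetIsolate() offsets work; the implied layout, written out (index names from the code above, ordering inferred from the offsets):

    // value_      [kReturnValueIndex]              the live return slot
    // value_[-1]  [kReturnValueDefaultValueIndex]  the hole, read by GetDefaultValue()
    // value_[-2]  [kIsolateIndex]                  Isolate*, read by GetIsolate()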
diff --git a/deps/v8/src/arm/assembler-arm.cc b/deps/v8/src/arm/assembler-arm.cc
index 0102f337bf..c6ea6006fe 100644
--- a/deps/v8/src/arm/assembler-arm.cc
+++ b/deps/v8/src/arm/assembler-arm.cc
@@ -308,7 +308,7 @@ Operand::Operand(Handle<Object> handle) {
#ifdef DEBUG
Isolate* isolate = Isolate::Current();
#endif
- ALLOW_HANDLE_DEREF(isolate, "using and embedding raw address");
+ AllowDeferredHandleDereference using_raw_address;
rm_ = no_reg;
// Verify all Objects referred by code are NOT in new space.
Object* obj = *handle;
@@ -1368,6 +1368,7 @@ void Assembler::mls(Register dst, Register src1, Register src2, Register srcA,
void Assembler::sdiv(Register dst, Register src1, Register src2,
Condition cond) {
ASSERT(!dst.is(pc) && !src1.is(pc) && !src2.is(pc));
+ ASSERT(IsEnabled(SUDIV));
emit(cond | B26 | B25| B24 | B20 | dst.code()*B16 | 0xf * B12 |
src2.code()*B8 | B4 | src1.code());
}
diff --git a/deps/v8/src/arm/builtins-arm.cc b/deps/v8/src/arm/builtins-arm.cc
index 6333924ca0..4d7bc8ef2f 100644
--- a/deps/v8/src/arm/builtins-arm.cc
+++ b/deps/v8/src/arm/builtins-arm.cc
@@ -480,15 +480,20 @@ void Builtins::Generate_InternalArrayCode(MacroAssembler* masm) {
// Run the native code for the InternalArray function called as a normal
// function.
- ArrayNativeCode(masm, &generic_array_code);
-
- // Jump to the generic array code if the specialized code cannot handle the
- // construction.
- __ bind(&generic_array_code);
-
- Handle<Code> array_code =
- masm->isolate()->builtins()->InternalArrayCodeGeneric();
- __ Jump(array_code, RelocInfo::CODE_TARGET);
+ if (FLAG_optimize_constructed_arrays) {
+    // Tail call a stub.
+ InternalArrayConstructorStub stub(masm->isolate());
+ __ TailCallStub(&stub);
+ } else {
+ ArrayNativeCode(masm, &generic_array_code);
+
+ // Jump to the generic array code if the specialized code cannot handle the
+ // construction.
+ __ bind(&generic_array_code);
+ Handle<Code> array_code =
+ masm->isolate()->builtins()->InternalArrayCodeGeneric();
+ __ Jump(array_code, RelocInfo::CODE_TARGET);
+ }
}
@@ -513,15 +518,24 @@ void Builtins::Generate_ArrayCode(MacroAssembler* masm) {
}
// Run the native code for the Array function called as a normal function.
- ArrayNativeCode(masm, &generic_array_code);
-
- // Jump to the generic array code if the specialized code cannot handle
- // the construction.
- __ bind(&generic_array_code);
-
- Handle<Code> array_code =
- masm->isolate()->builtins()->ArrayCodeGeneric();
- __ Jump(array_code, RelocInfo::CODE_TARGET);
+ if (FLAG_optimize_constructed_arrays) {
+    // Tail call a stub.
+ Handle<Object> undefined_sentinel(
+ masm->isolate()->heap()->undefined_value(),
+ masm->isolate());
+ __ mov(r2, Operand(undefined_sentinel));
+ ArrayConstructorStub stub(masm->isolate());
+ __ TailCallStub(&stub);
+ } else {
+ ArrayNativeCode(masm, &generic_array_code);
+
+ // Jump to the generic array code if the specialized code cannot handle
+ // the construction.
+ __ bind(&generic_array_code);
+ Handle<Code> array_code =
+ masm->isolate()->builtins()->ArrayCodeGeneric();
+ __ Jump(array_code, RelocInfo::CODE_TARGET);
+ }
}
diff --git a/deps/v8/src/arm/code-stubs-arm.cc b/deps/v8/src/arm/code-stubs-arm.cc
index c667c90721..b26bf7ede2 100644
--- a/deps/v8/src/arm/code-stubs-arm.cc
+++ b/deps/v8/src/arm/code-stubs-arm.cc
@@ -30,7 +30,6 @@
#if defined(V8_TARGET_ARCH_ARM)
#include "bootstrapper.h"
-#include "builtins-decls.h"
#include "code-stubs.h"
#include "regexp-macro-assembler.h"
#include "stub-cache.h"
@@ -45,7 +44,6 @@ void FastCloneShallowArrayStub::InitializeInterfaceDescriptor(
static Register registers[] = { r3, r2, r1 };
descriptor->register_param_count_ = 3;
descriptor->register_params_ = registers;
- descriptor->stack_parameter_count_ = NULL;
descriptor->deoptimization_handler_ =
Runtime::FunctionForId(Runtime::kCreateArrayLiteralShallow)->entry;
}
@@ -57,7 +55,6 @@ void FastCloneShallowObjectStub::InitializeInterfaceDescriptor(
static Register registers[] = { r3, r2, r1, r0 };
descriptor->register_param_count_ = 4;
descriptor->register_params_ = registers;
- descriptor->stack_parameter_count_ = NULL;
descriptor->deoptimization_handler_ =
Runtime::FunctionForId(Runtime::kCreateObjectLiteralShallow)->entry;
}
@@ -80,7 +77,6 @@ void LoadFieldStub::InitializeInterfaceDescriptor(
static Register registers[] = { r0 };
descriptor->register_param_count_ = 1;
descriptor->register_params_ = registers;
- descriptor->stack_parameter_count_ = NULL;
descriptor->deoptimization_handler_ = NULL;
}
@@ -91,7 +87,6 @@ void KeyedLoadFieldStub::InitializeInterfaceDescriptor(
static Register registers[] = { r1 };
descriptor->register_param_count_ = 1;
descriptor->register_params_ = registers;
- descriptor->stack_parameter_count_ = NULL;
descriptor->deoptimization_handler_ = NULL;
}
@@ -127,8 +122,8 @@ void CompareNilICStub::InitializeInterfaceDescriptor(
descriptor->register_params_ = registers;
descriptor->deoptimization_handler_ =
FUNCTION_ADDR(CompareNilIC_Miss);
- descriptor->miss_handler_ =
- ExternalReference(IC_Utility(IC::kCompareNilIC_Miss), isolate);
+ descriptor->SetMissHandler(
+ ExternalReference(IC_Utility(IC::kCompareNilIC_Miss), isolate));
}
@@ -150,7 +145,29 @@ static void InitializeArrayConstructorDescriptor(
descriptor->register_params_ = registers;
descriptor->function_mode_ = JS_FUNCTION_STUB_MODE;
descriptor->deoptimization_handler_ =
- FUNCTION_ADDR(ArrayConstructor_StubFailure);
+ Runtime::FunctionForId(Runtime::kArrayConstructor)->entry;
+}
+
+
+static void InitializeInternalArrayConstructorDescriptor(
+ Isolate* isolate,
+ CodeStubInterfaceDescriptor* descriptor,
+ int constant_stack_parameter_count) {
+ // register state
+ // r0 -- number of arguments
+ // r1 -- constructor function
+ static Register registers[] = { r1 };
+ descriptor->register_param_count_ = 1;
+
+ if (constant_stack_parameter_count != 0) {
+    // The stack parameter count covers the constructor pointer and the
+    // single argument.
+ descriptor->stack_parameter_count_ = &r0;
+ }
+ descriptor->hint_stack_parameter_count_ = constant_stack_parameter_count;
+ descriptor->register_params_ = registers;
+ descriptor->function_mode_ = JS_FUNCTION_STUB_MODE;
+ descriptor->deoptimization_handler_ =
+ Runtime::FunctionForId(Runtime::kInternalArrayConstructor)->entry;
}
@@ -175,6 +192,40 @@ void ArrayNArgumentsConstructorStub::InitializeInterfaceDescriptor(
}
+void ToBooleanStub::InitializeInterfaceDescriptor(
+ Isolate* isolate,
+ CodeStubInterfaceDescriptor* descriptor) {
+ static Register registers[] = { r0 };
+ descriptor->register_param_count_ = 1;
+ descriptor->register_params_ = registers;
+ descriptor->deoptimization_handler_ =
+ FUNCTION_ADDR(ToBooleanIC_Miss);
+ descriptor->SetMissHandler(
+ ExternalReference(IC_Utility(IC::kToBooleanIC_Miss), isolate));
+}
+
+
+void InternalArrayNoArgumentConstructorStub::InitializeInterfaceDescriptor(
+ Isolate* isolate,
+ CodeStubInterfaceDescriptor* descriptor) {
+ InitializeInternalArrayConstructorDescriptor(isolate, descriptor, 0);
+}
+
+
+void InternalArraySingleArgumentConstructorStub::InitializeInterfaceDescriptor(
+ Isolate* isolate,
+ CodeStubInterfaceDescriptor* descriptor) {
+ InitializeInternalArrayConstructorDescriptor(isolate, descriptor, 1);
+}
+
+
+void InternalArrayNArgumentsConstructorStub::InitializeInterfaceDescriptor(
+ Isolate* isolate,
+ CodeStubInterfaceDescriptor* descriptor) {
+ InitializeInternalArrayConstructorDescriptor(isolate, descriptor, -1);
+}
+
+
#define __ ACCESS_MASM(masm)
static void EmitIdenticalObjectComparison(MacroAssembler* masm,
@@ -218,7 +269,7 @@ void HydrogenCodeStub::GenerateLightweightMiss(MacroAssembler* masm) {
for (int i = 0; i < param_count; ++i) {
__ push(descriptor->register_params_[i]);
}
- ExternalReference miss = descriptor->miss_handler_;
+ ExternalReference miss = descriptor->miss_handler();
__ CallExternalReference(miss, descriptor->register_param_count_);
}
@@ -649,7 +700,7 @@ static void EmitIdenticalObjectComparison(MacroAssembler* masm,
__ cmp(r0, r1);
__ b(ne, &not_identical);
- // Test for NaN. Sadly, we can't just compare to FACTORY->nan_value(),
+ // Test for NaN. Sadly, we can't just compare to Factory::nan_value(),
// so we do the second best thing - test it ourselves.
// They are both equal and they are not both Smis so both of them are not
// Smis. If it's not a heap number, then return equal.
@@ -1207,116 +1258,6 @@ void ICCompareStub::GenerateGeneric(MacroAssembler* masm) {
}
-// The stub expects its argument in the tos_ register and returns its result in
-// it, too: zero for false, and a non-zero value for true.
-void ToBooleanStub::Generate(MacroAssembler* masm) {
- // This stub overrides SometimesSetsUpAFrame() to return false. That means
- // we cannot call anything that could cause a GC from this stub.
- Label patch;
- const Register map = r9.is(tos_) ? r7 : r9;
-
- // undefined -> false.
- CheckOddball(masm, UNDEFINED, Heap::kUndefinedValueRootIndex, false);
-
- // Boolean -> its value.
- CheckOddball(masm, BOOLEAN, Heap::kFalseValueRootIndex, false);
- CheckOddball(masm, BOOLEAN, Heap::kTrueValueRootIndex, true);
-
- // 'null' -> false.
- CheckOddball(masm, NULL_TYPE, Heap::kNullValueRootIndex, false);
-
- if (types_.Contains(SMI)) {
- // Smis: 0 -> false, all other -> true
- __ SmiTst(tos_);
- // tos_ contains the correct return value already
- __ Ret(eq);
- } else if (types_.NeedsMap()) {
- // If we need a map later and have a Smi -> patch.
- __ JumpIfSmi(tos_, &patch);
- }
-
- if (types_.NeedsMap()) {
- __ ldr(map, FieldMemOperand(tos_, HeapObject::kMapOffset));
-
- if (types_.CanBeUndetectable()) {
- __ ldrb(ip, FieldMemOperand(map, Map::kBitFieldOffset));
- __ tst(ip, Operand(1 << Map::kIsUndetectable));
- // Undetectable -> false.
- __ mov(tos_, Operand::Zero(), LeaveCC, ne);
- __ Ret(ne);
- }
- }
-
- if (types_.Contains(SPEC_OBJECT)) {
- // Spec object -> true.
- __ CompareInstanceType(map, ip, FIRST_SPEC_OBJECT_TYPE);
- // tos_ contains the correct non-zero return value already.
- __ Ret(ge);
- }
-
- if (types_.Contains(STRING)) {
- // String value -> false iff empty.
- __ CompareInstanceType(map, ip, FIRST_NONSTRING_TYPE);
- __ ldr(tos_, FieldMemOperand(tos_, String::kLengthOffset), lt);
- __ Ret(lt); // the string length is OK as the return value
- }
-
- if (types_.Contains(HEAP_NUMBER)) {
- // Heap number -> false iff +0, -0, or NaN.
- Label not_heap_number;
- __ CompareRoot(map, Heap::kHeapNumberMapRootIndex);
- __ b(ne, &not_heap_number);
-
- __ vldr(d1, FieldMemOperand(tos_, HeapNumber::kValueOffset));
- __ VFPCompareAndSetFlags(d1, 0.0);
- // "tos_" is a register, and contains a non zero value by default.
- // Hence we only need to overwrite "tos_" with zero to return false for
- // FP_ZERO or FP_NAN cases. Otherwise, by default it returns true.
- __ mov(tos_, Operand::Zero(), LeaveCC, eq); // for FP_ZERO
- __ mov(tos_, Operand::Zero(), LeaveCC, vs); // for FP_NAN
- __ Ret();
- __ bind(&not_heap_number);
- }
-
- __ bind(&patch);
- GenerateTypeTransition(masm);
-}
-
-
-void ToBooleanStub::CheckOddball(MacroAssembler* masm,
- Type type,
- Heap::RootListIndex value,
- bool result) {
- if (types_.Contains(type)) {
- // If we see an expected oddball, return its ToBoolean value tos_.
- __ LoadRoot(ip, value);
- __ cmp(tos_, ip);
- // The value of a root is never NULL, so we can avoid loading a non-null
- // value into tos_ when we want to return 'true'.
- if (!result) {
- __ mov(tos_, Operand::Zero(), LeaveCC, eq);
- }
- __ Ret(eq);
- }
-}
-
-
-void ToBooleanStub::GenerateTypeTransition(MacroAssembler* masm) {
- if (!tos_.is(r3)) {
- __ mov(r3, Operand(tos_));
- }
- __ mov(r2, Operand(Smi::FromInt(tos_.code())));
- __ mov(r1, Operand(Smi::FromInt(types_.ToByte())));
- __ Push(r3, r2, r1);
- // Patch the caller to an appropriate specialized stub and return the
- // operation result to the caller of the stub.
- __ TailCallExternalReference(
- ExternalReference(IC_Utility(IC::kToBoolean_Patch), masm->isolate()),
- 3,
- 1);
-}
-
-
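The deleted ToBooleanStub::Generate hand-coded the ES5 ToBoolean table in ARM; the patch replaces it with a ToBooleanIC (see the descriptor added above and the GetUninitialized call sites in full-codegen-arm.cc below). For reference, the table the stub encoded, as portable C++ over a toy tagged value (the Tag/Value types are illustrative only):

    #include <cmath>
    #include <string>

    enum class Tag { kUndefined, kNull, kBoolean, kSmi, kString,
                     kHeapNumber, kSpecObject };

    struct Value {
      Tag tag;
      bool boolean = false;
      int smi = 0;
      std::string str;
      double num = 0.0;
      bool undetectable = false;  // e.g. document.all
    };

    bool ToBoolean(const Value& v) {
      switch (v.tag) {
        case Tag::kUndefined:  return false;
        case Tag::kNull:       return false;
        case Tag::kBoolean:    return v.boolean;
        case Tag::kSmi:        return v.smi != 0;          // 0 -> false
        case Tag::kString:     return !v.str.empty();      // "" -> false
        case Tag::kHeapNumber: return v.num != 0.0 &&      // +0/-0 -> false
                                      !std::isnan(v.num);  // NaN -> false
        case Tag::kSpecObject: return !v.undetectable;     // objects -> true
      }
      return false;  // unreachable; keeps the compiler happy
    }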
void StoreBufferOverflowStub::Generate(MacroAssembler* masm) {
// We don't allow a GC during a store buffer overflow so there is no need to
// store the registers in any particular way, but we do have to store and
@@ -1766,6 +1707,7 @@ void BinaryOpStub_GenerateSmiSmiOperation(MacroAssembler* masm,
__ Ret();
if (CpuFeatures::IsSupported(SUDIV)) {
+ CpuFeatureScope scope(masm, SUDIV);
Label result_not_zero;
__ bind(&div_with_sdiv);
@@ -1822,6 +1764,7 @@ void BinaryOpStub_GenerateSmiSmiOperation(MacroAssembler* masm,
__ Ret();
if (CpuFeatures::IsSupported(SUDIV)) {
+ CpuFeatureScope scope(masm, SUDIV);
__ bind(&modulo_with_sdiv);
__ mov(scratch2, right);
// Perform modulus with sdiv and mls.
@@ -2130,7 +2073,14 @@ void BinaryOpStub_GenerateSmiCode(
void BinaryOpStub::GenerateSmiStub(MacroAssembler* masm) {
- Label not_smis, call_runtime;
+ Label right_arg_changed, call_runtime;
+
+ if (op_ == Token::MOD && has_fixed_right_arg_) {
+    // It is guaranteed that the value will fit into a Smi, because if it
+    // didn't, we wouldn't be here; see BinaryOp_Patch.
+ __ cmp(r0, Operand(Smi::FromInt(fixed_right_arg_value())));
+ __ b(ne, &right_arg_changed);
+ }
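The has_fixed_right_arg_ guard bets that this MOD site keeps seeing the same constant power-of-two divisor; when the bet fails, control transfers to the type transition. A hedged sketch of that contract (SlowMod stands in for the patched slow path; the power-of-two restriction matches the ASSERT(IsPowerOf2(...)) in lithium-codegen-arm.cc further down):

    #include <cassert>
    #include <cstdint>

    int32_t SlowMod(int32_t a, int32_t b) { return a % b; }  // stand-in

    int32_t ModWithFixedRight(int32_t lhs, int32_t rhs, int32_t fixed_rhs) {
      assert(fixed_rhs > 0 && (fixed_rhs & (fixed_rhs - 1)) == 0);
      if (rhs != fixed_rhs) return SlowMod(lhs, rhs);  // "right arg changed"
      int32_t mag = lhs < 0 ? -lhs : lhs;   // reduce the magnitude...
      int32_t m = mag & (fixed_rhs - 1);    // ...by masking
      return lhs < 0 ? -m : m;              // keep the dividend's sign
    }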
if (result_type_ == BinaryOpIC::UNINITIALIZED ||
result_type_ == BinaryOpIC::SMI) {
@@ -2147,6 +2097,7 @@ void BinaryOpStub::GenerateSmiStub(MacroAssembler* masm) {
// Code falls through if the result is not returned as either a smi or heap
// number.
+ __ bind(&right_arg_changed);
GenerateTypeTransition(masm);
__ bind(&call_runtime);
@@ -2259,42 +2210,25 @@ void BinaryOpStub::GenerateInt32Stub(MacroAssembler* masm) {
UNREACHABLE();
}
- if (op_ != Token::DIV) {
- // These operations produce an integer result.
- // Try to return a smi if we can.
- // Otherwise return a heap number if allowed, or jump to type
- // transition.
-
- if (result_type_ <= BinaryOpIC::INT32) {
- __ TryDoubleToInt32Exact(scratch1, d5, d8);
- // If the ne condition is set, result does
- // not fit in a 32-bit integer.
- __ b(ne, &transition);
- } else {
- __ vcvt_s32_f64(s8, d5);
- __ vmov(scratch1, s8);
- }
-
- // Check if the result fits in a smi.
- __ add(scratch2, scratch1, Operand(0x40000000), SetCC);
- // If not try to return a heap number.
- __ b(mi, &return_heap_number);
- // Check for minus zero. Return heap number for minus zero if
- // double results are allowed; otherwise transition.
+ if (result_type_ <= BinaryOpIC::INT32) {
+ __ TryDoubleToInt32Exact(scratch1, d5, d8);
+ // If the ne condition is set, result does
+ // not fit in a 32-bit integer.
+ __ b(ne, &transition);
+ // Try to tag the result as a Smi, return heap number on overflow.
+ __ SmiTag(scratch1, SetCC);
+ __ b(vs, &return_heap_number);
+ // Check for minus zero, transition in that case (because we need
+ // to return a heap number).
Label not_zero;
- __ cmp(scratch1, Operand::Zero());
+ ASSERT(kSmiTag == 0);
__ b(ne, &not_zero);
__ vmov(scratch2, d5.high());
__ tst(scratch2, Operand(HeapNumber::kSignMask));
- __ b(ne, result_type_ <= BinaryOpIC::INT32 ? &transition
- : &return_heap_number);
+ __ b(ne, &transition);
__ bind(&not_zero);
-
- // Tag the result and return.
- __ SmiTag(r0, scratch1);
+ __ mov(r0, scratch1);
__ Ret();
- } else {
- // DIV just falls through to allocating a heap number.
}
__ bind(&return_heap_number);
@@ -2318,6 +2252,12 @@ void BinaryOpStub::GenerateInt32Stub(MacroAssembler* masm) {
// to type transition.
} else {
+ if (has_fixed_right_arg_) {
+ __ Vmov(d8, fixed_right_arg_value(), scratch1);
+ __ VFPCompareAndSetFlags(d1, d8);
+ __ b(ne, &transition);
+ }
+
// We preserved r0 and r1 to be able to call runtime.
// Save the left value on the stack.
__ Push(r5, r4);
@@ -4689,7 +4629,6 @@ static void GenerateRecordCallTargetNoArray(MacroAssembler* masm) {
// megamorphic.
// r1 : the function to call
// r2 : cache cell for call target
- ASSERT(!FLAG_optimize_constructed_arrays);
Label done;
ASSERT_EQ(*TypeFeedbackCells::MegamorphicSentinel(masm->isolate()),
@@ -7336,6 +7275,10 @@ static void ArrayConstructorStubAheadOfTimeHelper(Isolate* isolate) {
ElementsKind kind = GetFastElementsKindFromSequenceIndex(i);
T stub(kind);
stub.GetCode(isolate)->set_is_pregenerated(true);
+ if (AllocationSiteInfo::GetMode(kind) != DONT_TRACK_ALLOCATION_SITE) {
+ T stub1(kind, true);
+ stub1.GetCode(isolate)->set_is_pregenerated(true);
+ }
}
}
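The helper now pregenerates a second stub variant whenever the elements kind participates in allocation-site tracking. Roughly, as data (which kinds track is an assumption of this sketch, not taken from the patch):

    #include <initializer_list>
    #include <vector>

    enum class ElementsKind { kFastSmi, kFast, kFastHoley, kFastDouble };

    bool TracksAllocationSite(ElementsKind k) {
      // Sketch assumption: kinds that can still transition are tracked.
      return k == ElementsKind::kFastSmi;
    }

    struct StubVariant { ElementsKind kind; bool track_site; };

    std::vector<StubVariant> PregenerateArrayStubs() {
      std::vector<StubVariant> stubs;
      for (ElementsKind k : {ElementsKind::kFastSmi, ElementsKind::kFast,
                             ElementsKind::kFastHoley,
                             ElementsKind::kFastDouble}) {
        stubs.push_back({k, false});  // the plain stub, always
        if (TracksAllocationSite(k)) stubs.push_back({k, true});
      }
      return stubs;
    }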
@@ -7350,6 +7293,21 @@ void ArrayConstructorStubBase::GenerateStubsAheadOfTime(Isolate* isolate) {
}
+void InternalArrayConstructorStubBase::GenerateStubsAheadOfTime(
+ Isolate* isolate) {
+ ElementsKind kinds[2] = { FAST_ELEMENTS, FAST_HOLEY_ELEMENTS };
+ for (int i = 0; i < 2; i++) {
+    // For internal arrays we only need these three stub variants.
+ InternalArrayNoArgumentConstructorStub stubh1(kinds[i]);
+ stubh1.GetCode(isolate)->set_is_pregenerated(true);
+ InternalArraySingleArgumentConstructorStub stubh2(kinds[i]);
+ stubh2.GetCode(isolate)->set_is_pregenerated(true);
+ InternalArrayNArgumentsConstructorStub stubh3(kinds[i]);
+ stubh3.GetCode(isolate)->set_is_pregenerated(true);
+ }
+}
+
+
void ArrayConstructorStub::Generate(MacroAssembler* masm) {
// ----------- S t a t e -------------
// -- r0 : argc (only if argument_count_ == ANY)
@@ -7436,6 +7394,105 @@ void ArrayConstructorStub::Generate(MacroAssembler* masm) {
}
+void InternalArrayConstructorStub::GenerateCase(
+ MacroAssembler* masm, ElementsKind kind) {
+ Label not_zero_case, not_one_case;
+ Label normal_sequence;
+
+ __ tst(r0, r0);
+ __ b(ne, &not_zero_case);
+ InternalArrayNoArgumentConstructorStub stub0(kind);
+ __ TailCallStub(&stub0);
+
+ __ bind(&not_zero_case);
+ __ cmp(r0, Operand(1));
+ __ b(gt, &not_one_case);
+
+ if (IsFastPackedElementsKind(kind)) {
+    // We might need to create a holey array; look at the first argument.
+ __ ldr(r3, MemOperand(sp, 0));
+ __ cmp(r3, Operand::Zero());
+ __ b(eq, &normal_sequence);
+
+ InternalArraySingleArgumentConstructorStub
+ stub1_holey(GetHoleyElementsKind(kind));
+ __ TailCallStub(&stub1_holey);
+ }
+
+ __ bind(&normal_sequence);
+ InternalArraySingleArgumentConstructorStub stub1(kind);
+ __ TailCallStub(&stub1);
+
+ __ bind(&not_one_case);
+ InternalArrayNArgumentsConstructorStub stubN(kind);
+ __ TailCallStub(&stubN);
+}
+
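GenerateCase dispatches on argc: zero and N arguments go straight to their stubs, while the one-argument case may promote a packed kind to its holey counterpart, because new InternalArray(n) with n != 0 allocates holes. The same decision tree in C++ (enum and function names are illustrative):

    #include <cstdint>

    enum class Kind { kPacked, kHoley };
    enum class Stub { kNoArg, kOneArgPacked, kOneArgHoley, kNArg };

    Stub ChooseInternalArrayStub(int argc, Kind kind,
                                 int32_t first_arg /* valid iff argc == 1 */) {
      if (argc == 0) return Stub::kNoArg;
      if (argc > 1) return Stub::kNArg;
      // Single nonzero length on a packed kind: the array will contain
      // holes, so switch to the holey variant.
      if (kind == Kind::kPacked && first_arg != 0) return Stub::kOneArgHoley;
      return Stub::kOneArgPacked;
    }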
+
+void InternalArrayConstructorStub::Generate(MacroAssembler* masm) {
+ // ----------- S t a t e -------------
+ // -- r0 : argc
+ // -- r1 : constructor
+ // -- sp[0] : return address
+ // -- sp[4] : last argument
+ // -----------------------------------
+
+ if (FLAG_debug_code) {
+ // The array construct code is only set for the global and natives
+ // builtin Array functions which always have maps.
+
+ // Initial map for the builtin Array function should be a map.
+ __ ldr(r3, FieldMemOperand(r1, JSFunction::kPrototypeOrInitialMapOffset));
+    // The smi-tag test below catches both a NULL pointer and a Smi.
+ __ tst(r3, Operand(kSmiTagMask));
+ __ Assert(ne, "Unexpected initial map for Array function");
+ __ CompareObjectType(r3, r3, r4, MAP_TYPE);
+ __ Assert(eq, "Unexpected initial map for Array function");
+ }
+
+ if (FLAG_optimize_constructed_arrays) {
+ // Figure out the right elements kind
+ __ ldr(r3, FieldMemOperand(r1, JSFunction::kPrototypeOrInitialMapOffset));
+
+    // Load the map's "bit field 2" into r3. We only need the first byte,
+    // but the following bit field extraction takes care of that anyway.
+ __ ldr(r3, FieldMemOperand(r3, Map::kBitField2Offset));
+ // Retrieve elements_kind from bit field 2.
+ __ Ubfx(r3, r3, Map::kElementsKindShift, Map::kElementsKindBitCount);
+
+ if (FLAG_debug_code) {
+ Label done;
+ __ cmp(r3, Operand(FAST_ELEMENTS));
+ __ b(eq, &done);
+ __ cmp(r3, Operand(FAST_HOLEY_ELEMENTS));
+ __ Assert(eq,
+ "Invalid ElementsKind for InternalArray or InternalPackedArray");
+ __ bind(&done);
+ }
+
+ Label fast_elements_case;
+ __ cmp(r3, Operand(FAST_ELEMENTS));
+ __ b(eq, &fast_elements_case);
+ GenerateCase(masm, FAST_HOLEY_ELEMENTS);
+
+ __ bind(&fast_elements_case);
+ GenerateCase(masm, FAST_ELEMENTS);
+ } else {
+ Label generic_constructor;
+ // Run the native code for the Array function called as constructor.
+ ArrayNativeCode(masm, &generic_constructor);
+
+ // Jump to the generic construct code in case the specialized code cannot
+ // handle the construction.
+ __ bind(&generic_constructor);
+ Handle<Code> generic_construct_stub =
+ masm->isolate()->builtins()->JSConstructStubGeneric();
+ __ Jump(generic_construct_stub, RelocInfo::CODE_TARGET);
+ }
+}
+
+
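The Ubfx above is a plain bit-field extract: pull the elements kind out of the map's "bit field 2" byte. In portable C++ (the shift and width constants here are placeholders; the real values live in Map):

    #include <cstdint>

    constexpr unsigned kElementsKindShift = 3;     // assumed for the sketch
    constexpr unsigned kElementsKindBitCount = 5;  // assumed for the sketch

    unsigned ElementsKindFromBitField2(uint8_t bit_field2) {
      return (bit_field2 >> kElementsKindShift) &
             ((1u << kElementsKindBitCount) - 1u);
    }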
#undef __
} } // namespace v8::internal
diff --git a/deps/v8/src/arm/codegen-arm.cc b/deps/v8/src/arm/codegen-arm.cc
index 7bf253a333..5b2980aeb4 100644
--- a/deps/v8/src/arm/codegen-arm.cc
+++ b/deps/v8/src/arm/codegen-arm.cc
@@ -504,50 +504,6 @@ void StringCharLoadGenerator::Generate(MacroAssembler* masm,
}
-void SeqStringSetCharGenerator::Generate(MacroAssembler* masm,
- String::Encoding encoding,
- Register string,
- Register index,
- Register value) {
- if (FLAG_debug_code) {
- __ SmiTst(index);
- __ Check(eq, "Non-smi index");
- __ SmiTst(value);
- __ Check(eq, "Non-smi value");
-
- __ ldr(ip, FieldMemOperand(string, String::kLengthOffset));
- __ cmp(index, ip);
- __ Check(lt, "Index is too large");
-
- __ cmp(index, Operand(Smi::FromInt(0)));
- __ Check(ge, "Index is negative");
-
- __ ldr(ip, FieldMemOperand(string, HeapObject::kMapOffset));
- __ ldrb(ip, FieldMemOperand(ip, Map::kInstanceTypeOffset));
-
- __ and_(ip, ip, Operand(kStringRepresentationMask | kStringEncodingMask));
- static const uint32_t one_byte_seq_type = kSeqStringTag | kOneByteStringTag;
- static const uint32_t two_byte_seq_type = kSeqStringTag | kTwoByteStringTag;
- __ cmp(ip, Operand(encoding == String::ONE_BYTE_ENCODING
- ? one_byte_seq_type : two_byte_seq_type));
- __ Check(eq, "Unexpected string type");
- }
-
- __ add(ip,
- string,
- Operand(SeqString::kHeaderSize - kHeapObjectTag));
- __ SmiUntag(value, value);
- STATIC_ASSERT(kSmiTagSize == 1 && kSmiTag == 0);
- if (encoding == String::ONE_BYTE_ENCODING) {
- // Smis are tagged by left shift by 1, thus LSR by 1 to smi-untag inline.
- __ strb(value, MemOperand(ip, index, LSR, kSmiTagSize));
- } else {
- // No need to untag a smi for two-byte addressing.
- __ strh(value, MemOperand(ip, index)); // LSL(1 - kSmiTagSize).
- }
-}
-
-
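The deleted generator leaned on a Smi-addressing trick that the full-codegen replacement later in this patch inlines unchanged, so it is worth spelling out. With 32-bit smis and tag size 1, a tagged index is the integer doubled; a sketch in C++:

    #include <cstdint>

    constexpr int kSmiTagSize = 1;  // a smi is the integer shifted left by one

    // One-byte store: shifting the tagged index right by the tag size is
    // the untag, and it folds straight into the addressing (the LSR above).
    void SetOneByteChar(uint8_t* chars, int32_t tagged_index, uint8_t value) {
      chars[tagged_index >> kSmiTagSize] = value;
    }

    // Two-byte store: the tagged index equals index * 2, which is already
    // the byte offset of a two-byte element, so no untag is needed at all.
    void SetTwoByteChar(uint16_t* chars, int32_t tagged_index,
                        uint16_t value) {
      uint8_t* base = reinterpret_cast<uint8_t*>(chars);
      *reinterpret_cast<uint16_t*>(base + tagged_index) = value;
    }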
static MemOperand ExpConstant(int index, Register base) {
return MemOperand(base, index * kDoubleSize);
}
diff --git a/deps/v8/src/arm/codegen-arm.h b/deps/v8/src/arm/codegen-arm.h
index 75899a948e..c020ab601c 100644
--- a/deps/v8/src/arm/codegen-arm.h
+++ b/deps/v8/src/arm/codegen-arm.h
@@ -51,7 +51,7 @@ class CodeGenerator: public AstVisitor {
static bool MakeCode(CompilationInfo* info);
// Printing of AST, etc. as requested by flags.
- static void MakeCodePrologue(CompilationInfo* info);
+ static void MakeCodePrologue(CompilationInfo* info, const char* kind);
// Allocate and install the code.
static Handle<Code> MakeCodeEpilogue(MacroAssembler* masm,
diff --git a/deps/v8/src/arm/deoptimizer-arm.cc b/deps/v8/src/arm/deoptimizer-arm.cc
index d973889bbe..ea3287aa33 100644
--- a/deps/v8/src/arm/deoptimizer-arm.cc
+++ b/deps/v8/src/arm/deoptimizer-arm.cc
@@ -48,7 +48,7 @@ void Deoptimizer::DeoptimizeFunctionWithPreparedFunctionList(
JSFunction* function) {
Isolate* isolate = function->GetIsolate();
HandleScope scope(isolate);
- AssertNoAllocation no_allocation;
+ DisallowHeapAllocation no_allocation;
ASSERT(function->IsOptimized());
ASSERT(function->FunctionsInFunctionListShareSameCode());
diff --git a/deps/v8/src/arm/full-codegen-arm.cc b/deps/v8/src/arm/full-codegen-arm.cc
index 33a499c275..8b24bf10c9 100644
--- a/deps/v8/src/arm/full-codegen-arm.cc
+++ b/deps/v8/src/arm/full-codegen-arm.cc
@@ -678,8 +678,8 @@ void FullCodeGenerator::DoTest(Expression* condition,
Label* if_true,
Label* if_false,
Label* fall_through) {
- ToBooleanStub stub(result_register());
- __ CallStub(&stub, condition->test_id());
+ Handle<Code> ic = ToBooleanStub::GetUninitialized(isolate());
+ CallIC(ic, RelocInfo::CODE_TARGET, condition->test_id());
__ tst(result_register(), result_register());
Split(ne, if_true, if_false, fall_through);
}
@@ -1081,9 +1081,8 @@ void FullCodeGenerator::VisitForInStatement(ForInStatement* stmt) {
ForIn loop_statement(this, stmt);
increment_loop_depth();
- // Get the object to enumerate over. Both SpiderMonkey and JSC
- // ignore null and undefined in contrast to the specification; see
- // ECMA-262 section 12.6.4.
+ // Get the object to enumerate over. If the object is null or undefined, skip
+ // over the loop. See ECMA-262 version 5, section 12.6.4.
VisitForAccumulatorValue(stmt->enumerable());
__ LoadRoot(ip, Heap::kUndefinedValueRootIndex);
__ cmp(r0, ip);
@@ -1259,6 +1258,65 @@ void FullCodeGenerator::VisitForInStatement(ForInStatement* stmt) {
}
+void FullCodeGenerator::VisitForOfStatement(ForOfStatement* stmt) {
+ Comment cmnt(masm_, "[ ForOfStatement");
+ SetStatementPosition(stmt);
+
+ Iteration loop_statement(this, stmt);
+ increment_loop_depth();
+
+ // var iterator = iterable[@@iterator]()
+ VisitForAccumulatorValue(stmt->assign_iterator());
+
+ // As with for-in, skip the loop if the iterator is null or undefined.
+ __ CompareRoot(r0, Heap::kUndefinedValueRootIndex);
+ __ b(eq, loop_statement.break_label());
+ __ CompareRoot(r0, Heap::kNullValueRootIndex);
+ __ b(eq, loop_statement.break_label());
+
+ // Convert the iterator to a JS object.
+ Label convert, done_convert;
+ __ JumpIfSmi(r0, &convert);
+ __ CompareObjectType(r0, r1, r1, FIRST_SPEC_OBJECT_TYPE);
+ __ b(ge, &done_convert);
+ __ bind(&convert);
+ __ push(r0);
+ __ InvokeBuiltin(Builtins::TO_OBJECT, CALL_FUNCTION);
+ __ bind(&done_convert);
+ __ push(r0);
+
+ // Loop entry.
+ __ bind(loop_statement.continue_label());
+
+ // result = iterator.next()
+ VisitForEffect(stmt->next_result());
+
+ // if (result.done) break;
+ Label result_not_done;
+ VisitForControl(stmt->result_done(),
+ loop_statement.break_label(),
+ &result_not_done,
+ &result_not_done);
+ __ bind(&result_not_done);
+
+ // each = result.value
+ VisitForEffect(stmt->assign_each());
+
+ // Generate code for the body of the loop.
+ Visit(stmt->body());
+
+ // Check stack before looping.
+ PrepareForBailoutForId(stmt->BackEdgeId(), NO_REGISTERS);
+ EmitBackEdgeBookkeeping(stmt, loop_statement.continue_label());
+ __ jmp(loop_statement.continue_label());
+
+ // Exit and decrement the loop depth.
+ PrepareForBailoutForId(stmt->ExitId(), NO_REGISTERS);
+ __ bind(loop_statement.break_label());
+ decrement_loop_depth();
+}
+
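VisitForOfStatement lowers for-of to the iterator protocol: fetch the iterator once, call next() on every trip, break when done, and bind value for the body. The same control flow against a toy protocol in C++ (names are illustrative, not the AST accessors used above):

    #include <functional>
    #include <iostream>

    struct IterResult { bool done; int value; };

    struct Iterator {
      std::function<IterResult()> next;
    };

    void ForOf(Iterator it, const std::function<void(int)>& body) {
      for (;;) {
        IterResult result = it.next();  // result = iterator.next()
        if (result.done) break;         // if (result.done) break;
        body(result.value);             // each = result.value; <body>
      }
    }

    int main() {
      int i = 0;
      Iterator range{[&] { return IterResult{i >= 3, i++}; }};
      ForOf(range, [](int v) { std::cout << v << "\n"; });  // prints 0 1 2
    }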
+
void FullCodeGenerator::EmitNewClosure(Handle<SharedFunctionInfo> info,
bool pretenure) {
// Use the fast case closure allocation code that allocates in new
@@ -1971,10 +2029,10 @@ void FullCodeGenerator::VisitYield(Yield* expr) {
// [sp + 1 * kPointerSize] iter
// [sp + 0 * kPointerSize] g
- Label l_catch, l_try, l_resume, l_send, l_call, l_loop;
+ Label l_catch, l_try, l_resume, l_next, l_call, l_loop;
// Initial send value is undefined.
__ LoadRoot(r0, Heap::kUndefinedValueRootIndex);
- __ b(&l_send);
+ __ b(&l_next);
// catch (e) { receiver = iter; f = iter.throw; arg = e; goto l_call; }
__ bind(&l_catch);
@@ -1983,11 +2041,9 @@ void FullCodeGenerator::VisitYield(Yield* expr) {
__ push(r3); // iter
__ push(r0); // exception
__ mov(r0, r3); // iter
- __ push(r0); // push LoadIC state
__ LoadRoot(r2, Heap::kthrow_stringRootIndex); // "throw"
Handle<Code> throw_ic = isolate()->builtins()->LoadIC_Initialize();
CallIC(throw_ic); // iter.throw in r0
- __ add(sp, sp, Operand(kPointerSize)); // drop LoadIC state
__ jmp(&l_call);
// try { received = yield result.value }
@@ -2007,17 +2063,15 @@ void FullCodeGenerator::VisitYield(Yield* expr) {
__ bind(&l_resume); // received in r0
__ PopTryHandler();
- // receiver = iter; f = iter.send; arg = received;
- __ bind(&l_send);
+ // receiver = iter; f = iter.next; arg = received;
+ __ bind(&l_next);
__ ldr(r3, MemOperand(sp, 1 * kPointerSize)); // iter
__ push(r3); // iter
__ push(r0); // received
__ mov(r0, r3); // iter
- __ push(r0); // push LoadIC state
- __ LoadRoot(r2, Heap::ksend_stringRootIndex); // "send"
- Handle<Code> send_ic = isolate()->builtins()->LoadIC_Initialize();
- CallIC(send_ic); // iter.send in r0
- __ add(sp, sp, Operand(kPointerSize)); // drop LoadIC state
+ __ LoadRoot(r2, Heap::knext_stringRootIndex); // "next"
+ Handle<Code> next_ic = isolate()->builtins()->LoadIC_Initialize();
+ CallIC(next_ic); // iter.next in r0
// result = f.call(receiver, arg);
__ bind(&l_call);
@@ -2045,13 +2099,11 @@ void FullCodeGenerator::VisitYield(Yield* expr) {
__ pop(r1); // result
__ push(r0); // result.value
__ mov(r0, r1); // result
- __ push(r0); // push LoadIC state
__ LoadRoot(r2, Heap::kdone_stringRootIndex); // "done"
Handle<Code> done_ic = isolate()->builtins()->LoadIC_Initialize();
CallIC(done_ic); // result.done in r0
- __ add(sp, sp, Operand(kPointerSize)); // drop LoadIC state
- ToBooleanStub stub(r0);
- __ CallStub(&stub);
+ Handle<Code> bool_ic = ToBooleanStub::GetUninitialized(isolate());
+ CallIC(bool_ic);
__ cmp(r0, Operand(0));
__ b(eq, &l_try);
@@ -2122,7 +2174,7 @@ void FullCodeGenerator::EmitGeneratorResume(Expression *generator,
// If we are sending a value and there is no operand stack, we can jump back
// in directly.
- if (resume_mode == JSGeneratorObject::SEND) {
+ if (resume_mode == JSGeneratorObject::NEXT) {
Label slow_resume;
__ cmp(r3, Operand(0));
__ b(ne, &slow_resume);
@@ -3013,7 +3065,7 @@ void FullCodeGenerator::EmitIsStringWrapperSafeForDefaultValueOf(
// string "valueOf" the result is false.
// The use of ip to store the valueOf string assumes that it is not otherwise
// used in the loop below.
- __ mov(ip, Operand(FACTORY->value_of_string()));
+ __ mov(ip, Operand(isolate()->factory()->value_of_string()));
__ jmp(&entry);
__ bind(&loop);
__ ldr(r3, MemOperand(r4, 0));
@@ -3425,19 +3477,56 @@ void FullCodeGenerator::EmitDateField(CallRuntime* expr) {
}
+void FullCodeGenerator::EmitSeqStringSetCharCheck(Register string,
+ Register index,
+ Register value,
+ uint32_t encoding_mask) {
+ __ SmiTst(index);
+ __ Check(eq, "Non-smi index");
+ __ SmiTst(value);
+ __ Check(eq, "Non-smi value");
+
+ __ ldr(ip, FieldMemOperand(string, String::kLengthOffset));
+ __ cmp(index, ip);
+ __ Check(lt, "Index is too large");
+
+ __ cmp(index, Operand(Smi::FromInt(0)));
+ __ Check(ge, "Index is negative");
+
+ __ ldr(ip, FieldMemOperand(string, HeapObject::kMapOffset));
+ __ ldrb(ip, FieldMemOperand(ip, Map::kInstanceTypeOffset));
+
+ __ and_(ip, ip, Operand(kStringRepresentationMask | kStringEncodingMask));
+ __ cmp(ip, Operand(encoding_mask));
+ __ Check(eq, "Unexpected string type");
+}
+
+
void FullCodeGenerator::EmitOneByteSeqStringSetChar(CallRuntime* expr) {
ZoneList<Expression*>* args = expr->arguments();
ASSERT_EQ(3, args->length());
+ Register string = r0;
+ Register index = r1;
+ Register value = r2;
+
VisitForStackValue(args->at(1)); // index
VisitForStackValue(args->at(2)); // value
- __ pop(r2);
- __ pop(r1);
+ __ pop(value);
+ __ pop(index);
VisitForAccumulatorValue(args->at(0)); // string
- static const String::Encoding encoding = String::ONE_BYTE_ENCODING;
- SeqStringSetCharGenerator::Generate(masm_, encoding, r0, r1, r2);
- context()->Plug(r0);
+ if (FLAG_debug_code) {
+ static const uint32_t one_byte_seq_type = kSeqStringTag | kOneByteStringTag;
+ EmitSeqStringSetCharCheck(string, index, value, one_byte_seq_type);
+ }
+
+ __ SmiUntag(value, value);
+ __ add(ip,
+ string,
+ Operand(SeqOneByteString::kHeaderSize - kHeapObjectTag));
+ __ strb(value, MemOperand(ip, index, LSR, kSmiTagSize));
+ context()->Plug(string);
}
@@ -3445,15 +3534,28 @@ void FullCodeGenerator::EmitTwoByteSeqStringSetChar(CallRuntime* expr) {
ZoneList<Expression*>* args = expr->arguments();
ASSERT_EQ(3, args->length());
+ Register string = r0;
+ Register index = r1;
+ Register value = r2;
+
VisitForStackValue(args->at(1)); // index
VisitForStackValue(args->at(2)); // value
- __ pop(r2);
- __ pop(r1);
+ __ pop(value);
+ __ pop(index);
VisitForAccumulatorValue(args->at(0)); // string
- static const String::Encoding encoding = String::TWO_BYTE_ENCODING;
- SeqStringSetCharGenerator::Generate(masm_, encoding, r0, r1, r2);
- context()->Plug(r0);
+ if (FLAG_debug_code) {
+ static const uint32_t two_byte_seq_type = kSeqStringTag | kTwoByteStringTag;
+ EmitSeqStringSetCharCheck(string, index, value, two_byte_seq_type);
+ }
+
+ __ SmiUntag(value, value);
+ __ add(ip,
+ string,
+ Operand(SeqTwoByteString::kHeaderSize - kHeapObjectTag));
+ STATIC_ASSERT(kSmiTagSize == 1 && kSmiTag == 0);
+ __ strh(value, MemOperand(ip, index));
+ context()->Plug(string);
}
@@ -4663,9 +4765,7 @@ void FullCodeGenerator::EmitLiteralCompareNil(CompareOperation* expr,
VisitForAccumulatorValue(sub_expr);
PrepareForBailoutBeforeSplit(expr, true, if_true, if_false);
- EqualityKind kind = expr->op() == Token::EQ_STRICT
- ? kStrictEquality : kNonStrictEquality;
- if (kind == kStrictEquality) {
+ if (expr->op() == Token::EQ_STRICT) {
Heap::RootListIndex nil_value = nil == kNullValue ?
Heap::kNullValueRootIndex :
Heap::kUndefinedValueRootIndex;
@@ -4673,9 +4773,7 @@ void FullCodeGenerator::EmitLiteralCompareNil(CompareOperation* expr,
__ cmp(r0, r1);
Split(eq, if_true, if_false, fall_through);
} else {
- Handle<Code> ic = CompareNilICStub::GetUninitialized(isolate(),
- kNonStrictEquality,
- nil);
+ Handle<Code> ic = CompareNilICStub::GetUninitialized(isolate(), nil);
CallIC(ic, RelocInfo::CODE_TARGET, expr->CompareOperationFeedbackId());
__ cmp(r0, Operand(0));
Split(ne, if_true, if_false, fall_through);
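The simplification above: strict comparison against null or undefined is a single root-value identity check, so only the sloppy case still reaches the CompareNilIC, and the IC no longer needs an equality-kind parameter. In miniature (ignoring undetectable objects, which sloppy == also treats as nullish):

    enum class Tag { kNull, kUndefined, kOther };
    enum class Nil { kNull, kUndefined };

    bool StrictEqualsNil(Tag v, Nil nil) {
      // One identity check against the requested root value.
      return nil == Nil::kNull ? v == Tag::kNull : v == Tag::kUndefined;
    }

    bool SloppyEqualsNil(Tag v) {
      // null == undefined holds in sloppy comparison, so either tag matches.
      return v == Tag::kNull || v == Tag::kUndefined;
    }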
diff --git a/deps/v8/src/arm/ic-arm.cc b/deps/v8/src/arm/ic-arm.cc
index 14c4794f4f..87865b2f67 100644
--- a/deps/v8/src/arm/ic-arm.cc
+++ b/deps/v8/src/arm/ic-arm.cc
@@ -646,15 +646,11 @@ void KeyedCallIC::GenerateNormal(MacroAssembler* masm, int argc) {
}
-// Defined in ic.cc.
-Object* LoadIC_Miss(Arguments args);
-
void LoadIC::GenerateMegamorphic(MacroAssembler* masm) {
// ----------- S t a t e -------------
// -- r2 : name
// -- lr : return address
// -- r0 : receiver
- // -- sp[0] : receiver
// -----------------------------------
// Probe the stub cache.
@@ -674,7 +670,6 @@ void LoadIC::GenerateNormal(MacroAssembler* masm) {
// -- r2 : name
// -- lr : return address
// -- r0 : receiver
- // -- sp[0] : receiver
// -----------------------------------
Label miss;
@@ -695,7 +690,6 @@ void LoadIC::GenerateMiss(MacroAssembler* masm) {
// -- r2 : name
// -- lr : return address
// -- r0 : receiver
- // -- sp[0] : receiver
// -----------------------------------
Isolate* isolate = masm->isolate();
@@ -711,6 +705,20 @@ void LoadIC::GenerateMiss(MacroAssembler* masm) {
}
+void LoadIC::GenerateRuntimeGetProperty(MacroAssembler* masm) {
+ // ---------- S t a t e --------------
+ // -- r2 : name
+ // -- lr : return address
+ // -- r0 : receiver
+ // -----------------------------------
+
+ __ mov(r3, r0);
+ __ Push(r3, r2);
+
+ __ TailCallRuntime(Runtime::kGetProperty, 2, 1);
+}
+
+
static MemOperand GenerateMappedArgumentsLookup(MacroAssembler* masm,
Register object,
Register key,
@@ -878,9 +886,6 @@ void KeyedCallIC::GenerateNonStrictArguments(MacroAssembler* masm,
}
-Object* KeyedLoadIC_Miss(Arguments args);
-
-
void KeyedLoadIC::GenerateMiss(MacroAssembler* masm, ICMissMode miss_mode) {
// ---------- S t a t e --------------
// -- lr : return address
diff --git a/deps/v8/src/arm/lithium-arm.cc b/deps/v8/src/arm/lithium-arm.cc
index e1bb69eacd..fbb9c6ef8b 100644
--- a/deps/v8/src/arm/lithium-arm.cc
+++ b/deps/v8/src/arm/lithium-arm.cc
@@ -369,8 +369,7 @@ void LAccessArgumentsAt::PrintDataTo(StringStream* stream) {
void LStoreNamedField::PrintDataTo(StringStream* stream) {
object()->PrintTo(stream);
- stream->Add(".");
- stream->Add(*String::cast(*name())->ToCString());
+ hydrogen()->access().PrintTo(stream);
stream->Add(" <- ");
value()->PrintTo(stream);
}
@@ -406,7 +405,14 @@ void LStoreKeyed::PrintDataTo(StringStream* stream) {
} else {
stream->Add("] <- ");
}
- value()->PrintTo(stream);
+
+ if (value() == NULL) {
+ ASSERT(hydrogen()->IsConstantHoleStore() &&
+ hydrogen()->value()->representation().IsDouble());
+ stream->Add("<the hole(nan)>");
+ } else {
+ value()->PrintTo(stream);
+ }
}
@@ -699,6 +705,12 @@ LInstruction* LChunkBuilder::DoDummyUse(HDummyUse* instr) {
}
+LInstruction* LChunkBuilder::DoEnvironmentMarker(HEnvironmentMarker* instr) {
+ UNREACHABLE();
+ return NULL;
+}
+
+
LInstruction* LChunkBuilder::DoSoftDeoptimize(HSoftDeoptimize* instr) {
return AssignEnvironment(new(zone()) LDeoptimize);
}
@@ -711,9 +723,9 @@ LInstruction* LChunkBuilder::DoDeoptimize(HDeoptimize* instr) {
LInstruction* LChunkBuilder::DoShift(Token::Value op,
HBitwiseBinaryOperation* instr) {
- if (instr->representation().IsTagged()) {
- ASSERT(instr->left()->representation().IsTagged());
- ASSERT(instr->right()->representation().IsTagged());
+ if (instr->representation().IsSmiOrTagged()) {
+ ASSERT(instr->left()->representation().IsSmiOrTagged());
+ ASSERT(instr->right()->representation().IsSmiOrTagged());
LOperand* left = UseFixed(instr->left(), r1);
LOperand* right = UseFixed(instr->right(), r0);
@@ -781,8 +793,8 @@ LInstruction* LChunkBuilder::DoArithmeticT(Token::Value op,
op == Token::SUB);
HValue* left = instr->left();
HValue* right = instr->right();
- ASSERT(left->representation().IsTagged());
- ASSERT(right->representation().IsTagged());
+ ASSERT(left->representation().IsSmiOrTagged());
+ ASSERT(right->representation().IsSmiOrTagged());
LOperand* left_operand = UseFixed(left, r1);
LOperand* right_operand = UseFixed(right, r0);
LArithmeticT* result =
@@ -1304,9 +1316,9 @@ LInstruction* LChunkBuilder::DoBitwise(HBitwise* instr) {
LOperand* right = UseOrConstantAtStart(instr->BetterRightOperand());
return DefineAsRegister(new(zone()) LBitI(left, right));
} else {
- ASSERT(instr->representation().IsTagged());
- ASSERT(instr->left()->representation().IsTagged());
- ASSERT(instr->right()->representation().IsTagged());
+ ASSERT(instr->representation().IsSmiOrTagged());
+ ASSERT(instr->left()->representation().IsSmiOrTagged());
+ ASSERT(instr->right()->representation().IsSmiOrTagged());
LOperand* left = UseFixed(instr->left(), r1);
LOperand* right = UseFixed(instr->right(), r0);
@@ -1333,18 +1345,14 @@ LInstruction* LChunkBuilder::DoDiv(HDiv* instr) {
ASSERT(!instr->CheckFlag(HValue::kCanBeDivByZero));
LOperand* value = UseRegisterAtStart(instr->left());
LDivI* div =
- new(zone()) LDivI(value, UseOrConstant(instr->right()));
+ new(zone()) LDivI(value, UseOrConstant(instr->right()), NULL);
return AssignEnvironment(DefineSameAsFirst(div));
}
- // TODO(1042) The fixed register allocation
- // is needed because we call TypeRecordingBinaryOpStub from
- // the generated code, which requires registers r0
- // and r1 to be used. We should remove that
- // when we provide a native implementation.
- LOperand* dividend = UseFixed(instr->left(), r0);
- LOperand* divisor = UseFixed(instr->right(), r1);
- return AssignEnvironment(AssignPointerMap(
- DefineFixed(new(zone()) LDivI(dividend, divisor), r0)));
+ LOperand* dividend = UseRegister(instr->left());
+ LOperand* divisor = UseRegister(instr->right());
+ LOperand* temp = CpuFeatures::IsSupported(SUDIV) ? NULL : FixedTemp(d4);
+ LDivI* div = new(zone()) LDivI(dividend, divisor, temp);
+ return AssignEnvironment(DefineAsRegister(div));
} else {
return DoArithmeticT(Token::DIV, instr);
}
@@ -1434,43 +1442,61 @@ LInstruction* LChunkBuilder::DoMathFloorOfDiv(HMathFloorOfDiv* instr) {
LInstruction* LChunkBuilder::DoMod(HMod* instr) {
+ HValue* left = instr->left();
+ HValue* right = instr->right();
if (instr->representation().IsInteger32()) {
- ASSERT(instr->left()->representation().IsInteger32());
- ASSERT(instr->right()->representation().IsInteger32());
-
- LModI* mod;
+ ASSERT(left->representation().IsInteger32());
+ ASSERT(right->representation().IsInteger32());
if (instr->HasPowerOf2Divisor()) {
- ASSERT(!instr->CheckFlag(HValue::kCanBeDivByZero));
- LOperand* value = UseRegisterAtStart(instr->left());
- mod = new(zone()) LModI(value, UseOrConstant(instr->right()));
- } else {
- LOperand* dividend = UseRegister(instr->left());
- LOperand* divisor = UseRegister(instr->right());
- mod = new(zone()) LModI(dividend,
- divisor,
- TempRegister(),
- FixedTemp(d10),
- FixedTemp(d11));
- }
-
- if (instr->CheckFlag(HValue::kBailoutOnMinusZero) ||
- instr->CheckFlag(HValue::kCanBeDivByZero) ||
- instr->CheckFlag(HValue::kCanOverflow)) {
+ ASSERT(!right->CanBeZero());
+ LModI* mod = new(zone()) LModI(UseRegisterAtStart(left),
+ UseOrConstant(right));
+ LInstruction* result = DefineAsRegister(mod);
+ return (left->CanBeNegative() &&
+ instr->CheckFlag(HValue::kBailoutOnMinusZero))
+ ? AssignEnvironment(result)
+ : result;
+ } else if (instr->has_fixed_right_arg()) {
+ LModI* mod = new(zone()) LModI(UseRegisterAtStart(left),
+ UseRegisterAtStart(right));
return AssignEnvironment(DefineAsRegister(mod));
+ } else if (CpuFeatures::IsSupported(SUDIV)) {
+ LModI* mod = new(zone()) LModI(UseRegister(left),
+ UseRegister(right));
+ LInstruction* result = DefineAsRegister(mod);
+ return (right->CanBeZero() ||
+ (left->RangeCanInclude(kMinInt) &&
+ right->RangeCanInclude(-1) &&
+ instr->CheckFlag(HValue::kBailoutOnMinusZero)) ||
+ (left->CanBeNegative() &&
+ instr->CanBeZero() &&
+ instr->CheckFlag(HValue::kBailoutOnMinusZero)))
+ ? AssignEnvironment(result)
+ : result;
} else {
- return DefineAsRegister(mod);
+ LModI* mod = new(zone()) LModI(UseRegister(left),
+ UseRegister(right),
+ FixedTemp(d10),
+ FixedTemp(d11));
+ LInstruction* result = DefineAsRegister(mod);
+ return (right->CanBeZero() ||
+ (left->CanBeNegative() &&
+ instr->CanBeZero() &&
+ instr->CheckFlag(HValue::kBailoutOnMinusZero)))
+ ? AssignEnvironment(result)
+ : result;
}
- } else if (instr->representation().IsTagged()) {
+ } else if (instr->representation().IsSmiOrTagged()) {
return DoArithmeticT(Token::MOD, instr);
} else {
ASSERT(instr->representation().IsDouble());
- // We call a C function for double modulo. It can't trigger a GC.
- // We need to use fixed result register for the call.
+    // We call a C function for double modulo. It can't trigger a GC. We need
+    // to use a fixed result register for the call.
// TODO(fschneider): Allow any register as input registers.
- LOperand* left = UseFixedDouble(instr->left(), d1);
- LOperand* right = UseFixedDouble(instr->right(), d2);
- LArithmeticD* result = new(zone()) LArithmeticD(Token::MOD, left, right);
- return MarkAsCall(DefineFixedDouble(result, d1), instr);
+ LArithmeticD* mod = new(zone()) LArithmeticD(Token::MOD,
+ UseFixedDouble(left, d1),
+ UseFixedDouble(right, d2));
+ return MarkAsCall(DefineFixedDouble(mod, d1), instr);
}
}
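The rewritten DoMod picks one of four strategies (power-of-two mask, fixed right argument, sdiv+mls, VFP fallback) and attaches an environment only when the mod can actually fail. The deopt condition for the non-SUDIV paths, condensed (the SUDIV variant adds a kMinInt % -1 term):

    struct ModFacts {
      bool right_can_be_zero;
      bool left_can_be_negative;
      bool result_can_be_zero;
      bool bailout_on_minus_zero;
    };

    bool ModNeedsDeopt(const ModFacts& f) {
      if (f.right_can_be_zero) return true;  // x % 0 has no int32 answer
      // A negative dividend with a zero result would have to be -0.
      return f.left_can_be_negative && f.result_can_be_zero &&
             f.bailout_on_minus_zero;
    }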
@@ -1618,7 +1644,7 @@ LInstruction* LChunkBuilder::DoAdd(HAdd* instr) {
return DoArithmeticD(Token::ADD, instr);
} else {
- ASSERT(instr->representation().IsTagged());
+ ASSERT(instr->representation().IsSmiOrTagged());
return DoArithmeticT(Token::ADD, instr);
}
}
@@ -1682,9 +1708,10 @@ LInstruction* LChunkBuilder::DoCompareGeneric(HCompareGeneric* instr) {
LInstruction* LChunkBuilder::DoCompareIDAndBranch(
HCompareIDAndBranch* instr) {
Representation r = instr->representation();
- if (r.IsInteger32()) {
- ASSERT(instr->left()->representation().IsInteger32());
- ASSERT(instr->right()->representation().IsInteger32());
+ if (r.IsSmiOrInteger32()) {
+ ASSERT(instr->left()->representation().IsSmiOrInteger32());
+ ASSERT(instr->left()->representation().Equals(
+ instr->right()->representation()));
LOperand* left = UseRegisterOrConstantAtStart(instr->left());
LOperand* right = UseRegisterOrConstantAtStart(instr->right());
return new(zone()) LCmpIDAndBranch(left, right);
@@ -1887,12 +1914,26 @@ LInstruction* LChunkBuilder::DoForceRepresentation(HForceRepresentation* bad) {
LInstruction* LChunkBuilder::DoChange(HChange* instr) {
Representation from = instr->from();
Representation to = instr->to();
+ if (from.IsSmi()) {
+ if (to.IsTagged()) {
+ LOperand* value = UseRegister(instr->value());
+ return DefineSameAsFirst(new(zone()) LDummyUse(value));
+ }
+ from = Representation::Tagged();
+ }
if (from.IsTagged()) {
if (to.IsDouble()) {
info()->MarkAsDeferredCalling();
LOperand* value = UseRegister(instr->value());
LNumberUntagD* res = new(zone()) LNumberUntagD(value);
return AssignEnvironment(DefineAsRegister(res));
+ } else if (to.IsSmi()) {
+ HValue* val = instr->value();
+ LOperand* value = UseRegister(val);
+ if (val->type().IsSmi()) {
+ return DefineSameAsFirst(new(zone()) LDummyUse(value));
+ }
+ return AssignEnvironment(DefineSameAsFirst(new(zone()) LCheckSmi(value)));
} else {
ASSERT(to.IsInteger32());
LOperand* value = NULL;
@@ -1927,6 +1968,10 @@ LInstruction* LChunkBuilder::DoChange(HChange* instr) {
LNumberTagD* result = new(zone()) LNumberTagD(value, temp1, temp2);
Define(result, result_temp);
return AssignPointerMap(result);
+ } else if (to.IsSmi()) {
+ LOperand* value = UseRegister(instr->value());
+ return AssignEnvironment(DefineAsRegister(new(zone()) LDoubleToSmi(value,
+ TempRegister(), TempRegister())));
} else {
ASSERT(to.IsInteger32());
LOperand* value = UseRegister(instr->value());
@@ -1949,6 +1994,15 @@ LInstruction* LChunkBuilder::DoChange(HChange* instr) {
LNumberTagI* result = new(zone()) LNumberTagI(value);
return AssignEnvironment(AssignPointerMap(DefineAsRegister(result)));
}
+ } else if (to.IsSmi()) {
+ HValue* val = instr->value();
+ LOperand* value = UseRegister(val);
+ LInstruction* result =
+ DefineSameAsFirst(new(zone()) LInteger32ToSmi(value));
+ if (val->HasRange() && val->range()->IsInSmiRange()) {
+ return result;
+ }
+ return AssignEnvironment(result);
} else {
ASSERT(to.IsDouble());
if (instr->value()->CheckFlag(HInstruction::kUint32)) {
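The new to.IsSmi() conversions only carry an environment when the input can leave the smi range: tagging doubles the value, so it overflows for anything outside 31 signed bits, and DoChange skips AssignEnvironment when the range analysis proves IsInSmiRange(). A sketch of the check (the ARM code uses SmiTag(SetCC) plus b(vs) instead of an explicit range test):

    #include <cstdint>
    #include <optional>

    constexpr int kSmiTagSize = 1;

    std::optional<int32_t> TrySmiTag(int32_t v) {
      if (v > (INT32_MAX >> kSmiTagSize) ||
          v < (INT32_MIN >> kSmiTagSize)) {
        return std::nullopt;  // would deoptimize
      }
      return v * 2;  // the tag bit (0) is implicit in the doubling
    }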
@@ -1986,18 +2040,6 @@ LInstruction* LChunkBuilder::DoCheckPrototypeMaps(HCheckPrototypeMaps* instr) {
}
-LInstruction* LChunkBuilder::DoCheckSmi(HCheckSmi* instr) {
- LOperand* value = UseRegisterAtStart(instr->value());
- return AssignEnvironment(new(zone()) LCheckSmi(value));
-}
-
-
-LInstruction* LChunkBuilder::DoCheckSmiOrInt32(HCheckSmiOrInt32* instr) {
- LOperand* value = UseRegisterAtStart(instr->value());
- return AssignEnvironment(new(zone()) LCheckSmi(value));
-}
-
-
LInstruction* LChunkBuilder::DoCheckFunction(HCheckFunction* instr) {
LOperand* value = UseRegisterAtStart(instr->value());
return AssignEnvironment(new(zone()) LCheckFunction(value));
@@ -2020,7 +2062,7 @@ LInstruction* LChunkBuilder::DoClampToUint8(HClampToUint8* instr) {
} else if (input_rep.IsInteger32()) {
return DefineAsRegister(new(zone()) LClampIToUint8(reg));
} else {
- ASSERT(input_rep.IsTagged());
+ ASSERT(input_rep.IsSmiOrTagged());
// Register allocator doesn't (yet) support allocation of double
// temps. Reserve d1 explicitly.
LClampTToUint8* result = new(zone()) LClampTToUint8(reg, FixedTemp(d11));
@@ -2038,7 +2080,9 @@ LInstruction* LChunkBuilder::DoReturn(HReturn* instr) {
LInstruction* LChunkBuilder::DoConstant(HConstant* instr) {
Representation r = instr->representation();
- if (r.IsInteger32()) {
+ if (r.IsSmi()) {
+ return DefineAsRegister(new(zone()) LConstantS);
+ } else if (r.IsInteger32()) {
return DefineAsRegister(new(zone()) LConstantI);
} else if (r.IsDouble()) {
return DefineAsRegister(new(zone()) LConstantD);
@@ -2154,7 +2198,7 @@ LInstruction* LChunkBuilder::DoLoadExternalArrayPointer(
LInstruction* LChunkBuilder::DoLoadKeyed(HLoadKeyed* instr) {
ASSERT(instr->key()->representation().IsInteger32() ||
- instr->key()->representation().IsTagged());
+ instr->key()->representation().IsSmi());
ElementsKind elements_kind = instr->elements_kind();
LOperand* key = UseRegisterOrConstantAtStart(instr->key());
LLoadKeyed* result = NULL;
@@ -2164,7 +2208,7 @@ LInstruction* LChunkBuilder::DoLoadKeyed(HLoadKeyed* instr) {
if (instr->representation().IsDouble()) {
obj = UseTempRegister(instr->elements());
} else {
- ASSERT(instr->representation().IsTagged());
+ ASSERT(instr->representation().IsSmiOrTagged());
obj = UseRegisterAtStart(instr->elements());
}
result = new(zone()) LLoadKeyed(obj, key);
@@ -2214,7 +2258,7 @@ LInstruction* LChunkBuilder::DoStoreKeyed(HStoreKeyed* instr) {
val = UseTempRegister(instr->value());
key = UseRegisterOrConstantAtStart(instr->key());
} else {
- ASSERT(instr->value()->representation().IsTagged());
+ ASSERT(instr->value()->representation().IsSmiOrTagged());
object = UseTempRegister(instr->elements());
val = needs_write_barrier ? UseTempRegister(instr->value())
: UseRegisterAtStart(instr->value());
@@ -2293,13 +2337,14 @@ LInstruction* LChunkBuilder::DoTrapAllocationMemento(
LInstruction* LChunkBuilder::DoStoreNamedField(HStoreNamedField* instr) {
+ bool is_in_object = instr->access().IsInobject();
bool needs_write_barrier = instr->NeedsWriteBarrier();
bool needs_write_barrier_for_map = !instr->transition().is_null() &&
instr->NeedsWriteBarrierForMap();
LOperand* obj;
if (needs_write_barrier) {
- obj = instr->is_in_object()
+ obj = is_in_object
? UseRegister(instr->object())
: UseTempRegister(instr->object());
} else {
@@ -2323,10 +2368,11 @@ LInstruction* LChunkBuilder::DoStoreNamedField(HStoreNamedField* instr) {
LOperand* temp = needs_write_barrier_for_map ? TempRegister() : NULL;
LStoreNamedField* result = new(zone()) LStoreNamedField(obj, val, temp);
- if ((FLAG_track_fields && instr->field_representation().IsSmi()) ||
- (FLAG_track_heap_object_fields &&
- instr->field_representation().IsHeapObject())) {
- return AssignEnvironment(result);
+ if (FLAG_track_heap_object_fields &&
+ instr->field_representation().IsHeapObject()) {
+ if (!instr->value()->type().IsHeapObject()) {
+ return AssignEnvironment(result);
+ }
}
return result;
}
@@ -2370,14 +2416,6 @@ LInstruction* LChunkBuilder::DoStringLength(HStringLength* instr) {
}
-LInstruction* LChunkBuilder::DoAllocateObject(HAllocateObject* instr) {
- info()->MarkAsDeferredCalling();
- LAllocateObject* result =
- new(zone()) LAllocateObject(TempRegister(), TempRegister());
- return AssignPointerMap(DefineAsRegister(result));
-}
-
-
LInstruction* LChunkBuilder::DoAllocate(HAllocate* instr) {
info()->MarkAsDeferredCalling();
LOperand* size = instr->size()->IsConstant()
@@ -2467,7 +2505,7 @@ LInstruction* LChunkBuilder::DoAccessArgumentsAt(HAccessArgumentsAt* instr) {
index = UseOrConstant(instr->index());
} else {
length = UseTempRegister(instr->length());
- index = Use(instr->index());
+ index = UseRegisterAtStart(instr->index());
}
return DefineAsRegister(new(zone()) LAccessArgumentsAt(args, length, index));
}
diff --git a/deps/v8/src/arm/lithium-arm.h b/deps/v8/src/arm/lithium-arm.h
index 9bcd44ae05..ccfd0dbece 100644
--- a/deps/v8/src/arm/lithium-arm.h
+++ b/deps/v8/src/arm/lithium-arm.h
@@ -49,7 +49,6 @@ class LCodeGen;
#define LITHIUM_CONCRETE_INSTRUCTION_LIST(V) \
V(AccessArgumentsAt) \
V(AddI) \
- V(AllocateObject) \
V(Allocate) \
V(ApplyArguments) \
V(ArgumentsElements) \
@@ -87,6 +86,7 @@ class LCodeGen;
V(CmpT) \
V(ConstantD) \
V(ConstantI) \
+ V(ConstantS) \
V(ConstantT) \
V(Context) \
V(DebugBreak) \
@@ -95,6 +95,7 @@ class LCodeGen;
V(Deoptimize) \
V(DivI) \
V(DoubleToI) \
+ V(DoubleToSmi) \
V(DummyUse) \
V(ElementsKind) \
V(FixedArrayBaseLength) \
@@ -111,6 +112,7 @@ class LCodeGen;
V(InstanceSize) \
V(InstructionGap) \
V(Integer32ToDouble) \
+ V(Integer32ToSmi) \
V(Uint32ToDouble) \
V(InvokeFunction) \
V(IsConstructCallAndBranch) \
@@ -573,51 +575,39 @@ class LArgumentsElements: public LTemplateInstruction<1, 0, 0> {
};
-class LModI: public LTemplateInstruction<1, 2, 3> {
+class LModI: public LTemplateInstruction<1, 2, 2> {
public:
- // Used when the right hand is a constant power of 2.
- LModI(LOperand* left,
- LOperand* right) {
- inputs_[0] = left;
- inputs_[1] = right;
- temps_[0] = NULL;
- temps_[1] = NULL;
- temps_[2] = NULL;
- }
-
- // Used for the standard case.
LModI(LOperand* left,
LOperand* right,
- LOperand* temp,
- LOperand* temp2,
- LOperand* temp3) {
+ LOperand* temp = NULL,
+ LOperand* temp2 = NULL) {
inputs_[0] = left;
inputs_[1] = right;
temps_[0] = temp;
temps_[1] = temp2;
- temps_[2] = temp3;
}
LOperand* left() { return inputs_[0]; }
LOperand* right() { return inputs_[1]; }
LOperand* temp() { return temps_[0]; }
LOperand* temp2() { return temps_[1]; }
- LOperand* temp3() { return temps_[2]; }
DECLARE_CONCRETE_INSTRUCTION(ModI, "mod-i")
DECLARE_HYDROGEN_ACCESSOR(Mod)
};
-class LDivI: public LTemplateInstruction<1, 2, 0> {
+class LDivI: public LTemplateInstruction<1, 2, 1> {
public:
- LDivI(LOperand* left, LOperand* right) {
+ LDivI(LOperand* left, LOperand* right, LOperand* temp) {
inputs_[0] = left;
inputs_[1] = right;
+ temps_[0] = temp;
}
LOperand* left() { return inputs_[0]; }
LOperand* right() { return inputs_[1]; }
+ LOperand* temp() { return temps_[0]; }
DECLARE_CONCRETE_INSTRUCTION(DivI, "div-i")
DECLARE_HYDROGEN_ACCESSOR(Div)
@@ -1204,6 +1194,15 @@ class LConstantI: public LTemplateInstruction<1, 0, 0> {
};
+class LConstantS: public LTemplateInstruction<1, 0, 0> {
+ public:
+ DECLARE_CONCRETE_INSTRUCTION(ConstantS, "constant-s")
+ DECLARE_HYDROGEN_ACCESSOR(Constant)
+
+ Smi* value() const { return Smi::FromInt(hydrogen()->Integer32Value()); }
+};
+
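LConstantS materializes a smi without a separate tagging step. The encoding assumed throughout the patch, sketched for the 32-bit case (tag size 1, tag value 0):

    #include <cassert>
    #include <cstdint>

    constexpr intptr_t kSmiTag = 0;  // low bit 0 marks a smi
    constexpr int kSmiTagSize = 1;

    intptr_t SmiFromInt(int32_t value) {
      return static_cast<intptr_t>(value) * 2;  // shift left by the tag size
    }

    int32_t SmiToInt(intptr_t smi) {
      assert((smi & 1) == kSmiTag);
      return static_cast<int32_t>(smi >> kSmiTagSize);
    }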
+
class LConstantD: public LTemplateInstruction<1, 0, 0> {
public:
DECLARE_CONCRETE_INSTRUCTION(ConstantD, "constant-d")
@@ -1954,6 +1953,19 @@ class LInteger32ToDouble: public LTemplateInstruction<1, 1, 0> {
};
+class LInteger32ToSmi: public LTemplateInstruction<1, 1, 0> {
+ public:
+ explicit LInteger32ToSmi(LOperand* value) {
+ inputs_[0] = value;
+ }
+
+ LOperand* value() { return inputs_[0]; }
+
+ DECLARE_CONCRETE_INSTRUCTION(Integer32ToSmi, "int32-to-smi")
+ DECLARE_HYDROGEN_ACCESSOR(Change)
+};
+
+
class LUint32ToDouble: public LTemplateInstruction<1, 1, 0> {
public:
explicit LUint32ToDouble(LOperand* value) {
@@ -2007,6 +2019,25 @@ class LNumberTagD: public LTemplateInstruction<1, 1, 2> {
};
+class LDoubleToSmi: public LTemplateInstruction<1, 1, 2> {
+ public:
+ LDoubleToSmi(LOperand* value, LOperand* temp, LOperand* temp2) {
+ inputs_[0] = value;
+ temps_[0] = temp;
+ temps_[1] = temp2;
+ }
+
+ LOperand* value() { return inputs_[0]; }
+ LOperand* temp() { return temps_[0]; }
+ LOperand* temp2() { return temps_[1]; }
+
+ DECLARE_CONCRETE_INSTRUCTION(DoubleToSmi, "double-to-smi")
+ DECLARE_HYDROGEN_ACCESSOR(UnaryOperation)
+
+ bool truncating() { return hydrogen()->CanTruncateToInt32(); }
+};
+
+
// Sometimes truncating conversion from a tagged value to an int32.
class LDoubleToI: public LTemplateInstruction<1, 1, 2> {
public:
@@ -2111,9 +2142,6 @@ class LStoreNamedField: public LTemplateInstruction<0, 2, 1> {
virtual void PrintDataTo(StringStream* stream);
- Handle<Object> name() const { return hydrogen()->name(); }
- bool is_in_object() { return hydrogen()->is_in_object(); }
- int offset() { return hydrogen()->offset(); }
Handle<Map> transition() const { return hydrogen()->transition(); }
Representation representation() const {
return hydrogen()->field_representation();
@@ -2352,7 +2380,7 @@ class LCheckPrototypeMaps: public LTemplateInstruction<0, 0, 2> {
};
-class LCheckSmi: public LTemplateInstruction<0, 1, 0> {
+class LCheckSmi: public LTemplateInstruction<1, 1, 0> {
public:
explicit LCheckSmi(LOperand* value) {
inputs_[0] = value;
@@ -2416,21 +2444,6 @@ class LClampTToUint8: public LTemplateInstruction<1, 1, 1> {
};
-class LAllocateObject: public LTemplateInstruction<1, 1, 2> {
- public:
- LAllocateObject(LOperand* temp, LOperand* temp2) {
- temps_[0] = temp;
- temps_[1] = temp2;
- }
-
- LOperand* temp() { return temps_[0]; }
- LOperand* temp2() { return temps_[1]; }
-
- DECLARE_CONCRETE_INSTRUCTION(AllocateObject, "allocate-object")
- DECLARE_HYDROGEN_ACCESSOR(AllocateObject)
-};
-
-
class LAllocate: public LTemplateInstruction<1, 2, 2> {
public:
LAllocate(LOperand* size, LOperand* temp1, LOperand* temp2) {
diff --git a/deps/v8/src/arm/lithium-codegen-arm.cc b/deps/v8/src/arm/lithium-codegen-arm.cc
index 09a0e9c066..96befb0c0d 100644
--- a/deps/v8/src/arm/lithium-codegen-arm.cc
+++ b/deps/v8/src/arm/lithium-codegen-arm.cc
@@ -181,6 +181,7 @@ bool LCodeGen::GeneratePrologue() {
__ add(fp, sp, Operand(2 * kPointerSize));
}
frame_is_built_ = true;
+ info_->AddNoFrameRange(0, masm_->pc_offset());
}
// Reserve space for the stack slots needed by the code.
@@ -518,13 +519,18 @@ DwVfpRegister LCodeGen::EmitLoadDoubleRegister(LOperand* op,
Handle<Object> LCodeGen::ToHandle(LConstantOperand* op) const {
HConstant* constant = chunk_->LookupConstant(op);
- ASSERT(chunk_->LookupLiteralRepresentation(op).IsTagged());
+ ASSERT(chunk_->LookupLiteralRepresentation(op).IsSmiOrTagged());
return constant->handle();
}
bool LCodeGen::IsInteger32(LConstantOperand* op) const {
- return chunk_->LookupLiteralRepresentation(op).IsInteger32();
+ return chunk_->LookupLiteralRepresentation(op).IsSmiOrInteger32();
+}
+
+
+bool LCodeGen::IsSmi(LConstantOperand* op) const {
+ return chunk_->LookupLiteralRepresentation(op).IsSmi();
}
@@ -534,6 +540,12 @@ int LCodeGen::ToInteger32(LConstantOperand* op) const {
}
+Smi* LCodeGen::ToSmi(LConstantOperand* op) const {
+ HConstant* constant = chunk_->LookupConstant(op);
+ return Smi::FromInt(constant->Integer32Value());
+}
+
+
double LCodeGen::ToDouble(LConstantOperand* op) const {
HConstant* constant = chunk_->LookupConstant(op);
ASSERT(constant->HasDoubleValue());
@@ -935,8 +947,7 @@ void LCodeGen::PopulateDeoptimizationData(Handle<Code> code) {
Handle<FixedArray> literals =
factory()->NewFixedArray(deoptimization_literals_.length(), TENURED);
- { ALLOW_HANDLE_DEREF(isolate(),
- "copying a ZoneList of handles into a FixedArray");
+ { AllowDeferredHandleDereference copy_handles;
for (int i = 0; i < deoptimization_literals_.length(); i++) {
literals->set(i, *deoptimization_literals_[i]);
}
@@ -1154,122 +1165,150 @@ void LCodeGen::DoUnknownOSRValue(LUnknownOSRValue* instr) {
void LCodeGen::DoModI(LModI* instr) {
- if (instr->hydrogen()->HasPowerOf2Divisor()) {
- Register dividend = ToRegister(instr->left());
- Register result = ToRegister(instr->result());
+ HMod* hmod = instr->hydrogen();
+ HValue* left = hmod->left();
+ HValue* right = hmod->right();
+ if (hmod->HasPowerOf2Divisor()) {
+ // TODO(svenpanne) We should really do the strength reduction on the
+ // Hydrogen level.
+ Register left_reg = ToRegister(instr->left());
+ Register result_reg = ToRegister(instr->result());
+
+ // Note: The code below even works when right contains kMinInt.
+ int32_t divisor = Abs(right->GetInteger32Constant());
+
+ Label left_is_not_negative, done;
+ if (left->CanBeNegative()) {
+ __ cmp(left_reg, Operand::Zero());
+ __ b(pl, &left_is_not_negative);
+ __ rsb(result_reg, left_reg, Operand::Zero());
+ __ and_(result_reg, result_reg, Operand(divisor - 1));
+ __ rsb(result_reg, result_reg, Operand::Zero(), SetCC);
+ if (hmod->CheckFlag(HValue::kBailoutOnMinusZero)) {
+ DeoptimizeIf(eq, instr->environment());
+ }
+ __ b(&done);
+ }
- int32_t divisor =
- HConstant::cast(instr->hydrogen()->right())->Integer32Value();
+ __ bind(&left_is_not_negative);
+ __ and_(result_reg, left_reg, Operand(divisor - 1));
+ __ bind(&done);
- if (divisor < 0) divisor = -divisor;
+ } else if (hmod->has_fixed_right_arg()) {
+ Register left_reg = ToRegister(instr->left());
+ Register right_reg = ToRegister(instr->right());
+ Register result_reg = ToRegister(instr->result());
- Label positive_dividend, done;
- __ cmp(dividend, Operand::Zero());
- __ b(pl, &positive_dividend);
- __ rsb(result, dividend, Operand::Zero());
- __ and_(result, result, Operand(divisor - 1), SetCC);
- if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) {
- DeoptimizeIf(eq, instr->environment());
+ int32_t divisor = hmod->fixed_right_arg_value();
+ ASSERT(IsPowerOf2(divisor));
+
+ // Check if our assumption of a fixed right operand still holds.
+ __ cmp(right_reg, Operand(divisor));
+ DeoptimizeIf(ne, instr->environment());
+
+ Label left_is_not_negative, done;
+ if (left->CanBeNegative()) {
+ __ cmp(left_reg, Operand::Zero());
+ __ b(pl, &left_is_not_negative);
+ __ rsb(result_reg, left_reg, Operand::Zero());
+ __ and_(result_reg, result_reg, Operand(divisor - 1));
+ __ rsb(result_reg, result_reg, Operand::Zero(), SetCC);
+ if (hmod->CheckFlag(HValue::kBailoutOnMinusZero)) {
+ DeoptimizeIf(eq, instr->environment());
+ }
+ __ b(&done);
}
- __ rsb(result, result, Operand::Zero());
- __ b(&done);
- __ bind(&positive_dividend);
- __ and_(result, dividend, Operand(divisor - 1));
+
+ __ bind(&left_is_not_negative);
+ __ and_(result_reg, left_reg, Operand(divisor - 1));
__ bind(&done);
- return;
- }
- // These registers hold untagged 32 bit values.
- Register left = ToRegister(instr->left());
- Register right = ToRegister(instr->right());
- Register result = ToRegister(instr->result());
- Label done;
+ } else if (CpuFeatures::IsSupported(SUDIV)) {
+ CpuFeatureScope scope(masm(), SUDIV);
- // Check for x % 0.
- if (instr->hydrogen()->CheckFlag(HValue::kCanBeDivByZero)) {
- __ cmp(right, Operand::Zero());
- DeoptimizeIf(eq, instr->environment());
- }
+ Register left_reg = ToRegister(instr->left());
+ Register right_reg = ToRegister(instr->right());
+ Register result_reg = ToRegister(instr->result());
- if (CpuFeatures::IsSupported(SUDIV)) {
- CpuFeatureScope scope(masm(), SUDIV);
- // Check for (kMinInt % -1).
- if (instr->hydrogen()->CheckFlag(HValue::kCanOverflow)) {
- Label left_not_min_int;
- __ cmp(left, Operand(kMinInt));
- __ b(ne, &left_not_min_int);
- __ cmp(right, Operand(-1));
+ Label done;
+    // Check for x % 0; sdiv might signal an exception. We have to deopt in
+    // this case because we can't return a NaN.
+ if (right->CanBeZero()) {
+ __ cmp(right_reg, Operand::Zero());
DeoptimizeIf(eq, instr->environment());
- __ bind(&left_not_min_int);
}
- // For r3 = r1 % r2; we can have the following ARM code
- // sdiv r3, r1, r2
- // mls r3, r3, r2, r1
+    // Check for kMinInt % -1; sdiv will return kMinInt, which is not what
+    // we want. We have to deopt if we care about -0, because we can't
+    // return that.
+ if (left->RangeCanInclude(kMinInt) && right->RangeCanInclude(-1)) {
+ Label no_overflow_possible;
+ __ cmp(left_reg, Operand(kMinInt));
+ __ b(ne, &no_overflow_possible);
+ __ cmp(right_reg, Operand(-1));
+ if (hmod->CheckFlag(HValue::kBailoutOnMinusZero)) {
+ DeoptimizeIf(eq, instr->environment());
+ } else {
+ __ b(ne, &no_overflow_possible);
+ __ mov(result_reg, Operand::Zero());
+ __ jmp(&done);
+ }
+ __ bind(&no_overflow_possible);
+ }
- __ sdiv(result, left, right);
- __ mls(result, result, right, left);
+ // For 'r3 = r1 % r2' we can have the following ARM code:
+ // sdiv r3, r1, r2
+ // mls r3, r3, r2, r1
- if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) {
- __ cmp(result, Operand::Zero());
+ __ sdiv(result_reg, left_reg, right_reg);
+ __ mls(result_reg, result_reg, right_reg, left_reg);
+
+ // If we care about -0, test if the dividend is <0 and the result is 0.
+ if (left->CanBeNegative() &&
+ hmod->CanBeZero() &&
+ hmod->CheckFlag(HValue::kBailoutOnMinusZero)) {
+ __ cmp(result_reg, Operand::Zero());
__ b(ne, &done);
- __ cmp(left, Operand::Zero());
+ __ cmp(left_reg, Operand::Zero());
DeoptimizeIf(lt, instr->environment());
}
+ __ bind(&done);
+
} else {
+ // General case, without any SDIV support.
+ Register left_reg = ToRegister(instr->left());
+ Register right_reg = ToRegister(instr->right());
+ Register result_reg = ToRegister(instr->result());
Register scratch = scratch0();
- Register scratch2 = ToRegister(instr->temp());
- DwVfpRegister dividend = ToDoubleRegister(instr->temp2());
- DwVfpRegister divisor = ToDoubleRegister(instr->temp3());
+ ASSERT(!scratch.is(left_reg));
+ ASSERT(!scratch.is(right_reg));
+ ASSERT(!scratch.is(result_reg));
+ DwVfpRegister dividend = ToDoubleRegister(instr->temp());
+ DwVfpRegister divisor = ToDoubleRegister(instr->temp2());
+ ASSERT(!divisor.is(dividend));
DwVfpRegister quotient = double_scratch0();
+ ASSERT(!quotient.is(dividend));
+ ASSERT(!quotient.is(divisor));
- ASSERT(!dividend.is(divisor));
- ASSERT(!dividend.is(quotient));
- ASSERT(!divisor.is(quotient));
- ASSERT(!scratch.is(left));
- ASSERT(!scratch.is(right));
- ASSERT(!scratch.is(result));
-
- Label vfp_modulo, right_negative;
-
- __ Move(result, left);
-
- // (0 % x) must yield 0 (if x is finite, which is the case here).
- __ cmp(left, Operand::Zero());
- __ b(eq, &done);
- // Preload right in a vfp register.
- __ vmov(divisor.low(), right);
- __ b(lt, &vfp_modulo);
-
- __ cmp(left, Operand(right));
- __ b(lt, &done);
-
- // Check for (positive) power of two on the right hand side.
- __ JumpIfNotPowerOfTwoOrZeroAndNeg(right,
- scratch,
- &right_negative,
- &vfp_modulo);
- // Perform modulo operation (scratch contains right - 1).
- __ and_(result, scratch, Operand(left));
- __ b(&done);
-
- __ bind(&right_negative);
- // Negate right. The sign of the divisor does not matter.
- __ rsb(right, right, Operand::Zero());
+ Label done;
+    // Check for x % 0; we have to deopt in this case because we can't
+    // return a NaN.
+ if (right->CanBeZero()) {
+ __ cmp(right_reg, Operand::Zero());
+ DeoptimizeIf(eq, instr->environment());
+ }
- __ bind(&vfp_modulo);
- // Load the arguments in VFP registers.
- // The divisor value is preloaded before. Be careful that 'right'
- // is only live on entry.
- __ vmov(dividend.low(), left);
- // From here on don't use right as it may have been reallocated
- // (for example to scratch2).
- right = no_reg;
+ __ Move(result_reg, left_reg);
+ // Load the arguments in VFP registers. The divisor value is preloaded
+ // before. Be careful that 'right_reg' is only live on entry.
+    // TODO(svenpanne) The last comment seems to be wrong nowadays.
+ __ vmov(dividend.low(), left_reg);
+ __ vmov(divisor.low(), right_reg);
__ vcvt_f64_s32(dividend, dividend.low());
__ vcvt_f64_s32(divisor, divisor.low());
- // We do not care about the sign of the divisor.
+ // We do not care about the sign of the divisor. Note that we still handle
+ // the kMinInt % -1 case correctly, though.
__ vabs(divisor, divisor);
// Compute the quotient and round it to a 32bit integer.
__ vdiv(quotient, dividend, divisor);
@@ -1281,22 +1320,18 @@ void LCodeGen::DoModI(LModI* instr) {
__ vmul(double_scratch, divisor, quotient);
__ vcvt_s32_f64(double_scratch.low(), double_scratch);
__ vmov(scratch, double_scratch.low());
+ __ sub(result_reg, left_reg, scratch, SetCC);
- if (!instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) {
- __ sub(result, left, scratch);
- } else {
- Label ok;
- // Check for -0.
- __ sub(scratch2, left, scratch, SetCC);
- __ b(ne, &ok);
- __ cmp(left, Operand::Zero());
+ // If we care about -0, test if the dividend is <0 and the result is 0.
+ if (left->CanBeNegative() &&
+ hmod->CanBeZero() &&
+ hmod->CheckFlag(HValue::kBailoutOnMinusZero)) {
+ __ b(ne, &done);
+ __ cmp(left_reg, Operand::Zero());
DeoptimizeIf(mi, instr->environment());
- __ bind(&ok);
- // Load the result and we are done.
- __ mov(result, scratch2);
}
+ __ bind(&done);
}
- __ bind(&done);
}
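
The two deopt conditions in DoModI above mirror observable JavaScript semantics: x % 0 yields NaN, which an int32 cannot represent, and a zero result with a negative dividend is -0. A JavaScript-level sketch of the bailout cases (illustrative only, not part of the patch):

    1 % 0;                  // NaN: the right->CanBeZero() deopt
    var r = -4 % 2;         // -0: the sign of % follows the dividend
    1 / r === -Infinity;    // true, distinguishing -0 from +0
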
@@ -1395,25 +1430,9 @@ void LCodeGen::EmitSignedIntegerDivisionByConstant(
void LCodeGen::DoDivI(LDivI* instr) {
- class DeferredDivI: public LDeferredCode {
- public:
- DeferredDivI(LCodeGen* codegen, LDivI* instr)
- : LDeferredCode(codegen), instr_(instr) { }
- virtual void Generate() {
- codegen()->DoDeferredBinaryOpStub(instr_->pointer_map(),
- instr_->left(),
- instr_->right(),
- Token::DIV);
- }
- virtual LInstruction* instr() { return instr_; }
- private:
- LDivI* instr_;
- };
-
if (instr->hydrogen()->HasPowerOf2Divisor()) {
Register dividend = ToRegister(instr->left());
- int32_t divisor =
- HConstant::cast(instr->hydrogen()->right())->Integer32Value();
+ int32_t divisor = instr->hydrogen()->right()->GetInteger32Constant();
int32_t test_value = 0;
int32_t power = 0;
@@ -1436,10 +1455,19 @@ void LCodeGen::DoDivI(LDivI* instr) {
}
if (test_value != 0) {
- // Deoptimize if remainder is not 0.
- __ tst(dividend, Operand(test_value));
- DeoptimizeIf(ne, instr->environment());
- __ mov(dividend, Operand(dividend, ASR, power));
+ if (instr->hydrogen()->CheckFlag(
+ HInstruction::kAllUsesTruncatingToInt32)) {
+ __ cmp(dividend, Operand(0));
+ __ rsb(dividend, dividend, Operand(0), LeaveCC, lt);
+ __ mov(dividend, Operand(dividend, ASR, power));
+ if (divisor > 0) __ rsb(dividend, dividend, Operand(0), LeaveCC, lt);
+ return; // Don't fall through to "__ rsb" below.
+ } else {
+ // Deoptimize if remainder is not 0.
+ __ tst(dividend, Operand(test_value));
+ DeoptimizeIf(ne, instr->environment());
+ __ mov(dividend, Operand(dividend, ASR, power));
+ }
}
if (divisor < 0) __ rsb(dividend, dividend, Operand(0));
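
In the power-of-two path above, a plain arithmetic shift rounds toward negative infinity, while JavaScript integer division truncates toward zero; that is why the truncating case takes the absolute value before shifting. Quick illustration, with truncDivPow2 as a hypothetical helper mirroring the rsb/ASR/rsb sequence for a positive divisor:

    -5 >> 2;        // -2 (rounds toward -Infinity)
    (-5 / 4) | 0;   // -1 (truncates toward zero)
    function truncDivPow2(x, power) {
      var q = Math.abs(x) >> power;
      return x < 0 ? -q : q;
    }
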
@@ -1476,40 +1504,38 @@ void LCodeGen::DoDivI(LDivI* instr) {
__ bind(&left_not_min_int);
}
- Label done, deoptimize;
- // Test for a few common cases first.
- __ cmp(right, Operand(1));
- __ mov(result, left, LeaveCC, eq);
- __ b(eq, &done);
-
- __ cmp(right, Operand(2));
- __ tst(left, Operand(1), eq);
- __ mov(result, Operand(left, ASR, 1), LeaveCC, eq);
- __ b(eq, &done);
-
- __ cmp(right, Operand(4));
- __ tst(left, Operand(3), eq);
- __ mov(result, Operand(left, ASR, 2), LeaveCC, eq);
- __ b(eq, &done);
-
- // Call the stub. The numbers in r0 and r1 have
- // to be tagged to Smis. If that is not possible, deoptimize.
- DeferredDivI* deferred = new(zone()) DeferredDivI(this, instr);
-
- __ TrySmiTag(left, &deoptimize);
- __ TrySmiTag(right, &deoptimize);
-
- __ b(al, deferred->entry());
- __ bind(deferred->exit());
-
- // If the result in r0 is a Smi, untag it, else deoptimize.
- __ JumpIfNotSmi(result, &deoptimize);
- __ SmiUntag(result);
- __ b(&done);
+ if (CpuFeatures::IsSupported(SUDIV)) {
+ CpuFeatureScope scope(masm(), SUDIV);
+ __ sdiv(result, left, right);
- __ bind(&deoptimize);
- DeoptimizeIf(al, instr->environment());
- __ bind(&done);
+ if (!instr->hydrogen()->CheckFlag(
+ HInstruction::kAllUsesTruncatingToInt32)) {
+ // Compute remainder and deopt if it's not zero.
+ const Register remainder = scratch0();
+ __ mls(remainder, result, right, left);
+ __ cmp(remainder, Operand::Zero());
+ DeoptimizeIf(ne, instr->environment());
+ }
+ } else {
+ const DoubleRegister vleft = ToDoubleRegister(instr->temp());
+ const DoubleRegister vright = double_scratch0();
+ __ vmov(vleft.low(), left);
+ __ vmov(vright.low(), right);
+ __ vcvt_f64_s32(vleft, vleft.low());
+ __ vcvt_f64_s32(vright, vright.low());
+ __ vdiv(vleft, vleft, vright); // vleft now contains the result.
+ __ vcvt_s32_f64(vright.low(), vleft);
+ __ vmov(result, vright.low());
+
+ if (!instr->hydrogen()->CheckFlag(
+ HInstruction::kAllUsesTruncatingToInt32)) {
+ // Deopt if exact conversion to integer was not possible.
+ // Use vright as scratch register.
+ __ vcvt_f64_s32(vright, vright.low());
+ __ VFPCompareAndSetFlags(vleft, vright);
+ DeoptimizeIf(ne, instr->environment());
+ }
+ }
}
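
In the non-truncating paths of DoDivI, the division must be exact: the SUDIV branch recomputes the remainder with mls and deopts if it is nonzero, and the VFP fallback deopts when converting the quotient back does not reproduce the same value. A sketch of the invariant, with exactInt32Div as a hypothetical helper:

    function exactInt32Div(a, b) {
      var q = (a / b) | 0;
      if (a - q * b !== 0) throw 'deopt';  // the mls remainder check
      return q;
    }
    exactInt32Div(8, 2);   // 4
    // exactInt32Div(7, 2) would hit the deopt path: 7 / 2 is 3.5
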
@@ -1608,38 +1634,6 @@ void LCodeGen::DoMathFloorOfDiv(LMathFloorOfDiv* instr) {
}
-void LCodeGen::DoDeferredBinaryOpStub(LPointerMap* pointer_map,
- LOperand* left_argument,
- LOperand* right_argument,
- Token::Value op) {
- Register left = ToRegister(left_argument);
- Register right = ToRegister(right_argument);
-
- PushSafepointRegistersScope scope(this, Safepoint::kWithRegistersAndDoubles);
- // Move left to r1 and right to r0 for the stub call.
- if (left.is(r1)) {
- __ Move(r0, right);
- } else if (left.is(r0) && right.is(r1)) {
- __ Swap(r0, r1, r2);
- } else if (left.is(r0)) {
- ASSERT(!right.is(r1));
- __ mov(r1, r0);
- __ mov(r0, right);
- } else {
- ASSERT(!left.is(r0) && !right.is(r0));
- __ mov(r0, right);
- __ mov(r1, left);
- }
- BinaryOpStub stub(op, OVERWRITE_LEFT);
- __ CallStub(&stub);
- RecordSafepointWithRegistersAndDoubles(pointer_map,
- 0,
- Safepoint::kNoLazyDeopt);
- // Overwrite the stored value of r0 with the result of the stub.
- __ StoreToSafepointRegistersAndDoublesSlot(r0, r0);
-}
-
-
void LCodeGen::DoMulI(LMulI* instr) {
Register scratch = scratch0();
Register result = ToRegister(instr->result());
@@ -1889,7 +1883,11 @@ void LCodeGen::DoRSubI(LRSubI* instr) {
void LCodeGen::DoConstantI(LConstantI* instr) {
- ASSERT(instr->result()->IsRegister());
+ __ mov(ToRegister(instr->result()), Operand(instr->value()));
+}
+
+
+void LCodeGen::DoConstantS(LConstantS* instr) {
__ mov(ToRegister(instr->result()), Operand(instr->value()));
}
@@ -1904,7 +1902,7 @@ void LCodeGen::DoConstantD(LConstantD* instr) {
void LCodeGen::DoConstantT(LConstantT* instr) {
Handle<Object> value = instr->value();
- ALLOW_HANDLE_DEREF(isolate(), "smi check");
+ AllowDeferredHandleDereference smi_check;
if (value->IsSmi()) {
__ mov(ToRegister(instr->result()), Operand(value));
} else {
@@ -2003,11 +2001,34 @@ void LCodeGen::DoDateField(LDateField* instr) {
void LCodeGen::DoSeqStringSetChar(LSeqStringSetChar* instr) {
- SeqStringSetCharGenerator::Generate(masm(),
- instr->encoding(),
- ToRegister(instr->string()),
- ToRegister(instr->index()),
- ToRegister(instr->value()));
+ Register string = ToRegister(instr->string());
+ Register index = ToRegister(instr->index());
+ Register value = ToRegister(instr->value());
+ String::Encoding encoding = instr->encoding();
+
+ if (FLAG_debug_code) {
+ __ ldr(ip, FieldMemOperand(string, HeapObject::kMapOffset));
+ __ ldrb(ip, FieldMemOperand(ip, Map::kInstanceTypeOffset));
+
+ __ and_(ip, ip, Operand(kStringRepresentationMask | kStringEncodingMask));
+ static const uint32_t one_byte_seq_type = kSeqStringTag | kOneByteStringTag;
+ static const uint32_t two_byte_seq_type = kSeqStringTag | kTwoByteStringTag;
+ __ cmp(ip, Operand(encoding == String::ONE_BYTE_ENCODING
+ ? one_byte_seq_type : two_byte_seq_type));
+ __ Check(eq, "Unexpected string type");
+ }
+
+ __ add(ip,
+ string,
+ Operand(SeqString::kHeaderSize - kHeapObjectTag));
+ if (encoding == String::ONE_BYTE_ENCODING) {
+ __ strb(value, MemOperand(ip, index));
+ } else {
+ // MemOperand with ip as the base register is not allowed for strh, so
+ // we do the address calculation explicitly.
+ __ add(ip, ip, Operand(index, LSL, 1));
+ __ strh(value, MemOperand(ip));
+ }
}
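
The inlined store above addresses the sequential string's payload directly: one-byte strings store at the raw index (strb), two-byte strings at the index scaled by two (strh). Sketch of the offset arithmetic, with charOffset as a hypothetical helper:

    function charOffset(index, encoding) {
      return encoding === 'one-byte' ? index : index << 1;
    }
    charOffset(3, 'one-byte');  // 3
    charOffset(3, 'two-byte');  // 6
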
@@ -2207,11 +2228,13 @@ void LCodeGen::DoBranch(LBranch* instr) {
int false_block = chunk_->LookupDestination(instr->false_block_id());
Representation r = instr->hydrogen()->value()->representation();
- if (r.IsInteger32()) {
+ if (r.IsInteger32() || r.IsSmi()) {
+ ASSERT(!info()->IsStub());
Register reg = ToRegister(instr->value());
__ cmp(reg, Operand::Zero());
EmitBranch(true_block, false_block, ne);
} else if (r.IsDouble()) {
+ ASSERT(!info()->IsStub());
DwVfpRegister reg = ToDoubleRegister(instr->value());
// Test the double value. Zero and NaN are false.
__ VFPCompareAndSetFlags(reg, 0.0);
@@ -2222,9 +2245,11 @@ void LCodeGen::DoBranch(LBranch* instr) {
Register reg = ToRegister(instr->value());
HType type = instr->hydrogen()->value()->type();
if (type.IsBoolean()) {
+ ASSERT(!info()->IsStub());
__ CompareRoot(reg, Heap::kTrueValueRootIndex);
EmitBranch(true_block, false_block, eq);
} else if (type.IsSmi()) {
+ ASSERT(!info()->IsStub());
__ cmp(reg, Operand::Zero());
EmitBranch(true_block, false_block, ne);
} else {
@@ -2386,11 +2411,19 @@ void LCodeGen::DoCmpIDAndBranch(LCmpIDAndBranch* instr) {
__ b(vs, chunk_->GetAssemblyLabel(false_block));
} else {
if (right->IsConstantOperand()) {
- __ cmp(ToRegister(left),
- Operand(ToInteger32(LConstantOperand::cast(right))));
+ int32_t value = ToInteger32(LConstantOperand::cast(right));
+ if (instr->hydrogen_value()->representation().IsSmi()) {
+ __ cmp(ToRegister(left), Operand(Smi::FromInt(value)));
+ } else {
+ __ cmp(ToRegister(left), Operand(value));
+ }
} else if (left->IsConstantOperand()) {
- __ cmp(ToRegister(right),
- Operand(ToInteger32(LConstantOperand::cast(left))));
+ int32_t value = ToInteger32(LConstantOperand::cast(left));
+ if (instr->hydrogen_value()->representation().IsSmi()) {
+ __ cmp(ToRegister(right), Operand(Smi::FromInt(value)));
+ } else {
+ __ cmp(ToRegister(right), Operand(value));
+ }
// We transposed the operands. Reverse the condition.
cond = ReverseCondition(cond);
} else {
@@ -2905,8 +2938,8 @@ void LCodeGen::DoReturn(LReturn* instr) {
int no_frame_start = -1;
if (NeedsEagerFrame()) {
__ mov(sp, fp);
- __ ldm(ia_w, sp, fp.bit() | lr.bit());
no_frame_start = masm_->pc_offset();
+ __ ldm(ia_w, sp, fp.bit() | lr.bit());
}
if (instr->has_constant_parameter_count()) {
int parameter_count = ToInteger32(instr->constant_parameter_count());
@@ -3045,7 +3078,8 @@ void LCodeGen::DoStoreContextSlot(LStoreContextSlot* instr) {
void LCodeGen::DoLoadNamedField(LLoadNamedField* instr) {
- int offset = instr->hydrogen()->offset();
+ HObjectAccess access = instr->hydrogen()->access();
+ int offset = access.offset();
Register object = ToRegister(instr->object());
if (instr->hydrogen()->representation().IsDouble()) {
DwVfpRegister result = ToDoubleRegister(instr->result());
@@ -3054,7 +3088,7 @@ void LCodeGen::DoLoadNamedField(LLoadNamedField* instr) {
}
Register result = ToRegister(instr->result());
- if (instr->hydrogen()->is_in_object()) {
+ if (access.IsInobject()) {
__ ldr(result, FieldMemOperand(object, offset));
} else {
__ ldr(result, FieldMemOperand(object, JSObject::kPropertiesOffset));
@@ -3123,8 +3157,7 @@ void LCodeGen::DoLoadNamedFieldPolymorphic(LLoadNamedFieldPolymorphic* instr) {
bool last = (i == map_count - 1);
Handle<Map> map = instr->hydrogen()->types()->at(i);
Label check_passed;
- __ CompareMap(
- object_map, map, &check_passed, ALLOW_ELEMENT_TRANSITION_MAPS);
+ __ CompareMap(object_map, map, &check_passed);
if (last && !need_generic) {
DeoptimizeIf(ne, instr->environment());
__ bind(&check_passed);
@@ -3249,7 +3282,7 @@ void LCodeGen::DoLoadKeyedExternalArray(LLoadKeyed* instr) {
key = ToRegister(instr->key());
}
int element_size_shift = ElementsKindToShiftSize(elements_kind);
- int shift_size = (instr->hydrogen()->key()->representation().IsTagged())
+ int shift_size = (instr->hydrogen()->key()->representation().IsSmi())
? (element_size_shift - kSmiTagSize) : element_size_shift;
int additional_offset = instr->additional_index() << element_size_shift;
@@ -3321,7 +3354,7 @@ void LCodeGen::DoLoadKeyedFixedDoubleArray(LLoadKeyed* instr) {
Register scratch = scratch0();
int element_size_shift = ElementsKindToShiftSize(FAST_DOUBLE_ELEMENTS);
- int shift_size = (instr->hydrogen()->key()->representation().IsTagged())
+ int shift_size = (instr->hydrogen()->key()->representation().IsSmi())
? (element_size_shift - kSmiTagSize) : element_size_shift;
int constant_key = 0;
if (key_is_constant) {
@@ -3366,7 +3399,7 @@ void LCodeGen::DoLoadKeyedFixedArray(LLoadKeyed* instr) {
// representation for the key to be an integer, the input gets replaced
// during bounds check elimination with the index argument to the bounds
// check, which can be tagged, so that case must be handled here, too.
- if (instr->hydrogen()->key()->representation().IsTagged()) {
+ if (instr->hydrogen()->key()->representation().IsSmi()) {
__ add(scratch, elements, Operand::PointerOffsetFromSmiKey(key));
} else {
__ add(scratch, elements, Operand(key, LSL, kPointerSizeLog2));
@@ -3924,7 +3957,10 @@ void LCodeGen::DoPower(LPower* instr) {
ASSERT(ToDoubleRegister(instr->left()).is(d1));
ASSERT(ToDoubleRegister(instr->result()).is(d3));
- if (exponent_type.IsTagged()) {
+ if (exponent_type.IsSmi()) {
+ MathPowStub stub(MathPowStub::TAGGED);
+ __ CallStub(&stub);
+ } else if (exponent_type.IsTagged()) {
Label no_deopt;
__ JumpIfSmi(r2, &no_deopt);
__ ldr(r7, FieldMemOperand(r2, HeapObject::kMapOffset));
@@ -4176,14 +4212,17 @@ void LCodeGen::DoCallNewArray(LCallNewArray* instr) {
__ mov(r0, Operand(instr->arity()));
__ mov(r2, Operand(instr->hydrogen()->property_cell()));
ElementsKind kind = instr->hydrogen()->elements_kind();
+ bool disable_allocation_sites =
+ (AllocationSiteInfo::GetMode(kind) == TRACK_ALLOCATION_SITE);
+
if (instr->arity() == 0) {
- ArrayNoArgumentConstructorStub stub(kind);
+ ArrayNoArgumentConstructorStub stub(kind, disable_allocation_sites);
CallCode(stub.GetCode(isolate()), RelocInfo::CONSTRUCT_CALL, instr);
} else if (instr->arity() == 1) {
- ArraySingleArgumentConstructorStub stub(kind);
+ ArraySingleArgumentConstructorStub stub(kind, disable_allocation_sites);
CallCode(stub.GetCode(isolate()), RelocInfo::CONSTRUCT_CALL, instr);
} else {
- ArrayNArgumentsConstructorStub stub(kind);
+ ArrayNArgumentsConstructorStub stub(kind, disable_allocation_sites);
CallCode(stub.GetCode(isolate()), RelocInfo::CONSTRUCT_CALL, instr);
}
}
@@ -4206,17 +4245,13 @@ void LCodeGen::DoStoreNamedField(LStoreNamedField* instr) {
Register object = ToRegister(instr->object());
Register scratch = scratch0();
- int offset = instr->offset();
+
+ HObjectAccess access = instr->hydrogen()->access();
+ int offset = access.offset();
Handle<Map> transition = instr->transition();
- if (FLAG_track_fields && representation.IsSmi()) {
- Register value = ToRegister(instr->value());
- __ SmiTag(value, value, SetCC);
- if (!instr->hydrogen()->value()->range()->IsInSmiRange()) {
- DeoptimizeIf(vs, instr->environment());
- }
- } else if (FLAG_track_heap_object_fields && representation.IsHeapObject()) {
+ if (FLAG_track_heap_object_fields && representation.IsHeapObject()) {
Register value = ToRegister(instr->value());
if (!instr->hydrogen()->value()->type().IsHeapObject()) {
__ SmiTst(value);
@@ -4224,7 +4259,7 @@ void LCodeGen::DoStoreNamedField(LStoreNamedField* instr) {
}
} else if (FLAG_track_double_fields && representation.IsDouble()) {
ASSERT(transition.is_null());
- ASSERT(instr->is_in_object());
+ ASSERT(access.IsInobject());
ASSERT(!instr->hydrogen()->NeedsWriteBarrier());
DwVfpRegister value = ToDoubleRegister(instr->value());
__ vstr(value, FieldMemOperand(object, offset));
@@ -4257,7 +4292,7 @@ void LCodeGen::DoStoreNamedField(LStoreNamedField* instr) {
HType type = instr->hydrogen()->value()->type();
SmiCheck check_needed =
type.IsHeapObject() ? OMIT_SMI_CHECK : INLINE_SMI_CHECK;
- if (instr->is_in_object()) {
+ if (access.IsInobject()) {
__ str(value, FieldMemOperand(object, offset));
if (instr->hydrogen()->NeedsWriteBarrier()) {
// Update the write barrier for the object for in-object properties.
@@ -4308,7 +4343,7 @@ void LCodeGen::DoBoundsCheck(LBoundsCheck* instr) {
if (instr->index()->IsConstantOperand()) {
int constant_index =
ToInteger32(LConstantOperand::cast(instr->index()));
- if (instr->hydrogen()->length()->representation().IsTagged()) {
+ if (instr->hydrogen()->length()->representation().IsSmi()) {
__ mov(ip, Operand(Smi::FromInt(constant_index)));
} else {
__ mov(ip, Operand(constant_index));
@@ -4336,7 +4371,7 @@ void LCodeGen::DoStoreKeyedExternalArray(LStoreKeyed* instr) {
key = ToRegister(instr->key());
}
int element_size_shift = ElementsKindToShiftSize(elements_kind);
- int shift_size = (instr->hydrogen()->key()->representation().IsTagged())
+ int shift_size = (instr->hydrogen()->key()->representation().IsSmi())
? (element_size_shift - kSmiTagSize) : element_size_shift;
int additional_offset = instr->additional_index() << element_size_shift;
@@ -4409,7 +4444,7 @@ void LCodeGen::DoStoreKeyedFixedDoubleArray(LStoreKeyed* instr) {
key = ToRegister(instr->key());
}
int element_size_shift = ElementsKindToShiftSize(FAST_DOUBLE_ELEMENTS);
- int shift_size = (instr->hydrogen()->key()->representation().IsTagged())
+ int shift_size = (instr->hydrogen()->key()->representation().IsSmi())
? (element_size_shift - kSmiTagSize) : element_size_shift;
Operand operand = key_is_constant
? Operand((constant_key << element_size_shift) +
@@ -4455,7 +4490,7 @@ void LCodeGen::DoStoreKeyedFixedArray(LStoreKeyed* instr) {
// representation for the key to be an integer, the input gets replaced
// during bounds check elimination with the index argument to the bounds
// check, which can be tagged, so that case must be handled here, too.
- if (instr->hydrogen()->key()->representation().IsTagged()) {
+ if (instr->hydrogen()->key()->representation().IsSmi()) {
__ add(scratch, elements, Operand::PointerOffsetFromSmiKey(key));
} else {
__ add(scratch, elements, Operand(key, LSL, kPointerSizeLog2));
@@ -4702,6 +4737,19 @@ void LCodeGen::DoInteger32ToDouble(LInteger32ToDouble* instr) {
}
+void LCodeGen::DoInteger32ToSmi(LInteger32ToSmi* instr) {
+ LOperand* input = instr->value();
+ ASSERT(input->IsRegister());
+ LOperand* output = instr->result();
+ ASSERT(output->IsRegister());
+ __ SmiTag(ToRegister(output), ToRegister(input), SetCC);
+ if (!instr->hydrogen()->value()->HasRange() ||
+ !instr->hydrogen()->value()->range()->IsInSmiRange()) {
+ DeoptimizeIf(vs, instr->environment());
+ }
+}
+
+
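
DoInteger32ToSmi tags with SetCC and deopts on overflow because a smi on 32-bit ARM carries a 31-bit payload: tagging is a left shift by one, which overflows exactly when the value leaves the 31-bit signed range. Range sketch, with smiTagOverflows as a hypothetical helper:

    function smiTagOverflows(x) {
      return x < -(1 << 30) || x > (1 << 30) - 1;
    }
    smiTagOverflows(0x3fffffff);  // false: largest smi value
    smiTagOverflows(0x40000000);  // true: x << 1 would overflow
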
void LCodeGen::DoUint32ToDouble(LUint32ToDouble* instr) {
LOperand* input = instr->value();
LOperand* output = instr->result();
@@ -4913,7 +4961,7 @@ void LCodeGen::DoSmiUntag(LSmiUntag* instr) {
void LCodeGen::EmitNumberUntagD(Register input_reg,
DwVfpRegister result_reg,
- bool deoptimize_on_undefined,
+ bool allow_undefined_as_nan,
bool deoptimize_on_minus_zero,
LEnvironment* env,
NumberUntagDMode mode) {
@@ -4923,7 +4971,9 @@ void LCodeGen::EmitNumberUntagD(Register input_reg,
Label load_smi, heap_number, done;
- if (mode == NUMBER_CANDIDATE_IS_ANY_TAGGED) {
+ STATIC_ASSERT(NUMBER_CANDIDATE_IS_ANY_TAGGED_CONVERT_HOLE >
+ NUMBER_CANDIDATE_IS_ANY_TAGGED);
+ if (mode >= NUMBER_CANDIDATE_IS_ANY_TAGGED) {
// Smi check.
__ UntagAndJumpIfSmi(scratch, input_reg, &load_smi);
@@ -4931,17 +4981,23 @@ void LCodeGen::EmitNumberUntagD(Register input_reg,
__ ldr(scratch, FieldMemOperand(input_reg, HeapObject::kMapOffset));
__ LoadRoot(ip, Heap::kHeapNumberMapRootIndex);
__ cmp(scratch, Operand(ip));
- if (deoptimize_on_undefined) {
+ if (!allow_undefined_as_nan) {
DeoptimizeIf(ne, env);
} else {
- Label heap_number;
+ Label heap_number, convert;
__ b(eq, &heap_number);
+ // Convert undefined (and hole) to NaN.
__ LoadRoot(ip, Heap::kUndefinedValueRootIndex);
__ cmp(input_reg, Operand(ip));
+ if (mode == NUMBER_CANDIDATE_IS_ANY_TAGGED_CONVERT_HOLE) {
+ __ b(eq, &convert);
+ __ LoadRoot(ip, Heap::kTheHoleValueRootIndex);
+ __ cmp(input_reg, Operand(ip));
+ }
DeoptimizeIf(ne, env);
- // Convert undefined to NaN.
+ __ bind(&convert);
__ LoadRoot(ip, Heap::kNanValueRootIndex);
__ sub(ip, ip, Operand(kHeapObjectTag));
__ vldr(result_reg, ip, HeapNumber::kValueOffset);
@@ -4961,15 +5017,6 @@ void LCodeGen::EmitNumberUntagD(Register input_reg,
DeoptimizeIf(eq, env);
}
__ jmp(&done);
- } else if (mode == NUMBER_CANDIDATE_IS_SMI_OR_HOLE) {
- __ SmiUntag(scratch, input_reg, SetCC);
- DeoptimizeIf(cs, env);
- } else if (mode == NUMBER_CANDIDATE_IS_SMI_CONVERT_HOLE) {
- __ UntagAndJumpIfSmi(scratch, input_reg, &load_smi);
- __ Vmov(result_reg,
- FixedDoubleArray::hole_nan_as_double(),
- no_reg);
- __ b(&done);
} else {
__ SmiUntag(scratch, input_reg);
ASSERT(mode == NUMBER_CANDIDATE_IS_SMI);
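
The new NUMBER_CANDIDATE_IS_ANY_TAGGED_CONVERT_HOLE mode folds the hole into the same undefined-to-NaN conversion, which matches ToNumber at the language level (illustration only):

    Number(undefined);   // NaN
    var a = [ , 1 ];     // a[0] is a hole
    a[0] + 0;            // NaN: the hole reads as undefined, then converts
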
@@ -5093,24 +5140,18 @@ void LCodeGen::DoNumberUntagD(LNumberUntagD* instr) {
NumberUntagDMode mode = NUMBER_CANDIDATE_IS_ANY_TAGGED;
HValue* value = instr->hydrogen()->value();
if (value->type().IsSmi()) {
- if (value->IsLoadKeyed()) {
- HLoadKeyed* load = HLoadKeyed::cast(value);
- if (load->UsesMustHandleHole()) {
- if (load->hole_mode() == ALLOW_RETURN_HOLE) {
- mode = NUMBER_CANDIDATE_IS_SMI_CONVERT_HOLE;
- } else {
- mode = NUMBER_CANDIDATE_IS_SMI_OR_HOLE;
- }
- } else {
- mode = NUMBER_CANDIDATE_IS_SMI;
+ mode = NUMBER_CANDIDATE_IS_SMI;
+ } else if (value->IsLoadKeyed()) {
+ HLoadKeyed* load = HLoadKeyed::cast(value);
+ if (load->UsesMustHandleHole()) {
+ if (load->hole_mode() == ALLOW_RETURN_HOLE) {
+ mode = NUMBER_CANDIDATE_IS_ANY_TAGGED_CONVERT_HOLE;
}
- } else {
- mode = NUMBER_CANDIDATE_IS_SMI;
}
}
EmitNumberUntagD(input_reg, result_reg,
- instr->hydrogen()->deoptimize_on_undefined(),
+ instr->hydrogen()->allow_undefined_as_nan(),
instr->hydrogen()->deoptimize_on_minus_zero(),
instr->environment(),
mode);
@@ -5124,7 +5165,33 @@ void LCodeGen::DoDoubleToI(LDoubleToI* instr) {
DwVfpRegister double_input = ToDoubleRegister(instr->value());
DwVfpRegister double_scratch = double_scratch0();
- Label done;
+ if (instr->truncating()) {
+ Register scratch3 = ToRegister(instr->temp2());
+ __ ECMAToInt32(result_reg, double_input,
+ scratch1, scratch2, scratch3, double_scratch);
+ } else {
+ __ TryDoubleToInt32Exact(result_reg, double_input, double_scratch);
+ // Deoptimize if the input wasn't an int32 (inside a double).
+ DeoptimizeIf(ne, instr->environment());
+ if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) {
+ Label done;
+ __ cmp(result_reg, Operand::Zero());
+ __ b(ne, &done);
+ __ vmov(scratch1, double_input.high());
+ __ tst(scratch1, Operand(HeapNumber::kSignMask));
+ DeoptimizeIf(ne, instr->environment());
+ __ bind(&done);
+ }
+ }
+}
+
+
+void LCodeGen::DoDoubleToSmi(LDoubleToSmi* instr) {
+ Register result_reg = ToRegister(instr->result());
+ Register scratch1 = scratch0();
+ Register scratch2 = ToRegister(instr->temp());
+ DwVfpRegister double_input = ToDoubleRegister(instr->value());
+ DwVfpRegister double_scratch = double_scratch0();
if (instr->truncating()) {
Register scratch3 = ToRegister(instr->temp2());
@@ -5134,8 +5201,18 @@ void LCodeGen::DoDoubleToI(LDoubleToI* instr) {
__ TryDoubleToInt32Exact(result_reg, double_input, double_scratch);
// Deoptimize if the input wasn't an int32 (inside a double).
DeoptimizeIf(ne, instr->environment());
+ if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) {
+ Label done;
+ __ cmp(result_reg, Operand::Zero());
+ __ b(ne, &done);
+ __ vmov(scratch1, double_input.high());
+ __ tst(scratch1, Operand(HeapNumber::kSignMask));
+ DeoptimizeIf(ne, instr->environment());
+ __ bind(&done);
+ }
}
- __ bind(&done);
+ __ SmiTag(result_reg, SetCC);
+ DeoptimizeIf(vs, instr->environment());
}
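
Both DoDoubleToI and DoDoubleToSmi now share the minus-zero bailout: when the integer result is 0, the sign bit of the double's high word tells -0 apart from +0, and neither an int32 nor a smi can represent -0. The distinction is observable from JavaScript (sketch; isMinusZero is a hypothetical helper):

    function isMinusZero(x) {
      return x === 0 && 1 / x === -Infinity;  // same role as HeapNumber::kSignMask
    }
    isMinusZero(-0);  // true: the non-truncating paths must deoptimize
    isMinusZero(0);   // false
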
@@ -5199,7 +5276,7 @@ void LCodeGen::DoCheckInstanceType(LCheckInstanceType* instr) {
void LCodeGen::DoCheckFunction(LCheckFunction* instr) {
Register reg = ToRegister(instr->value());
Handle<JSFunction> target = instr->hydrogen()->target();
- ALLOW_HANDLE_DEREF(isolate(), "smi check");
+ AllowDeferredHandleDereference smi_check;
if (isolate()->heap()->InNewSpace(*target)) {
Register reg = ToRegister(instr->value());
Handle<JSGlobalPropertyCell> cell =
@@ -5216,10 +5293,9 @@ void LCodeGen::DoCheckFunction(LCheckFunction* instr) {
void LCodeGen::DoCheckMapCommon(Register map_reg,
Handle<Map> map,
- CompareMapMode mode,
LEnvironment* env) {
Label success;
- __ CompareMap(map_reg, map, &success, mode);
+ __ CompareMap(map_reg, map, &success);
DeoptimizeIf(ne, env);
__ bind(&success);
}
@@ -5236,11 +5312,11 @@ void LCodeGen::DoCheckMaps(LCheckMaps* instr) {
__ ldr(map_reg, FieldMemOperand(reg, HeapObject::kMapOffset));
for (int i = 0; i < map_set->length() - 1; i++) {
Handle<Map> map = map_set->at(i);
- __ CompareMap(map_reg, map, &success, REQUIRE_EXACT_MAP);
+ __ CompareMap(map_reg, map, &success);
__ b(eq, &success);
}
Handle<Map> map = map_set->last();
- DoCheckMapCommon(map_reg, map, REQUIRE_EXACT_MAP, instr->environment());
+ DoCheckMapCommon(map_reg, map, instr->environment());
__ bind(&success);
}
@@ -5314,89 +5390,12 @@ void LCodeGen::DoCheckPrototypeMaps(LCheckPrototypeMaps* instr) {
for (int i = 0; i < prototypes->length(); i++) {
__ LoadHeapObject(prototype_reg, prototypes->at(i));
__ ldr(map_reg, FieldMemOperand(prototype_reg, HeapObject::kMapOffset));
- DoCheckMapCommon(map_reg,
- maps->at(i),
- ALLOW_ELEMENT_TRANSITION_MAPS,
- instr->environment());
+ DoCheckMapCommon(map_reg, maps->at(i), instr->environment());
}
}
}
-void LCodeGen::DoAllocateObject(LAllocateObject* instr) {
- class DeferredAllocateObject: public LDeferredCode {
- public:
- DeferredAllocateObject(LCodeGen* codegen, LAllocateObject* instr)
- : LDeferredCode(codegen), instr_(instr) { }
- virtual void Generate() { codegen()->DoDeferredAllocateObject(instr_); }
- virtual LInstruction* instr() { return instr_; }
- private:
- LAllocateObject* instr_;
- };
-
- DeferredAllocateObject* deferred =
- new(zone()) DeferredAllocateObject(this, instr);
-
- Register result = ToRegister(instr->result());
- Register scratch = ToRegister(instr->temp());
- Register scratch2 = ToRegister(instr->temp2());
- Handle<JSFunction> constructor = instr->hydrogen()->constructor();
- Handle<Map> initial_map = instr->hydrogen()->constructor_initial_map();
- int instance_size = initial_map->instance_size();
- ASSERT(initial_map->pre_allocated_property_fields() +
- initial_map->unused_property_fields() -
- initial_map->inobject_properties() == 0);
-
- __ Allocate(instance_size, result, scratch, scratch2, deferred->entry(),
- TAG_OBJECT);
-
- __ bind(deferred->exit());
- if (FLAG_debug_code) {
- Label is_in_new_space;
- __ JumpIfInNewSpace(result, scratch, &is_in_new_space);
- __ Abort("Allocated object is not in new-space");
- __ bind(&is_in_new_space);
- }
-
- // Load the initial map.
- Register map = scratch;
- __ LoadHeapObject(map, constructor);
- __ ldr(map, FieldMemOperand(map, JSFunction::kPrototypeOrInitialMapOffset));
-
- // Initialize map and fields of the newly allocated object.
- ASSERT(initial_map->instance_type() == JS_OBJECT_TYPE);
- __ str(map, FieldMemOperand(result, JSObject::kMapOffset));
- __ LoadRoot(scratch, Heap::kEmptyFixedArrayRootIndex);
- __ str(scratch, FieldMemOperand(result, JSObject::kElementsOffset));
- __ str(scratch, FieldMemOperand(result, JSObject::kPropertiesOffset));
- if (initial_map->inobject_properties() != 0) {
- __ LoadRoot(scratch, Heap::kUndefinedValueRootIndex);
- for (int i = 0; i < initial_map->inobject_properties(); i++) {
- int property_offset = JSObject::kHeaderSize + i * kPointerSize;
- __ str(scratch, FieldMemOperand(result, property_offset));
- }
- }
-}
-
-
-void LCodeGen::DoDeferredAllocateObject(LAllocateObject* instr) {
- Register result = ToRegister(instr->result());
- Handle<Map> initial_map = instr->hydrogen()->constructor_initial_map();
- int instance_size = initial_map->instance_size();
-
- // TODO(3095996): Get rid of this. For now, we need to make the
- // result register contain a valid pointer because it is already
- // contained in the register pointer map.
- __ mov(result, Operand::Zero());
-
- PushSafepointRegistersScope scope(this, Safepoint::kWithRegisters);
- __ mov(r0, Operand(Smi::FromInt(instance_size)));
- __ push(r0);
- CallRuntimeFromDeferred(Runtime::kAllocateInNewSpace, 1, instr);
- __ StoreToSafepointRegisterSlot(r0, result);
-}
-
-
void LCodeGen::DoAllocate(LAllocate* instr) {
class DeferredAllocate: public LDeferredCode {
public:
@@ -5421,8 +5420,12 @@ void LCodeGen::DoAllocate(LAllocate* instr) {
flags = static_cast<AllocationFlags>(flags | DOUBLE_ALIGNMENT);
}
if (instr->hydrogen()->CanAllocateInOldPointerSpace()) {
+ ASSERT(!instr->hydrogen()->CanAllocateInOldDataSpace());
flags = static_cast<AllocationFlags>(flags | PRETENURE_OLD_POINTER_SPACE);
+ } else if (instr->hydrogen()->CanAllocateInOldDataSpace()) {
+ flags = static_cast<AllocationFlags>(flags | PRETENURE_OLD_DATA_SPACE);
}
+
if (instr->size()->IsConstantOperand()) {
int32_t size = ToInteger32(LConstantOperand::cast(instr->size()));
__ Allocate(size, result, scratch, scratch2, deferred->entry(), flags);
@@ -5460,11 +5463,12 @@ void LCodeGen::DoDeferredAllocate(LAllocate* instr) {
}
if (instr->hydrogen()->CanAllocateInOldPointerSpace()) {
- CallRuntimeFromDeferred(
- Runtime::kAllocateInOldPointerSpace, 1, instr);
+ ASSERT(!instr->hydrogen()->CanAllocateInOldDataSpace());
+ CallRuntimeFromDeferred(Runtime::kAllocateInOldPointerSpace, 1, instr);
+ } else if (instr->hydrogen()->CanAllocateInOldDataSpace()) {
+ CallRuntimeFromDeferred(Runtime::kAllocateInOldDataSpace, 1, instr);
} else {
- CallRuntimeFromDeferred(
- Runtime::kAllocateInNewSpace, 1, instr);
+ CallRuntimeFromDeferred(Runtime::kAllocateInNewSpace, 1, instr);
}
__ StoreToSafepointRegisterSlot(r0, result);
}
diff --git a/deps/v8/src/arm/lithium-codegen-arm.h b/deps/v8/src/arm/lithium-codegen-arm.h
index 1a34169ebf..f264259f0a 100644
--- a/deps/v8/src/arm/lithium-codegen-arm.h
+++ b/deps/v8/src/arm/lithium-codegen-arm.h
@@ -119,6 +119,7 @@ class LCodeGen BASE_EMBEDDED {
SwVfpRegister flt_scratch,
DwVfpRegister dbl_scratch);
int ToInteger32(LConstantOperand* op) const;
+ Smi* ToSmi(LConstantOperand* op) const;
double ToDouble(LConstantOperand* op) const;
Operand ToOperand(LOperand* op);
MemOperand ToMemOperand(LOperand* op) const;
@@ -126,6 +127,7 @@ class LCodeGen BASE_EMBEDDED {
MemOperand ToHighMemOperand(LOperand* op) const;
bool IsInteger32(LConstantOperand* op) const;
+ bool IsSmi(LConstantOperand* op) const;
Handle<Object> ToHandle(LConstantOperand* op) const;
// Try to generate code for the entire chunk, but it may fail if the
@@ -138,10 +140,6 @@ class LCodeGen BASE_EMBEDDED {
void FinishCode(Handle<Code> code);
// Deferred code support.
- void DoDeferredBinaryOpStub(LPointerMap* pointer_map,
- LOperand* left_argument,
- LOperand* right_argument,
- Token::Value op);
void DoDeferredNumberTagD(LNumberTagD* instr);
enum IntegerSignedness { SIGNED_INT32, UNSIGNED_INT32 };
@@ -155,13 +153,11 @@ class LCodeGen BASE_EMBEDDED {
void DoDeferredRandom(LRandom* instr);
void DoDeferredStringCharCodeAt(LStringCharCodeAt* instr);
void DoDeferredStringCharFromCode(LStringCharFromCode* instr);
- void DoDeferredAllocateObject(LAllocateObject* instr);
void DoDeferredAllocate(LAllocate* instr);
void DoDeferredInstanceOfKnownGlobal(LInstanceOfKnownGlobal* instr,
Label* map_check);
- void DoCheckMapCommon(Register map_reg, Handle<Map> map,
- CompareMapMode mode, LEnvironment* env);
+ void DoCheckMapCommon(Register map_reg, Handle<Map> map, LEnvironment* env);
// Parallel move support.
void DoParallelMove(LParallelMove* move);
@@ -334,7 +330,7 @@ class LCodeGen BASE_EMBEDDED {
void EmitBranch(int left_block, int right_block, Condition cc);
void EmitNumberUntagD(Register input,
DwVfpRegister result,
- bool deoptimize_on_undefined,
+ bool allow_undefined_as_nan,
bool deoptimize_on_minus_zero,
LEnvironment* env,
NumberUntagDMode mode);
diff --git a/deps/v8/src/arm/lithium-gap-resolver-arm.cc b/deps/v8/src/arm/lithium-gap-resolver-arm.cc
index 596d58f470..352fbb90ca 100644
--- a/deps/v8/src/arm/lithium-gap-resolver-arm.cc
+++ b/deps/v8/src/arm/lithium-gap-resolver-arm.cc
@@ -248,7 +248,9 @@ void LGapResolver::EmitMove(int index) {
LConstantOperand* constant_source = LConstantOperand::cast(source);
if (destination->IsRegister()) {
Register dst = cgen_->ToRegister(destination);
- if (cgen_->IsInteger32(constant_source)) {
+ if (cgen_->IsSmi(constant_source)) {
+ __ mov(dst, Operand(cgen_->ToSmi(constant_source)));
+ } else if (cgen_->IsInteger32(constant_source)) {
__ mov(dst, Operand(cgen_->ToInteger32(constant_source)));
} else {
__ LoadObject(dst, cgen_->ToHandle(constant_source));
@@ -256,7 +258,9 @@ void LGapResolver::EmitMove(int index) {
} else {
ASSERT(destination->IsStackSlot());
ASSERT(!in_cycle_); // Constant moves happen after all cycles are gone.
- if (cgen_->IsInteger32(constant_source)) {
+ if (cgen_->IsSmi(constant_source)) {
+ __ mov(kSavedValueRegister, Operand(cgen_->ToSmi(constant_source)));
+ } else if (cgen_->IsInteger32(constant_source)) {
__ mov(kSavedValueRegister,
Operand(cgen_->ToInteger32(constant_source)));
} else {
diff --git a/deps/v8/src/arm/macro-assembler-arm.cc b/deps/v8/src/arm/macro-assembler-arm.cc
index a3b21a2bd5..f3cfdc76a9 100644
--- a/deps/v8/src/arm/macro-assembler-arm.cc
+++ b/deps/v8/src/arm/macro-assembler-arm.cc
@@ -74,7 +74,7 @@ void MacroAssembler::Jump(Handle<Code> code, RelocInfo::Mode rmode,
Condition cond) {
ASSERT(RelocInfo::IsCodeTarget(rmode));
// 'code' is always generated ARM code, never THUMB code
- ALLOW_HANDLE_DEREF(isolate(), "embedding raw address");
+ AllowDeferredHandleDereference embedding_raw_address;
Jump(reinterpret_cast<intptr_t>(code.location()), rmode, cond);
}
@@ -163,7 +163,7 @@ int MacroAssembler::CallSize(Handle<Code> code,
RelocInfo::Mode rmode,
TypeFeedbackId ast_id,
Condition cond) {
- ALLOW_HANDLE_DEREF(isolate(), "using raw address");
+ AllowDeferredHandleDereference using_raw_address;
return CallSize(reinterpret_cast<Address>(code.location()), rmode, cond);
}
@@ -181,7 +181,7 @@ void MacroAssembler::Call(Handle<Code> code,
rmode = RelocInfo::CODE_TARGET_WITH_ID;
}
// 'code' is always generated ARM code, never THUMB code
- ALLOW_HANDLE_DEREF(isolate(), "embedding raw address");
+ AllowDeferredHandleDereference embedding_raw_address;
Call(reinterpret_cast<Address>(code.location()), rmode, cond, mode);
}
@@ -398,7 +398,7 @@ void MacroAssembler::StoreRoot(Register source,
void MacroAssembler::LoadHeapObject(Register result,
Handle<HeapObject> object) {
- ALLOW_HANDLE_DEREF(isolate(), "using raw address");
+ AllowDeferredHandleDereference using_raw_address;
if (isolate()->heap()->InNewSpace(*object)) {
Handle<JSGlobalPropertyCell> cell =
isolate()->factory()->NewJSGlobalPropertyCell(object);
@@ -2105,32 +2105,16 @@ void MacroAssembler::StoreNumberToDoubleElements(Register value_reg,
void MacroAssembler::CompareMap(Register obj,
Register scratch,
Handle<Map> map,
- Label* early_success,
- CompareMapMode mode) {
+ Label* early_success) {
ldr(scratch, FieldMemOperand(obj, HeapObject::kMapOffset));
- CompareMap(scratch, map, early_success, mode);
+ CompareMap(scratch, map, early_success);
}
void MacroAssembler::CompareMap(Register obj_map,
Handle<Map> map,
- Label* early_success,
- CompareMapMode mode) {
+ Label* early_success) {
cmp(obj_map, Operand(map));
- if (mode == ALLOW_ELEMENT_TRANSITION_MAPS) {
- ElementsKind kind = map->elements_kind();
- if (IsFastElementsKind(kind)) {
- bool packed = IsFastPackedElementsKind(kind);
- Map* current_map = *map;
- while (CanTransitionToMoreGeneralFastElementsKind(kind, packed)) {
- kind = GetNextMoreGeneralFastElementsKind(kind, packed);
- current_map = current_map->LookupElementsTransitionMap(kind);
- if (!current_map) break;
- b(eq, early_success);
- cmp(obj_map, Operand(Handle<Map>(current_map)));
- }
- }
- }
}
@@ -2138,14 +2122,13 @@ void MacroAssembler::CheckMap(Register obj,
Register scratch,
Handle<Map> map,
Label* fail,
- SmiCheckType smi_check_type,
- CompareMapMode mode) {
+ SmiCheckType smi_check_type) {
if (smi_check_type == DO_SMI_CHECK) {
JumpIfSmi(obj, fail);
}
Label success;
- CompareMap(obj, scratch, map, &success, mode);
+ CompareMap(obj, scratch, map, &success);
b(ne, fail);
bind(&success);
}
diff --git a/deps/v8/src/arm/macro-assembler-arm.h b/deps/v8/src/arm/macro-assembler-arm.h
index 50f53b3168..11d3066b91 100644
--- a/deps/v8/src/arm/macro-assembler-arm.h
+++ b/deps/v8/src/arm/macro-assembler-arm.h
@@ -162,7 +162,7 @@ class MacroAssembler: public Assembler {
void LoadHeapObject(Register dst, Handle<HeapObject> object);
void LoadObject(Register result, Handle<Object> object) {
- ALLOW_HANDLE_DEREF(isolate(), "heap object check");
+ AllowDeferredHandleDereference heap_object_check;
if (object->IsHeapObject()) {
LoadHeapObject(result, Handle<HeapObject>::cast(object));
} else {
@@ -884,15 +884,13 @@ class MacroAssembler: public Assembler {
void CompareMap(Register obj,
Register scratch,
Handle<Map> map,
- Label* early_success,
- CompareMapMode mode = REQUIRE_EXACT_MAP);
+ Label* early_success);
// As above, but the map of the object is already loaded into the register
// which is preserved by the code generated.
void CompareMap(Register obj_map,
Handle<Map> map,
- Label* early_success,
- CompareMapMode mode = REQUIRE_EXACT_MAP);
+ Label* early_success);
// Check if the map of an object is equal to a specified map and branch to
// label if not. Skip the smi check if not required (object is known to be a
@@ -902,8 +900,7 @@ class MacroAssembler: public Assembler {
Register scratch,
Handle<Map> map,
Label* fail,
- SmiCheckType smi_check_type,
- CompareMapMode mode = REQUIRE_EXACT_MAP);
+ SmiCheckType smi_check_type);
void CheckMap(Register obj,
diff --git a/deps/v8/src/arm/regexp-macro-assembler-arm.cc b/deps/v8/src/arm/regexp-macro-assembler-arm.cc
index da7afee3fb..f05cba521e 100644
--- a/deps/v8/src/arm/regexp-macro-assembler-arm.cc
+++ b/deps/v8/src/arm/regexp-macro-assembler-arm.cc
@@ -122,7 +122,7 @@ RegExpMacroAssemblerARM::RegExpMacroAssemblerARM(
int registers_to_save,
Zone* zone)
: NativeRegExpMacroAssembler(zone),
- masm_(new MacroAssembler(Isolate::Current(), NULL, kRegExpCodeSize)),
+ masm_(new MacroAssembler(zone->isolate(), NULL, kRegExpCodeSize)),
mode_(mode),
num_registers_(registers_to_save),
num_saved_registers_(registers_to_save),
@@ -235,54 +235,6 @@ void RegExpMacroAssemblerARM::CheckCharacterLT(uc16 limit, Label* on_less) {
}
-void RegExpMacroAssemblerARM::CheckCharacters(Vector<const uc16> str,
- int cp_offset,
- Label* on_failure,
- bool check_end_of_string) {
- if (on_failure == NULL) {
- // Instead of inlining a backtrack for each test, (re)use the global
- // backtrack target.
- on_failure = &backtrack_label_;
- }
-
- if (check_end_of_string) {
- // Is last character of required match inside string.
- CheckPosition(cp_offset + str.length() - 1, on_failure);
- }
-
- __ add(r0, end_of_input_address(), Operand(current_input_offset()));
- if (cp_offset != 0) {
- int byte_offset = cp_offset * char_size();
- __ add(r0, r0, Operand(byte_offset));
- }
-
- // r0 : Address of characters to match against str.
- int stored_high_byte = 0;
- for (int i = 0; i < str.length(); i++) {
- if (mode_ == ASCII) {
- __ ldrb(r1, MemOperand(r0, char_size(), PostIndex));
- ASSERT(str[i] <= String::kMaxOneByteCharCode);
- __ cmp(r1, Operand(str[i]));
- } else {
- __ ldrh(r1, MemOperand(r0, char_size(), PostIndex));
- uc16 match_char = str[i];
- int match_high_byte = (match_char >> 8);
- if (match_high_byte == 0) {
- __ cmp(r1, Operand(str[i]));
- } else {
- if (match_high_byte != stored_high_byte) {
- __ mov(r2, Operand(match_high_byte));
- stored_high_byte = match_high_byte;
- }
- __ add(r3, r2, Operand(match_char & 0xff));
- __ cmp(r1, r3);
- }
- }
- BranchOrBacktrack(ne, on_failure);
- }
-}
-
-
void RegExpMacroAssemblerARM::CheckGreedyLoop(Label* on_equal) {
__ ldr(r0, MemOperand(backtrack_stackpointer(), 0));
__ cmp(current_input_offset(), r0);
@@ -556,7 +508,7 @@ bool RegExpMacroAssemblerARM::CheckSpecialCharacterClass(uc16 type,
case 'd':
// Match ASCII digits ('0'..'9')
__ sub(r0, current_character(), Operand('0'));
- __ cmp(current_character(), Operand('9' - '0'));
+ __ cmp(r0, Operand('9' - '0'));
BranchOrBacktrack(hi, on_no_match);
return true;
case 'D':
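
The one-line fix above compares the subtracted value in r0 rather than the raw character: subtract-then-unsigned-compare folds the '0'..'9' range test into a single check. JavaScript rendering of the corrected logic, with isDigit as a hypothetical helper:

    function isDigit(c) {
      var d = c.charCodeAt(0) - 48;  // sub r0, current_character, '0'
      return d >>> 0 <= 9;           // unsigned compare; 'hi' means no match
    }
    isDigit('7');  // true
    isDigit('/');  // false: d is -1, which wraps to a huge unsigned value
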
@@ -917,9 +869,8 @@ Handle<HeapObject> RegExpMacroAssemblerARM::GetCode(Handle<String> source) {
CodeDesc code_desc;
masm_->GetCode(&code_desc);
- Handle<Code> code = FACTORY->NewCode(code_desc,
- Code::ComputeFlags(Code::REGEXP),
- masm_->CodeObject());
+ Handle<Code> code = isolate()->factory()->NewCode(
+ code_desc, Code::ComputeFlags(Code::REGEXP), masm_->CodeObject());
PROFILE(Isolate::Current(), RegExpCodeCreateEvent(*code, *source));
return Handle<HeapObject>::cast(code);
}
diff --git a/deps/v8/src/arm/regexp-macro-assembler-arm.h b/deps/v8/src/arm/regexp-macro-assembler-arm.h
index 921d8f5474..1825752ebc 100644
--- a/deps/v8/src/arm/regexp-macro-assembler-arm.h
+++ b/deps/v8/src/arm/regexp-macro-assembler-arm.h
@@ -53,10 +53,6 @@ class RegExpMacroAssemblerARM: public NativeRegExpMacroAssembler {
Label* on_equal);
virtual void CheckCharacterGT(uc16 limit, Label* on_greater);
virtual void CheckCharacterLT(uc16 limit, Label* on_less);
- virtual void CheckCharacters(Vector<const uc16> str,
- int cp_offset,
- Label* on_failure,
- bool check_end_of_string);
// A "greedy loop" is a loop that is both greedy and with a simple
// body. It has a particularly simple implementation.
virtual void CheckGreedyLoop(Label* on_tos_equals_current_position);
diff --git a/deps/v8/src/arm/stub-cache-arm.cc b/deps/v8/src/arm/stub-cache-arm.cc
index b0de014511..3595b5233f 100644
--- a/deps/v8/src/arm/stub-cache-arm.cc
+++ b/deps/v8/src/arm/stub-cache-arm.cc
@@ -462,7 +462,7 @@ void StubCompiler::GenerateStoreTransition(MacroAssembler* masm,
// Check that the map of the object hasn't changed.
__ CheckMap(receiver_reg, scratch1, Handle<Map>(object->map()), miss_label,
- DO_SMI_CHECK, REQUIRE_EXACT_MAP);
+ DO_SMI_CHECK);
// Perform global security token check if needed.
if (object->IsJSGlobalProxy()) {
@@ -581,6 +581,8 @@ void StubCompiler::GenerateStoreTransition(MacroAssembler* masm,
index -= object->map()->inobject_properties();
// TODO(verwaest): Share this code as a code stub.
+ SmiCheck smi_check = representation.IsTagged()
+ ? INLINE_SMI_CHECK : OMIT_SMI_CHECK;
if (index < 0) {
// Set the property straight into the object.
int offset = object->map()->instance_size() + (index * kPointerSize);
@@ -606,7 +608,9 @@ void StubCompiler::GenerateStoreTransition(MacroAssembler* masm,
name_reg,
scratch1,
kLRHasNotBeenSaved,
- kDontSaveFPRegs);
+ kDontSaveFPRegs,
+ EMIT_REMEMBERED_SET,
+ smi_check);
}
} else {
// Write to the properties array.
@@ -636,7 +640,9 @@ void StubCompiler::GenerateStoreTransition(MacroAssembler* masm,
name_reg,
receiver_reg,
kLRHasNotBeenSaved,
- kDontSaveFPRegs);
+ kDontSaveFPRegs,
+ EMIT_REMEMBERED_SET,
+ smi_check);
}
}
@@ -665,7 +671,7 @@ void StubCompiler::GenerateStoreField(MacroAssembler* masm,
// Check that the map of the object hasn't changed.
__ CheckMap(receiver_reg, scratch1, Handle<Map>(object->map()), miss_label,
- DO_SMI_CHECK, ALLOW_ELEMENT_TRANSITION_MAPS);
+ DO_SMI_CHECK);
// Perform global security token check if needed.
if (object->IsJSGlobalProxy()) {
@@ -723,6 +729,8 @@ void StubCompiler::GenerateStoreField(MacroAssembler* masm,
}
// TODO(verwaest): Share this code as a code stub.
+ SmiCheck smi_check = representation.IsTagged()
+ ? INLINE_SMI_CHECK : OMIT_SMI_CHECK;
if (index < 0) {
// Set the property straight into the object.
int offset = object->map()->instance_size() + (index * kPointerSize);
@@ -740,7 +748,9 @@ void StubCompiler::GenerateStoreField(MacroAssembler* masm,
name_reg,
scratch1,
kLRHasNotBeenSaved,
- kDontSaveFPRegs);
+ kDontSaveFPRegs,
+ EMIT_REMEMBERED_SET,
+ smi_check);
}
} else {
// Write to the properties array.
@@ -762,7 +772,9 @@ void StubCompiler::GenerateStoreField(MacroAssembler* masm,
name_reg,
receiver_reg,
kLRHasNotBeenSaved,
- kDontSaveFPRegs);
+ kDontSaveFPRegs,
+ EMIT_REMEMBERED_SET,
+ smi_check);
}
}
@@ -881,11 +893,12 @@ static void GenerateFastApiDirectCall(MacroAssembler* masm,
// -- sp[4] : callee JS function
// -- sp[8] : call data
// -- sp[12] : isolate
- // -- sp[16] : ReturnValue
- // -- sp[20] : last JS argument
+ // -- sp[16] : ReturnValue default value
+ // -- sp[20] : ReturnValue
+ // -- sp[24] : last JS argument
// -- ...
- // -- sp[(argc + 4) * 4] : first JS argument
- // -- sp[(argc + 5) * 4] : receiver
+ // -- sp[(argc + 5) * 4] : first JS argument
+ // -- sp[(argc + 6) * 4] : receiver
// -----------------------------------
// Get the function and setup the context.
Handle<JSFunction> function = optimization.constant_function();
@@ -902,13 +915,14 @@ static void GenerateFastApiDirectCall(MacroAssembler* masm,
__ Move(r6, call_data);
}
__ mov(r7, Operand(ExternalReference::isolate_address(masm->isolate())));
- // Store JS function, call data, isolate and ReturnValue.
+ // Store JS function, call data, isolate, ReturnValue default and ReturnValue.
__ stm(ib, sp, r5.bit() | r6.bit() | r7.bit());
__ LoadRoot(r5, Heap::kUndefinedValueRootIndex);
__ str(r5, MemOperand(sp, 4 * kPointerSize));
+ __ str(r5, MemOperand(sp, 5 * kPointerSize));
// Prepare arguments.
- __ add(r2, sp, Operand(4 * kPointerSize));
+ __ add(r2, sp, Operand(5 * kPointerSize));
// Allocate the v8::Arguments structure in the arguments' space since
// it's not controlled by GC.
@@ -1247,8 +1261,7 @@ Register StubCompiler::CheckPrototypes(Handle<JSObject> object,
if (!current.is_identical_to(first) || check == CHECK_ALL_MAPS) {
Handle<Map> current_map(current->map());
// CheckMap implicitly loads the map of |reg| into |map_reg|.
- __ CheckMap(reg, map_reg, current_map, miss, DONT_DO_SMI_CHECK,
- ALLOW_ELEMENT_TRANSITION_MAPS);
+ __ CheckMap(reg, map_reg, current_map, miss, DONT_DO_SMI_CHECK);
} else {
__ ldr(map_reg, FieldMemOperand(reg, HeapObject::kMapOffset));
}
@@ -1285,7 +1298,7 @@ Register StubCompiler::CheckPrototypes(Handle<JSObject> object,
if (!holder.is_identical_to(first) || check == CHECK_ALL_MAPS) {
// Check the holder map.
__ CheckMap(reg, scratch1, Handle<Map>(holder->map()), miss,
- DONT_DO_SMI_CHECK, ALLOW_ELEMENT_TRANSITION_MAPS);
+ DONT_DO_SMI_CHECK);
}
// Perform security check for access to the global object.
@@ -1422,10 +1435,12 @@ void BaseLoadStubCompiler::GenerateLoadCallback(
__ Move(scratch3(), Handle<Object>(callback->data(), isolate()));
}
__ Push(reg, scratch3());
- __ mov(scratch3(),
+ __ LoadRoot(scratch3(), Heap::kUndefinedValueRootIndex);
+ __ mov(scratch4(), scratch3());
+ __ Push(scratch3(), scratch4());
+ __ mov(scratch4(),
Operand(ExternalReference::isolate_address(isolate())));
- __ LoadRoot(scratch4(), Heap::kUndefinedValueRootIndex);
- __ Push(scratch3(), scratch4(), name());
+ __ Push(scratch4(), name());
__ mov(r0, sp); // r0 = Handle<Name>
const int kApiStackSpace = 1;
@@ -1451,7 +1466,7 @@ void BaseLoadStubCompiler::GenerateLoadCallback(
__ CallApiFunctionAndReturn(ref,
kStackUnwindSpace,
returns_handle,
- 3);
+ 5);
}
@@ -2797,7 +2812,7 @@ Handle<Code> StoreStubCompiler::CompileStoreInterceptor(
// Check that the map of the object hasn't changed.
__ CheckMap(receiver(), scratch1(), Handle<Map>(object->map()), &miss,
- DO_SMI_CHECK, ALLOW_ELEMENT_TRANSITION_MAPS);
+ DO_SMI_CHECK);
// Perform global security token check if needed.
if (object->IsJSGlobalProxy()) {
@@ -3080,151 +3095,6 @@ Handle<Code> KeyedStoreStubCompiler::CompileStorePolymorphic(
}
-Handle<Code> ConstructStubCompiler::CompileConstructStub(
- Handle<JSFunction> function) {
- // ----------- S t a t e -------------
- // -- r0 : argc
- // -- r1 : constructor
- // -- lr : return address
- // -- [sp] : last argument
- // -----------------------------------
- Label generic_stub_call;
-
- // Use r7 for holding undefined which is used in several places below.
- __ LoadRoot(r7, Heap::kUndefinedValueRootIndex);
-
-#ifdef ENABLE_DEBUGGER_SUPPORT
- // Check to see whether there are any break points in the function code. If
- // there are jump to the generic constructor stub which calls the actual
- // code for the function thereby hitting the break points.
- __ ldr(r2, FieldMemOperand(r1, JSFunction::kSharedFunctionInfoOffset));
- __ ldr(r2, FieldMemOperand(r2, SharedFunctionInfo::kDebugInfoOffset));
- __ cmp(r2, r7);
- __ b(ne, &generic_stub_call);
-#endif
-
- // Load the initial map and verify that it is in fact a map.
- // r1: constructor function
- // r7: undefined
- __ ldr(r2, FieldMemOperand(r1, JSFunction::kPrototypeOrInitialMapOffset));
- __ JumpIfSmi(r2, &generic_stub_call);
- __ CompareObjectType(r2, r3, r4, MAP_TYPE);
- __ b(ne, &generic_stub_call);
-
-#ifdef DEBUG
- // Cannot construct functions this way.
- // r0: argc
- // r1: constructor function
- // r2: initial map
- // r7: undefined
- __ CompareInstanceType(r2, r3, JS_FUNCTION_TYPE);
- __ Check(ne, "Function constructed by construct stub.");
-#endif
-
- // Now allocate the JSObject in new space.
- // r0: argc
- // r1: constructor function
- // r2: initial map
- // r7: undefined
- ASSERT(function->has_initial_map());
- __ ldrb(r3, FieldMemOperand(r2, Map::kInstanceSizeOffset));
-#ifdef DEBUG
- int instance_size = function->initial_map()->instance_size();
- __ cmp(r3, Operand(instance_size >> kPointerSizeLog2));
- __ Check(eq, "Instance size of initial map changed.");
-#endif
- __ Allocate(r3, r4, r5, r6, &generic_stub_call, SIZE_IN_WORDS);
-
- // Allocated the JSObject, now initialize the fields. Map is set to initial
- // map and properties and elements are set to empty fixed array.
- // r0: argc
- // r1: constructor function
- // r2: initial map
- // r3: object size (in words)
- // r4: JSObject (not tagged)
- // r7: undefined
- __ LoadRoot(r6, Heap::kEmptyFixedArrayRootIndex);
- __ mov(r5, r4);
- ASSERT_EQ(0 * kPointerSize, JSObject::kMapOffset);
- __ str(r2, MemOperand(r5, kPointerSize, PostIndex));
- ASSERT_EQ(1 * kPointerSize, JSObject::kPropertiesOffset);
- __ str(r6, MemOperand(r5, kPointerSize, PostIndex));
- ASSERT_EQ(2 * kPointerSize, JSObject::kElementsOffset);
- __ str(r6, MemOperand(r5, kPointerSize, PostIndex));
-
- // Calculate the location of the first argument. The stack contains only the
- // argc arguments.
- __ add(r1, sp, Operand(r0, LSL, kPointerSizeLog2));
-
- // Fill all the in-object properties with undefined.
- // r0: argc
- // r1: first argument
- // r3: object size (in words)
- // r4: JSObject (not tagged)
- // r5: First in-object property of JSObject (not tagged)
- // r7: undefined
- // Fill the initialized properties with a constant value or a passed argument
- // depending on the this.x = ...; assignment in the function.
- Handle<SharedFunctionInfo> shared(function->shared());
- for (int i = 0; i < shared->this_property_assignments_count(); i++) {
- if (shared->IsThisPropertyAssignmentArgument(i)) {
- Label not_passed, next;
- // Check if the argument assigned to the property is actually passed.
- int arg_number = shared->GetThisPropertyAssignmentArgument(i);
- __ cmp(r0, Operand(arg_number));
- __ b(le, &not_passed);
- // Argument passed - find it on the stack.
- __ ldr(r2, MemOperand(r1, (arg_number + 1) * -kPointerSize));
- __ str(r2, MemOperand(r5, kPointerSize, PostIndex));
- __ b(&next);
- __ bind(&not_passed);
- // Set the property to undefined.
- __ str(r7, MemOperand(r5, kPointerSize, PostIndex));
- __ bind(&next);
- } else {
- // Set the property to the constant value.
- Handle<Object> constant(shared->GetThisPropertyAssignmentConstant(i),
- isolate());
- __ mov(r2, Operand(constant));
- __ str(r2, MemOperand(r5, kPointerSize, PostIndex));
- }
- }
-
- // Fill the unused in-object property fields with undefined.
- for (int i = shared->this_property_assignments_count();
- i < function->initial_map()->inobject_properties();
- i++) {
- __ str(r7, MemOperand(r5, kPointerSize, PostIndex));
- }
-
- // r0: argc
- // r4: JSObject (not tagged)
- // Move argc to r1 and the JSObject to return to r0 and tag it.
- __ mov(r1, r0);
- __ mov(r0, r4);
- __ orr(r0, r0, Operand(kHeapObjectTag));
-
- // r0: JSObject
- // r1: argc
- // Remove caller arguments and receiver from the stack and return.
- __ add(sp, sp, Operand(r1, LSL, kPointerSizeLog2));
- __ add(sp, sp, Operand(kPointerSize));
- Counters* counters = isolate()->counters();
- __ IncrementCounter(counters->constructed_objects(), 1, r1, r2);
- __ IncrementCounter(counters->constructed_objects_stub(), 1, r1, r2);
- __ Jump(lr);
-
- // Jump to the generic stub in case the specialized code cannot handle the
- // construction.
- __ bind(&generic_stub_call);
- Handle<Code> code = isolate()->builtins()->JSConstructStubGeneric();
- __ Jump(code, RelocInfo::CODE_TARGET);
-
- // Return the generated code.
- return GetCode();
-}
-
-
#undef __
#define __ ACCESS_MASM(masm)
diff --git a/deps/v8/src/array.js b/deps/v8/src/array.js
index 599fd5cfe9..5f89ebb7a6 100644
--- a/deps/v8/src/array.js
+++ b/deps/v8/src/array.js
@@ -395,6 +395,23 @@ function ArrayJoin(separator) {
}
+function ObservedArrayPop(n) {
+ n--;
+ var value = this[n];
+
+ EnqueueSpliceRecord(this, n, [value], 0);
+
+ try {
+ BeginPerformSplice(this);
+ delete this[n];
+ this.length = n;
+ } finally {
+ EndPerformSplice(this);
+ }
+
+ return value;
+}
+
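
ObservedArrayPop brackets the mutation with Begin/EndPerformSplice and enqueues a single splice record for the removed element. Assuming the harmony-era observation API of this release (Array.observe, behind a flag), the observable effect would be roughly:

    var a = [1, 2, 3];
    Array.observe(a, function (records) {
      // expect one record shaped like:
      // { type: 'splice', object: a, index: 2, removed: [3], addedCount: 0 }
    });
    a.pop();  // returns 3 and enqueues the record above
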
// Removes the last element from the array and returns it. See
// ECMA-262, section 15.4.4.6.
function ArrayPop() {
@@ -408,6 +425,10 @@ function ArrayPop() {
this.length = n;
return;
}
+
+ if (%IsObserved(this))
+ return ObservedArrayPop.call(this, n);
+
n--;
var value = this[n];
delete this[n];
@@ -420,11 +441,10 @@ function ObservedArrayPush() {
var n = TO_UINT32(this.length);
var m = %_ArgumentsLength();
- EnqueueSpliceRecord(this, n, [], 0, m);
+ EnqueueSpliceRecord(this, n, [], m);
try {
BeginPerformSplice(this);
-
for (var i = 0; i < m; i++) {
this[i+n] = %_Arguments(i);
}
@@ -558,6 +578,22 @@ function ArrayReverse() {
}
+function ObservedArrayShift(len) {
+ var first = this[0];
+
+ EnqueueSpliceRecord(this, 0, [first], 0);
+
+ try {
+ BeginPerformSplice(this);
+ SimpleMove(this, 0, 1, len, 0);
+ this.length = len - 1;
+ } finally {
+ EndPerformSplice(this);
+ }
+
+ return first;
+}
+
function ArrayShift() {
if (IS_NULL_OR_UNDEFINED(this) && !IS_UNDETECTABLE(this)) {
throw MakeTypeError("called_on_null_or_undefined",
@@ -571,9 +607,12 @@ function ArrayShift() {
return;
}
+ if (%IsObserved(this))
+ return ObservedArrayShift.call(this, len);
+
var first = this[0];
- if (IS_ARRAY(this) && !%IsObserved(this)) {
+ if (IS_ARRAY(this)) {
SmartMove(this, 0, 1, len, 0);
} else {
SimpleMove(this, 0, 1, len, 0);
@@ -584,6 +623,25 @@ function ArrayShift() {
return first;
}
+function ObservedArrayUnshift() {
+ var len = TO_UINT32(this.length);
+ var num_arguments = %_ArgumentsLength();
+
+ EnqueueSpliceRecord(this, 0, [], num_arguments);
+
+ try {
+ BeginPerformSplice(this);
+ SimpleMove(this, 0, 0, len, num_arguments);
+ for (var i = 0; i < num_arguments; i++) {
+ this[i] = %_Arguments(i);
+ }
+ this.length = len + num_arguments;
+ } finally {
+ EndPerformSplice(this);
+ }
+
+ return len + num_arguments;
+}
function ArrayUnshift(arg1) { // length == 1
if (IS_NULL_OR_UNDEFINED(this) && !IS_UNDETECTABLE(this)) {
@@ -591,10 +649,13 @@ function ArrayUnshift(arg1) { // length == 1
["Array.prototype.unshift"]);
}
+ if (%IsObserved(this))
+ return ObservedArrayUnshift.apply(this, arguments);
+
var len = TO_UINT32(this.length);
var num_arguments = %_ArgumentsLength();
- if (IS_ARRAY(this) && !%IsObserved(this)) {
+ if (IS_ARRAY(this)) {
SmartMove(this, 0, 0, len, num_arguments);
} else {
SimpleMove(this, 0, 0, len, num_arguments);
@@ -655,52 +716,99 @@ function ArraySlice(start, end) {
}
-function ArraySplice(start, delete_count) {
- if (IS_NULL_OR_UNDEFINED(this) && !IS_UNDETECTABLE(this)) {
- throw MakeTypeError("called_on_null_or_undefined",
- ["Array.prototype.splice"]);
- }
-
- var num_arguments = %_ArgumentsLength();
-
- var len = TO_UINT32(this.length);
- var start_i = TO_INTEGER(start);
-
+function ComputeSpliceStartIndex(start_i, len) {
if (start_i < 0) {
start_i += len;
- if (start_i < 0) start_i = 0;
- } else {
- if (start_i > len) start_i = len;
+ return start_i < 0 ? 0 : start_i;
}
+ return start_i > len ? len : start_i;
+}
+
+
+function ComputeSpliceDeleteCount(delete_count, num_arguments, len, start_i) {
// SpiderMonkey, TraceMonkey and JSC treat the case where no delete count is
// given as a request to delete all the elements from the start.
// This differs from the case of an undefined delete count.
// This does not follow ECMA-262, but we do the same for
// compatibility.
var del_count = 0;
- if (num_arguments == 1) {
- del_count = len - start_i;
- } else {
- del_count = TO_INTEGER(delete_count);
- if (del_count < 0) del_count = 0;
- if (del_count > len - start_i) del_count = len - start_i;
- }
+ if (num_arguments == 1)
+ return len - start_i;
+
+ del_count = TO_INTEGER(delete_count);
+ if (del_count < 0)
+ return 0;
+
+ if (del_count > len - start_i)
+ return len - start_i;
+
+ return del_count;
+}
+
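
The two helpers factored out above implement the spec's clamping: a negative start counts back from the end and clamps at 0, and the delete count clamps to the range [0, len - start]. Their combined effect at the language level:

    var a = [0, 1, 2];
    a.splice(-10, 99);  // start clamps to 0, delete count clamps to 3
    a.length;           // 0: everything was removed
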
+function ObservedArraySplice(start, delete_count) {
+ var num_arguments = %_ArgumentsLength();
+ var len = TO_UINT32(this.length);
+ var start_i = ComputeSpliceStartIndex(TO_INTEGER(start), len);
+ var del_count = ComputeSpliceDeleteCount(delete_count, num_arguments, len,
+ start_i);
var deleted_elements = [];
deleted_elements.length = del_count;
+ var num_elements_to_add = num_arguments > 2 ? num_arguments - 2 : 0;
+
+ try {
+ BeginPerformSplice(this);
- // Number of elements to add.
- var num_additional_args = 0;
- if (num_arguments > 2) {
- num_additional_args = num_arguments - 2;
+ SimpleSlice(this, start_i, del_count, len, deleted_elements);
+ SimpleMove(this, start_i, del_count, len, num_elements_to_add);
+
+ // Insert the arguments into the resulting array in
+ // place of the deleted elements.
+ var i = start_i;
+ var arguments_index = 2;
+ var arguments_length = %_ArgumentsLength();
+ while (arguments_index < arguments_length) {
+ this[i++] = %_Arguments(arguments_index++);
+ }
+ this.length = len - del_count + num_elements_to_add;
+
+ } finally {
+ EndPerformSplice(this);
+ if (deleted_elements.length || num_elements_to_add) {
+ EnqueueSpliceRecord(this,
+ start_i,
+ deleted_elements.slice(),
+ num_elements_to_add);
+ }
}
- var use_simple_splice = true;
+ // Return the deleted elements.
+ return deleted_elements;
+}
+
+
+function ArraySplice(start, delete_count) {
+ if (IS_NULL_OR_UNDEFINED(this) && !IS_UNDETECTABLE(this)) {
+ throw MakeTypeError("called_on_null_or_undefined",
+ ["Array.prototype.splice"]);
+ }
+ if (%IsObserved(this))
+ return ObservedArraySplice.apply(this, arguments);
+
+ var num_arguments = %_ArgumentsLength();
+ var len = TO_UINT32(this.length);
+ var start_i = ComputeSpliceStartIndex(TO_INTEGER(start), len);
+ var del_count = ComputeSpliceDeleteCount(delete_count, num_arguments, len,
+ start_i);
+ var deleted_elements = [];
+ deleted_elements.length = del_count;
+ var num_elements_to_add = num_arguments > 2 ? num_arguments - 2 : 0;
+
+ var use_simple_splice = true;
if (IS_ARRAY(this) &&
- !%IsObserved(this) &&
- num_additional_args !== del_count) {
+ num_elements_to_add !== del_count) {
// If we are only deleting/moving a few things near the end of the
// array then the simple version is going to be faster, because it
// doesn't touch most of the array.
@@ -712,10 +820,10 @@ function ArraySplice(start, delete_count) {
if (use_simple_splice) {
SimpleSlice(this, start_i, del_count, len, deleted_elements);
- SimpleMove(this, start_i, del_count, len, num_additional_args);
+ SimpleMove(this, start_i, del_count, len, num_elements_to_add);
} else {
SmartSlice(this, start_i, del_count, len, deleted_elements);
- SmartMove(this, start_i, del_count, len, num_additional_args);
+ SmartMove(this, start_i, del_count, len, num_elements_to_add);
}
// Insert the arguments into the resulting array in
@@ -726,7 +834,7 @@ function ArraySplice(start, delete_count) {
while (arguments_index < arguments_length) {
this[i++] = %_Arguments(arguments_index++);
}
- this.length = len - del_count + num_additional_args;
+ this.length = len - del_count + num_elements_to_add;
// Return the deleted elements.
return deleted_elements;
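The two helpers factored out of ArraySplice above are pure index arithmetic, so their behavior is easy to pin down outside V8. A standalone C++ sketch (not V8 code): plain ints stand in for the TO_INTEGER/TO_UINT32 values, and a bool parameter models the one-argument splice case.

#include <cassert>

// Clamp a possibly-negative start index into [0, len].
int ComputeSpliceStartIndex(int start_i, int len) {
  if (start_i < 0) {
    start_i += len;                       // negative counts from the end
    return start_i < 0 ? 0 : start_i;
  }
  return start_i > len ? len : start_i;
}

// With exactly one argument, splice deletes everything from start (the
// SpiderMonkey/JSC-compatible behavior the comment above describes);
// otherwise the count is clamped into [0, len - start_i].
int ComputeSpliceDeleteCount(bool count_given, int delete_count,
                             int len, int start_i) {
  if (!count_given) return len - start_i;
  if (delete_count < 0) return 0;
  if (delete_count > len - start_i) return len - start_i;
  return delete_count;
}

int main() {
  assert(ComputeSpliceStartIndex(-2, 5) == 3);            // from the end
  assert(ComputeSpliceStartIndex(-9, 5) == 0);            // clamped at 0
  assert(ComputeSpliceStartIndex(7, 5) == 5);             // clamped at len
  assert(ComputeSpliceDeleteCount(false, 0, 5, 3) == 2);  // delete to end
  assert(ComputeSpliceDeleteCount(true, 99, 5, 3) == 2);  // clamped
  assert(ComputeSpliceDeleteCount(true, -1, 5, 3) == 0);
  return 0;
}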
@@ -1001,11 +1109,13 @@ function ArraySort(comparefn) {
max_prototype_element = CopyFromPrototype(this, length);
}
- var num_non_undefined = %RemoveArrayHoles(this, length);
+ var num_non_undefined = %IsObserved(this) ?
+ -1 : %RemoveArrayHoles(this, length);
+
if (num_non_undefined == -1) {
- // There were indexed accessors in the array. Move array holes and
- // undefineds to the end using a Javascript function that is safe
- // in the presence of accessors.
+ // The array is observed, or there were indexed accessors in the array.
+    // Move array holes and undefineds to the end using a JavaScript function
+ // that is safe in the presence of accessors and is observable.
num_non_undefined = SafeRemoveArrayHoles(this);
}
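All of the Observed* variants above share one shape: bracket the raw element moves with BeginPerformSplice/EndPerformSplice so observers see a single atomic change, and guarantee the end-of-splice notification fires even if the mutation throws, which is what the try/finally gives the JavaScript code. A minimal C++ rendering of that shape, with invented stand-ins (SpliceRecord, PerformSpliceScope, a global observer list) for the runtime hooks, using a destructor where the JavaScript uses finally:

#include <functional>
#include <iostream>
#include <string>
#include <vector>

// Hypothetical stand-in for V8's change records.
struct SpliceRecord {
  size_t index;
  size_t removed;
  size_t added;
};

std::vector<std::function<void(const SpliceRecord&)>> g_observers;

// Mirrors try { BeginPerformSplice } ... finally { EndPerformSplice }:
// delivery happens in the destructor, so it runs even on an exception.
class PerformSpliceScope {
 public:
  explicit PerformSpliceScope(SpliceRecord record) : record_(record) {}
  ~PerformSpliceScope() {
    for (auto& observer : g_observers) observer(record_);
  }
 private:
  SpliceRecord record_;
};

void ObservedShift(std::vector<std::string>& elements) {
  PerformSpliceScope scope({/*index=*/0, /*removed=*/1, /*added=*/0});
  elements.erase(elements.begin());  // the SimpleMove equivalent
}

int main() {
  g_observers.push_back([](const SpliceRecord& r) {
    std::cout << "splice at " << r.index << ": -" << r.removed
              << " +" << r.added << "\n";
  });
  std::vector<std::string> v = {"a", "b", "c"};
  ObservedShift(v);  // prints: splice at 0: -1 +0
  return 0;
}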
diff --git a/deps/v8/src/assert-scope.h b/deps/v8/src/assert-scope.h
new file mode 100644
index 0000000000..e2ec542a77
--- /dev/null
+++ b/deps/v8/src/assert-scope.h
@@ -0,0 +1,168 @@
+// Copyright 2013 the V8 project authors. All rights reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following
+// disclaimer in the documentation and/or other materials provided
+// with the distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived
+// from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+#ifndef V8_ASSERT_SCOPE_H_
+#define V8_ASSERT_SCOPE_H_
+
+#include "allocation.h"
+#include "platform.h"
+
+namespace v8 {
+namespace internal {
+
+class Isolate;
+
+enum PerThreadAssertType {
+ HEAP_ALLOCATION_ASSERT,
+ HANDLE_ALLOCATION_ASSERT,
+ HANDLE_DEREFERENCE_ASSERT,
+ DEFERRED_HANDLE_DEREFERENCE_ASSERT,
+ LAST_PER_THREAD_ASSERT_TYPE
+};
+
+
+#ifdef DEBUG
+class PerThreadAssertData {
+ public:
+ PerThreadAssertData() : nesting_level_(0) {
+ for (int i = 0; i < LAST_PER_THREAD_ASSERT_TYPE; i++) {
+ assert_states_[i] = true;
+ }
+ }
+
+ void set(PerThreadAssertType type, bool allow) {
+ assert_states_[type] = allow;
+ }
+
+ bool get(PerThreadAssertType type) const {
+ return assert_states_[type];
+ }
+
+ void increment_level() { ++nesting_level_; }
+ bool decrement_level() { return --nesting_level_ == 0; }
+
+ private:
+ bool assert_states_[LAST_PER_THREAD_ASSERT_TYPE];
+ int nesting_level_;
+
+ DISALLOW_COPY_AND_ASSIGN(PerThreadAssertData);
+};
+#endif // DEBUG
+
+
+class PerThreadAssertScopeBase {
+#ifdef DEBUG
+
+ protected:
+ PerThreadAssertScopeBase() {
+ data_ = AssertData();
+ data_->increment_level();
+ }
+
+ ~PerThreadAssertScopeBase() {
+ if (!data_->decrement_level()) return;
+ for (int i = 0; i < LAST_PER_THREAD_ASSERT_TYPE; i++) {
+ ASSERT(data_->get(static_cast<PerThreadAssertType>(i)));
+ }
+ delete data_;
+ Thread::SetThreadLocal(thread_local_key, NULL);
+ }
+
+ static PerThreadAssertData* AssertData() {
+ PerThreadAssertData* data = reinterpret_cast<PerThreadAssertData*>(
+ Thread::GetThreadLocal(thread_local_key));
+ if (data == NULL) {
+ data = new PerThreadAssertData();
+ Thread::SetThreadLocal(thread_local_key, data);
+ }
+ return data;
+ }
+
+ static Thread::LocalStorageKey thread_local_key;
+ PerThreadAssertData* data_;
+ friend class Isolate;
+#endif // DEBUG
+};
+
+
+
+template <PerThreadAssertType type, bool allow>
+class PerThreadAssertScope : public PerThreadAssertScopeBase {
+ public:
+#ifndef DEBUG
+ PerThreadAssertScope() { }
+ static void SetIsAllowed(bool is_allowed) { }
+#else
+ PerThreadAssertScope() {
+ old_state_ = data_->get(type);
+ data_->set(type, allow);
+ }
+
+ ~PerThreadAssertScope() { data_->set(type, old_state_); }
+
+ static bool IsAllowed() { return AssertData()->get(type); }
+
+ private:
+ bool old_state_;
+#endif
+};
+
+// Scope to document where we do not expect handles to be created.
+typedef PerThreadAssertScope<HANDLE_ALLOCATION_ASSERT, false>
+ DisallowHandleAllocation;
+
+// Scope to introduce an exception to DisallowHandleAllocation.
+typedef PerThreadAssertScope<HANDLE_ALLOCATION_ASSERT, true>
+ AllowHandleAllocation;
+
+// Scope to document where we do not expect any allocation and GC.
+typedef PerThreadAssertScope<HEAP_ALLOCATION_ASSERT, false>
+ DisallowHeapAllocation;
+
+// Scope to introduce an exception to DisallowHeapAllocation.
+typedef PerThreadAssertScope<HEAP_ALLOCATION_ASSERT, true>
+ AllowHeapAllocation;
+
+// Scope to document where we do not expect any handle dereferences.
+typedef PerThreadAssertScope<HANDLE_DEREFERENCE_ASSERT, false>
+ DisallowHandleDereference;
+
+// Scope to introduce an exception to DisallowHandleDereference.
+typedef PerThreadAssertScope<HANDLE_DEREFERENCE_ASSERT, true>
+ AllowHandleDereference;
+
+// Scope to document where we do not expect deferred handles to be dereferenced.
+typedef PerThreadAssertScope<DEFERRED_HANDLE_DEREFERENCE_ASSERT, false>
+ DisallowDeferredHandleDereference;
+
+// Scope to introduce an exception to DisallowDeferredHandleDereference.
+typedef PerThreadAssertScope<DEFERRED_HANDLE_DEREFERENCE_ASSERT, true>
+ AllowDeferredHandleDereference;
+
+} } // namespace v8::internal
+
+#endif // V8_ASSERT_SCOPE_H_
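The new header is a debug-only RAII pattern: a lazily created per-thread table of booleans, where each scope saves the previous flag in its constructor and restores it in its destructor, so Disallow and Allow scopes nest arbitrarily. A self-contained miniature under the assumption that C++11 thread_local replaces V8's Thread::GetThreadLocal plumbing, with only two assert types:

#include <cassert>

enum AssertType { HEAP_ALLOCATION, HANDLE_DEREFERENCE, LAST_ASSERT_TYPE };

// One flag per assert type, per thread; everything allowed by default.
thread_local bool g_assert_states[LAST_ASSERT_TYPE] = {true, true};

template <AssertType type, bool allow>
class PerThreadAssertScope {
 public:
  PerThreadAssertScope() : old_state_(g_assert_states[type]) {
    g_assert_states[type] = allow;       // impose the new policy
  }
  ~PerThreadAssertScope() { g_assert_states[type] = old_state_; }
  static bool IsAllowed() { return g_assert_states[type]; }
 private:
  bool old_state_;                       // restored on scope exit
};

using DisallowHeapAllocation = PerThreadAssertScope<HEAP_ALLOCATION, false>;
using AllowHeapAllocation = PerThreadAssertScope<HEAP_ALLOCATION, true>;

int main() {
  assert(DisallowHeapAllocation::IsAllowed());
  {
    DisallowHeapAllocation no_gc;        // e.g. while raw pointers are live
    assert(!DisallowHeapAllocation::IsAllowed());
    {
      AllowHeapAllocation temporary_exception;
      assert(AllowHeapAllocation::IsAllowed());
    }                                    // exception ends, ban is restored
    assert(!DisallowHeapAllocation::IsAllowed());
  }
  assert(DisallowHeapAllocation::IsAllowed());  // fully unwound
  return 0;
}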
diff --git a/deps/v8/src/ast.cc b/deps/v8/src/ast.cc
index d241355fc1..a5d1e2df85 100644
--- a/deps/v8/src/ast.cc
+++ b/deps/v8/src/ast.cc
@@ -30,6 +30,7 @@
#include <cmath> // For isfinite.
#include "builtins.h"
#include "code-stubs.h"
+#include "contexts.h"
#include "conversions.h"
#include "hashmap.h"
#include "parser.h"
@@ -181,9 +182,9 @@ LanguageMode FunctionLiteral::language_mode() const {
}
-ObjectLiteral::Property::Property(Literal* key,
- Expression* value,
- Isolate* isolate) {
+ObjectLiteralProperty::ObjectLiteralProperty(Literal* key,
+ Expression* value,
+ Isolate* isolate) {
emit_store_ = true;
key_ = key;
value_ = value;
@@ -201,7 +202,8 @@ ObjectLiteral::Property::Property(Literal* key,
}
-ObjectLiteral::Property::Property(bool is_getter, FunctionLiteral* value) {
+ObjectLiteralProperty::ObjectLiteralProperty(bool is_getter,
+ FunctionLiteral* value) {
emit_store_ = true;
value_ = value;
kind_ = is_getter ? GETTER : SETTER;
@@ -415,6 +417,16 @@ bool FunctionDeclaration::IsInlineable() const {
// ----------------------------------------------------------------------------
// Recording of type feedback
+void ForInStatement::RecordTypeFeedback(TypeFeedbackOracle* oracle) {
+ for_in_type_ = static_cast<ForInType>(oracle->ForInType(this));
+}
+
+
+void Expression::RecordToBooleanTypeFeedback(TypeFeedbackOracle* oracle) {
+ to_boolean_types_ = oracle->ToBooleanTypes(test_id());
+}
+
+
void Property::RecordTypeFeedback(TypeFeedbackOracle* oracle,
Zone* zone) {
// Record type feedback from the oracle in the AST.
@@ -486,6 +498,7 @@ void CountOperation::RecordTypeFeedback(TypeFeedbackOracle* oracle,
oracle->CollectKeyedReceiverTypes(id, &receiver_types_);
}
store_mode_ = oracle->GetStoreMode(id);
+ type_ = oracle->IncrementType(this);
}
@@ -575,6 +588,32 @@ bool Call::ComputeGlobalTarget(Handle<GlobalObject> global,
}
+Handle<JSObject> Call::GetPrototypeForPrimitiveCheck(
+ CheckType check, Isolate* isolate) {
+ v8::internal::Context* native_context = isolate->context()->native_context();
+ JSFunction* function = NULL;
+ switch (check) {
+ case RECEIVER_MAP_CHECK:
+ UNREACHABLE();
+ break;
+ case STRING_CHECK:
+ function = native_context->string_function();
+ break;
+ case SYMBOL_CHECK:
+ function = native_context->symbol_function();
+ break;
+ case NUMBER_CHECK:
+ function = native_context->number_function();
+ break;
+ case BOOLEAN_CHECK:
+ function = native_context->boolean_function();
+ break;
+ }
+ ASSERT(function != NULL);
+ return Handle<JSObject>(JSObject::cast(function->instance_prototype()));
+}
+
+
void Call::RecordTypeFeedback(TypeFeedbackOracle* oracle,
CallKind call_kind) {
is_monomorphic_ = oracle->CallIsMonomorphic(this);
@@ -606,8 +645,7 @@ void Call::RecordTypeFeedback(TypeFeedbackOracle* oracle,
map = receiver_types_.at(0);
} else {
ASSERT(check_type_ != RECEIVER_MAP_CHECK);
- holder_ = Handle<JSObject>(
- oracle->GetPrototypeForPrimitiveCheck(check_type_));
+ holder_ = GetPrototypeForPrimitiveCheck(check_type_, oracle->isolate());
map = Handle<Map>(holder_->map());
}
is_monomorphic_ = ComputeTarget(map, name);
@@ -617,10 +655,14 @@ void Call::RecordTypeFeedback(TypeFeedbackOracle* oracle,
void CallNew::RecordTypeFeedback(TypeFeedbackOracle* oracle) {
+ allocation_info_cell_ = oracle->GetCallNewAllocationInfoCell(this);
is_monomorphic_ = oracle->CallNewIsMonomorphic(this);
if (is_monomorphic_) {
target_ = oracle->GetCallNewTarget(this);
- elements_kind_ = oracle->GetCallNewElementsKind(this);
+ Object* value = allocation_info_cell_->value();
+ if (value->IsSmi()) {
+ elements_kind_ = static_cast<ElementsKind>(Smi::cast(value)->value());
+ }
}
}
@@ -632,6 +674,31 @@ void ObjectLiteral::Property::RecordTypeFeedback(TypeFeedbackOracle* oracle) {
}
+void UnaryOperation::RecordTypeFeedback(TypeFeedbackOracle* oracle) {
+ type_ = oracle->UnaryType(this);
+}
+
+
+void BinaryOperation::RecordTypeFeedback(TypeFeedbackOracle* oracle) {
+ oracle->BinaryType(this, &left_type_, &right_type_, &result_type_,
+ &has_fixed_right_arg_, &fixed_right_arg_value_);
+}
+
+
+void CompareOperation::RecordTypeFeedback(TypeFeedbackOracle* oracle) {
+ oracle->CompareType(this, &left_type_, &right_type_, &overall_type_);
+ if (!overall_type_.IsUninitialized() && overall_type_.IsNonPrimitive() &&
+ (op_ == Token::EQ || op_ == Token::EQ_STRICT)) {
+ map_ = oracle->GetCompareMap(this);
+ } else {
+ // May be a compare to nil.
+ map_ = oracle->CompareNilMonomorphicReceiverType(this);
+ if (op_ != Token::EQ_STRICT)
+ compare_nil_types_ = oracle->CompareNilTypes(this);
+ }
+}
+
+
// ----------------------------------------------------------------------------
// Implementation of AstVisitor
@@ -723,12 +790,12 @@ Interval RegExpQuantifier::CaptureRegisters() {
bool RegExpAssertion::IsAnchoredAtStart() {
- return type() == RegExpAssertion::START_OF_INPUT;
+ return assertion_type() == RegExpAssertion::START_OF_INPUT;
}
bool RegExpAssertion::IsAnchoredAtEnd() {
- return type() == RegExpAssertion::END_OF_INPUT;
+ return assertion_type() == RegExpAssertion::END_OF_INPUT;
}
@@ -860,7 +927,7 @@ void* RegExpUnparser::VisitCharacterClass(RegExpCharacterClass* that,
void* RegExpUnparser::VisitAssertion(RegExpAssertion* that, void* data) {
- switch (that->type()) {
+ switch (that->assertion_type()) {
case RegExpAssertion::START_OF_INPUT:
stream()->Add("@^i");
break;
@@ -1087,6 +1154,7 @@ DONT_SELFOPTIMIZE_NODE(DoWhileStatement)
DONT_SELFOPTIMIZE_NODE(WhileStatement)
DONT_SELFOPTIMIZE_NODE(ForStatement)
DONT_SELFOPTIMIZE_NODE(ForInStatement)
+DONT_SELFOPTIMIZE_NODE(ForOfStatement)
DONT_CACHE_NODE(ModuleLiteral)
@@ -1115,6 +1183,7 @@ void AstConstructionVisitor::VisitCallRuntime(CallRuntime* node) {
Handle<String> Literal::ToString() {
if (handle_->IsString()) return Handle<String>::cast(handle_);
+ Factory* factory = Isolate::Current()->factory();
ASSERT(handle_->IsNumber());
char arr[100];
Vector<char> buffer(arr, ARRAY_SIZE(arr));
@@ -1126,7 +1195,7 @@ Handle<String> Literal::ToString() {
} else {
str = DoubleToCString(handle_->Number(), buffer);
}
- return FACTORY->NewStringFromAscii(CStrVector(str));
+ return factory->NewStringFromAscii(CStrVector(str));
}
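Call::GetPrototypeForPrimitiveCheck above is a total switch from check type to the matching wrapper function's prototype, with the map-check case marked UNREACHABLE and a trailing assert that some case assigned a result. The same shape as a standalone sketch, with hypothetical Prototype objects in place of the native context's functions:

#include <cassert>
#include <cstdlib>

enum CheckType { RECEIVER_MAP_CHECK, STRING_CHECK, SYMBOL_CHECK,
                 NUMBER_CHECK, BOOLEAN_CHECK };

struct Prototype { const char* name; };
Prototype g_string_proto{"String.prototype"};
Prototype g_symbol_proto{"Symbol.prototype"};
Prototype g_number_proto{"Number.prototype"};
Prototype g_boolean_proto{"Boolean.prototype"};

// A total switch: adding a CheckType without a case here is a compiler
// warning rather than a silent fallthrough.
Prototype* GetPrototypeForPrimitiveCheck(CheckType check) {
  Prototype* result = nullptr;
  switch (check) {
    case RECEIVER_MAP_CHECK:
      std::abort();  // UNREACHABLE() in the real code
      break;
    case STRING_CHECK:  result = &g_string_proto;  break;
    case SYMBOL_CHECK:  result = &g_symbol_proto;  break;
    case NUMBER_CHECK:  result = &g_number_proto;  break;
    case BOOLEAN_CHECK: result = &g_boolean_proto; break;
  }
  assert(result != nullptr);  // mirrors ASSERT(function != NULL)
  return result;
}

int main() {
  assert(GetPrototypeForPrimitiveCheck(NUMBER_CHECK) == &g_number_proto);
  return 0;
}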
diff --git a/deps/v8/src/ast.h b/deps/v8/src/ast.h
index ad7b119854..219a69bc8e 100644
--- a/deps/v8/src/ast.h
+++ b/deps/v8/src/ast.h
@@ -39,6 +39,8 @@
#include "small-pointer-list.h"
#include "smart-pointers.h"
#include "token.h"
+#include "type-info.h" // TODO(rossberg): this should eventually be removed
+#include "types.h"
#include "utils.h"
#include "variables.h"
#include "interface.h"
@@ -88,6 +90,7 @@ namespace internal {
V(WhileStatement) \
V(ForStatement) \
V(ForInStatement) \
+ V(ForOfStatement) \
V(TryCatchStatement) \
V(TryFinallyStatement) \
V(DebuggerStatement)
@@ -162,9 +165,9 @@ typedef ZoneList<Handle<String> > ZoneStringList;
typedef ZoneList<Handle<Object> > ZoneObjectList;
-#define DECLARE_NODE_TYPE(type) \
- virtual void Accept(AstVisitor* v); \
- virtual AstNode::Type node_type() const { return AstNode::k##type; } \
+#define DECLARE_NODE_TYPE(type) \
+ virtual void Accept(AstVisitor* v); \
+ virtual AstNode::NodeType node_type() const { return AstNode::k##type; } \
template<class> friend class AstNodeFactory;
@@ -196,7 +199,7 @@ class AstProperties BASE_EMBEDDED {
class AstNode: public ZoneObject {
public:
#define DECLARE_TYPE_ENUM(type) k##type,
- enum Type {
+ enum NodeType {
AST_NODE_LIST(DECLARE_TYPE_ENUM)
kInvalid = -1
};
@@ -211,7 +214,7 @@ class AstNode: public ZoneObject {
virtual ~AstNode() { }
virtual void Accept(AstVisitor* v) = 0;
- virtual Type node_type() const = 0;
+ virtual NodeType node_type() const = 0;
// Type testing & conversion functions overridden by concrete subclasses.
#define DECLARE_NODE_FUNCTIONS(type) \
@@ -353,6 +356,9 @@ class Expression: public AstNode {
// True iff the expression is the undefined literal.
bool IsUndefinedLiteral();
+ // Expression type
+ Handle<Type> type() { return type_; }
+
// Type feedback information for assignments and properties.
virtual bool IsMonomorphic() {
UNREACHABLE();
@@ -373,15 +379,23 @@ class Expression: public AstNode {
return STANDARD_STORE;
}
+ // TODO(rossberg): this should move to its own AST node eventually.
+ void RecordToBooleanTypeFeedback(TypeFeedbackOracle* oracle);
+ byte to_boolean_types() const { return to_boolean_types_; }
+
BailoutId id() const { return id_; }
TypeFeedbackId test_id() const { return test_id_; }
protected:
explicit Expression(Isolate* isolate)
- : id_(GetNextId(isolate)),
+ : type_(Type::Any(), isolate),
+ id_(GetNextId(isolate)),
test_id_(GetNextId(isolate)) {}
private:
+ Handle<Type> type_;
+ byte to_boolean_types_;
+
const BailoutId id_;
const TypeFeedbackId test_id_;
};
@@ -389,7 +403,7 @@ class Expression: public AstNode {
class BreakableStatement: public Statement {
public:
- enum Type {
+ enum BreakableType {
TARGET_FOR_ANONYMOUS,
TARGET_FOR_NAMED_ONLY
};
@@ -405,15 +419,18 @@ class BreakableStatement: public Statement {
Label* break_target() { return &break_target_; }
// Testers.
- bool is_target_for_anonymous() const { return type_ == TARGET_FOR_ANONYMOUS; }
+ bool is_target_for_anonymous() const {
+ return breakable_type_ == TARGET_FOR_ANONYMOUS;
+ }
BailoutId EntryId() const { return entry_id_; }
BailoutId ExitId() const { return exit_id_; }
protected:
- BreakableStatement(Isolate* isolate, ZoneStringList* labels, Type type)
+ BreakableStatement(
+ Isolate* isolate, ZoneStringList* labels, BreakableType breakable_type)
: labels_(labels),
- type_(type),
+ breakable_type_(breakable_type),
entry_id_(GetNextId(isolate)),
exit_id_(GetNextId(isolate)) {
ASSERT(labels == NULL || labels->length() > 0);
@@ -422,7 +439,7 @@ class BreakableStatement: public Statement {
private:
ZoneStringList* labels_;
- Type type_;
+ BreakableType breakable_type_;
Label break_target_;
const BailoutId entry_id_;
const BailoutId exit_id_;
@@ -716,6 +733,7 @@ class IterationStatement: public BreakableStatement {
private:
Statement* body_;
Label continue_target_;
+
const BailoutId osr_entry_id_;
};
@@ -751,7 +769,9 @@ class DoWhileStatement: public IterationStatement {
private:
Expression* cond_;
+
int condition_position_;
+
const BailoutId continue_id_;
const BailoutId back_edge_id_;
};
@@ -788,8 +808,10 @@ class WhileStatement: public IterationStatement {
private:
Expression* cond_;
+
// True if there is a function literal subexpression in the condition.
bool may_have_function_literal_;
+
const BailoutId body_id_;
};
@@ -843,51 +865,142 @@ class ForStatement: public IterationStatement {
Statement* init_;
Expression* cond_;
Statement* next_;
+
// True if there is a function literal subexpression in the condition.
bool may_have_function_literal_;
Variable* loop_variable_;
+
const BailoutId continue_id_;
const BailoutId body_id_;
};
-class ForInStatement: public IterationStatement {
+class ForEachStatement: public IterationStatement {
public:
- DECLARE_NODE_TYPE(ForInStatement)
+ enum VisitMode {
+ ENUMERATE, // for (each in subject) body;
+ ITERATE // for (each of subject) body;
+ };
- void Initialize(Expression* each, Expression* enumerable, Statement* body) {
+ void Initialize(Expression* each, Expression* subject, Statement* body) {
IterationStatement::Initialize(body);
each_ = each;
- enumerable_ = enumerable;
+ subject_ = subject;
}
Expression* each() const { return each_; }
- Expression* enumerable() const { return enumerable_; }
+ Expression* subject() const { return subject_; }
- virtual BailoutId ContinueId() const { return EntryId(); }
- virtual BailoutId StackCheckId() const { return body_id_; }
- BailoutId BodyId() const { return body_id_; }
- BailoutId PrepareId() const { return prepare_id_; }
+ protected:
+ ForEachStatement(Isolate* isolate, ZoneStringList* labels)
+ : IterationStatement(isolate, labels),
+ each_(NULL),
+ subject_(NULL) {
+ }
+
+ private:
+ Expression* each_;
+ Expression* subject_;
+};
+
+
+class ForInStatement: public ForEachStatement {
+ public:
+ DECLARE_NODE_TYPE(ForInStatement)
+
+ Expression* enumerable() const {
+ return subject();
+ }
TypeFeedbackId ForInFeedbackId() const { return reuse(PrepareId()); }
+ void RecordTypeFeedback(TypeFeedbackOracle* oracle);
+ enum ForInType { FAST_FOR_IN, SLOW_FOR_IN };
+ ForInType for_in_type() const { return for_in_type_; }
+
+ BailoutId BodyId() const { return body_id_; }
+ BailoutId PrepareId() const { return prepare_id_; }
+ virtual BailoutId ContinueId() const { return EntryId(); }
+ virtual BailoutId StackCheckId() const { return body_id_; }
protected:
ForInStatement(Isolate* isolate, ZoneStringList* labels)
- : IterationStatement(isolate, labels),
- each_(NULL),
- enumerable_(NULL),
+ : ForEachStatement(isolate, labels),
+ for_in_type_(SLOW_FOR_IN),
body_id_(GetNextId(isolate)),
prepare_id_(GetNextId(isolate)) {
}
- private:
- Expression* each_;
- Expression* enumerable_;
+ ForInType for_in_type_;
const BailoutId body_id_;
const BailoutId prepare_id_;
};
+class ForOfStatement: public ForEachStatement {
+ public:
+ DECLARE_NODE_TYPE(ForOfStatement)
+
+ void Initialize(Expression* each,
+ Expression* subject,
+ Statement* body,
+ Expression* assign_iterator,
+ Expression* next_result,
+ Expression* result_done,
+ Expression* assign_each) {
+ ForEachStatement::Initialize(each, subject, body);
+ assign_iterator_ = assign_iterator;
+ next_result_ = next_result;
+ result_done_ = result_done;
+ assign_each_ = assign_each;
+ }
+
+ Expression* iterable() const {
+ return subject();
+ }
+
+ // var iterator = iterable;
+ Expression* assign_iterator() const {
+ return assign_iterator_;
+ }
+
+ // var result = iterator.next();
+ Expression* next_result() const {
+ return next_result_;
+ }
+
+ // result.done
+ Expression* result_done() const {
+ return result_done_;
+ }
+
+ // each = result.value
+ Expression* assign_each() const {
+ return assign_each_;
+ }
+
+ virtual BailoutId ContinueId() const { return EntryId(); }
+ virtual BailoutId StackCheckId() const { return BackEdgeId(); }
+
+ BailoutId BackEdgeId() const { return back_edge_id_; }
+
+ protected:
+ ForOfStatement(Isolate* isolate, ZoneStringList* labels)
+ : ForEachStatement(isolate, labels),
+ assign_iterator_(NULL),
+ next_result_(NULL),
+ result_done_(NULL),
+ assign_each_(NULL),
+ back_edge_id_(GetNextId(isolate)) {
+ }
+
+ Expression* assign_iterator_;
+ Expression* next_result_;
+ Expression* result_done_;
+ Expression* assign_each_;
+ const BailoutId back_edge_id_;
+};
+
+
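The four accessor comments in ForOfStatement describe the desugaring the parser builds: fetch an iterator, call next(), test done, then bind value. Spelled out as an ordinary loop, with hypothetical Iterator/Result types standing in for the iteration protocol:

#include <cassert>
#include <vector>

struct Result { bool done; int value; };

struct Iterator {
  const std::vector<int>* backing;
  size_t pos;
  Result next() {
    if (pos >= backing->size()) return {true, 0};
    return {false, (*backing)[pos++]};
  }
};

int main() {
  std::vector<int> iterable = {10, 20, 30};
  int sum = 0;

  // The four sub-expressions the ForOfStatement node records:
  Iterator iterator{&iterable, 0};    // assign_iterator: iterator = iterable
  while (true) {
    Result result = iterator.next();  // next_result: result = iterator.next()
    if (result.done) break;           // result_done:  result.done
    int each = result.value;          // assign_each:  each = result.value
    sum += each;                      // loop body
  }
  assert(sum == 60);
  return 0;
}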
class ExpressionStatement: public Statement {
public:
DECLARE_NODE_TYPE(ExpressionStatement)
@@ -1023,11 +1136,16 @@ class SwitchStatement: public BreakableStatement {
void Initialize(Expression* tag, ZoneList<CaseClause*>* cases) {
tag_ = tag;
cases_ = cases;
+ switch_type_ = UNKNOWN_SWITCH;
}
Expression* tag() const { return tag_; }
ZoneList<CaseClause*>* cases() const { return cases_; }
+ enum SwitchType { UNKNOWN_SWITCH, SMI_SWITCH, STRING_SWITCH, GENERIC_SWITCH };
+ SwitchType switch_type() const { return switch_type_; }
+ void set_switch_type(SwitchType switch_type) { switch_type_ = switch_type; }
+
protected:
SwitchStatement(Isolate* isolate, ZoneStringList* labels)
: BreakableStatement(isolate, labels, TARGET_FOR_ANONYMOUS),
@@ -1037,6 +1155,7 @@ class SwitchStatement: public BreakableStatement {
private:
Expression* tag_;
ZoneList<CaseClause*>* cases_;
+ SwitchType switch_type_;
};
@@ -1096,7 +1215,7 @@ class TargetCollector: public AstNode {
// Virtual behaviour. TargetCollectors are never part of the AST.
virtual void Accept(AstVisitor* v) { UNREACHABLE(); }
- virtual Type node_type() const { return kInvalid; }
+ virtual NodeType node_type() const { return kInvalid; }
virtual TargetCollector* AsTargetCollector() { return this; }
ZoneList<Label*>* targets() { return &targets_; }
@@ -1282,52 +1401,55 @@ class MaterializedLiteral: public Expression {
};
+// Property is used for passing information
+// about an object literal's properties from the parser
+// to the code generator.
+class ObjectLiteralProperty: public ZoneObject {
+ public:
+ enum Kind {
+ CONSTANT, // Property with constant value (compile time).
+ COMPUTED, // Property with computed value (execution time).
+ MATERIALIZED_LITERAL, // Property value is a materialized literal.
+ GETTER, SETTER, // Property is an accessor function.
+ PROTOTYPE // Property is __proto__.
+ };
+
+ ObjectLiteralProperty(Literal* key, Expression* value, Isolate* isolate);
+
+ Literal* key() { return key_; }
+ Expression* value() { return value_; }
+ Kind kind() { return kind_; }
+
+ // Type feedback information.
+ void RecordTypeFeedback(TypeFeedbackOracle* oracle);
+ bool IsMonomorphic() { return !receiver_type_.is_null(); }
+ Handle<Map> GetReceiverType() { return receiver_type_; }
+
+ bool IsCompileTimeValue();
+
+ void set_emit_store(bool emit_store);
+ bool emit_store();
+
+ protected:
+ template<class> friend class AstNodeFactory;
+
+ ObjectLiteralProperty(bool is_getter, FunctionLiteral* value);
+ void set_key(Literal* key) { key_ = key; }
+
+ private:
+ Literal* key_;
+ Expression* value_;
+ Kind kind_;
+ bool emit_store_;
+ Handle<Map> receiver_type_;
+};
+
+
// An object literal has a boilerplate object that is used
// for minimizing the work when constructing it at runtime.
class ObjectLiteral: public MaterializedLiteral {
public:
- // Property is used for passing information
- // about an object literal's properties from the parser
- // to the code generator.
- class Property: public ZoneObject {
- public:
- enum Kind {
- CONSTANT, // Property with constant value (compile time).
- COMPUTED, // Property with computed value (execution time).
- MATERIALIZED_LITERAL, // Property value is a materialized literal.
- GETTER, SETTER, // Property is an accessor function.
- PROTOTYPE // Property is __proto__.
- };
-
- Property(Literal* key, Expression* value, Isolate* isolate);
-
- Literal* key() { return key_; }
- Expression* value() { return value_; }
- Kind kind() { return kind_; }
-
- // Type feedback information.
- void RecordTypeFeedback(TypeFeedbackOracle* oracle);
- bool IsMonomorphic() { return !receiver_type_.is_null(); }
- Handle<Map> GetReceiverType() { return receiver_type_; }
-
- bool IsCompileTimeValue();
-
- void set_emit_store(bool emit_store);
- bool emit_store();
-
- protected:
- template<class> friend class AstNodeFactory;
-
- Property(bool is_getter, FunctionLiteral* value);
- void set_key(Literal* key) { key_ = key; }
-
- private:
- Literal* key_;
- Expression* value_;
- Kind kind_;
- bool emit_store_;
- Handle<Map> receiver_type_;
- };
+ typedef ObjectLiteralProperty Property;
DECLARE_NODE_TYPE(ObjectLiteral)
@@ -1590,6 +1712,11 @@ class Call: public Expression {
BailoutId ReturnId() const { return return_id_; }
+ // TODO(rossberg): this should really move somewhere else (and be merged with
+  // various similar methods in objects.cc), but for now...
+ static Handle<JSObject> GetPrototypeForPrimitiveCheck(
+ CheckType check, Isolate* isolate);
+
#ifdef DEBUG
// Used to assert that the FullCodeGenerator records the return site.
bool return_is_recorded_;
@@ -1636,10 +1763,13 @@ class CallNew: public Expression {
TypeFeedbackId CallNewFeedbackId() const { return reuse(id()); }
void RecordTypeFeedback(TypeFeedbackOracle* oracle);
virtual bool IsMonomorphic() { return is_monomorphic_; }
- Handle<JSFunction> target() { return target_; }
+ Handle<JSFunction> target() const { return target_; }
+ ElementsKind elements_kind() const { return elements_kind_; }
+ Handle<JSGlobalPropertyCell> allocation_info_cell() const {
+ return allocation_info_cell_;
+ }
BailoutId ReturnId() const { return return_id_; }
- ElementsKind elements_kind() const { return elements_kind_; }
protected:
CallNew(Isolate* isolate,
@@ -1651,8 +1781,8 @@ class CallNew: public Expression {
arguments_(arguments),
pos_(pos),
is_monomorphic_(false),
- return_id_(GetNextId(isolate)),
- elements_kind_(GetInitialFastElementsKind()) { }
+ elements_kind_(GetInitialFastElementsKind()),
+ return_id_(GetNextId(isolate)) { }
private:
Expression* expression_;
@@ -1661,9 +1791,10 @@ class CallNew: public Expression {
bool is_monomorphic_;
Handle<JSFunction> target_;
+ ElementsKind elements_kind_;
+ Handle<JSGlobalPropertyCell> allocation_info_cell_;
const BailoutId return_id_;
- ElementsKind elements_kind_;
};
@@ -1713,6 +1844,8 @@ class UnaryOperation: public Expression {
BailoutId MaterializeFalseId() { return materialize_false_id_; }
TypeFeedbackId UnaryOperationFeedbackId() const { return reuse(id()); }
+ void RecordTypeFeedback(TypeFeedbackOracle* oracle);
+ TypeInfo type() const { return type_; }
protected:
UnaryOperation(Isolate* isolate,
@@ -1733,6 +1866,8 @@ class UnaryOperation: public Expression {
Expression* expression_;
int pos_;
+ TypeInfo type_;
+
// For unary not (Token::NOT), the AST ids where true and false will
// actually be materialized, respectively.
const BailoutId materialize_true_id_;
@@ -1754,6 +1889,12 @@ class BinaryOperation: public Expression {
BailoutId RightId() const { return right_id_; }
TypeFeedbackId BinaryOperationFeedbackId() const { return reuse(id()); }
+ void RecordTypeFeedback(TypeFeedbackOracle* oracle);
+ TypeInfo left_type() const { return left_type_; }
+ TypeInfo right_type() const { return right_type_; }
+ TypeInfo result_type() const { return result_type_; }
+ bool has_fixed_right_arg() const { return has_fixed_right_arg_; }
+ int fixed_right_arg_value() const { return fixed_right_arg_value_; }
protected:
BinaryOperation(Isolate* isolate,
@@ -1775,6 +1916,13 @@ class BinaryOperation: public Expression {
Expression* left_;
Expression* right_;
int pos_;
+
+ TypeInfo left_type_;
+ TypeInfo right_type_;
+ TypeInfo result_type_;
+ bool has_fixed_right_arg_;
+ int fixed_right_arg_value_;
+
// The short-circuit logical operations need an AST ID for their
// right-hand subexpression.
const BailoutId right_id_;
@@ -1804,6 +1952,7 @@ class CountOperation: public Expression {
virtual KeyedAccessStoreMode GetStoreMode() {
return store_mode_;
}
+ TypeInfo type() const { return type_; }
BailoutId AssignmentId() const { return assignment_id_; }
@@ -1832,6 +1981,8 @@ class CountOperation: public Expression {
bool is_monomorphic_ : 1;
KeyedAccessStoreMode store_mode_ : 5; // Windows treats as signed,
// must have extra bit.
+ TypeInfo type_;
+
Expression* expression_;
int pos_;
const BailoutId assignment_id_;
@@ -1851,6 +2002,12 @@ class CompareOperation: public Expression {
// Type feedback information.
TypeFeedbackId CompareOperationFeedbackId() const { return reuse(id()); }
+ void RecordTypeFeedback(TypeFeedbackOracle* oracle);
+ TypeInfo left_type() const { return left_type_; }
+ TypeInfo right_type() const { return right_type_; }
+ TypeInfo overall_type() const { return overall_type_; }
+ byte compare_nil_types() const { return compare_nil_types_; }
+ Handle<Map> map() const { return map_; }
// Match special cases.
bool IsLiteralCompareTypeof(Expression** expr, Handle<String>* check);
@@ -1876,6 +2033,12 @@ class CompareOperation: public Expression {
Expression* left_;
Expression* right_;
int pos_;
+
+ TypeInfo left_type_;
+ TypeInfo right_type_;
+ TypeInfo overall_type_;
+ byte compare_nil_types_;
+ Handle<Map> map_;
};
@@ -2048,7 +2211,7 @@ class Throw: public Expression {
class FunctionLiteral: public Expression {
public:
- enum Type {
+ enum FunctionType {
ANONYMOUS_EXPRESSION,
NAMED_EXPRESSION,
DECLARATION
@@ -2092,12 +2255,6 @@ class FunctionLiteral: public Expression {
int materialized_literal_count() { return materialized_literal_count_; }
int expected_property_count() { return expected_property_count_; }
int handler_count() { return handler_count_; }
- bool has_only_simple_this_property_assignments() {
- return HasOnlySimpleThisPropertyAssignments::decode(bitfield_);
- }
- Handle<FixedArray> this_property_assignments() {
- return this_property_assignments_;
- }
int parameter_count() { return parameter_count_; }
bool AllowsLazyCompilation();
@@ -2152,10 +2309,8 @@ class FunctionLiteral: public Expression {
int materialized_literal_count,
int expected_property_count,
int handler_count,
- bool has_only_simple_this_property_assignments,
- Handle<FixedArray> this_property_assignments,
int parameter_count,
- Type type,
+ FunctionType function_type,
ParameterFlag has_duplicate_parameters,
IsFunctionFlag is_function,
IsParenthesizedFlag is_parenthesized,
@@ -2164,7 +2319,6 @@ class FunctionLiteral: public Expression {
name_(name),
scope_(scope),
body_(body),
- this_property_assignments_(this_property_assignments),
inferred_name_(isolate->factory()->empty_string()),
materialized_literal_count_(materialized_literal_count),
expected_property_count_(expected_property_count),
@@ -2172,10 +2326,8 @@ class FunctionLiteral: public Expression {
parameter_count_(parameter_count),
function_token_position_(RelocInfo::kNoPosition) {
bitfield_ =
- HasOnlySimpleThisPropertyAssignments::encode(
- has_only_simple_this_property_assignments) |
- IsExpression::encode(type != DECLARATION) |
- IsAnonymous::encode(type == ANONYMOUS_EXPRESSION) |
+ IsExpression::encode(function_type != DECLARATION) |
+ IsAnonymous::encode(function_type == ANONYMOUS_EXPRESSION) |
Pretenure::encode(false) |
HasDuplicateParameters::encode(has_duplicate_parameters) |
IsFunction::encode(is_function) |
@@ -2187,7 +2339,6 @@ class FunctionLiteral: public Expression {
Handle<String> name_;
Scope* scope_;
ZoneList<Statement*>* body_;
- Handle<FixedArray> this_property_assignments_;
Handle<String> inferred_name_;
AstProperties ast_properties_;
@@ -2198,14 +2349,13 @@ class FunctionLiteral: public Expression {
int function_token_position_;
unsigned bitfield_;
- class HasOnlySimpleThisPropertyAssignments: public BitField<bool, 0, 1> {};
- class IsExpression: public BitField<bool, 1, 1> {};
- class IsAnonymous: public BitField<bool, 2, 1> {};
- class Pretenure: public BitField<bool, 3, 1> {};
- class HasDuplicateParameters: public BitField<ParameterFlag, 4, 1> {};
- class IsFunction: public BitField<IsFunctionFlag, 5, 1> {};
- class IsParenthesized: public BitField<IsParenthesizedFlag, 6, 1> {};
- class IsGenerator: public BitField<IsGeneratorFlag, 7, 1> {};
+ class IsExpression: public BitField<bool, 0, 1> {};
+ class IsAnonymous: public BitField<bool, 1, 1> {};
+ class Pretenure: public BitField<bool, 2, 1> {};
+ class HasDuplicateParameters: public BitField<ParameterFlag, 3, 1> {};
+ class IsFunction: public BitField<IsFunctionFlag, 4, 1> {};
+ class IsParenthesized: public BitField<IsParenthesizedFlag, 5, 1> {};
+ class IsGenerator: public BitField<IsGeneratorFlag, 6, 1> {};
};
@@ -2323,7 +2473,7 @@ class RegExpAlternative: public RegExpTree {
class RegExpAssertion: public RegExpTree {
public:
- enum Type {
+ enum AssertionType {
START_OF_LINE,
START_OF_INPUT,
END_OF_LINE,
@@ -2331,7 +2481,7 @@ class RegExpAssertion: public RegExpTree {
BOUNDARY,
NON_BOUNDARY
};
- explicit RegExpAssertion(Type type) : type_(type) { }
+ explicit RegExpAssertion(AssertionType type) : assertion_type_(type) { }
virtual void* Accept(RegExpVisitor* visitor, void* data);
virtual RegExpNode* ToNode(RegExpCompiler* compiler,
RegExpNode* on_success);
@@ -2341,9 +2491,9 @@ class RegExpAssertion: public RegExpTree {
virtual bool IsAnchoredAtEnd();
virtual int min_match() { return 0; }
virtual int max_match() { return 0; }
- Type type() { return type_; }
+ AssertionType assertion_type() { return assertion_type_; }
private:
- Type type_;
+ AssertionType assertion_type_;
};
@@ -2456,13 +2606,13 @@ class RegExpText: public RegExpTree {
class RegExpQuantifier: public RegExpTree {
public:
- enum Type { GREEDY, NON_GREEDY, POSSESSIVE };
- RegExpQuantifier(int min, int max, Type type, RegExpTree* body)
+ enum QuantifierType { GREEDY, NON_GREEDY, POSSESSIVE };
+ RegExpQuantifier(int min, int max, QuantifierType type, RegExpTree* body)
: body_(body),
min_(min),
max_(max),
min_match_(min * body->min_match()),
- type_(type) {
+ quantifier_type_(type) {
if (max > 0 && body->max_match() > kInfinity / max) {
max_match_ = kInfinity;
} else {
@@ -2486,9 +2636,9 @@ class RegExpQuantifier: public RegExpTree {
virtual int max_match() { return max_match_; }
int min() { return min_; }
int max() { return max_; }
- bool is_possessive() { return type_ == POSSESSIVE; }
- bool is_non_greedy() { return type_ == NON_GREEDY; }
- bool is_greedy() { return type_ == GREEDY; }
+ bool is_possessive() { return quantifier_type_ == POSSESSIVE; }
+ bool is_non_greedy() { return quantifier_type_ == NON_GREEDY; }
+ bool is_greedy() { return quantifier_type_ == GREEDY; }
RegExpTree* body() { return body_; }
private:
@@ -2497,7 +2647,7 @@ class RegExpQuantifier: public RegExpTree {
int max_;
int min_match_;
int max_match_;
- Type type_;
+ QuantifierType quantifier_type_;
};
@@ -2788,10 +2938,25 @@ class AstNodeFactory BASE_EMBEDDED {
STATEMENT_WITH_LABELS(DoWhileStatement)
STATEMENT_WITH_LABELS(WhileStatement)
STATEMENT_WITH_LABELS(ForStatement)
- STATEMENT_WITH_LABELS(ForInStatement)
STATEMENT_WITH_LABELS(SwitchStatement)
#undef STATEMENT_WITH_LABELS
+ ForEachStatement* NewForEachStatement(ForEachStatement::VisitMode visit_mode,
+ ZoneStringList* labels) {
+ switch (visit_mode) {
+ case ForEachStatement::ENUMERATE: {
+ ForInStatement* stmt = new(zone_) ForInStatement(isolate_, labels);
+ VISIT_AND_RETURN(ForInStatement, stmt);
+ }
+ case ForEachStatement::ITERATE: {
+ ForOfStatement* stmt = new(zone_) ForOfStatement(isolate_, labels);
+ VISIT_AND_RETURN(ForOfStatement, stmt);
+ }
+ }
+ UNREACHABLE();
+ return NULL;
+ }
+
ModuleStatement* NewModuleStatement(VariableProxy* proxy, Block* body) {
ModuleStatement* stmt = new(zone_) ModuleStatement(proxy, body);
VISIT_AND_RETURN(ModuleStatement, stmt)
@@ -3028,19 +3193,16 @@ class AstNodeFactory BASE_EMBEDDED {
int materialized_literal_count,
int expected_property_count,
int handler_count,
- bool has_only_simple_this_property_assignments,
- Handle<FixedArray> this_property_assignments,
int parameter_count,
FunctionLiteral::ParameterFlag has_duplicate_parameters,
- FunctionLiteral::Type type,
+ FunctionLiteral::FunctionType function_type,
FunctionLiteral::IsFunctionFlag is_function,
FunctionLiteral::IsParenthesizedFlag is_parenthesized,
FunctionLiteral::IsGeneratorFlag is_generator) {
FunctionLiteral* lit = new(zone_) FunctionLiteral(
isolate_, name, scope, body,
materialized_literal_count, expected_property_count, handler_count,
- has_only_simple_this_property_assignments, this_property_assignments,
- parameter_count, type, has_duplicate_parameters, is_function,
+ parameter_count, function_type, has_duplicate_parameters, is_function,
is_parenthesized, is_generator);
// Top-level literal doesn't count for the AST's properties.
if (is_function == FunctionLiteral::kIsFunction) {
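Dropping HasOnlySimpleThisPropertyAssignments from FunctionLiteral forces the remaining BitFields down by one bit each, which is why every declaration in that hunk is renumbered. A minimal version of the BitField idiom itself, assuming the usual shift-and-mask definition:

#include <cassert>
#include <cstdint>

// A typed view of `size` bits starting at `shift` inside an unsigned word.
template <class T, int shift, int size>
struct BitField {
  static const uint32_t kMask = ((1u << size) - 1) << shift;
  static uint32_t encode(T value) {
    return static_cast<uint32_t>(value) << shift;
  }
  static T decode(uint32_t word) {
    return static_cast<T>((word & kMask) >> shift);
  }
};

// The repacked FunctionLiteral fields from the hunk above: once the
// dropped field is gone, everything shifts down one position.
using IsExpression = BitField<bool, 0, 1>;
using IsAnonymous  = BitField<bool, 1, 1>;
using Pretenure    = BitField<bool, 2, 1>;

int main() {
  uint32_t bits = IsExpression::encode(true) | Pretenure::encode(true);
  assert(IsExpression::decode(bits));
  assert(!IsAnonymous::decode(bits));
  assert(Pretenure::decode(bits));
  return 0;
}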
diff --git a/deps/v8/src/atomicops_internals_mips_gcc.h b/deps/v8/src/atomicops_internals_mips_gcc.h
index 9498fd76e1..cb8f8b9d95 100644
--- a/deps/v8/src/atomicops_internals_mips_gcc.h
+++ b/deps/v8/src/atomicops_internals_mips_gcc.h
@@ -30,8 +30,6 @@
#ifndef V8_ATOMICOPS_INTERNALS_MIPS_GCC_H_
#define V8_ATOMICOPS_INTERNALS_MIPS_GCC_H_
-#define ATOMICOPS_COMPILER_BARRIER() __asm__ __volatile__("" : : : "memory")
-
namespace v8 {
namespace internal {
@@ -111,9 +109,9 @@ inline Atomic32 NoBarrier_AtomicIncrement(volatile Atomic32* ptr,
inline Atomic32 Barrier_AtomicIncrement(volatile Atomic32* ptr,
Atomic32 increment) {
- ATOMICOPS_COMPILER_BARRIER();
+ MemoryBarrier();
Atomic32 res = NoBarrier_AtomicIncrement(ptr, increment);
- ATOMICOPS_COMPILER_BARRIER();
+ MemoryBarrier();
return res;
}
@@ -126,19 +124,16 @@ inline Atomic32 Barrier_AtomicIncrement(volatile Atomic32* ptr,
inline Atomic32 Acquire_CompareAndSwap(volatile Atomic32* ptr,
Atomic32 old_value,
Atomic32 new_value) {
- ATOMICOPS_COMPILER_BARRIER();
Atomic32 res = NoBarrier_CompareAndSwap(ptr, old_value, new_value);
- ATOMICOPS_COMPILER_BARRIER();
+ MemoryBarrier();
return res;
}
inline Atomic32 Release_CompareAndSwap(volatile Atomic32* ptr,
Atomic32 old_value,
Atomic32 new_value) {
- ATOMICOPS_COMPILER_BARRIER();
- Atomic32 res = NoBarrier_CompareAndSwap(ptr, old_value, new_value);
- ATOMICOPS_COMPILER_BARRIER();
- return res;
+ MemoryBarrier();
+ return NoBarrier_CompareAndSwap(ptr, old_value, new_value);
}
inline void NoBarrier_Store(volatile Atomic32* ptr, Atomic32 value) {
@@ -176,6 +171,4 @@ inline Atomic32 Release_Load(volatile const Atomic32* ptr) {
} } // namespace v8::internal
-#undef ATOMICOPS_COMPILER_BARRIER
-
#endif // V8_ATOMICOPS_INTERNALS_MIPS_GCC_H_
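The MIPS change replaces compiler-only barriers with real MemoryBarrier() calls, placed after the swap for acquire semantics and before it for release. In C++11 atomics the same placement is expressed through memory orders on the compare-exchange; a sketch of the two wrappers under that assumption:

#include <atomic>
#include <cassert>
#include <cstdint>

using Atomic32 = int32_t;

// Acquire: later accesses may not move above the CAS, so the ordering
// constraint sits on the read side, logically after the swap.
Atomic32 Acquire_CompareAndSwap(std::atomic<Atomic32>* ptr,
                                Atomic32 old_value, Atomic32 new_value) {
  Atomic32 expected = old_value;
  ptr->compare_exchange_strong(expected, new_value,
                               std::memory_order_acquire,
                               std::memory_order_acquire);
  return expected;  // the value previously at *ptr, as in V8's contract
}

// Release: earlier accesses may not move below the CAS, so the ordering
// constraint sits logically before the swap on the write side.
Atomic32 Release_CompareAndSwap(std::atomic<Atomic32>* ptr,
                                Atomic32 old_value, Atomic32 new_value) {
  Atomic32 expected = old_value;
  ptr->compare_exchange_strong(expected, new_value,
                               std::memory_order_release,
                               std::memory_order_relaxed);
  return expected;
}

int main() {
  std::atomic<Atomic32> x{1};
  assert(Acquire_CompareAndSwap(&x, 1, 2) == 1);  // swapped, returns old
  assert(Release_CompareAndSwap(&x, 9, 3) == 2);  // no swap, returns current
  return 0;
}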
diff --git a/deps/v8/src/bootstrapper.cc b/deps/v8/src/bootstrapper.cc
index 7c9e4366ed..a51a9b117e 100644
--- a/deps/v8/src/bootstrapper.cc
+++ b/deps/v8/src/bootstrapper.cc
@@ -1086,11 +1086,13 @@ bool Genesis::InitializeGlobal(Handle<GlobalObject> inner_global,
CHECK_NOT_EMPTY_HANDLE(isolate,
JSObject::SetLocalPropertyIgnoreAttributes(
result, factory->length_string(),
- factory->undefined_value(), DONT_ENUM));
+ factory->undefined_value(), DONT_ENUM,
+ Object::FORCE_TAGGED));
CHECK_NOT_EMPTY_HANDLE(isolate,
JSObject::SetLocalPropertyIgnoreAttributes(
result, factory->callee_string(),
- factory->undefined_value(), DONT_ENUM));
+ factory->undefined_value(), DONT_ENUM,
+ Object::FORCE_TAGGED));
#ifdef DEBUG
LookupResult lookup(isolate);
@@ -1320,10 +1322,11 @@ void Genesis::InitializeExperimentalGlobal() {
if (FLAG_harmony_array_buffer) {
// -- A r r a y B u f f e r
Handle<JSFunction> array_buffer_fun =
- InstallFunction(global, "ArrayBuffer", JS_ARRAY_BUFFER_TYPE,
- JSArrayBuffer::kSize,
- isolate()->initial_object_prototype(),
- Builtins::kIllegal, true, true);
+ InstallFunction(
+ global, "ArrayBuffer", JS_ARRAY_BUFFER_TYPE,
+ JSArrayBuffer::kSizeWithInternalFields,
+ isolate()->initial_object_prototype(),
+ Builtins::kIllegal, true, true);
native_context()->set_array_buffer_fun(*array_buffer_fun);
}
@@ -1574,6 +1577,11 @@ void Genesis::InstallExperimentalNativeFunctions() {
}
if (FLAG_harmony_observation) {
INSTALL_NATIVE(JSFunction, "NotifyChange", observers_notify_change);
+ INSTALL_NATIVE(JSFunction, "EnqueueSpliceRecord", observers_enqueue_splice);
+ INSTALL_NATIVE(JSFunction, "BeginPerformSplice",
+ observers_begin_perform_splice);
+ INSTALL_NATIVE(JSFunction, "EndPerformSplice",
+ observers_end_perform_splice);
INSTALL_NATIVE(JSFunction, "DeliverChangeRecords",
observers_deliver_changes);
}
@@ -1604,19 +1612,23 @@ Handle<JSFunction> Genesis::InstallInternalArray(
factory()->NewJSObject(isolate()->object_function(), TENURED);
SetPrototype(array_function, prototype);
- array_function->shared()->set_construct_stub(
- isolate()->builtins()->builtin(Builtins::kCommonArrayConstructCode));
+ if (FLAG_optimize_constructed_arrays) {
+ InternalArrayConstructorStub internal_array_constructor_stub(isolate());
+ Handle<Code> code = internal_array_constructor_stub.GetCode(isolate());
+ array_function->shared()->set_construct_stub(*code);
+ } else {
+ array_function->shared()->set_construct_stub(
+ isolate()->builtins()->builtin(Builtins::kCommonArrayConstructCode));
+ }
array_function->shared()->DontAdaptArguments();
- MaybeObject* maybe_map = array_function->initial_map()->Copy();
- Map* new_map;
- if (!maybe_map->To(&new_map)) return Handle<JSFunction>::null();
- new_map->set_elements_kind(elements_kind);
- array_function->set_initial_map(new_map);
+ Handle<Map> original_map(array_function->initial_map());
+ Handle<Map> initial_map = factory()->CopyMap(original_map);
+ initial_map->set_elements_kind(elements_kind);
+ array_function->set_initial_map(*initial_map);
// Make "length" magic on instances.
- Handle<Map> initial_map(array_function->initial_map());
Handle<DescriptorArray> array_descriptors(
factory()->NewDescriptorArray(0, 1));
DescriptorArray::WhitenessWitness witness(*array_descriptors);
@@ -1870,14 +1882,11 @@ bool Genesis::InstallNatives() {
{
Handle<JSFunction> array_function =
InstallInternalArray(builtins, "InternalArray", FAST_HOLEY_ELEMENTS);
- if (array_function.is_null()) return false;
native_context()->set_internal_array_function(*array_function);
}
{
- Handle<JSFunction> array_function =
- InstallInternalArray(builtins, "InternalPackedArray", FAST_ELEMENTS);
- if (array_function.is_null()) return false;
+ InstallInternalArray(builtins, "InternalPackedArray", FAST_ELEMENTS);
}
if (FLAG_disable_native_files) {
@@ -2129,7 +2138,8 @@ void Genesis::InstallJSFunctionResultCaches() {
#undef F
;
- Handle<FixedArray> caches = FACTORY->NewFixedArray(kNumberOfCaches, TENURED);
+ Handle<FixedArray> caches =
+ factory()->NewFixedArray(kNumberOfCaches, TENURED);
int index = 0;
@@ -2148,7 +2158,7 @@ void Genesis::InstallJSFunctionResultCaches() {
void Genesis::InitializeNormalizedMapCaches() {
Handle<FixedArray> array(
- FACTORY->NewFixedArray(NormalizedMapCache::kEntries, TENURED));
+ factory()->NewFixedArray(NormalizedMapCache::kEntries, TENURED));
native_context()->set_normalized_map_cache(NormalizedMapCache::cast(*array));
}
@@ -2508,14 +2518,13 @@ void Genesis::TransferIndexedProperties(Handle<JSObject> from,
// Cloning the elements array is sufficient.
Handle<FixedArray> from_elements =
Handle<FixedArray>(FixedArray::cast(from->elements()));
- Handle<FixedArray> to_elements = FACTORY->CopyFixedArray(from_elements);
+ Handle<FixedArray> to_elements = factory()->CopyFixedArray(from_elements);
to->set_elements(*to_elements);
}
void Genesis::TransferObject(Handle<JSObject> from, Handle<JSObject> to) {
HandleScope outer(isolate());
- Factory* factory = isolate()->factory();
ASSERT(!from->IsJSArray());
ASSERT(!to->IsJSArray());
@@ -2525,7 +2534,7 @@ void Genesis::TransferObject(Handle<JSObject> from, Handle<JSObject> to) {
// Transfer the prototype (new map is needed).
Handle<Map> old_to_map = Handle<Map>(to->map());
- Handle<Map> new_to_map = factory->CopyMap(old_to_map);
+ Handle<Map> new_to_map = factory()->CopyMap(old_to_map);
new_to_map->set_prototype(from->map()->prototype());
to->set_map(*new_to_map);
}
diff --git a/deps/v8/src/bootstrapper.h b/deps/v8/src/bootstrapper.h
index 476ac12e14..3097800390 100644
--- a/deps/v8/src/bootstrapper.h
+++ b/deps/v8/src/bootstrapper.h
@@ -65,13 +65,14 @@ class SourceCodeCache BASE_EMBEDDED {
}
void Add(Vector<const char> name, Handle<SharedFunctionInfo> shared) {
- HandleScope scope(shared->GetIsolate());
+ Isolate* isolate = shared->GetIsolate();
+ Factory* factory = isolate->factory();
+ HandleScope scope(isolate);
int length = cache_->length();
- Handle<FixedArray> new_array =
- FACTORY->NewFixedArray(length + 2, TENURED);
+ Handle<FixedArray> new_array = factory->NewFixedArray(length + 2, TENURED);
cache_->CopyTo(0, *new_array, 0, cache_->length());
cache_ = *new_array;
- Handle<String> str = FACTORY->NewStringFromAscii(name, TENURED);
+ Handle<String> str = factory->NewStringFromAscii(name, TENURED);
cache_->set(length, *str);
cache_->set(length + 1, *shared);
Script::cast(shared->script())->set_type(Smi::FromInt(type_));
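For context on what Add is growing: SourceCodeCache keeps (name, SharedFunctionInfo) pairs flattened into a single FixedArray and extends it by exactly one pair per insertion. A miniature with std::string standing in for both handles:

#include <cassert>
#include <string>
#include <vector>

class SourceCodeCache {
 public:
  void Add(const std::string& name, const std::string& shared) {
    // Mirrors NewFixedArray(length + 2) + CopyTo + two set() calls.
    std::vector<std::string> new_array(cache_);
    new_array.push_back(name);
    new_array.push_back(shared);
    cache_.swap(new_array);
  }

  bool Lookup(const std::string& name, std::string* shared) const {
    for (size_t i = 0; i < cache_.size(); i += 2) {  // linear scan by key
      if (cache_[i] == name) {
        *shared = cache_[i + 1];
        return true;
      }
    }
    return false;
  }

 private:
  std::vector<std::string> cache_;  // [name0, value0, name1, value1, ...]
};

int main() {
  SourceCodeCache cache;
  cache.Add("native array.js", "<SharedFunctionInfo for array.js>");
  std::string out;
  assert(cache.Lookup("native array.js", &out));
  assert(!cache.Lookup("native missing.js", &out));
  return 0;
}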
diff --git a/deps/v8/src/builtins.cc b/deps/v8/src/builtins.cc
index 81b600574c..d97a4778af 100644
--- a/deps/v8/src/builtins.cc
+++ b/deps/v8/src/builtins.cc
@@ -194,64 +194,6 @@ BUILTIN(EmptyFunction) {
}
-RUNTIME_FUNCTION(MaybeObject*, ArrayConstructor_StubFailure) {
- // If we get 2 arguments then they are the stub parameters (constructor, type
- // info). If we get 3, then the first one is a pointer to the arguments
- // passed by the caller.
- Arguments empty_args(0, NULL);
- bool no_caller_args = args.length() == 2;
- ASSERT(no_caller_args || args.length() == 3);
- int parameters_start = no_caller_args ? 0 : 1;
- Arguments* caller_args = no_caller_args
- ? &empty_args
- : reinterpret_cast<Arguments*>(args[0]);
- Handle<JSFunction> constructor = args.at<JSFunction>(parameters_start);
- Handle<Object> type_info = args.at<Object>(parameters_start + 1);
-
- bool holey = false;
- if (caller_args->length() == 1 && (*caller_args)[0]->IsSmi()) {
- int value = Smi::cast((*caller_args)[0])->value();
- holey = (value > 0 && value < JSObject::kInitialMaxFastElementArray);
- }
-
- JSArray* array;
- MaybeObject* maybe_array;
- if (*type_info != isolate->heap()->undefined_value() &&
- JSGlobalPropertyCell::cast(*type_info)->value()->IsSmi()) {
- JSGlobalPropertyCell* cell = JSGlobalPropertyCell::cast(*type_info);
- Smi* smi = Smi::cast(cell->value());
- ElementsKind to_kind = static_cast<ElementsKind>(smi->value());
- if (holey && !IsFastHoleyElementsKind(to_kind)) {
- to_kind = GetHoleyElementsKind(to_kind);
- // Update the allocation site info to reflect the advice alteration.
- cell->set_value(Smi::FromInt(to_kind));
- }
-
- maybe_array = isolate->heap()->AllocateJSObjectWithAllocationSite(
- *constructor, type_info);
- if (!maybe_array->To(&array)) return maybe_array;
- } else {
- ElementsKind kind = constructor->initial_map()->elements_kind();
- ASSERT(kind == GetInitialFastElementsKind());
- maybe_array = isolate->heap()->AllocateJSObject(*constructor);
- if (!maybe_array->To(&array)) return maybe_array;
- // We might need to transition to holey
- if (holey) {
- kind = GetHoleyElementsKind(kind);
- maybe_array = array->TransitionElementsKind(kind);
- if (maybe_array->IsFailure()) return maybe_array;
- }
- }
-
- maybe_array = isolate->heap()->AllocateJSArrayStorage(array, 0, 0,
- DONT_INITIALIZE_ARRAY_ELEMENTS);
- if (maybe_array->IsFailure()) return maybe_array;
- maybe_array = ArrayConstructInitializeElements(array, caller_args);
- if (maybe_array->IsFailure()) return maybe_array;
- return array;
-}
-
-
static MaybeObject* ArrayCodeGenericCommon(Arguments* args,
Isolate* isolate,
JSFunction* constructor) {
@@ -563,7 +505,7 @@ BUILTIN(ArrayPush) {
}
// Add the provided values.
- AssertNoAllocation no_gc;
+ DisallowHeapAllocation no_gc;
WriteBarrierMode mode = elms->GetWriteBarrierMode(no_gc);
for (int index = 0; index < to_add; index++) {
elms->set(index + len, args[index + 1], mode);
@@ -612,7 +554,7 @@ BUILTIN(ArrayPush) {
}
// Add the provided values.
- AssertNoAllocation no_gc;
+ DisallowHeapAllocation no_gc;
int index;
for (index = 0; index < to_add; index++) {
Object* arg = args[index + 1];
@@ -695,7 +637,7 @@ BUILTIN(ArrayShift) {
// Shift the elements.
if (elms_obj->IsFixedArray()) {
FixedArray* elms = FixedArray::cast(elms_obj);
- AssertNoAllocation no_gc;
+ DisallowHeapAllocation no_gc;
heap->MoveElements(elms, 0, 1, len - 1);
elms->set(len - 1, heap->the_hole_value());
} else {
@@ -762,12 +704,12 @@ BUILTIN(ArrayUnshift) {
elms = new_elms;
array->set_elements(elms);
} else {
- AssertNoAllocation no_gc;
+ DisallowHeapAllocation no_gc;
heap->MoveElements(elms, to_add, 0, len);
}
// Add the provided values.
- AssertNoAllocation no_gc;
+ DisallowHeapAllocation no_gc;
WriteBarrierMode mode = elms->GetWriteBarrierMode(no_gc);
for (int i = 0; i < to_add; i++) {
elms->set(i, args[i + 1], mode);
@@ -898,7 +840,7 @@ BUILTIN(ArraySlice) {
result_len,
result_len);
- AssertNoAllocation no_gc;
+ DisallowHeapAllocation no_gc;
if (result_len == 0) return maybe_array;
if (!maybe_array->To(&result_array)) return maybe_array;
@@ -1000,7 +942,7 @@ BUILTIN(ArraySplice) {
if (!maybe_array->To(&result_array)) return maybe_array;
if (actual_delete_count > 0) {
- AssertNoAllocation no_gc;
+ DisallowHeapAllocation no_gc;
ElementsAccessor* accessor = array->GetElementsAccessor();
MaybeObject* maybe_failure = accessor->CopyElements(
NULL, actual_start, elements_kind, result_array->elements(),
@@ -1025,7 +967,7 @@ BUILTIN(ArraySplice) {
MoveDoubleElements(elms, delta, elms, 0, actual_start);
} else {
FixedArray* elms = FixedArray::cast(elms_obj);
- AssertNoAllocation no_gc;
+ DisallowHeapAllocation no_gc;
heap->MoveElements(elms, delta, 0, actual_start);
}
@@ -1041,7 +983,7 @@ BUILTIN(ArraySplice) {
FillWithHoles(elms, new_length, len);
} else {
FixedArray* elms = FixedArray::cast(elms_obj);
- AssertNoAllocation no_gc;
+ DisallowHeapAllocation no_gc;
heap->MoveElements(elms, actual_start + item_count,
actual_start + actual_delete_count,
(len - actual_delete_count - actual_start));
@@ -1062,7 +1004,7 @@ BUILTIN(ArraySplice) {
MaybeObject* maybe_obj = heap->AllocateUninitializedFixedArray(capacity);
if (!maybe_obj->To(&new_elms)) return maybe_obj;
- AssertNoAllocation no_gc;
+ DisallowHeapAllocation no_gc;
ElementsKind kind = array->GetElementsKind();
ElementsAccessor* accessor = array->GetElementsAccessor();
@@ -1083,7 +1025,7 @@ BUILTIN(ArraySplice) {
elms_obj = new_elms;
elms_changed = true;
} else {
- AssertNoAllocation no_gc;
+ DisallowHeapAllocation no_gc;
heap->MoveElements(elms, actual_start + item_count,
actual_start + actual_delete_count,
(len - actual_delete_count - actual_start));
@@ -1102,7 +1044,7 @@ BUILTIN(ArraySplice) {
}
} else {
FixedArray* elms = FixedArray::cast(elms_obj);
- AssertNoAllocation no_gc;
+ DisallowHeapAllocation no_gc;
WriteBarrierMode mode = elms->GetWriteBarrierMode(no_gc);
for (int k = actual_start; k < actual_start + item_count; k++) {
elms->set(k, args[3 + k - actual_start], mode);
@@ -1466,6 +1408,11 @@ static void Generate_LoadIC_Getter_ForDeopt(MacroAssembler* masm) {
}
+static void Generate_LoadIC_Slow(MacroAssembler* masm) {
+ LoadIC::GenerateRuntimeGetProperty(masm);
+}
+
+
static void Generate_KeyedLoadIC_Initialize(MacroAssembler* masm) {
KeyedLoadIC::GenerateInitialize(masm);
}
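AssertNoAllocation becomes DisallowHeapAllocation throughout this file, and the GetWriteBarrierMode(no_gc) calls show why the scope is a value rather than a macro: operations that are only sound while GC is off take the scope by reference, so holding one is enforced by the type system. A simplified sketch of that witness-parameter idiom (the real function also inspects the heap; here it just returns a fixed mode):

#include <cassert>

thread_local bool g_allocation_allowed = true;

class DisallowHeapAllocation {
 public:
  DisallowHeapAllocation() : old_(g_allocation_allowed) {
    g_allocation_allowed = false;
  }
  ~DisallowHeapAllocation() { g_allocation_allowed = old_; }
 private:
  bool old_;
};

enum WriteBarrierMode { SKIP_WRITE_BARRIER, UPDATE_WRITE_BARRIER };

// Only meaningful while GC cannot run, so it demands a live
// DisallowHeapAllocation scope as proof; code without one in hand
// simply cannot call it.
WriteBarrierMode GetWriteBarrierMode(const DisallowHeapAllocation& promise) {
  (void)promise;  // only its existence matters
  return SKIP_WRITE_BARRIER;
}

int main() {
  DisallowHeapAllocation no_gc;
  assert(GetWriteBarrierMode(no_gc) == SKIP_WRITE_BARRIER);
  assert(!g_allocation_allowed);
  return 0;
}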
diff --git a/deps/v8/src/builtins.h b/deps/v8/src/builtins.h
index 58d1a8b147..c45fbfd335 100644
--- a/deps/v8/src/builtins.h
+++ b/deps/v8/src/builtins.h
@@ -144,6 +144,8 @@ enum BuiltinExtraArguments {
Code::kNoExtraICState) \
V(LoadIC_Getter_ForDeopt, LOAD_IC, MONOMORPHIC, \
Code::kNoExtraICState) \
+ V(LoadIC_Slow, LOAD_IC, GENERIC, \
+ Code::kNoExtraICState) \
\
V(KeyedLoadIC_Initialize, KEYED_LOAD_IC, UNINITIALIZED, \
Code::kNoExtraICState) \
diff --git a/deps/v8/src/checks.cc b/deps/v8/src/checks.cc
index 8bcde1c61c..82086824dd 100644
--- a/deps/v8/src/checks.cc
+++ b/deps/v8/src/checks.cc
@@ -36,6 +36,8 @@ static int fatal_error_handler_nesting_depth = 0;
// Contains protection against recursive calls (faults while handling faults).
extern "C" void V8_Fatal(const char* file, int line, const char* format, ...) {
+ i::AllowHandleDereference allow_deref;
+ i::AllowDeferredHandleDereference allow_deferred_deref;
fflush(stdout);
fflush(stderr);
fatal_error_handler_nesting_depth++;
diff --git a/deps/v8/src/code-stubs-hydrogen.cc b/deps/v8/src/code-stubs-hydrogen.cc
index 6e837ddb95..99c4db55b7 100644
--- a/deps/v8/src/code-stubs-hydrogen.cc
+++ b/deps/v8/src/code-stubs-hydrogen.cc
@@ -36,10 +36,9 @@ namespace internal {
static LChunk* OptimizeGraph(HGraph* graph) {
- Isolate* isolate = graph->isolate();
- AssertNoAllocation no_gc;
- NoHandleAllocation no_handles(isolate);
- HandleDereferenceGuard no_deref(isolate, HandleDereferenceGuard::DISALLOW);
+ DisallowHeapAllocation no_allocation;
+ DisallowHandleAllocation no_handles;
+ DisallowHandleDereference no_deref;
ASSERT(graph != NULL);
SmartArrayPointer<char> bailout_reason;
@@ -100,7 +99,23 @@ class CodeStubGraphBuilderBase : public HGraphBuilder {
IfBuilder checker_;
};
+ enum ArgumentClass {
+ NONE,
+ SINGLE,
+ MULTIPLE
+ };
+
+ HValue* BuildArrayConstructor(ElementsKind kind,
+ bool disable_allocation_sites,
+ ArgumentClass argument_class);
+ HValue* BuildInternalArrayConstructor(ElementsKind kind,
+ ArgumentClass argument_class);
+
private:
+ HValue* BuildArraySingleArgumentConstructor(JSArrayBuilder* builder);
+ HValue* BuildArrayNArgumentsConstructor(JSArrayBuilder* builder,
+ ElementsKind kind);
+
SmartArrayPointer<HParameter*> parameters_;
HValue* arguments_length_;
CompilationInfoWithZone info_;
@@ -148,7 +163,7 @@ bool CodeStubGraphBuilderBase::BuildGraph() {
HParameter::REGISTER_PARAMETER,
Representation::Integer32());
stack_parameter_count->set_type(HType::Smi());
- // it's essential to bind this value to the environment in case of deopt
+ // It's essential to bind this value to the environment in case of deopt.
AddInstruction(stack_parameter_count);
start_environment->Bind(param_count, stack_parameter_count);
arguments_length_ = stack_parameter_count;
@@ -169,7 +184,7 @@ bool CodeStubGraphBuilderBase::BuildGraph() {
HValue* return_value = BuildCodeStub();
// We might have extra expressions to pop from the stack in addition to the
- // arguments above
+ // arguments above.
HInstruction* stack_pop_count = stack_parameter_count;
if (descriptor_->function_mode_ == JS_FUNCTION_STUB_MODE) {
if (!stack_parameter_count->IsConstant() &&
@@ -186,11 +201,12 @@ bool CodeStubGraphBuilderBase::BuildGraph() {
}
}
- if (!current_block()->IsFinished()) {
+ if (current_block() != NULL) {
HReturn* hreturn_instruction = new(zone) HReturn(return_value,
context_,
stack_pop_count);
current_block()->Finish(hreturn_instruction);
+ set_current_block(NULL);
}
return true;
}
@@ -204,10 +220,10 @@ class CodeStubGraphBuilder: public CodeStubGraphBuilderBase {
protected:
virtual HValue* BuildCodeStub() {
- if (casted_stub()->IsMiss()) {
- return BuildCodeInitializedStub();
- } else {
+ if (casted_stub()->IsUninitialized()) {
return BuildCodeUninitializedStub();
+ } else {
+ return BuildCodeInitializedStub();
}
}
@@ -276,16 +292,17 @@ static Handle<Code> DoGenerateCode(Stub* stub) {
if (descriptor->register_param_count_ < 0) {
stub->InitializeInterfaceDescriptor(isolate, descriptor);
}
- // The miss case without stack parameters can use a light-weight stub to enter
+
+ // If we are uninitialized, we can use a light-weight stub to enter
// the runtime that is significantly faster than using the standard
// stub-failure deopt mechanism.
- if (stub->IsMiss() && descriptor->stack_parameter_count_ == NULL) {
+ if (stub->IsUninitialized() && descriptor->has_miss_handler()) {
+ ASSERT(descriptor->stack_parameter_count_ == NULL);
return stub->GenerateLightweightMissCode(isolate);
- } else {
- CodeStubGraphBuilder<Stub> builder(stub);
- LChunk* chunk = OptimizeGraph(builder.CreateGraph());
- return chunk->Codegen();
}
+ CodeStubGraphBuilder<Stub> builder(stub);
+ LChunk* chunk = OptimizeGraph(builder.CreateGraph());
+ return chunk->Codegen();
}
@@ -358,7 +375,6 @@ Handle<Code> FastCloneShallowArrayStub::GenerateCode() {
template <>
HValue* CodeStubGraphBuilder<FastCloneShallowObjectStub>::BuildCodeStub() {
Zone* zone = this->zone();
- Factory* factory = isolate()->factory();
HValue* undefined = graph()->GetConstantUndefined();
HInstruction* boilerplate =
@@ -383,24 +399,17 @@ HValue* CodeStubGraphBuilder<FastCloneShallowObjectStub>::BuildCodeStub() {
HValue* size_in_bytes =
AddInstruction(new(zone) HConstant(size, Representation::Integer32()));
HAllocate::Flags flags = HAllocate::CAN_ALLOCATE_IN_NEW_SPACE;
- if (FLAG_pretenure_literals) {
+ if (isolate()->heap()->ShouldGloballyPretenure()) {
flags = static_cast<HAllocate::Flags>(
flags | HAllocate::CAN_ALLOCATE_IN_OLD_POINTER_SPACE);
}
- HInstruction* object =
- AddInstruction(new(zone) HAllocate(context(),
- size_in_bytes,
- HType::JSObject(),
- flags));
+
+ HInstruction* object = AddInstruction(new(zone)
+ HAllocate(context(), size_in_bytes, HType::JSObject(), flags));
for (int i = 0; i < size; i += kPointerSize) {
- HInstruction* value =
- AddInstruction(new(zone) HLoadNamedField(
- boilerplate, true, Representation::Tagged(), i));
- AddInstruction(new(zone) HStoreNamedField(object,
- factory->empty_string(),
- value, true,
- Representation::Tagged(), i));
+ HObjectAccess access = HObjectAccess::ForJSObjectOffset(i);
+ AddStore(object, access, AddLoad(boilerplate, access));
}
checker.ElseDeopt();
@@ -418,7 +427,7 @@ HValue* CodeStubGraphBuilder<KeyedLoadFastElementStub>::BuildCodeStub() {
HInstruction* load = BuildUncheckedMonomorphicElementAccess(
GetParameter(0), GetParameter(1), NULL, NULL,
casted_stub()->is_js_array(), casted_stub()->elements_kind(),
- false, NEVER_RETURN_HOLE, STANDARD_STORE, Representation::Tagged());
+ false, NEVER_RETURN_HOLE, STANDARD_STORE);
return load;
}
@@ -430,11 +439,11 @@ Handle<Code> KeyedLoadFastElementStub::GenerateCode() {
template<>
HValue* CodeStubGraphBuilder<LoadFieldStub>::BuildCodeStub() {
- Representation representation = casted_stub()->representation();
- HInstruction* load = AddInstruction(DoBuildLoadNamedField(
- GetParameter(0), casted_stub()->is_inobject(),
- representation, casted_stub()->offset()));
- return load;
+ HObjectAccess access = casted_stub()->is_inobject() ?
+ HObjectAccess::ForJSObjectOffset(casted_stub()->offset()) :
+ HObjectAccess::ForBackingStoreOffset(casted_stub()->offset());
+ return AddInstruction(BuildLoadNamedField(GetParameter(0), access,
+ casted_stub()->representation()));
}
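
These two stub hunks show the shape of the HObjectAccess refactoring used throughout this file: instead of threading an (is_inobject, representation, offset) triple through every load and store, a small immutable value object describes the access once and AddLoad/AddStore consume it. A self-contained sketch of such a descriptor; the name FieldAccess and its members are invented, and only the factory-method style mirrors HObjectAccess::ForJSObjectOffset/ForBackingStoreOffset above:

    #include <cassert>

    // Toy stand-in for HObjectAccess: an immutable description of a field
    // access (the real class also carries representation information).
    class FieldAccess {
     public:
      static FieldAccess ForInObjectOffset(int offset) {
        return FieldAccess(offset, true);
      }
      static FieldAccess ForBackingStoreOffset(int offset) {
        return FieldAccess(offset, false);
      }
      int offset() const { return offset_; }
      bool is_inobject() const { return inobject_; }

     private:
      FieldAccess(int offset, bool inobject)
          : offset_(offset), inobject_(inobject) {}
      int offset_;
      bool inobject_;
    };

    int main() {
      // Mirrors the ternary in the stubs above: build the descriptor once,
      // then hand the same value to every load or store that needs it.
      bool is_inobject = false;
      FieldAccess access = is_inobject
          ? FieldAccess::ForInObjectOffset(8)
          : FieldAccess::ForBackingStoreOffset(8);
      assert(!access.is_inobject() && access.offset() == 8);
      return 0;
    }
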
@@ -445,11 +454,11 @@ Handle<Code> LoadFieldStub::GenerateCode() {
template<>
HValue* CodeStubGraphBuilder<KeyedLoadFieldStub>::BuildCodeStub() {
- Representation representation = casted_stub()->representation();
- HInstruction* load = AddInstruction(DoBuildLoadNamedField(
- GetParameter(0), casted_stub()->is_inobject(),
- representation, casted_stub()->offset()));
- return load;
+ HObjectAccess access = casted_stub()->is_inobject() ?
+ HObjectAccess::ForJSObjectOffset(casted_stub()->offset()) :
+ HObjectAccess::ForBackingStoreOffset(casted_stub()->offset());
+ return AddInstruction(BuildLoadNamedField(GetParameter(0), access,
+ casted_stub()->representation()));
}
@@ -463,8 +472,7 @@ HValue* CodeStubGraphBuilder<KeyedStoreFastElementStub>::BuildCodeStub() {
BuildUncheckedMonomorphicElementAccess(
GetParameter(0), GetParameter(1), GetParameter(2), NULL,
casted_stub()->is_js_array(), casted_stub()->elements_kind(),
- true, NEVER_RETURN_HOLE, casted_stub()->store_mode(),
- Representation::Tagged());
+ true, NEVER_RETURN_HOLE, casted_stub()->store_mode());
return GetParameter(2);
}
@@ -487,8 +495,8 @@ HValue* CodeStubGraphBuilder<TransitionElementsKindStub>::BuildCodeStub() {
AddInstruction(new(zone) HTrapAllocationMemento(js_array));
HInstruction* array_length =
- AddInstruction(HLoadNamedField::NewArrayLength(
- zone, js_array, js_array, HType::Smi()));
+ AddLoad(js_array, HObjectAccess::ForArrayLength());
+ array_length->set_type(HType::Smi());
ElementsKind to_kind = casted_stub()->to_kind();
BuildNewSpaceArrayCheck(array_length, to_kind);
@@ -507,27 +515,19 @@ HValue* CodeStubGraphBuilder<TransitionElementsKindStub>::BuildCodeStub() {
HInstruction* elements_length =
AddInstruction(new(zone) HFixedArrayBaseLength(elements));
- HValue* new_elements =
- BuildAllocateAndInitializeElements(context(), to_kind, elements_length);
+ HValue* new_elements = BuildAllocateElementsAndInitializeElementsHeader(
+ context(), to_kind, elements_length);
BuildCopyElements(context(), elements,
casted_stub()->from_kind(), new_elements,
to_kind, array_length, elements_length);
- Factory* factory = isolate()->factory();
-
- AddInstruction(new(zone) HStoreNamedField(js_array,
- factory->elements_field_string(),
- new_elements, true,
- Representation::Tagged(),
- JSArray::kElementsOffset));
+ AddStore(js_array, HObjectAccess::ForElementsPointer(), new_elements);
if_builder.End();
- AddInstruction(new(zone) HStoreNamedField(js_array, factory->length_string(),
- map, true,
- Representation::Tagged(),
- JSArray::kMapOffset));
+ AddStore(js_array, HObjectAccess::ForMap(), map);
+
return js_array;
}
@@ -536,40 +536,56 @@ Handle<Code> TransitionElementsKindStub::GenerateCode() {
return DoGenerateCode(this);
}
-
-template <>
-HValue* CodeStubGraphBuilder<ArrayNoArgumentConstructorStub>::BuildCodeStub() {
- // ----------- S t a t e -------------
- // -- Parameter 1 : type info cell
- // -- Parameter 0 : constructor
- // -----------------------------------
+HValue* CodeStubGraphBuilderBase::BuildArrayConstructor(
+ ElementsKind kind, bool disable_allocation_sites,
+ ArgumentClass argument_class) {
+ HValue* constructor = GetParameter(ArrayConstructorStubBase::kConstructor);
+ HValue* property_cell = GetParameter(ArrayConstructorStubBase::kPropertyCell);
HInstruction* array_function = BuildGetArrayFunction(context());
- ArrayContextChecker(this,
- GetParameter(ArrayConstructorStubBase::kConstructor),
- array_function);
- // Get the right map
- // Should be a constant
- JSArrayBuilder array_builder(
- this,
- casted_stub()->elements_kind(),
- GetParameter(ArrayConstructorStubBase::kPropertyCell),
- casted_stub()->mode());
- return array_builder.AllocateEmptyArray();
+
+ ArrayContextChecker(this, constructor, array_function);
+ JSArrayBuilder array_builder(this, kind, property_cell,
+ disable_allocation_sites);
+ HValue* result = NULL;
+ switch (argument_class) {
+ case NONE:
+ result = array_builder.AllocateEmptyArray();
+ break;
+ case SINGLE:
+ result = BuildArraySingleArgumentConstructor(&array_builder);
+ break;
+ case MULTIPLE:
+ result = BuildArrayNArgumentsConstructor(&array_builder, kind);
+ break;
+ }
+ return result;
}
-Handle<Code> ArrayNoArgumentConstructorStub::GenerateCode() {
- return DoGenerateCode(this);
+HValue* CodeStubGraphBuilderBase::BuildInternalArrayConstructor(
+ ElementsKind kind, ArgumentClass argument_class) {
+ HValue* constructor = GetParameter(
+ InternalArrayConstructorStubBase::kConstructor);
+ JSArrayBuilder array_builder(this, kind, constructor);
+
+ HValue* result = NULL;
+ switch (argument_class) {
+ case NONE:
+ result = array_builder.AllocateEmptyArray();
+ break;
+ case SINGLE:
+ result = BuildArraySingleArgumentConstructor(&array_builder);
+ break;
+ case MULTIPLE:
+ result = BuildArrayNArgumentsConstructor(&array_builder, kind);
+ break;
+ }
+ return result;
}
-template <>
-HValue* CodeStubGraphBuilder<ArraySingleArgumentConstructorStub>::
- BuildCodeStub() {
- HInstruction* array_function = BuildGetArrayFunction(context());
- ArrayContextChecker(this,
- GetParameter(ArrayConstructorStubBase::kConstructor),
- array_function);
+HValue* CodeStubGraphBuilderBase::BuildArraySingleArgumentConstructor(
+ JSArrayBuilder* array_builder) {
// Smi check and range check on the input arg.
HValue* constant_one = graph()->GetConstant1();
HValue* constant_zero = graph()->GetConstant0();
@@ -580,19 +596,13 @@ HValue* CodeStubGraphBuilder<ArraySingleArgumentConstructorStub>::
new(zone()) HAccessArgumentsAt(elements, constant_one, constant_zero));
HConstant* max_alloc_length =
- new(zone()) HConstant(JSObject::kInitialMaxFastElementArray,
- Representation::Tagged());
+ new(zone()) HConstant(JSObject::kInitialMaxFastElementArray);
AddInstruction(max_alloc_length);
const int initial_capacity = JSArray::kPreallocatedArrayElements;
- HConstant* initial_capacity_node =
- new(zone()) HConstant(initial_capacity, Representation::Tagged());
+ HConstant* initial_capacity_node = new(zone()) HConstant(initial_capacity);
AddInstruction(initial_capacity_node);
- // Since we're forcing Integer32 representation for this HBoundsCheck,
- // there's no need to Smi-check the index.
- HBoundsCheck* checked_arg = AddBoundsCheck(argument, max_alloc_length,
- ALLOW_SMI_KEY,
- Representation::Tagged());
+ HBoundsCheck* checked_arg = AddBoundsCheck(argument, max_alloc_length);
IfBuilder if_builder(this);
if_builder.IfCompare(checked_arg, constant_zero, Token::EQ);
if_builder.Then();
@@ -606,46 +616,23 @@ HValue* CodeStubGraphBuilder<ArraySingleArgumentConstructorStub>::
// Figure out total size
HValue* length = Pop();
HValue* capacity = Pop();
-
- JSArrayBuilder array_builder(
- this,
- casted_stub()->elements_kind(),
- GetParameter(ArrayConstructorStubBase::kPropertyCell),
- casted_stub()->mode());
- return array_builder.AllocateArray(capacity, length, true);
-}
-
-
-Handle<Code> ArraySingleArgumentConstructorStub::GenerateCode() {
- return DoGenerateCode(this);
+ return array_builder->AllocateArray(capacity, length, true);
}
-template <>
-HValue* CodeStubGraphBuilder<ArrayNArgumentsConstructorStub>::BuildCodeStub() {
- HInstruction* array_function = BuildGetArrayFunction(context());
- ArrayContextChecker(this,
- GetParameter(ArrayConstructorStubBase::kConstructor),
- array_function);
- ElementsKind kind = casted_stub()->elements_kind();
- HValue* length = GetArgumentsLength();
-
- JSArrayBuilder array_builder(
- this,
- kind,
- GetParameter(ArrayConstructorStubBase::kPropertyCell),
- casted_stub()->mode());
-
+HValue* CodeStubGraphBuilderBase::BuildArrayNArgumentsConstructor(
+ JSArrayBuilder* array_builder, ElementsKind kind) {
  // We need to fill with the hole if it's a smi array in the multi-argument
  // case, because we might have to bail out while copying arguments into
  // the array if they aren't compatible with a smi array.
  // If it's a double array there is no problem, and if it's a fast (tagged)
  // array there is no problem either, because doubles are boxed.
+ HValue* length = GetArgumentsLength();
bool fill_with_hole = IsFastSmiElementsKind(kind);
- HValue* new_object = array_builder.AllocateArray(length,
- length,
- fill_with_hole);
- HValue* elements = array_builder.GetElementsLocation();
+ HValue* new_object = array_builder->AllocateArray(length,
+ length,
+ fill_with_hole);
+ HValue* elements = array_builder->GetElementsLocation();
ASSERT(elements != NULL);
// Now populate the elements correctly.
@@ -659,39 +646,108 @@ HValue* CodeStubGraphBuilder<ArrayNArgumentsConstructorStub>::BuildCodeStub() {
HInstruction* argument = AddInstruction(new(zone()) HAccessArgumentsAt(
argument_elements, length, key));
- // Checks to prevent incompatible stores
- if (IsFastSmiElementsKind(kind)) {
- AddInstruction(new(zone()) HCheckSmi(argument));
- }
-
AddInstruction(new(zone()) HStoreKeyed(elements, key, argument, kind));
builder.EndBody();
return new_object;
}
+template <>
+HValue* CodeStubGraphBuilder<ArrayNoArgumentConstructorStub>::BuildCodeStub() {
+ ElementsKind kind = casted_stub()->elements_kind();
+ bool disable_allocation_sites = casted_stub()->disable_allocation_sites();
+ return BuildArrayConstructor(kind, disable_allocation_sites, NONE);
+}
+
+
+Handle<Code> ArrayNoArgumentConstructorStub::GenerateCode() {
+ return DoGenerateCode(this);
+}
+
+
+template <>
+HValue* CodeStubGraphBuilder<ArraySingleArgumentConstructorStub>::
+ BuildCodeStub() {
+ ElementsKind kind = casted_stub()->elements_kind();
+ bool disable_allocation_sites = casted_stub()->disable_allocation_sites();
+ return BuildArrayConstructor(kind, disable_allocation_sites, SINGLE);
+}
+
+
+Handle<Code> ArraySingleArgumentConstructorStub::GenerateCode() {
+ return DoGenerateCode(this);
+}
+
+
+template <>
+HValue* CodeStubGraphBuilder<ArrayNArgumentsConstructorStub>::BuildCodeStub() {
+ ElementsKind kind = casted_stub()->elements_kind();
+ bool disable_allocation_sites = casted_stub()->disable_allocation_sites();
+ return BuildArrayConstructor(kind, disable_allocation_sites, MULTIPLE);
+}
+
+
Handle<Code> ArrayNArgumentsConstructorStub::GenerateCode() {
return DoGenerateCode(this);
}
template <>
-HValue* CodeStubGraphBuilder<CompareNilICStub>::BuildCodeUninitializedStub() {
+HValue* CodeStubGraphBuilder<InternalArrayNoArgumentConstructorStub>::
+ BuildCodeStub() {
+ ElementsKind kind = casted_stub()->elements_kind();
+ return BuildInternalArrayConstructor(kind, NONE);
+}
+
+
+Handle<Code> InternalArrayNoArgumentConstructorStub::GenerateCode() {
+ return DoGenerateCode(this);
+}
+
+
+template <>
+HValue* CodeStubGraphBuilder<InternalArraySingleArgumentConstructorStub>::
+ BuildCodeStub() {
+ ElementsKind kind = casted_stub()->elements_kind();
+ return BuildInternalArrayConstructor(kind, SINGLE);
+}
+
+
+Handle<Code> InternalArraySingleArgumentConstructorStub::GenerateCode() {
+ return DoGenerateCode(this);
+}
+
+
+template <>
+HValue* CodeStubGraphBuilder<InternalArrayNArgumentsConstructorStub>::
+ BuildCodeStub() {
+ ElementsKind kind = casted_stub()->elements_kind();
+ return BuildInternalArrayConstructor(kind, MULTIPLE);
+}
+
+
+Handle<Code> InternalArrayNArgumentsConstructorStub::GenerateCode() {
+ return DoGenerateCode(this);
+}
+
+
+template <>
+HValue* CodeStubGraphBuilder<CompareNilICStub>::BuildCodeInitializedStub() {
CompareNilICStub* stub = casted_stub();
HIfContinuation continuation;
Handle<Map> sentinel_map(graph()->isolate()->heap()->meta_map());
- BuildCompareNil(GetParameter(0), stub->GetKind(),
+ BuildCompareNil(GetParameter(0),
stub->GetTypes(), sentinel_map,
RelocInfo::kNoPosition, &continuation);
IfBuilder if_nil(this, &continuation);
if_nil.Then();
if (continuation.IsFalseReachable()) {
if_nil.Else();
- if_nil.Return(graph()->GetConstantSmi0());
+ if_nil.Return(graph()->GetConstant0());
}
if_nil.End();
return continuation.IsTrueReachable()
- ? graph()->GetConstantSmi1()
+ ? graph()->GetConstant1()
: graph()->GetConstantUndefined();
}
@@ -700,4 +756,24 @@ Handle<Code> CompareNilICStub::GenerateCode() {
return DoGenerateCode(this);
}
+
+template <>
+HValue* CodeStubGraphBuilder<ToBooleanStub>::BuildCodeInitializedStub() {
+ ToBooleanStub* stub = casted_stub();
+
+ IfBuilder if_true(this);
+ if_true.If<HBranch>(GetParameter(0), stub->GetTypes());
+ if_true.Then();
+ if_true.Return(graph()->GetConstant1());
+ if_true.Else();
+ if_true.End();
+ return graph()->GetConstant0();
+}
+
+
+Handle<Code> ToBooleanStub::GenerateCode() {
+ return DoGenerateCode(this);
+}
+
+
} } // namespace v8::internal
diff --git a/deps/v8/src/code-stubs.cc b/deps/v8/src/code-stubs.cc
index 312febc1aa..6b6e25019d 100644
--- a/deps/v8/src/code-stubs.cc
+++ b/deps/v8/src/code-stubs.cc
@@ -45,7 +45,8 @@ CodeStubInterfaceDescriptor::CodeStubInterfaceDescriptor()
function_mode_(NOT_JS_FUNCTION_STUB_MODE),
register_params_(NULL),
deoptimization_handler_(NULL),
- miss_handler_(IC_Utility(IC::kUnreachable), Isolate::Current()) { }
+ miss_handler_(IC_Utility(IC::kUnreachable), Isolate::Current()),
+ has_miss_handler_(false) { }
bool CodeStub::FindCodeInCache(Code** code_out, Isolate* isolate) {
@@ -304,6 +305,27 @@ void BinaryOpStub::GenerateStringStub(MacroAssembler* masm) {
}
+InlineCacheState ICCompareStub::GetICState() {
+ CompareIC::State state = Max(left_, right_);
+ switch (state) {
+ case CompareIC::UNINITIALIZED:
+ return ::v8::internal::UNINITIALIZED;
+ case CompareIC::SMI:
+ case CompareIC::NUMBER:
+ case CompareIC::INTERNALIZED_STRING:
+ case CompareIC::STRING:
+ case CompareIC::UNIQUE_NAME:
+ case CompareIC::OBJECT:
+ case CompareIC::KNOWN_OBJECT:
+ return MONOMORPHIC;
+ case CompareIC::GENERIC:
+ return ::v8::internal::GENERIC;
+ }
+ UNREACHABLE();
+ return ::v8::internal::UNINITIALIZED;
+}
+
+
void ICCompareStub::AddToSpecialCache(Handle<Code> new_object) {
ASSERT(*known_map_ != NULL);
Isolate* isolate = new_object->GetIsolate();
@@ -410,36 +432,44 @@ void ICCompareStub::Generate(MacroAssembler* masm) {
void CompareNilICStub::Record(Handle<Object> object) {
ASSERT(types_ != Types::FullCompare());
- if (equality_kind_ == kStrictEquality) {
- // When testing for strict equality only one value will evaluate to true
- types_.RemoveAll();
- types_.Add((nil_value_ == kNullValue) ? NULL_TYPE:
- UNDEFINED);
+ if (object->IsNull()) {
+ types_.Add(NULL_TYPE);
+ } else if (object->IsUndefined()) {
+ types_.Add(UNDEFINED);
+ } else if (object->IsUndetectableObject() ||
+ object->IsOddball() ||
+ !object->IsHeapObject()) {
+ types_ = Types::FullCompare();
+ } else if (IsMonomorphic()) {
+ types_ = Types::FullCompare();
} else {
- if (object->IsNull()) {
- types_.Add(NULL_TYPE);
- } else if (object->IsUndefined()) {
- types_.Add(UNDEFINED);
- } else if (object->IsUndetectableObject() ||
- object->IsOddball() ||
- !object->IsHeapObject()) {
- types_ = Types::FullCompare();
- } else if (IsMonomorphic()) {
- types_ = Types::FullCompare();
- } else {
- types_.Add(MONOMORPHIC_MAP);
- }
+ types_.Add(MONOMORPHIC_MAP);
}
}
+void CompareNilICStub::Types::TraceTransition(Types to) const {
+ #ifdef DEBUG
+ if (!FLAG_trace_ic) return;
+ char buffer[100];
+ NoAllocationStringAllocator allocator(buffer,
+ static_cast<unsigned>(sizeof(buffer)));
+ StringStream stream(&allocator);
+ stream.Add("[CompareNilIC : ");
+ Print(&stream);
+ stream.Add("=>");
+ to.Print(&stream);
+ stream.Add("]\n");
+ stream.OutputToStdOut();
+ #endif
+}
+
+
void CompareNilICStub::PrintName(StringStream* stream) {
stream->Add("CompareNilICStub_");
types_.Print(stream);
stream->Add((nil_value_ == kNullValue) ? "(NullValue|":
"(UndefinedValue|");
- stream->Add((equality_kind_ == kStrictEquality) ? "StrictEquality)":
- "NonStrictEquality)");
}
@@ -554,6 +584,14 @@ void CallConstructStub::PrintName(StringStream* stream) {
}
+bool ToBooleanStub::Record(Handle<Object> object) {
+ Types old_types(types_);
+ bool to_boolean_value = types_.Record(object);
+ old_types.TraceTransition(types_);
+ return to_boolean_value;
+}
+
+
void ToBooleanStub::PrintName(StringStream* stream) {
stream->Add("ToBooleanStub_");
types_.Print(stream);
@@ -577,17 +615,19 @@ void ToBooleanStub::Types::Print(StringStream* stream) const {
void ToBooleanStub::Types::TraceTransition(Types to) const {
+ #ifdef DEBUG
if (!FLAG_trace_ic) return;
char buffer[100];
NoAllocationStringAllocator allocator(buffer,
static_cast<unsigned>(sizeof(buffer)));
StringStream stream(&allocator);
- stream.Add("[ToBooleanIC (");
+ stream.Add("[ToBooleanIC : ");
Print(&stream);
- stream.Add("->");
+ stream.Add("=>");
to.Print(&stream);
- stream.Add(")]\n");
+ stream.Add("]\n");
stream.OutputToStdOut();
+ #endif
}
@@ -749,4 +789,19 @@ ArrayConstructorStub::ArrayConstructorStub(Isolate* isolate,
}
+void InternalArrayConstructorStubBase::InstallDescriptors(Isolate* isolate) {
+ InternalArrayNoArgumentConstructorStub stub1(FAST_ELEMENTS);
+ InstallDescriptor(isolate, &stub1);
+ InternalArraySingleArgumentConstructorStub stub2(FAST_ELEMENTS);
+ InstallDescriptor(isolate, &stub2);
+ InternalArrayNArgumentsConstructorStub stub3(FAST_ELEMENTS);
+ InstallDescriptor(isolate, &stub3);
+}
+
+InternalArrayConstructorStub::InternalArrayConstructorStub(
+ Isolate* isolate) {
+ InternalArrayConstructorStubBase::GenerateStubsAheadOfTime(isolate);
+}
+
+
} } // namespace v8::internal
diff --git a/deps/v8/src/code-stubs.h b/deps/v8/src/code-stubs.h
index aa6a410195..0ea7ac96b5 100644
--- a/deps/v8/src/code-stubs.h
+++ b/deps/v8/src/code-stubs.h
@@ -77,6 +77,9 @@ namespace internal {
V(ArrayNoArgumentConstructor) \
V(ArraySingleArgumentConstructor) \
V(ArrayNArgumentsConstructor) \
+ V(InternalArrayNoArgumentConstructor) \
+ V(InternalArraySingleArgumentConstructor) \
+ V(InternalArrayNArgumentsConstructor) \
V(KeyedStoreElement) \
V(DebuggerStatement) \
V(NameDictionaryLookup) \
@@ -85,6 +88,7 @@ namespace internal {
V(StoreArrayLiteralElement) \
V(StubFailureTrampoline) \
V(ArrayConstructor) \
+ V(InternalArrayConstructor) \
V(ProfileEntryHook) \
/* IC Handler stubs */ \
V(LoadField) \
@@ -277,7 +281,6 @@ struct CodeStubInterfaceDescriptor {
StubFunctionMode function_mode_;
Register* register_params_;
Address deoptimization_handler_;
- ExternalReference miss_handler_;
int environment_length() const {
if (stack_parameter_count_ != NULL) {
@@ -287,6 +290,24 @@ struct CodeStubInterfaceDescriptor {
}
bool initialized() const { return register_param_count_ >= 0; }
+
+ void SetMissHandler(ExternalReference handler) {
+ miss_handler_ = handler;
+ has_miss_handler_ = true;
+ }
+
+ ExternalReference miss_handler() {
+ ASSERT(has_miss_handler_);
+ return miss_handler_;
+ }
+
+ bool has_miss_handler() {
+ return has_miss_handler_;
+ }
+
+ private:
+ ExternalReference miss_handler_;
+ bool has_miss_handler_;
};
// A helper to make up for the fact that type Register is not fully
@@ -300,12 +321,12 @@ struct CodeStubInterfaceDescriptor {
class HydrogenCodeStub : public CodeStub {
public:
enum InitializationState {
- CODE_STUB_IS_NOT_MISS,
- CODE_STUB_IS_MISS
+ UNINITIALIZED,
+ INITIALIZED
};
- explicit HydrogenCodeStub(InitializationState state) {
- is_miss_ = (state == CODE_STUB_IS_MISS);
+ explicit HydrogenCodeStub(InitializationState state = INITIALIZED) {
+ is_uninitialized_ = (state == UNINITIALIZED);
}
virtual Code::Kind GetCodeKind() const { return Code::STUB; }
@@ -314,7 +335,7 @@ class HydrogenCodeStub : public CodeStub {
return isolate->code_stub_interface_descriptor(MajorKey());
}
- bool IsMiss() { return is_miss_; }
+ bool IsUninitialized() { return is_uninitialized_; }
template<class SubClass>
static Handle<Code> GetUninitialized(Isolate* isolate) {
@@ -339,11 +360,11 @@ class HydrogenCodeStub : public CodeStub {
void GenerateLightweightMiss(MacroAssembler* masm);
virtual int MinorKey() {
- return IsMissBits::encode(is_miss_) |
+ return IsMissBits::encode(is_uninitialized_) |
MinorKeyBits::encode(NotMissMinorKey());
}
- bool is_miss_;
+ bool is_uninitialized_;
};
@@ -516,8 +537,7 @@ class FastCloneShallowArrayStub : public HydrogenCodeStub {
FastCloneShallowArrayStub(Mode mode,
AllocationSiteMode allocation_site_mode,
int length)
- : HydrogenCodeStub(CODE_STUB_IS_NOT_MISS),
- mode_(mode),
+ : mode_(mode),
allocation_site_mode_(allocation_site_mode),
length_((mode == COPY_ON_WRITE_ELEMENTS) ? 0 : length) {
ASSERT_GE(length_, 0);
@@ -577,8 +597,7 @@ class FastCloneShallowObjectStub : public HydrogenCodeStub {
static const int kMaximumClonedProperties = 6;
explicit FastCloneShallowObjectStub(int length)
- : HydrogenCodeStub(CODE_STUB_IS_NOT_MISS),
- length_(length) {
+ : length_(length) {
ASSERT_GE(length_, 0);
ASSERT_LE(length_, kMaximumClonedProperties);
}
@@ -655,9 +674,23 @@ class ArrayConstructorStub: public PlatformCodeStub {
};
+class InternalArrayConstructorStub: public PlatformCodeStub {
+ public:
+ explicit InternalArrayConstructorStub(Isolate* isolate);
+
+ void Generate(MacroAssembler* masm);
+
+ private:
+ virtual CodeStub::Major MajorKey() { return InternalArrayConstructor; }
+ virtual int MinorKey() { return 0; }
+
+ void GenerateCase(MacroAssembler* masm, ElementsKind kind);
+};
+
+
class MathPowStub: public PlatformCodeStub {
public:
- enum ExponentType { INTEGER, DOUBLE, TAGGED, ON_STACK};
+ enum ExponentType { INTEGER, DOUBLE, TAGGED, ON_STACK };
explicit MathPowStub(ExponentType exponent_type)
: exponent_type_(exponent_type) { }
@@ -763,7 +796,7 @@ class HICStub: public HydrogenCodeStub {
virtual InlineCacheState GetICState() { return MONOMORPHIC; }
protected:
- HICStub() : HydrogenCodeStub(CODE_STUB_IS_NOT_MISS) { }
+ HICStub() { }
class KindBits: public BitField<Code::Kind, 0, 4> {};
virtual Code::Kind kind() const = 0;
};
@@ -870,7 +903,9 @@ class BinaryOpStub: public PlatformCodeStub {
platform_specific_bit_(false),
left_type_(BinaryOpIC::UNINITIALIZED),
right_type_(BinaryOpIC::UNINITIALIZED),
- result_type_(BinaryOpIC::UNINITIALIZED) {
+ result_type_(BinaryOpIC::UNINITIALIZED),
+ has_fixed_right_arg_(false),
+ encoded_right_arg_(encode_arg_value(1)) {
Initialize();
ASSERT(OpBits::is_valid(Token::NUM_TOKENS));
}
@@ -879,13 +914,17 @@ class BinaryOpStub: public PlatformCodeStub {
int key,
BinaryOpIC::TypeInfo left_type,
BinaryOpIC::TypeInfo right_type,
- BinaryOpIC::TypeInfo result_type = BinaryOpIC::UNINITIALIZED)
+ BinaryOpIC::TypeInfo result_type,
+ bool has_fixed_right_arg,
+ int32_t fixed_right_arg_value)
: op_(OpBits::decode(key)),
mode_(ModeBits::decode(key)),
platform_specific_bit_(PlatformSpecificBits::decode(key)),
left_type_(left_type),
right_type_(right_type),
- result_type_(result_type) { }
+ result_type_(result_type),
+ has_fixed_right_arg_(has_fixed_right_arg),
+ encoded_right_arg_(encode_arg_value(fixed_right_arg_value)) { }
static void decode_types_from_minor_key(int minor_key,
BinaryOpIC::TypeInfo* left_type,
@@ -903,6 +942,24 @@ class BinaryOpStub: public PlatformCodeStub {
return static_cast<Token::Value>(OpBits::decode(minor_key));
}
+ static bool decode_has_fixed_right_arg_from_minor_key(int minor_key) {
+ return HasFixedRightArgBits::decode(minor_key);
+ }
+
+ static int decode_fixed_right_arg_value_from_minor_key(int minor_key) {
+ return decode_arg_value(FixedRightArgValueBits::decode(minor_key));
+ }
+
+ int fixed_right_arg_value() const {
+ return decode_arg_value(encoded_right_arg_);
+ }
+
+ static bool can_encode_arg_value(int32_t value) {
+ return value > 0 &&
+ IsPowerOf2(value) &&
+ FixedRightArgValueBits::is_valid(WhichPowerOf2(value));
+ }
+
enum SmiCodeGenerateHeapNumberResults {
ALLOW_HEAPNUMBER_RESULTS,
NO_HEAPNUMBER_RESULTS
@@ -918,15 +975,31 @@ class BinaryOpStub: public PlatformCodeStub {
BinaryOpIC::TypeInfo right_type_;
BinaryOpIC::TypeInfo result_type_;
+ bool has_fixed_right_arg_;
+ int encoded_right_arg_;
+
+ static int encode_arg_value(int32_t value) {
+ ASSERT(can_encode_arg_value(value));
+ return WhichPowerOf2(value);
+ }
+
+ static int32_t decode_arg_value(int value) {
+ return 1 << value;
+ }
+
virtual void PrintName(StringStream* stream);
- // Minor key encoding in 19 bits TTTRRRLLLSOOOOOOOMM.
+ // Minor key encoding in all 25 bits FFFFFHTTTRRRLLLPOOOOOOOMM.
+ // Note: We actually do not need 7 bits for the operation, just 4 bits to
+ // encode ADD, SUB, MUL, DIV, MOD, BIT_OR, BIT_AND, BIT_XOR, SAR, SHL, SHR.
class ModeBits: public BitField<OverwriteMode, 0, 2> {};
class OpBits: public BitField<Token::Value, 2, 7> {};
class PlatformSpecificBits: public BitField<bool, 9, 1> {};
class LeftTypeBits: public BitField<BinaryOpIC::TypeInfo, 10, 3> {};
class RightTypeBits: public BitField<BinaryOpIC::TypeInfo, 13, 3> {};
class ResultTypeBits: public BitField<BinaryOpIC::TypeInfo, 16, 3> {};
+ class HasFixedRightArgBits: public BitField<bool, 19, 1> {};
+ class FixedRightArgValueBits: public BitField<int, 20, 5> {};
Major MajorKey() { return BinaryOp; }
int MinorKey() {
@@ -935,7 +1008,9 @@ class BinaryOpStub: public PlatformCodeStub {
| PlatformSpecificBits::encode(platform_specific_bit_)
| LeftTypeBits::encode(left_type_)
| RightTypeBits::encode(right_type_)
- | ResultTypeBits::encode(result_type_);
+ | ResultTypeBits::encode(result_type_)
+ | HasFixedRightArgBits::encode(has_fixed_right_arg_)
+ | FixedRightArgValueBits::encode(encoded_right_arg_);
}
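
Two encoding tricks meet in this widened minor key. BitField<T, shift, size> packs typed slices into one integer, which is how HasFixedRightArgBits (bit 19) and FixedRightArgValueBits (bits 20 through 24) join the existing fields; and the fixed right argument must be a power of two precisely so that only its exponent needs storing, since five value bits cover exponents 0 through 31 and therefore any power of two up to 2^31. A self-contained sketch with a minimal BitField stand-in (the hard-coded exponent replaces WhichPowerOf2):

    #include <cassert>
    #include <cstdint>

    // Minimal stand-in for v8's BitField<T, shift, size> helper.
    template <class T, int shift, int size>
    struct BitField {
      static const uint32_t kMask = ((1u << size) - 1u) << shift;
      static uint32_t encode(T value) {
        return static_cast<uint32_t>(value) << shift;
      }
      static T decode(uint32_t key) {
        return static_cast<T>((key & kMask) >> shift);
      }
    };

    typedef BitField<bool, 19, 1> HasFixedRightArgBits;
    typedef BitField<int, 20, 5> FixedRightArgValueBits;

    int main() {
      int32_t fixed_arg = 8;
      int exponent = 3;  // WhichPowerOf2(8) in the real code
      uint32_t minor_key = HasFixedRightArgBits::encode(true) |
                           FixedRightArgValueBits::encode(exponent);
      // Decoding recovers the exponent; 1 << exponent recovers the value.
      assert(HasFixedRightArgBits::decode(minor_key));
      assert((1 << FixedRightArgValueBits::decode(minor_key)) == fixed_arg);
      return 0;
    }
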
@@ -1005,6 +1080,8 @@ class ICCompareStub: public PlatformCodeStub {
return static_cast<CompareIC::State>(HandlerStateField::decode(minor_key));
}
+ virtual InlineCacheState GetICState();
+
private:
class OpField: public BitField<int, 0, 3> { };
class LeftStateField: public BitField<int, 3, 4> { };
@@ -1069,6 +1146,7 @@ class CompareNilICStub : public HydrogenCodeStub {
}
void Print(StringStream* stream) const;
+ void TraceTransition(Types to) const;
};
// At most 6 different types can be distinguished, because the Code object
@@ -1076,23 +1154,21 @@ class CompareNilICStub : public HydrogenCodeStub {
  // only has room for a single byte to hold a set of these
  // boolean flags we need to store. :-P
STATIC_ASSERT(NUMBER_OF_TYPES <= 6);
- CompareNilICStub(EqualityKind kind, NilValue nil, Types types)
- : HydrogenCodeStub(CODE_STUB_IS_NOT_MISS), types_(types) {
- equality_kind_ = kind;
+ CompareNilICStub(NilValue nil, Types types = Types())
+ : types_(types) {
nil_value_ = nil;
}
- explicit CompareNilICStub(Code::ExtraICState ic_state)
- : HydrogenCodeStub(CODE_STUB_IS_NOT_MISS) {
- equality_kind_ = EqualityKindField::decode(ic_state);
+ CompareNilICStub(Code::ExtraICState ic_state,
+ InitializationState init_state = INITIALIZED)
+ : HydrogenCodeStub(init_state) {
nil_value_ = NilValueField::decode(ic_state);
types_ = Types(ExtractTypesFromExtraICState(ic_state));
}
static Handle<Code> GetUninitialized(Isolate* isolate,
- EqualityKind kind,
NilValue nil) {
- return CompareNilICStub(kind, nil, CODE_STUB_IS_MISS).GetCode(isolate);
+ return CompareNilICStub(nil, UNINITIALIZED).GetCode(isolate);
}
virtual void InitializeInterfaceDescriptor(
@@ -1100,8 +1176,7 @@ class CompareNilICStub : public HydrogenCodeStub {
CodeStubInterfaceDescriptor* descriptor);
static void InitializeForIsolate(Isolate* isolate) {
- CompareNilICStub compare_stub(kStrictEquality, kNullValue,
- CODE_STUB_IS_MISS);
+ CompareNilICStub compare_stub(kNullValue, UNINITIALIZED);
compare_stub.InitializeInterfaceDescriptor(
isolate,
isolate->code_stub_interface_descriptor(CodeStub::CompareNilIC));
@@ -1121,53 +1196,38 @@ class CompareNilICStub : public HydrogenCodeStub {
Handle<Code> GenerateCode();
- // extra ic state = nil_value | equality_kind | type_n-1 | ... | type_0
+ // extra ic state = nil_value | type_n-1 | ... | type_0
virtual Code::ExtraICState GetExtraICState() {
return NilValueField::encode(nil_value_) |
- EqualityKindField::encode(equality_kind_) |
types_.ToIntegral();
}
static byte ExtractTypesFromExtraICState(
Code::ExtraICState state) {
- return state & ((1<<NUMBER_OF_TYPES)-1);
+ return state & ((1 << NUMBER_OF_TYPES) - 1);
}
void Record(Handle<Object> object);
bool IsMonomorphic() const { return types_.Contains(MONOMORPHIC_MAP); }
- EqualityKind GetKind() const { return equality_kind_; }
NilValue GetNilValue() const { return nil_value_; }
Types GetTypes() const { return types_; }
void ClearTypes() { types_.RemoveAll(); }
- void SetKind(EqualityKind kind) { equality_kind_ = kind; }
virtual void PrintName(StringStream* stream);
private:
friend class CompareNilIC;
- CompareNilICStub(EqualityKind kind, NilValue nil,
- InitializationState init_state)
- : HydrogenCodeStub(init_state), types_(0) {
- equality_kind_ = kind;
- nil_value_ = nil;
- }
-
- CompareNilICStub(Code::ExtraICState ic_state, InitializationState init_state)
+ CompareNilICStub(NilValue nil, InitializationState init_state)
: HydrogenCodeStub(init_state) {
- equality_kind_ = EqualityKindField::decode(ic_state);
- nil_value_ = NilValueField::decode(ic_state);
- types_ = Types(ExtractTypesFromExtraICState(ic_state));
+ nil_value_ = nil;
}
- class EqualityKindField : public BitField<EqualityKind, NUMBER_OF_TYPES, 1> {
- };
- class NilValueField : public BitField<NilValue, NUMBER_OF_TYPES+1, 1> {};
+ class NilValueField : public BitField<NilValue, NUMBER_OF_TYPES, 1> {};
virtual CodeStub::Major MajorKey() { return CompareNilIC; }
virtual int NotMissMinorKey() { return GetExtraICState(); }
- EqualityKind equality_kind_;
NilValue nil_value_;
Types types_;
@@ -1567,8 +1627,7 @@ class KeyedLoadDictionaryElementStub : public PlatformCodeStub {
class KeyedLoadFastElementStub : public HydrogenCodeStub {
public:
- KeyedLoadFastElementStub(bool is_js_array, ElementsKind elements_kind)
- : HydrogenCodeStub(CODE_STUB_IS_NOT_MISS) {
+ KeyedLoadFastElementStub(bool is_js_array, ElementsKind elements_kind) {
bit_field_ = ElementsKindBits::encode(elements_kind) |
IsJSArrayBits::encode(is_js_array);
}
@@ -1603,8 +1662,7 @@ class KeyedStoreFastElementStub : public HydrogenCodeStub {
public:
KeyedStoreFastElementStub(bool is_js_array,
ElementsKind elements_kind,
- KeyedAccessStoreMode mode)
- : HydrogenCodeStub(CODE_STUB_IS_NOT_MISS) {
+ KeyedAccessStoreMode mode) {
bit_field_ = ElementsKindBits::encode(elements_kind) |
IsJSArrayBits::encode(is_js_array) |
StoreModeBits::encode(mode);
@@ -1644,8 +1702,7 @@ class KeyedStoreFastElementStub : public HydrogenCodeStub {
class TransitionElementsKindStub : public HydrogenCodeStub {
public:
TransitionElementsKindStub(ElementsKind from_kind,
- ElementsKind to_kind)
- : HydrogenCodeStub(CODE_STUB_IS_NOT_MISS) {
+ ElementsKind to_kind) {
bit_field_ = FromKindBits::encode(from_kind) |
ToKindBits::encode(to_kind);
}
@@ -1678,20 +1735,22 @@ class TransitionElementsKindStub : public HydrogenCodeStub {
class ArrayConstructorStubBase : public HydrogenCodeStub {
public:
- ArrayConstructorStubBase(ElementsKind kind, AllocationSiteMode mode)
- : HydrogenCodeStub(CODE_STUB_IS_NOT_MISS) {
+ ArrayConstructorStubBase(ElementsKind kind, bool disable_allocation_sites) {
+ // It only makes sense to override local allocation site behavior
+ // if there is a difference between the global allocation site policy
+ // for an ElementsKind and the desired usage of the stub.
+ ASSERT(!disable_allocation_sites ||
+ AllocationSiteInfo::GetMode(kind) == TRACK_ALLOCATION_SITE);
bit_field_ = ElementsKindBits::encode(kind) |
- AllocationSiteModeBits::encode(mode == TRACK_ALLOCATION_SITE);
+ DisableAllocationSitesBits::encode(disable_allocation_sites);
}
ElementsKind elements_kind() const {
return ElementsKindBits::decode(bit_field_);
}
- AllocationSiteMode mode() const {
- return AllocationSiteModeBits::decode(bit_field_)
- ? TRACK_ALLOCATION_SITE
- : DONT_TRACK_ALLOCATION_SITE;
+ bool disable_allocation_sites() const {
+ return DisableAllocationSitesBits::decode(bit_field_);
}
virtual bool IsPregenerated() { return true; }
@@ -1706,7 +1765,7 @@ class ArrayConstructorStubBase : public HydrogenCodeStub {
int NotMissMinorKey() { return bit_field_; }
class ElementsKindBits: public BitField<ElementsKind, 0, 8> {};
- class AllocationSiteModeBits: public BitField<bool, 8, 1> {};
+ class DisableAllocationSitesBits: public BitField<bool, 8, 1> {};
uint32_t bit_field_;
DISALLOW_COPY_AND_ASSIGN(ArrayConstructorStubBase);
@@ -1717,8 +1776,8 @@ class ArrayNoArgumentConstructorStub : public ArrayConstructorStubBase {
public:
ArrayNoArgumentConstructorStub(
ElementsKind kind,
- AllocationSiteMode mode = TRACK_ALLOCATION_SITE)
- : ArrayConstructorStubBase(kind, mode) {
+ bool disable_allocation_sites = false)
+ : ArrayConstructorStubBase(kind, disable_allocation_sites) {
}
virtual Handle<Code> GenerateCode();
@@ -1738,8 +1797,8 @@ class ArraySingleArgumentConstructorStub : public ArrayConstructorStubBase {
public:
ArraySingleArgumentConstructorStub(
ElementsKind kind,
- AllocationSiteMode mode = TRACK_ALLOCATION_SITE)
- : ArrayConstructorStubBase(kind, mode) {
+ bool disable_allocation_sites = false)
+ : ArrayConstructorStubBase(kind, disable_allocation_sites) {
}
virtual Handle<Code> GenerateCode();
@@ -1759,8 +1818,8 @@ class ArrayNArgumentsConstructorStub : public ArrayConstructorStubBase {
public:
ArrayNArgumentsConstructorStub(
ElementsKind kind,
- AllocationSiteMode mode = TRACK_ALLOCATION_SITE) :
- ArrayConstructorStubBase(kind, mode) {
+ bool disable_allocation_sites = false)
+ : ArrayConstructorStubBase(kind, disable_allocation_sites) {
}
virtual Handle<Code> GenerateCode();
@@ -1776,6 +1835,87 @@ class ArrayNArgumentsConstructorStub : public ArrayConstructorStubBase {
};
+class InternalArrayConstructorStubBase : public HydrogenCodeStub {
+ public:
+ explicit InternalArrayConstructorStubBase(ElementsKind kind) {
+ kind_ = kind;
+ }
+
+ virtual bool IsPregenerated() { return true; }
+ static void GenerateStubsAheadOfTime(Isolate* isolate);
+ static void InstallDescriptors(Isolate* isolate);
+
+ // Parameters accessed via CodeStubGraphBuilder::GetParameter()
+ static const int kConstructor = 0;
+
+ ElementsKind elements_kind() const { return kind_; }
+
+ private:
+ int NotMissMinorKey() { return kind_; }
+
+ ElementsKind kind_;
+
+ DISALLOW_COPY_AND_ASSIGN(InternalArrayConstructorStubBase);
+};
+
+
+class InternalArrayNoArgumentConstructorStub : public
+ InternalArrayConstructorStubBase {
+ public:
+ explicit InternalArrayNoArgumentConstructorStub(ElementsKind kind)
+ : InternalArrayConstructorStubBase(kind) { }
+
+ virtual Handle<Code> GenerateCode();
+
+ virtual void InitializeInterfaceDescriptor(
+ Isolate* isolate,
+ CodeStubInterfaceDescriptor* descriptor);
+
+ private:
+ Major MajorKey() { return InternalArrayNoArgumentConstructor; }
+
+ DISALLOW_COPY_AND_ASSIGN(InternalArrayNoArgumentConstructorStub);
+};
+
+
+class InternalArraySingleArgumentConstructorStub : public
+ InternalArrayConstructorStubBase {
+ public:
+ explicit InternalArraySingleArgumentConstructorStub(ElementsKind kind)
+ : InternalArrayConstructorStubBase(kind) { }
+
+ virtual Handle<Code> GenerateCode();
+
+ virtual void InitializeInterfaceDescriptor(
+ Isolate* isolate,
+ CodeStubInterfaceDescriptor* descriptor);
+
+ private:
+ Major MajorKey() { return InternalArraySingleArgumentConstructor; }
+
+ DISALLOW_COPY_AND_ASSIGN(InternalArraySingleArgumentConstructorStub);
+};
+
+
+class InternalArrayNArgumentsConstructorStub : public
+ InternalArrayConstructorStubBase {
+ public:
+ explicit InternalArrayNArgumentsConstructorStub(ElementsKind kind)
+ : InternalArrayConstructorStubBase(kind) { }
+
+ virtual Handle<Code> GenerateCode();
+
+ virtual void InitializeInterfaceDescriptor(
+ Isolate* isolate,
+ CodeStubInterfaceDescriptor* descriptor);
+
+ private:
+ Major MajorKey() { return InternalArrayNArgumentsConstructor; }
+
+ DISALLOW_COPY_AND_ASSIGN(InternalArrayNArgumentsConstructorStub);
+};
+
+
class KeyedStoreElementStub : public PlatformCodeStub {
public:
KeyedStoreElementStub(bool is_js_array,
@@ -1811,7 +1951,7 @@ class KeyedStoreElementStub : public PlatformCodeStub {
};
-class ToBooleanStub: public PlatformCodeStub {
+class ToBooleanStub: public HydrogenCodeStub {
public:
enum Type {
UNDEFINED,
@@ -1845,31 +1985,54 @@ class ToBooleanStub: public PlatformCodeStub {
static Types no_types() { return Types(); }
static Types all_types() { return Types((1 << NUMBER_OF_TYPES) - 1); }
- explicit ToBooleanStub(Register tos, Types types = Types())
- : tos_(tos), types_(types) { }
+ explicit ToBooleanStub(Types types = Types())
+ : types_(types) { }
+ explicit ToBooleanStub(Code::ExtraICState state)
+ : types_(static_cast<byte>(state)) { }
+
+ bool Record(Handle<Object> object);
+ Types GetTypes() { return types_; }
+
+ virtual Handle<Code> GenerateCode();
+ virtual void InitializeInterfaceDescriptor(
+ Isolate* isolate,
+ CodeStubInterfaceDescriptor* descriptor);
- void Generate(MacroAssembler* masm);
virtual Code::Kind GetCodeKind() const { return Code::TO_BOOLEAN_IC; }
virtual void PrintName(StringStream* stream);
virtual bool SometimesSetsUpAFrame() { return false; }
- private:
- Major MajorKey() { return ToBoolean; }
- int MinorKey() { return (tos_.code() << NUMBER_OF_TYPES) |
- types_.ToByte(); }
+ static void InitializeForIsolate(Isolate* isolate) {
+ ToBooleanStub stub;
+ stub.InitializeInterfaceDescriptor(
+ isolate,
+ isolate->code_stub_interface_descriptor(CodeStub::ToBoolean));
+ }
- virtual void FinishCode(Handle<Code> code) {
- code->set_to_boolean_state(types_.ToByte());
+ static Handle<Code> GetUninitialized(Isolate* isolate) {
+ return ToBooleanStub(UNINITIALIZED).GetCode(isolate);
}
- void CheckOddball(MacroAssembler* masm,
- Type type,
- Heap::RootListIndex value,
- bool result);
- void GenerateTypeTransition(MacroAssembler* masm);
+ virtual Code::ExtraICState GetExtraICState() {
+ return types_.ToIntegral();
+ }
+
+ virtual InlineCacheState GetICState() {
+ if (types_.IsEmpty()) {
+ return ::v8::internal::UNINITIALIZED;
+ } else {
+ return MONOMORPHIC;
+ }
+ }
+
+ private:
+ Major MajorKey() { return ToBoolean; }
+ int NotMissMinorKey() { return GetExtraICState(); }
+
+ explicit ToBooleanStub(InitializationState init_state) :
+ HydrogenCodeStub(init_state) {}
- Register tos_;
Types types_;
};
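
Since ToBooleanStub is now a HydrogenCodeStub, its recorded type set must survive as data on the generated Code object: GetExtraICState() serializes types_ to an integer, the Code::ExtraICState constructor rebuilds the set from that byte, and GetICState() reports UNINITIALIZED exactly while the set is empty. A self-contained sketch of the round trip with a stand-in byte set (the real Types wraps a byte-sized bit set, per the comment in CompareNilICStub above):

    #include <cassert>
    #include <cstdint>

    typedef uint32_t ExtraICState;  // stand-in for Code::ExtraICState

    // Stand-in for the stub's byte-sized set of observed types.
    struct TypeSet {
      explicit TypeSet(uint8_t bits = 0) : bits_(bits) {}
      uint8_t ToIntegral() const { return bits_; }
      bool IsEmpty() const { return bits_ == 0; }
      uint8_t bits_;
    };

    int main() {
      TypeSet observed(0x05);                        // some recorded types
      ExtraICState state = observed.ToIntegral();    // stored with the code
      TypeSet rebuilt(static_cast<uint8_t>(state));  // ToBooleanStub(state)
      assert(rebuilt.ToIntegral() == observed.ToIntegral());
      assert(!rebuilt.IsEmpty());  // a non-empty set reads as MONOMORPHIC
      return 0;
    }
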
diff --git a/deps/v8/src/codegen.cc b/deps/v8/src/codegen.cc
index ff4003c1ef..af2f1f667b 100644
--- a/deps/v8/src/codegen.cc
+++ b/deps/v8/src/codegen.cc
@@ -58,13 +58,12 @@ Comment::~Comment() {
#undef __
-void CodeGenerator::MakeCodePrologue(CompilationInfo* info) {
-#ifdef DEBUG
+void CodeGenerator::MakeCodePrologue(CompilationInfo* info, const char* kind) {
bool print_source = false;
bool print_ast = false;
const char* ftype;
- if (Isolate::Current()->bootstrapper()->IsActive()) {
+ if (info->isolate()->bootstrapper()->IsActive()) {
print_source = FLAG_print_builtin_source;
print_ast = FLAG_print_builtin_ast;
ftype = "builtin";
@@ -75,17 +74,18 @@ void CodeGenerator::MakeCodePrologue(CompilationInfo* info) {
}
if (FLAG_trace_codegen || print_source || print_ast) {
- PrintF("*** Generate code for %s function: ", ftype);
+ PrintF("[generating %s code for %s function: ", kind, ftype);
if (info->IsStub()) {
const char* name =
CodeStub::MajorName(info->code_stub()->MajorKey(), true);
PrintF("%s", name == NULL ? "<unknown>" : name);
} else {
- info->function()->name()->ShortPrint();
+ PrintF("%s", *info->function()->debug_name()->ToCString());
}
- PrintF(" ***\n");
+ PrintF("]\n");
}
+#ifdef DEBUG
if (!info->IsStub() && print_source) {
PrintF("--- Source from AST ---\n%s\n",
PrettyPrinter().PrintProgram(info->function()));
diff --git a/deps/v8/src/codegen.h b/deps/v8/src/codegen.h
index 09907c4a20..53ff2e1a1a 100644
--- a/deps/v8/src/codegen.h
+++ b/deps/v8/src/codegen.h
@@ -113,18 +113,6 @@ class ElementsTransitionGenerator : public AllStatic {
};
-class SeqStringSetCharGenerator : public AllStatic {
- public:
- static void Generate(MacroAssembler* masm,
- String::Encoding encoding,
- Register string,
- Register index,
- Register value);
- private:
- DISALLOW_COPY_AND_ASSIGN(SeqStringSetCharGenerator);
-};
-
-
} } // namespace v8::internal
#endif // V8_CODEGEN_H_
diff --git a/deps/v8/src/compiler.cc b/deps/v8/src/compiler.cc
index 504575803d..5fc107f943 100644
--- a/deps/v8/src/compiler.cc
+++ b/deps/v8/src/compiler.cc
@@ -36,6 +36,7 @@
#include "deoptimizer.h"
#include "full-codegen.h"
#include "gdb-jit.h"
+#include "typing.h"
#include "hydrogen.h"
#include "isolate-inl.h"
#include "lithium.h"
@@ -361,11 +362,11 @@ OptimizingCompiler::Status OptimizingCompiler::CreateGraph() {
PrintF("Compiling method %s using hydrogen\n", *name->ToCString());
isolate()->GetHTracer()->TraceCompilation(info());
}
- Handle<Context> native_context(
- info()->closure()->context()->native_context());
- oracle_ = new(info()->zone()) TypeFeedbackOracle(
- code, native_context, isolate(), info()->zone());
- graph_builder_ = new(info()->zone()) HOptimizedGraphBuilder(info(), oracle_);
+
+ // Type-check the function.
+ AstTyper::Type(info());
+
+ graph_builder_ = new(info()->zone()) HOptimizedGraphBuilder(info());
Timer t(this, &time_taken_to_create_graph_);
graph_ = graph_builder_->CreateGraph();
@@ -392,9 +393,9 @@ OptimizingCompiler::Status OptimizingCompiler::CreateGraph() {
}
OptimizingCompiler::Status OptimizingCompiler::OptimizeGraph() {
- AssertNoAllocation no_gc;
- NoHandleAllocation no_handles(isolate());
- HandleDereferenceGuard no_deref(isolate(), HandleDereferenceGuard::DISALLOW);
+ DisallowHeapAllocation no_allocation;
+ DisallowHandleAllocation no_handles;
+ DisallowHandleDereference no_deref;
ASSERT(last_status() == SUCCEEDED);
Timer t(this, &time_taken_to_optimize_);
@@ -423,8 +424,7 @@ OptimizingCompiler::Status OptimizingCompiler::GenerateAndInstallCode() {
// graph creation. To make sure that we don't encounter inconsistencies
// between graph creation and code generation, we disallow accessing
// objects through deferred handles during the latter, with exceptions.
- HandleDereferenceGuard no_deref_deferred(
- isolate(), HandleDereferenceGuard::DISALLOW_DEFERRED);
+ DisallowDeferredHandleDereference no_deferred_handle_deref;
Handle<Code> optimized_code = chunk_->Codegen();
if (optimized_code.is_null()) {
info()->set_bailout_reason("code generation failed");
@@ -649,7 +649,7 @@ Handle<SharedFunctionInfo> Compiler::Compile(Handle<String> source,
// in that case too.
// Create a script object describing the script to be compiled.
- Handle<Script> script = FACTORY->NewScript(source);
+ Handle<Script> script = isolate->factory()->NewScript(source);
if (natives == NATIVES_CODE) {
script->set_type(Smi::FromInt(Script::TYPE_NATIVE));
}
@@ -771,13 +771,6 @@ static bool InstallFullCode(CompilationInfo* info) {
int expected = lit->expected_property_count();
SetExpectedNofPropertiesFromEstimate(shared, expected);
- // Set the optimization hints after performing lazy compilation, as
- // these are not set when the function is set up as a lazily
- // compiled function.
- shared->SetThisPropertyAssignmentsInfo(
- lit->has_only_simple_this_property_assignments(),
- *lit->this_property_assignments());
-
// Check the function has compiled code.
ASSERT(shared->is_compiled());
shared->set_code_age(0);
@@ -957,9 +950,6 @@ void Compiler::RecompileParallel(Handle<JSFunction> closure) {
if (status == OptimizingCompiler::SUCCEEDED) {
info.Detach();
shared->code()->set_profiler_ticks(0);
- // Do a scavenge to put off the next scavenge as far as possible.
- // This may ease the issue that GVN blocks the next scavenge.
- isolate->heap()->CollectGarbage(NEW_SPACE, "parallel recompile");
isolate->optimizing_compiler_thread()->QueueForOptimization(compiler);
} else if (status == OptimizingCompiler::BAILED_OUT) {
isolate->clear_pending_exception();
@@ -1054,6 +1044,7 @@ Handle<SharedFunctionInfo> Compiler::BuildFunctionInfo(FunctionLiteral* literal,
info.SetLanguageMode(literal->scope()->language_mode());
Isolate* isolate = info.isolate();
+ Factory* factory = isolate->factory();
LiveEditFunctionTracker live_edit_tracker(isolate, literal);
// Determine if the function can be lazily compiled. This is necessary to
// allow some of our builtin JS files to be lazily compiled. These
@@ -1083,7 +1074,7 @@ Handle<SharedFunctionInfo> Compiler::BuildFunctionInfo(FunctionLiteral* literal,
// Create a shared function info object.
Handle<SharedFunctionInfo> result =
- FACTORY->NewSharedFunctionInfo(literal->name(),
+ factory->NewSharedFunctionInfo(literal->name(),
literal->materialized_literal_count(),
literal->is_generator(),
info.code(),
@@ -1120,9 +1111,6 @@ void Compiler::SetFunctionInfo(Handle<SharedFunctionInfo> function_info,
function_info->set_is_anonymous(lit->is_anonymous());
function_info->set_is_toplevel(is_toplevel);
function_info->set_inferred_name(*lit->inferred_name());
- function_info->SetThisPropertyAssignmentsInfo(
- lit->has_only_simple_this_property_assignments(),
- *lit->this_property_assignments());
function_info->set_allows_lazy_compilation(lit->AllowsLazyCompilation());
function_info->set_allows_lazy_compilation_without_context(
lit->AllowsLazyCompilationWithoutContext());
diff --git a/deps/v8/src/compiler.h b/deps/v8/src/compiler.h
index dbb513ccdb..8e6d295996 100644
--- a/deps/v8/src/compiler.h
+++ b/deps/v8/src/compiler.h
@@ -449,7 +449,6 @@ class OptimizingCompiler: public ZoneObject {
public:
explicit OptimizingCompiler(CompilationInfo* info)
: info_(info),
- oracle_(NULL),
graph_builder_(NULL),
graph_(NULL),
chunk_(NULL),
@@ -478,7 +477,6 @@ class OptimizingCompiler: public ZoneObject {
private:
CompilationInfo* info_;
- TypeFeedbackOracle* oracle_;
HOptimizedGraphBuilder* graph_builder_;
HGraph* graph_;
LChunk* chunk_;
diff --git a/deps/v8/src/contexts.h b/deps/v8/src/contexts.h
index 86406e5a09..f04ccd1f3e 100644
--- a/deps/v8/src/contexts.h
+++ b/deps/v8/src/contexts.h
@@ -172,6 +172,11 @@ enum BindingFlags {
V(DERIVED_SET_TRAP_INDEX, JSFunction, derived_set_trap) \
V(PROXY_ENUMERATE_INDEX, JSFunction, proxy_enumerate) \
V(OBSERVERS_NOTIFY_CHANGE_INDEX, JSFunction, observers_notify_change) \
+ V(OBSERVERS_ENQUEUE_SPLICE_INDEX, JSFunction, observers_enqueue_splice) \
+ V(OBSERVERS_BEGIN_SPLICE_INDEX, JSFunction, \
+ observers_begin_perform_splice) \
+ V(OBSERVERS_END_SPLICE_INDEX, JSFunction, \
+ observers_end_perform_splice) \
V(OBSERVERS_DELIVER_CHANGES_INDEX, JSFunction, observers_deliver_changes) \
V(GENERATOR_FUNCTION_MAP_INDEX, Map, generator_function_map) \
V(STRICT_MODE_GENERATOR_FUNCTION_MAP_INDEX, Map, \
@@ -317,6 +322,9 @@ class Context: public FixedArray {
DERIVED_SET_TRAP_INDEX,
PROXY_ENUMERATE_INDEX,
OBSERVERS_NOTIFY_CHANGE_INDEX,
+ OBSERVERS_ENQUEUE_SPLICE_INDEX,
+ OBSERVERS_BEGIN_SPLICE_INDEX,
+ OBSERVERS_END_SPLICE_INDEX,
OBSERVERS_DELIVER_CHANGES_INDEX,
GENERATOR_FUNCTION_MAP_INDEX,
STRICT_MODE_GENERATOR_FUNCTION_MAP_INDEX,
diff --git a/deps/v8/src/cpu-profiler.cc b/deps/v8/src/cpu-profiler.cc
index c30d4d44f2..42722191bd 100644
--- a/deps/v8/src/cpu-profiler.cc
+++ b/deps/v8/src/cpu-profiler.cc
@@ -461,10 +461,7 @@ void CpuProfiler::StartProcessorIfNotStarted() {
// Enumerate stuff we already have in the heap.
if (isolate_->heap()->HasBeenSetUp()) {
if (!FLAG_prof_browser_mode) {
- bool saved_log_code_flag = FLAG_log_code;
- FLAG_log_code = true;
isolate_->logger()->LogCodeObjects();
- FLAG_log_code = saved_log_code_flag;
}
isolate_->logger()->LogCompiledFunctions();
isolate_->logger()->LogAccessorCallbacks();
diff --git a/deps/v8/src/d8-posix.cc b/deps/v8/src/d8-posix.cc
index 1be782a241..424dbbb393 100644
--- a/deps/v8/src/d8-posix.cc
+++ b/deps/v8/src/d8-posix.cc
@@ -238,7 +238,7 @@ class ExecArgs {
// Gets the optional timeouts from the arguments to the system() call.
-static bool GetTimeouts(const Arguments& args,
+static bool GetTimeouts(const v8::FunctionCallbackInfo<v8::Value>& args,
int* read_timeout,
int* total_timeout) {
if (args.Length() > 3) {
@@ -448,25 +448,28 @@ static bool WaitForChild(int pid,
// Implementation of the system() function (see d8.h for details).
-Handle<Value> Shell::System(const Arguments& args) {
+void Shell::System(const v8::FunctionCallbackInfo<v8::Value>& args) {
HandleScope scope(args.GetIsolate());
int read_timeout = -1;
int total_timeout = -1;
- if (!GetTimeouts(args, &read_timeout, &total_timeout)) return v8::Undefined();
+ if (!GetTimeouts(args, &read_timeout, &total_timeout)) return;
Handle<Array> command_args;
if (args.Length() > 1) {
if (!args[1]->IsArray()) {
- return ThrowException(String::New("system: Argument 2 must be an array"));
+ ThrowException(String::New("system: Argument 2 must be an array"));
+ return;
}
command_args = Handle<Array>::Cast(args[1]);
} else {
command_args = Array::New(0);
}
if (command_args->Length() > ExecArgs::kMaxArgs) {
- return ThrowException(String::New("Too many arguments to system()"));
+ ThrowException(String::New("Too many arguments to system()"));
+ return;
}
if (args.Length() < 1) {
- return ThrowException(String::New("Too few arguments to system()"));
+ ThrowException(String::New("Too few arguments to system()"));
+ return;
}
struct timeval start_time;
@@ -474,16 +477,18 @@ Handle<Value> Shell::System(const Arguments& args) {
ExecArgs exec_args;
if (!exec_args.Init(args[0], command_args)) {
- return v8::Undefined();
+ return;
}
int exec_error_fds[2];
int stdout_fds[2];
if (pipe(exec_error_fds) != 0) {
- return ThrowException(String::New("pipe syscall failed."));
+ ThrowException(String::New("pipe syscall failed."));
+ return;
}
if (pipe(stdout_fds) != 0) {
- return ThrowException(String::New("pipe syscall failed."));
+ ThrowException(String::New("pipe syscall failed."));
+ return;
}
pid_t pid = fork();
@@ -499,7 +504,7 @@ Handle<Value> Shell::System(const Arguments& args) {
OpenFDCloser error_read_closer(exec_error_fds[kReadFD]);
OpenFDCloser stdout_read_closer(stdout_fds[kReadFD]);
- if (!ChildLaunchedOK(exec_error_fds)) return v8::Undefined();
+ if (!ChildLaunchedOK(exec_error_fds)) return;
Handle<Value> accumulator = GetStdout(stdout_fds[kReadFD],
start_time,
@@ -507,7 +512,8 @@ Handle<Value> Shell::System(const Arguments& args) {
total_timeout);
if (accumulator->IsUndefined()) {
kill(pid, SIGINT); // On timeout, kill the subprocess.
- return accumulator;
+ args.GetReturnValue().Set(accumulator);
+ return;
}
if (!WaitForChild(pid,
@@ -515,42 +521,47 @@ Handle<Value> Shell::System(const Arguments& args) {
start_time,
read_timeout,
total_timeout)) {
- return v8::Undefined();
+ return;
}
- return scope.Close(accumulator);
+ args.GetReturnValue().Set(accumulator);
}
-Handle<Value> Shell::ChangeDirectory(const Arguments& args) {
+void Shell::ChangeDirectory(const v8::FunctionCallbackInfo<v8::Value>& args) {
if (args.Length() != 1) {
const char* message = "chdir() takes one argument";
- return ThrowException(String::New(message));
+ ThrowException(String::New(message));
+ return;
}
String::Utf8Value directory(args[0]);
if (*directory == NULL) {
const char* message = "os.chdir(): String conversion of argument failed.";
- return ThrowException(String::New(message));
+ ThrowException(String::New(message));
+ return;
}
if (chdir(*directory) != 0) {
- return ThrowException(String::New(strerror(errno)));
+ ThrowException(String::New(strerror(errno)));
+ return;
}
- return v8::Undefined();
}
-Handle<Value> Shell::SetUMask(const Arguments& args) {
+void Shell::SetUMask(const v8::FunctionCallbackInfo<v8::Value>& args) {
if (args.Length() != 1) {
const char* message = "umask() takes one argument";
- return ThrowException(String::New(message));
+ ThrowException(String::New(message));
+ return;
}
if (args[0]->IsNumber()) {
mode_t mask = args[0]->Int32Value();
int previous = umask(mask);
- return Number::New(previous);
+ args.GetReturnValue().Set(previous);
+ return;
} else {
const char* message = "umask() argument must be numeric";
- return ThrowException(String::New(message));
+ ThrowException(String::New(message));
+ return;
}
}
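
All of the d8-posix.cc hunks apply the same v8 3.19 API migration: Handle<Value>-returning callbacks over const Arguments& become void callbacks over const FunctionCallbackInfo<Value>&, results are reported through args.GetReturnValue().Set(...), and each ThrowException call is now followed by a bare return. A minimal before-and-after sketch against the 3.19 API as used in this patch; the callback name Hello and its behavior are invented:

    #include <v8.h>

    // Old style (pre-3.19): the callback returned its result.
    // v8::Handle<v8::Value> Hello(const v8::Arguments& args) {
    //   return v8::String::New("hello");
    // }

    // New style, as used throughout this patch.
    void Hello(const v8::FunctionCallbackInfo<v8::Value>& args) {
      v8::HandleScope scope(args.GetIsolate());
      if (args.Length() != 0) {
        v8::ThrowException(v8::String::New("hello() takes no arguments"));
        return;  // nothing to return; the exception is already pending
      }
      args.GetReturnValue().Set(v8::String::New("hello"));
    }
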
@@ -598,79 +609,85 @@ static bool mkdirp(char* directory, mode_t mask) {
}
-Handle<Value> Shell::MakeDirectory(const Arguments& args) {
+void Shell::MakeDirectory(const v8::FunctionCallbackInfo<v8::Value>& args) {
mode_t mask = 0777;
if (args.Length() == 2) {
if (args[1]->IsNumber()) {
mask = args[1]->Int32Value();
} else {
const char* message = "mkdirp() second argument must be numeric";
- return ThrowException(String::New(message));
+ ThrowException(String::New(message));
+ return;
}
} else if (args.Length() != 1) {
const char* message = "mkdirp() takes one or two arguments";
- return ThrowException(String::New(message));
+ ThrowException(String::New(message));
+ return;
}
String::Utf8Value directory(args[0]);
if (*directory == NULL) {
const char* message = "os.mkdirp(): String conversion of argument failed.";
- return ThrowException(String::New(message));
+ ThrowException(String::New(message));
+ return;
}
mkdirp(*directory, mask);
- return v8::Undefined();
}
-Handle<Value> Shell::RemoveDirectory(const Arguments& args) {
+void Shell::RemoveDirectory(const v8::FunctionCallbackInfo<v8::Value>& args) {
if (args.Length() != 1) {
const char* message = "rmdir() takes one argument";
- return ThrowException(String::New(message));
+ ThrowException(String::New(message));
+ return;
}
String::Utf8Value directory(args[0]);
if (*directory == NULL) {
const char* message = "os.rmdir(): String conversion of argument failed.";
- return ThrowException(String::New(message));
+ ThrowException(String::New(message));
+ return;
}
rmdir(*directory);
- return v8::Undefined();
}
-Handle<Value> Shell::SetEnvironment(const Arguments& args) {
+void Shell::SetEnvironment(const v8::FunctionCallbackInfo<v8::Value>& args) {
if (args.Length() != 2) {
const char* message = "setenv() takes two arguments";
- return ThrowException(String::New(message));
+ ThrowException(String::New(message));
+ return;
}
String::Utf8Value var(args[0]);
String::Utf8Value value(args[1]);
if (*var == NULL) {
const char* message =
"os.setenv(): String conversion of variable name failed.";
- return ThrowException(String::New(message));
+ ThrowException(String::New(message));
+ return;
}
if (*value == NULL) {
const char* message =
"os.setenv(): String conversion of variable contents failed.";
- return ThrowException(String::New(message));
+ ThrowException(String::New(message));
+ return;
}
setenv(*var, *value, 1);
- return v8::Undefined();
}
-Handle<Value> Shell::UnsetEnvironment(const Arguments& args) {
+void Shell::UnsetEnvironment(const v8::FunctionCallbackInfo<v8::Value>& args) {
if (args.Length() != 1) {
const char* message = "unsetenv() takes one argument";
- return ThrowException(String::New(message));
+ ThrowException(String::New(message));
+ return;
}
String::Utf8Value var(args[0]);
if (*var == NULL) {
const char* message =
"os.unsetenv(): String conversion of variable name failed.";
- return ThrowException(String::New(message));
+ ThrowException(String::New(message));
+ return;
}
unsetenv(*var);
- return v8::Undefined();
}
diff --git a/deps/v8/src/d8.cc b/deps/v8/src/d8.cc
index b95432e269..a917dbdbe3 100644
--- a/deps/v8/src/d8.cc
+++ b/deps/v8/src/d8.cc
@@ -40,11 +40,6 @@
#include <string.h>
#include <sys/stat.h>
-// TODO(dcarney): remove
-#define V8_ALLOW_ACCESS_TO_PERSISTENT_ARROW
-#define V8_ALLOW_ACCESS_TO_RAW_HANDLE_CONSTRUCTOR
-#define V8_ALLOW_ACCESS_TO_PERSISTENT_IMPLICIT
-
#ifdef V8_SHARED
#include <assert.h>
#endif // V8_SHARED
@@ -243,8 +238,10 @@ bool Shell::ExecuteString(Isolate* isolate,
#if !defined(V8_SHARED)
} else {
v8::TryCatch try_catch;
- Context::Scope context_scope(isolate, utility_context_);
- Handle<Object> global = utility_context_->Global();
+ v8::Local<v8::Context> context =
+ v8::Local<v8::Context>::New(isolate, utility_context_);
+ v8::Context::Scope context_scope(context);
+ Handle<Object> global = context->Global();
Handle<Value> fun = global->Get(String::New("Stringify"));
Handle<Value> argv[1] = { result };
Handle<Value> s = Handle<Function>::Cast(fun)->Call(global, 1, argv);
@@ -266,8 +263,7 @@ PerIsolateData::RealmScope::RealmScope(PerIsolateData* data) : data_(data) {
data_->realm_current_ = 0;
data_->realm_switch_ = 0;
data_->realms_ = new Persistent<Context>[1];
- data_->realms_[0] =
- Persistent<Context>::New(data_->isolate_, Context::GetEntered());
+ data_->realms_[0].Reset(data_->isolate_, Context::GetEntered());
data_->realm_shared_.Clear();
}
@@ -291,143 +287,152 @@ int PerIsolateData::RealmFind(Handle<Context> context) {
// Realm.current() returns the index of the currently active realm.
-Handle<Value> Shell::RealmCurrent(const Arguments& args) {
+void Shell::RealmCurrent(const v8::FunctionCallbackInfo<v8::Value>& args) {
Isolate* isolate = args.GetIsolate();
PerIsolateData* data = PerIsolateData::Get(isolate);
int index = data->RealmFind(Context::GetEntered());
- if (index == -1) return Undefined(isolate);
- return Number::New(index);
+ if (index == -1) return;
+ args.GetReturnValue().Set(index);
}
// Realm.owner(o) returns the index of the realm that created o.
-Handle<Value> Shell::RealmOwner(const Arguments& args) {
+void Shell::RealmOwner(const v8::FunctionCallbackInfo<v8::Value>& args) {
Isolate* isolate = args.GetIsolate();
PerIsolateData* data = PerIsolateData::Get(isolate);
if (args.Length() < 1 || !args[0]->IsObject()) {
- return Throw("Invalid argument");
+ Throw("Invalid argument");
+ return;
}
int index = data->RealmFind(args[0]->ToObject()->CreationContext());
- if (index == -1) return Undefined(isolate);
- return Number::New(index);
+ if (index == -1) return;
+ args.GetReturnValue().Set(index);
}
// Realm.global(i) returns the global object of realm i.
// (Note that properties of global objects cannot be read/written cross-realm.)
-Handle<Value> Shell::RealmGlobal(const Arguments& args) {
+void Shell::RealmGlobal(const v8::FunctionCallbackInfo<v8::Value>& args) {
PerIsolateData* data = PerIsolateData::Get(args.GetIsolate());
if (args.Length() < 1 || !args[0]->IsNumber()) {
- return Throw("Invalid argument");
+ Throw("Invalid argument");
+ return;
}
int index = args[0]->Uint32Value();
if (index >= data->realm_count_ || data->realms_[index].IsEmpty()) {
- return Throw("Invalid realm index");
+ Throw("Invalid realm index");
+ return;
}
- return data->realms_[index]->Global();
+ args.GetReturnValue().Set(
+ Local<Context>::New(args.GetIsolate(), data->realms_[index])->Global());
}
// Realm.create() creates a new realm and returns its index.
-Handle<Value> Shell::RealmCreate(const Arguments& args) {
+void Shell::RealmCreate(const v8::FunctionCallbackInfo<v8::Value>& args) {
Isolate* isolate = args.GetIsolate();
PerIsolateData* data = PerIsolateData::Get(isolate);
Persistent<Context>* old_realms = data->realms_;
int index = data->realm_count_;
data->realms_ = new Persistent<Context>[++data->realm_count_];
- for (int i = 0; i < index; ++i) data->realms_[i] = old_realms[i];
+ for (int i = 0; i < index; ++i) {
+ data->realms_[i].Reset(isolate, old_realms[i]);
+ }
delete[] old_realms;
Handle<ObjectTemplate> global_template = CreateGlobalTemplate(isolate);
- data->realms_[index] = Persistent<Context>::New(
+ data->realms_[index].Reset(
isolate, Context::New(isolate, NULL, global_template));
- return Number::New(index);
+ args.GetReturnValue().Set(index);
}
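
Persistent<Context>::New(isolate, ...) and plain assignment between persistents disappear in the same sweep; note the V8_ALLOW_ACCESS_TO_PERSISTENT_* compatibility defines deleted at the top of d8.cc. The replacement idioms, condensed into one sketch (the names strong and copy are illustrative; assumes an isolate and a live Local<Context> named context):

    v8::Persistent<v8::Context> strong;
    strong.Reset(isolate, context);      // bind to a Local
    v8::Persistent<v8::Context> copy;
    copy.Reset(isolate, strong);         // duplicate another Persistent
    v8::Local<v8::Context> local =
        v8::Local<v8::Context>::New(isolate, strong);  // rehydrate for use
    strong.Dispose(isolate);             // release is still explicit in 3.19
    strong.Clear();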
// Realm.dispose(i) disposes the reference to the realm i.
-Handle<Value> Shell::RealmDispose(const Arguments& args) {
+void Shell::RealmDispose(const v8::FunctionCallbackInfo<v8::Value>& args) {
Isolate* isolate = args.GetIsolate();
PerIsolateData* data = PerIsolateData::Get(isolate);
if (args.Length() < 1 || !args[0]->IsNumber()) {
- return Throw("Invalid argument");
+ Throw("Invalid argument");
+ return;
}
int index = args[0]->Uint32Value();
if (index >= data->realm_count_ || data->realms_[index].IsEmpty() ||
index == 0 ||
index == data->realm_current_ || index == data->realm_switch_) {
- return Throw("Invalid realm index");
+ Throw("Invalid realm index");
+ return;
}
data->realms_[index].Dispose(isolate);
data->realms_[index].Clear();
- return Undefined(isolate);
}
// Realm.switch(i) switches to the realm i for consecutive interactive inputs.
-Handle<Value> Shell::RealmSwitch(const Arguments& args) {
+void Shell::RealmSwitch(const v8::FunctionCallbackInfo<v8::Value>& args) {
Isolate* isolate = args.GetIsolate();
PerIsolateData* data = PerIsolateData::Get(isolate);
if (args.Length() < 1 || !args[0]->IsNumber()) {
- return Throw("Invalid argument");
+ Throw("Invalid argument");
+ return;
}
int index = args[0]->Uint32Value();
if (index >= data->realm_count_ || data->realms_[index].IsEmpty()) {
- return Throw("Invalid realm index");
+ Throw("Invalid realm index");
+ return;
}
data->realm_switch_ = index;
- return Undefined(isolate);
}
// Realm.eval(i, s) evaluates s in realm i and returns the result.
-Handle<Value> Shell::RealmEval(const Arguments& args) {
+void Shell::RealmEval(const v8::FunctionCallbackInfo<v8::Value>& args) {
Isolate* isolate = args.GetIsolate();
PerIsolateData* data = PerIsolateData::Get(isolate);
if (args.Length() < 2 || !args[0]->IsNumber() || !args[1]->IsString()) {
- return Throw("Invalid argument");
+ Throw("Invalid argument");
+ return;
}
int index = args[0]->Uint32Value();
if (index >= data->realm_count_ || data->realms_[index].IsEmpty()) {
- return Throw("Invalid realm index");
+ Throw("Invalid realm index");
+ return;
}
Handle<Script> script = Script::New(args[1]->ToString());
- if (script.IsEmpty()) return Undefined(isolate);
+ if (script.IsEmpty()) return;
Local<Context> realm = Local<Context>::New(isolate, data->realms_[index]);
realm->Enter();
Handle<Value> result = script->Run();
realm->Exit();
- return result;
+ args.GetReturnValue().Set(result);
}
// Realm.shared is an accessor for a single shared value across realms.
-Handle<Value> Shell::RealmSharedGet(Local<String> property,
- const AccessorInfo& info) {
+void Shell::RealmSharedGet(Local<String> property,
+ const PropertyCallbackInfo<Value>& info) {
Isolate* isolate = info.GetIsolate();
PerIsolateData* data = PerIsolateData::Get(isolate);
- if (data->realm_shared_.IsEmpty()) return Undefined(isolate);
- return Local<Value>::New(isolate, data->realm_shared_);
+ if (data->realm_shared_.IsEmpty()) return;
+ info.GetReturnValue().Set(data->realm_shared_);
}
void Shell::RealmSharedSet(Local<String> property,
Local<Value> value,
- const AccessorInfo& info) {
+ const PropertyCallbackInfo<void>& info) {
Isolate* isolate = info.GetIsolate();
PerIsolateData* data = PerIsolateData::Get(isolate);
if (!data->realm_shared_.IsEmpty()) data->realm_shared_.Dispose(isolate);
- data->realm_shared_ = Persistent<Value>::New(isolate, value);
+ data->realm_shared_.Reset(isolate, value);
}
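
Accessors migrate the same way as functions: getters exchange AccessorInfo for PropertyCallbackInfo<Value> and report through info.GetReturnValue(), while setters take PropertyCallbackInfo<void> and produce no value at all. The shape of the pair, with illustrative names:

    void SharedGet(v8::Local<v8::String> property,
                   const v8::PropertyCallbackInfo<v8::Value>& info) {
      info.GetReturnValue().Set(v8::String::New("value"));
    }

    void SharedSet(v8::Local<v8::String> property,
                   v8::Local<v8::Value> value,
                   const v8::PropertyCallbackInfo<void>& info) {
      // Stash `value` somewhere; nothing is returned to the VM.
    }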
-Handle<Value> Shell::Print(const Arguments& args) {
- Handle<Value> val = Write(args);
+void Shell::Print(const v8::FunctionCallbackInfo<v8::Value>& args) {
+ Write(args);
printf("\n");
fflush(stdout);
- return val;
}
-Handle<Value> Shell::Write(const Arguments& args) {
+void Shell::Write(const v8::FunctionCallbackInfo<v8::Value>& args) {
for (int i = 0; i < args.Length(); i++) {
HandleScope handle_scope(args.GetIsolate());
if (i != 0) {
@@ -437,7 +442,10 @@ Handle<Value> Shell::Write(const Arguments& args) {
// Explicitly catch potential exceptions in toString().
v8::TryCatch try_catch;
Handle<String> str_obj = args[i]->ToString();
- if (try_catch.HasCaught()) return try_catch.ReThrow();
+ if (try_catch.HasCaught()) {
+ try_catch.ReThrow();
+ return;
+ }
v8::String::Utf8Value str(str_obj);
int n = static_cast<int>(fwrite(*str, sizeof(**str), str.length(), stdout));
@@ -446,32 +454,31 @@ Handle<Value> Shell::Write(const Arguments& args) {
Exit(1);
}
}
- return Undefined(args.GetIsolate());
}
-Handle<Value> Shell::EnableProfiler(const Arguments& args) {
+void Shell::EnableProfiler(const v8::FunctionCallbackInfo<v8::Value>& args) {
V8::ResumeProfiler();
- return Undefined(args.GetIsolate());
}
-Handle<Value> Shell::DisableProfiler(const Arguments& args) {
+void Shell::DisableProfiler(const v8::FunctionCallbackInfo<v8::Value>& args) {
V8::PauseProfiler();
- return Undefined(args.GetIsolate());
}
-Handle<Value> Shell::Read(const Arguments& args) {
+void Shell::Read(const v8::FunctionCallbackInfo<v8::Value>& args) {
String::Utf8Value file(args[0]);
if (*file == NULL) {
- return Throw("Error loading file");
+ Throw("Error loading file");
+ return;
}
Handle<String> source = ReadFile(args.GetIsolate(), *file);
if (source.IsEmpty()) {
- return Throw("Error loading file");
+ Throw("Error loading file");
+ return;
}
- return source;
+ args.GetReturnValue().Set(source);
}
@@ -505,47 +512,52 @@ Handle<String> Shell::ReadFromStdin(Isolate* isolate) {
}
-Handle<Value> Shell::Load(const Arguments& args) {
+void Shell::Load(const v8::FunctionCallbackInfo<v8::Value>& args) {
for (int i = 0; i < args.Length(); i++) {
HandleScope handle_scope(args.GetIsolate());
String::Utf8Value file(args[i]);
if (*file == NULL) {
- return Throw("Error loading file");
+ Throw("Error loading file");
+ return;
}
Handle<String> source = ReadFile(args.GetIsolate(), *file);
if (source.IsEmpty()) {
- return Throw("Error loading file");
+ Throw("Error loading file");
+ return;
}
if (!ExecuteString(args.GetIsolate(),
source,
String::New(*file),
false,
true)) {
- return Throw("Error executing file");
+ Throw("Error executing file");
+ return;
}
}
- return Undefined(args.GetIsolate());
}
-Handle<Value> Shell::Quit(const Arguments& args) {
+void Shell::Quit(const v8::FunctionCallbackInfo<v8::Value>& args) {
int exit_code = args[0]->Int32Value();
OnExit();
exit(exit_code);
- return Undefined(args.GetIsolate());
}
-Handle<Value> Shell::Version(const Arguments& args) {
- return String::New(V8::GetVersion());
+void Shell::Version(const v8::FunctionCallbackInfo<v8::Value>& args) {
+ args.GetReturnValue().Set(String::New(V8::GetVersion()));
}
void Shell::ReportException(Isolate* isolate, v8::TryCatch* try_catch) {
HandleScope handle_scope(isolate);
#if !defined(V8_SHARED) && defined(ENABLE_DEBUGGER_SUPPORT)
+ Handle<Context> utility_context;
bool enter_context = !Context::InContext();
- if (enter_context) utility_context_->Enter();
+ if (enter_context) {
+ utility_context = Local<Context>::New(isolate, utility_context_);
+ utility_context->Enter();
+ }
#endif // !V8_SHARED && ENABLE_DEBUGGER_SUPPORT
v8::String::Utf8Value exception(try_catch->Exception());
const char* exception_string = ToCString(exception);
@@ -582,7 +594,7 @@ void Shell::ReportException(Isolate* isolate, v8::TryCatch* try_catch) {
}
printf("\n");
#if !defined(V8_SHARED) && defined(ENABLE_DEBUGGER_SUPPORT)
- if (enter_context) utility_context_->Exit();
+ if (enter_context) utility_context->Exit();
#endif // !V8_SHARED && ENABLE_DEBUGGER_SUPPORT
}
@@ -592,11 +604,15 @@ Handle<Array> Shell::GetCompletions(Isolate* isolate,
Handle<String> text,
Handle<String> full) {
HandleScope handle_scope(isolate);
- Context::Scope context_scope(isolate, utility_context_);
- Handle<Object> global = utility_context_->Global();
+ v8::Local<v8::Context> utility_context =
+ v8::Local<v8::Context>::New(isolate, utility_context_);
+ v8::Context::Scope context_scope(utility_context);
+ Handle<Object> global = utility_context->Global();
Handle<Value> fun = global->Get(String::New("GetCompletions"));
static const int kArgc = 3;
- Handle<Value> argv[kArgc] = { evaluation_context_->Global(), text, full };
+ v8::Local<v8::Context> evaluation_context =
+ v8::Local<v8::Context>::New(isolate, evaluation_context_);
+ Handle<Value> argv[kArgc] = { evaluation_context->Global(), text, full };
Handle<Value> val = Handle<Function>::Cast(fun)->Call(global, kArgc, argv);
return handle_scope.Close(Handle<Array>::Cast(val));
}
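
The Context::Scope(isolate, persistent) overload used by the old code is being retired, so every call site in this file now rehydrates a Local first. The recurring three-line pattern, shown once in isolation (assumes an isolate and a Persistent<Context> member named persistent_context_):

    v8::Local<v8::Context> context =
        v8::Local<v8::Context>::New(isolate, persistent_context_);
    v8::Context::Scope context_scope(context);
    v8::Handle<v8::Object> global = context->Global();  // valid inside the scope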
@@ -606,8 +622,10 @@ Handle<Array> Shell::GetCompletions(Isolate* isolate,
Handle<Object> Shell::DebugMessageDetails(Isolate* isolate,
Handle<String> message) {
HandleScope handle_scope(isolate);
- Context::Scope context_scope(isolate, utility_context_);
- Handle<Object> global = utility_context_->Global();
+ v8::Local<v8::Context> context =
+ v8::Local<v8::Context>::New(isolate, utility_context_);
+ v8::Context::Scope context_scope(context);
+ Handle<Object> global = context->Global();
Handle<Value> fun = global->Get(String::New("DebugMessageDetails"));
static const int kArgc = 1;
Handle<Value> argv[kArgc] = { message };
@@ -619,8 +637,10 @@ Handle<Object> Shell::DebugMessageDetails(Isolate* isolate,
Handle<Value> Shell::DebugCommandToJSONRequest(Isolate* isolate,
Handle<String> command) {
HandleScope handle_scope(isolate);
- Context::Scope context_scope(isolate, utility_context_);
- Handle<Object> global = utility_context_->Global();
+ v8::Local<v8::Context> context =
+ v8::Local<v8::Context>::New(isolate, utility_context_);
+ v8::Context::Scope context_scope(context);
+ Handle<Object> global = context->Global();
Handle<Value> fun = global->Get(String::New("DebugCommandToJSONRequest"));
static const int kArgc = 1;
Handle<Value> argv[kArgc] = { command };
@@ -632,7 +652,9 @@ Handle<Value> Shell::DebugCommandToJSONRequest(Isolate* isolate,
void Shell::DispatchDebugMessages() {
Isolate* isolate = v8::Isolate::GetCurrent();
HandleScope handle_scope(isolate);
- v8::Context::Scope scope(isolate, Shell::evaluation_context_);
+ v8::Local<v8::Context> context =
+ v8::Local<v8::Context>::New(isolate, Shell::evaluation_context_);
+ v8::Context::Scope context_scope(context);
v8::Debug::ProcessDebugMessages();
}
#endif // ENABLE_DEBUGGER_SUPPORT
@@ -743,9 +765,13 @@ void Shell::InstallUtilityScript(Isolate* isolate) {
HandleScope scope(isolate);
// If we use the utility context, we have to set the security tokens so that
// utility, evaluation and debug context can all access each other.
- utility_context_->SetSecurityToken(Undefined(isolate));
- evaluation_context_->SetSecurityToken(Undefined(isolate));
- Context::Scope utility_scope(isolate, utility_context_);
+ v8::Local<v8::Context> utility_context =
+ v8::Local<v8::Context>::New(isolate, utility_context_);
+ v8::Local<v8::Context> evaluation_context =
+ v8::Local<v8::Context>::New(isolate, evaluation_context_);
+ utility_context->SetSecurityToken(Undefined(isolate));
+ evaluation_context->SetSecurityToken(Undefined(isolate));
+ v8::Context::Scope context_scope(utility_context);
#ifdef ENABLE_DEBUGGER_SUPPORT
if (i::FLAG_debugger) printf("JavaScript debugger enabled\n");
@@ -754,7 +780,7 @@ void Shell::InstallUtilityScript(Isolate* isolate) {
debug->Load();
i::Handle<i::JSObject> js_debug
= i::Handle<i::JSObject>(debug->debug_context()->global_object());
- utility_context_->Global()->Set(String::New("$debug"),
+ utility_context->Global()->Set(String::New("$debug"),
Utils::ToLocal(js_debug));
debug->debug_context()->set_security_token(HEAP->undefined_value());
#endif // ENABLE_DEBUGGER_SUPPORT
@@ -923,16 +949,17 @@ Local<Context> Shell::CreateEvaluationContext(Isolate* isolate) {
Context::Scope scope(context);
#ifndef V8_SHARED
+ i::Factory* factory = i::Isolate::Current()->factory();
i::JSArguments js_args = i::FLAG_js_arguments;
i::Handle<i::FixedArray> arguments_array =
- FACTORY->NewFixedArray(js_args.argc());
+ factory->NewFixedArray(js_args.argc());
for (int j = 0; j < js_args.argc(); j++) {
i::Handle<i::String> arg =
- FACTORY->NewStringFromUtf8(i::CStrVector(js_args[j]));
+ factory->NewStringFromUtf8(i::CStrVector(js_args[j]));
arguments_array->set(j, *arg);
}
i::Handle<i::JSArray> arguments_jsarray =
- FACTORY->NewJSArrayWithElements(arguments_array);
+ factory->NewJSArrayWithElements(arguments_array);
context->Global()->Set(String::New("arguments"),
Utils::ToLocal(arguments_jsarray));
#endif // V8_SHARED
@@ -1048,24 +1075,40 @@ static char* ReadChars(Isolate* isolate, const char* name, int* size_out) {
return chars;
}
+static void ReadBufferWeakCallback(v8::Isolate* isolate,
+ Persistent<Value>* object,
+ uint8_t* data) {
+ size_t byte_length = ArrayBuffer::Cast(**object)->ByteLength();
+ isolate->AdjustAmountOfExternalAllocatedMemory(
+ -static_cast<intptr_t>(byte_length));
+
+ delete[] data;
+ object->Dispose(isolate);
+}
-Handle<Value> Shell::ReadBuffer(const Arguments& args) {
+void Shell::ReadBuffer(const v8::FunctionCallbackInfo<v8::Value>& args) {
ASSERT(sizeof(char) == sizeof(uint8_t)); // NOLINT
String::Utf8Value filename(args[0]);
int length;
if (*filename == NULL) {
- return Throw("Error loading file");
+ Throw("Error loading file");
+ return;
}
+ Isolate* isolate = args.GetIsolate();
uint8_t* data = reinterpret_cast<uint8_t*>(
ReadChars(args.GetIsolate(), *filename, &length));
if (data == NULL) {
- return Throw("Error reading file");
+ Throw("Error reading file");
+ return;
}
- Handle<v8::ArrayBuffer> buffer = ArrayBuffer::New(length);
- memcpy(buffer->Data(), data, length);
- delete[] data;
- return buffer;
+ Handle<v8::ArrayBuffer> buffer = ArrayBuffer::New(data, length);
+ v8::Persistent<v8::Value> weak_handle(isolate, buffer);
+ weak_handle.MakeWeak(isolate, data, ReadBufferWeakCallback);
+ weak_handle.MarkIndependent();
+ isolate->AdjustAmountOfExternalAllocatedMemory(length);
+
+ args.GetReturnValue().Set(buffer);
}
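
ReadBuffer now hands its malloc'ed bytes straight to the ArrayBuffer and frees them from a weak callback rather than copying them in. The moving parts, condensed with the contract spelled out in comments (isolate, data and length as in the function above):

    v8::Handle<v8::ArrayBuffer> buffer = v8::ArrayBuffer::New(data, length);
    v8::Persistent<v8::Value> weak_handle(isolate, buffer);
    // Once only weak references remain, the callback fires; it is expected to
    // free the payload, un-account the external bytes, and Dispose the handle.
    weak_handle.MakeWeak(isolate, data, ReadBufferWeakCallback);
    weak_handle.MarkIndependent();  // may also be reclaimed by a scavenge
    isolate->AdjustAmountOfExternalAllocatedMemory(length);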
@@ -1106,7 +1149,9 @@ Handle<String> Shell::ReadFile(Isolate* isolate, const char* name) {
void Shell::RunShell(Isolate* isolate) {
Locker locker(isolate);
HandleScope outer_scope(isolate);
- Context::Scope context_scope(isolate, evaluation_context_);
+ v8::Local<v8::Context> context =
+ v8::Local<v8::Context>::New(isolate, evaluation_context_);
+ v8::Context::Scope context_scope(context);
PerIsolateData::RealmScope realm_scope(PerIsolateData::Get(isolate));
Handle<String> name = String::New("(d8)");
LineEditor* console = LineEditor::Get();
@@ -1526,6 +1571,13 @@ static void EnableHarmonyTypedArraysViaCommandLine() {
#endif
+class ShellArrayBufferAllocator : public v8::ArrayBuffer::Allocator {
+ public:
+ virtual void* Allocate(size_t length) { return malloc(length); }
+ virtual void Free(void* data) { free(data); }
+};
+
+
int Shell::Main(int argc, char* argv[]) {
if (!SetOptions(argc, argv)) return 1;
#ifndef V8_SHARED
@@ -1534,6 +1586,8 @@ int Shell::Main(int argc, char* argv[]) {
#else
EnableHarmonyTypedArraysViaCommandLine();
#endif
+ ShellArrayBufferAllocator array_buffer_allocator;
+ v8::V8::SetArrayBufferAllocator(&array_buffer_allocator);
int result = 0;
Isolate* isolate = Isolate::GetCurrent();
DumbLineEditor dumb_line_editor(isolate);
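
Externally backed ArrayBuffers come with a new embedder obligation: an ArrayBuffer::Allocator has to be registered before ArrayBuffers are used, which is why Shell::Main now installs one up front. A matching embedder-side sketch (the class name here is illustrative):

    class MallocArrayBufferAllocator : public v8::ArrayBuffer::Allocator {
     public:
      virtual void* Allocate(size_t length) { return malloc(length); }
      virtual void Free(void* data) { free(data); }
    };

    static MallocArrayBufferAllocator allocator;  // must outlive all buffers
    v8::V8::SetArrayBufferAllocator(&allocator);  // once, early in startup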
diff --git a/deps/v8/src/d8.h b/deps/v8/src/d8.h
index c068dd9db1..804cc4655f 100644
--- a/deps/v8/src/d8.h
+++ b/deps/v8/src/d8.h
@@ -300,45 +300,46 @@ class Shell : public i::AllStatic {
#endif // ENABLE_DEBUGGER_SUPPORT
#endif // V8_SHARED
- static Handle<Value> RealmCurrent(const Arguments& args);
- static Handle<Value> RealmOwner(const Arguments& args);
- static Handle<Value> RealmGlobal(const Arguments& args);
- static Handle<Value> RealmCreate(const Arguments& args);
- static Handle<Value> RealmDispose(const Arguments& args);
- static Handle<Value> RealmSwitch(const Arguments& args);
- static Handle<Value> RealmEval(const Arguments& args);
- static Handle<Value> RealmSharedGet(Local<String> property,
- const AccessorInfo& info);
+ static void RealmCurrent(const v8::FunctionCallbackInfo<v8::Value>& args);
+ static void RealmOwner(const v8::FunctionCallbackInfo<v8::Value>& args);
+ static void RealmGlobal(const v8::FunctionCallbackInfo<v8::Value>& args);
+ static void RealmCreate(const v8::FunctionCallbackInfo<v8::Value>& args);
+ static void RealmDispose(const v8::FunctionCallbackInfo<v8::Value>& args);
+ static void RealmSwitch(const v8::FunctionCallbackInfo<v8::Value>& args);
+ static void RealmEval(const v8::FunctionCallbackInfo<v8::Value>& args);
+ static void RealmSharedGet(Local<String> property,
+ const PropertyCallbackInfo<Value>& info);
static void RealmSharedSet(Local<String> property,
Local<Value> value,
- const AccessorInfo& info);
-
- static Handle<Value> Print(const Arguments& args);
- static Handle<Value> Write(const Arguments& args);
- static Handle<Value> Quit(const Arguments& args);
- static Handle<Value> Version(const Arguments& args);
- static Handle<Value> EnableProfiler(const Arguments& args);
- static Handle<Value> DisableProfiler(const Arguments& args);
- static Handle<Value> Read(const Arguments& args);
- static Handle<Value> ReadBuffer(const Arguments& args);
+ const PropertyCallbackInfo<void>& info);
+
+ static void Print(const v8::FunctionCallbackInfo<v8::Value>& args);
+ static void Write(const v8::FunctionCallbackInfo<v8::Value>& args);
+ static void Quit(const v8::FunctionCallbackInfo<v8::Value>& args);
+ static void Version(const v8::FunctionCallbackInfo<v8::Value>& args);
+ static void EnableProfiler(const v8::FunctionCallbackInfo<v8::Value>& args);
+ static void DisableProfiler(const v8::FunctionCallbackInfo<v8::Value>& args);
+ static void Read(const v8::FunctionCallbackInfo<v8::Value>& args);
+ static void ReadBuffer(const v8::FunctionCallbackInfo<v8::Value>& args);
static Handle<String> ReadFromStdin(Isolate* isolate);
- static Handle<Value> ReadLine(const Arguments& args) {
- return ReadFromStdin(args.GetIsolate());
+ static void ReadLine(const v8::FunctionCallbackInfo<v8::Value>& args) {
+ args.GetReturnValue().Set(ReadFromStdin(args.GetIsolate()));
}
- static Handle<Value> Load(const Arguments& args);
- static Handle<Value> ArrayBuffer(const Arguments& args);
- static Handle<Value> Int8Array(const Arguments& args);
- static Handle<Value> Uint8Array(const Arguments& args);
- static Handle<Value> Int16Array(const Arguments& args);
- static Handle<Value> Uint16Array(const Arguments& args);
- static Handle<Value> Int32Array(const Arguments& args);
- static Handle<Value> Uint32Array(const Arguments& args);
- static Handle<Value> Float32Array(const Arguments& args);
- static Handle<Value> Float64Array(const Arguments& args);
- static Handle<Value> Uint8ClampedArray(const Arguments& args);
- static Handle<Value> ArrayBufferSlice(const Arguments& args);
- static Handle<Value> ArraySubArray(const Arguments& args);
- static Handle<Value> ArraySet(const Arguments& args);
+ static void Load(const v8::FunctionCallbackInfo<v8::Value>& args);
+ static void ArrayBuffer(const v8::FunctionCallbackInfo<v8::Value>& args);
+ static void Int8Array(const v8::FunctionCallbackInfo<v8::Value>& args);
+ static void Uint8Array(const v8::FunctionCallbackInfo<v8::Value>& args);
+ static void Int16Array(const v8::FunctionCallbackInfo<v8::Value>& args);
+ static void Uint16Array(const v8::FunctionCallbackInfo<v8::Value>& args);
+ static void Int32Array(const v8::FunctionCallbackInfo<v8::Value>& args);
+ static void Uint32Array(const v8::FunctionCallbackInfo<v8::Value>& args);
+ static void Float32Array(const v8::FunctionCallbackInfo<v8::Value>& args);
+ static void Float64Array(const v8::FunctionCallbackInfo<v8::Value>& args);
+ static void Uint8ClampedArray(
+ const v8::FunctionCallbackInfo<v8::Value>& args);
+ static void ArrayBufferSlice(const v8::FunctionCallbackInfo<v8::Value>& args);
+ static void ArraySubArray(const v8::FunctionCallbackInfo<v8::Value>& args);
+ static void ArraySet(const v8::FunctionCallbackInfo<v8::Value>& args);
// The OS object on the global object contains methods for performing
// operating system calls:
//
@@ -365,14 +366,14 @@ class Shell : public i::AllStatic {
// with the current umask. Intermediate directories are created if necessary.
// An exception is not thrown if the directory already exists. Analogous to
// the "mkdir -p" command.
- static Handle<Value> OSObject(const Arguments& args);
- static Handle<Value> System(const Arguments& args);
- static Handle<Value> ChangeDirectory(const Arguments& args);
- static Handle<Value> SetEnvironment(const Arguments& args);
- static Handle<Value> UnsetEnvironment(const Arguments& args);
- static Handle<Value> SetUMask(const Arguments& args);
- static Handle<Value> MakeDirectory(const Arguments& args);
- static Handle<Value> RemoveDirectory(const Arguments& args);
+ static void OSObject(const v8::FunctionCallbackInfo<v8::Value>& args);
+ static void System(const v8::FunctionCallbackInfo<v8::Value>& args);
+ static void ChangeDirectory(const v8::FunctionCallbackInfo<v8::Value>& args);
+ static void SetEnvironment(const v8::FunctionCallbackInfo<v8::Value>& args);
+ static void UnsetEnvironment(const v8::FunctionCallbackInfo<v8::Value>& args);
+ static void SetUMask(const v8::FunctionCallbackInfo<v8::Value>& args);
+ static void MakeDirectory(const v8::FunctionCallbackInfo<v8::Value>& args);
+ static void RemoveDirectory(const v8::FunctionCallbackInfo<v8::Value>& args);
static void AddOSMethods(Handle<ObjectTemplate> os_template);
@@ -412,9 +413,10 @@ class Shell : public i::AllStatic {
int32_t byteLength,
int32_t byteOffset,
int32_t element_size);
- static Handle<Value> CreateExternalArray(const Arguments& args,
- ExternalArrayType type,
- int32_t element_size);
+ static void CreateExternalArray(
+ const v8::FunctionCallbackInfo<v8::Value>& args,
+ ExternalArrayType type,
+ int32_t element_size);
static void ExternalArrayWeakCallback(Isolate* isolate,
Persistent<Object>* object,
uint8_t* data);
diff --git a/deps/v8/src/debug.cc b/deps/v8/src/debug.cc
index 02ec1248f0..5d26ba2b13 100644
--- a/deps/v8/src/debug.cc
+++ b/deps/v8/src/debug.cc
@@ -121,7 +121,7 @@ BreakLocationIterator::~BreakLocationIterator() {
void BreakLocationIterator::Next() {
- AssertNoAllocation nogc;
+ DisallowHeapAllocation no_gc;
ASSERT(!RinfoDone());
// Iterate through reloc info for code and original code stopping at each
@@ -211,14 +211,15 @@ void BreakLocationIterator::Next(int count) {
}
-// Find the break point closest to the supplied address.
+// Find the break point at the supplied address, or the closest one before
+// the address.
void BreakLocationIterator::FindBreakLocationFromAddress(Address pc) {
// Run through all break points to locate the one closest to the address.
int closest_break_point = 0;
int distance = kMaxInt;
while (!Done()) {
// Check if this break point is closer than what was previously found.
- if (this->pc() < pc && pc - this->pc() < distance) {
+ if (this->pc() <= pc && pc - this->pc() < distance) {
closest_break_point = break_point();
distance = static_cast<int>(pc - this->pc());
// Check whether we can't get any closer.
@@ -619,14 +620,14 @@ void ScriptCache::Add(Handle<Script> script) {
(global_handles->Create(*script)));
global_handles->MakeWeak(reinterpret_cast<Object**>(script_.location()),
this,
- NULL,
ScriptCache::HandleWeakScript);
entry->value = script_.location();
}
Handle<FixedArray> ScriptCache::GetScripts() {
- Handle<FixedArray> instances = FACTORY->NewFixedArray(occupancy());
+ Factory* factory = Isolate::Current()->factory();
+ Handle<FixedArray> instances = factory->NewFixedArray(occupancy());
int count = 0;
for (HashMap::Entry* entry = Start(); entry != NULL; entry = Next(entry)) {
ASSERT(entry->value != NULL);
@@ -664,12 +665,12 @@ void ScriptCache::Clear() {
void ScriptCache::HandleWeakScript(v8::Isolate* isolate,
- v8::Persistent<v8::Value> obj,
+ v8::Persistent<v8::Value>* obj,
void* data) {
ScriptCache* script_cache = reinterpret_cast<ScriptCache*>(data);
// Find the location of the global handle.
Script** location =
- reinterpret_cast<Script**>(Utils::OpenHandle(*obj).location());
+ reinterpret_cast<Script**>(Utils::OpenHandle(**obj).location());
ASSERT((*location)->IsScript());
// Remove the entry from the cache.
@@ -678,8 +679,7 @@ void ScriptCache::HandleWeakScript(v8::Isolate* isolate,
script_cache->collected_scripts_.Add(id);
// Clear the weak handle.
- obj.Dispose(isolate);
- obj.Clear();
+ obj->Dispose(isolate);
}
@@ -699,7 +699,7 @@ void Debug::SetUp(bool create_heap_objects) {
void Debug::HandleWeakDebugInfo(v8::Isolate* isolate,
- v8::Persistent<v8::Value> obj,
+ v8::Persistent<v8::Value>* obj,
void* data) {
Debug* debug = reinterpret_cast<Isolate*>(isolate)->debug();
DebugInfoListNode* node = reinterpret_cast<DebugInfoListNode*>(data);
@@ -727,7 +727,6 @@ DebugInfoListNode::DebugInfoListNode(DebugInfo* debug_info): next_(NULL) {
(global_handles->Create(debug_info)));
global_handles->MakeWeak(reinterpret_cast<Object**>(debug_info_.location()),
this,
- NULL,
Debug::HandleWeakDebugInfo);
}
@@ -790,7 +789,7 @@ bool Debug::CompileDebuggerScript(int index) {
MessageLocation computed_location;
isolate->ComputeLocation(&computed_location);
Handle<Object> message = MessageHandler::MakeMessageObject(
- "error_loading_debugger", &computed_location,
+ isolate, "error_loading_debugger", &computed_location,
Vector<Handle<Object> >::empty(), Handle<String>(), Handle<JSArray>());
ASSERT(!isolate->has_pending_exception());
if (!exception.is_null()) {
@@ -945,7 +944,9 @@ Object* Debug::Break(Arguments args) {
// Find the break point where execution has stopped.
BreakLocationIterator break_location_iterator(debug_info,
ALL_BREAK_LOCATIONS);
- break_location_iterator.FindBreakLocationFromAddress(frame->pc());
+ // pc points to the instruction after the current one, which may itself be
+ // a break location; hence the "- 1", to exclude it from the search.
+ break_location_iterator.FindBreakLocationFromAddress(frame->pc() - 1);
// Check whether step next reached a new statement.
if (!StepNextContinue(&break_location_iterator, frame)) {
@@ -1240,15 +1241,11 @@ void Debug::ClearBreakPoint(Handle<Object> break_point_object) {
// Get information in the break point.
BreakPointInfo* break_point_info = BreakPointInfo::cast(result);
Handle<DebugInfo> debug_info = node->debug_info();
- Handle<SharedFunctionInfo> shared(debug_info->shared());
- int source_position = break_point_info->statement_position()->value();
-
- // Source positions starts with zero.
- ASSERT(source_position >= 0);
// Find the break point and clear it.
BreakLocationIterator it(debug_info, SOURCE_BREAK_LOCATIONS);
- it.FindBreakLocationFromPosition(source_position);
+ it.FindBreakLocationFromAddress(debug_info->code()->entry() +
+ break_point_info->code_position()->value());
it.ClearBreakPoint(break_point_object);
// If there are no more break points left remove the debug info for this
@@ -1406,7 +1403,9 @@ void Debug::PrepareStep(StepAction step_action, int step_count) {
// Find the break location where execution has stopped.
BreakLocationIterator it(debug_info, ALL_BREAK_LOCATIONS);
- it.FindBreakLocationFromAddress(frame->pc());
+ // pc points to the instruction after the current one, which may itself be
+ // a break location; hence the "- 1", to exclude it from the search.
+ it.FindBreakLocationFromAddress(frame->pc() - 1);
// Compute whether or not the target is a call target.
bool is_load_or_store = false;
@@ -2025,7 +2024,7 @@ void Debug::PrepareForBreakPoints() {
// Ensure no GC in this scope as we are going to use gc_metadata
// field in the Code object to mark active functions.
- AssertNoAllocation no_allocation;
+ DisallowHeapAllocation no_allocation;
Object* active_code_marker = heap->the_hole_value();
@@ -2140,7 +2139,7 @@ Object* Debug::FindSharedFunctionInfoInScript(Handle<Script> script,
while (!done) {
{ // Extra scope for iterator and no-allocation.
heap->EnsureHeapIsIterable();
- AssertNoAllocation no_alloc_during_heap_iteration;
+ DisallowHeapAllocation no_alloc_during_heap_iteration;
HeapIterator iterator(heap);
for (HeapObject* obj = iterator.next();
obj != NULL; obj = iterator.next()) {
@@ -2229,6 +2228,8 @@ Object* Debug::FindSharedFunctionInfoInScript(Handle<Script> script,
// Ensures the debug information is present for shared.
bool Debug::EnsureDebugInfo(Handle<SharedFunctionInfo> shared,
Handle<JSFunction> function) {
+ Isolate* isolate = shared->GetIsolate();
+
// Return if we already have the debug info for shared.
if (HasDebugInfo(shared)) {
ASSERT(shared->is_compiled());
@@ -2245,7 +2246,7 @@ bool Debug::EnsureDebugInfo(Handle<SharedFunctionInfo> shared,
}
// Create the debug info object.
- Handle<DebugInfo> debug_info = FACTORY->NewDebugInfo(shared);
+ Handle<DebugInfo> debug_info = isolate->factory()->NewDebugInfo(shared);
// Add debug info to the list.
DebugInfoListNode* node = new DebugInfoListNode(*debug_info);
@@ -2476,7 +2477,7 @@ void Debug::CreateScriptCache() {
// Scan heap for Script objects.
int count = 0;
HeapIterator iterator(heap);
- AssertNoAllocation no_allocation;
+ DisallowHeapAllocation no_allocation;
for (HeapObject* obj = iterator.next(); obj != NULL; obj = iterator.next()) {
if (obj->IsScript() && Script::cast(obj)->HasValidSource()) {
diff --git a/deps/v8/src/debug.h b/deps/v8/src/debug.h
index ccdc0c05e6..467acb93e8 100644
--- a/deps/v8/src/debug.h
+++ b/deps/v8/src/debug.h
@@ -190,7 +190,7 @@ class ScriptCache : private HashMap {
// Weak handle callback for scripts in the cache.
static void HandleWeakScript(v8::Isolate* isolate,
- v8::Persistent<v8::Value> obj,
+ v8::Persistent<v8::Value>* obj,
void* data);
// List used during GC to temporarily store id's of collected scripts.
@@ -387,7 +387,7 @@ class Debug {
// Passed to MakeWeak.
static void HandleWeakDebugInfo(v8::Isolate* isolate,
- v8::Persistent<v8::Value> obj,
+ v8::Persistent<v8::Value>* obj,
void* data);
friend class Debugger;
diff --git a/deps/v8/src/deoptimizer.cc b/deps/v8/src/deoptimizer.cc
index 26410e9de5..723d3f692e 100644
--- a/deps/v8/src/deoptimizer.cc
+++ b/deps/v8/src/deoptimizer.cc
@@ -285,7 +285,7 @@ void Deoptimizer::VisitAllOptimizedFunctionsForContext(
Context* context, OptimizedFunctionVisitor* visitor) {
Isolate* isolate = context->GetIsolate();
ZoneScope zone_scope(isolate->runtime_zone(), DELETE_ON_EXIT);
- AssertNoAllocation no_allocation;
+ DisallowHeapAllocation no_allocation;
ASSERT(context->IsNativeContext());
@@ -313,7 +313,7 @@ void Deoptimizer::VisitAllOptimizedFunctionsForContext(
void Deoptimizer::VisitAllOptimizedFunctions(
Isolate* isolate,
OptimizedFunctionVisitor* visitor) {
- AssertNoAllocation no_allocation;
+ DisallowHeapAllocation no_allocation;
// Run through the list of all native contexts and deoptimize.
Object* context = isolate->heap()->native_contexts_list();
@@ -335,7 +335,7 @@ static void PartitionOptimizedFunctions(Context* context,
ZoneList<Code*>* partitions,
Zone* zone,
Object* undefined) {
- AssertNoAllocation no_allocation;
+ DisallowHeapAllocation no_allocation;
Object* current = context->get(Context::OPTIMIZED_FUNCTIONS_LIST);
Object* remainder_head = undefined;
Object* remainder_tail = undefined;
@@ -388,7 +388,7 @@ class DeoptimizeWithMatchingCodeFilter : public OptimizedFunctionFilter {
void Deoptimizer::DeoptimizeAll(Isolate* isolate) {
- AssertNoAllocation no_allocation;
+ DisallowHeapAllocation no_allocation;
if (FLAG_trace_deopt) {
PrintF("[deoptimize all contexts]\n");
@@ -400,7 +400,7 @@ void Deoptimizer::DeoptimizeAll(Isolate* isolate) {
void Deoptimizer::DeoptimizeGlobalObject(JSObject* object) {
- AssertNoAllocation no_allocation;
+ DisallowHeapAllocation no_allocation;
DeoptimizeAllFilter filter;
if (object->IsJSGlobalProxy()) {
Object* proto = object->GetPrototype();
@@ -451,7 +451,7 @@ void Deoptimizer::DeoptimizeAllFunctionsForContext(
void Deoptimizer::DeoptimizeAllFunctionsWith(Isolate* isolate,
OptimizedFunctionFilter* filter) {
- AssertNoAllocation no_allocation;
+ DisallowHeapAllocation no_allocation;
// Run through the list of all native contexts and deoptimize.
Object* context = isolate->heap()->native_contexts_list();
@@ -463,7 +463,7 @@ void Deoptimizer::DeoptimizeAllFunctionsWith(Isolate* isolate,
void Deoptimizer::HandleWeakDeoptimizedCode(v8::Isolate* isolate,
- v8::Persistent<v8::Value> obj,
+ v8::Persistent<v8::Value>* obj,
void* parameter) {
DeoptimizingCodeListNode* node =
reinterpret_cast<DeoptimizingCodeListNode*>(parameter);
@@ -558,7 +558,10 @@ Deoptimizer::Deoptimizer(Isolate* isolate,
? StackFrame::STUB
: StackFrame::JAVA_SCRIPT;
trace_ = TraceEnabledFor(type, frame_type);
- ASSERT(HEAP->allow_allocation(false));
+#ifdef DEBUG
+ CHECK(AllowHeapAllocation::IsAllowed());
+ disallow_heap_allocation_ = new DisallowHeapAllocation();
+#endif // DEBUG
unsigned size = ComputeInputFrameSize();
input_ = new(size) FrameDescription(size, function);
input_->SetFrameType(frame_type);
@@ -608,6 +611,7 @@ void Deoptimizer::PrintFunctionName() {
Deoptimizer::~Deoptimizer() {
ASSERT(input_ == NULL && output_ == NULL);
+ ASSERT(disallow_heap_allocation_ == NULL);
}
@@ -619,7 +623,12 @@ void Deoptimizer::DeleteFrameDescriptions() {
delete[] output_;
input_ = NULL;
output_ = NULL;
- ASSERT(!HEAP->allow_allocation(true));
+#ifdef DEBUG
+ CHECK(!AllowHeapAllocation::IsAllowed());
+ CHECK(disallow_heap_allocation_ != NULL);
+ delete disallow_heap_allocation_;
+ disallow_heap_allocation_ = NULL;
+#endif // DEBUG
}
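
AssertNoAllocation and the stateful HEAP->allow_allocation(bool) toggles give way throughout this patch to DisallowHeapAllocation, a scope object whose debug-mode destructor re-enables what its constructor forbade. The Deoptimizer heap-allocates one only because its no-GC window opens in the constructor and closes in DeleteFrameDescriptions; the ordinary form is a plain stack scope:

    {
      DisallowHeapAllocation no_gc;  // debug builds assert on any allocation
      // ... walk raw Object* pointers without fear of a moving GC ...
    }                                // allocation is permitted again here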
@@ -1977,56 +1986,6 @@ void Deoptimizer::DoTranslateCommand(TranslationIterator* iterator,
}
-static bool ObjectToInt32(Object* obj, int32_t* value) {
- if (obj->IsSmi()) {
- *value = Smi::cast(obj)->value();
- return true;
- }
-
- if (obj->IsHeapNumber()) {
- double num = HeapNumber::cast(obj)->value();
- if (FastI2D(FastD2I(num)) != num) {
- if (FLAG_trace_osr) {
- PrintF("**** %g could not be converted to int32 ****\n",
- HeapNumber::cast(obj)->value());
- }
- return false;
- }
-
- *value = FastD2I(num);
- return true;
- }
-
- return false;
-}
-
-
-static bool ObjectToUint32(Object* obj, uint32_t* value) {
- if (obj->IsSmi()) {
- if (Smi::cast(obj)->value() < 0) return false;
-
- *value = static_cast<uint32_t>(Smi::cast(obj)->value());
- return true;
- }
-
- if (obj->IsHeapNumber()) {
- double num = HeapNumber::cast(obj)->value();
- if ((num < 0) || (FastUI2D(FastD2UI(num)) != num)) {
- if (FLAG_trace_osr) {
- PrintF("**** %g could not be converted to uint32 ****\n",
- HeapNumber::cast(obj)->value());
- }
- return false;
- }
-
- *value = FastD2UI(num);
- return true;
- }
-
- return false;
-}
-
-
bool Deoptimizer::DoOsrTranslateCommand(TranslationIterator* iterator,
int* input_offset) {
disasm::NameConverter converter;
@@ -2070,7 +2029,7 @@ bool Deoptimizer::DoOsrTranslateCommand(TranslationIterator* iterator,
case Translation::INT32_REGISTER: {
int32_t int32_value = 0;
- if (!ObjectToInt32(input_object, &int32_value)) return false;
+ if (!input_object->ToInt32(&int32_value)) return false;
int output_reg = iterator->Next();
if (FLAG_trace_osr) {
@@ -2085,7 +2044,7 @@ bool Deoptimizer::DoOsrTranslateCommand(TranslationIterator* iterator,
case Translation::UINT32_REGISTER: {
uint32_t uint32_value = 0;
- if (!ObjectToUint32(input_object, &uint32_value)) return false;
+ if (!input_object->ToUint32(&uint32_value)) return false;
int output_reg = iterator->Next();
if (FLAG_trace_osr) {
@@ -2132,7 +2091,7 @@ bool Deoptimizer::DoOsrTranslateCommand(TranslationIterator* iterator,
case Translation::INT32_STACK_SLOT: {
int32_t int32_value = 0;
- if (!ObjectToInt32(input_object, &int32_value)) return false;
+ if (!input_object->ToInt32(&int32_value)) return false;
int output_index = iterator->Next();
unsigned output_offset =
@@ -2149,7 +2108,7 @@ bool Deoptimizer::DoOsrTranslateCommand(TranslationIterator* iterator,
case Translation::UINT32_STACK_SLOT: {
uint32_t uint32_value = 0;
- if (!ObjectToUint32(input_object, &uint32_value)) return false;
+ if (!input_object->ToUint32(&uint32_value)) return false;
int output_index = iterator->Next();
unsigned output_offset =
@@ -2750,7 +2709,6 @@ DeoptimizingCodeListNode::DeoptimizingCodeListNode(Code* code): next_(NULL) {
code_ = Handle<Code>::cast(global_handles->Create(code));
global_handles->MakeWeak(reinterpret_cast<Object**>(code_.location()),
this,
- NULL,
Deoptimizer::HandleWeakDeoptimizedCode);
}
@@ -2855,7 +2813,7 @@ Vector<SlotRef> SlotRef::ComputeSlotMappingForArguments(
JavaScriptFrame* frame,
int inlined_jsframe_index,
int formal_parameter_count) {
- AssertNoAllocation no_gc;
+ DisallowHeapAllocation no_gc;
int deopt_index = Safepoint::kNoDeoptimizationIndex;
DeoptimizationInputData* data =
static_cast<OptimizedFrame*>(frame)->GetDeoptimizationData(&deopt_index);
diff --git a/deps/v8/src/deoptimizer.h b/deps/v8/src/deoptimizer.h
index c1b3a9d25e..5569f7ffd8 100644
--- a/deps/v8/src/deoptimizer.h
+++ b/deps/v8/src/deoptimizer.h
@@ -38,6 +38,24 @@
namespace v8 {
namespace internal {
+
+static inline double read_double_value(Address p) {
+#ifdef V8_HOST_CAN_READ_UNALIGNED
+ return Memory::double_at(p);
+#else // V8_HOST_CAN_READ_UNALIGNED
+ // Prevent gcc from using load-double (mips ldc1) on (possibly)
+ // non-64-bit aligned address.
+ union conversion {
+ double d;
+ uint32_t u[2];
+ } c;
+ c.u[0] = *reinterpret_cast<uint32_t*>(p);
+ c.u[1] = *reinterpret_cast<uint32_t*>(p + 4);
+ return c.d;
+#endif // V8_HOST_CAN_READ_UNALIGNED
+}
+
+
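
read_double_value earns its union dance because the addresses the deoptimizer reads from are only guaranteed word alignment, and on MIPS gcc would otherwise emit ldc1, which faults on a non-8-byte-aligned operand. A hypothetical illustration of the case it guards against (assumes <cstring> for memcpy):

    double original = 2.5;
    uint32_t words[3];
    memcpy(&words[1], &original, sizeof(original));  // 4- but maybe not 8-aligned
    Address p = reinterpret_cast<Address>(&words[1]);
    double copy = read_double_value(p);              // two 32-bit loads, no fault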
class FrameDescription;
class TranslationIterator;
class DeoptimizingCodeListNode;
@@ -385,7 +403,7 @@ class Deoptimizer : public Malloced {
// Weak handle callback for deoptimizing code objects.
static void HandleWeakDeoptimizedCode(v8::Isolate* isolate,
- v8::Persistent<v8::Value> obj,
+ v8::Persistent<v8::Value>* obj,
void* data);
// Deoptimize function assuming that function->next_function_link() points
@@ -431,6 +449,9 @@ class Deoptimizer : public Malloced {
List<Object*> deferred_arguments_objects_values_;
List<ArgumentsObjectMaterializationDescriptor> deferred_arguments_objects_;
List<HeapNumberMaterializationDescriptor> deferred_heap_numbers_;
+#ifdef DEBUG
+ DisallowHeapAllocation* disallow_heap_allocation_;
+#endif // DEBUG
bool trace_;
@@ -476,19 +497,7 @@ class FrameDescription {
double GetDoubleFrameSlot(unsigned offset) {
intptr_t* ptr = GetFrameSlotPointer(offset);
-#if V8_TARGET_ARCH_MIPS
- // Prevent gcc from using load-double (mips ldc1) on (possibly)
- // non-64-bit aligned double. Uses two lwc1 instructions.
- union conversion {
- double d;
- uint32_t u[2];
- } c;
- c.u[0] = *reinterpret_cast<uint32_t*>(ptr);
- c.u[1] = *(reinterpret_cast<uint32_t*>(ptr) + 1);
- return c.d;
-#else
- return *reinterpret_cast<double*>(ptr);
-#endif
+ return read_double_value(reinterpret_cast<Address>(ptr));
}
void SetFrameSlot(unsigned offset, intptr_t value) {
@@ -818,7 +827,7 @@ class SlotRef BASE_EMBEDDED {
}
case DOUBLE: {
- double value = Memory::double_at(addr_);
+ double value = read_double_value(addr_);
return isolate->factory()->NewNumber(value);
}
diff --git a/deps/v8/src/disassembler.cc b/deps/v8/src/disassembler.cc
index e41734babd..5ec1dcb77c 100644
--- a/deps/v8/src/disassembler.cc
+++ b/deps/v8/src/disassembler.cc
@@ -115,8 +115,8 @@ static int DecodeIt(Isolate* isolate,
const V8NameConverter& converter,
byte* begin,
byte* end) {
- NoHandleAllocation ha(isolate);
- AssertNoAllocation no_alloc;
+ SealHandleScope shs(isolate);
+ DisallowHeapAllocation no_alloc;
ExternalReferenceEncoder ref_encoder;
Heap* heap = HEAP;
diff --git a/deps/v8/src/elements.cc b/deps/v8/src/elements.cc
index 7c2b56851b..77abf4e42b 100644
--- a/deps/v8/src/elements.cc
+++ b/deps/v8/src/elements.cc
@@ -155,7 +155,7 @@ static void CopyObjectToObjectElements(FixedArrayBase* from_base,
uint32_t to_start,
int raw_copy_size) {
ASSERT(to_base->map() != HEAP->fixed_cow_array_map());
- AssertNoAllocation no_allocation;
+ DisallowHeapAllocation no_allocation;
int copy_size = raw_copy_size;
if (raw_copy_size < 0) {
ASSERT(raw_copy_size == ElementsAccessor::kCopyToEnd ||
@@ -204,7 +204,7 @@ static void CopyDictionaryToObjectElements(FixedArrayBase* from_base,
uint32_t to_start,
int raw_copy_size) {
SeededNumberDictionary* from = SeededNumberDictionary::cast(from_base);
- AssertNoAllocation no_allocation;
+ DisallowHeapAllocation no_allocation;
int copy_size = raw_copy_size;
Heap* heap = from->GetHeap();
if (raw_copy_size < 0) {
@@ -840,7 +840,7 @@ class ElementsAccessorBase : public ElementsAccessor {
// Fill in the content
{
- AssertNoAllocation no_gc;
+ DisallowHeapAllocation no_gc;
WriteBarrierMode mode = result->GetWriteBarrierMode(no_gc);
for (int i = 0; i < len0; i++) {
Object* e = to->get(i);
@@ -2044,7 +2044,7 @@ MUST_USE_RESULT MaybeObject* ArrayConstructInitializeElements(
}
case FAST_HOLEY_ELEMENTS:
case FAST_ELEMENTS: {
- AssertNoAllocation no_gc;
+ DisallowHeapAllocation no_gc;
WriteBarrierMode mode = elms->GetWriteBarrierMode(no_gc);
FixedArray* object_elms = FixedArray::cast(elms);
for (int index = 0; index < number_of_elements; index++) {
diff --git a/deps/v8/src/execution.cc b/deps/v8/src/execution.cc
index 6d8c3c1022..38e7a3bec9 100644
--- a/deps/v8/src/execution.cc
+++ b/deps/v8/src/execution.cc
@@ -107,7 +107,7 @@ static Handle<Object> Invoke(bool is_construct,
// Save and restore context around invocation and block the
// allocation of handles without explicit handle scopes.
SaveContext save(isolate);
- NoHandleAllocation na(isolate);
+ SealHandleScope shs(isolate);
JSEntryFunction stub_entry = FUNCTION_CAST<JSEntryFunction>(code->entry());
// Call the function through the right JS entry stub.
@@ -641,7 +641,8 @@ Handle<Object> Execution::ToInt32(Handle<Object> obj, bool* exc) {
Handle<Object> Execution::NewDate(double time, bool* exc) {
- Handle<Object> time_obj = FACTORY->NewNumber(time);
+ Isolate* isolate = Isolate::Current();
+ Handle<Object> time_obj = isolate->factory()->NewNumber(time);
RETURN_NATIVE_CALL(create_date, { time_obj }, exc);
}
diff --git a/deps/v8/src/extensions/externalize-string-extension.cc b/deps/v8/src/extensions/externalize-string-extension.cc
index 0f6b639fa1..a3630fb9f5 100644
--- a/deps/v8/src/extensions/externalize-string-extension.cc
+++ b/deps/v8/src/extensions/externalize-string-extension.cc
@@ -72,26 +72,29 @@ v8::Handle<v8::FunctionTemplate> ExternalizeStringExtension::GetNativeFunction(
}
-v8::Handle<v8::Value> ExternalizeStringExtension::Externalize(
- const v8::Arguments& args) {
+void ExternalizeStringExtension::Externalize(
+ const v8::FunctionCallbackInfo<v8::Value>& args) {
if (args.Length() < 1 || !args[0]->IsString()) {
- return v8::ThrowException(v8::String::New(
+ v8::ThrowException(v8::String::New(
"First parameter to externalizeString() must be a string."));
+ return;
}
bool force_two_byte = false;
if (args.Length() >= 2) {
if (args[1]->IsBoolean()) {
force_two_byte = args[1]->BooleanValue();
} else {
- return v8::ThrowException(v8::String::New(
- "Second parameter to externalizeString() must be a boolean."));
+ v8::ThrowException(v8::String::New(
+ "Second parameter to externalizeString() must be a boolean."));
+ return;
}
}
bool result = false;
Handle<String> string = Utils::OpenHandle(*args[0].As<v8::String>());
if (string->IsExternalString()) {
- return v8::ThrowException(v8::String::New(
+ v8::ThrowException(v8::String::New(
"externalizeString() can't externalize twice."));
+ return;
}
if (string->IsOneByteRepresentation() && !force_two_byte) {
uint8_t* data = new uint8_t[string->length()];
@@ -115,21 +118,22 @@ v8::Handle<v8::Value> ExternalizeStringExtension::Externalize(
if (!result) delete resource;
}
if (!result) {
- return v8::ThrowException(v8::String::New("externalizeString() failed."));
+ v8::ThrowException(v8::String::New("externalizeString() failed."));
+ return;
}
- return v8::Undefined();
}
-v8::Handle<v8::Value> ExternalizeStringExtension::IsAscii(
- const v8::Arguments& args) {
+void ExternalizeStringExtension::IsAscii(
+ const v8::FunctionCallbackInfo<v8::Value>& args) {
if (args.Length() != 1 || !args[0]->IsString()) {
- return v8::ThrowException(v8::String::New(
+ v8::ThrowException(v8::String::New(
"isAsciiString() requires a single string argument."));
+ return;
}
- return
- Utils::OpenHandle(*args[0].As<v8::String>())->IsOneByteRepresentation() ?
- v8::True() : v8::False();
+ bool is_one_byte =
+ Utils::OpenHandle(*args[0].As<v8::String>())->IsOneByteRepresentation();
+ args.GetReturnValue().Set(is_one_byte);
}
diff --git a/deps/v8/src/extensions/externalize-string-extension.h b/deps/v8/src/extensions/externalize-string-extension.h
index b97b4962cf..ecbc1cf447 100644
--- a/deps/v8/src/extensions/externalize-string-extension.h
+++ b/deps/v8/src/extensions/externalize-string-extension.h
@@ -38,8 +38,8 @@ class ExternalizeStringExtension : public v8::Extension {
ExternalizeStringExtension() : v8::Extension("v8/externalize", kSource) {}
virtual v8::Handle<v8::FunctionTemplate> GetNativeFunction(
v8::Handle<v8::String> name);
- static v8::Handle<v8::Value> Externalize(const v8::Arguments& args);
- static v8::Handle<v8::Value> IsAscii(const v8::Arguments& args);
+ static void Externalize(const v8::FunctionCallbackInfo<v8::Value>& args);
+ static void IsAscii(const v8::FunctionCallbackInfo<v8::Value>& args);
static void Register();
private:
static const char* const kSource;
diff --git a/deps/v8/src/extensions/gc-extension.cc b/deps/v8/src/extensions/gc-extension.cc
index 1a2fe8ff4b..036b60cb23 100644
--- a/deps/v8/src/extensions/gc-extension.cc
+++ b/deps/v8/src/extensions/gc-extension.cc
@@ -38,13 +38,12 @@ v8::Handle<v8::FunctionTemplate> GCExtension::GetNativeFunction(
}
-v8::Handle<v8::Value> GCExtension::GC(const v8::Arguments& args) {
+void GCExtension::GC(const v8::FunctionCallbackInfo<v8::Value>& args) {
if (args[0]->BooleanValue()) {
HEAP->CollectGarbage(NEW_SPACE, "gc extension");
} else {
HEAP->CollectAllGarbage(Heap::kNoGCFlags, "gc extension");
}
- return v8::Undefined();
}
diff --git a/deps/v8/src/extensions/gc-extension.h b/deps/v8/src/extensions/gc-extension.h
index 54b865adf7..e412b92a4d 100644
--- a/deps/v8/src/extensions/gc-extension.h
+++ b/deps/v8/src/extensions/gc-extension.h
@@ -38,7 +38,7 @@ class GCExtension : public v8::Extension {
explicit GCExtension(const char* source) : v8::Extension("v8/gc", source) {}
virtual v8::Handle<v8::FunctionTemplate> GetNativeFunction(
v8::Handle<v8::String> name);
- static v8::Handle<v8::Value> GC(const v8::Arguments& args);
+ static void GC(const v8::FunctionCallbackInfo<v8::Value>& args);
static void Register();
};
diff --git a/deps/v8/src/extensions/statistics-extension.cc b/deps/v8/src/extensions/statistics-extension.cc
index 71b70a533d..e5a3009e80 100644
--- a/deps/v8/src/extensions/statistics-extension.cc
+++ b/deps/v8/src/extensions/statistics-extension.cc
@@ -58,8 +58,8 @@ static void AddNumber(v8::Local<v8::Object> object,
}
-v8::Handle<v8::Value> StatisticsExtension::GetCounters(
- const v8::Arguments& args) {
+void StatisticsExtension::GetCounters(
+ const v8::FunctionCallbackInfo<v8::Value>& args) {
Isolate* isolate = Isolate::Current();
Heap* heap = isolate->heap();
@@ -141,7 +141,7 @@ v8::Handle<v8::Value> StatisticsExtension::GetCounters(
"lo_space_commited_bytes");
AddNumber(result, heap->amount_of_external_allocated_memory(),
"amount_of_external_allocated_memory");
- return result;
+ args.GetReturnValue().Set(result);
}
diff --git a/deps/v8/src/extensions/statistics-extension.h b/deps/v8/src/extensions/statistics-extension.h
index 433c4cf687..bfd9c4134e 100644
--- a/deps/v8/src/extensions/statistics-extension.h
+++ b/deps/v8/src/extensions/statistics-extension.h
@@ -38,7 +38,7 @@ class StatisticsExtension : public v8::Extension {
StatisticsExtension() : v8::Extension("v8/statistics", kSource) {}
virtual v8::Handle<v8::FunctionTemplate> GetNativeFunction(
v8::Handle<v8::String> name);
- static v8::Handle<v8::Value> GetCounters(const v8::Arguments& args);
+ static void GetCounters(const v8::FunctionCallbackInfo<v8::Value>& args);
static void Register();
private:
static const char* const kSource;
diff --git a/deps/v8/src/factory.cc b/deps/v8/src/factory.cc
index fe71a225b5..c47b57d017 100644
--- a/deps/v8/src/factory.cc
+++ b/deps/v8/src/factory.cc
@@ -41,6 +41,14 @@ namespace v8 {
namespace internal {
+Handle<Box> Factory::NewBox(Handle<Object> value, PretenureFlag pretenure) {
+ CALL_HEAP_FUNCTION(
+ isolate(),
+ isolate()->heap()->AllocateBox(*value, pretenure),
+ Box);
+}
+
+
Handle<FixedArray> Factory::NewFixedArray(int size, PretenureFlag pretenure) {
ASSERT(0 <= size);
CALL_HEAP_FUNCTION(
@@ -476,8 +484,7 @@ Handle<ExternalArray> Factory::NewExternalArray(int length,
Handle<JSGlobalPropertyCell> Factory::NewJSGlobalPropertyCell(
Handle<Object> value) {
- ALLOW_HANDLE_DEREF(isolate(),
- "converting a handle into a global property cell");
+ AllowDeferredHandleDereference convert_to_cell;
CALL_HEAP_FUNCTION(
isolate(),
isolate()->heap()->AllocateJSGlobalPropertyCell(*value),
@@ -677,9 +684,9 @@ Handle<JSObject> Factory::NewNeanderObject() {
}
-Handle<Object> Factory::NewTypeError(const char* type,
+Handle<Object> Factory::NewTypeError(const char* message,
Vector< Handle<Object> > args) {
- return NewError("MakeTypeError", type, args);
+ return NewError("MakeTypeError", message, args);
}
@@ -688,9 +695,9 @@ Handle<Object> Factory::NewTypeError(Handle<String> message) {
}
-Handle<Object> Factory::NewRangeError(const char* type,
+Handle<Object> Factory::NewRangeError(const char* message,
Vector< Handle<Object> > args) {
- return NewError("MakeRangeError", type, args);
+ return NewError("MakeRangeError", message, args);
}
@@ -699,8 +706,9 @@ Handle<Object> Factory::NewRangeError(Handle<String> message) {
}
-Handle<Object> Factory::NewSyntaxError(const char* type, Handle<JSArray> args) {
- return NewError("MakeSyntaxError", type, args);
+Handle<Object> Factory::NewSyntaxError(const char* message,
+ Handle<JSArray> args) {
+ return NewError("MakeSyntaxError", message, args);
}
@@ -709,9 +717,9 @@ Handle<Object> Factory::NewSyntaxError(Handle<String> message) {
}
-Handle<Object> Factory::NewReferenceError(const char* type,
+Handle<Object> Factory::NewReferenceError(const char* message,
Vector< Handle<Object> > args) {
- return NewError("MakeReferenceError", type, args);
+ return NewError("MakeReferenceError", message, args);
}
@@ -721,7 +729,7 @@ Handle<Object> Factory::NewReferenceError(Handle<String> message) {
Handle<Object> Factory::NewError(const char* maker,
- const char* type,
+ const char* message,
Vector< Handle<Object> > args) {
// Instantiate a closeable HandleScope for EscapeFrom.
v8::HandleScope scope(reinterpret_cast<v8::Isolate*>(isolate()));
@@ -730,24 +738,24 @@ Handle<Object> Factory::NewError(const char* maker,
array->set(i, *args[i]);
}
Handle<JSArray> object = NewJSArrayWithElements(array);
- Handle<Object> result = NewError(maker, type, object);
+ Handle<Object> result = NewError(maker, message, object);
return result.EscapeFrom(&scope);
}
-Handle<Object> Factory::NewEvalError(const char* type,
+Handle<Object> Factory::NewEvalError(const char* message,
Vector< Handle<Object> > args) {
- return NewError("MakeEvalError", type, args);
+ return NewError("MakeEvalError", message, args);
}
-Handle<Object> Factory::NewError(const char* type,
+Handle<Object> Factory::NewError(const char* message,
Vector< Handle<Object> > args) {
- return NewError("MakeError", type, args);
+ return NewError("MakeError", message, args);
}
-Handle<String> Factory::EmergencyNewError(const char* type,
+Handle<String> Factory::EmergencyNewError(const char* message,
Handle<JSArray> args) {
const int kBufferSize = 1000;
char buffer[kBufferSize];
@@ -755,8 +763,8 @@ Handle<String> Factory::EmergencyNewError(const char* type,
char* p = &buffer[0];
Vector<char> v(buffer, kBufferSize);
- OS::StrNCpy(v, type, space);
- space -= Min(space, strlen(type));
+ OS::StrNCpy(v, message, space);
+ space -= Min(space, strlen(message));
p = &buffer[kBufferSize] - space;
for (unsigned i = 0; i < ARRAY_SIZE(args); i++) {
@@ -785,7 +793,7 @@ Handle<String> Factory::EmergencyNewError(const char* type,
Handle<Object> Factory::NewError(const char* maker,
- const char* type,
+ const char* message,
Handle<JSArray> args) {
Handle<String> make_str = InternalizeUtf8String(maker);
Handle<Object> fun_obj(
@@ -794,11 +802,11 @@ Handle<Object> Factory::NewError(const char* maker,
// If the builtins haven't been properly configured yet this error
// constructor may not have been defined. Bail out.
if (!fun_obj->IsJSFunction()) {
- return EmergencyNewError(type, args);
+ return EmergencyNewError(message, args);
}
Handle<JSFunction> fun = Handle<JSFunction>::cast(fun_obj);
- Handle<Object> type_obj = InternalizeUtf8String(type);
- Handle<Object> argv[] = { type_obj, args };
+ Handle<Object> message_obj = InternalizeUtf8String(message);
+ Handle<Object> argv[] = { message_obj, args };
// Invoke the JavaScript factory method. If an exception is thrown while
// running the factory method, use the exception as the result.
diff --git a/deps/v8/src/factory.h b/deps/v8/src/factory.h
index 233b3b01c8..d59d7423ae 100644
--- a/deps/v8/src/factory.h
+++ b/deps/v8/src/factory.h
@@ -39,6 +39,11 @@ namespace internal {
class Factory {
public:
+ // Allocate a new boxed value.
+ Handle<Box> NewBox(
+ Handle<Object> value,
+ PretenureFlag pretenure = NOT_TENURED);
+
// Allocate a new uninitialized fixed array.
Handle<FixedArray> NewFixedArray(
int size,
@@ -369,33 +374,33 @@ class Factory {
// Interface for creating error objects.
- Handle<Object> NewError(const char* maker, const char* type,
+ Handle<Object> NewError(const char* maker, const char* message,
Handle<JSArray> args);
- Handle<String> EmergencyNewError(const char* type, Handle<JSArray> args);
- Handle<Object> NewError(const char* maker, const char* type,
+ Handle<String> EmergencyNewError(const char* message, Handle<JSArray> args);
+ Handle<Object> NewError(const char* maker, const char* message,
Vector< Handle<Object> > args);
- Handle<Object> NewError(const char* type,
+ Handle<Object> NewError(const char* message,
Vector< Handle<Object> > args);
Handle<Object> NewError(Handle<String> message);
Handle<Object> NewError(const char* constructor,
Handle<String> message);
- Handle<Object> NewTypeError(const char* type,
+ Handle<Object> NewTypeError(const char* message,
Vector< Handle<Object> > args);
Handle<Object> NewTypeError(Handle<String> message);
- Handle<Object> NewRangeError(const char* type,
+ Handle<Object> NewRangeError(const char* message,
Vector< Handle<Object> > args);
Handle<Object> NewRangeError(Handle<String> message);
- Handle<Object> NewSyntaxError(const char* type, Handle<JSArray> args);
+ Handle<Object> NewSyntaxError(const char* message, Handle<JSArray> args);
Handle<Object> NewSyntaxError(Handle<String> message);
- Handle<Object> NewReferenceError(const char* type,
+ Handle<Object> NewReferenceError(const char* message,
Vector< Handle<Object> > args);
Handle<Object> NewReferenceError(Handle<String> message);
- Handle<Object> NewEvalError(const char* type,
+ Handle<Object> NewEvalError(const char* message,
Vector< Handle<Object> > args);
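
The type to message renames above are cosmetic: the first argument to these factories has always been a key into the JavaScript message templates (compare 'incompatible_method_receiver' in generator.js below), so "message" describes it better than "type". A sketch of a typical call site, assuming the internal HandleVector helper; the wrapper function itself is hypothetical:

    // Inside namespace v8::internal. The first argument is a
    // message-template key, not an error "type".
    static MaybeObject* ThrowIncompatibleReceiver(Isolate* isolate,
                                                  Handle<Object> name,
                                                  Handle<Object> receiver) {
      Handle<Object> args[] = { name, receiver };
      return isolate->Throw(*isolate->factory()->NewTypeError(
          "incompatible_method_receiver", HandleVector(args, 2)));
    }
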
diff --git a/deps/v8/src/flag-definitions.h b/deps/v8/src/flag-definitions.h
index 58f29b4d9c..b70a532afc 100644
--- a/deps/v8/src/flag-definitions.h
+++ b/deps/v8/src/flag-definitions.h
@@ -170,6 +170,7 @@ DEFINE_bool(harmony_array_buffer, false,
"enable harmony array buffer")
DEFINE_implication(harmony_typed_arrays, harmony_array_buffer)
DEFINE_bool(harmony_generators, false, "enable harmony generators")
+DEFINE_bool(harmony_iteration, false, "enable harmony iteration (for-of)")
DEFINE_bool(harmony, false, "enable all harmony features (except typeof)")
DEFINE_implication(harmony, harmony_scoping)
DEFINE_implication(harmony, harmony_modules)
@@ -177,7 +178,9 @@ DEFINE_implication(harmony, harmony_symbols)
DEFINE_implication(harmony, harmony_proxies)
DEFINE_implication(harmony, harmony_collections)
DEFINE_implication(harmony, harmony_observation)
-DEFINE_implication(harmony, harmony_generators)
+// TODO(wingo): Re-enable when GC bug that appeared in r15060 is gone.
+// DEFINE_implication(harmony, harmony_generators)
+DEFINE_implication(harmony, harmony_iteration)
DEFINE_implication(harmony_modules, harmony_scoping)
DEFINE_implication(harmony_observation, harmony_collections)
// TODO(dslomov): add harmony => harmony_typed_arrays
@@ -192,12 +195,17 @@ DEFINE_bool(compiled_keyed_stores, true, "use optimizing compiler to "
DEFINE_bool(clever_optimizations,
true,
"Optimize object size, Array shift, DOM strings and string +")
-DEFINE_bool(pretenure_literals, true, "allocate literals in old space")
+DEFINE_bool(pretenuring, true, "allocate objects in old space")
+// TODO(hpayer): We will remove this flag as soon as we have pretenuring
+// support for specific allocation sites.
+DEFINE_bool(pretenuring_call_new, false, "pretenure call new")
DEFINE_bool(track_fields, true, "track fields with only smi values")
DEFINE_bool(track_double_fields, true, "track fields with double values")
DEFINE_bool(track_heap_object_fields, true, "track fields with heap values")
+DEFINE_bool(track_computed_fields, true, "track computed boilerplate fields")
DEFINE_implication(track_double_fields, track_fields)
DEFINE_implication(track_heap_object_fields, track_fields)
+DEFINE_implication(track_computed_fields, track_fields)
// Flags for data representation optimizations
DEFINE_bool(unbox_double_arrays, true, "automatically unbox arrays of doubles")
@@ -251,6 +259,8 @@ DEFINE_bool(array_bounds_checks_elimination, true,
"perform array bounds checks elimination")
DEFINE_bool(array_index_dehoisting, true,
"perform array index dehoisting")
+DEFINE_bool(analyze_environment_liveness, true,
+ "analyze liveness of environment slots and zap dead values")
DEFINE_bool(dead_code_elimination, true, "use dead code elimination")
DEFINE_bool(fold_constants, true, "use constant folding")
DEFINE_bool(trace_dead_code_elimination, false, "trace dead code elimination")
@@ -258,7 +268,7 @@ DEFINE_bool(unreachable_code_elimination, false,
"eliminate unreachable code (hidden behind soft deopts)")
DEFINE_bool(track_allocation_sites, true,
"Use allocation site info to reduce transitions")
-DEFINE_bool(optimize_constructed_arrays, false,
+DEFINE_bool(optimize_constructed_arrays, true,
"Use allocation site info on constructed arrays")
DEFINE_bool(trace_osr, false, "trace on-stack replacement")
DEFINE_int(stress_runs, 0, "number of stress runs")
@@ -377,6 +387,8 @@ DEFINE_bool(stack_trace_on_abort, true,
"print a stack trace if an assertion failure occurs")
// codegen-ia32.cc / codegen-arm.cc
+DEFINE_bool(trace_codegen, false,
+ "print name of functions for which code is generated")
DEFINE_bool(trace, false, "trace function calls")
DEFINE_bool(mask_constants_with_cookie,
true,
@@ -636,8 +648,6 @@ DEFINE_bool(enable_slow_asserts, false,
"enable asserts that are slow to execute")
// codegen-ia32.cc / codegen-arm.cc
-DEFINE_bool(trace_codegen, false,
- "print name of functions for which code is generated")
DEFINE_bool(print_source, false, "pretty print source code")
DEFINE_bool(print_builtin_source, false,
"pretty print source code for builtins")
diff --git a/deps/v8/src/frames.cc b/deps/v8/src/frames.cc
index 152cd30d5b..53f510849e 100644
--- a/deps/v8/src/frames.cc
+++ b/deps/v8/src/frames.cc
@@ -777,7 +777,7 @@ void JavaScriptFrame::PrintTop(Isolate* isolate,
bool print_line_number) {
// constructor calls
HandleScope scope(isolate);
- AssertNoAllocation no_allocation;
+ DisallowHeapAllocation no_allocation;
JavaScriptFrameIterator it(isolate);
while (!it.done()) {
if (it.frame()->is_java_script()) {
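
AssertNoAllocation gives way to DisallowHeapAllocation here and in many files below: a stack-allocated assertion scope that, in debug builds, makes any heap allocation in its extent fail an assert, so raw Object* pointers stay valid. A sketch of the idiom (the surrounding function is hypothetical):

    // Inside namespace v8::internal.
    void WalkFramesWithoutAllocating(Isolate* isolate) {
      HandleScope scope(isolate);
      DisallowHeapAllocation no_allocation;  // raw pointers stay valid below
      JavaScriptFrameIterator it(isolate);
      for (; !it.done(); it.Advance()) {
        // ... inspect it.frame() via raw pointers; any allocation asserts ...
      }
    }  // allocation is permitted again once the guard leaves scope
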
diff --git a/deps/v8/src/full-codegen.cc b/deps/v8/src/full-codegen.cc
index dc646b1a98..bad634cf3f 100644
--- a/deps/v8/src/full-codegen.cc
+++ b/deps/v8/src/full-codegen.cc
@@ -163,6 +163,12 @@ void BreakableStatementChecker::VisitForInStatement(ForInStatement* stmt) {
}
+void BreakableStatementChecker::VisitForOfStatement(ForOfStatement* stmt) {
+ // For-of is breakable because of the next() call.
+ is_breakable_ = true;
+}
+
+
void BreakableStatementChecker::VisitTryCatchStatement(
TryCatchStatement* stmt) {
// Mark try catch as breakable to avoid adding a break slot in front of it.
@@ -304,10 +310,7 @@ bool FullCodeGenerator::MakeCode(CompilationInfo* info) {
int len = String::cast(script->source())->length();
isolate->counters()->total_full_codegen_source_size()->Increment(len);
}
- if (FLAG_trace_codegen) {
- PrintF("Full Compiler - ");
- }
- CodeGenerator::MakeCodePrologue(info);
+ CodeGenerator::MakeCodePrologue(info, "full");
const int kInitialBufferSize = 4 * KB;
MacroAssembler masm(info->isolate(), NULL, kInitialBufferSize);
#ifdef ENABLE_GDB_JIT_INTERFACE
@@ -923,10 +926,10 @@ void FullCodeGenerator::EmitInlineRuntimeCall(CallRuntime* expr) {
}
-void FullCodeGenerator::EmitGeneratorSend(CallRuntime* expr) {
+void FullCodeGenerator::EmitGeneratorNext(CallRuntime* expr) {
ZoneList<Expression*>* args = expr->arguments();
ASSERT(args->length() == 2);
- EmitGeneratorResume(args->at(0), args->at(1), JSGeneratorObject::SEND);
+ EmitGeneratorResume(args->at(0), args->at(1), JSGeneratorObject::NEXT);
}
diff --git a/deps/v8/src/full-codegen.h b/deps/v8/src/full-codegen.h
index 32242b297b..68263a5dc6 100644
--- a/deps/v8/src/full-codegen.h
+++ b/deps/v8/src/full-codegen.h
@@ -491,6 +491,11 @@ class FullCodeGenerator: public AstVisitor {
INLINE_RUNTIME_FUNCTION_LIST(EMIT_INLINE_RUNTIME_CALL)
#undef EMIT_INLINE_RUNTIME_CALL
+ void EmitSeqStringSetCharCheck(Register string,
+ Register index,
+ Register value,
+ uint32_t encoding_mask);
+
// Platform-specific code for resuming generators.
void EmitGeneratorResume(Expression *generator,
Expression *value,
diff --git a/deps/v8/src/gdb-jit.cc b/deps/v8/src/gdb-jit.cc
index d08f2fe418..5717a96079 100644
--- a/deps/v8/src/gdb-jit.cc
+++ b/deps/v8/src/gdb-jit.cc
@@ -2062,7 +2062,7 @@ void GDBJITInterface::AddCode(const char* name,
if (!FLAG_gdbjit) return;
ScopedLock lock(mutex.Pointer());
- AssertNoAllocation no_gc;
+ DisallowHeapAllocation no_gc;
HashMap::Entry* e = GetEntries()->Lookup(code, HashForCodeObject(code), true);
if (e->value != NULL && !IsLineInfoTagged(e->value)) return;
diff --git a/deps/v8/src/generator.js b/deps/v8/src/generator.js
index 5e61091565..cc31a44588 100644
--- a/deps/v8/src/generator.js
+++ b/deps/v8/src/generator.js
@@ -34,26 +34,16 @@
// ----------------------------------------------------------------------------
-// TODO(wingo): Give link to specification. For now, the following diagram is
-// the spec:
-// http://wiki.ecmascript.org/lib/exe/fetch.php?cache=cache&media=harmony:es6_generator_object_model_3-29-13.png
+// Generator functions and objects are specified by ES6, sections 15.19.3 and
+// 15.19.4.
-function GeneratorObjectNext() {
+function GeneratorObjectNext(value) {
if (!IS_GENERATOR(this)) {
throw MakeTypeError('incompatible_method_receiver',
['[Generator].prototype.next', this]);
}
- return %_GeneratorSend(this, void 0);
-}
-
-function GeneratorObjectSend(value) {
- if (!IS_GENERATOR(this)) {
- throw MakeTypeError('incompatible_method_receiver',
- ['[Generator].prototype.send', this]);
- }
-
- return %_GeneratorSend(this, value);
+ return %_GeneratorNext(this, value);
}
function GeneratorObjectThrow(exn) {
@@ -71,7 +61,6 @@ function SetUpGenerators() {
InstallFunctions(GeneratorObjectPrototype,
DONT_ENUM | DONT_DELETE | READ_ONLY,
["next", GeneratorObjectNext,
- "send", GeneratorObjectSend,
"throw", GeneratorObjectThrow]);
%SetProperty(GeneratorObjectPrototype, "constructor",
GeneratorFunctionPrototype, DONT_ENUM | DONT_DELETE | READ_ONLY);
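
With send() removed, next() now accepts the resume value itself, matching the then-current ES6 draft. From an embedder, resuming a generator with a value is therefore a plain method call; a sketch against the 3.19 public API (the wrapper function is hypothetical):

    // gen.next(value) now plays the role the removed gen.send(value) had.
    v8::Handle<v8::Value> ResumeGenerator(v8::Handle<v8::Object> generator,
                                          v8::Handle<v8::Value> value) {
      v8::Local<v8::Function> next = v8::Local<v8::Function>::Cast(
          generator->Get(v8::String::New("next")));
      v8::Handle<v8::Value> argv[] = { value };
      return next->Call(generator, 1, argv);
    }
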
diff --git a/deps/v8/src/global-handles.cc b/deps/v8/src/global-handles.cc
index 29432bb5b7..b601e99900 100644
--- a/deps/v8/src/global-handles.cc
+++ b/deps/v8/src/global-handles.cc
@@ -25,9 +25,6 @@
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-// TODO(dcarney): remove
-#define V8_ALLOW_ACCESS_TO_PERSISTENT_IMPLICIT
-
#include "v8.h"
#include "api.h"
@@ -92,7 +89,7 @@ class GlobalHandles::Node {
set_partially_dependent(false);
set_in_new_space_list(false);
parameter_or_next_free_.next_free = NULL;
- near_death_callback_ = NULL;
+ weak_reference_callback_ = NULL;
}
#endif
@@ -105,7 +102,7 @@ class GlobalHandles::Node {
*first_free = this;
}
- void Acquire(Object* object, GlobalHandles* global_handles) {
+ void Acquire(Object* object) {
ASSERT(state() == FREE);
object_ = object;
class_id_ = v8::HeapProfiler::kPersistentHandleNoClassId;
@@ -113,11 +110,11 @@ class GlobalHandles::Node {
set_partially_dependent(false);
set_state(NORMAL);
parameter_or_next_free_.parameter = NULL;
- near_death_callback_ = NULL;
- IncreaseBlockUses(global_handles);
+ weak_reference_callback_ = NULL;
+ IncreaseBlockUses();
}
- void Release(GlobalHandles* global_handles) {
+ void Release() {
ASSERT(state() != FREE);
set_state(FREE);
#ifdef ENABLE_EXTRA_CHECKS
@@ -126,11 +123,9 @@ class GlobalHandles::Node {
class_id_ = v8::HeapProfiler::kPersistentHandleNoClassId;
set_independent(false);
set_partially_dependent(false);
- near_death_callback_ = NULL;
+ weak_reference_callback_ = NULL;
#endif
- parameter_or_next_free_.next_free = global_handles->first_free_;
- global_handles->first_free_ = this;
- DecreaseBlockUses(global_handles);
+ DecreaseBlockUses();
}
// Object slot accessors.
@@ -201,9 +196,9 @@ class GlobalHandles::Node {
set_independent(true);
}
- void MarkPartiallyDependent(GlobalHandles* global_handles) {
+ void MarkPartiallyDependent() {
ASSERT(state() != FREE);
- if (global_handles->isolate()->heap()->InNewSpace(object_)) {
+ if (GetGlobalHandles()->isolate()->heap()->InNewSpace(object_)) {
set_partially_dependent(true);
}
}
@@ -233,41 +228,31 @@ class GlobalHandles::Node {
parameter_or_next_free_.next_free = value;
}
- void MakeWeak(GlobalHandles* global_handles,
- void* parameter,
- RevivableCallback weak_reference_callback,
- NearDeathCallback near_death_callback) {
+ void MakeWeak(void* parameter,
+ RevivableCallback weak_reference_callback) {
ASSERT(state() != FREE);
set_state(WEAK);
set_parameter(parameter);
- if (weak_reference_callback != NULL) {
- flags_ = IsWeakCallback::update(flags_, true);
- near_death_callback_ =
- reinterpret_cast<NearDeathCallback>(weak_reference_callback);
- } else {
- flags_ = IsWeakCallback::update(flags_, false);
- near_death_callback_ = near_death_callback;
- }
+ weak_reference_callback_ = weak_reference_callback;
}
- void ClearWeakness(GlobalHandles* global_handles) {
+ void ClearWeakness() {
ASSERT(state() != FREE);
set_state(NORMAL);
set_parameter(NULL);
}
- bool PostGarbageCollectionProcessing(Isolate* isolate,
- GlobalHandles* global_handles) {
+ bool PostGarbageCollectionProcessing(Isolate* isolate) {
if (state() != Node::PENDING) return false;
- if (near_death_callback_ == NULL) {
- Release(global_handles);
+ if (weak_reference_callback_ == NULL) {
+ Release();
return false;
}
void* par = parameter();
set_state(NEAR_DEATH);
set_parameter(NULL);
- v8::Persistent<v8::Value> object = ToApi<v8::Value>(handle());
+ Object** object = location();
{
// Check that we are not passing a finalized external string to
// the callback.
@@ -277,19 +262,9 @@ class GlobalHandles::Node {
ExternalTwoByteString::cast(object_)->resource() != NULL);
// Leaving V8.
VMState<EXTERNAL> state(isolate);
- if (near_death_callback_ != NULL) {
- if (IsWeakCallback::decode(flags_)) {
- RevivableCallback callback =
- reinterpret_cast<RevivableCallback>(near_death_callback_);
- callback(reinterpret_cast<v8::Isolate*>(isolate),
- &object,
- par);
- } else {
- near_death_callback_(reinterpret_cast<v8::Isolate*>(isolate),
- object,
+ weak_reference_callback_(reinterpret_cast<v8::Isolate*>(isolate),
+ reinterpret_cast<Persistent<Value>*>(&object),
par);
- }
- }
}
      // Absence of explicit cleanup or revival of a weak handle
      // in most cases would lead to a memory leak.
@@ -299,8 +274,9 @@ class GlobalHandles::Node {
private:
inline NodeBlock* FindBlock();
- inline void IncreaseBlockUses(GlobalHandles* global_handles);
- inline void DecreaseBlockUses(GlobalHandles* global_handles);
+ inline GlobalHandles* GetGlobalHandles();
+ inline void IncreaseBlockUses();
+ inline void DecreaseBlockUses();
// Storage for object pointer.
// Placed first to avoid offset computation.
@@ -321,12 +297,11 @@ class GlobalHandles::Node {
class IsIndependent: public BitField<bool, 4, 1> {};
class IsPartiallyDependent: public BitField<bool, 5, 1> {};
class IsInNewSpaceList: public BitField<bool, 6, 1> {};
- class IsWeakCallback: public BitField<bool, 7, 1> {};
uint8_t flags_;
  // Handle-specific weak reference callback.
- NearDeathCallback near_death_callback_;
+ RevivableCallback weak_reference_callback_;
// Provided data for callback. In FREE state, this is used for
// the free list link.
@@ -343,8 +318,12 @@ class GlobalHandles::NodeBlock {
public:
static const int kSize = 256;
- explicit NodeBlock(NodeBlock* next)
- : next_(next), used_nodes_(0), next_used_(NULL), prev_used_(NULL) {}
+ explicit NodeBlock(GlobalHandles* global_handles, NodeBlock* next)
+ : next_(next),
+ used_nodes_(0),
+ next_used_(NULL),
+ prev_used_(NULL),
+ global_handles_(global_handles) {}
void PutNodesOnFreeList(Node** first_free) {
for (int i = kSize - 1; i >= 0; --i) {
@@ -357,11 +336,11 @@ class GlobalHandles::NodeBlock {
return &nodes_[index];
}
- void IncreaseUses(GlobalHandles* global_handles) {
+ void IncreaseUses() {
ASSERT(used_nodes_ < kSize);
if (used_nodes_++ == 0) {
- NodeBlock* old_first = global_handles->first_used_block_;
- global_handles->first_used_block_ = this;
+ NodeBlock* old_first = global_handles_->first_used_block_;
+ global_handles_->first_used_block_ = this;
next_used_ = old_first;
prev_used_ = NULL;
if (old_first == NULL) return;
@@ -369,17 +348,19 @@ class GlobalHandles::NodeBlock {
}
}
- void DecreaseUses(GlobalHandles* global_handles) {
+ void DecreaseUses() {
ASSERT(used_nodes_ > 0);
if (--used_nodes_ == 0) {
if (next_used_ != NULL) next_used_->prev_used_ = prev_used_;
if (prev_used_ != NULL) prev_used_->next_used_ = next_used_;
- if (this == global_handles->first_used_block_) {
- global_handles->first_used_block_ = next_used_;
+ if (this == global_handles_->first_used_block_) {
+ global_handles_->first_used_block_ = next_used_;
}
}
}
+ GlobalHandles* global_handles() { return global_handles_; }
+
// Next block in the list of all blocks.
NodeBlock* next() const { return next_; }
@@ -393,9 +374,15 @@ class GlobalHandles::NodeBlock {
int used_nodes_;
NodeBlock* next_used_;
NodeBlock* prev_used_;
+ GlobalHandles* global_handles_;
};
+GlobalHandles* GlobalHandles::Node::GetGlobalHandles() {
+ return FindBlock()->global_handles();
+}
+
+
GlobalHandles::NodeBlock* GlobalHandles::Node::FindBlock() {
intptr_t ptr = reinterpret_cast<intptr_t>(this);
ptr = ptr - index_ * sizeof(Node);
@@ -405,13 +392,23 @@ GlobalHandles::NodeBlock* GlobalHandles::Node::FindBlock() {
}
-void GlobalHandles::Node::IncreaseBlockUses(GlobalHandles* global_handles) {
- FindBlock()->IncreaseUses(global_handles);
+void GlobalHandles::Node::IncreaseBlockUses() {
+ NodeBlock* node_block = FindBlock();
+ node_block->IncreaseUses();
+ GlobalHandles* global_handles = node_block->global_handles();
+ global_handles->isolate()->counters()->global_handles()->Increment();
+ global_handles->number_of_global_handles_++;
}
-void GlobalHandles::Node::DecreaseBlockUses(GlobalHandles* global_handles) {
- FindBlock()->DecreaseUses(global_handles);
+void GlobalHandles::Node::DecreaseBlockUses() {
+ NodeBlock* node_block = FindBlock();
+ GlobalHandles* global_handles = node_block->global_handles();
+ parameter_or_next_free_.next_free = global_handles->first_free_;
+ global_handles->first_free_ = this;
+ node_block->DecreaseUses();
+ global_handles->isolate()->counters()->global_handles()->Decrement();
+ global_handles->number_of_global_handles_--;
}
@@ -465,17 +462,15 @@ GlobalHandles::~GlobalHandles() {
Handle<Object> GlobalHandles::Create(Object* value) {
- isolate_->counters()->global_handles()->Increment();
- number_of_global_handles_++;
if (first_free_ == NULL) {
- first_block_ = new NodeBlock(first_block_);
+ first_block_ = new NodeBlock(this, first_block_);
first_block_->PutNodesOnFreeList(&first_free_);
}
ASSERT(first_free_ != NULL);
// Take the first node in the free list.
Node* result = first_free_;
first_free_ = result->next_free();
- result->Acquire(value, this);
+ result->Acquire(value);
if (isolate_->heap()->InNewSpace(value) &&
!result->is_in_new_space_list()) {
new_space_nodes_.Add(result);
@@ -486,27 +481,20 @@ Handle<Object> GlobalHandles::Create(Object* value) {
void GlobalHandles::Destroy(Object** location) {
- isolate_->counters()->global_handles()->Decrement();
- number_of_global_handles_--;
- if (location == NULL) return;
- Node::FromLocation(location)->Release(this);
+ if (location != NULL) Node::FromLocation(location)->Release();
}
void GlobalHandles::MakeWeak(Object** location,
void* parameter,
- RevivableCallback weak_reference_callback,
- NearDeathCallback near_death_callback) {
- ASSERT((weak_reference_callback == NULL) != (near_death_callback == NULL));
- Node::FromLocation(location)->MakeWeak(this,
- parameter,
- weak_reference_callback,
- near_death_callback);
+ RevivableCallback weak_reference_callback) {
+ ASSERT(weak_reference_callback != NULL);
+ Node::FromLocation(location)->MakeWeak(parameter, weak_reference_callback);
}
void GlobalHandles::ClearWeakness(Object** location) {
- Node::FromLocation(location)->ClearWeakness(this);
+ Node::FromLocation(location)->ClearWeakness();
}
@@ -516,7 +504,7 @@ void GlobalHandles::MarkIndependent(Object** location) {
void GlobalHandles::MarkPartiallyDependent(Object** location) {
- Node::FromLocation(location)->MarkPartiallyDependent(this);
+ Node::FromLocation(location)->MarkPartiallyDependent();
}
@@ -653,7 +641,7 @@ bool GlobalHandles::PostGarbageCollectionProcessing(
continue;
}
node->clear_partially_dependent();
- if (node->PostGarbageCollectionProcessing(isolate_, this)) {
+ if (node->PostGarbageCollectionProcessing(isolate_)) {
if (initial_post_gc_processing_count != post_gc_processing_count_) {
// Weak callback triggered another GC and another round of
// PostGarbageCollection processing. The current node might
@@ -669,7 +657,7 @@ bool GlobalHandles::PostGarbageCollectionProcessing(
} else {
for (NodeIterator it(this); !it.done(); it.Advance()) {
it.node()->clear_partially_dependent();
- if (it.node()->PostGarbageCollectionProcessing(isolate_, this)) {
+ if (it.node()->PostGarbageCollectionProcessing(isolate_)) {
if (initial_post_gc_processing_count != post_gc_processing_count_) {
// See the comment above.
return next_gc_likely_to_collect_more;
diff --git a/deps/v8/src/global-handles.h b/deps/v8/src/global-handles.h
index f502dfa247..ac26e009c4 100644
--- a/deps/v8/src/global-handles.h
+++ b/deps/v8/src/global-handles.h
@@ -128,7 +128,7 @@ class GlobalHandles {
Handle<Object> Create(Object* value);
// Destroy a global handle.
- void Destroy(Object** location);
+ static void Destroy(Object** location);
typedef WeakReferenceCallbacks<v8::Value, void>::Revivable RevivableCallback;
@@ -138,10 +138,9 @@ class GlobalHandles {
// function is invoked (for each handle) with the handle and corresponding
// parameter as arguments. Note: cleared means set to Smi::FromInt(0). The
  // reason is that Smi::FromInt(0) does not change during garbage collection.
- void MakeWeak(Object** location,
- void* parameter,
- RevivableCallback weak_reference_callback,
- NearDeathCallback near_death_callback);
+ static void MakeWeak(Object** location,
+ void* parameter,
+ RevivableCallback weak_reference_callback);
void RecordStats(HeapStats* stats);
@@ -158,13 +157,13 @@ class GlobalHandles {
}
// Clear the weakness of a global handle.
- void ClearWeakness(Object** location);
+ static void ClearWeakness(Object** location);
  // Mark a global handle as independent.
- void MarkIndependent(Object** location);
+ static void MarkIndependent(Object** location);
  // Mark the reference to this object as externally unreachable.
- void MarkPartiallyDependent(Object** location);
+ static void MarkPartiallyDependent(Object** location);
static bool IsIndependent(Object** location);
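
With NearDeathCallback gone, RevivableCallback is the single remaining weak-callback shape, and it receives a pointer to the Persistent handle rather than the handle by value (see the matching ClearWrapperCache update in handles.cc below). A sketch of a conforming callback; WrapperState is a hypothetical per-handle payload:

    struct WrapperState { int id; };  // hypothetical parameter type

    static void OnWeakWrapper(v8::Isolate* isolate,
                              v8::Persistent<v8::Value>* handle,
                              void* parameter) {
      delete static_cast<WrapperState*>(parameter);
      // Explicit cleanup: an unhandled weak handle is a likely leak.
      handle->Dispose(isolate);
    }
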
diff --git a/deps/v8/src/handles-inl.h b/deps/v8/src/handles-inl.h
index 4f5e9fe720..4f4490b75b 100644
--- a/deps/v8/src/handles-inl.h
+++ b/deps/v8/src/handles-inl.h
@@ -57,7 +57,8 @@ inline bool Handle<T>::is_identical_to(const Handle<T> other) const {
if (location_ == other.location_) return true;
if (location_ == NULL || other.location_ == NULL) return false;
// Dereferencing deferred handles to check object equality is safe.
- SLOW_ASSERT(IsDereferenceAllowed(true) && other.IsDereferenceAllowed(true));
+ SLOW_ASSERT(IsDereferenceAllowed(NO_DEFERRED_CHECK) &&
+ other.IsDereferenceAllowed(NO_DEFERRED_CHECK));
return *location_ == *other.location_;
}
@@ -65,20 +66,21 @@ inline bool Handle<T>::is_identical_to(const Handle<T> other) const {
template <typename T>
inline T* Handle<T>::operator*() const {
ASSERT(location_ != NULL && !(*location_)->IsFailure());
- SLOW_ASSERT(IsDereferenceAllowed(false));
+ SLOW_ASSERT(IsDereferenceAllowed(INCLUDE_DEFERRED_CHECK));
return *BitCast<T**>(location_);
}
template <typename T>
inline T** Handle<T>::location() const {
ASSERT(location_ == NULL || !(*location_)->IsFailure());
- SLOW_ASSERT(location_ == NULL || IsDereferenceAllowed(false));
+ SLOW_ASSERT(location_ == NULL ||
+ IsDereferenceAllowed(INCLUDE_DEFERRED_CHECK));
return location_;
}
#ifdef DEBUG
template <typename T>
-bool Handle<T>::IsDereferenceAllowed(bool allow_deferred) const {
+bool Handle<T>::IsDereferenceAllowed(DereferenceCheckMode mode) const {
ASSERT(location_ != NULL);
Object* object = *BitCast<T**>(location_);
if (object->IsSmi()) return true;
@@ -90,22 +92,15 @@ bool Handle<T>::IsDereferenceAllowed(bool allow_deferred) const {
handle < roots_array_start + Heap::kStrongRootListLength) {
return true;
}
- if (isolate->optimizing_compiler_thread()->IsOptimizerThread() &&
- !Heap::RelocationLock::IsLockedByOptimizerThread(isolate->heap())) {
- return false;
+ if (!AllowHandleDereference::IsAllowed()) return false;
+ if (mode == INCLUDE_DEFERRED_CHECK &&
+ !AllowDeferredHandleDereference::IsAllowed()) {
+ // Accessing maps and internalized strings is safe.
+ if (heap_object->IsMap()) return true;
+ if (heap_object->IsInternalizedString()) return true;
+ return !isolate->IsDeferredHandle(handle);
}
- switch (isolate->HandleDereferenceGuardState()) {
- case HandleDereferenceGuard::ALLOW:
- return true;
- case HandleDereferenceGuard::DISALLOW:
- return false;
- case HandleDereferenceGuard::DISALLOW_DEFERRED:
- // Accessing maps and internalized strings is safe.
- if (heap_object->IsMap()) return true;
- if (heap_object->IsInternalizedString()) return true;
- return allow_deferred || !isolate->IsDeferredHandle(handle);
- }
- return false;
+ return true;
}
#endif
@@ -122,31 +117,37 @@ HandleScope::HandleScope(Isolate* isolate) {
HandleScope::~HandleScope() {
- CloseScope();
+ CloseScope(isolate_, prev_next_, prev_limit_);
}
-void HandleScope::CloseScope() {
+
+void HandleScope::CloseScope(Isolate* isolate,
+ Object** prev_next,
+ Object** prev_limit) {
v8::ImplementationUtilities::HandleScopeData* current =
- isolate_->handle_scope_data();
- current->next = prev_next_;
+ isolate->handle_scope_data();
+
+ current->next = prev_next;
current->level--;
- if (current->limit != prev_limit_) {
- current->limit = prev_limit_;
- DeleteExtensions(isolate_);
+ if (current->limit != prev_limit) {
+ current->limit = prev_limit;
+ DeleteExtensions(isolate);
}
+
#ifdef ENABLE_EXTRA_CHECKS
- ZapRange(prev_next_, prev_limit_);
+ ZapRange(prev_next, prev_limit);
#endif
}
template <typename T>
Handle<T> HandleScope::CloseAndEscape(Handle<T> handle_value) {
- T* value = *handle_value;
- // Throw away all handles in the current scope.
- CloseScope();
v8::ImplementationUtilities::HandleScopeData* current =
isolate_->handle_scope_data();
+
+ T* value = *handle_value;
+ // Throw away all handles in the current scope.
+ CloseScope(isolate_, prev_next_, prev_limit_);
// Allocate one handle in the parent scope.
ASSERT(current->level > 0);
Handle<T> result(CreateHandle<T>(isolate_, value));
@@ -161,6 +162,7 @@ Handle<T> HandleScope::CloseAndEscape(Handle<T> handle_value) {
template <typename T>
T** HandleScope::CreateHandle(Isolate* isolate, T* value) {
+ ASSERT(AllowHandleAllocation::IsAllowed());
v8::ImplementationUtilities::HandleScopeData* current =
isolate->handle_scope_data();
@@ -178,44 +180,29 @@ T** HandleScope::CreateHandle(Isolate* isolate, T* value) {
#ifdef DEBUG
-inline NoHandleAllocation::NoHandleAllocation(Isolate* isolate)
- : isolate_(isolate) {
+inline SealHandleScope::SealHandleScope(Isolate* isolate) : isolate_(isolate) {
+ // Make sure the current thread is allowed to create handles to begin with.
+ CHECK(AllowHandleAllocation::IsAllowed());
v8::ImplementationUtilities::HandleScopeData* current =
isolate_->handle_scope_data();
-
- active_ = !isolate->optimizing_compiler_thread()->IsOptimizerThread();
- if (active_) {
- // Shrink the current handle scope to make it impossible to do
- // handle allocations without an explicit handle scope.
- current->limit = current->next;
-
- level_ = current->level;
- current->level = 0;
- }
-}
-
-
-inline NoHandleAllocation::~NoHandleAllocation() {
- if (active_) {
- // Restore state in current handle scope to re-enable handle
- // allocations.
- v8::ImplementationUtilities::HandleScopeData* data =
- isolate_->handle_scope_data();
- ASSERT_EQ(0, data->level);
- data->level = level_;
- }
+ // Shrink the current handle scope to make it impossible to do
+ // handle allocations without an explicit handle scope.
+ limit_ = current->limit;
+ current->limit = current->next;
+ level_ = current->level;
+ current->level = 0;
}
-HandleDereferenceGuard::HandleDereferenceGuard(Isolate* isolate, State state)
- : isolate_(isolate) {
- old_state_ = isolate_->HandleDereferenceGuardState();
- isolate_->SetHandleDereferenceGuardState(state);
-}
-
-
-HandleDereferenceGuard::~HandleDereferenceGuard() {
- isolate_->SetHandleDereferenceGuardState(old_state_);
+inline SealHandleScope::~SealHandleScope() {
+ // Restore state in current handle scope to re-enable handle
+ // allocations.
+ v8::ImplementationUtilities::HandleScopeData* current =
+ isolate_->handle_scope_data();
+ ASSERT_EQ(0, current->level);
+ current->level = level_;
+ ASSERT_EQ(current->next, current->limit);
+ current->limit = limit_;
}
#endif
diff --git a/deps/v8/src/handles.cc b/deps/v8/src/handles.cc
index 7a8d5c90bc..81828d98cc 100644
--- a/deps/v8/src/handles.cc
+++ b/deps/v8/src/handles.cc
@@ -345,9 +345,9 @@ Handle<Object> SetAccessor(Handle<JSObject> obj, Handle<AccessorInfo> info) {
// associated with the wrapper and get rid of both the wrapper and the
// handle.
static void ClearWrapperCache(v8::Isolate* v8_isolate,
- Persistent<v8::Value> handle,
+ Persistent<v8::Value>* handle,
void*) {
- Handle<Object> cache = Utils::OpenHandle(*handle);
+ Handle<Object> cache = Utils::OpenHandle(**handle);
JSValue* wrapper = JSValue::cast(*cache);
Foreign* foreign = Script::cast(wrapper->value())->wrapper();
ASSERT(foreign->foreign_address() ==
@@ -388,7 +388,6 @@ Handle<JSValue> GetScriptWrapper(Handle<Script> script) {
Handle<Object> handle = isolate->global_handles()->Create(*result);
isolate->global_handles()->MakeWeak(handle.location(),
NULL,
- NULL,
&ClearWrapperCache);
script->wrapper()->set_foreign_address(
reinterpret_cast<Address>(handle.location()));
@@ -457,7 +456,7 @@ Handle<FixedArray> CalculateLineEnds(Handle<String> src,
List<int> line_ends(line_count_estimate);
Isolate* isolate = src->GetIsolate();
{
- AssertNoAllocation no_heap_allocation; // ensure vectors stay valid.
+ DisallowHeapAllocation no_allocation; // ensure vectors stay valid.
// Dispatch on type of strings.
String::FlatContent content = src->GetFlatContent();
ASSERT(content.IsFlat());
@@ -485,7 +484,7 @@ Handle<FixedArray> CalculateLineEnds(Handle<String> src,
// Convert code position into line number.
int GetScriptLineNumber(Handle<Script> script, int code_pos) {
InitScriptLineEnds(script);
- AssertNoAllocation no_allocation;
+ DisallowHeapAllocation no_allocation;
FixedArray* line_ends_array = FixedArray::cast(script->line_ends());
const int line_ends_len = line_ends_array->length();
@@ -512,7 +511,7 @@ int GetScriptColumnNumber(Handle<Script> script, int code_pos) {
int line_number = GetScriptLineNumber(script, code_pos);
if (line_number == -1) return -1;
- AssertNoAllocation no_allocation;
+ DisallowHeapAllocation no_allocation;
FixedArray* line_ends_array = FixedArray::cast(script->line_ends());
line_number = line_number - script->line_offset()->value();
if (line_number == 0) return code_pos + script->column_offset()->value();
@@ -522,7 +521,7 @@ int GetScriptColumnNumber(Handle<Script> script, int code_pos) {
}
int GetScriptLineNumberSafe(Handle<Script> script, int code_pos) {
- AssertNoAllocation no_allocation;
+ DisallowHeapAllocation no_allocation;
if (!script->line_ends()->IsUndefined()) {
return GetScriptLineNumber(script, code_pos);
}
@@ -567,7 +566,8 @@ v8::Handle<v8::Array> GetKeysForNamedInterceptor(Handle<JSReceiver> receiver,
#if ENABLE_EXTRA_CHECKS
CHECK(result.IsEmpty() || v8::Utils::OpenHandle(*result)->IsJSObject());
#endif
- return result;
+ return v8::Local<v8::Array>::New(reinterpret_cast<v8::Isolate*>(isolate),
+ result);
}
@@ -592,7 +592,8 @@ v8::Handle<v8::Array> GetKeysForIndexedInterceptor(Handle<JSReceiver> receiver,
#endif
}
}
- return result;
+ return v8::Local<v8::Array>::New(reinterpret_cast<v8::Isolate*>(isolate),
+ result);
}
@@ -802,7 +803,7 @@ Handle<FixedArray> GetEnumPropertyKeys(Handle<JSObject> object,
if (details.type() != FIELD) {
indices = Handle<FixedArray>();
} else {
- int field_index = Descriptor::IndexFromValue(descs->GetValue(i));
+ int field_index = descs->GetFieldIndex(i);
if (field_index >= map->inobject_properties()) {
field_index = -(field_index - map->inobject_properties() + 1);
}
diff --git a/deps/v8/src/handles.h b/deps/v8/src/handles.h
index 938d43b8a4..0cd4f5bca9 100644
--- a/deps/v8/src/handles.h
+++ b/deps/v8/src/handles.h
@@ -61,7 +61,7 @@ class Handle {
location_ = reinterpret_cast<T**>(handle.location_);
}
- INLINE(T* operator ->() const) { return operator*(); }
+ INLINE(T* operator->() const) { return operator*(); }
// Check if this handle refers to the exact same object as the other handle.
INLINE(bool is_identical_to(const Handle<T> other) const);
@@ -85,7 +85,9 @@ class Handle {
inline Handle<T> EscapeFrom(v8::HandleScope* scope);
#ifdef DEBUG
- bool IsDereferenceAllowed(bool allow_deferred) const;
+ enum DereferenceCheckMode { INCLUDE_DEFERRED_CHECK, NO_DEFERRED_CHECK };
+
+ bool IsDereferenceAllowed(DereferenceCheckMode mode) const;
#endif // DEBUG
private:
@@ -155,18 +157,21 @@ class HandleScope {
void* operator new(size_t size);
void operator delete(void* size_t);
- inline void CloseScope();
-
Isolate* isolate_;
Object** prev_next_;
Object** prev_limit_;
+ // Close the handle scope resetting limits to a previous state.
+ static inline void CloseScope(Isolate* isolate,
+ Object** prev_next,
+ Object** prev_limit);
+
// Extend the handle scope making room for more handles.
static internal::Object** Extend(Isolate* isolate);
#ifdef ENABLE_EXTRA_CHECKS
// Zaps the handles in the half-open interval [start, end).
- static void ZapRange(internal::Object** start, internal::Object** end);
+ static void ZapRange(Object** start, Object** end);
#endif
friend class v8::HandleScope;
@@ -327,45 +332,24 @@ Handle<ObjectHashTable> PutIntoObjectHashTable(Handle<ObjectHashTable> table,
Handle<Object> key,
Handle<Object> value);
-class NoHandleAllocation BASE_EMBEDDED {
- public:
-#ifndef DEBUG
- explicit NoHandleAllocation(Isolate* isolate) {}
- ~NoHandleAllocation() {}
-#else
- explicit inline NoHandleAllocation(Isolate* isolate);
- inline ~NoHandleAllocation();
- private:
- Isolate* isolate_;
- int level_;
- bool active_;
-#endif
-};
-
-class HandleDereferenceGuard BASE_EMBEDDED {
+// Seal off the current HandleScope so that new handles can only be created
+// if a new HandleScope is entered.
+class SealHandleScope BASE_EMBEDDED {
public:
- enum State { ALLOW, DISALLOW, DISALLOW_DEFERRED };
#ifndef DEBUG
- HandleDereferenceGuard(Isolate* isolate, State state) { }
- ~HandleDereferenceGuard() { }
+ explicit SealHandleScope(Isolate* isolate) {}
+ ~SealHandleScope() {}
#else
- inline HandleDereferenceGuard(Isolate* isolate, State state);
- inline ~HandleDereferenceGuard();
+ explicit inline SealHandleScope(Isolate* isolate);
+ inline ~SealHandleScope();
private:
Isolate* isolate_;
- State old_state_;
+ Object** limit_;
+ int level_;
#endif
};
-#ifdef DEBUG
-#define ALLOW_HANDLE_DEREF(isolate, why_this_is_safe) \
- HandleDereferenceGuard allow_deref(isolate, \
- HandleDereferenceGuard::ALLOW);
-#else
-#define ALLOW_HANDLE_DEREF(isolate, why_this_is_safe)
-#endif // DEBUG
-
} } // namespace v8::internal
#endif // V8_HANDLES_H_
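
SealHandleScope keeps the useful half of NoHandleAllocation: in debug builds it shrinks the current scope's limit and zeroes its level, so any stray handle creation asserts until a fresh HandleScope is entered, at which point allocation is legitimate again. A sketch (the surrounding function is hypothetical):

    // Inside namespace v8::internal; debug-build behaviour sketched.
    void ComputeWithoutNewHandles(Isolate* isolate) {
      SealHandleScope seal(isolate);
      // Creating a handle here would trip the assert in CreateHandle.
      {
        HandleScope inner(isolate);  // entering a scope re-enables handles
        // ... handles may be created freely in here ...
      }
    }
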
diff --git a/deps/v8/src/heap-inl.h b/deps/v8/src/heap-inl.h
index b71978baf5..f0861b2e7a 100644
--- a/deps/v8/src/heap-inl.h
+++ b/deps/v8/src/heap-inl.h
@@ -211,8 +211,7 @@ MaybeObject* Heap::CopyFixedDoubleArray(FixedDoubleArray* src) {
MaybeObject* Heap::AllocateRaw(int size_in_bytes,
AllocationSpace space,
AllocationSpace retry_space) {
- SLOW_ASSERT(!isolate_->optimizing_compiler_thread()->IsOptimizerThread());
- ASSERT(allocation_allowed_ && gc_state_ == NOT_IN_GC);
+ ASSERT(AllowHandleAllocation::IsAllowed() && gc_state_ == NOT_IN_GC);
ASSERT(space != NEW_SPACE ||
retry_space == OLD_POINTER_SPACE ||
retry_space == OLD_DATA_SPACE ||
@@ -642,21 +641,6 @@ Isolate* Heap::isolate() {
return __maybe_object__)
-#ifdef DEBUG
-
-inline bool Heap::allow_allocation(bool new_state) {
- bool old = allocation_allowed_;
- allocation_allowed_ = new_state;
- return old;
-}
-
-inline void Heap::set_allow_allocation(bool allocation_allowed) {
- allocation_allowed_ = allocation_allowed;
-}
-
-#endif
-
-
void ExternalStringTable::AddString(String* string) {
ASSERT(string->IsExternalString());
if (heap_->InNewSpace(string)) {
@@ -867,52 +851,6 @@ DisallowAllocationFailure::~DisallowAllocationFailure() {
}
-#ifdef DEBUG
-bool EnterAllocationScope(Isolate* isolate, bool allow_allocation) {
- bool active = !isolate->optimizing_compiler_thread()->IsOptimizerThread();
- bool last_state = isolate->heap()->IsAllocationAllowed();
- if (active) {
- // TODO(yangguo): Make HandleDereferenceGuard avoid isolate mutation in the
- // same way if running on the optimizer thread.
- isolate->heap()->set_allow_allocation(allow_allocation);
- }
- return last_state;
-}
-
-
-void ExitAllocationScope(Isolate* isolate, bool last_state) {
- bool active = !isolate->optimizing_compiler_thread()->IsOptimizerThread();
- if (active) {
- isolate->heap()->set_allow_allocation(last_state);
- }
-}
-
-
-AssertNoAllocation::AssertNoAllocation()
- : last_state_(EnterAllocationScope(ISOLATE, false)) {
-}
-
-AssertNoAllocation::~AssertNoAllocation() {
- ExitAllocationScope(ISOLATE, last_state_);
-}
-
-DisableAssertNoAllocation::DisableAssertNoAllocation()
- : last_state_(EnterAllocationScope(ISOLATE, true)) {
-}
-
-DisableAssertNoAllocation::~DisableAssertNoAllocation() {
- ExitAllocationScope(ISOLATE, last_state_);
-}
-#else
-
-AssertNoAllocation::AssertNoAllocation() { }
-AssertNoAllocation::~AssertNoAllocation() { }
-DisableAssertNoAllocation::DisableAssertNoAllocation() { }
-DisableAssertNoAllocation::~DisableAssertNoAllocation() { }
-
-#endif
-
-
} } // namespace v8::internal
#endif // V8_HEAP_INL_H_
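
The deleted AssertNoAllocation / DisableAssertNoAllocation machinery, together with the per-heap allocation_allowed_ bit, maps onto the new per-thread assertion scopes roughly as follows (a sketch; the surrounding function is hypothetical):

    // Inside namespace v8::internal.
    void GcSensitiveSection(Isolate* isolate) {
      DisallowHeapAllocation no_allocation;   // was: AssertNoAllocation
      // ... code that must not move objects ...
      {
        AllowHeapAllocation for_callbacks;    // was: DisableAssertNoAllocation
        // ... a nested region that may allocate, e.g. weak callbacks ...
      }
      // back to the no-allocation invariant here
    }
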
diff --git a/deps/v8/src/heap-snapshot-generator.cc b/deps/v8/src/heap-snapshot-generator.cc
index f488304f43..b8237a6b13 100644
--- a/deps/v8/src/heap-snapshot-generator.cc
+++ b/deps/v8/src/heap-snapshot-generator.cc
@@ -637,7 +637,7 @@ Handle<HeapObject> HeapSnapshotsCollection::FindHeapObjectById(
// First perform a full GC in order to avoid dead objects.
HEAP->CollectAllGarbage(Heap::kMakeHeapIterableMask,
"HeapSnapshotsCollection::FindHeapObjectById");
- AssertNoAllocation no_allocation;
+ DisallowHeapAllocation no_allocation;
HeapObject* object = NULL;
HeapIterator iterator(heap(), HeapIterator::kFilterUnreachable);
// Make sure that object with the given id is still reachable.
@@ -1210,10 +1210,6 @@ void V8HeapExplorer::ExtractSharedFunctionInfoReferences(
SetInternalReference(obj, entry,
"inferred_name", shared->inferred_name(),
SharedFunctionInfo::kInferredNameOffset);
- SetInternalReference(obj, entry,
- "this_property_assignments",
- shared->this_property_assignments(),
- SharedFunctionInfo::kThisPropertyAssignmentsOffset);
SetWeakReference(obj, entry,
1, shared->initial_map(),
SharedFunctionInfo::kInitialMapOffset);
@@ -1827,7 +1823,7 @@ void V8HeapExplorer::TagGlobalObjects() {
}
}
- AssertNoAllocation no_allocation;
+ DisallowHeapAllocation no_allocation;
for (int i = 0, l = enumerator.count(); i < l; ++i) {
objects_tags_.SetTag(*enumerator.at(i), urls[i]);
}
@@ -2219,7 +2215,7 @@ bool HeapSnapshotGenerator::GenerateSnapshot() {
// The following code uses heap iterators, so we want the heap to be
// stable. It should follow TagGlobalObjects as that can allocate.
- AssertNoAllocation no_alloc;
+ DisallowHeapAllocation no_alloc;
#ifdef VERIFY_HEAP
debug_heap->Verify();
diff --git a/deps/v8/src/heap.cc b/deps/v8/src/heap.cc
index 98844f05e9..2817fcba58 100644
--- a/deps/v8/src/heap.cc
+++ b/deps/v8/src/heap.cc
@@ -113,13 +113,11 @@ Heap::Heap()
remembered_unmapped_pages_index_(0),
unflattened_strings_length_(0),
#ifdef DEBUG
- allocation_allowed_(true),
allocation_timeout_(0),
disallow_allocation_failure_(false),
#endif // DEBUG
new_space_high_promotion_mode_active_(false),
- old_gen_promotion_limit_(kMinimumPromotionLimit),
- old_gen_allocation_limit_(kMinimumAllocationLimit),
+ old_generation_allocation_limit_(kMinimumOldGenerationAllocationLimit),
size_of_old_gen_at_last_old_space_gc_(0),
external_allocation_limit_(0),
amount_of_external_allocated_memory_(0),
@@ -182,6 +180,7 @@ Heap::Heap()
memset(roots_, 0, sizeof(roots_[0]) * kRootListLength);
native_contexts_list_ = NULL;
+ array_buffers_list_ = Smi::FromInt(0);
mark_compact_collector_.heap_ = this;
external_string_table_.heap_ = this;
// Put a dummy entry in the remembered pages so we can find the list the
@@ -282,7 +281,7 @@ GarbageCollector Heap::SelectGarbageCollector(AllocationSpace space,
}
// Is enough data promoted to justify a global GC?
- if (OldGenerationPromotionLimitReached()) {
+ if (OldGenerationAllocationLimitReached()) {
isolate_->counters()->gc_compactor_caused_by_promoted_data()->Increment();
*reason = "promotion limit reached";
return MARK_COMPACTOR;
@@ -419,24 +418,25 @@ void Heap::ReportStatisticsAfterGC() {
void Heap::GarbageCollectionPrologue() {
- isolate_->transcendental_cache()->Clear();
- ClearJSFunctionResultCaches();
- gc_count_++;
- unflattened_strings_length_ = 0;
+ { AllowHeapAllocation for_the_first_part_of_prologue;
+ isolate_->transcendental_cache()->Clear();
+ ClearJSFunctionResultCaches();
+ gc_count_++;
+ unflattened_strings_length_ = 0;
- if (FLAG_flush_code && FLAG_flush_code_incrementally) {
- mark_compact_collector()->EnableCodeFlushing(true);
- }
+ if (FLAG_flush_code && FLAG_flush_code_incrementally) {
+ mark_compact_collector()->EnableCodeFlushing(true);
+ }
#ifdef VERIFY_HEAP
- if (FLAG_verify_heap) {
- Verify();
- }
+ if (FLAG_verify_heap) {
+ Verify();
+ }
#endif
+ }
#ifdef DEBUG
- ASSERT(allocation_allowed_ && gc_state_ == NOT_IN_GC);
- allow_allocation(false);
+ ASSERT(!AllowHeapAllocation::IsAllowed() && gc_state_ == NOT_IN_GC);
if (FLAG_gc_verbose) Print();
@@ -481,8 +481,9 @@ void Heap::GarbageCollectionEpilogue() {
}
#endif
+ AllowHeapAllocation for_the_rest_of_the_epilogue;
+
#ifdef DEBUG
- allow_allocation(true);
if (FLAG_print_global_handles) isolate_->global_handles()->Print();
if (FLAG_print_handles) PrintHandles();
if (FLAG_gc_verbose) Print();
@@ -644,6 +645,8 @@ bool Heap::CollectGarbage(AllocationSpace space,
bool next_gc_likely_to_collect_more = false;
{ GCTracer tracer(this, gc_reason, collector_reason);
+ ASSERT(AllowHeapAllocation::IsAllowed());
+ DisallowHeapAllocation no_allocation_during_gc;
GarbageCollectionPrologue();
// The GC count was incremented in the prologue. Tell the tracer about
// it.
@@ -916,10 +919,8 @@ bool Heap::PerformGarbageCollection(GarbageCollector collector,
size_of_old_gen_at_last_old_space_gc_ = PromotedSpaceSizeOfObjects();
- old_gen_promotion_limit_ =
- OldGenPromotionLimit(size_of_old_gen_at_last_old_space_gc_);
- old_gen_allocation_limit_ =
- OldGenAllocationLimit(size_of_old_gen_at_last_old_space_gc_);
+ old_generation_allocation_limit_ =
+ OldGenerationAllocationLimit(size_of_old_gen_at_last_old_space_gc_);
old_gen_exhausted_ = false;
} else {
@@ -938,7 +939,7 @@ bool Heap::PerformGarbageCollection(GarbageCollector collector,
// maximum capacity indicates that most objects will be promoted.
// To decrease scavenger pauses and final mark-sweep pauses, we
// have to limit maximal capacity of the young generation.
- new_space_high_promotion_mode_active_ = true;
+ SetNewSpaceHighPromotionModeActive(true);
if (FLAG_trace_gc) {
PrintPID("Limited new space size due to high promotion rate: %d MB\n",
new_space_.InitialCapacity() / MB);
@@ -947,7 +948,7 @@ bool Heap::PerformGarbageCollection(GarbageCollector collector,
// heuristic indicator of whether to pretenure or not, we trigger
// deoptimization here to take advantage of pre-tenuring as soon as
// possible.
- if (FLAG_pretenure_literals) {
+ if (FLAG_pretenuring) {
isolate_->stack_guard()->FullDeopt();
}
} else if (new_space_high_promotion_mode_active_ &&
@@ -956,14 +957,14 @@ bool Heap::PerformGarbageCollection(GarbageCollector collector,
// Decreasing low survival rates might indicate that the above high
// promotion mode is over and we should allow the young generation
// to grow again.
- new_space_high_promotion_mode_active_ = false;
+ SetNewSpaceHighPromotionModeActive(false);
if (FLAG_trace_gc) {
PrintPID("Unlimited new space size due to low promotion rate: %d MB\n",
new_space_.MaximumCapacity() / MB);
}
// Trigger deoptimization here to turn off pre-tenuring as soon as
// possible.
- if (FLAG_pretenure_literals) {
+ if (FLAG_pretenuring) {
isolate_->stack_guard()->FullDeopt();
}
}
@@ -980,7 +981,7 @@ bool Heap::PerformGarbageCollection(GarbageCollector collector,
ASSERT(collector == SCAVENGER || incremental_marking()->IsStopped());
gc_post_processing_depth_++;
- { DisableAssertNoAllocation allow_allocation;
+ { AllowHeapAllocation allow_allocation;
GCTracer::Scope scope(tracer, GCTracer::Scope::EXTERNAL);
next_gc_likely_to_collect_more =
isolate_->global_handles()->PostGarbageCollectionProcessing(
@@ -1539,11 +1540,6 @@ static Object* ProcessFunctionWeakReferences(Heap* heap,
void Heap::ProcessWeakReferences(WeakObjectRetainer* retainer) {
- Object* undefined = undefined_value();
- Object* head = undefined;
- Context* tail = NULL;
- Object* candidate = native_contexts_list_;
-
// We don't record weak slots during marking or scavenges.
  // Instead we do it once when we complete the mark-compact cycle.
// Note that write barrier has no effect if we are already in the middle of
@@ -1551,6 +1547,16 @@ void Heap::ProcessWeakReferences(WeakObjectRetainer* retainer) {
bool record_slots =
gc_state() == MARK_COMPACT &&
mark_compact_collector()->is_compacting();
+ ProcessArrayBuffers(retainer, record_slots);
+ ProcessNativeContexts(retainer, record_slots);
+}
+
+void Heap::ProcessNativeContexts(WeakObjectRetainer* retainer,
+ bool record_slots) {
+ Object* undefined = undefined_value();
+ Object* head = undefined;
+ Context* tail = NULL;
+ Object* candidate = native_contexts_list_;
while (candidate != undefined) {
// Check whether to keep the candidate in the list.
@@ -1619,8 +1625,103 @@ void Heap::ProcessWeakReferences(WeakObjectRetainer* retainer) {
}
+template <class T>
+struct WeakListVisitor;
+
+
+template <class T>
+static Object* VisitWeakList(Object* list,
+ MarkCompactCollector* collector,
+ WeakObjectRetainer* retainer, bool record_slots) {
+ Object* head = Smi::FromInt(0);
+ T* tail = NULL;
+ while (list != Smi::FromInt(0)) {
+ Object* retained = retainer->RetainAs(list);
+ if (retained != NULL) {
+ if (head == Smi::FromInt(0)) {
+ head = retained;
+ } else {
+ ASSERT(tail != NULL);
+ WeakListVisitor<T>::set_weak_next(tail, retained);
+ if (record_slots) {
+ Object** next_slot =
+ HeapObject::RawField(tail, WeakListVisitor<T>::kWeakNextOffset);
+ collector->RecordSlot(next_slot, next_slot, retained);
+ }
+ }
+ tail = reinterpret_cast<T*>(retained);
+ WeakListVisitor<T>::VisitLiveObject(
+ tail, collector, retainer, record_slots);
+ }
+ list = WeakListVisitor<T>::get_weak_next(reinterpret_cast<T*>(list));
+ }
+ if (tail != NULL) {
+ tail->set_weak_next(Smi::FromInt(0));
+ }
+ return head;
+}
+
+
+template<>
+struct WeakListVisitor<JSTypedArray> {
+ static void set_weak_next(JSTypedArray* obj, Object* next) {
+ obj->set_weak_next(next);
+ }
+
+ static Object* get_weak_next(JSTypedArray* obj) {
+ return obj->weak_next();
+ }
+
+ static void VisitLiveObject(JSTypedArray* obj,
+ MarkCompactCollector* collector,
+ WeakObjectRetainer* retainer,
+ bool record_slots) {}
+
+ static const int kWeakNextOffset = JSTypedArray::kWeakNextOffset;
+};
+
+
+template<>
+struct WeakListVisitor<JSArrayBuffer> {
+ static void set_weak_next(JSArrayBuffer* obj, Object* next) {
+ obj->set_weak_next(next);
+ }
+
+ static Object* get_weak_next(JSArrayBuffer* obj) {
+ return obj->weak_next();
+ }
+
+ static void VisitLiveObject(JSArrayBuffer* array_buffer,
+ MarkCompactCollector* collector,
+ WeakObjectRetainer* retainer,
+ bool record_slots) {
+ Object* typed_array_obj =
+ VisitWeakList<JSTypedArray>(array_buffer->weak_first_array(),
+ collector, retainer, record_slots);
+ array_buffer->set_weak_first_array(typed_array_obj);
+ if (typed_array_obj != Smi::FromInt(0) && record_slots) {
+ Object** slot = HeapObject::RawField(
+ array_buffer, JSArrayBuffer::kWeakFirstArrayOffset);
+ collector->RecordSlot(slot, slot, typed_array_obj);
+ }
+ }
+
+ static const int kWeakNextOffset = JSArrayBuffer::kWeakNextOffset;
+};
+
+
+void Heap::ProcessArrayBuffers(WeakObjectRetainer* retainer,
+ bool record_slots) {
+ Object* array_buffer_obj =
+ VisitWeakList<JSArrayBuffer>(array_buffers_list(),
+ mark_compact_collector(),
+ retainer, record_slots);
+ set_array_buffers_list(array_buffer_obj);
+}
+
+
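
The trait-plus-template pattern above makes further weak lists cheap to add: a new type only needs its own WeakListVisitor specialization. A sketch for a hypothetical WeakThing that, like JSArrayBuffer and JSTypedArray, carries a weak_next field:

    // Hypothetical: WeakThing declares weak_next()/set_weak_next() and
    // kWeakNextOffset, mirroring the two specializations above.
    template<>
    struct WeakListVisitor<WeakThing> {
      static void set_weak_next(WeakThing* obj, Object* next) {
        obj->set_weak_next(next);
      }
      static Object* get_weak_next(WeakThing* obj) { return obj->weak_next(); }
      static void VisitLiveObject(WeakThing* obj,
                                  MarkCompactCollector* collector,
                                  WeakObjectRetainer* retainer,
                                  bool record_slots) {}  // no nested list
      static const int kWeakNextOffset = WeakThing::kWeakNextOffset;
    };
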
void Heap::VisitExternalResources(v8::ExternalResourceVisitor* visitor) {
- AssertNoAllocation no_allocation;
+ DisallowHeapAllocation no_allocation;
// Both the external string table and the string table may contain
// external strings, but neither lists them exhaustively, nor is the
@@ -1794,6 +1895,14 @@ class ScavengingVisitor : public StaticVisitorBase {
&ObjectEvacuationStrategy<POINTER_OBJECT>::
Visit);
+ table_.Register(kVisitJSArrayBuffer,
+ &ObjectEvacuationStrategy<POINTER_OBJECT>::
+ Visit);
+
+ table_.Register(kVisitJSTypedArray,
+ &ObjectEvacuationStrategy<POINTER_OBJECT>::
+ Visit);
+
table_.Register(kVisitJSRegExp,
&ObjectEvacuationStrategy<POINTER_OBJECT>::
Visit);
@@ -2679,7 +2788,6 @@ MaybeObject* Heap::AllocateHeapNumber(double value) {
// This version of AllocateHeapNumber is optimized for
// allocation in new space.
STATIC_ASSERT(HeapNumber::kSize <= Page::kMaxNonCodeHeapObjectSize);
- ASSERT(allocation_allowed_ && gc_state_ == NOT_IN_GC);
Object* result;
{ MaybeObject* maybe_result = new_space_.AllocateRaw(HeapNumber::kSize);
if (!maybe_result->ToObject(&result)) return maybe_result;
@@ -2702,6 +2810,15 @@ MaybeObject* Heap::AllocateJSGlobalPropertyCell(Object* value) {
}
+MaybeObject* Heap::AllocateBox(Object* value, PretenureFlag pretenure) {
+ Box* result;
+ MaybeObject* maybe_result = AllocateStruct(BOX_TYPE);
+ if (!maybe_result->To(&result)) return maybe_result;
+ result->set_value(value);
+ return result;
+}
+
+
MaybeObject* Heap::CreateOddball(const char* to_string,
Object* to_number,
byte kind) {
@@ -2846,6 +2963,13 @@ bool Heap::CreateInitialObjects() {
}
set_the_hole_value(Oddball::cast(obj));
+ { MaybeObject* maybe_obj = CreateOddball("uninitialized",
+ Smi::FromInt(-1),
+ Oddball::kUninitialized);
+ if (!maybe_obj->ToObject(&obj)) return false;
+ }
+ set_uninitialized_value(Oddball::cast(obj));
+
{ MaybeObject* maybe_obj = CreateOddball("arguments_marker",
Smi::FromInt(-4),
Oddball::kArgumentMarker);
@@ -2964,7 +3088,18 @@ bool Heap::CreateInitialObjects() {
}
set_observation_state(JSObject::cast(obj));
- // Handling of script id generation is in FACTORY->NewScript.
+ { MaybeObject* maybe_obj = AllocateSymbol();
+ if (!maybe_obj->ToObject(&obj)) return false;
+ }
+ set_frozen_symbol(Symbol::cast(obj));
+
+ { MaybeObject* maybe_obj = SeededNumberDictionary::Allocate(this, 0, TENURED);
+ if (!maybe_obj->ToObject(&obj)) return false;
+ }
+ SeededNumberDictionary::cast(obj)->set_requires_slow_elements();
+ set_empty_slow_element_dictionary(SeededNumberDictionary::cast(obj));
+
+ // Handling of script id generation is in Factory::NewScript.
set_last_script_id(undefined_value());
// Initialize keyed lookup cache.
@@ -3368,7 +3503,6 @@ MaybeObject* Heap::AllocateSharedFunctionInfo(Object* name) {
share->set_debug_info(undefined_value(), SKIP_WRITE_BARRIER);
share->set_inferred_name(empty_string(), SKIP_WRITE_BARRIER);
share->set_initial_map(undefined_value(), SKIP_WRITE_BARRIER);
- share->set_this_property_assignments(undefined_value(), SKIP_WRITE_BARRIER);
share->set_ast_node_count(0);
share->set_stress_deopt_counter(FLAG_deopt_every_n_times);
share->set_counters(0);
@@ -3383,7 +3517,6 @@ MaybeObject* Heap::AllocateSharedFunctionInfo(Object* name) {
share->set_function_token_position(0);
// All compiler hints default to false or 0.
share->set_compiler_hints(0);
- share->set_this_property_assignments_count(0);
share->set_opt_count(0);
return share;
@@ -3567,7 +3700,7 @@ MaybeObject* Heap::AllocateConsString(String* first, String* second) {
if (!maybe_result->ToObject(&result)) return maybe_result;
}
- AssertNoAllocation no_gc;
+ DisallowHeapAllocation no_gc;
ConsString* cons_string = ConsString::cast(result);
WriteBarrierMode mode = cons_string->GetWriteBarrierMode(no_gc);
cons_string->set_length(length);
@@ -3648,7 +3781,7 @@ MaybeObject* Heap::AllocateSubString(String* buffer,
if (!maybe_result->ToObject(&result)) return maybe_result;
}
- AssertNoAllocation no_gc;
+ DisallowHeapAllocation no_gc;
SlicedString* sliced_string = SlicedString::cast(result);
sliced_string->set_length(length);
sliced_string->set_hash_field(String::kEmptyHashField);
@@ -4113,7 +4246,7 @@ MaybeObject* Heap::AllocateArgumentsObject(Object* callee, int length) {
// This calls Copy directly rather than using Heap::AllocateRaw so we
// duplicate the check here.
- ASSERT(allocation_allowed_ && gc_state_ == NOT_IN_GC);
+ ASSERT(AllowHeapAllocation::IsAllowed() && gc_state_ == NOT_IN_GC);
// Check that the size of the boilerplate matches our
// expectations. The ArgumentsAccessStub::GenerateNewObject relies
@@ -4152,20 +4285,6 @@ MaybeObject* Heap::AllocateArgumentsObject(Object* callee, int length) {
}
-static bool HasDuplicates(DescriptorArray* descriptors) {
- int count = descriptors->number_of_descriptors();
- if (count > 1) {
- Name* prev_key = descriptors->GetKey(0);
- for (int i = 1; i != count; i++) {
- Name* current_key = descriptors->GetKey(i);
- if (prev_key == current_key) return true;
- prev_key = current_key;
- }
- }
- return false;
-}
-
-
MaybeObject* Heap::AllocateInitialMap(JSFunction* fun) {
ASSERT(!fun->has_initial_map());
@@ -4200,48 +4319,6 @@ MaybeObject* Heap::AllocateInitialMap(JSFunction* fun) {
map->set_prototype(prototype);
ASSERT(map->has_fast_object_elements());
- // If the function has only simple this property assignments add
- // field descriptors for these to the initial map as the object
- // cannot be constructed without having these properties. Guard by
- // the inline_new flag so we only change the map if we generate a
- // specialized construct stub.
- ASSERT(in_object_properties <= Map::kMaxPreAllocatedPropertyFields);
- if (!fun->shared()->is_generator() &&
- fun->shared()->CanGenerateInlineConstructor(prototype)) {
- int count = fun->shared()->this_property_assignments_count();
- if (count > in_object_properties) {
- // Inline constructor can only handle inobject properties.
- fun->shared()->ForbidInlineConstructor();
- } else {
- DescriptorArray* descriptors;
- MaybeObject* maybe_descriptors = DescriptorArray::Allocate(count);
- if (!maybe_descriptors->To(&descriptors)) return maybe_descriptors;
-
- DescriptorArray::WhitenessWitness witness(descriptors);
- for (int i = 0; i < count; i++) {
- String* name = fun->shared()->GetThisPropertyAssignmentName(i);
- ASSERT(name->IsInternalizedString());
- // TODO(verwaest): Since we cannot update the boilerplate's map yet,
- // initialize to the worst case.
- FieldDescriptor field(name, i, NONE, Representation::Tagged());
- descriptors->Set(i, &field, witness);
- }
- descriptors->Sort();
-
- // The descriptors may contain duplicates because the compiler does not
- // guarantee the uniqueness of property names (it would have required
- // quadratic time). Once the descriptors are sorted we can check for
- // duplicates in linear time.
- if (HasDuplicates(descriptors)) {
- fun->shared()->ForbidInlineConstructor();
- } else {
- map->InitializeDescriptors(descriptors);
- map->set_pre_allocated_property_fields(count);
- map->set_unused_property_fields(in_object_properties - count);
- }
- }
- }
-
if (!fun->shared()->is_generator()) {
fun->shared()->StartInobjectSlackTracking(map);
}
@@ -4293,10 +4370,7 @@ MaybeObject* Heap::AllocateJSObjectFromMap(Map* map, PretenureFlag pretenure) {
ASSERT(map->instance_type() != JS_BUILTINS_OBJECT_TYPE);
// Allocate the backing storage for the properties.
- int prop_size =
- map->pre_allocated_property_fields() +
- map->unused_property_fields() -
- map->inobject_properties();
+ int prop_size = map->InitialPropertiesLength();
ASSERT(prop_size >= 0);
Object* properties;
{ MaybeObject* maybe_properties = AllocateFixedArray(prop_size, pretenure);
@@ -4333,10 +4407,7 @@ MaybeObject* Heap::AllocateJSObjectFromMapWithAllocationSite(Map* map,
ASSERT(map->instance_type() != JS_BUILTINS_OBJECT_TYPE);
// Allocate the backing storage for the properties.
- int prop_size =
- map->pre_allocated_property_fields() +
- map->unused_property_fields() -
- map->inobject_properties();
+ int prop_size = map->InitialPropertiesLength();
ASSERT(prop_size >= 0);
Object* properties;
{ MaybeObject* maybe_properties = AllocateFixedArray(prop_size);
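
Both hunks above replace the same three-term expression with a call to Map::InitialPropertiesLength(). Judging from the removed lines, the helper centralizes that arithmetic; a hedged reconstruction (the real definition lives elsewhere in the tree and is not shown in this diff):

    // Assumed shape of the new helper, inferred from the removed expression:
    // slots needed in the out-of-object properties backing store.
    int InitialPropertiesLengthSketch(int pre_allocated_property_fields,
                                      int unused_property_fields,
                                      int inobject_properties) {
      // Pre-allocated and unused fields that do not fit inside the object
      // itself must be backed by the external properties array.
      return pre_allocated_property_fields + unused_property_fields -
             inobject_properties;
    }
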
@@ -5319,7 +5390,7 @@ MaybeObject* Heap::CopyFixedArrayWithMap(FixedArray* src, Map* map) {
result->set_length(len);
// Copy the content
- AssertNoAllocation no_gc;
+ DisallowHeapAllocation no_gc;
WriteBarrierMode mode = result->GetWriteBarrierMode(no_gc);
for (int i = 0; i < len; i++) result->set(i, src->get(i), mode);
return result;
@@ -5742,7 +5813,7 @@ bool Heap::IsHeapIterable() {
void Heap::EnsureHeapIsIterable() {
- ASSERT(IsAllocationAllowed());
+ ASSERT(AllowHeapAllocation::IsAllowed());
if (!IsHeapIterable()) {
CollectAllGarbage(kMakeHeapIterableMask, "Heap::EnsureHeapIsIterable");
}
@@ -5959,10 +6030,8 @@ void Heap::ReportHeapStatistics(const char* title) {
USE(title);
PrintF(">>>>>> =============== %s (%d) =============== >>>>>>\n",
title, gc_count_);
- PrintF("old_gen_promotion_limit_ %" V8_PTR_PREFIX "d\n",
- old_gen_promotion_limit_);
- PrintF("old_gen_allocation_limit_ %" V8_PTR_PREFIX "d\n",
- old_gen_allocation_limit_);
+ PrintF("old_generation_allocation_limit_ %" V8_PTR_PREFIX "d\n",
+ old_generation_allocation_limit_);
PrintF("\n");
PrintF("Number of handles : %d\n", HandleScope::NumberOfHandles(isolate_));
@@ -7063,7 +7132,7 @@ class UnreachableObjectsFilter : public HeapObjectsFilter {
visitor.TransitiveClosure();
}
- AssertNoAllocation no_alloc;
+ DisallowHeapAllocation no_allocation_;
};
@@ -7512,6 +7581,8 @@ GCTracer::~GCTracer() {
PrintF("intracompaction_ptrs=%.1f ",
scopes_[Scope::MC_UPDATE_POINTERS_BETWEEN_EVACUATED]);
PrintF("misc_compaction=%.1f ", scopes_[Scope::MC_UPDATE_MISC_POINTERS]);
+ PrintF("weakmap_process=%.1f ", scopes_[Scope::MC_WEAKMAP_PROCESS]);
+ PrintF("weakmap_clear=%.1f ", scopes_[Scope::MC_WEAKMAP_CLEAR]);
PrintF("total_size_before=%" V8_PTR_PREFIX "d ", start_object_size_);
PrintF("total_size_after=%" V8_PTR_PREFIX "d ", heap_->SizeOfObjects());
@@ -7749,7 +7820,7 @@ void ErrorObjectList::DeferredFormatStackTrace(Isolate* isolate) {
Object* object = list_[i];
JSFunction* getter_fun;
- { AssertNoAllocation assert;
+ { DisallowHeapAllocation no_gc;
// Skip possible holes in the list.
if (object->IsTheHole()) continue;
if (isolate->heap()->InNewSpace(object) || budget == 0) {
diff --git a/deps/v8/src/heap.h b/deps/v8/src/heap.h
index b24b0b3608..da10efcee5 100644
--- a/deps/v8/src/heap.h
+++ b/deps/v8/src/heap.h
@@ -31,6 +31,7 @@
#include <cmath>
#include "allocation.h"
+#include "assert-scope.h"
#include "globals.h"
#include "incremental-marking.h"
#include "list.h"
@@ -58,6 +59,7 @@ namespace internal {
V(Oddball, null_value, NullValue) \
V(Oddball, true_value, TrueValue) \
V(Oddball, false_value, FalseValue) \
+ V(Oddball, uninitialized_value, UninitializedValue) \
V(Map, global_property_cell_map, GlobalPropertyCellMap) \
V(Map, shared_function_info_map, SharedFunctionInfoMap) \
V(Map, meta_map, MetaMap) \
@@ -181,7 +183,10 @@ namespace internal {
V(Smi, getter_stub_deopt_pc_offset, GetterStubDeoptPCOffset) \
V(Smi, setter_stub_deopt_pc_offset, SetterStubDeoptPCOffset) \
V(JSObject, observation_state, ObservationState) \
- V(Map, external_map, ExternalMap)
+ V(Map, external_map, ExternalMap) \
+ V(Symbol, frozen_symbol, FrozenSymbol) \
+ V(SeededNumberDictionary, empty_slow_element_dictionary, \
+ EmptySlowElementDictionary)
#define ROOT_LIST(V) \
STRONG_ROOT_LIST(V) \
@@ -288,10 +293,10 @@ namespace internal {
V(hidden_stack_trace_string, "v8::hidden_stack_trace") \
V(query_colon_string, "(?:)") \
V(Generator_string, "Generator") \
- V(send_string, "send") \
V(throw_string, "throw") \
V(done_string, "done") \
- V(value_string, "value")
+ V(value_string, "value") \
+ V(next_string, "next")
// Forward declarations.
class GCTracer;
@@ -547,7 +552,7 @@ class Heap {
int InitialSemiSpaceSize() { return initial_semispace_size_; }
intptr_t MaxOldGenerationSize() { return max_old_generation_size_; }
intptr_t MaxExecutableSize() { return max_executable_size_; }
- int MaxNewSpaceAllocationSize() { return InitialSemiSpaceSize() * 3/4; }
+ int MaxRegularSpaceAllocationSize() { return InitialSemiSpaceSize() * 3/4; }
// Returns the capacity of the heap in bytes w/o growing. Heap grows when
// more spaces are needed until it reaches the limit.
@@ -934,6 +939,10 @@ class Heap {
// Please note this does not perform a garbage collection.
MUST_USE_RESULT MaybeObject* AllocateJSGlobalPropertyCell(Object* value);
+ // Allocate Box.
+ MUST_USE_RESULT MaybeObject* AllocateBox(Object* value,
+ PretenureFlag pretenure);
+
// Allocates a fixed array initialized with undefined values
// Returns Failure::RetryAfterGC(requested_bytes, space) if the allocation
// failed.
@@ -1343,6 +1352,12 @@ class Heap {
}
Object* native_contexts_list() { return native_contexts_list_; }
+ void set_array_buffers_list(Object* object) {
+ array_buffers_list_ = object;
+ }
+ Object* array_buffers_list() { return array_buffers_list_; }
+
+
// Number of mark-sweeps.
unsigned int ms_count() { return ms_count_; }
@@ -1493,10 +1508,6 @@ class Heap {
inline bool IsInGCPostProcessing() { return gc_post_processing_depth_ > 0; }
#ifdef DEBUG
- bool IsAllocationAllowed() { return allocation_allowed_; }
- inline void set_allow_allocation(bool allocation_allowed);
- inline bool allow_allocation(bool enable);
-
bool disallow_allocation_failure() {
return disallow_allocation_failure_;
}
@@ -1546,7 +1557,12 @@ class Heap {
// Predicate that governs global pre-tenuring decisions based on observed
// promotion rates of previous collections.
inline bool ShouldGloballyPretenure() {
- return new_space_high_promotion_mode_active_;
+ return FLAG_pretenuring && new_space_high_promotion_mode_active_;
+ }
+
+ // This is only needed for testing high promotion mode.
+ void SetNewSpaceHighPromotionModeActive(bool mode) {
+ new_space_high_promotion_mode_active_ = mode;
}
inline PretenureFlag GetPretenureMode() {
@@ -1561,44 +1577,23 @@ class Heap {
return PromotedSpaceSizeOfObjects() + PromotedExternalMemorySize();
}
- // True if we have reached the allocation limit in the old generation that
- // should force the next GC (caused normally) to be a full one.
- inline bool OldGenerationPromotionLimitReached() {
- return PromotedTotalSize() > old_gen_promotion_limit_;
- }
-
inline intptr_t OldGenerationSpaceAvailable() {
- return old_gen_allocation_limit_ - PromotedTotalSize();
+ return old_generation_allocation_limit_ - PromotedTotalSize();
}
inline intptr_t OldGenerationCapacityAvailable() {
return max_old_generation_size_ - PromotedTotalSize();
}
- static const intptr_t kMinimumPromotionLimit = 5 * Page::kPageSize;
- static const intptr_t kMinimumAllocationLimit =
+ static const intptr_t kMinimumOldGenerationAllocationLimit =
8 * (Page::kPageSize > MB ? Page::kPageSize : MB);
- intptr_t OldGenPromotionLimit(intptr_t old_gen_size) {
+ intptr_t OldGenerationAllocationLimit(intptr_t old_gen_size) {
const int divisor = FLAG_stress_compaction ? 10 :
new_space_high_promotion_mode_active_ ? 1 : 3;
intptr_t limit =
- Max(old_gen_size + old_gen_size / divisor, kMinimumPromotionLimit);
- limit += new_space_.Capacity();
-    // TODO(hpayer): Can be removed when pretenuring is supported for all
- // allocation sites.
- if (IsHighSurvivalRate() && IsStableOrIncreasingSurvivalTrend()) {
- limit *= 2;
- }
- intptr_t halfway_to_the_max = (old_gen_size + max_old_generation_size_) / 2;
- return Min(limit, halfway_to_the_max);
- }
-
- intptr_t OldGenAllocationLimit(intptr_t old_gen_size) {
- const int divisor = FLAG_stress_compaction ? 8 :
- new_space_high_promotion_mode_active_ ? 1 : 2;
- intptr_t limit =
- Max(old_gen_size + old_gen_size / divisor, kMinimumAllocationLimit);
+ Max(old_gen_size + old_gen_size / divisor,
+ kMinimumOldGenerationAllocationLimit);
limit += new_space_.Capacity();
     // TODO(hpayer): Can be removed when pretenuring is supported for all
// allocation sites.
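
The two old limits (OldGenPromotionLimit, OldGenAllocationLimit) collapse into a single OldGenerationAllocationLimit. A self-contained sketch of the visible part of the merged computation (the hunk is truncated before the function's tail, so any survival-rate doubling or capping is deliberately left out):

    #include <algorithm>
    #include <stdint.h>

    // Hedged sketch of the visible portion of OldGenerationAllocationLimit().
    int64_t OldGenerationAllocationLimitSketch(int64_t old_gen_size,
                                               int64_t new_space_capacity,
                                               bool stress_compaction,
                                               bool high_promotion_mode,
                                               int64_t minimum_limit) {
      // Grow headroom aggressively (divisor 1) in high-promotion mode,
      // moderately (3) by default, very conservatively (10) under stress.
      const int divisor = stress_compaction ? 10 :
          high_promotion_mode ? 1 : 3;
      int64_t limit =
          std::max(old_gen_size + old_gen_size / divisor, minimum_limit);
      return limit + new_space_capacity;
    }

With old_gen_size = 60 MB, the default divisor 3, and a 16 MB new space, the limit lands at 60 + 20 + 16 = 96 MB (assuming the 8 MB minimum does not bind).
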
@@ -1679,22 +1674,14 @@ class Heap {
if (FLAG_stress_compaction && (gc_count_ & 1) != 0) return true;
- intptr_t total_promoted = PromotedTotalSize();
-
- intptr_t adjusted_promotion_limit =
- old_gen_promotion_limit_ - new_space_.Capacity();
-
- if (total_promoted >= adjusted_promotion_limit) return true;
-
intptr_t adjusted_allocation_limit =
- old_gen_allocation_limit_ - new_space_.Capacity() / 5;
+ old_generation_allocation_limit_ - new_space_.Capacity();
- if (PromotedSpaceSizeOfObjects() >= adjusted_allocation_limit) return true;
+ if (PromotedTotalSize() >= adjusted_allocation_limit) return true;
return false;
}
-
void UpdateNewSpaceReferencesInExternalStringTable(
ExternalStringTableUpdaterCallback updater_func);
@@ -2000,8 +1987,6 @@ class Heap {
#undef ROOT_ACCESSOR
#ifdef DEBUG
- bool allocation_allowed_;
-
// If the --gc-interval flag is set to a positive value, this
// variable holds the value indicating the number of allocations
   // remaining until the next failure and garbage collection.
@@ -2019,13 +2004,9 @@ class Heap {
// Limit that triggers a global GC on the next (normally caused) GC. This
// is checked when we have already decided to do a GC to help determine
- // which collector to invoke.
- intptr_t old_gen_promotion_limit_;
-
- // Limit that triggers a global GC as soon as is reasonable. This is
- // checked before expanding a paged space in the old generation and on
- // every allocation in large object space.
- intptr_t old_gen_allocation_limit_;
+ // which collector to invoke, before expanding a paged space in the old
+ // generation and on every allocation in large object space.
+ intptr_t old_generation_allocation_limit_;
// Used to adjust the limits that control the timing of the next GC.
intptr_t size_of_old_gen_at_last_old_space_gc_;
@@ -2043,10 +2024,12 @@ class Heap {
// Indicates that an allocation has failed in the old generation since the
// last GC.
- int old_gen_exhausted_;
+ bool old_gen_exhausted_;
Object* native_contexts_list_;
+ Object* array_buffers_list_;
+
StoreBufferRebuilder store_buffer_rebuilder_;
struct StringTypeTable {
@@ -2190,6 +2173,9 @@ class Heap {
// Code to be run before and after mark-compact.
void MarkCompactPrologue();
+ void ProcessNativeContexts(WeakObjectRetainer* retainer, bool record_slots);
+ void ProcessArrayBuffers(WeakObjectRetainer* retainer, bool record_slots);
+
// Record statistics before and after garbage collection.
void ReportStatisticsBeforeGC();
void ReportStatisticsAfterGC();
@@ -2724,43 +2710,6 @@ class DescriptorLookupCache {
};
-// A helper class to document/test C++ scopes where we do not
-// expect a GC. Usage:
-//
-// /* Allocation not allowed: we cannot handle a GC in this scope. */
-// { AssertNoAllocation nogc;
-// ...
-// }
-
-#ifdef DEBUG
-inline bool EnterAllocationScope(Isolate* isolate, bool allow_allocation);
-inline void ExitAllocationScope(Isolate* isolate, bool last_state);
-#endif
-
-
-class AssertNoAllocation {
- public:
- inline AssertNoAllocation();
- inline ~AssertNoAllocation();
-
-#ifdef DEBUG
- private:
- bool last_state_;
-#endif
-};
-
-
-class DisableAssertNoAllocation {
- public:
- inline DisableAssertNoAllocation();
- inline ~DisableAssertNoAllocation();
-
-#ifdef DEBUG
- private:
- bool last_state_;
-#endif
-};
-
// GCTracer collects and prints ONE line after each garbage collector
// invocation IFF --trace_gc is used.
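
The removed AssertNoAllocation/DisableAssertNoAllocation pair is superseded by the scopes in assert-scope.h (DisallowHeapAllocation and friends), used throughout this patch. As a rough illustration of the mechanism, a minimal RAII sketch with a single flag (the real implementation is templated per assertion type and tracks state per isolate):

    #include <assert.h>

    // Minimal sketch, assuming one global flag instead of V8's per-isolate,
    // per-assert-type machinery in assert-scope.h.
    static bool allocation_allowed = true;

    class DisallowHeapAllocationSketch {
     public:
      DisallowHeapAllocationSketch() : last_state_(allocation_allowed) {
        allocation_allowed = false;
      }
      ~DisallowHeapAllocationSketch() { allocation_allowed = last_state_; }
      static bool IsAllowed() { return allocation_allowed; }

     private:
      bool last_state_;  // restored on scope exit, so scopes nest correctly
    };

    void AllocateSomething() {
      assert(DisallowHeapAllocationSketch::IsAllowed());
    }

    int main() {
      AllocateSomething();  // fine
      {
        DisallowHeapAllocationSketch no_gc;  // same usage as the old class
        // AllocateSomething();  // would trip the assert in this scope
      }
      AllocateSomething();  // fine again
      return 0;
    }
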
@@ -2780,6 +2729,8 @@ class GCTracer BASE_EMBEDDED {
MC_UPDATE_POINTERS_TO_EVACUATED,
MC_UPDATE_POINTERS_BETWEEN_EVACUATED,
MC_UPDATE_MISC_POINTERS,
+ MC_WEAKMAP_PROCESS,
+ MC_WEAKMAP_CLEAR,
MC_FLUSH_CODE,
kNumberOfScopes
};
@@ -3075,7 +3026,7 @@ class PathTracer : public ObjectVisitor {
what_to_find_(what_to_find),
visit_mode_(visit_mode),
object_stack_(20),
- no_alloc() {}
+ no_allocation() {}
virtual void VisitPointers(Object** start, Object** end);
@@ -3104,7 +3055,7 @@ class PathTracer : public ObjectVisitor {
VisitMode visit_mode_;
List<Object*> object_stack_;
- AssertNoAllocation no_alloc; // i.e. no gc allowed.
+ DisallowHeapAllocation no_allocation; // i.e. no gc allowed.
private:
DISALLOW_IMPLICIT_CONSTRUCTORS(PathTracer);
diff --git a/deps/v8/src/hydrogen-environment-liveness.cc b/deps/v8/src/hydrogen-environment-liveness.cc
new file mode 100644
index 0000000000..8c660597ab
--- /dev/null
+++ b/deps/v8/src/hydrogen-environment-liveness.cc
@@ -0,0 +1,267 @@
+// Copyright 2013 the V8 project authors. All rights reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following
+// disclaimer in the documentation and/or other materials provided
+// with the distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived
+// from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+
+#include "hydrogen-environment-liveness.h"
+
+
+namespace v8 {
+namespace internal {
+
+
+EnvironmentSlotLivenessAnalyzer::EnvironmentSlotLivenessAnalyzer(
+ HGraph* graph)
+ : graph_(graph),
+ zone_(graph->isolate()),
+ zone_scope_(&zone_, DELETE_ON_EXIT),
+ block_count_(graph->blocks()->length()),
+ maximum_environment_size_(graph->maximum_environment_size()),
+ collect_markers_(true),
+ last_simulate_(NULL) {
+ if (maximum_environment_size_ == 0) return;
+
+ live_at_block_start_ =
+ new(zone()) ZoneList<BitVector*>(block_count_, zone());
+ first_simulate_ = new(zone()) ZoneList<HSimulate*>(block_count_, zone());
+ first_simulate_invalid_for_index_ =
+ new(zone()) ZoneList<BitVector*>(block_count_, zone());
+ markers_ = new(zone())
+ ZoneList<HEnvironmentMarker*>(maximum_environment_size_, zone());
+ went_live_since_last_simulate_ =
+ new(zone()) BitVector(maximum_environment_size_, zone());
+
+ for (int i = 0; i < block_count_; ++i) {
+ live_at_block_start_->Add(
+ new(zone()) BitVector(maximum_environment_size_, zone()), zone());
+ first_simulate_->Add(NULL, zone());
+ first_simulate_invalid_for_index_->Add(
+ new(zone()) BitVector(maximum_environment_size_, zone()), zone());
+ }
+}
+
+
+void EnvironmentSlotLivenessAnalyzer::ZapEnvironmentSlot(int index,
+ HSimulate* simulate) {
+ int operand_index = simulate->ToOperandIndex(index);
+ if (operand_index == -1) {
+ simulate->AddAssignedValue(index, graph_->GetConstantUndefined());
+ } else {
+ simulate->SetOperandAt(operand_index, graph_->GetConstantUndefined());
+ }
+}
+
+
+void EnvironmentSlotLivenessAnalyzer::ZapEnvironmentSlotsInSuccessors(
+ HBasicBlock* block,
+ BitVector* live) {
+ // When a value is live in successor A but dead in B, we must
+ // explicitly zap it in B.
+ for (HSuccessorIterator it(block->end()); !it.Done(); it.Advance()) {
+ HBasicBlock* successor = it.Current();
+ int successor_id = successor->block_id();
+ BitVector* live_in_successor = live_at_block_start_->at(successor_id);
+ if (live_in_successor->Equals(*live)) continue;
+ for (int i = 0; i < live->length(); ++i) {
+ if (!live->Contains(i)) continue;
+ if (live_in_successor->Contains(i)) continue;
+ if (first_simulate_invalid_for_index_->at(successor_id)->Contains(i)) {
+ continue;
+ }
+ HSimulate* simulate = first_simulate_->at(successor_id);
+ if (simulate == NULL) continue;
+ ASSERT(simulate->closure().is_identical_to(
+ block->last_environment()->closure()));
+ ZapEnvironmentSlot(i, simulate);
+ }
+ }
+}
+
+
+void EnvironmentSlotLivenessAnalyzer::ZapEnvironmentSlotsForInstruction(
+ HEnvironmentMarker* marker) {
+ if (!marker->CheckFlag(HValue::kEndsLiveRange)) return;
+ HSimulate* simulate = marker->next_simulate();
+ if (simulate != NULL) {
+ ASSERT(simulate->closure().is_identical_to(marker->closure()));
+ ZapEnvironmentSlot(marker->index(), simulate);
+ }
+}
+
+
+void EnvironmentSlotLivenessAnalyzer::UpdateLivenessAtBlockEnd(
+ HBasicBlock* block,
+ BitVector* live) {
+ // Liveness at the end of each block: union of liveness in successors.
+ live->Clear();
+ for (HSuccessorIterator it(block->end()); !it.Done(); it.Advance()) {
+ live->Union(*live_at_block_start_->at(it.Current()->block_id()));
+ }
+}
+
+
+void EnvironmentSlotLivenessAnalyzer::UpdateLivenessAtInstruction(
+ HInstruction* instr,
+ BitVector* live) {
+ switch (instr->opcode()) {
+ case HValue::kEnvironmentMarker: {
+ HEnvironmentMarker* marker = HEnvironmentMarker::cast(instr);
+ int index = marker->index();
+ if (!live->Contains(index)) {
+ marker->SetFlag(HValue::kEndsLiveRange);
+ } else {
+ marker->ClearFlag(HValue::kEndsLiveRange);
+ }
+ if (!went_live_since_last_simulate_->Contains(index)) {
+ marker->set_next_simulate(last_simulate_);
+ }
+ if (marker->kind() == HEnvironmentMarker::LOOKUP) {
+ live->Add(index);
+ } else {
+ ASSERT(marker->kind() == HEnvironmentMarker::BIND);
+ live->Remove(index);
+ went_live_since_last_simulate_->Add(index);
+ }
+ if (collect_markers_) {
+ // Populate |markers_| list during the first pass.
+ markers_->Add(marker, &zone_);
+ }
+ break;
+ }
+ case HValue::kLeaveInlined:
+ // No environment values are live at the end of an inlined section.
+ live->Clear();
+ last_simulate_ = NULL;
+
+ // The following ASSERTs guard the assumption used in case
+ // kEnterInlined below:
+ ASSERT(instr->next()->IsSimulate());
+ ASSERT(instr->next()->next()->IsGoto());
+
+ break;
+ case HValue::kEnterInlined: {
+      // An environment value is live here iff it is live at any return
+ // target block. Here we make use of the fact that the end of an
+ // inline sequence always looks like this: HLeaveInlined, HSimulate,
+ // HGoto (to return_target block), with no environment lookups in
+ // between (see ASSERTs above).
+ HEnterInlined* enter = HEnterInlined::cast(instr);
+ live->Clear();
+ for (int i = 0; i < enter->return_targets()->length(); ++i) {
+ int return_id = enter->return_targets()->at(i)->block_id();
+ // When an AbnormalExit is involved, it can happen that the return
+ // target block doesn't actually exist.
+ if (return_id < live_at_block_start_->length()) {
+ live->Union(*live_at_block_start_->at(return_id));
+ }
+ }
+ last_simulate_ = NULL;
+ break;
+ }
+ case HValue::kDeoptimize: {
+ // Keep all environment slots alive.
+ HDeoptimize* deopt = HDeoptimize::cast(instr);
+ for (int i = deopt->first_local_index();
+ i < deopt->first_expression_index(); ++i) {
+ live->Add(i);
+ }
+ break;
+ }
+ case HValue::kSimulate:
+ last_simulate_ = HSimulate::cast(instr);
+ went_live_since_last_simulate_->Clear();
+ break;
+ default:
+ break;
+ }
+}
+
+
+void EnvironmentSlotLivenessAnalyzer::AnalyzeAndTrim() {
+ HPhase phase("H_EnvironmentLivenessAnalysis", graph_);
+ if (maximum_environment_size_ == 0) return;
+
+ // Main iteration. Compute liveness of environment slots, and store it
+ // for each block until it doesn't change any more. For efficiency, visit
+ // blocks in reverse order and walk backwards through each block. We
+ // need several iterations to propagate liveness through nested loops.
+ BitVector* live = new(zone()) BitVector(maximum_environment_size_, zone());
+ BitVector* worklist = new(zone()) BitVector(block_count_, zone());
+ for (int i = 0; i < block_count_; ++i) {
+ worklist->Add(i);
+ }
+ while (!worklist->IsEmpty()) {
+ for (int block_id = block_count_ - 1; block_id >= 0; --block_id) {
+ if (!worklist->Contains(block_id)) {
+ continue;
+ }
+ worklist->Remove(block_id);
+ last_simulate_ = NULL;
+
+ HBasicBlock* block = graph_->blocks()->at(block_id);
+ UpdateLivenessAtBlockEnd(block, live);
+
+ for (HInstruction* instr = block->last(); instr != NULL;
+ instr = instr->previous()) {
+ UpdateLivenessAtInstruction(instr, live);
+ }
+
+ // Reached the start of the block, do necessary bookkeeping:
+ // store computed information for this block and add predecessors
+ // to the work list as necessary.
+ first_simulate_->Set(block_id, last_simulate_);
+ first_simulate_invalid_for_index_->at(block_id)->CopyFrom(
+ *went_live_since_last_simulate_);
+ if (live_at_block_start_->at(block_id)->UnionIsChanged(*live)) {
+ for (int i = 0; i < block->predecessors()->length(); ++i) {
+ worklist->Add(block->predecessors()->at(i)->block_id());
+ }
+ if (block->IsInlineReturnTarget()) {
+ worklist->Add(block->inlined_entry_block()->block_id());
+ }
+ }
+ }
+ // Only collect bind/lookup instructions during the first pass.
+ collect_markers_ = false;
+ }
+
+ // Analysis finished. Zap dead environment slots.
+ for (int i = 0; i < markers_->length(); ++i) {
+ ZapEnvironmentSlotsForInstruction(markers_->at(i));
+ }
+ for (int block_id = block_count_ - 1; block_id >= 0; --block_id) {
+ HBasicBlock* block = graph_->blocks()->at(block_id);
+ UpdateLivenessAtBlockEnd(block, live);
+ ZapEnvironmentSlotsInSuccessors(block, live);
+ }
+
+ // Finally, remove the HEnvironment{Bind,Lookup} markers.
+ for (int i = 0; i < markers_->length(); ++i) {
+ markers_->at(i)->DeleteAndReplaceWith(NULL);
+ }
+}
+
+} } // namespace v8::internal
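
AnalyzeAndTrim above is a standard backwards dataflow fixpoint: live-out of a block is the union of its successors' live-in, instructions are visited in reverse, and predecessors are re-queued whenever a block's live-in grows. A self-contained toy version over bit masks (hypothetical Block type, not the Hydrogen API), with the per-instruction walk collapsed into gen/kill sets:

    #include <stdint.h>
    #include <vector>

    struct Block {
      std::vector<int> successors;
      std::vector<int> predecessors;
      uint32_t gen;   // slots read before any write (cf. LOOKUP markers)
      uint32_t kill;  // slots written (cf. BIND markers)
    };

    std::vector<uint32_t> ComputeLiveIn(const std::vector<Block>& blocks) {
      std::vector<uint32_t> live_in(blocks.size(), 0);
      std::vector<bool> queued(blocks.size(), true);
      bool changed = true;
      while (changed) {
        changed = false;
        // Reverse block order converges quickly, as in the real pass.
        for (int id = static_cast<int>(blocks.size()) - 1; id >= 0; --id) {
          if (!queued[id]) continue;
          queued[id] = false;
          uint32_t live = 0;  // live-out: union over successors
          for (size_t s = 0; s < blocks[id].successors.size(); ++s)
            live |= live_in[blocks[id].successors[s]];
          live = (live & ~blocks[id].kill) | blocks[id].gen;
          if (live != live_in[id]) {
            live_in[id] = live;
            for (size_t p = 0; p < blocks[id].predecessors.size(); ++p) {
              queued[blocks[id].predecessors[p]] = true;  // re-queue preds
              changed = true;
            }
          }
        }
      }
      return live_in;
    }

The real analyzer additionally remembers the first HSimulate of each block so that slots which die across an edge can be zapped with undefined at a deopt-safe point.
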
diff --git a/deps/v8/src/hydrogen-environment-liveness.h b/deps/v8/src/hydrogen-environment-liveness.h
new file mode 100644
index 0000000000..484e56d52e
--- /dev/null
+++ b/deps/v8/src/hydrogen-environment-liveness.h
@@ -0,0 +1,94 @@
+// Copyright 2013 the V8 project authors. All rights reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following
+// disclaimer in the documentation and/or other materials provided
+// with the distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived
+// from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+#ifndef V8_HYDROGEN_ENVIRONMENT_LIVENESS_H_
+#define V8_HYDROGEN_ENVIRONMENT_LIVENESS_H_
+
+
+#include "hydrogen.h"
+
+namespace v8 {
+namespace internal {
+
+
+// Trims live ranges of environment slots by doing explicit liveness analysis.
+// Values in the environment are kept alive by every subsequent LInstruction
+// that is assigned an LEnvironment, which creates register pressure and
+// unnecessary spill slot moves. Therefore it is beneficial to trim the
+// live ranges of environment slots by zapping them with a constant after
+// the last lookup that refers to them.
+// Slots are identified by their index and only affected if whitelisted in
+// HOptimizedGraphBuilder::IsEligibleForEnvironmentLivenessAnalysis().
+class EnvironmentSlotLivenessAnalyzer {
+ public:
+ explicit EnvironmentSlotLivenessAnalyzer(HGraph* graph);
+
+ void AnalyzeAndTrim();
+
+ private:
+ void ZapEnvironmentSlot(int index, HSimulate* simulate);
+ void ZapEnvironmentSlotsInSuccessors(HBasicBlock* block, BitVector* live);
+ void ZapEnvironmentSlotsForInstruction(HEnvironmentMarker* marker);
+ void UpdateLivenessAtBlockEnd(HBasicBlock* block, BitVector* live);
+ void UpdateLivenessAtInstruction(HInstruction* instr, BitVector* live);
+
+ Zone* zone() { return &zone_; }
+
+ HGraph* graph_;
+ // Use a dedicated Zone for this phase, with a ZoneScope to ensure it
+ // gets freed.
+ Zone zone_;
+ ZoneScope zone_scope_;
+
+ int block_count_;
+
+ // Largest number of local variables in any environment in the graph
+ // (including inlined environments).
+ int maximum_environment_size_;
+
+ // Per-block data. All these lists are indexed by block_id.
+ ZoneList<BitVector*>* live_at_block_start_;
+ ZoneList<HSimulate*>* first_simulate_;
+ ZoneList<BitVector*>* first_simulate_invalid_for_index_;
+
+ // List of all HEnvironmentMarker instructions for quick iteration/deletion.
+ // It is populated during the first pass over the graph, controlled by
+ // |collect_markers_|.
+ ZoneList<HEnvironmentMarker*>* markers_;
+ bool collect_markers_;
+
+ // Keeps track of the last simulate seen, as well as the environment slots
+ // for which a new live range has started since (so they must not be zapped
+ // in that simulate when the end of another live range of theirs is found).
+ HSimulate* last_simulate_;
+ BitVector* went_live_since_last_simulate_;
+};
+
+
+} } // namespace v8::internal
+
+#endif /* V8_HYDROGEN_ENVIRONMENT_LIVENESS_H_ */
diff --git a/deps/v8/src/hydrogen-gvn.cc b/deps/v8/src/hydrogen-gvn.cc
new file mode 100644
index 0000000000..aa2dff7655
--- /dev/null
+++ b/deps/v8/src/hydrogen-gvn.cc
@@ -0,0 +1,855 @@
+// Copyright 2013 the V8 project authors. All rights reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following
+// disclaimer in the documentation and/or other materials provided
+// with the distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived
+// from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+#include "hydrogen.h"
+#include "hydrogen-gvn.h"
+#include "v8.h"
+
+namespace v8 {
+namespace internal {
+
+class HValueMap: public ZoneObject {
+ public:
+ explicit HValueMap(Zone* zone)
+ : array_size_(0),
+ lists_size_(0),
+ count_(0),
+ present_flags_(0),
+ array_(NULL),
+ lists_(NULL),
+ free_list_head_(kNil) {
+ ResizeLists(kInitialSize, zone);
+ Resize(kInitialSize, zone);
+ }
+
+ void Kill(GVNFlagSet flags);
+
+ void Add(HValue* value, Zone* zone) {
+ present_flags_.Add(value->gvn_flags());
+ Insert(value, zone);
+ }
+
+ HValue* Lookup(HValue* value) const;
+
+ HValueMap* Copy(Zone* zone) const {
+ return new(zone) HValueMap(zone, this);
+ }
+
+ bool IsEmpty() const { return count_ == 0; }
+
+ private:
+ // A linked list of HValue* values. Stored in arrays.
+ struct HValueMapListElement {
+ HValue* value;
+ int next; // Index in the array of the next list element.
+ };
+ static const int kNil = -1; // The end of a linked list
+
+ // Must be a power of 2.
+ static const int kInitialSize = 16;
+
+ HValueMap(Zone* zone, const HValueMap* other);
+
+ void Resize(int new_size, Zone* zone);
+ void ResizeLists(int new_size, Zone* zone);
+ void Insert(HValue* value, Zone* zone);
+ uint32_t Bound(uint32_t value) const { return value & (array_size_ - 1); }
+
+ int array_size_;
+ int lists_size_;
+ int count_; // The number of values stored in the HValueMap.
+ GVNFlagSet present_flags_; // All flags that are in any value in the
+ // HValueMap.
+ HValueMapListElement* array_; // Primary store - contains the first value
+ // with a given hash. Colliding elements are stored in linked lists.
+ HValueMapListElement* lists_; // The linked lists containing hash collisions.
+ int free_list_head_; // Unused elements in lists_ are on the free list.
+};
+
+
+class HSideEffectMap BASE_EMBEDDED {
+ public:
+ HSideEffectMap();
+ explicit HSideEffectMap(HSideEffectMap* other);
+ HSideEffectMap& operator= (const HSideEffectMap& other);
+
+ void Kill(GVNFlagSet flags);
+
+ void Store(GVNFlagSet flags, HInstruction* instr);
+
+ bool IsEmpty() const { return count_ == 0; }
+
+ inline HInstruction* operator[](int i) const {
+ ASSERT(0 <= i);
+ ASSERT(i < kNumberOfTrackedSideEffects);
+ return data_[i];
+ }
+ inline HInstruction* at(int i) const { return operator[](i); }
+
+ private:
+ int count_;
+ HInstruction* data_[kNumberOfTrackedSideEffects];
+};
+
+
+void TraceGVN(const char* msg, ...) {
+ va_list arguments;
+ va_start(arguments, msg);
+ OS::VPrint(msg, arguments);
+ va_end(arguments);
+}
+
+// Wrap TraceGVN in macros to avoid the expense of evaluating its arguments when
+// --trace-gvn is off.
+#define TRACE_GVN_1(msg, a1) \
+ if (FLAG_trace_gvn) { \
+ TraceGVN(msg, a1); \
+ }
+
+#define TRACE_GVN_2(msg, a1, a2) \
+ if (FLAG_trace_gvn) { \
+ TraceGVN(msg, a1, a2); \
+ }
+
+#define TRACE_GVN_3(msg, a1, a2, a3) \
+ if (FLAG_trace_gvn) { \
+ TraceGVN(msg, a1, a2, a3); \
+ }
+
+#define TRACE_GVN_4(msg, a1, a2, a3, a4) \
+ if (FLAG_trace_gvn) { \
+ TraceGVN(msg, a1, a2, a3, a4); \
+ }
+
+#define TRACE_GVN_5(msg, a1, a2, a3, a4, a5) \
+ if (FLAG_trace_gvn) { \
+ TraceGVN(msg, a1, a2, a3, a4, a5); \
+ }
+
+
+HValueMap::HValueMap(Zone* zone, const HValueMap* other)
+ : array_size_(other->array_size_),
+ lists_size_(other->lists_size_),
+ count_(other->count_),
+ present_flags_(other->present_flags_),
+ array_(zone->NewArray<HValueMapListElement>(other->array_size_)),
+ lists_(zone->NewArray<HValueMapListElement>(other->lists_size_)),
+ free_list_head_(other->free_list_head_) {
+ OS::MemCopy(
+ array_, other->array_, array_size_ * sizeof(HValueMapListElement));
+ OS::MemCopy(
+ lists_, other->lists_, lists_size_ * sizeof(HValueMapListElement));
+}
+
+
+void HValueMap::Kill(GVNFlagSet flags) {
+ GVNFlagSet depends_flags = HValue::ConvertChangesToDependsFlags(flags);
+ if (!present_flags_.ContainsAnyOf(depends_flags)) return;
+ present_flags_.RemoveAll();
+ for (int i = 0; i < array_size_; ++i) {
+ HValue* value = array_[i].value;
+ if (value != NULL) {
+ // Clear list of collisions first, so we know if it becomes empty.
+ int kept = kNil; // List of kept elements.
+ int next;
+ for (int current = array_[i].next; current != kNil; current = next) {
+ next = lists_[current].next;
+ HValue* value = lists_[current].value;
+ if (value->gvn_flags().ContainsAnyOf(depends_flags)) {
+ // Drop it.
+ count_--;
+ lists_[current].next = free_list_head_;
+ free_list_head_ = current;
+ } else {
+ // Keep it.
+ lists_[current].next = kept;
+ kept = current;
+ present_flags_.Add(value->gvn_flags());
+ }
+ }
+ array_[i].next = kept;
+
+ // Now possibly drop directly indexed element.
+ value = array_[i].value;
+ if (value->gvn_flags().ContainsAnyOf(depends_flags)) { // Drop it.
+ count_--;
+ int head = array_[i].next;
+ if (head == kNil) {
+ array_[i].value = NULL;
+ } else {
+ array_[i].value = lists_[head].value;
+ array_[i].next = lists_[head].next;
+ lists_[head].next = free_list_head_;
+ free_list_head_ = head;
+ }
+ } else {
+ present_flags_.Add(value->gvn_flags()); // Keep it.
+ }
+ }
+ }
+}
+
+
+HValue* HValueMap::Lookup(HValue* value) const {
+ uint32_t hash = static_cast<uint32_t>(value->Hashcode());
+ uint32_t pos = Bound(hash);
+ if (array_[pos].value != NULL) {
+ if (array_[pos].value->Equals(value)) return array_[pos].value;
+ int next = array_[pos].next;
+ while (next != kNil) {
+ if (lists_[next].value->Equals(value)) return lists_[next].value;
+ next = lists_[next].next;
+ }
+ }
+ return NULL;
+}
+
+
+void HValueMap::Resize(int new_size, Zone* zone) {
+ ASSERT(new_size > count_);
+ // Hashing the values into the new array has no more collisions than in the
+ // old hash map, so we can use the existing lists_ array, if we are careful.
+
+ // Make sure we have at least one free element.
+ if (free_list_head_ == kNil) {
+ ResizeLists(lists_size_ << 1, zone);
+ }
+
+ HValueMapListElement* new_array =
+ zone->NewArray<HValueMapListElement>(new_size);
+ memset(new_array, 0, sizeof(HValueMapListElement) * new_size);
+
+ HValueMapListElement* old_array = array_;
+ int old_size = array_size_;
+
+ int old_count = count_;
+ count_ = 0;
+ // Do not modify present_flags_. It is currently correct.
+ array_size_ = new_size;
+ array_ = new_array;
+
+ if (old_array != NULL) {
+ // Iterate over all the elements in lists, rehashing them.
+ for (int i = 0; i < old_size; ++i) {
+ if (old_array[i].value != NULL) {
+ int current = old_array[i].next;
+ while (current != kNil) {
+ Insert(lists_[current].value, zone);
+ int next = lists_[current].next;
+ lists_[current].next = free_list_head_;
+ free_list_head_ = current;
+ current = next;
+ }
+ // Rehash the directly stored value.
+ Insert(old_array[i].value, zone);
+ }
+ }
+ }
+ USE(old_count);
+ ASSERT(count_ == old_count);
+}
+
+
+void HValueMap::ResizeLists(int new_size, Zone* zone) {
+ ASSERT(new_size > lists_size_);
+
+ HValueMapListElement* new_lists =
+ zone->NewArray<HValueMapListElement>(new_size);
+ memset(new_lists, 0, sizeof(HValueMapListElement) * new_size);
+
+ HValueMapListElement* old_lists = lists_;
+ int old_size = lists_size_;
+
+ lists_size_ = new_size;
+ lists_ = new_lists;
+
+ if (old_lists != NULL) {
+ OS::MemCopy(lists_, old_lists, old_size * sizeof(HValueMapListElement));
+ }
+ for (int i = old_size; i < lists_size_; ++i) {
+ lists_[i].next = free_list_head_;
+ free_list_head_ = i;
+ }
+}
+
+
+void HValueMap::Insert(HValue* value, Zone* zone) {
+ ASSERT(value != NULL);
+ // Resizing when half of the hashtable is filled up.
+ if (count_ >= array_size_ >> 1) Resize(array_size_ << 1, zone);
+ ASSERT(count_ < array_size_);
+ count_++;
+ uint32_t pos = Bound(static_cast<uint32_t>(value->Hashcode()));
+ if (array_[pos].value == NULL) {
+ array_[pos].value = value;
+ array_[pos].next = kNil;
+ } else {
+ if (free_list_head_ == kNil) {
+ ResizeLists(lists_size_ << 1, zone);
+ }
+ int new_element_pos = free_list_head_;
+ ASSERT(new_element_pos != kNil);
+ free_list_head_ = lists_[free_list_head_].next;
+ lists_[new_element_pos].value = value;
+ lists_[new_element_pos].next = array_[pos].next;
+ ASSERT(array_[pos].next == kNil || lists_[array_[pos].next].value != NULL);
+ array_[pos].next = new_element_pos;
+ }
+}
+
+
+HSideEffectMap::HSideEffectMap() : count_(0) {
+ memset(data_, 0, kNumberOfTrackedSideEffects * kPointerSize);
+}
+
+
+HSideEffectMap::HSideEffectMap(HSideEffectMap* other) : count_(other->count_) {
+ *this = *other; // Calls operator=.
+}
+
+
+HSideEffectMap& HSideEffectMap::operator= (const HSideEffectMap& other) {
+ if (this != &other) {
+ OS::MemCopy(data_, other.data_, kNumberOfTrackedSideEffects * kPointerSize);
+ }
+ return *this;
+}
+
+void HSideEffectMap::Kill(GVNFlagSet flags) {
+ for (int i = 0; i < kNumberOfTrackedSideEffects; i++) {
+ GVNFlag changes_flag = HValue::ChangesFlagFromInt(i);
+ if (flags.Contains(changes_flag)) {
+ if (data_[i] != NULL) count_--;
+ data_[i] = NULL;
+ }
+ }
+}
+
+
+void HSideEffectMap::Store(GVNFlagSet flags, HInstruction* instr) {
+ for (int i = 0; i < kNumberOfTrackedSideEffects; i++) {
+ GVNFlag changes_flag = HValue::ChangesFlagFromInt(i);
+ if (flags.Contains(changes_flag)) {
+ if (data_[i] == NULL) count_++;
+ data_[i] = instr;
+ }
+ }
+}
+
+
+HGlobalValueNumberer::HGlobalValueNumberer(HGraph* graph, CompilationInfo* info)
+ : graph_(graph),
+ info_(info),
+ removed_side_effects_(false),
+ block_side_effects_(graph->blocks()->length(), graph->zone()),
+ loop_side_effects_(graph->blocks()->length(), graph->zone()),
+ visited_on_paths_(graph->zone(), graph->blocks()->length()) {
+ ASSERT(!AllowHandleAllocation::IsAllowed());
+ block_side_effects_.AddBlock(GVNFlagSet(), graph_->blocks()->length(),
+ graph_->zone());
+ loop_side_effects_.AddBlock(GVNFlagSet(), graph_->blocks()->length(),
+ graph_->zone());
+ }
+
+bool HGlobalValueNumberer::Analyze() {
+ removed_side_effects_ = false;
+ ComputeBlockSideEffects();
+ if (FLAG_loop_invariant_code_motion) {
+ LoopInvariantCodeMotion();
+ }
+ AnalyzeGraph();
+ return removed_side_effects_;
+}
+
+
+void HGlobalValueNumberer::ComputeBlockSideEffects() {
+ // The Analyze phase of GVN can be called multiple times. Clear loop side
+ // effects before computing them to erase the contents from previous Analyze
+ // passes.
+ for (int i = 0; i < loop_side_effects_.length(); ++i) {
+ loop_side_effects_[i].RemoveAll();
+ }
+ for (int i = graph_->blocks()->length() - 1; i >= 0; --i) {
+ // Compute side effects for the block.
+ HBasicBlock* block = graph_->blocks()->at(i);
+ HInstruction* instr = block->first();
+ int id = block->block_id();
+ GVNFlagSet side_effects;
+ while (instr != NULL) {
+ side_effects.Add(instr->ChangesFlags());
+ if (instr->IsSoftDeoptimize()) {
+ block_side_effects_[id].RemoveAll();
+ side_effects.RemoveAll();
+ break;
+ }
+ instr = instr->next();
+ }
+ block_side_effects_[id].Add(side_effects);
+
+ // Loop headers are part of their loop.
+ if (block->IsLoopHeader()) {
+ loop_side_effects_[id].Add(side_effects);
+ }
+
+ // Propagate loop side effects upwards.
+ if (block->HasParentLoopHeader()) {
+ int header_id = block->parent_loop_header()->block_id();
+ loop_side_effects_[header_id].Add(block->IsLoopHeader()
+ ? loop_side_effects_[id]
+ : side_effects);
+ }
+ }
+}
+
+
+SmartArrayPointer<char> GetGVNFlagsString(GVNFlagSet flags) {
+ char underlying_buffer[kLastFlag * 128];
+ Vector<char> buffer(underlying_buffer, sizeof(underlying_buffer));
+#if DEBUG
+ int offset = 0;
+ const char* separator = "";
+ const char* comma = ", ";
+ buffer[0] = 0;
+ uint32_t set_depends_on = 0;
+ uint32_t set_changes = 0;
+ for (int bit = 0; bit < kLastFlag; ++bit) {
+ if ((flags.ToIntegral() & (1 << bit)) != 0) {
+ if (bit % 2 == 0) {
+ set_changes++;
+ } else {
+ set_depends_on++;
+ }
+ }
+ }
+ bool positive_changes = set_changes < (kLastFlag / 2);
+ bool positive_depends_on = set_depends_on < (kLastFlag / 2);
+ if (set_changes > 0) {
+ if (positive_changes) {
+ offset += OS::SNPrintF(buffer + offset, "changes [");
+ } else {
+ offset += OS::SNPrintF(buffer + offset, "changes all except [");
+ }
+ for (int bit = 0; bit < kLastFlag; ++bit) {
+ if (((flags.ToIntegral() & (1 << bit)) != 0) == positive_changes) {
+ switch (static_cast<GVNFlag>(bit)) {
+#define DECLARE_FLAG(type) \
+ case kChanges##type: \
+ offset += OS::SNPrintF(buffer + offset, separator); \
+ offset += OS::SNPrintF(buffer + offset, #type); \
+ separator = comma; \
+ break;
+GVN_TRACKED_FLAG_LIST(DECLARE_FLAG)
+GVN_UNTRACKED_FLAG_LIST(DECLARE_FLAG)
+#undef DECLARE_FLAG
+ default:
+ break;
+ }
+ }
+ }
+ offset += OS::SNPrintF(buffer + offset, "]");
+ }
+ if (set_depends_on > 0) {
+ separator = "";
+ if (set_changes > 0) {
+ offset += OS::SNPrintF(buffer + offset, ", ");
+ }
+ if (positive_depends_on) {
+ offset += OS::SNPrintF(buffer + offset, "depends on [");
+ } else {
+ offset += OS::SNPrintF(buffer + offset, "depends on all except [");
+ }
+ for (int bit = 0; bit < kLastFlag; ++bit) {
+ if (((flags.ToIntegral() & (1 << bit)) != 0) == positive_depends_on) {
+ switch (static_cast<GVNFlag>(bit)) {
+#define DECLARE_FLAG(type) \
+ case kDependsOn##type: \
+ offset += OS::SNPrintF(buffer + offset, separator); \
+ offset += OS::SNPrintF(buffer + offset, #type); \
+ separator = comma; \
+ break;
+GVN_TRACKED_FLAG_LIST(DECLARE_FLAG)
+GVN_UNTRACKED_FLAG_LIST(DECLARE_FLAG)
+#undef DECLARE_FLAG
+ default:
+ break;
+ }
+ }
+ }
+ offset += OS::SNPrintF(buffer + offset, "]");
+ }
+#else
+ OS::SNPrintF(buffer, "0x%08X", flags.ToIntegral());
+#endif
+ size_t string_len = strlen(underlying_buffer) + 1;
+ ASSERT(string_len <= sizeof(underlying_buffer));
+  char* result = new char[string_len];
+ OS::MemCopy(result, underlying_buffer, string_len);
+ return SmartArrayPointer<char>(result);
+}
+
+
+void HGlobalValueNumberer::LoopInvariantCodeMotion() {
+ TRACE_GVN_1("Using optimistic loop invariant code motion: %s\n",
+ graph_->use_optimistic_licm() ? "yes" : "no");
+ for (int i = graph_->blocks()->length() - 1; i >= 0; --i) {
+ HBasicBlock* block = graph_->blocks()->at(i);
+ if (block->IsLoopHeader()) {
+ GVNFlagSet side_effects = loop_side_effects_[block->block_id()];
+ TRACE_GVN_2("Try loop invariant motion for block B%d %s\n",
+ block->block_id(),
+ *GetGVNFlagsString(side_effects));
+
+ GVNFlagSet accumulated_first_time_depends;
+ GVNFlagSet accumulated_first_time_changes;
+ HBasicBlock* last = block->loop_information()->GetLastBackEdge();
+ for (int j = block->block_id(); j <= last->block_id(); ++j) {
+ ProcessLoopBlock(graph_->blocks()->at(j), block, side_effects,
+ &accumulated_first_time_depends,
+ &accumulated_first_time_changes);
+ }
+ }
+ }
+}
+
+
+void HGlobalValueNumberer::ProcessLoopBlock(
+ HBasicBlock* block,
+ HBasicBlock* loop_header,
+ GVNFlagSet loop_kills,
+ GVNFlagSet* first_time_depends,
+ GVNFlagSet* first_time_changes) {
+ HBasicBlock* pre_header = loop_header->predecessors()->at(0);
+ GVNFlagSet depends_flags = HValue::ConvertChangesToDependsFlags(loop_kills);
+ TRACE_GVN_2("Loop invariant motion for B%d %s\n",
+ block->block_id(),
+ *GetGVNFlagsString(depends_flags));
+ HInstruction* instr = block->first();
+ while (instr != NULL) {
+ HInstruction* next = instr->next();
+ bool hoisted = false;
+ if (instr->CheckFlag(HValue::kUseGVN)) {
+ TRACE_GVN_4("Checking instruction %d (%s) %s. Loop %s\n",
+ instr->id(),
+ instr->Mnemonic(),
+ *GetGVNFlagsString(instr->gvn_flags()),
+ *GetGVNFlagsString(loop_kills));
+ bool can_hoist = !instr->gvn_flags().ContainsAnyOf(depends_flags);
+ if (can_hoist && !graph()->use_optimistic_licm()) {
+ can_hoist = block->IsLoopSuccessorDominator();
+ }
+
+ if (can_hoist) {
+ bool inputs_loop_invariant = true;
+ for (int i = 0; i < instr->OperandCount(); ++i) {
+ if (instr->OperandAt(i)->IsDefinedAfter(pre_header)) {
+ inputs_loop_invariant = false;
+ }
+ }
+
+ if (inputs_loop_invariant && ShouldMove(instr, loop_header)) {
+ TRACE_GVN_1("Hoisting loop invariant instruction %d\n", instr->id());
+ // Move the instruction out of the loop.
+ instr->Unlink();
+ instr->InsertBefore(pre_header->end());
+ if (instr->HasSideEffects()) removed_side_effects_ = true;
+ hoisted = true;
+ }
+ }
+ }
+ if (!hoisted) {
+ // If an instruction is not hoisted, we have to account for its side
+ // effects when hoisting later HTransitionElementsKind instructions.
+ GVNFlagSet previous_depends = *first_time_depends;
+ GVNFlagSet previous_changes = *first_time_changes;
+ first_time_depends->Add(instr->DependsOnFlags());
+ first_time_changes->Add(instr->ChangesFlags());
+ if (!(previous_depends == *first_time_depends)) {
+ TRACE_GVN_1("Updated first-time accumulated %s\n",
+ *GetGVNFlagsString(*first_time_depends));
+ }
+ if (!(previous_changes == *first_time_changes)) {
+ TRACE_GVN_1("Updated first-time accumulated %s\n",
+ *GetGVNFlagsString(*first_time_changes));
+ }
+ }
+ instr = next;
+ }
+}
+
+
+bool HGlobalValueNumberer::AllowCodeMotion() {
+ return info()->IsStub() || info()->opt_count() + 1 < FLAG_max_opt_count;
+}
+
+
+bool HGlobalValueNumberer::ShouldMove(HInstruction* instr,
+ HBasicBlock* loop_header) {
+ // If we've disabled code motion or we're in a block that unconditionally
+ // deoptimizes, don't move any instructions.
+ return AllowCodeMotion() && !instr->block()->IsDeoptimizing();
+}
+
+
+GVNFlagSet HGlobalValueNumberer::CollectSideEffectsOnPathsToDominatedBlock(
+ HBasicBlock* dominator, HBasicBlock* dominated) {
+ GVNFlagSet side_effects;
+ for (int i = 0; i < dominated->predecessors()->length(); ++i) {
+ HBasicBlock* block = dominated->predecessors()->at(i);
+ if (dominator->block_id() < block->block_id() &&
+ block->block_id() < dominated->block_id() &&
+ visited_on_paths_.Add(block->block_id())) {
+ side_effects.Add(block_side_effects_[block->block_id()]);
+ if (block->IsLoopHeader()) {
+ side_effects.Add(loop_side_effects_[block->block_id()]);
+ }
+ side_effects.Add(CollectSideEffectsOnPathsToDominatedBlock(
+ dominator, block));
+ }
+ }
+ return side_effects;
+}
+
+
+// Each instance of this class is like a "stack frame" for the recursive
+// traversal of the dominator tree done during GVN (the stack is handled
+// as a doubly linked list).
+// We reuse frames when possible so the list length is limited by the depth
+// of the dominator tree, but this forces us to initialize each frame by
+// calling an explicit "Initialize" method instead of using a constructor.
+class GvnBasicBlockState: public ZoneObject {
+ public:
+ static GvnBasicBlockState* CreateEntry(Zone* zone,
+ HBasicBlock* entry_block,
+ HValueMap* entry_map) {
+ return new(zone)
+ GvnBasicBlockState(NULL, entry_block, entry_map, NULL, zone);
+ }
+
+ HBasicBlock* block() { return block_; }
+ HValueMap* map() { return map_; }
+ HSideEffectMap* dominators() { return &dominators_; }
+
+ GvnBasicBlockState* next_in_dominator_tree_traversal(
+ Zone* zone,
+ HBasicBlock** dominator) {
+ // This assignment needs to happen before calling next_dominated() because
+ // that call can reuse "this" if we are at the last dominated block.
+ *dominator = block();
+ GvnBasicBlockState* result = next_dominated(zone);
+ if (result == NULL) {
+ GvnBasicBlockState* dominator_state = pop();
+ if (dominator_state != NULL) {
+ // This branch is guaranteed not to return NULL because pop() never
+ // returns a state where "is_done() == true".
+ *dominator = dominator_state->block();
+ result = dominator_state->next_dominated(zone);
+ } else {
+        // Unnecessary (we are returning NULL) but done for cleanliness.
+ *dominator = NULL;
+ }
+ }
+ return result;
+ }
+
+ private:
+ void Initialize(HBasicBlock* block,
+ HValueMap* map,
+ HSideEffectMap* dominators,
+ bool copy_map,
+ Zone* zone) {
+ block_ = block;
+ map_ = copy_map ? map->Copy(zone) : map;
+ dominated_index_ = -1;
+ length_ = block->dominated_blocks()->length();
+ if (dominators != NULL) {
+ dominators_ = *dominators;
+ }
+ }
+ bool is_done() { return dominated_index_ >= length_; }
+
+ GvnBasicBlockState(GvnBasicBlockState* previous,
+ HBasicBlock* block,
+ HValueMap* map,
+ HSideEffectMap* dominators,
+ Zone* zone)
+ : previous_(previous), next_(NULL) {
+ Initialize(block, map, dominators, true, zone);
+ }
+
+ GvnBasicBlockState* next_dominated(Zone* zone) {
+ dominated_index_++;
+ if (dominated_index_ == length_ - 1) {
+ // No need to copy the map for the last child in the dominator tree.
+ Initialize(block_->dominated_blocks()->at(dominated_index_),
+ map(),
+ dominators(),
+ false,
+ zone);
+ return this;
+ } else if (dominated_index_ < length_) {
+ return push(zone,
+ block_->dominated_blocks()->at(dominated_index_),
+ dominators());
+ } else {
+ return NULL;
+ }
+ }
+
+ GvnBasicBlockState* push(Zone* zone,
+ HBasicBlock* block,
+ HSideEffectMap* dominators) {
+ if (next_ == NULL) {
+ next_ =
+ new(zone) GvnBasicBlockState(this, block, map(), dominators, zone);
+ } else {
+ next_->Initialize(block, map(), dominators, true, zone);
+ }
+ return next_;
+ }
+ GvnBasicBlockState* pop() {
+ GvnBasicBlockState* result = previous_;
+ while (result != NULL && result->is_done()) {
+      TRACE_GVN_2("Backtracking from block B%d to block B%d\n",
+                  block()->block_id(),
+                  previous_->block()->block_id());
+ result = result->previous_;
+ }
+ return result;
+ }
+
+ GvnBasicBlockState* previous_;
+ GvnBasicBlockState* next_;
+ HBasicBlock* block_;
+ HValueMap* map_;
+ HSideEffectMap dominators_;
+ int dominated_index_;
+ int length_;
+};
+
+// This is a recursive traversal of the dominator tree but it has been turned
+// into a loop to avoid stack overflows.
+// The logical "stack frames" of the recursion are kept in a list of
+// GvnBasicBlockState instances.
+void HGlobalValueNumberer::AnalyzeGraph() {
+ HBasicBlock* entry_block = graph_->entry_block();
+ HValueMap* entry_map = new(zone()) HValueMap(zone());
+ GvnBasicBlockState* current =
+ GvnBasicBlockState::CreateEntry(zone(), entry_block, entry_map);
+
+ while (current != NULL) {
+ HBasicBlock* block = current->block();
+ HValueMap* map = current->map();
+ HSideEffectMap* dominators = current->dominators();
+
+ TRACE_GVN_2("Analyzing block B%d%s\n",
+ block->block_id(),
+ block->IsLoopHeader() ? " (loop header)" : "");
+
+ // If this is a loop header kill everything killed by the loop.
+ if (block->IsLoopHeader()) {
+ map->Kill(loop_side_effects_[block->block_id()]);
+ }
+
+ // Go through all instructions of the current block.
+ HInstruction* instr = block->first();
+ while (instr != NULL) {
+ HInstruction* next = instr->next();
+ GVNFlagSet flags = instr->ChangesFlags();
+ if (!flags.IsEmpty()) {
+ // Clear all instructions in the map that are affected by side effects.
+ // Store instruction as the dominating one for tracked side effects.
+ map->Kill(flags);
+ dominators->Store(flags, instr);
+ TRACE_GVN_2("Instruction %d %s\n", instr->id(),
+ *GetGVNFlagsString(flags));
+ }
+ if (instr->CheckFlag(HValue::kUseGVN)) {
+ ASSERT(!instr->HasObservableSideEffects());
+ HValue* other = map->Lookup(instr);
+ if (other != NULL) {
+ ASSERT(instr->Equals(other) && other->Equals(instr));
+ TRACE_GVN_4("Replacing value %d (%s) with value %d (%s)\n",
+ instr->id(),
+ instr->Mnemonic(),
+ other->id(),
+ other->Mnemonic());
+ if (instr->HasSideEffects()) removed_side_effects_ = true;
+ instr->DeleteAndReplaceWith(other);
+ } else {
+ map->Add(instr, zone());
+ }
+ }
+ if (instr->IsLinked() &&
+ instr->CheckFlag(HValue::kTrackSideEffectDominators)) {
+ for (int i = 0; i < kNumberOfTrackedSideEffects; i++) {
+ HValue* other = dominators->at(i);
+ GVNFlag changes_flag = HValue::ChangesFlagFromInt(i);
+ GVNFlag depends_on_flag = HValue::DependsOnFlagFromInt(i);
+ if (instr->DependsOnFlags().Contains(depends_on_flag) &&
+ (other != NULL)) {
+ TRACE_GVN_5("Side-effect #%d in %d (%s) is dominated by %d (%s)\n",
+ i,
+ instr->id(),
+ instr->Mnemonic(),
+ other->id(),
+ other->Mnemonic());
+ instr->SetSideEffectDominator(changes_flag, other);
+ }
+ }
+ }
+ instr = next;
+ }
+
+ HBasicBlock* dominator_block;
+ GvnBasicBlockState* next =
+ current->next_in_dominator_tree_traversal(zone(), &dominator_block);
+
+ if (next != NULL) {
+ HBasicBlock* dominated = next->block();
+ HValueMap* successor_map = next->map();
+ HSideEffectMap* successor_dominators = next->dominators();
+
+ // Kill everything killed on any path between this block and the
+ // dominated block. We don't have to traverse these paths if the
+      // value map and the dominators list are already empty. If the range
+ // of block ids (block_id, dominated_id) is empty there are no such
+ // paths.
+ if ((!successor_map->IsEmpty() || !successor_dominators->IsEmpty()) &&
+ dominator_block->block_id() + 1 < dominated->block_id()) {
+ visited_on_paths_.Clear();
+ GVNFlagSet side_effects_on_all_paths =
+ CollectSideEffectsOnPathsToDominatedBlock(dominator_block,
+ dominated);
+ successor_map->Kill(side_effects_on_all_paths);
+ successor_dominators->Kill(side_effects_on_all_paths);
+ }
+ }
+ current = next;
+ }
+}
+
+} } // namespace v8::internal
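
GvnBasicBlockState turns the natural recursion over the dominator tree into an explicit list of frames, copying the value map for every dominated child except the last (which destructively inherits its parent's map). Stripped of that map-sharing trick, the control flow is ordinary iterative pre-order DFS; a toy sketch with hypothetical node types:

    #include <stdio.h>
    #include <vector>

    struct Node {
      int id;
      std::vector<Node*> dominated;  // children in the dominator tree
    };

    // Pre-order walk with an explicit stack, the same shape as
    // HGlobalValueNumberer::AnalyzeGraph without the HValueMap reuse.
    void VisitDominatorTree(Node* root) {
      struct Frame { Node* node; size_t next_child; };
      std::vector<Frame> stack;
      printf("analyze B%d\n", root->id);  // process on first entry
      Frame first = { root, 0 };
      stack.push_back(first);
      while (!stack.empty()) {
        Frame& top = stack.back();
        if (top.next_child < top.node->dominated.size()) {
          Node* child = top.node->dominated[top.next_child++];
          printf("analyze B%d\n", child->id);
          Frame next = { child, 0 };
          stack.push_back(next);  // "push()" in GvnBasicBlockState
        } else {
          stack.pop_back();  // all children done: "pop()" / backtrack
        }
      }
    }
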
diff --git a/deps/v8/src/hydrogen-gvn.h b/deps/v8/src/hydrogen-gvn.h
new file mode 100644
index 0000000000..c39765a1ee
--- /dev/null
+++ b/deps/v8/src/hydrogen-gvn.h
@@ -0,0 +1,123 @@
+// Copyright 2013 the V8 project authors. All rights reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following
+// disclaimer in the documentation and/or other materials provided
+// with the distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived
+// from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+#ifndef V8_HYDROGEN_GVN_H_
+#define V8_HYDROGEN_GVN_H_
+
+#include "hydrogen.h"
+#include "hydrogen-instructions.h"
+#include "compiler.h"
+#include "zone.h"
+
+namespace v8 {
+namespace internal {
+
+// Simple sparse set with O(1) add, contains, and clear.
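+// Membership: n is in the set iff sparse_[n] is a valid index into dense_
+// and dense_[sparse_[n]] == n. Clear() only resets length_, so stale
+// sparse_ entries simply fail this round-trip test.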
+class SparseSet {
+ public:
+ SparseSet(Zone* zone, int capacity)
+ : capacity_(capacity),
+ length_(0),
+ dense_(zone->NewArray<int>(capacity)),
+ sparse_(zone->NewArray<int>(capacity)) {
+#ifndef NVALGRIND
+ // Initialize the sparse array to make valgrind happy.
+ memset(sparse_, 0, sizeof(sparse_[0]) * capacity);
+#endif
+ }
+
+ bool Contains(int n) const {
+ ASSERT(0 <= n && n < capacity_);
+ int d = sparse_[n];
+ return 0 <= d && d < length_ && dense_[d] == n;
+ }
+
+ bool Add(int n) {
+ if (Contains(n)) return false;
+ dense_[length_] = n;
+ sparse_[n] = length_;
+ ++length_;
+ return true;
+ }
+
+ void Clear() { length_ = 0; }
+
+ private:
+ int capacity_;
+ int length_;
+ int* dense_;
+ int* sparse_;
+
+ DISALLOW_COPY_AND_ASSIGN(SparseSet);
+};
+
+
+class HGlobalValueNumberer BASE_EMBEDDED {
+ public:
+ HGlobalValueNumberer(HGraph* graph, CompilationInfo* info);
+
+  // Returns true if values with side effects were removed.
+ bool Analyze();
+
+ private:
+ GVNFlagSet CollectSideEffectsOnPathsToDominatedBlock(
+ HBasicBlock* dominator,
+ HBasicBlock* dominated);
+ void AnalyzeGraph();
+ void ComputeBlockSideEffects();
+ void LoopInvariantCodeMotion();
+ void ProcessLoopBlock(HBasicBlock* block,
+ HBasicBlock* before_loop,
+ GVNFlagSet loop_kills,
+ GVNFlagSet* accumulated_first_time_depends,
+ GVNFlagSet* accumulated_first_time_changes);
+ bool AllowCodeMotion();
+ bool ShouldMove(HInstruction* instr, HBasicBlock* loop_header);
+
+ HGraph* graph() { return graph_; }
+ CompilationInfo* info() { return info_; }
+ Zone* zone() const { return graph_->zone(); }
+
+ HGraph* graph_;
+ CompilationInfo* info_;
+ bool removed_side_effects_;
+
+ // A map of block IDs to their side effects.
+ ZoneList<GVNFlagSet> block_side_effects_;
+
+ // A map of loop header block IDs to their loop's side effects.
+ ZoneList<GVNFlagSet> loop_side_effects_;
+
+ // Used when collecting side effects on paths from dominator to
+ // dominated.
+ SparseSet visited_on_paths_;
+};
+
+
+} } // namespace v8::internal
+
+#endif // V8_HYDROGEN_GVN_H_
diff --git a/deps/v8/src/hydrogen-instructions.cc b/deps/v8/src/hydrogen-instructions.cc
index b7473879df..b36706b49b 100644
--- a/deps/v8/src/hydrogen-instructions.cc
+++ b/deps/v8/src/hydrogen-instructions.cc
@@ -108,10 +108,12 @@ Representation HValue::RepresentationFromUses() {
int tagged_count = use_count[Representation::kTagged];
int double_count = use_count[Representation::kDouble];
int int32_count = use_count[Representation::kInteger32];
+ int smi_count = use_count[Representation::kSmi];
if (tagged_count > 0) return Representation::Tagged();
if (double_count > 0) return Representation::Double();
if (int32_count > 0) return Representation::Integer32();
+ if (smi_count > 0) return Representation::Smi();
return Representation::None();
}
@@ -122,20 +124,9 @@ void HValue::UpdateRepresentation(Representation new_rep,
const char* reason) {
Representation r = representation();
if (new_rep.is_more_general_than(r)) {
- // When an HConstant is marked "not convertible to integer", then
- // never try to represent it as an integer.
- if (new_rep.IsInteger32() && !IsConvertibleToInteger()) {
- new_rep = Representation::Tagged();
- if (FLAG_trace_representation) {
- PrintF("Changing #%d %s representation %s -> %s because it's NCTI"
- " (%s want i)\n",
- id(), Mnemonic(), r.Mnemonic(), new_rep.Mnemonic(), reason);
- }
- } else {
- if (FLAG_trace_representation) {
- PrintF("Changing #%d %s representation %s -> %s based on %s\n",
- id(), Mnemonic(), r.Mnemonic(), new_rep.Mnemonic(), reason);
- }
+ if (FLAG_trace_representation) {
+ PrintF("Changing #%d %s representation %s -> %s based on %s\n",
+ id(), Mnemonic(), r.Mnemonic(), new_rep.Mnemonic(), reason);
}
ChangeRepresentation(new_rep);
AddDependantsToWorklist(h_infer);
@@ -537,6 +528,17 @@ bool HValue::CheckUsesForFlag(Flag f) {
}
+bool HValue::HasAtLeastOneUseWithFlagAndNoneWithout(Flag f) {
+ bool return_value = false;
+ for (HUseIterator it(uses()); !it.Done(); it.Advance()) {
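+    // HSimulate uses only record environment state for deoptimization, so
+    // they are not counted as real uses of the value here.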
+ if (it.value()->IsSimulate()) continue;
+ if (!it.value()->CheckFlag(f)) return false;
+ return_value = true;
+ }
+ return return_value;
+}
+
+
HUseIterator::HUseIterator(HUseListNode* head) : next_(head) {
Advance();
}
@@ -987,6 +989,11 @@ void HDummyUse::PrintDataTo(StringStream* stream) {
}
+void HEnvironmentMarker::PrintDataTo(StringStream* stream) {
+ stream->Add("%s var[%d]", kind() == BIND ? "bind" : "lookup", index());
+}
+
+
void HUnaryCall::PrintDataTo(StringStream* stream) {
value()->PrintNameTo(stream);
stream->Add(" ");
@@ -1062,6 +1069,7 @@ void HBoundsCheck::ApplyIndexChange() {
block()->graph()->GetInvalidContext(), current_index, add_offset);
add->InsertBefore(this);
add->AssumeRepresentation(index()->representation());
+ add->ClearFlag(kCanOverflow);
current_index = add;
}
@@ -1140,19 +1148,17 @@ void HBoundsCheck::PrintDataTo(StringStream* stream) {
void HBoundsCheck::InferRepresentation(HInferRepresentation* h_infer) {
ASSERT(CheckFlag(kFlexibleRepresentation));
Representation r;
- HValue* actual_length = length()->ActualValue();
HValue* actual_index = index()->ActualValue();
- if (key_mode_ == DONT_ALLOW_SMI_KEY ||
- !actual_length->representation().IsTagged()) {
+ HValue* actual_length = length()->ActualValue();
+ Representation index_rep = actual_index->representation();
+ if (!actual_length->representation().IsSmiOrTagged()) {
r = Representation::Integer32();
- } else if (actual_index->representation().IsTagged() ||
- (actual_index->IsConstant() &&
- HConstant::cast(actual_index)->HasSmiValue())) {
- // If the index is tagged, or a constant that holds a Smi, allow the length
- // to be tagged, since it is usually already tagged from loading it out of
- // the length field of a JSArray. This allows for direct comparison without
- // untagging.
- r = Representation::Tagged();
+ } else if ((index_rep.IsTagged() && actual_index->type().IsSmi()) ||
+ index_rep.IsSmi()) {
+ // If the index is smi, allow the length to be smi, since it is usually
+ // already smi from loading it out of the length field of a JSArray. This
+ // allows for direct comparison without untagging.
+ r = Representation::Smi();
} else {
r = Representation::Integer32();
}
@@ -1314,6 +1320,30 @@ const char* HUnaryMathOperation::OpName() const {
}
+Range* HUnaryMathOperation::InferRange(Zone* zone) {
+ Representation r = representation();
+ if (r.IsSmiOrInteger32() && value()->HasRange()) {
+ if (op() == kMathAbs) {
+ int upper = value()->range()->upper();
+ int lower = value()->range()->lower();
+ bool spans_zero = value()->range()->CanBeZero();
+ // Math.abs(kMinInt) overflows its representation, on which the
+ // instruction deopts. Hence clamp it to kMaxInt.
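+      // E.g. an input range of [-10, 5] yields [0, 10] (the range can be
+      // zero), while [-10, -2] yields [2, 10].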
+ int abs_upper = upper == kMinInt ? kMaxInt : abs(upper);
+ int abs_lower = lower == kMinInt ? kMaxInt : abs(lower);
+ Range* result =
+ new(zone) Range(spans_zero ? 0 : Min(abs_lower, abs_upper),
+ Max(abs_lower, abs_upper));
+ // In case of Smi representation, clamp Math.abs(Smi::kMinValue) to
+ // Smi::kMaxValue.
+ if (r.IsSmi()) result->ClampToSmi();
+ return result;
+ }
+ }
+ return HValue::InferRange(zone);
+}
+
+
void HUnaryMathOperation::PrintDataTo(StringStream* stream) {
const char* name = OpName();
stream->Add("%s ", name);
@@ -1410,14 +1440,6 @@ HValue* HBitNot::Canonicalize() {
}
-HValue* HArithmeticBinaryOperation::Canonicalize() {
- if (representation().IsInteger32() && CheckUsesForFlag(kTruncatingToInt32)) {
- ClearFlag(kCanOverflow);
- }
- return this;
-}
-
-
static bool IsIdentityOperation(HValue* arg1, HValue* arg2, int32_t identity) {
return arg1->representation().IsSpecialization() &&
arg2->EqualsInteger32Constant(identity);
@@ -1427,13 +1449,13 @@ static bool IsIdentityOperation(HValue* arg1, HValue* arg2, int32_t identity) {
HValue* HAdd::Canonicalize() {
if (IsIdentityOperation(left(), right(), 0)) return left();
if (IsIdentityOperation(right(), left(), 0)) return right();
- return HArithmeticBinaryOperation::Canonicalize();
+ return this;
}
HValue* HSub::Canonicalize() {
if (IsIdentityOperation(left(), right(), 0)) return left();
- return HArithmeticBinaryOperation::Canonicalize();
+ return this;
}
@@ -1485,7 +1507,7 @@ void HChange::PrintDataTo(StringStream* stream) {
if (CanTruncateToInt32()) stream->Add(" truncating-int32");
if (CheckFlag(kBailoutOnMinusZero)) stream->Add(" -0?");
- if (CheckFlag(kDeoptimizeOnUndefined)) stream->Add(" deopt-on-undefined");
+ if (CheckFlag(kAllowUndefinedAsNaN)) stream->Add(" allow-undefined-as-nan");
}
@@ -1494,6 +1516,11 @@ HValue* HUnaryMathOperation::Canonicalize() {
// If the input is integer32 then we replace the floor instruction
// with its input. This happens before the representation changes are
// introduced.
+
+ // TODO(2205): The above comment is lying. All of this happens
+ // *after* representation changes are introduced. We should check
+ // for value->IsChange() and react accordingly if yes.
+
if (value()->representation().IsInteger32()) return value();
#if defined(V8_TARGET_ARCH_ARM) || defined(V8_TARGET_ARCH_IA32) || \
@@ -1687,7 +1714,7 @@ Range* HValue::InferRange(Zone* zone) {
Range* HChange::InferRange(Zone* zone) {
Range* input_range = value()->range();
if (from().IsInteger32() &&
- to().IsTagged() &&
+ to().IsSmiOrTagged() &&
!value()->CheckFlag(HInstruction::kUint32) &&
input_range != NULL && input_range->IsInSmiRange()) {
set_type(HType::Smi());
@@ -1734,11 +1761,13 @@ Range* HAdd::InferRange(Zone* zone) {
Range* a = left()->range();
Range* b = right()->range();
Range* res = a->Copy(zone);
- if (!res->AddAndCheckOverflow(b)) {
+ if (!res->AddAndCheckOverflow(b) ||
+ CheckFlag(kAllUsesTruncatingToInt32)) {
ClearFlag(kCanOverflow);
}
- bool m0 = a->CanBeMinusZero() && b->CanBeMinusZero();
- res->set_can_be_minus_zero(m0);
+ if (!CheckFlag(kAllUsesTruncatingToInt32)) {
+ res->set_can_be_minus_zero(a->CanBeMinusZero() && b->CanBeMinusZero());
+ }
return res;
} else {
return HValue::InferRange(zone);
@@ -1751,10 +1780,13 @@ Range* HSub::InferRange(Zone* zone) {
Range* a = left()->range();
Range* b = right()->range();
Range* res = a->Copy(zone);
- if (!res->SubAndCheckOverflow(b)) {
+ if (!res->SubAndCheckOverflow(b) ||
+ CheckFlag(kAllUsesTruncatingToInt32)) {
ClearFlag(kCanOverflow);
}
- res->set_can_be_minus_zero(a->CanBeMinusZero() && b->CanBeZero());
+ if (!CheckFlag(kAllUsesTruncatingToInt32)) {
+ res->set_can_be_minus_zero(a->CanBeMinusZero() && b->CanBeZero());
+ }
return res;
} else {
return HValue::InferRange(zone);
@@ -1768,11 +1800,16 @@ Range* HMul::InferRange(Zone* zone) {
Range* b = right()->range();
Range* res = a->Copy(zone);
if (!res->MulAndCheckOverflow(b)) {
+      // Clearing the kCanOverflow flag when kAllUsesTruncatingToInt32 is set
+ // would be wrong, because truncated integer multiplication is too
+ // precise and therefore not the same as converting to Double and back.
ClearFlag(kCanOverflow);
}
- bool m0 = (a->CanBeZero() && b->CanBeNegative()) ||
- (a->CanBeNegative() && b->CanBeZero());
- res->set_can_be_minus_zero(m0);
+ if (!CheckFlag(kAllUsesTruncatingToInt32)) {
+ bool m0 = (a->CanBeZero() && b->CanBeNegative()) ||
+ (a->CanBeNegative() && b->CanBeZero());
+ res->set_can_be_minus_zero(m0);
+ }
return res;
} else {
return HValue::InferRange(zone);
@@ -1785,12 +1822,14 @@ Range* HDiv::InferRange(Zone* zone) {
Range* a = left()->range();
Range* b = right()->range();
Range* result = new(zone) Range();
- if (a->CanBeMinusZero()) {
- result->set_can_be_minus_zero(true);
- }
+ if (!CheckFlag(kAllUsesTruncatingToInt32)) {
+ if (a->CanBeMinusZero()) {
+ result->set_can_be_minus_zero(true);
+ }
- if (a->CanBeZero() && b->CanBeNegative()) {
- result->set_can_be_minus_zero(true);
+ if (a->CanBeZero() && b->CanBeNegative()) {
+ result->set_can_be_minus_zero(true);
+ }
}
if (!a->Includes(kMinInt) || !b->Includes(-1)) {
@@ -1811,8 +1850,18 @@ Range* HMod::InferRange(Zone* zone) {
if (representation().IsInteger32()) {
Range* a = left()->range();
Range* b = right()->range();
- Range* result = new(zone) Range();
- if (a->CanBeMinusZero() || a->CanBeNegative()) {
+
+  // The magnitude of the result is bounded by the right operand. Note that
+  // apart from the cases involving kMinInt, the calculation below is the same
+  // as Max(Abs(b->lower()), Abs(b->upper())) - 1.
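+  // E.g. for b in [-5, 3], NegAbs yields -5 and -3, so the bound is
+  // -(-5 + 1) = 4 = Max(5, 3) - 1; since NegAbs(kMinInt) == kMinInt, the
+  // bound degrades gracefully to -(kMinInt + 1) = kMaxInt without overflow.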
+ int32_t positive_bound = -(Min(NegAbs(b->lower()), NegAbs(b->upper())) + 1);
+
+ // The result of the modulo operation has the sign of its left operand.
+ bool left_can_be_negative = a->CanBeMinusZero() || a->CanBeNegative();
+ Range* result = new(zone) Range(left_can_be_negative ? -positive_bound : 0,
+ a->CanBePositive() ? positive_bound : 0);
+
+ if (left_can_be_negative && !CheckFlag(kAllUsesTruncatingToInt32)) {
result->set_can_be_minus_zero(true);
}
@@ -1910,12 +1959,12 @@ void HPhi::PrintTo(StringStream* stream) {
value->PrintNameTo(stream);
stream->Add(" ");
}
- stream->Add(" uses:%d_%di_%dd_%dt",
+ stream->Add(" uses:%d_%ds_%di_%dd_%dt",
UseCount(),
+ smi_non_phi_uses() + smi_indirect_uses(),
int32_non_phi_uses() + int32_indirect_uses(),
double_non_phi_uses() + double_indirect_uses(),
tagged_non_phi_uses() + tagged_indirect_uses());
- if (!IsConvertibleToInteger()) stream->Add("_ncti");
PrintRangeTo(stream);
PrintTypeTo(stream);
stream->Add("]");
@@ -1990,8 +2039,9 @@ void HPhi::InitRealUses(int phi_id) {
void HPhi::AddNonPhiUsesFrom(HPhi* other) {
if (FLAG_trace_representation) {
- PrintF("adding to #%d Phi uses of #%d Phi: i%d d%d t%d\n",
+ PrintF("adding to #%d Phi uses of #%d Phi: s%d i%d d%d t%d\n",
id(), other->id(),
+ other->non_phi_uses_[Representation::kSmi],
other->non_phi_uses_[Representation::kInteger32],
other->non_phi_uses_[Representation::kDouble],
other->non_phi_uses_[Representation::kTagged]);
@@ -2016,8 +2066,9 @@ void HSimulate::MergeWith(ZoneList<HSimulate*>* list) {
ZoneList<HValue*>* from_values = &from->values_;
for (int i = 0; i < from_values->length(); ++i) {
if (from->HasAssignedIndexAt(i)) {
- AddAssignedValue(from->GetAssignedIndexAt(i),
- from_values->at(i));
+ int index = from->GetAssignedIndexAt(i);
+ if (HasValueForIndex(index)) continue;
+ AddAssignedValue(index, from_values->at(i));
} else {
if (pop_count_ > 0) {
pop_count_--;
@@ -2038,13 +2089,13 @@ void HSimulate::PrintDataTo(StringStream* stream) {
if (values_.length() > 0) {
if (pop_count_ > 0) stream->Add(" /");
for (int i = values_.length() - 1; i >= 0; --i) {
- if (i > 0) stream->Add(",");
if (HasAssignedIndexAt(i)) {
stream->Add(" var[%d] = ", GetAssignedIndexAt(i));
} else {
stream->Add(" push ");
}
values_[i]->PrintNameTo(stream);
+ if (i > 0) stream->Add(",");
}
}
}
@@ -2060,6 +2111,13 @@ void HDeoptimize::PrintDataTo(StringStream* stream) {
}
+void HEnterInlined::RegisterReturnTarget(HBasicBlock* return_target,
+ Zone* zone) {
+ ASSERT(return_target->IsInlineReturnTarget());
+ return_targets_.Add(return_target, zone);
+}
+
+
void HEnterInlined::PrintDataTo(StringStream* stream) {
SmartArrayPointer<char> name = function()->debug_name()->ToCString();
stream->Add("%s, id=%d", *name, function()->id().ToInt());
@@ -2075,6 +2133,7 @@ static bool IsInteger32(double value) {
HConstant::HConstant(Handle<Object> handle, Representation r)
: handle_(handle),
unique_id_(),
+ has_smi_value_(false),
has_int32_value_(false),
has_double_value_(false),
is_internalized_string_(false),
@@ -2088,21 +2147,13 @@ HConstant::HConstant(Handle<Object> handle, Representation r)
double n = handle_->Number();
has_int32_value_ = IsInteger32(n);
int32_value_ = DoubleToInt32(n);
+ has_smi_value_ = has_int32_value_ && Smi::IsValid(int32_value_);
double_value_ = n;
has_double_value_ = true;
} else {
type_from_value_ = HType::TypeFromValue(handle_);
is_internalized_string_ = handle_->IsInternalizedString();
}
- if (r.IsNone()) {
- if (has_int32_value_) {
- r = Representation::Integer32();
- } else if (has_double_value_) {
- r = Representation::Double();
- } else {
- r = Representation::Tagged();
- }
- }
Initialize(r);
}
@@ -2116,6 +2167,7 @@ HConstant::HConstant(Handle<Object> handle,
bool boolean_value)
: handle_(handle),
unique_id_(unique_id),
+ has_smi_value_(false),
has_int32_value_(false),
has_double_value_(false),
is_internalized_string_(is_internalize_string),
@@ -2142,6 +2194,7 @@ HConstant::HConstant(int32_t integer_value,
boolean_value_(integer_value != 0),
int32_value_(integer_value),
double_value_(FastI2D(integer_value)) {
+ has_smi_value_ = Smi::IsValid(int32_value_);
Initialize(r);
}
@@ -2159,11 +2212,23 @@ HConstant::HConstant(double double_value,
boolean_value_(double_value != 0 && !std::isnan(double_value)),
int32_value_(DoubleToInt32(double_value)),
double_value_(double_value) {
+ has_smi_value_ = has_int32_value_ && Smi::IsValid(int32_value_);
Initialize(r);
}
void HConstant::Initialize(Representation r) {
+ if (r.IsNone()) {
+ if (has_smi_value_) {
+ r = Representation::Smi();
+ } else if (has_int32_value_) {
+ r = Representation::Integer32();
+ } else if (has_double_value_) {
+ r = Representation::Double();
+ } else {
+ r = Representation::Tagged();
+ }
+ }
set_representation(r);
SetFlag(kUseGVN);
if (representation().IsInteger32()) {
@@ -2173,6 +2238,7 @@ void HConstant::Initialize(Representation r) {
HConstant* HConstant::CopyToRepresentation(Representation r, Zone* zone) const {
+ if (r.IsSmi() && !has_smi_value_) return NULL;
if (r.IsInteger32() && !has_int32_value_) return NULL;
if (r.IsDouble() && !has_double_value_) return NULL;
if (has_int32_value_) {
@@ -2247,10 +2313,6 @@ bool HBinaryOperation::IgnoreObservedOutputRepresentation(
current_rep.IsInteger32() &&
// Mul in Integer32 mode would be too precise.
!this->IsMul() &&
- // TODO(jkummerow): Remove blacklisting of Div when the Div
- // instruction has learned not to deopt when the remainder is
- // non-zero but all uses are truncating.
- !this->IsDiv() &&
CheckUsesForFlag(kTruncatingToInt32);
}
@@ -2301,7 +2363,37 @@ void HMathMinMax::InferRepresentation(HInferRepresentation* h_infer) {
Range* HBitwise::InferRange(Zone* zone) {
- if (op() == Token::BIT_XOR) return HValue::InferRange(zone);
+ if (op() == Token::BIT_XOR) {
+ if (left()->HasRange() && right()->HasRange()) {
+ // The maximum value has the high bit, and all bits below, set:
+ // (1 << high) - 1.
+ // If the range can be negative, the minimum int is a negative number with
+ // the high bit, and all bits below, unset:
+ // -(1 << high).
+ // If it cannot be negative, conservatively choose 0 as minimum int.
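+      // E.g. if both operands fit in the low three bits, so does their XOR,
+      // giving [0, 7], or [-8, 7] if either operand can be negative.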
+ int64_t left_upper = left()->range()->upper();
+ int64_t left_lower = left()->range()->lower();
+ int64_t right_upper = right()->range()->upper();
+ int64_t right_lower = right()->range()->lower();
+
+ if (left_upper < 0) left_upper = ~left_upper;
+ if (left_lower < 0) left_lower = ~left_lower;
+ if (right_upper < 0) right_upper = ~right_upper;
+ if (right_lower < 0) right_lower = ~right_lower;
+
+ int high = MostSignificantBit(
+ static_cast<uint32_t>(
+ left_upper | left_lower | right_upper | right_lower));
+
+ int64_t limit = 1;
+ limit <<= high;
+ int32_t min = (left()->range()->CanBeNegative() ||
+ right()->range()->CanBeNegative())
+ ? static_cast<int32_t>(-limit) : 0;
+ return new(zone) Range(min, static_cast<int32_t>(limit - 1));
+ }
+ return HValue::InferRange(zone);
+ }
const int32_t kDefaultMask = static_cast<int32_t>(0xffffffff);
int32_t left_mask = (left()->range() != NULL)
? left()->range()->Mask()
@@ -2442,18 +2534,22 @@ void HGoto::PrintDataTo(StringStream* stream) {
void HCompareIDAndBranch::InferRepresentation(HInferRepresentation* h_infer) {
- Representation rep = Representation::None();
Representation left_rep = left()->representation();
Representation right_rep = right()->representation();
- bool observed_integers =
- observed_input_representation(0).IsInteger32() &&
- observed_input_representation(1).IsInteger32();
- bool inputs_are_not_doubles =
- !left_rep.IsDouble() && !right_rep.IsDouble();
- if (observed_integers && inputs_are_not_doubles) {
- rep = Representation::Integer32();
+ Representation observed_left = observed_input_representation(0);
+ Representation observed_right = observed_input_representation(1);
+
+ Representation rep = Representation::None();
+ rep = rep.generalize(observed_left);
+ rep = rep.generalize(observed_right);
+ if (rep.IsNone() || rep.IsSmiOrInteger32()) {
+ if (!left_rep.IsTagged()) rep = rep.generalize(left_rep);
+ if (!right_rep.IsTagged()) rep = rep.generalize(right_rep);
} else {
rep = Representation::Double();
+ }
+
+ if (rep.IsDouble()) {
// According to the ES5 spec (11.9.3, 11.8.5), Equality comparisons (==, ===
// and !=) have special handling of undefined, e.g. undefined == undefined
// is 'true'. Relational comparisons have a different semantic, first
@@ -2467,8 +2563,8 @@ void HCompareIDAndBranch::InferRepresentation(HInferRepresentation* h_infer) {
// (false). Therefore, any comparisons other than ordered relational
// comparisons must cause a deopt when one of their arguments is undefined.
// See also v8:1434
- if (!Token::IsOrderedRelationalCompareOp(token_)) {
- SetFlag(kDeoptimizeOnUndefined);
+ if (Token::IsOrderedRelationalCompareOp(token_)) {
+ SetFlag(kAllowUndefinedAsNaN);
}
}
ChangeRepresentation(rep);
@@ -2482,7 +2578,7 @@ void HParameter::PrintDataTo(StringStream* stream) {
void HLoadNamedField::PrintDataTo(StringStream* stream) {
object()->PrintNameTo(stream);
- stream->Add(" @%d%s", offset(), is_in_object() ? "[in-object]" : "");
+ access_.PrintTo(stream);
if (HasTypeCheck()) {
stream->Add(" ");
typecheck()->PrintNameTo(stream);
@@ -2710,11 +2806,14 @@ bool HLoadKeyed::UsesMustHandleHole() const {
return false;
}
+ // Holes are only returned as tagged values.
+ if (!representation().IsTagged()) {
+ return false;
+ }
+
for (HUseIterator it(uses()); !it.Done(); it.Advance()) {
HValue* use = it.value();
- if (!use->IsChange()) {
- return false;
- }
+ if (!use->IsChange()) return false;
}
return true;
@@ -2728,7 +2827,7 @@ bool HLoadKeyed::AllUsesCanTreatHoleAsNaN() const {
for (HUseIterator it(uses()); !it.Done(); it.Advance()) {
HValue* use = it.value();
- if (use->CheckFlag(HValue::kDeoptimizeOnUndefined)) {
+ if (!use->CheckFlag(HValue::kAllowUndefinedAsNaN)) {
return false;
}
}
@@ -2804,11 +2903,9 @@ void HStoreNamedGeneric::PrintDataTo(StringStream* stream) {
void HStoreNamedField::PrintDataTo(StringStream* stream) {
object()->PrintNameTo(stream);
- stream->Add(".");
- stream->Add(*String::cast(*name())->ToCString());
+ access_.PrintTo(stream);
stream->Add(" = ");
value()->PrintNameTo(stream);
- stream->Add(" @%d%s", offset(), is_in_object() ? "[in-object]" : "");
if (NeedsWriteBarrier()) {
stream->Add(" (write-barrier)");
}
@@ -2941,20 +3038,6 @@ HType HCheckNonSmi::CalculateInferredType() {
}
-HType HCheckSmi::CalculateInferredType() {
- return HType::Smi();
-}
-
-
-void HCheckSmiOrInt32::InferRepresentation(HInferRepresentation* h_infer) {
- ASSERT(CheckFlag(kFlexibleRepresentation));
- ASSERT(UseCount() == 1);
- HUseIterator use = uses();
- Representation r = use.value()->RequiredInputRepresentation(use.index());
- UpdateRepresentation(r, h_infer, "checksmiorint32");
-}
-
-
HType HPhi::CalculateInferredType() {
HType result = HType::Uninitialized();
for (int i = 0; i < OperandCount(); ++i) {
@@ -3026,13 +3109,18 @@ HType HUnaryMathOperation::CalculateInferredType() {
}
-HType HStringCharFromCode::CalculateInferredType() {
- return HType::String();
+Representation HUnaryMathOperation::RepresentationFromInputs() {
+ Representation rep = representation();
+  // If the actual input representation is more general than what we have
+  // so far, but not Tagged, use that representation instead.
+ Representation input_rep = value()->representation();
+ if (!input_rep.IsTagged()) rep = rep.generalize(input_rep);
+ return rep;
}
-HType HAllocateObject::CalculateInferredType() {
- return HType::JSObject();
+HType HStringCharFromCode::CalculateInferredType() {
+ return HType::String();
}
@@ -3216,7 +3304,8 @@ HInstruction* HStringAdd::New(
HConstant* c_right = HConstant::cast(right);
HConstant* c_left = HConstant::cast(left);
if (c_left->HasStringValue() && c_right->HasStringValue()) {
- return new(zone) HConstant(FACTORY->NewConsString(c_left->StringValue(),
+ Factory* factory = Isolate::Current()->factory();
+ return new(zone) HConstant(factory->NewConsString(c_left->StringValue(),
c_right->StringValue()),
Representation::Tagged());
}
@@ -3249,7 +3338,7 @@ HInstruction* HStringLength::New(Zone* zone, HValue* string) {
if (FLAG_fold_constants && string->IsConstant()) {
HConstant* c_string = HConstant::cast(string);
if (c_string->HasStringValue()) {
- return H_CONSTANT_INT32(c_string->StringValue()->length());
+ return new(zone) HConstant(c_string->StringValue()->length());
}
}
return new(zone) HStringLength(string);
@@ -3371,8 +3460,12 @@ HInstruction* HMathMinMax::New(
}
-HInstruction* HMod::New(
- Zone* zone, HValue* context, HValue* left, HValue* right) {
+HInstruction* HMod::New(Zone* zone,
+ HValue* context,
+ HValue* left,
+ HValue* right,
+ bool has_fixed_right_arg,
+ int fixed_right_arg_value) {
if (FLAG_fold_constants && left->IsConstant() && right->IsConstant()) {
HConstant* c_left = HConstant::cast(left);
HConstant* c_right = HConstant::cast(right);
@@ -3391,7 +3484,11 @@ HInstruction* HMod::New(
}
}
}
- return new(zone) HMod(context, left, right);
+ return new(zone) HMod(context,
+ left,
+ right,
+ has_fixed_right_arg,
+ fixed_right_arg_value);
}
@@ -3555,45 +3652,11 @@ void HPhi::InferRepresentation(HInferRepresentation* h_infer) {
Representation HPhi::RepresentationFromInputs() {
- bool double_occurred = false;
- bool int32_occurred = false;
+ Representation r = Representation::None();
for (int i = 0; i < OperandCount(); ++i) {
- HValue* value = OperandAt(i);
- if (value->IsUnknownOSRValue()) {
- HPhi* hint_value = HUnknownOSRValue::cast(value)->incoming_value();
- if (hint_value != NULL) {
- Representation hint = hint_value->representation();
- if (hint.IsTagged()) return hint;
- if (hint.IsDouble()) double_occurred = true;
- if (hint.IsInteger32()) int32_occurred = true;
- }
- continue;
- }
- if (value->representation().IsDouble()) double_occurred = true;
- if (value->representation().IsInteger32()) int32_occurred = true;
- if (value->representation().IsTagged()) {
- if (value->IsConstant()) {
- HConstant* constant = HConstant::cast(value);
- if (constant->IsConvertibleToInteger()) {
- int32_occurred = true;
- } else if (constant->HasNumberValue()) {
- double_occurred = true;
- } else {
- return Representation::Tagged();
- }
- } else {
- if (value->IsPhi() && !IsConvertibleToInteger()) {
- return Representation::Tagged();
- }
- }
- }
+ r = r.generalize(OperandAt(i)->KnownOptimalRepresentation());
}
-
- if (double_occurred) return Representation::Double();
-
- if (int32_occurred) return Representation::Integer32();
-
- return Representation::None();
+ return r;
}
@@ -3649,12 +3712,6 @@ void HSimulate::Verify() {
}
-void HCheckSmi::Verify() {
- HInstruction::Verify();
- ASSERT(HasNoUses());
-}
-
-
void HCheckNonSmi::Verify() {
HInstruction::Verify();
ASSERT(HasNoUses());
@@ -3668,4 +3725,140 @@ void HCheckFunction::Verify() {
#endif
+
+HObjectAccess HObjectAccess::ForFixedArrayHeader(int offset) {
+ ASSERT(offset >= 0);
+ ASSERT(offset < FixedArray::kHeaderSize);
+ if (offset == FixedArray::kLengthOffset) return ForFixedArrayLength();
+ return HObjectAccess(kInobject, offset);
+}
+
+
+HObjectAccess HObjectAccess::ForJSObjectOffset(int offset) {
+ ASSERT(offset >= 0);
+ Portion portion = kInobject;
+
+ if (offset == JSObject::kElementsOffset) {
+ portion = kElementsPointer;
+ } else if (offset == JSObject::kMapOffset) {
+ portion = kMaps;
+ }
+ return HObjectAccess(portion, offset, Handle<String>::null());
+}
+
+
+HObjectAccess HObjectAccess::ForJSArrayOffset(int offset) {
+ ASSERT(offset >= 0);
+ Portion portion = kInobject;
+
+ if (offset == JSObject::kElementsOffset) {
+ portion = kElementsPointer;
+ } else if (offset == JSArray::kLengthOffset) {
+ portion = kArrayLengths;
+ } else if (offset == JSObject::kMapOffset) {
+ portion = kMaps;
+ }
+ return HObjectAccess(portion, offset, Handle<String>::null());
+}
+
+
+HObjectAccess HObjectAccess::ForBackingStoreOffset(int offset) {
+ ASSERT(offset >= 0);
+ return HObjectAccess(kBackingStore, offset, Handle<String>::null());
+}
+
+
+HObjectAccess HObjectAccess::ForField(Handle<Map> map,
+ LookupResult *lookup, Handle<String> name) {
+ ASSERT(lookup->IsField() || lookup->IsTransitionToField(*map));
+ int index;
+ if (lookup->IsField()) {
+ index = lookup->GetLocalFieldIndexFromMap(*map);
+ } else {
+ Map* transition = lookup->GetTransitionMapFromMap(*map);
+ int descriptor = transition->LastAdded();
+ index = transition->instance_descriptors()->GetFieldIndex(descriptor) -
+ map->inobject_properties();
+ }
+ if (index < 0) {
+ // Negative property indices are in-object properties, indexed
+ // from the end of the fixed part of the object.
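+    // E.g. index -1 resolves to the last in-object word, at offset
+    // map->instance_size() - kPointerSize.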
+ int offset = (index * kPointerSize) + map->instance_size();
+ return HObjectAccess(kInobject, offset);
+ } else {
+ // Non-negative property indices are in the properties array.
+ int offset = (index * kPointerSize) + FixedArray::kHeaderSize;
+ return HObjectAccess(kBackingStore, offset, name);
+ }
+}
+
+
+void HObjectAccess::SetGVNFlags(HValue *instr, bool is_store) {
+ // set the appropriate GVN flags for a given load or store instruction
+ if (is_store) {
+ // track dominating allocations in order to eliminate write barriers
+ instr->SetGVNFlag(kDependsOnNewSpacePromotion);
+ instr->SetFlag(HValue::kTrackSideEffectDominators);
+ } else {
+ // try to GVN loads, but don't hoist above map changes
+ instr->SetFlag(HValue::kUseGVN);
+ instr->SetGVNFlag(kDependsOnMaps);
+ }
+
+ switch (portion()) {
+ case kArrayLengths:
+ instr->SetGVNFlag(is_store
+ ? kChangesArrayLengths : kDependsOnArrayLengths);
+ break;
+ case kInobject:
+ instr->SetGVNFlag(is_store
+ ? kChangesInobjectFields : kDependsOnInobjectFields);
+ break;
+ case kDouble:
+ instr->SetGVNFlag(is_store
+ ? kChangesDoubleFields : kDependsOnDoubleFields);
+ break;
+ case kBackingStore:
+ instr->SetGVNFlag(is_store
+ ? kChangesBackingStoreFields : kDependsOnBackingStoreFields);
+ break;
+ case kElementsPointer:
+ instr->SetGVNFlag(is_store
+ ? kChangesElementsPointer : kDependsOnElementsPointer);
+ break;
+ case kMaps:
+ instr->SetGVNFlag(is_store
+ ? kChangesMaps : kDependsOnMaps);
+ break;
+ }
+}
+
+
+void HObjectAccess::PrintTo(StringStream* stream) {
+ stream->Add(".");
+
+ switch (portion()) {
+ case kArrayLengths:
+ stream->Add("%length");
+ break;
+ case kElementsPointer:
+ stream->Add("%elements");
+ break;
+ case kMaps:
+ stream->Add("%map");
+ break;
+ case kDouble: // fall through
+ case kInobject:
+ if (!name_.is_null()) stream->Add(*String::cast(*name_)->ToCString());
+ stream->Add("[in-object]");
+ break;
+ case kBackingStore:
+ if (!name_.is_null()) stream->Add(*String::cast(*name_)->ToCString());
+ stream->Add("[backing-store]");
+ break;
+ }
+
+ stream->Add("@%d", offset());
+}
+
} } // namespace v8::internal
diff --git a/deps/v8/src/hydrogen-instructions.h b/deps/v8/src/hydrogen-instructions.h
index d06e3184f8..82ed261eb9 100644
--- a/deps/v8/src/hydrogen-instructions.h
+++ b/deps/v8/src/hydrogen-instructions.h
@@ -54,6 +54,7 @@ class LChunkBuilder;
#define HYDROGEN_ABSTRACT_INSTRUCTION_LIST(V) \
+ V(ArithmeticBinaryOperation) \
V(BinaryOperation) \
V(BitwiseBinaryOperation) \
V(ControlInstruction) \
@@ -65,7 +66,6 @@ class LChunkBuilder;
V(AccessArgumentsAt) \
V(Add) \
V(Allocate) \
- V(AllocateObject) \
V(ApplyArguments) \
V(ArgumentsElements) \
V(ArgumentsLength) \
@@ -92,8 +92,6 @@ class LChunkBuilder;
V(CheckMaps) \
V(CheckNonSmi) \
V(CheckPrototypeMaps) \
- V(CheckSmi) \
- V(CheckSmiOrInt32) \
V(ClampToUint8) \
V(ClassOfTestAndBranch) \
V(CompareIDAndBranch) \
@@ -111,6 +109,7 @@ class LChunkBuilder;
V(DummyUse) \
V(ElementsKind) \
V(EnterInlined) \
+ V(EnvironmentMarker) \
V(FixedArrayBaseLength) \
V(ForceRepresentation) \
V(FunctionLiteral) \
@@ -265,6 +264,7 @@ class Range: public ZoneObject {
bool CanBeMinusZero() const { return CanBeZero() && can_be_minus_zero_; }
bool CanBeZero() const { return upper_ >= 0 && lower_ <= 0; }
bool CanBeNegative() const { return lower_ < 0; }
+ bool CanBePositive() const { return upper_ > 0; }
bool Includes(int value) const { return lower_ <= value && upper_ >= value; }
bool IsMostGeneric() const {
return lower_ == kMinInt && upper_ == kMaxInt && CanBeMinusZero();
@@ -272,6 +272,10 @@ class Range: public ZoneObject {
bool IsInSmiRange() const {
return lower_ >= Smi::kMinValue && upper_ <= Smi::kMaxValue;
}
+ void ClampToSmi() {
+ lower_ = Max(lower_, Smi::kMinValue);
+ upper_ = Min(upper_, Smi::kMaxValue);
+ }
void KeepOrder();
#ifdef DEBUG
void Verify() const;
@@ -791,9 +795,10 @@ class HValue: public ZoneObject {
kCanOverflow,
kBailoutOnMinusZero,
kCanBeDivByZero,
- kDeoptimizeOnUndefined,
+ kAllowUndefinedAsNaN,
kIsArguments,
kTruncatingToInt32,
+ kAllUsesTruncatingToInt32,
// Set after an instruction is killed.
kIsDead,
// Instructions that are allowed to produce full range unsigned integer
@@ -812,7 +817,13 @@ class HValue: public ZoneObject {
kHasNoObservableSideEffects,
// Indicates the instruction is live during dead code elimination.
kIsLive,
- kLastFlag = kIDefsProcessingDone
+
+ // HEnvironmentMarkers are deleted before dead code
+ // elimination takes place, so they can repurpose the kIsLive flag:
+ kEndsLiveRange = kIsLive,
+
+ // TODO(everyone): Don't forget to update this!
+ kLastFlag = kIsLive
};
STATIC_ASSERT(kLastFlag < kBitsPerInt);
@@ -886,7 +897,17 @@ class HValue: public ZoneObject {
}
virtual void AssumeRepresentation(Representation r);
- virtual bool IsConvertibleToInteger() const { return true; }
+ virtual Representation KnownOptimalRepresentation() {
+ Representation r = representation();
+ if (r.IsTagged()) {
+ HType t = type();
+ if (t.IsSmi()) return Representation::Smi();
+ if (t.IsHeapNumber()) return Representation::Double();
+ if (t.IsHeapObject()) return r;
+ return Representation::None();
+ }
+ return r;
+ }
HType type() const { return type_; }
void set_type(HType new_type) {
@@ -977,6 +998,9 @@ class HValue: public ZoneObject {
// Returns true if the flag specified is set for all uses, false otherwise.
bool CheckUsesForFlag(Flag f);
+ // Returns true if the flag specified is set for all uses, and this set
+ // of uses is non-empty.
+ bool HasAtLeastOneUseWithFlagAndNoneWithout(Flag f);
GVNFlagSet gvn_flags() const { return gvn_flags_; }
void SetGVNFlag(GVNFlag f) { gvn_flags_.Add(f); }
@@ -1020,7 +1044,13 @@ class HValue: public ZoneObject {
}
Range* range() const { return range_; }
+ // TODO(svenpanne) We should really use the null object pattern here.
bool HasRange() const { return range_ != NULL; }
+ bool CanBeNegative() const { return !HasRange() || range()->CanBeNegative(); }
+ bool CanBeZero() const { return !HasRange() || range()->CanBeZero(); }
+ bool RangeCanInclude(int value) const {
+ return !HasRange() || range()->Includes(value);
+ }
void AddNewRange(Range* r, Zone* zone);
void RemoveLastAddedRange();
void ComputeInitialRange(Zone* zone);
@@ -1477,8 +1507,13 @@ class HDebugBreak: public HTemplateInstruction<0> {
class HDeoptimize: public HControlInstruction {
public:
- HDeoptimize(int environment_length, Zone* zone)
- : values_(environment_length, zone) { }
+ HDeoptimize(int environment_length,
+ int first_local_index,
+ int first_expression_index,
+ Zone* zone)
+ : values_(environment_length, zone),
+ first_local_index_(first_local_index),
+ first_expression_index_(first_expression_index) { }
virtual Representation RequiredInputRepresentation(int index) {
return Representation::None();
@@ -1501,6 +1536,8 @@ class HDeoptimize: public HControlInstruction {
values_.Add(NULL, zone);
SetOperandAt(values_.length() - 1, value);
}
+ int first_local_index() { return first_local_index_; }
+ int first_expression_index() { return first_expression_index_; }
DECLARE_CONCRETE_INSTRUCTION(Deoptimize)
@@ -1516,6 +1553,8 @@ class HDeoptimize: public HControlInstruction {
private:
ZoneList<HValue*> values_;
+ int first_local_index_;
+ int first_expression_index_;
};
@@ -1563,6 +1602,9 @@ class HBranch: public HUnaryControlInstruction {
}
explicit HBranch(HValue* value)
: HUnaryControlInstruction(value, NULL, NULL) { }
+ HBranch(HValue* value, ToBooleanStub::Types expected_input_types)
+ : HUnaryControlInstruction(value, NULL, NULL),
+ expected_input_types_(expected_input_types) { }
virtual Representation RequiredInputRepresentation(int index) {
@@ -1713,15 +1755,16 @@ class HChange: public HUnaryOperation {
HChange(HValue* value,
Representation to,
bool is_truncating,
- bool deoptimize_on_undefined)
+ bool allow_undefined_as_nan)
: HUnaryOperation(value) {
- ASSERT(!value->representation().IsNone() && !to.IsNone());
+ ASSERT(!value->representation().IsNone());
+ ASSERT(!to.IsNone());
ASSERT(!value->representation().Equals(to));
set_representation(to);
SetFlag(kUseGVN);
- if (deoptimize_on_undefined) SetFlag(kDeoptimizeOnUndefined);
+ if (allow_undefined_as_nan) SetFlag(kAllowUndefinedAsNaN);
if (is_truncating) SetFlag(kTruncatingToInt32);
- if (value->type().IsSmi()) {
+ if (value->representation().IsSmi() || value->type().IsSmi()) {
set_type(HType::Smi());
} else {
set_type(HType::TaggedNumber());
@@ -1735,8 +1778,8 @@ class HChange: public HUnaryOperation {
Representation from() const { return value()->representation(); }
Representation to() const { return representation(); }
- bool deoptimize_on_undefined() const {
- return CheckFlag(kDeoptimizeOnUndefined);
+ bool allow_undefined_as_nan() const {
+ return CheckFlag(kAllowUndefinedAsNaN);
}
bool deoptimize_on_minus_zero() const {
return CheckFlag(kBailoutOnMinusZero);
@@ -1766,6 +1809,7 @@ class HClampToUint8: public HUnaryOperation {
explicit HClampToUint8(HValue* value)
: HUnaryOperation(value) {
set_representation(Representation::Integer32());
+ SetFlag(kAllowUndefinedAsNaN);
SetFlag(kUseGVN);
}
@@ -1827,6 +1871,12 @@ class HSimulate: public HInstruction {
void AddPushedValue(HValue* value) {
AddValue(kNoIndex, value);
}
+ int ToOperandIndex(int environment_index) {
+ for (int i = 0; i < assigned_indexes_.length(); ++i) {
+ if (assigned_indexes_[i] == environment_index) return i;
+ }
+ return -1;
+ }
virtual int OperandCount() { return values_.length(); }
virtual HValue* OperandAt(int index) const { return values_[index]; }
@@ -1841,6 +1891,8 @@ class HSimulate: public HInstruction {
#ifdef DEBUG
virtual void Verify();
+ void set_closure(Handle<JSFunction> closure) { closure_ = closure; }
+ Handle<JSFunction> closure() const { return closure_; }
#endif
protected:
@@ -1858,12 +1910,64 @@ class HSimulate: public HInstruction {
// use lists are correctly updated.
SetOperandAt(values_.length() - 1, value);
}
+ bool HasValueForIndex(int index) {
+ for (int i = 0; i < assigned_indexes_.length(); ++i) {
+ if (assigned_indexes_[i] == index) return true;
+ }
+ return false;
+ }
BailoutId ast_id_;
int pop_count_;
ZoneList<HValue*> values_;
ZoneList<int> assigned_indexes_;
Zone* zone_;
RemovableSimulate removable_;
+
+#ifdef DEBUG
+ Handle<JSFunction> closure_;
+#endif
+};
+
+
+class HEnvironmentMarker: public HTemplateInstruction<1> {
+ public:
+ enum Kind { BIND, LOOKUP };
+
+ HEnvironmentMarker(Kind kind, int index)
+ : kind_(kind), index_(index), next_simulate_(NULL) { }
+
+ Kind kind() { return kind_; }
+ int index() { return index_; }
+ HSimulate* next_simulate() { return next_simulate_; }
+ void set_next_simulate(HSimulate* simulate) {
+ next_simulate_ = simulate;
+ }
+
+ virtual Representation RequiredInputRepresentation(int index) {
+ return Representation::None();
+ }
+
+ virtual void PrintDataTo(StringStream* stream);
+
+#ifdef DEBUG
+ void set_closure(Handle<JSFunction> closure) {
+ ASSERT(closure_.is_null());
+ ASSERT(!closure.is_null());
+ closure_ = closure;
+ }
+ Handle<JSFunction> closure() const { return closure_; }
+#endif
+
+ DECLARE_CONCRETE_INSTRUCTION(EnvironmentMarker);
+
+ private:
+ Kind kind_;
+ int index_;
+ HSimulate* next_simulate_;
+
+#ifdef DEBUG
+ Handle<JSFunction> closure_;
+#endif
};
@@ -1920,7 +2024,8 @@ class HEnterInlined: public HTemplateInstruction<0> {
InliningKind inlining_kind,
Variable* arguments_var,
ZoneList<HValue*>* arguments_values,
- bool undefined_receiver)
+ bool undefined_receiver,
+ Zone* zone)
: closure_(closure),
arguments_count_(arguments_count),
arguments_pushed_(false),
@@ -1928,9 +2033,13 @@ class HEnterInlined: public HTemplateInstruction<0> {
inlining_kind_(inlining_kind),
arguments_var_(arguments_var),
arguments_values_(arguments_values),
- undefined_receiver_(undefined_receiver) {
+ undefined_receiver_(undefined_receiver),
+ return_targets_(2, zone) {
}
+ void RegisterReturnTarget(HBasicBlock* return_target, Zone* zone);
+ ZoneList<HBasicBlock*>* return_targets() { return &return_targets_; }
+
virtual void PrintDataTo(StringStream* stream);
Handle<JSFunction> closure() const { return closure_; }
@@ -1959,6 +2068,7 @@ class HEnterInlined: public HTemplateInstruction<0> {
Variable* arguments_var_;
ZoneList<HValue*>* arguments_values_;
bool undefined_receiver_;
+ ZoneList<HBasicBlock*> return_targets_;
};
@@ -2416,7 +2526,7 @@ class HFixedArrayBaseLength: public HUnaryOperation {
public:
explicit HFixedArrayBaseLength(HValue* value) : HUnaryOperation(value) {
set_type(HType::Smi());
- set_representation(Representation::Tagged());
+ set_representation(Representation::Smi());
SetFlag(kUseGVN);
SetGVNFlag(kDependsOnArrayLengths);
}
@@ -2439,7 +2549,7 @@ class HMapEnumLength: public HUnaryOperation {
public:
explicit HMapEnumLength(HValue* value) : HUnaryOperation(value) {
set_type(HType::Smi());
- set_representation(Representation::Tagged());
+ set_representation(Representation::Smi());
SetFlag(kUseGVN);
SetGVNFlag(kDependsOnMaps);
}
@@ -2486,6 +2596,7 @@ class HBitNot: public HUnaryOperation {
set_representation(Representation::Integer32());
SetFlag(kUseGVN);
SetFlag(kTruncatingToInt32);
+ SetFlag(kAllowUndefinedAsNaN);
}
virtual Representation RequiredInputRepresentation(int index) {
@@ -2548,7 +2659,10 @@ class HUnaryMathOperation: public HTemplateInstruction<2> {
}
}
+ virtual Range* InferRange(Zone* zone);
+
virtual HValue* Canonicalize();
+ virtual Representation RepresentationFromInputs();
BuiltinFunctionId op() const { return op_; }
const char* OpName() const;
@@ -2595,6 +2709,7 @@ class HUnaryMathOperation: public HTemplateInstruction<2> {
UNREACHABLE();
}
SetFlag(kUseGVN);
+ SetFlag(kAllowUndefinedAsNaN);
}
virtual bool IsDeletable() const { return true; }
@@ -2923,74 +3038,19 @@ class HCheckPrototypeMaps: public HTemplateInstruction<0> {
};
-class HCheckSmi: public HUnaryOperation {
- public:
- explicit HCheckSmi(HValue* value) : HUnaryOperation(value) {
- set_representation(Representation::Tagged());
- SetFlag(kUseGVN);
- }
-
- virtual Representation RequiredInputRepresentation(int index) {
- return Representation::Tagged();
- }
- virtual HType CalculateInferredType();
-
-#ifdef DEBUG
- virtual void Verify();
-#endif
-
- DECLARE_CONCRETE_INSTRUCTION(CheckSmi)
-
- protected:
- virtual bool DataEquals(HValue* other) { return true; }
-};
-
-
-class HCheckSmiOrInt32: public HUnaryOperation {
- public:
- explicit HCheckSmiOrInt32(HValue* value) : HUnaryOperation(value) {
- SetFlag(kFlexibleRepresentation);
- SetFlag(kUseGVN);
- }
-
- virtual int RedefinedOperandIndex() { return 0; }
- virtual Representation RequiredInputRepresentation(int index) {
- return representation();
- }
- virtual void InferRepresentation(HInferRepresentation* h_infer);
-
- virtual Representation observed_input_representation(int index) {
- return Representation::Integer32();
- }
-
- virtual HValue* Canonicalize() {
- if (representation().IsTagged() && !value()->type().IsSmi()) {
- return this;
- } else {
- return value();
- }
- }
-
- DECLARE_CONCRETE_INSTRUCTION(CheckSmiOrInt32)
-
- protected:
- virtual bool DataEquals(HValue* other) { return true; }
-};
-
-
class HPhi: public HValue {
public:
HPhi(int merged_index, Zone* zone)
: inputs_(2, zone),
merged_index_(merged_index),
- phi_id_(-1),
- is_convertible_to_integer_(true) {
+ phi_id_(-1) {
for (int i = 0; i < Representation::kNumRepresentations; i++) {
non_phi_uses_[i] = 0;
indirect_uses_[i] = 0;
}
ASSERT(merged_index >= 0);
SetFlag(kFlexibleRepresentation);
+ SetFlag(kAllowUndefinedAsNaN);
}
virtual Representation RepresentationFromInputs();
@@ -3001,6 +3061,9 @@ class HPhi: public HValue {
virtual Representation RequiredInputRepresentation(int index) {
return representation();
}
+ virtual Representation KnownOptimalRepresentation() {
+ return representation();
+ }
virtual HType CalculateInferredType();
virtual int OperandCount() { return inputs_.length(); }
virtual HValue* OperandAt(int index) const { return inputs_[index]; }
@@ -3027,6 +3090,9 @@ class HPhi: public HValue {
int tagged_non_phi_uses() const {
return non_phi_uses_[Representation::kTagged];
}
+ int smi_non_phi_uses() const {
+ return non_phi_uses_[Representation::kSmi];
+ }
int int32_non_phi_uses() const {
return non_phi_uses_[Representation::kInteger32];
}
@@ -3036,6 +3102,9 @@ class HPhi: public HValue {
int tagged_indirect_uses() const {
return indirect_uses_[Representation::kTagged];
}
+ int smi_indirect_uses() const {
+ return indirect_uses_[Representation::kSmi];
+ }
int int32_indirect_uses() const {
return indirect_uses_[Representation::kInteger32];
}
@@ -3050,28 +3119,6 @@ class HPhi: public HValue {
}
virtual Opcode opcode() const { return HValue::kPhi; }
- virtual bool IsConvertibleToInteger() const {
- return is_convertible_to_integer_;
- }
-
- void set_is_convertible_to_integer(bool b) {
- is_convertible_to_integer_ = b;
- }
-
- bool AllOperandsConvertibleToInteger() {
- for (int i = 0; i < OperandCount(); ++i) {
- if (!OperandAt(i)->IsConvertibleToInteger()) {
- if (FLAG_trace_representation) {
- HValue* input = OperandAt(i);
- PrintF("#%d %s: Input #%d %s at %d is NCTI\n",
- id(), Mnemonic(), input->id(), input->Mnemonic(), i);
- }
- return false;
- }
- }
- return true;
- }
-
void SimplifyConstantInputs();
// TODO(titzer): we can't eliminate the receiver for generating backtraces
@@ -3095,7 +3142,6 @@ class HPhi: public HValue {
int non_phi_uses_[Representation::kNumRepresentations];
int indirect_uses_[Representation::kNumRepresentations];
int phi_id_;
- bool is_convertible_to_integer_;
};
@@ -3168,11 +3214,11 @@ class HConstant: public HTemplateInstruction<0> {
public:
HConstant(Handle<Object> handle, Representation r);
HConstant(int32_t value,
- Representation r,
+ Representation r = Representation::None(),
bool is_not_in_new_space = true,
Handle<Object> optional_handle = Handle<Object>::null());
HConstant(double value,
- Representation r,
+ Representation r = Representation::None(),
bool is_not_in_new_space = true,
Handle<Object> optional_handle = Handle<Object>::null());
HConstant(Handle<Object> handle,
@@ -3185,11 +3231,12 @@ class HConstant: public HTemplateInstruction<0> {
Handle<Object> handle() {
if (handle_.is_null()) {
+ Factory* factory = Isolate::Current()->factory();
       // Default arguments to is_not_in_new_space depend on this heap number
       // being tenured so that it's guaranteed not to be located in new space.
- handle_ = FACTORY->NewNumber(double_value_, TENURED);
+ handle_ = factory->NewNumber(double_value_, TENURED);
}
- ALLOW_HANDLE_DEREF(Isolate::Current(), "smi check");
+ AllowDeferredHandleDereference smi_check;
ASSERT(has_int32_value_ || !handle_->IsSmi());
return handle_;
}
@@ -3232,8 +3279,11 @@ class HConstant: public HTemplateInstruction<0> {
return Representation::None();
}
- virtual bool IsConvertibleToInteger() const {
- return has_int32_value_;
+ virtual Representation KnownOptimalRepresentation() {
+ if (HasSmiValue()) return Representation::Smi();
+ if (HasInteger32Value()) return Representation::Integer32();
+ if (HasNumberValue()) return Representation::Double();
+ return Representation::Tagged();
}
virtual bool EmitAtUses() { return !representation().IsDouble(); }
@@ -3247,9 +3297,7 @@ class HConstant: public HTemplateInstruction<0> {
ASSERT(HasInteger32Value());
return int32_value_;
}
- bool HasSmiValue() const {
- return HasInteger32Value() && Smi::IsValid(Integer32Value());
- }
+ bool HasSmiValue() const { return has_smi_value_; }
bool HasDoubleValue() const { return has_double_value_; }
double DoubleValue() const {
ASSERT(HasDoubleValue());
@@ -3348,6 +3396,7 @@ class HConstant: public HTemplateInstruction<0> {
// int32_value_ and double_value_ hold valid, safe representations
// of the constant. has_int32_value_ implies has_double_value_ but
// not the converse.
+ bool has_smi_value_ : 1;
bool has_int32_value_ : 1;
bool has_double_value_ : 1;
bool is_internalized_string_ : 1; // TODO(yangguo): make this part of HType.
@@ -3416,6 +3465,16 @@ class HBinaryOperation: public HTemplateInstruction<3> {
virtual Representation RepresentationFromInputs();
virtual void AssumeRepresentation(Representation r);
+ virtual void UpdateRepresentation(Representation new_rep,
+ HInferRepresentation* h_infer,
+ const char* reason) {
+ // By default, binary operations don't handle Smis.
+ if (new_rep.IsSmi()) {
+ new_rep = Representation::Integer32();
+ }
+ HValue::UpdateRepresentation(new_rep, h_infer, reason);
+ }
+
virtual bool IsCommutative() const { return false; }
virtual void PrintDataTo(StringStream* stream);
@@ -3560,40 +3619,22 @@ class HAccessArgumentsAt: public HTemplateInstruction<3> {
};
-enum BoundsCheckKeyMode {
- DONT_ALLOW_SMI_KEY,
- ALLOW_SMI_KEY
-};
-
-
class HBoundsCheckBaseIndexInformation;
class HBoundsCheck: public HTemplateInstruction<2> {
public:
// Normally HBoundsCheck should be created using the
- // HGraphBuilder::AddBoundsCheck() helper, which also guards the index with
- // a HCheckSmiOrInt32 check.
+ // HGraphBuilder::AddBoundsCheck() helper.
   // However, when building stubs, where we know that the arguments are Int32,
// it makes sense to invoke this constructor directly.
- HBoundsCheck(HValue* index,
- HValue* length,
- BoundsCheckKeyMode key_mode = DONT_ALLOW_SMI_KEY,
- Representation r = Representation::None())
- : key_mode_(key_mode), skip_check_(false),
+ HBoundsCheck(HValue* index, HValue* length)
+ : skip_check_(false),
base_(NULL), offset_(0), scale_(0),
responsibility_direction_(DIRECTION_NONE) {
SetOperandAt(0, index);
SetOperandAt(1, length);
- if (r.IsNone()) {
- // In the normal compilation pipeline the representation is flexible
- // (see InferRepresentation).
- SetFlag(kFlexibleRepresentation);
- } else {
- // When compiling stubs we want to set the representation explicitly
- // so the compilation pipeline can skip the HInferRepresentation phase.
- set_representation(r);
- }
+ SetFlag(kFlexibleRepresentation);
SetFlag(kUseGVN);
}
@@ -3630,9 +3671,6 @@ class HBoundsCheck: public HTemplateInstruction<2> {
virtual Representation RequiredInputRepresentation(int arg_index) {
return representation();
}
- virtual Representation observed_input_representation(int index) {
- return Representation::Integer32();
- }
virtual bool IsRelationTrueInternal(NumericRelation relation,
HValue* related_value,
@@ -3661,7 +3699,6 @@ class HBoundsCheck: public HTemplateInstruction<2> {
virtual bool DataEquals(HValue* other) { return true; }
virtual void TryGuaranteeRangeChanging(RangeEvaluationContext* context);
- BoundsCheckKeyMode key_mode_;
bool skip_check_;
HValue* base_;
int offset_;
@@ -3716,6 +3753,7 @@ class HBitwiseBinaryOperation: public HBinaryOperation {
: HBinaryOperation(context, left, right) {
SetFlag(kFlexibleRepresentation);
SetFlag(kTruncatingToInt32);
+ SetFlag(kAllowUndefinedAsNaN);
SetAllSideEffects();
}
@@ -3740,7 +3778,9 @@ class HBitwiseBinaryOperation: public HBinaryOperation {
HInferRepresentation* h_infer,
const char* reason) {
// We only generate either int32 or generic tagged bitwise operations.
- if (new_rep.IsDouble()) new_rep = Representation::Integer32();
+ if (new_rep.IsSmi() || new_rep.IsDouble()) {
+ new_rep = Representation::Integer32();
+ }
HValue::UpdateRepresentation(new_rep, h_infer, reason);
}
@@ -3768,6 +3808,7 @@ class HMathFloorOfDiv: public HBinaryOperation {
if (!right->IsConstant()) {
SetFlag(kCanBeDivByZero);
}
+ SetFlag(kAllowUndefinedAsNaN);
}
virtual HValue* EnsureAndPropagateNotMinusZero(BitVector* visited);
@@ -3792,6 +3833,7 @@ class HArithmeticBinaryOperation: public HBinaryOperation {
: HBinaryOperation(context, left, right) {
SetAllSideEffects();
SetFlag(kFlexibleRepresentation);
+ SetFlag(kAllowUndefinedAsNaN);
}
virtual void RepresentationChanged(Representation to) {
@@ -3811,7 +3853,7 @@ class HArithmeticBinaryOperation: public HBinaryOperation {
: representation();
}
- virtual HValue* Canonicalize();
+ DECLARE_ABSTRACT_INSTRUCTION(ArithmeticBinaryOperation)
private:
virtual bool IsDeletable() const { return true; }
@@ -4389,7 +4431,12 @@ class HMod: public HArithmeticBinaryOperation {
static HInstruction* New(Zone* zone,
HValue* context,
HValue* left,
- HValue* right);
+ HValue* right,
+ bool has_fixed_right_arg,
+ int fixed_right_arg_value);
+
+ bool has_fixed_right_arg() const { return has_fixed_right_arg_; }
+ int fixed_right_arg_value() const { return fixed_right_arg_value_; }
bool HasPowerOf2Divisor() {
if (right()->IsConstant() &&
@@ -4413,11 +4460,20 @@ class HMod: public HArithmeticBinaryOperation {
virtual Range* InferRange(Zone* zone);
private:
- HMod(HValue* context, HValue* left, HValue* right)
- : HArithmeticBinaryOperation(context, left, right) {
+ HMod(HValue* context,
+ HValue* left,
+ HValue* right,
+ bool has_fixed_right_arg,
+ int fixed_right_arg_value)
+ : HArithmeticBinaryOperation(context, left, right),
+ has_fixed_right_arg_(has_fixed_right_arg),
+ fixed_right_arg_value_(fixed_right_arg_value) {
SetFlag(kCanBeDivByZero);
SetFlag(kCanOverflow);
}
+
+ const bool has_fixed_right_arg_;
+ const int fixed_right_arg_value_;
};
@@ -4429,9 +4485,8 @@ class HDiv: public HArithmeticBinaryOperation {
HValue* right);
bool HasPowerOf2Divisor() {
- if (right()->IsConstant() &&
- HConstant::cast(right())->HasInteger32Value()) {
- int32_t value = HConstant::cast(right())->Integer32Value();
+ if (right()->IsInteger32Constant()) {
+ int32_t value = right()->GetInteger32Constant();
return value != 0 && (IsPowerOf2(value) || IsPowerOf2(-value));
}
@@ -4755,6 +4810,11 @@ class HUnknownOSRValue: public HTemplateInstruction<0> {
return incoming_value_;
}
+ virtual Representation KnownOptimalRepresentation() {
+ if (incoming_value_ == NULL) return Representation::None();
+ return incoming_value_->KnownOptimalRepresentation();
+ }
+
DECLARE_CONCRETE_INSTRUCTION(UnknownOSRValue)
private:
@@ -4838,48 +4898,6 @@ class HLoadGlobalGeneric: public HTemplateInstruction<2> {
};
-class HAllocateObject: public HTemplateInstruction<1> {
- public:
- HAllocateObject(HValue* context, Handle<JSFunction> constructor)
- : constructor_(constructor) {
- SetOperandAt(0, context);
- set_representation(Representation::Tagged());
- SetGVNFlag(kChangesNewSpacePromotion);
- constructor_initial_map_ = constructor->has_initial_map()
- ? Handle<Map>(constructor->initial_map())
- : Handle<Map>::null();
- // If slack tracking finished, the instance size and property counts
- // remain unchanged so that we can allocate memory for the object.
- ASSERT(!constructor->shared()->IsInobjectSlackTrackingInProgress());
- }
-
- // Maximum instance size for which allocations will be inlined.
- static const int kMaxSize = 64 * kPointerSize;
-
- HValue* context() { return OperandAt(0); }
- Handle<JSFunction> constructor() { return constructor_; }
- Handle<Map> constructor_initial_map() { return constructor_initial_map_; }
-
- virtual Representation RequiredInputRepresentation(int index) {
- return Representation::Tagged();
- }
- virtual Handle<Map> GetMonomorphicJSObjectMap() {
- ASSERT(!constructor_initial_map_.is_null());
- return constructor_initial_map_;
- }
- virtual HType CalculateInferredType();
-
- DECLARE_CONCRETE_INSTRUCTION(AllocateObject)
-
- private:
- // TODO(svenpanne) Might be safe, but leave it out until we know for sure.
- // virtual bool IsDeletable() const { return true; }
-
- Handle<JSFunction> constructor_;
- Handle<Map> constructor_initial_map_;
-};
-
-
class HAllocate: public HTemplateInstruction<2> {
public:
enum Flags {
@@ -4898,6 +4916,9 @@ class HAllocate: public HTemplateInstruction<2> {
SetGVNFlag(kChangesNewSpacePromotion);
}
+ // Maximum instance size for which allocations will be inlined.
+ static const int kMaxInlineSize = 64 * kPointerSize;
+
static Flags DefaultFlags() {
return CAN_ALLOCATE_IN_NEW_SPACE;
}
@@ -4922,6 +4943,14 @@ class HAllocate: public HTemplateInstruction<2> {
}
}
+ virtual Handle<Map> GetMonomorphicJSObjectMap() {
+ return known_initial_map_;
+ }
+
+ void set_known_initial_map(Handle<Map> known_initial_map) {
+ known_initial_map_ = known_initial_map;
+ }
+
virtual HType CalculateInferredType();
bool CanAllocateInNewSpace() const {
@@ -4956,6 +4985,7 @@ class HAllocate: public HTemplateInstruction<2> {
private:
HType type_;
Flags flags_;
+ Handle<Map> known_initial_map_;
};
@@ -4999,7 +5029,6 @@ inline bool ReceiverObjectNeedsWriteBarrier(HValue* object,
new_space_dominator);
}
if (object != new_space_dominator) return true;
- if (object->IsAllocateObject()) return false;
if (object->IsAllocate()) {
return !HAllocate::cast(object)->GuaranteedInNewSpace();
}
@@ -5195,21 +5224,129 @@ class HStoreContextSlot: public HTemplateInstruction<2> {
};
+// Represents an access to a portion of an object, such as the map pointer,
+// the elements pointer, etc., but not accesses to array elements themselves.
+class HObjectAccess {
+ public:
+ inline bool IsInobject() const {
+ return portion() != kBackingStore;
+ }
+
+ inline int offset() const {
+ return OffsetField::decode(value_);
+ }
+
+ inline Handle<String> name() const {
+ return name_;
+ }
+
+ static HObjectAccess ForHeapNumberValue() {
+ return HObjectAccess(kDouble, HeapNumber::kValueOffset);
+ }
+
+ static HObjectAccess ForElementsPointer() {
+ return HObjectAccess(kElementsPointer, JSObject::kElementsOffset);
+ }
+
+ static HObjectAccess ForArrayLength() {
+ return HObjectAccess(kArrayLengths, JSArray::kLengthOffset);
+ }
+
+ static HObjectAccess ForFixedArrayLength() {
+ return HObjectAccess(kArrayLengths, FixedArray::kLengthOffset);
+ }
+
+ static HObjectAccess ForPropertiesPointer() {
+ return HObjectAccess(kInobject, JSObject::kPropertiesOffset);
+ }
+
+ static HObjectAccess ForPrototypeOrInitialMap() {
+ return HObjectAccess(kInobject, JSFunction::kPrototypeOrInitialMapOffset);
+ }
+
+ static HObjectAccess ForMap() {
+ return HObjectAccess(kMaps, JSObject::kMapOffset);
+ }
+
+ static HObjectAccess ForAllocationSitePayload() {
+ return HObjectAccess(kInobject, AllocationSiteInfo::kPayloadOffset);
+ }
+
+ // Create an access to an offset in a fixed array header.
+ static HObjectAccess ForFixedArrayHeader(int offset);
+
+ // Create an access to an in-object property in a JSObject.
+ static HObjectAccess ForJSObjectOffset(int offset);
+
+ // Create an access to an in-object property in a JSArray.
+ static HObjectAccess ForJSArrayOffset(int offset);
+
+ // Create an access to the backing store of an object.
+ static HObjectAccess ForBackingStoreOffset(int offset);
+
+ // Create an access to a resolved field (in-object or backing store).
+ static HObjectAccess ForField(Handle<Map> map,
+ LookupResult *lookup, Handle<String> name = Handle<String>::null());
+
+ void PrintTo(StringStream* stream);
+
+ inline bool Equals(HObjectAccess that) const {
+ return value_ == that.value_; // portion and offset must match
+ }
+
+ protected:
+ void SetGVNFlags(HValue *instr, bool is_store);
+
+ private:
+ // internal use only; different parts of an object or array
+ enum Portion {
+ kMaps, // map of an object
+ kArrayLengths, // the length of an array
+ kElementsPointer, // elements pointer
+ kBackingStore, // some field in the backing store
+ kDouble, // some double field
+ kInobject // some other in-object field
+ };
+
+ HObjectAccess(Portion portion, int offset,
+ Handle<String> name = Handle<String>::null())
+ : value_(PortionField::encode(portion) | OffsetField::encode(offset)),
+ name_(name) {
+ ASSERT(this->offset() == offset); // offset should decode correctly
+ ASSERT(this->portion() == portion); // portion should decode correctly
+ }
+
+ class PortionField : public BitField<Portion, 0, 3> {};
+ class OffsetField : public BitField<int, 3, 29> {};
+
+ uint32_t value_; // encodes both portion and offset
+ Handle<String> name_;
+
+ friend class HLoadNamedField;
+ friend class HStoreNamedField;
+
+ inline Portion portion() const {
+ return PortionField::decode(value_);
+ }
+};
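// Aside: HObjectAccess packs a 3-bit portion tag and a 29-bit byte offset
// into one uint32_t via v8's BitField template. A minimal stand-in with the
// same field layout, showing the encode/decode round trip:
#include <cassert>
#include <cstdint>

template <typename T, int shift, int size>
struct BitField {
  static const uint32_t kMask = ((1u << size) - 1u) << shift;
  static uint32_t encode(T value) {
    return static_cast<uint32_t>(value) << shift;
  }
  static T decode(uint32_t value) {
    return static_cast<T>((value & kMask) >> shift);
  }
};

enum Portion { kMaps, kArrayLengths, kElementsPointer,
               kBackingStore, kDouble, kInobject };
typedef BitField<Portion, 0, 3> PortionField;  // bits 0..2
typedef BitField<int, 3, 29> OffsetField;      // bits 3..31

int main() {
  uint32_t value = PortionField::encode(kDouble) | OffsetField::encode(8);
  assert(PortionField::decode(value) == kDouble);
  assert(OffsetField::decode(value) == 8);
  return 0;
}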
+
+
class HLoadNamedField: public HTemplateInstruction<2> {
public:
- HLoadNamedField(HValue* object, bool is_in_object,
- Representation field_representation,
- int offset, HValue* typecheck = NULL)
- : is_in_object_(is_in_object),
- field_representation_(field_representation),
- offset_(offset) {
+ HLoadNamedField(HValue* object,
+ HObjectAccess access,
+ HValue* typecheck = NULL,
+ Representation field_representation
+ = Representation::Tagged())
+ : access_(access),
+ field_representation_(field_representation) {
ASSERT(object != NULL);
SetOperandAt(0, object);
SetOperandAt(1, typecheck != NULL ? typecheck : object);
if (FLAG_track_fields && field_representation.IsSmi()) {
set_type(HType::Smi());
- set_representation(Representation::Tagged());
+ set_representation(field_representation);
} else if (FLAG_track_double_fields && field_representation.IsDouble()) {
set_representation(field_representation);
} else if (FLAG_track_heap_object_fields &&
@@ -5219,31 +5356,7 @@ class HLoadNamedField: public HTemplateInstruction<2> {
} else {
set_representation(Representation::Tagged());
}
- SetFlag(kUseGVN);
- if (FLAG_track_double_fields && representation().IsDouble()) {
- ASSERT(is_in_object);
- ASSERT(offset == HeapNumber::kValueOffset);
- SetGVNFlag(kDependsOnDoubleFields);
- } else if (is_in_object) {
- SetGVNFlag(kDependsOnInobjectFields);
- SetGVNFlag(kDependsOnMaps);
- } else {
- SetGVNFlag(kDependsOnBackingStoreFields);
- SetGVNFlag(kDependsOnMaps);
- }
- }
-
- static HLoadNamedField* NewArrayLength(Zone* zone, HValue* object,
- HValue* typecheck,
- HType type = HType::Tagged()) {
- Representation representation =
- type.IsSmi() ? Representation::Smi() : Representation::Tagged();
- HLoadNamedField* result = new(zone) HLoadNamedField(
- object, true, representation, JSArray::kLengthOffset, typecheck);
- result->set_type(type);
- result->SetGVNFlag(kDependsOnArrayLengths);
- result->ClearGVNFlag(kDependsOnInobjectFields);
- return result;
+ access.SetGVNFlags(this, false);
}
HValue* object() { return OperandAt(0); }
@@ -5253,9 +5366,8 @@ class HLoadNamedField: public HTemplateInstruction<2> {
}
bool HasTypeCheck() const { return OperandAt(0) != OperandAt(1); }
- bool is_in_object() const { return is_in_object_; }
+ HObjectAccess access() const { return access_; }
Representation field_representation() const { return representation_; }
- int offset() const { return offset_; }
virtual Representation RequiredInputRepresentation(int index) {
return Representation::Tagged();
@@ -5267,15 +5379,14 @@ class HLoadNamedField: public HTemplateInstruction<2> {
protected:
virtual bool DataEquals(HValue* other) {
HLoadNamedField* b = HLoadNamedField::cast(other);
- return is_in_object_ == b->is_in_object_ && offset_ == b->offset_;
+ return access_.Equals(b->access_);
}
private:
virtual bool IsDeletable() const { return true; }
- bool is_in_object_;
+ HObjectAccess access_;
Representation field_representation_;
- int offset_;
};
@@ -5377,7 +5488,7 @@ class ArrayInstructionInterface {
static Representation KeyedAccessIndexRequirement(Representation r) {
return r.IsInteger32() ? Representation::Integer32()
- : Representation::Tagged();
+ : Representation::Smi();
}
};
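// Aside: why Representation::Smi() is a cheap requirement for keyed-access
// indices. Under v8's 32-bit tagging scheme a Smi is a 31-bit integer
// shifted left by one with tag bit 0 cleared, so tag checks and decoding
// are single instructions. Sketch (non-negative values only, to keep the
// shifts portable):
#include <cassert>
#include <cstdint>

const int kSmiTagSize = 1;

int32_t SmiEncode(int32_t value) { return value << kSmiTagSize; }
int32_t SmiDecode(int32_t smi) { return smi >> kSmiTagSize; }
bool IsSmi(int32_t tagged) { return (tagged & 1) == 0; }

int main() {
  int32_t smi = SmiEncode(42);
  assert(IsSmi(smi));
  assert(SmiDecode(smi) == 42);
  return 0;
}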
@@ -5411,11 +5522,15 @@ class HLoadKeyed
IsFastDoubleElementsKind(elements_kind));
if (IsFastSmiOrObjectElementsKind(elements_kind)) {
- if (elements_kind == FAST_SMI_ELEMENTS) {
+ if (IsFastSmiElementsKind(elements_kind) &&
+ (!IsHoleyElementsKind(elements_kind) ||
+ mode == NEVER_RETURN_HOLE)) {
set_type(HType::Smi());
+ set_representation(Representation::Smi());
+ } else {
+ set_representation(Representation::Tagged());
}
- set_representation(Representation::Tagged());
SetGVNFlag(kDependsOnArrayElements);
} else {
set_representation(Representation::Double());
@@ -5570,29 +5685,17 @@ class HLoadKeyedGeneric: public HTemplateInstruction<3> {
class HStoreNamedField: public HTemplateInstruction<2> {
public:
HStoreNamedField(HValue* obj,
- Handle<Name> name,
+ HObjectAccess access,
HValue* val,
- bool in_object,
- Representation field_representation,
- int offset)
- : name_(name),
- is_in_object_(in_object),
+ Representation field_representation
+ = Representation::Tagged())
+ : access_(access),
field_representation_(field_representation),
- offset_(offset),
transition_unique_id_(),
new_space_dominator_(NULL) {
SetOperandAt(0, obj);
SetOperandAt(1, val);
- SetFlag(kTrackSideEffectDominators);
- if (FLAG_track_double_fields && field_representation.IsDouble()) {
- SetGVNFlag(kChangesDoubleFields);
- } else if (is_in_object_) {
- SetGVNFlag(kChangesInobjectFields);
- SetGVNFlag(kDependsOnNewSpacePromotion);
- } else {
- SetGVNFlag(kChangesBackingStoreFields);
- SetGVNFlag(kDependsOnNewSpacePromotion);
- }
+ access.SetGVNFlags(this, true);
}
DECLARE_CONCRETE_INSTRUCTION(StoreNamedField)
@@ -5603,7 +5706,7 @@ class HStoreNamedField: public HTemplateInstruction<2> {
return field_representation_;
} else if (FLAG_track_fields &&
index == 1 && field_representation_.IsSmi()) {
- return Representation::Integer32();
+ return field_representation_;
}
return Representation::Tagged();
}
@@ -5616,9 +5719,7 @@ class HStoreNamedField: public HTemplateInstruction<2> {
HValue* object() { return OperandAt(0); }
HValue* value() { return OperandAt(1); }
- Handle<Name> name() const { return name_; }
- bool is_in_object() const { return is_in_object_; }
- int offset() const { return offset_; }
+ HObjectAccess access() const { return access_; }
Handle<Map> transition() const { return transition_; }
UniqueValueId transition_unique_id() const { return transition_unique_id_; }
void set_transition(Handle<Map> map) { transition_ = map; }
@@ -5647,10 +5748,8 @@ class HStoreNamedField: public HTemplateInstruction<2> {
}
private:
- Handle<Name> name_;
- bool is_in_object_;
+ HObjectAccess access_;
Representation field_representation_;
- int offset_;
Handle<Map> transition_;
UniqueValueId transition_unique_id_;
HValue* new_space_dominator_;
@@ -5711,9 +5810,11 @@ class HStoreKeyed
}
if (is_external()) {
SetGVNFlag(kChangesSpecializedArrayElements);
+ SetFlag(kAllowUndefinedAsNaN);
} else if (IsFastDoubleElementsKind(elements_kind)) {
SetGVNFlag(kChangesDoubleArrayElements);
- SetFlag(kDeoptimizeOnUndefined);
+ } else if (IsFastSmiElementsKind(elements_kind)) {
+ SetGVNFlag(kChangesArrayElements);
} else {
SetGVNFlag(kChangesArrayElements);
}
@@ -5728,6 +5829,7 @@ class HStoreKeyed
virtual Representation RequiredInputRepresentation(int index) {
// kind_fast: tagged[int32] = tagged
// kind_double: tagged[int32] = double
+ // kind_smi : tagged[int32] = smi
// kind_external: external[int32] = (double | int32)
if (index == 0) {
return is_external() ? Representation::External()
@@ -5742,6 +5844,10 @@ class HStoreKeyed
return Representation::Double();
}
+ if (IsFastSmiElementsKind(elements_kind())) {
+ return Representation::Smi();
+ }
+
return is_external() ? Representation::Integer32()
: Representation::Tagged();
}
@@ -5752,6 +5858,9 @@ class HStoreKeyed
virtual Representation observed_input_representation(int index) {
if (index < 2) return RequiredInputRepresentation(index);
+ if (IsFastSmiElementsKind(elements_kind())) {
+ return Representation::Smi();
+ }
if (IsDoubleOrFloatElementsKind(elements_kind())) {
return Representation::Double();
}
@@ -6265,6 +6374,7 @@ class HSeqStringSetChar: public HTemplateInstruction<3> {
SetOperandAt(0, string);
SetOperandAt(1, index);
SetOperandAt(2, value);
+ set_representation(Representation::Tagged());
}
String::Encoding encoding() { return encoding_; }
@@ -6273,7 +6383,8 @@ class HSeqStringSetChar: public HTemplateInstruction<3> {
HValue* value() { return OperandAt(2); }
virtual Representation RequiredInputRepresentation(int index) {
- return Representation::Tagged();
+ return (index == 0) ? Representation::Tagged()
+ : Representation::Integer32();
}
DECLARE_CONCRETE_INSTRUCTION(SeqStringSetChar)
diff --git a/deps/v8/src/hydrogen.cc b/deps/v8/src/hydrogen.cc
index 097216ef83..b2badcdb50 100644
--- a/deps/v8/src/hydrogen.cc
+++ b/deps/v8/src/hydrogen.cc
@@ -26,6 +26,7 @@
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#include "hydrogen.h"
+#include "hydrogen-gvn.h"
#include <algorithm>
@@ -33,11 +34,13 @@
#include "codegen.h"
#include "full-codegen.h"
#include "hashmap.h"
+#include "hydrogen-environment-liveness.h"
#include "lithium-allocator.h"
#include "parser.h"
#include "scopeinfo.h"
#include "scopes.h"
#include "stub-cache.h"
+#include "typing.h"
#if V8_TARGET_ARCH_IA32
#include "ia32/lithium-codegen-ia32.h"
@@ -71,6 +74,7 @@ HBasicBlock::HBasicBlock(HGraph* graph)
last_instruction_index_(-1),
deleted_phis_(4, graph->zone()),
parent_loop_header_(NULL),
+ inlined_entry_block_(NULL),
is_inline_return_target_(false),
is_deoptimizing_(false),
dominates_loop_successors_(false),
@@ -130,10 +134,13 @@ HDeoptimize* HBasicBlock::CreateDeoptimize(
HDeoptimize::UseEnvironment has_uses) {
ASSERT(HasEnvironment());
if (has_uses == HDeoptimize::kNoUses)
- return new(zone()) HDeoptimize(0, zone());
+ return new(zone()) HDeoptimize(0, 0, 0, zone());
HEnvironment* environment = last_environment();
- HDeoptimize* instr = new(zone()) HDeoptimize(environment->length(), zone());
+ int first_local_index = environment->first_local_index();
+ int first_expression_index = environment->first_expression_index();
+ HDeoptimize* instr = new(zone()) HDeoptimize(
+ environment->length(), first_local_index, first_expression_index, zone());
for (int i = 0; i < environment->length(); i++) {
HValue* val = environment->values()->at(i);
instr->AddEnvironmentValue(val, zone());
@@ -156,8 +163,11 @@ HSimulate* HBasicBlock::CreateSimulate(BailoutId ast_id,
HSimulate* instr =
new(zone()) HSimulate(ast_id, pop_count, zone(), removable);
+#ifdef DEBUG
+ instr->set_closure(environment->closure());
+#endif
// Order of pushed values: newest (top of stack) first. This allows
- // HSimulate::MergeInto() to easily append additional pushed values
+ // HSimulate::MergeWith() to easily append additional pushed values
// that are older (from further down the stack).
for (int i = 0; i < push_count; ++i) {
instr->AddPushedValue(environment->ExpressionStackAt(i));
@@ -192,7 +202,7 @@ void HBasicBlock::Goto(HBasicBlock* block,
if (block->IsInlineReturnTarget()) {
AddInstruction(new(zone()) HLeaveInlined());
- last_environment_ = last_environment()->DiscardInlined(drop_extra);
+ UpdateEnvironment(last_environment()->DiscardInlined(drop_extra));
}
if (add_simulate) AddSimulate(BailoutId::None());
@@ -209,7 +219,7 @@ void HBasicBlock::AddLeaveInlined(HValue* return_value,
ASSERT(target->IsInlineReturnTarget());
ASSERT(return_value != NULL);
AddInstruction(new(zone()) HLeaveInlined());
- last_environment_ = last_environment()->DiscardInlined(drop_extra);
+ UpdateEnvironment(last_environment()->DiscardInlined(drop_extra));
last_environment()->Push(return_value);
AddSimulate(BailoutId::None());
HGoto* instr = new(zone()) HGoto(target);
@@ -224,6 +234,12 @@ void HBasicBlock::SetInitialEnvironment(HEnvironment* env) {
}
+void HBasicBlock::UpdateEnvironment(HEnvironment* env) {
+ last_environment_ = env;
+ graph()->update_maximum_environment_size(env->first_expression_index());
+}
+
+
void HBasicBlock::SetJoinId(BailoutId ast_id) {
int length = predecessors_.length();
ASSERT(length > 0);
@@ -511,7 +527,8 @@ class ReachabilityAnalyzer BASE_EMBEDDED {
void HGraph::Verify(bool do_full_verify) const {
Heap::RelocationLock(isolate()->heap());
- ALLOW_HANDLE_DEREF(isolate(), "debug mode verification");
+ AllowHandleDereference allow_deref;
+ AllowDeferredHandleDereference allow_deferred_deref;
for (int i = 0; i < blocks_.length(); i++) {
HBasicBlock* block = blocks_.at(i);
@@ -591,24 +608,10 @@ void HGraph::Verify(bool do_full_verify) const {
#endif
-HConstant* HGraph::GetConstantInt32(SetOncePointer<HConstant>* pointer,
- int32_t value) {
+HConstant* HGraph::GetConstant(SetOncePointer<HConstant>* pointer,
+ int32_t value) {
if (!pointer->is_set()) {
- HConstant* constant =
- new(zone()) HConstant(value, Representation::Integer32());
- constant->InsertAfter(GetConstantUndefined());
- pointer->set(constant);
- }
- return pointer->get();
-}
-
-
-HConstant* HGraph::GetConstantSmi(SetOncePointer<HConstant>* pointer,
- int32_t value) {
- if (!pointer->is_set()) {
- HConstant* constant =
- new(zone()) HConstant(Handle<Object>(Smi::FromInt(value), isolate()),
- Representation::Tagged());
+ HConstant* constant = new(zone()) HConstant(value);
constant->InsertAfter(GetConstantUndefined());
pointer->set(constant);
}
@@ -617,17 +620,17 @@ HConstant* HGraph::GetConstantSmi(SetOncePointer<HConstant>* pointer,
HConstant* HGraph::GetConstant0() {
- return GetConstantInt32(&constant_0_, 0);
+ return GetConstant(&constant_0_, 0);
}
HConstant* HGraph::GetConstant1() {
- return GetConstantInt32(&constant_1_, 1);
+ return GetConstant(&constant_1_, 1);
}
HConstant* HGraph::GetConstantMinus1() {
- return GetConstantInt32(&constant_minus1_, -1);
+ return GetConstant(&constant_minus1_, -1);
}
@@ -655,21 +658,11 @@ DEFINE_GET_CONSTANT(Hole, the_hole, HType::Tagged(), false)
DEFINE_GET_CONSTANT(Null, null, HType::Tagged(), false)
-HConstant* HGraph::GetConstantSmi0() {
- return GetConstantSmi(&constant_smi_0_, 0);
-}
-
-
-HConstant* HGraph::GetConstantSmi1() {
- return GetConstantSmi(&constant_smi_1_, 1);
-}
-
-
#undef DEFINE_GET_CONSTANT
HConstant* HGraph::GetInvalidContext() {
- return GetConstantInt32(&constant_invalid_context_, 0xFFFFC0C7);
+ return GetConstant(&constant_invalid_context_, 0xFFFFC0C7);
}
@@ -731,8 +724,7 @@ HInstruction* HGraphBuilder::IfBuilder::IfCompare(
HInstruction* HGraphBuilder::IfBuilder::IfCompareMap(HValue* left,
Handle<Map> map) {
HCompareMap* compare =
- new(zone()) HCompareMap(left, map,
- first_true_block_, first_false_block_);
+ new(zone()) HCompareMap(left, map, first_true_block_, first_false_block_);
AddCompare(compare);
return compare;
}
@@ -811,9 +803,16 @@ void HGraphBuilder::IfBuilder::Then() {
did_then_ = true;
if (needs_compare_) {
// Handle if's without any expressions; they jump directly to the "else"
- // branch.
- builder_->current_block()->GotoNoSimulate(first_false_block_);
- first_true_block_ = NULL;
+ // branch. However, we must pretend that the "then" branch is reachable,
+ // so that the graph builder visits it and sees any live-range-extending
+ // constructs within it.
+ HConstant* constant_false = builder_->graph()->GetConstantFalse();
+ ToBooleanStub::Types boolean_type = ToBooleanStub::no_types();
+ boolean_type.Add(ToBooleanStub::BOOLEAN);
+ HBranch* branch =
+ new(zone()) HBranch(constant_false, first_true_block_,
+ first_false_block_, boolean_type);
+ builder_->current_block()->Finish(branch);
}
builder_->set_current_block(first_true_block_);
}
@@ -833,6 +832,7 @@ void HGraphBuilder::IfBuilder::Else() {
void HGraphBuilder::IfBuilder::Deopt() {
HBasicBlock* block = builder_->current_block();
block->FinishExitWithDeoptimization(HDeoptimize::kUseAll);
+ builder_->set_current_block(NULL);
if (did_else_) {
first_false_block_ = NULL;
} else {
@@ -843,9 +843,10 @@ void HGraphBuilder::IfBuilder::Deopt() {
void HGraphBuilder::IfBuilder::Return(HValue* value) {
HBasicBlock* block = builder_->current_block();
- block->Finish(new(zone()) HReturn(value,
- builder_->environment()->LookupContext(),
- builder_->graph()->GetConstantMinus1()));
+ HValue* context = builder_->environment()->LookupContext();
+ HValue* parameter_count = builder_->graph()->GetConstantMinus1();
+ block->FinishExit(new(zone()) HReturn(value, context, parameter_count));
+ builder_->set_current_block(NULL);
if (did_else_) {
first_false_block_ = NULL;
} else {
@@ -898,13 +899,11 @@ HGraphBuilder::LoopBuilder::LoopBuilder(HGraphBuilder* builder,
HValue* HGraphBuilder::LoopBuilder::BeginBody(
HValue* initial,
HValue* terminating,
- Token::Value token,
- Representation input_representation) {
+ Token::Value token) {
HEnvironment* env = builder_->environment();
phi_ = new(zone()) HPhi(env->values()->length(), zone());
header_block_->AddPhi(phi_);
phi_->AddInput(initial);
- phi_->AssumeRepresentation(Representation::Integer32());
env->Push(initial);
builder_->current_block()->GotoNoSimulate(header_block_);
@@ -918,9 +917,6 @@ HValue* HGraphBuilder::LoopBuilder::BeginBody(
builder_->set_current_block(header_block_);
HCompareIDAndBranch* compare =
new(zone()) HCompareIDAndBranch(phi_, terminating, token);
- compare->set_observed_input_representation(input_representation,
- input_representation);
- compare->AssumeRepresentation(input_representation);
compare->SetSuccessorAt(0, body_block_);
compare->SetSuccessorAt(1, exit_block_);
builder_->current_block()->Finish(compare);
@@ -934,7 +930,6 @@ HValue* HGraphBuilder::LoopBuilder::BeginBody(
increment_ = HSub::New(zone(), context_, phi_, one);
}
increment_->ClearFlag(HValue::kCanOverflow);
- increment_->AssumeRepresentation(Representation::Integer32());
builder_->AddInstruction(increment_);
return increment_;
} else {
@@ -954,7 +949,6 @@ void HGraphBuilder::LoopBuilder::EndBody() {
increment_ = HSub::New(zone(), context_, phi_, one);
}
increment_->ClearFlag(HValue::kCanOverflow);
- increment_->AssumeRepresentation(Representation::Integer32());
builder_->AddInstruction(increment_);
}
@@ -999,20 +993,8 @@ void HGraphBuilder::AddSimulate(BailoutId id,
}
-HBoundsCheck* HGraphBuilder::AddBoundsCheck(HValue* index,
- HValue* length,
- BoundsCheckKeyMode key_mode,
- Representation r) {
- if (!index->type().IsSmi()) {
- index = new(graph()->zone()) HCheckSmiOrInt32(index);
- AddInstruction(HCheckSmiOrInt32::cast(index));
- }
- if (!length->type().IsSmi()) {
- length = new(graph()->zone()) HCheckSmiOrInt32(length);
- AddInstruction(HCheckSmiOrInt32::cast(length));
- }
- HBoundsCheck* result = new(graph()->zone()) HBoundsCheck(
- index, length, key_mode, r);
+HBoundsCheck* HGraphBuilder::AddBoundsCheck(HValue* index, HValue* length) {
+ HBoundsCheck* result = new(graph()->zone()) HBoundsCheck(index, length);
AddInstruction(result);
return result;
}
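// Aside: the simplified AddBoundsCheck above leaves index coercion to
// representation inference; the check itself reduces to 0 <= index < length.
// Sketch of the usual single-compare form:
#include <cassert>
#include <cstdint>

bool InBounds(int32_t index, int32_t length) {
  // One unsigned compare covers both bounds: a negative index wraps to a
  // huge unsigned value and fails the test.
  return static_cast<uint32_t>(index) < static_cast<uint32_t>(length);
}

int main() {
  assert(InBounds(0, 4) && InBounds(3, 4));
  assert(!InBounds(-1, 4) && !InBounds(4, 4));
  return 0;
}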
@@ -1130,11 +1112,6 @@ HInstruction* HGraphBuilder::BuildFastElementAccess(
switch (elements_kind) {
case FAST_SMI_ELEMENTS:
case FAST_HOLEY_SMI_ELEMENTS:
- if (!val->type().IsSmi()) {
- // Smi-only arrays need a smi check.
- AddInstruction(new(zone) HCheckSmi(val));
- }
- // Fall through.
case FAST_ELEMENTS:
case FAST_HOLEY_ELEMENTS:
case FAST_DOUBLE_ELEMENTS:
@@ -1195,21 +1172,15 @@ HValue* HGraphBuilder::BuildCheckForCapacityGrow(HValue* object,
new_length->AssumeRepresentation(Representation::Integer32());
new_length->ClearFlag(HValue::kCanOverflow);
- Factory* factory = isolate()->factory();
Representation representation = IsFastElementsKind(kind)
? Representation::Smi() : Representation::Tagged();
- HInstruction* length_store = AddInstruction(new(zone) HStoreNamedField(
- object,
- factory->length_field_string(),
- new_length, true,
- representation,
- JSArray::kLengthOffset));
- length_store->SetGVNFlag(kChangesArrayLengths);
+ AddStore(object, HObjectAccess::ForArrayLength(), new_length,
+ representation);
}
length_checker.Else();
- AddBoundsCheck(key, length, ALLOW_SMI_KEY);
+ AddBoundsCheck(key, length);
environment()->Push(elements);
length_checker.End();
@@ -1258,8 +1229,7 @@ HInstruction* HGraphBuilder::BuildUncheckedMonomorphicElementAccess(
ElementsKind elements_kind,
bool is_store,
LoadKeyedHoleMode load_mode,
- KeyedAccessStoreMode store_mode,
- Representation checked_index_representation) {
+ KeyedAccessStoreMode store_mode) {
ASSERT(!IsExternalArrayElementsKind(elements_kind) || !is_js_array);
Zone* zone = this->zone();
// No GVNFlag is necessary for ElementsKind if there is an explicit dependency
@@ -1286,8 +1256,9 @@ HInstruction* HGraphBuilder::BuildUncheckedMonomorphicElementAccess(
}
HInstruction* length = NULL;
if (is_js_array) {
- length = AddInstruction(
- HLoadNamedField::NewArrayLength(zone, object, mapcheck, HType::Smi()));
+ length = AddLoad(object, HObjectAccess::ForArrayLength(), mapcheck,
+ Representation::Smi());
+ length->set_type(HType::Smi());
} else {
length = AddInstruction(new(zone) HFixedArrayBaseLength(elements));
}
@@ -1314,8 +1285,7 @@ HInstruction* HGraphBuilder::BuildUncheckedMonomorphicElementAccess(
return result;
} else {
ASSERT(store_mode == STANDARD_STORE);
- checked_key = AddBoundsCheck(
- key, length, ALLOW_SMI_KEY, checked_index_representation);
+ checked_key = AddBoundsCheck(key, length);
HLoadExternalArrayPointer* external_elements =
new(zone) HLoadExternalArrayPointer(elements);
AddInstruction(external_elements);
@@ -1328,24 +1298,22 @@ HInstruction* HGraphBuilder::BuildUncheckedMonomorphicElementAccess(
fast_elements ||
IsFastDoubleElementsKind(elements_kind));
+ // If val is stored into a fast smi array, ensure that the value is a smi
+ // before manipulating the backing store; otherwise the actual store may
+ // deopt, leaving the backing store in an invalid state.
if (is_store && IsFastSmiElementsKind(elements_kind) &&
!val->type().IsSmi()) {
- AddInstruction(new(zone) HCheckSmi(val));
+ val = AddInstruction(new(zone) HForceRepresentation(
+ val, Representation::Smi()));
}
if (IsGrowStoreMode(store_mode)) {
NoObservableSideEffectsScope no_effects(this);
-
elements = BuildCheckForCapacityGrow(object, elements, elements_kind,
length, key, is_js_array);
- if (!key->type().IsSmi()) {
- checked_key = AddInstruction(new(zone) HCheckSmiOrInt32(key));
- } else {
- checked_key = key;
- }
+ checked_key = key;
} else {
- checked_key = AddBoundsCheck(
- key, length, ALLOW_SMI_KEY, checked_index_representation);
+ checked_key = AddBoundsCheck(key, length);
if (is_store && (fast_elements || fast_smi_only_elements)) {
if (store_mode == STORE_NO_TRANSITION_HANDLE_COW) {
@@ -1391,7 +1359,7 @@ HValue* HGraphBuilder::BuildAllocateElements(HValue* context,
total_size->ClearFlag(HValue::kCanOverflow);
HAllocate::Flags flags = HAllocate::DefaultFlags(kind);
- if (FLAG_pretenure_literals) {
+ if (isolate()->heap()->ShouldGloballyPretenure()) {
// TODO(hpayer): When pretenuring can be internalized, flags can become
// private to HAllocate.
if (IsFastDoubleElementsKind(kind)) {
@@ -1410,32 +1378,28 @@ HValue* HGraphBuilder::BuildAllocateElements(HValue* context,
}
-void HGraphBuilder::BuildInitializeElements(HValue* elements,
- ElementsKind kind,
- HValue* capacity) {
- Zone* zone = this->zone();
+void HGraphBuilder::BuildInitializeElementsHeader(HValue* elements,
+ ElementsKind kind,
+ HValue* capacity) {
Factory* factory = isolate()->factory();
Handle<Map> map = IsFastDoubleElementsKind(kind)
? factory->fixed_double_array_map()
: factory->fixed_array_map();
- BuildStoreMap(elements, map);
- Handle<String> fixed_array_length_field_name = factory->length_field_string();
+ AddStoreMapConstant(elements, map);
Representation representation = IsFastElementsKind(kind)
? Representation::Smi() : Representation::Tagged();
- HInstruction* store_length =
- new(zone) HStoreNamedField(elements, fixed_array_length_field_name,
- capacity, true, representation,
- FixedArray::kLengthOffset);
- AddInstruction(store_length);
+ AddStore(elements, HObjectAccess::ForFixedArrayLength(), capacity,
+ representation);
}
-HValue* HGraphBuilder::BuildAllocateAndInitializeElements(HValue* context,
- ElementsKind kind,
- HValue* capacity) {
+HValue* HGraphBuilder::BuildAllocateElementsAndInitializeElementsHeader(
+ HValue* context,
+ ElementsKind kind,
+ HValue* capacity) {
HValue* new_elements = BuildAllocateElements(context, kind, capacity);
- BuildInitializeElements(new_elements, kind, capacity);
+ BuildInitializeElementsHeader(new_elements, kind, capacity);
return new_elements;
}
@@ -1446,7 +1410,7 @@ HInnerAllocatedObject* HGraphBuilder::BuildJSArrayHeader(HValue* array,
HValue* allocation_site_payload,
HValue* length_field) {
- BuildStoreMap(array, array_map);
+ AddStore(array, HObjectAccess::ForMap(), array_map);
HConstant* empty_fixed_array =
new(zone()) HConstant(
@@ -1454,21 +1418,9 @@ HInnerAllocatedObject* HGraphBuilder::BuildJSArrayHeader(HValue* array,
Representation::Tagged());
AddInstruction(empty_fixed_array);
- AddInstruction(new(zone()) HStoreNamedField(array,
- isolate()->factory()->properties_field_symbol(),
- empty_fixed_array,
- true,
- Representation::Tagged(),
- JSArray::kPropertiesOffset));
-
- HInstruction* length_store = AddInstruction(
- new(zone()) HStoreNamedField(array,
- isolate()->factory()->length_field_string(),
- length_field,
- true,
- Representation::Tagged(),
- JSArray::kLengthOffset));
- length_store->SetGVNFlag(kChangesArrayLengths);
+ HObjectAccess access = HObjectAccess::ForPropertiesPointer();
+ AddStore(array, access, empty_fixed_array);
+ AddStore(array, HObjectAccess::ForArrayLength(), length_field);
if (mode == TRACK_ALLOCATION_SITE) {
BuildCreateAllocationSiteInfo(array,
@@ -1482,58 +1434,17 @@ HInnerAllocatedObject* HGraphBuilder::BuildJSArrayHeader(HValue* array,
}
HInnerAllocatedObject* elements = new(zone()) HInnerAllocatedObject(
- array,
- elements_location);
+ array, elements_location);
AddInstruction(elements);
- HInstruction* elements_store = AddInstruction(
- new(zone()) HStoreNamedField(
- array,
- isolate()->factory()->elements_field_string(),
- elements,
- true,
- Representation::Tagged(),
- JSArray::kElementsOffset));
- elements_store->SetGVNFlag(kChangesElementsPointer);
-
+ AddStore(array, HObjectAccess::ForElementsPointer(), elements);
return elements;
}
-HInstruction* HGraphBuilder::BuildStoreMap(HValue* object,
- HValue* map) {
- Zone* zone = this->zone();
- Factory* factory = isolate()->factory();
- Handle<String> map_field_name = factory->map_field_string();
- HInstruction* store_map =
- new(zone) HStoreNamedField(object, map_field_name, map,
- true, Representation::Tagged(),
- JSObject::kMapOffset);
- store_map->ClearGVNFlag(kChangesInobjectFields);
- store_map->SetGVNFlag(kChangesMaps);
- AddInstruction(store_map);
- return store_map;
-}
-
-
-HInstruction* HGraphBuilder::BuildStoreMap(HValue* object,
- Handle<Map> map) {
- Zone* zone = this->zone();
- HValue* map_constant =
- AddInstruction(new(zone) HConstant(map, Representation::Tagged()));
- return BuildStoreMap(object, map_constant);
-}
-
-
HLoadNamedField* HGraphBuilder::AddLoadElements(HValue* object,
- HValue* typecheck) {
- HLoadNamedField* instr = new(zone()) HLoadNamedField(object, true,
- Representation::Tagged(), JSObject::kElementsOffset, typecheck);
- AddInstruction(instr);
- instr->SetGVNFlag(kDependsOnElementsPointer);
- instr->ClearGVNFlag(kDependsOnMaps);
- instr->ClearGVNFlag(kDependsOnInobjectFields);
- return instr;
+ HValue* typecheck) {
+ return AddLoad(object, HObjectAccess::ForElementsPointer(), typecheck);
}
@@ -1568,16 +1479,13 @@ void HGraphBuilder::BuildNewSpaceArrayCheck(HValue* length, ElementsKind kind) {
Heap* heap = isolate()->heap();
int element_size = IsFastDoubleElementsKind(kind) ? kDoubleSize
: kPointerSize;
- int max_size = heap->MaxNewSpaceAllocationSize() / element_size;
+ int max_size = heap->MaxRegularSpaceAllocationSize() / element_size;
max_size -= JSArray::kSize / element_size;
- HConstant* max_size_constant =
- new(zone) HConstant(max_size, Representation::Integer32());
+ HConstant* max_size_constant = new(zone) HConstant(max_size);
AddInstruction(max_size_constant);
// Since we're forcing Integer32 representation for this HBoundsCheck,
// there's no need to Smi-check the index.
- AddInstruction(new(zone)
- HBoundsCheck(length, max_size_constant,
- DONT_ALLOW_SMI_KEY, Representation::Integer32()));
+ AddInstruction(new(zone) HBoundsCheck(length, max_size_constant));
}
@@ -1586,25 +1494,18 @@ HValue* HGraphBuilder::BuildGrowElementsCapacity(HValue* object,
ElementsKind kind,
HValue* length,
HValue* new_capacity) {
- Zone* zone = this->zone();
HValue* context = environment()->LookupContext();
BuildNewSpaceArrayCheck(new_capacity, kind);
- HValue* new_elements =
- BuildAllocateAndInitializeElements(context, kind, new_capacity);
+ HValue* new_elements = BuildAllocateElementsAndInitializeElementsHeader(
+ context, kind, new_capacity);
BuildCopyElements(context, elements, kind,
new_elements, kind,
length, new_capacity);
- Factory* factory = isolate()->factory();
- HInstruction* elements_store = AddInstruction(new(zone) HStoreNamedField(
- object,
- factory->elements_field_string(),
- new_elements, true, Representation::Tagged(),
- JSArray::kElementsOffset));
- elements_store->SetGVNFlag(kChangesElementsPointer);
+ AddStore(object, HObjectAccess::ForElementsPointer(), new_elements);
return new_elements;
}
@@ -1644,10 +1545,15 @@ void HGraphBuilder::BuildFillElementsWithHole(HValue* context,
}
}
+ // Since we're about to store a hole value, the store instruction below must
+ // assume an elements kind that supports heap object values.
+ if (IsFastSmiOrObjectElementsKind(elements_kind)) {
+ elements_kind = FAST_HOLEY_ELEMENTS;
+ }
+
if (unfold_loop) {
for (int i = 0; i < initial_capacity; i++) {
- HInstruction* key = AddInstruction(new(zone)
- HConstant(i, Representation::Integer32()));
+ HInstruction* key = AddInstruction(new(zone) HConstant(i));
AddInstruction(new(zone) HStoreKeyed(elements, key, hole, elements_kind));
}
} else {
@@ -1690,8 +1596,14 @@ void HGraphBuilder::BuildCopyElements(HValue* context,
from_elements_kind,
ALLOW_RETURN_HOLE));
- AddInstruction(new(zone()) HStoreKeyed(to_elements, key, element,
- to_elements_kind));
+ ElementsKind holey_kind = IsFastSmiElementsKind(to_elements_kind)
+ ? FAST_HOLEY_ELEMENTS : to_elements_kind;
+ HInstruction* holey_store = AddInstruction(
+ new(zone()) HStoreKeyed(to_elements, key, element, holey_kind));
+ // Allow NaN hole values to be converted to their tagged counterparts.
+ if (IsFastHoleyElementsKind(to_elements_kind)) {
+ holey_store->SetFlag(HValue::kAllowUndefinedAsNaN);
+ }
builder.EndBody();
@@ -1709,7 +1621,6 @@ HValue* HGraphBuilder::BuildCloneShallowArray(HContext* context,
ElementsKind kind,
int length) {
Zone* zone = this->zone();
- Factory* factory = isolate()->factory();
NoObservableSideEffectsScope no_effects(this);
@@ -1739,16 +1650,8 @@ HValue* HGraphBuilder::BuildCloneShallowArray(HContext* context,
// Copy the JS array part.
for (int i = 0; i < JSArray::kSize; i += kPointerSize) {
if ((i != JSArray::kElementsOffset) || (length == 0)) {
- HInstruction* value = AddInstruction(new(zone) HLoadNamedField(
- boilerplate, true, Representation::Tagged(), i));
- if (i != JSArray::kMapOffset) {
- AddInstruction(new(zone) HStoreNamedField(object,
- factory->empty_string(),
- value, true,
- Representation::Tagged(), i));
- } else {
- BuildStoreMap(object, value);
- }
+ HObjectAccess access = HObjectAccess::ForJSArrayOffset(i);
+ AddStore(object, access, AddLoad(boilerplate, access));
}
}
@@ -1763,21 +1666,12 @@ HValue* HGraphBuilder::BuildCloneShallowArray(HContext* context,
HValue* boilerplate_elements = AddLoadElements(boilerplate);
HValue* object_elements =
AddInstruction(new(zone) HInnerAllocatedObject(object, elems_offset));
- AddInstruction(new(zone) HStoreNamedField(object,
- factory->elements_field_string(),
- object_elements, true,
- Representation::Tagged(),
- JSObject::kElementsOffset));
+ AddStore(object, HObjectAccess::ForElementsPointer(), object_elements);
// Copy the elements array header.
for (int i = 0; i < FixedArrayBase::kHeaderSize; i += kPointerSize) {
- HInstruction* value =
- AddInstruction(new(zone) HLoadNamedField(
- boilerplate_elements, true, Representation::Tagged(), i));
- AddInstruction(new(zone) HStoreNamedField(object_elements,
- factory->empty_string(),
- value, true,
- Representation::Tagged(), i));
+ HObjectAccess access = HObjectAccess::ForFixedArrayHeader(i);
+ AddStore(object_elements, access, AddLoad(boilerplate_elements, access));
}
// Copy the elements array contents.
@@ -1785,8 +1679,7 @@ HValue* HGraphBuilder::BuildCloneShallowArray(HContext* context,
// copying loops with constant length up to a given boundary and use this
// helper here instead.
for (int i = 0; i < length; i++) {
- HValue* key_constant =
- AddInstruction(new(zone) HConstant(i, Representation::Integer32()));
+ HValue* key_constant = AddInstruction(new(zone) HConstant(i));
HInstruction* value =
AddInstruction(new(zone) HLoadKeyed(boilerplate_elements,
key_constant,
@@ -1805,7 +1698,6 @@ HValue* HGraphBuilder::BuildCloneShallowArray(HContext* context,
void HGraphBuilder::BuildCompareNil(
HValue* value,
- EqualityKind kind,
CompareNilICStub::Types types,
Handle<Map> map,
int position,
@@ -1840,9 +1732,7 @@ void HGraphBuilder::BuildCompareNil(
// emitted below is the actual monomorphic map.
BuildCheckMap(value, map);
} else {
- if (kind == kNonStrictEquality) {
- if_nil.Deopt();
- }
+ if_nil.Deopt();
}
}
@@ -1857,61 +1747,82 @@ HValue* HGraphBuilder::BuildCreateAllocationSiteInfo(HValue* previous_object,
HInnerAllocatedObject(previous_object, previous_object_size);
AddInstruction(alloc_site);
Handle<Map> alloc_site_map(isolate()->heap()->allocation_site_info_map());
- BuildStoreMap(alloc_site, alloc_site_map);
- AddInstruction(new(zone()) HStoreNamedField(alloc_site,
- isolate()->factory()->payload_string(),
- payload,
- true,
- Representation::Tagged(),
- AllocationSiteInfo::kPayloadOffset));
+ AddStoreMapConstant(alloc_site, alloc_site_map);
+ HObjectAccess access = HObjectAccess::ForAllocationSitePayload();
+ AddStore(alloc_site, access, payload);
return alloc_site;
}
HInstruction* HGraphBuilder::BuildGetNativeContext(HValue* context) {
+ // Get the global object, then load the native context from it.
HInstruction* global_object = AddInstruction(new(zone())
- HGlobalObject(context));
- HInstruction* native_context = AddInstruction(new(zone())
- HLoadNamedField(global_object, true, Representation::Tagged(),
- GlobalObject::kNativeContextOffset));
- return native_context;
+ HGlobalObject(context));
+ HObjectAccess access = HObjectAccess::ForJSObjectOffset(
+ GlobalObject::kNativeContextOffset);
+ return AddLoad(global_object, access);
}
HInstruction* HGraphBuilder::BuildGetArrayFunction(HValue* context) {
HInstruction* native_context = BuildGetNativeContext(context);
- int offset = Context::kHeaderSize +
- kPointerSize * Context::ARRAY_FUNCTION_INDEX;
- HInstruction* array_function = AddInstruction(new(zone())
- HLoadNamedField(native_context, true, Representation::Tagged(), offset));
- return array_function;
+ HInstruction* index = AddInstruction(new(zone())
+ HConstant(Context::ARRAY_FUNCTION_INDEX, Representation::Integer32()));
+
+ return AddInstruction(new(zone())
+ HLoadKeyed(native_context, index, NULL, FAST_ELEMENTS));
}
HGraphBuilder::JSArrayBuilder::JSArrayBuilder(HGraphBuilder* builder,
ElementsKind kind,
HValue* allocation_site_payload,
- AllocationSiteMode mode) :
+ bool disable_allocation_sites) :
builder_(builder),
kind_(kind),
- allocation_site_payload_(allocation_site_payload) {
- if (mode == DONT_TRACK_ALLOCATION_SITE) {
- mode_ = mode;
- } else {
- mode_ = AllocationSiteInfo::GetMode(kind);
- }
+ allocation_site_payload_(allocation_site_payload),
+ constructor_function_(NULL) {
+ mode_ = disable_allocation_sites
+ ? DONT_TRACK_ALLOCATION_SITE
+ : AllocationSiteInfo::GetMode(kind);
+}
+
+
+HGraphBuilder::JSArrayBuilder::JSArrayBuilder(HGraphBuilder* builder,
+ ElementsKind kind,
+ HValue* constructor_function) :
+ builder_(builder),
+ kind_(kind),
+ mode_(DONT_TRACK_ALLOCATION_SITE),
+ allocation_site_payload_(NULL),
+ constructor_function_(constructor_function) {
}
HValue* HGraphBuilder::JSArrayBuilder::EmitMapCode(HValue* context) {
HInstruction* native_context = builder()->BuildGetNativeContext(context);
- int offset = Context::kHeaderSize +
- kPointerSize * Context::JS_ARRAY_MAPS_INDEX;
- HInstruction* map_array = AddInstruction(new(zone())
- HLoadNamedField(native_context, true, Representation::Tagged(), offset));
- offset = kind_ * kPointerSize + FixedArrayBase::kHeaderSize;
- return AddInstruction(new(zone()) HLoadNamedField(
- map_array, true, Representation::Tagged(), offset));
+
+ HInstruction* index = builder()->AddInstruction(new(zone())
+ HConstant(Context::JS_ARRAY_MAPS_INDEX, Representation::Integer32()));
+
+ HInstruction* map_array = builder()->AddInstruction(new(zone())
+ HLoadKeyed(native_context, index, NULL, FAST_ELEMENTS));
+
+ HInstruction* kind_index = builder()->AddInstruction(new(zone())
+ HConstant(kind_, Representation::Integer32()));
+
+ return builder()->AddInstruction(new(zone())
+ HLoadKeyed(map_array, kind_index, NULL, FAST_ELEMENTS));
+}
+
+
+HValue* HGraphBuilder::JSArrayBuilder::EmitInternalMapCode() {
+ // Load the initial map stored on the constructor function.
+ HObjectAccess access = HObjectAccess::ForPrototypeOrInitialMap();
+ return AddInstruction(
+ builder()->BuildLoadNamedField(constructor_function_,
+ access,
+ Representation::Tagged()));
}
@@ -2001,7 +1912,12 @@ HValue* HGraphBuilder::JSArrayBuilder::AllocateArray(HValue* size_in_bytes,
AddInstruction(new_object);
// Fill in the fields: map, properties, length
- HValue* map = EmitMapCode(context);
+ HValue* map;
+ if (constructor_function_ != NULL) {
+ map = EmitInternalMapCode();
+ } else {
+ map = EmitMapCode(context);
+ }
elements_location_ = builder()->BuildJSArrayHeader(new_object,
map,
mode_,
@@ -2009,7 +1925,7 @@ HValue* HGraphBuilder::JSArrayBuilder::AllocateArray(HValue* size_in_bytes,
length_field);
// Initialize the elements
- builder()->BuildInitializeElements(elements_location_, kind_, capacity);
+ builder()->BuildInitializeElementsHeader(elements_location_, kind_, capacity);
if (fill_with_hole) {
builder()->BuildFillElementsWithHole(context, elements_location_, kind_,
@@ -2020,11 +1936,43 @@ HValue* HGraphBuilder::JSArrayBuilder::AllocateArray(HValue* size_in_bytes,
}
-HOptimizedGraphBuilder::HOptimizedGraphBuilder(CompilationInfo* info,
- TypeFeedbackOracle* oracle)
+HStoreNamedField* HGraphBuilder::AddStore(HValue *object,
+ HObjectAccess access,
+ HValue *val,
+ Representation representation) {
+ HStoreNamedField *instr = new(zone())
+ HStoreNamedField(object, access, val, representation);
+ AddInstruction(instr);
+ return instr;
+}
+
+
+HLoadNamedField* HGraphBuilder::AddLoad(HValue *object,
+ HObjectAccess access,
+ HValue *typecheck,
+ Representation representation) {
+ HLoadNamedField *instr =
+ new(zone()) HLoadNamedField(object, access, typecheck, representation);
+ AddInstruction(instr);
+ return instr;
+}
+
+
+HStoreNamedField* HGraphBuilder::AddStoreMapConstant(HValue *object,
+ Handle<Map> map) {
+ HValue* constant =
+ AddInstruction(new(zone()) HConstant(map, Representation::Tagged()));
+ HStoreNamedField *instr =
+ new(zone()) HStoreNamedField(object, HObjectAccess::ForMap(), constant);
+ AddInstruction(instr);
+ return instr;
+}
+
+
+HOptimizedGraphBuilder::HOptimizedGraphBuilder(CompilationInfo* info)
: HGraphBuilder(info),
function_state_(NULL),
- initial_function_state_(this, info, oracle, NORMAL_RETURN),
+ initial_function_state_(this, info, NORMAL_RETURN),
ast_context_(NULL),
break_scope_(NULL),
inlined_count_(0),
@@ -2103,7 +2051,8 @@ HGraph::HGraph(CompilationInfo* info)
use_optimistic_licm_(false),
has_soft_deoptimize_(false),
depends_on_empty_array_proto_elements_(false),
- type_change_checksum_(0) {
+ type_change_checksum_(0),
+ maximum_environment_size_(0) {
if (info->IsStub()) {
HydrogenCodeStub* stub = info->code_stub();
CodeStubInterfaceDescriptor* descriptor =
@@ -2128,7 +2077,7 @@ HBasicBlock* HGraph::CreateBasicBlock() {
void HGraph::FinalizeUniqueValueIds() {
- AssertNoAllocation no_gc;
+ DisallowHeapAllocation no_gc;
ASSERT(!isolate()->optimizing_compiler_thread()->IsOptimizerThread());
for (int i = 0; i < blocks()->length(); ++i) {
for (HInstruction* instr = blocks()->at(i)->first();
@@ -2142,6 +2091,22 @@ void HGraph::FinalizeUniqueValueIds() {
void HGraph::Canonicalize() {
HPhase phase("H_Canonicalize", this);
+ // Before removing no-op instructions, save their semantic value.
+ // We must be careful not to set the flag unnecessarily, because GVN
+ // cannot treat two instructions as identical when their flag values differ.
+ for (int i = 0; i < blocks()->length(); ++i) {
+ HInstruction* instr = blocks()->at(i)->first();
+ while (instr != NULL) {
+ if (instr->IsArithmeticBinaryOperation() &&
+ instr->representation().IsInteger32() &&
+ instr->HasAtLeastOneUseWithFlagAndNoneWithout(
+ HInstruction::kTruncatingToInt32)) {
+ instr->SetFlag(HInstruction::kAllUsesTruncatingToInt32);
+ }
+ instr = instr->next();
+ }
+ }
+ // Perform actual Canonicalization pass.
for (int i = 0; i < blocks()->length(); ++i) {
HInstruction* instr = blocks()->at(i)->first();
while (instr != NULL) {
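// Aside: the point of kAllUsesTruncatingToInt32 above. If every use of an
// int32 arithmetic result truncates (as |0 does in JS), the operation may
// wrap silently instead of deopting on overflow, because two's-complement
// wrapping commutes with truncation. Sketch (assumes two's-complement
// narrowing, guaranteed since C++20 and universal in practice):
#include <cassert>
#include <cstdint>

int32_t TruncateToInt32(int64_t value) {
  return static_cast<int32_t>(value);
}

int main() {
  int64_t exact = int64_t{INT32_MAX} + 1;  // 2^31: overflows int32
  // Truncating the exact result equals the wrapped int32 result, so a
  // truncating use can never observe the overflow.
  assert(TruncateToInt32(exact) == INT32_MIN);
  return 0;
}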
@@ -2842,251 +2807,6 @@ void HRangeAnalysis::AddRange(HValue* value, Range* range) {
}
-void TraceGVN(const char* msg, ...) {
- va_list arguments;
- va_start(arguments, msg);
- OS::VPrint(msg, arguments);
- va_end(arguments);
-}
-
-// Wrap TraceGVN in macros to avoid the expense of evaluating its arguments when
-// --trace-gvn is off.
-#define TRACE_GVN_1(msg, a1) \
- if (FLAG_trace_gvn) { \
- TraceGVN(msg, a1); \
- }
-
-#define TRACE_GVN_2(msg, a1, a2) \
- if (FLAG_trace_gvn) { \
- TraceGVN(msg, a1, a2); \
- }
-
-#define TRACE_GVN_3(msg, a1, a2, a3) \
- if (FLAG_trace_gvn) { \
- TraceGVN(msg, a1, a2, a3); \
- }
-
-#define TRACE_GVN_4(msg, a1, a2, a3, a4) \
- if (FLAG_trace_gvn) { \
- TraceGVN(msg, a1, a2, a3, a4); \
- }
-
-#define TRACE_GVN_5(msg, a1, a2, a3, a4, a5) \
- if (FLAG_trace_gvn) { \
- TraceGVN(msg, a1, a2, a3, a4, a5); \
- }
-
-
-HValueMap::HValueMap(Zone* zone, const HValueMap* other)
- : array_size_(other->array_size_),
- lists_size_(other->lists_size_),
- count_(other->count_),
- present_flags_(other->present_flags_),
- array_(zone->NewArray<HValueMapListElement>(other->array_size_)),
- lists_(zone->NewArray<HValueMapListElement>(other->lists_size_)),
- free_list_head_(other->free_list_head_) {
- OS::MemCopy(
- array_, other->array_, array_size_ * sizeof(HValueMapListElement));
- OS::MemCopy(
- lists_, other->lists_, lists_size_ * sizeof(HValueMapListElement));
-}
-
-
-void HValueMap::Kill(GVNFlagSet flags) {
- GVNFlagSet depends_flags = HValue::ConvertChangesToDependsFlags(flags);
- if (!present_flags_.ContainsAnyOf(depends_flags)) return;
- present_flags_.RemoveAll();
- for (int i = 0; i < array_size_; ++i) {
- HValue* value = array_[i].value;
- if (value != NULL) {
- // Clear list of collisions first, so we know if it becomes empty.
- int kept = kNil; // List of kept elements.
- int next;
- for (int current = array_[i].next; current != kNil; current = next) {
- next = lists_[current].next;
- HValue* value = lists_[current].value;
- if (value->gvn_flags().ContainsAnyOf(depends_flags)) {
- // Drop it.
- count_--;
- lists_[current].next = free_list_head_;
- free_list_head_ = current;
- } else {
- // Keep it.
- lists_[current].next = kept;
- kept = current;
- present_flags_.Add(value->gvn_flags());
- }
- }
- array_[i].next = kept;
-
- // Now possibly drop directly indexed element.
- value = array_[i].value;
- if (value->gvn_flags().ContainsAnyOf(depends_flags)) { // Drop it.
- count_--;
- int head = array_[i].next;
- if (head == kNil) {
- array_[i].value = NULL;
- } else {
- array_[i].value = lists_[head].value;
- array_[i].next = lists_[head].next;
- lists_[head].next = free_list_head_;
- free_list_head_ = head;
- }
- } else {
- present_flags_.Add(value->gvn_flags()); // Keep it.
- }
- }
- }
-}
-
-
-HValue* HValueMap::Lookup(HValue* value) const {
- uint32_t hash = static_cast<uint32_t>(value->Hashcode());
- uint32_t pos = Bound(hash);
- if (array_[pos].value != NULL) {
- if (array_[pos].value->Equals(value)) return array_[pos].value;
- int next = array_[pos].next;
- while (next != kNil) {
- if (lists_[next].value->Equals(value)) return lists_[next].value;
- next = lists_[next].next;
- }
- }
- return NULL;
-}
-
-
-void HValueMap::Resize(int new_size, Zone* zone) {
- ASSERT(new_size > count_);
- // Hashing the values into the new array has no more collisions than in the
- // old hash map, so we can use the existing lists_ array, if we are careful.
-
- // Make sure we have at least one free element.
- if (free_list_head_ == kNil) {
- ResizeLists(lists_size_ << 1, zone);
- }
-
- HValueMapListElement* new_array =
- zone->NewArray<HValueMapListElement>(new_size);
- memset(new_array, 0, sizeof(HValueMapListElement) * new_size);
-
- HValueMapListElement* old_array = array_;
- int old_size = array_size_;
-
- int old_count = count_;
- count_ = 0;
- // Do not modify present_flags_. It is currently correct.
- array_size_ = new_size;
- array_ = new_array;
-
- if (old_array != NULL) {
- // Iterate over all the elements in lists, rehashing them.
- for (int i = 0; i < old_size; ++i) {
- if (old_array[i].value != NULL) {
- int current = old_array[i].next;
- while (current != kNil) {
- Insert(lists_[current].value, zone);
- int next = lists_[current].next;
- lists_[current].next = free_list_head_;
- free_list_head_ = current;
- current = next;
- }
- // Rehash the directly stored value.
- Insert(old_array[i].value, zone);
- }
- }
- }
- USE(old_count);
- ASSERT(count_ == old_count);
-}
-
-
-void HValueMap::ResizeLists(int new_size, Zone* zone) {
- ASSERT(new_size > lists_size_);
-
- HValueMapListElement* new_lists =
- zone->NewArray<HValueMapListElement>(new_size);
- memset(new_lists, 0, sizeof(HValueMapListElement) * new_size);
-
- HValueMapListElement* old_lists = lists_;
- int old_size = lists_size_;
-
- lists_size_ = new_size;
- lists_ = new_lists;
-
- if (old_lists != NULL) {
- OS::MemCopy(lists_, old_lists, old_size * sizeof(HValueMapListElement));
- }
- for (int i = old_size; i < lists_size_; ++i) {
- lists_[i].next = free_list_head_;
- free_list_head_ = i;
- }
-}
-
-
-void HValueMap::Insert(HValue* value, Zone* zone) {
- ASSERT(value != NULL);
- // Resizing when half of the hashtable is filled up.
- if (count_ >= array_size_ >> 1) Resize(array_size_ << 1, zone);
- ASSERT(count_ < array_size_);
- count_++;
- uint32_t pos = Bound(static_cast<uint32_t>(value->Hashcode()));
- if (array_[pos].value == NULL) {
- array_[pos].value = value;
- array_[pos].next = kNil;
- } else {
- if (free_list_head_ == kNil) {
- ResizeLists(lists_size_ << 1, zone);
- }
- int new_element_pos = free_list_head_;
- ASSERT(new_element_pos != kNil);
- free_list_head_ = lists_[free_list_head_].next;
- lists_[new_element_pos].value = value;
- lists_[new_element_pos].next = array_[pos].next;
- ASSERT(array_[pos].next == kNil || lists_[array_[pos].next].value != NULL);
- array_[pos].next = new_element_pos;
- }
-}
-
-
-HSideEffectMap::HSideEffectMap() : count_(0) {
- memset(data_, 0, kNumberOfTrackedSideEffects * kPointerSize);
-}
-
-
-HSideEffectMap::HSideEffectMap(HSideEffectMap* other) : count_(other->count_) {
- *this = *other; // Calls operator=.
-}
-
-
-HSideEffectMap& HSideEffectMap::operator= (const HSideEffectMap& other) {
- if (this != &other) {
- OS::MemCopy(data_, other.data_, kNumberOfTrackedSideEffects * kPointerSize);
- }
- return *this;
-}
-
-void HSideEffectMap::Kill(GVNFlagSet flags) {
- for (int i = 0; i < kNumberOfTrackedSideEffects; i++) {
- GVNFlag changes_flag = HValue::ChangesFlagFromInt(i);
- if (flags.Contains(changes_flag)) {
- if (data_[i] != NULL) count_--;
- data_[i] = NULL;
- }
- }
-}
-
-
-void HSideEffectMap::Store(GVNFlagSet flags, HInstruction* instr) {
- for (int i = 0; i < kNumberOfTrackedSideEffects; i++) {
- GVNFlag changes_flag = HValue::ChangesFlagFromInt(i);
- if (flags.Contains(changes_flag)) {
- if (data_[i] == NULL) count_++;
- data_[i] = instr;
- }
- }
-}
-
-
class HStackCheckEliminator BASE_EMBEDDED {
public:
explicit HStackCheckEliminator(HGraph* graph) : graph_(graph) { }
@@ -3130,581 +2850,6 @@ void HStackCheckEliminator::Process() {
}
-// Simple sparse set with O(1) add, contains, and clear.
-class SparseSet {
- public:
- SparseSet(Zone* zone, int capacity)
- : capacity_(capacity),
- length_(0),
- dense_(zone->NewArray<int>(capacity)),
- sparse_(zone->NewArray<int>(capacity)) {
-#ifndef NVALGRIND
- // Initialize the sparse array to make valgrind happy.
- memset(sparse_, 0, sizeof(sparse_[0]) * capacity);
-#endif
- }
-
- bool Contains(int n) const {
- ASSERT(0 <= n && n < capacity_);
- int d = sparse_[n];
- return 0 <= d && d < length_ && dense_[d] == n;
- }
-
- bool Add(int n) {
- if (Contains(n)) return false;
- dense_[length_] = n;
- sparse_[n] = length_;
- ++length_;
- return true;
- }
-
- void Clear() { length_ = 0; }
-
- private:
- int capacity_;
- int length_;
- int* dense_;
- int* sparse_;
-
- DISALLOW_COPY_AND_ASSIGN(SparseSet);
-};
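// Aside: the sparse set removed above (the GVN machinery moves to
// hydrogen-gvn in this upgrade) is the classic Briggs/Torczon trick:
// Contains() cross-checks dense_ and sparse_, so with raw arrays neither
// needs initialization and Clear() is a single store. Self-contained
// restatement (std::vector zero-fills here, purely for simplicity):
#include <cassert>
#include <vector>

class SparseSet {
 public:
  explicit SparseSet(int capacity)
      : length_(0), dense_(capacity), sparse_(capacity) {}
  bool Contains(int n) const {
    int d = sparse_[n];
    return 0 <= d && d < length_ && dense_[d] == n;
  }
  bool Add(int n) {
    if (Contains(n)) return false;
    dense_[length_] = n;
    sparse_[n] = length_++;
    return true;
  }
  void Clear() { length_ = 0; }  // O(1): stale entries fail the cross-check
 private:
  int length_;
  std::vector<int> dense_, sparse_;
};

int main() {
  SparseSet set(8);
  assert(set.Add(3) && set.Contains(3) && !set.Add(3));
  set.Clear();
  assert(!set.Contains(3));
  return 0;
}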
-
-
-class HGlobalValueNumberer BASE_EMBEDDED {
- public:
- explicit HGlobalValueNumberer(HGraph* graph, CompilationInfo* info)
- : graph_(graph),
- info_(info),
- removed_side_effects_(false),
- block_side_effects_(graph->blocks()->length(), graph->zone()),
- loop_side_effects_(graph->blocks()->length(), graph->zone()),
- visited_on_paths_(graph->zone(), graph->blocks()->length()) {
-#ifdef DEBUG
- ASSERT(info->isolate()->optimizing_compiler_thread()->IsOptimizerThread() ||
- !info->isolate()->heap()->IsAllocationAllowed());
-#endif
- block_side_effects_.AddBlock(GVNFlagSet(), graph_->blocks()->length(),
- graph_->zone());
- loop_side_effects_.AddBlock(GVNFlagSet(), graph_->blocks()->length(),
- graph_->zone());
- }
-
- // Returns true if values with side effects are removed.
- bool Analyze();
-
- private:
- GVNFlagSet CollectSideEffectsOnPathsToDominatedBlock(
- HBasicBlock* dominator,
- HBasicBlock* dominated);
- void AnalyzeGraph();
- void ComputeBlockSideEffects();
- void LoopInvariantCodeMotion();
- void ProcessLoopBlock(HBasicBlock* block,
- HBasicBlock* before_loop,
- GVNFlagSet loop_kills,
- GVNFlagSet* accumulated_first_time_depends,
- GVNFlagSet* accumulated_first_time_changes);
- bool AllowCodeMotion();
- bool ShouldMove(HInstruction* instr, HBasicBlock* loop_header);
-
- HGraph* graph() { return graph_; }
- CompilationInfo* info() { return info_; }
- Zone* zone() const { return graph_->zone(); }
-
- HGraph* graph_;
- CompilationInfo* info_;
- bool removed_side_effects_;
-
- // A map of block IDs to their side effects.
- ZoneList<GVNFlagSet> block_side_effects_;
-
- // A map of loop header block IDs to their loop's side effects.
- ZoneList<GVNFlagSet> loop_side_effects_;
-
- // Used when collecting side effects on paths from dominator to
- // dominated.
- SparseSet visited_on_paths_;
-};
-
-
-bool HGlobalValueNumberer::Analyze() {
- removed_side_effects_ = false;
- ComputeBlockSideEffects();
- if (FLAG_loop_invariant_code_motion) {
- LoopInvariantCodeMotion();
- }
- AnalyzeGraph();
- return removed_side_effects_;
-}
-
-
-void HGlobalValueNumberer::ComputeBlockSideEffects() {
- // The Analyze phase of GVN can be called multiple times. Clear loop side
- // effects before computing them to erase the contents from previous Analyze
- // passes.
- for (int i = 0; i < loop_side_effects_.length(); ++i) {
- loop_side_effects_[i].RemoveAll();
- }
- for (int i = graph_->blocks()->length() - 1; i >= 0; --i) {
- // Compute side effects for the block.
- HBasicBlock* block = graph_->blocks()->at(i);
- HInstruction* instr = block->first();
- int id = block->block_id();
- GVNFlagSet side_effects;
- while (instr != NULL) {
- side_effects.Add(instr->ChangesFlags());
- if (instr->IsSoftDeoptimize()) {
- block_side_effects_[id].RemoveAll();
- side_effects.RemoveAll();
- break;
- }
- instr = instr->next();
- }
- block_side_effects_[id].Add(side_effects);
-
- // Loop headers are part of their loop.
- if (block->IsLoopHeader()) {
- loop_side_effects_[id].Add(side_effects);
- }
-
- // Propagate loop side effects upwards.
- if (block->HasParentLoopHeader()) {
- int header_id = block->parent_loop_header()->block_id();
- loop_side_effects_[header_id].Add(block->IsLoopHeader()
- ? loop_side_effects_[id]
- : side_effects);
- }
- }
-}
-
-
-SmartArrayPointer<char> GetGVNFlagsString(GVNFlagSet flags) {
- char underlying_buffer[kLastFlag * 128];
- Vector<char> buffer(underlying_buffer, sizeof(underlying_buffer));
-#if DEBUG
- int offset = 0;
- const char* separator = "";
- const char* comma = ", ";
- buffer[0] = 0;
- uint32_t set_depends_on = 0;
- uint32_t set_changes = 0;
- for (int bit = 0; bit < kLastFlag; ++bit) {
- if ((flags.ToIntegral() & (1 << bit)) != 0) {
- if (bit % 2 == 0) {
- set_changes++;
- } else {
- set_depends_on++;
- }
- }
- }
- bool positive_changes = set_changes < (kLastFlag / 2);
- bool positive_depends_on = set_depends_on < (kLastFlag / 2);
- if (set_changes > 0) {
- if (positive_changes) {
- offset += OS::SNPrintF(buffer + offset, "changes [");
- } else {
- offset += OS::SNPrintF(buffer + offset, "changes all except [");
- }
- for (int bit = 0; bit < kLastFlag; ++bit) {
- if (((flags.ToIntegral() & (1 << bit)) != 0) == positive_changes) {
- switch (static_cast<GVNFlag>(bit)) {
-#define DECLARE_FLAG(type) \
- case kChanges##type: \
- offset += OS::SNPrintF(buffer + offset, separator); \
- offset += OS::SNPrintF(buffer + offset, #type); \
- separator = comma; \
- break;
-GVN_TRACKED_FLAG_LIST(DECLARE_FLAG)
-GVN_UNTRACKED_FLAG_LIST(DECLARE_FLAG)
-#undef DECLARE_FLAG
- default:
- break;
- }
- }
- }
- offset += OS::SNPrintF(buffer + offset, "]");
- }
- if (set_depends_on > 0) {
- separator = "";
- if (set_changes > 0) {
- offset += OS::SNPrintF(buffer + offset, ", ");
- }
- if (positive_depends_on) {
- offset += OS::SNPrintF(buffer + offset, "depends on [");
- } else {
- offset += OS::SNPrintF(buffer + offset, "depends on all except [");
- }
- for (int bit = 0; bit < kLastFlag; ++bit) {
- if (((flags.ToIntegral() & (1 << bit)) != 0) == positive_depends_on) {
- switch (static_cast<GVNFlag>(bit)) {
-#define DECLARE_FLAG(type) \
- case kDependsOn##type: \
- offset += OS::SNPrintF(buffer + offset, separator); \
- offset += OS::SNPrintF(buffer + offset, #type); \
- separator = comma; \
- break;
-GVN_TRACKED_FLAG_LIST(DECLARE_FLAG)
-GVN_UNTRACKED_FLAG_LIST(DECLARE_FLAG)
-#undef DECLARE_FLAG
- default:
- break;
- }
- }
- }
- offset += OS::SNPrintF(buffer + offset, "]");
- }
-#else
- OS::SNPrintF(buffer, "0x%08X", flags.ToIntegral());
-#endif
- size_t string_len = strlen(underlying_buffer) + 1;
- ASSERT(string_len <= sizeof(underlying_buffer));
-  char* result = new char[string_len];
- OS::MemCopy(result, underlying_buffer, string_len);
- return SmartArrayPointer<char>(result);
-}
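
// Editorial sketch of the bit layout the printer above relies on (the
// authoritative source is GVN_TRACKED_FLAG_LIST/GVN_UNTRACKED_FLAG_LIST):
// each tracked effect appears to occupy an adjacent bit pair,
//
//   kChangesE   == 2 * n      (even bit, hence the bit % 2 == 0 test)
//   kDependsOnE == 2 * n + 1  (odd bit)
//
// which is presumably what lets HValue::ConvertChangesToDependsFlags()
// (used below) map one set onto the other cheaply.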
-
-
-void HGlobalValueNumberer::LoopInvariantCodeMotion() {
- TRACE_GVN_1("Using optimistic loop invariant code motion: %s\n",
- graph_->use_optimistic_licm() ? "yes" : "no");
- for (int i = graph_->blocks()->length() - 1; i >= 0; --i) {
- HBasicBlock* block = graph_->blocks()->at(i);
- if (block->IsLoopHeader()) {
- GVNFlagSet side_effects = loop_side_effects_[block->block_id()];
- TRACE_GVN_2("Try loop invariant motion for block B%d %s\n",
- block->block_id(),
- *GetGVNFlagsString(side_effects));
-
- GVNFlagSet accumulated_first_time_depends;
- GVNFlagSet accumulated_first_time_changes;
- HBasicBlock* last = block->loop_information()->GetLastBackEdge();
- for (int j = block->block_id(); j <= last->block_id(); ++j) {
- ProcessLoopBlock(graph_->blocks()->at(j), block, side_effects,
- &accumulated_first_time_depends,
- &accumulated_first_time_changes);
- }
- }
- }
-}
-
-
-void HGlobalValueNumberer::ProcessLoopBlock(
- HBasicBlock* block,
- HBasicBlock* loop_header,
- GVNFlagSet loop_kills,
- GVNFlagSet* first_time_depends,
- GVNFlagSet* first_time_changes) {
- HBasicBlock* pre_header = loop_header->predecessors()->at(0);
- GVNFlagSet depends_flags = HValue::ConvertChangesToDependsFlags(loop_kills);
- TRACE_GVN_2("Loop invariant motion for B%d %s\n",
- block->block_id(),
- *GetGVNFlagsString(depends_flags));
- HInstruction* instr = block->first();
- while (instr != NULL) {
- HInstruction* next = instr->next();
- bool hoisted = false;
- if (instr->CheckFlag(HValue::kUseGVN)) {
- TRACE_GVN_4("Checking instruction %d (%s) %s. Loop %s\n",
- instr->id(),
- instr->Mnemonic(),
- *GetGVNFlagsString(instr->gvn_flags()),
- *GetGVNFlagsString(loop_kills));
- bool can_hoist = !instr->gvn_flags().ContainsAnyOf(depends_flags);
- if (can_hoist && !graph()->use_optimistic_licm()) {
- can_hoist = block->IsLoopSuccessorDominator();
- }
-
- if (can_hoist) {
- bool inputs_loop_invariant = true;
- for (int i = 0; i < instr->OperandCount(); ++i) {
- if (instr->OperandAt(i)->IsDefinedAfter(pre_header)) {
- inputs_loop_invariant = false;
- }
- }
-
- if (inputs_loop_invariant && ShouldMove(instr, loop_header)) {
- TRACE_GVN_1("Hoisting loop invariant instruction %d\n", instr->id());
- // Move the instruction out of the loop.
- instr->Unlink();
- instr->InsertBefore(pre_header->end());
- if (instr->HasSideEffects()) removed_side_effects_ = true;
- hoisted = true;
- }
- }
- }
- if (!hoisted) {
- // If an instruction is not hoisted, we have to account for its side
- // effects when hoisting later HTransitionElementsKind instructions.
- GVNFlagSet previous_depends = *first_time_depends;
- GVNFlagSet previous_changes = *first_time_changes;
- first_time_depends->Add(instr->DependsOnFlags());
- first_time_changes->Add(instr->ChangesFlags());
- if (!(previous_depends == *first_time_depends)) {
- TRACE_GVN_1("Updated first-time accumulated %s\n",
- *GetGVNFlagsString(*first_time_depends));
- }
- if (!(previous_changes == *first_time_changes)) {
- TRACE_GVN_1("Updated first-time accumulated %s\n",
- *GetGVNFlagsString(*first_time_changes));
- }
- }
- instr = next;
- }
-}
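
// Editorial sketch of the transformation ProcessLoopBlock performs
// (simplified): an instruction whose depends-on flags miss the loop's kill
// set, and whose operands are all defined before the pre-header, is
// unlinked and reinserted in front of the pre-header's end:
//
//   pre_header:                        pre_header:
//   loop_header:                         x = a + b        // hoisted
//     x = a + b    // invariant   ==>  loop_header:
//     use(x)                             use(x)
//     goto loop_header                   goto loop_header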
-
-
-bool HGlobalValueNumberer::AllowCodeMotion() {
- return info()->IsStub() || info()->opt_count() + 1 < FLAG_max_opt_count;
-}
-
-
-bool HGlobalValueNumberer::ShouldMove(HInstruction* instr,
- HBasicBlock* loop_header) {
- // If we've disabled code motion or we're in a block that unconditionally
- // deoptimizes, don't move any instructions.
- return AllowCodeMotion() && !instr->block()->IsDeoptimizing();
-}
-
-
-GVNFlagSet HGlobalValueNumberer::CollectSideEffectsOnPathsToDominatedBlock(
- HBasicBlock* dominator, HBasicBlock* dominated) {
- GVNFlagSet side_effects;
- for (int i = 0; i < dominated->predecessors()->length(); ++i) {
- HBasicBlock* block = dominated->predecessors()->at(i);
- if (dominator->block_id() < block->block_id() &&
- block->block_id() < dominated->block_id() &&
- visited_on_paths_.Add(block->block_id())) {
- side_effects.Add(block_side_effects_[block->block_id()]);
- if (block->IsLoopHeader()) {
- side_effects.Add(loop_side_effects_[block->block_id()]);
- }
- side_effects.Add(CollectSideEffectsOnPathsToDominatedBlock(
- dominator, block));
- }
- }
- return side_effects;
-}
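
// Editorial sketch of the recursion above, as pseudocode:
//
//   collect(dominator, block):
//     effects = {}
//     for pred in block.predecessors:
//       if dominator.id < pred.id < block.id and not yet visited(pred):
//         effects |= block_side_effects[pred]
//         if pred is a loop header: effects |= loop_side_effects[pred]
//         effects |= collect(dominator, pred)
//     return effects
//
// visited_on_paths_ makes each intermediate block contribute only once,
// even when paths reconverge.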
-
-
-// Each instance of this class acts as a "stack frame" for the recursive
-// traversal of the dominator tree done during GVN (the stack is kept as a
-// doubly linked list).
-// We reuse frames when possible, so the list length is bounded by the depth
-// of the dominator tree, but this forces us to initialize each frame by
-// calling an explicit "Initialize" method instead of using a constructor.
-class GvnBasicBlockState: public ZoneObject {
- public:
- static GvnBasicBlockState* CreateEntry(Zone* zone,
- HBasicBlock* entry_block,
- HValueMap* entry_map) {
- return new(zone)
- GvnBasicBlockState(NULL, entry_block, entry_map, NULL, zone);
- }
-
- HBasicBlock* block() { return block_; }
- HValueMap* map() { return map_; }
- HSideEffectMap* dominators() { return &dominators_; }
-
- GvnBasicBlockState* next_in_dominator_tree_traversal(
- Zone* zone,
- HBasicBlock** dominator) {
- // This assignment needs to happen before calling next_dominated() because
- // that call can reuse "this" if we are at the last dominated block.
- *dominator = block();
- GvnBasicBlockState* result = next_dominated(zone);
- if (result == NULL) {
- GvnBasicBlockState* dominator_state = pop();
- if (dominator_state != NULL) {
- // This branch is guaranteed not to return NULL because pop() never
- // returns a state where "is_done() == true".
- *dominator = dominator_state->block();
- result = dominator_state->next_dominated(zone);
- } else {
-      // Unnecessary (we are returning NULL), but done for cleanliness.
- *dominator = NULL;
- }
- }
- return result;
- }
-
- private:
- void Initialize(HBasicBlock* block,
- HValueMap* map,
- HSideEffectMap* dominators,
- bool copy_map,
- Zone* zone) {
- block_ = block;
- map_ = copy_map ? map->Copy(zone) : map;
- dominated_index_ = -1;
- length_ = block->dominated_blocks()->length();
- if (dominators != NULL) {
- dominators_ = *dominators;
- }
- }
- bool is_done() { return dominated_index_ >= length_; }
-
- GvnBasicBlockState(GvnBasicBlockState* previous,
- HBasicBlock* block,
- HValueMap* map,
- HSideEffectMap* dominators,
- Zone* zone)
- : previous_(previous), next_(NULL) {
- Initialize(block, map, dominators, true, zone);
- }
-
- GvnBasicBlockState* next_dominated(Zone* zone) {
- dominated_index_++;
- if (dominated_index_ == length_ - 1) {
- // No need to copy the map for the last child in the dominator tree.
- Initialize(block_->dominated_blocks()->at(dominated_index_),
- map(),
- dominators(),
- false,
- zone);
- return this;
- } else if (dominated_index_ < length_) {
- return push(zone,
- block_->dominated_blocks()->at(dominated_index_),
- dominators());
- } else {
- return NULL;
- }
- }
-
- GvnBasicBlockState* push(Zone* zone,
- HBasicBlock* block,
- HSideEffectMap* dominators) {
- if (next_ == NULL) {
- next_ =
- new(zone) GvnBasicBlockState(this, block, map(), dominators, zone);
- } else {
- next_->Initialize(block, map(), dominators, true, zone);
- }
- return next_;
- }
- GvnBasicBlockState* pop() {
- GvnBasicBlockState* result = previous_;
- while (result != NULL && result->is_done()) {
-    TRACE_GVN_2("Backtracking from block B%d to block B%d\n",
- block()->block_id(),
- previous_->block()->block_id())
- result = result->previous_;
- }
- return result;
- }
-
- GvnBasicBlockState* previous_;
- GvnBasicBlockState* next_;
- HBasicBlock* block_;
- HValueMap* map_;
- HSideEffectMap dominators_;
- int dominated_index_;
- int length_;
-};
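
// Editorial sketch of the frame-reuse discipline implemented above:
//
//   push():           reuse this->next_ if an earlier descent allocated
//                     it, otherwise zone-allocate a fresh state;
//   pop():            follow previous_ links, skipping frames that have
//                     exhausted their dominated blocks (is_done());
//   next_dominated(): recycles *this* frame for the last child, which is
//                     why the value map is copied only for earlier ones.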
-
-// This is a recursive traversal of the dominator tree, but it has been
-// turned into a loop to avoid stack overflows.
-// The logical "stack frames" of the recursion are kept in a list of
-// GvnBasicBlockState instances.
-void HGlobalValueNumberer::AnalyzeGraph() {
- HBasicBlock* entry_block = graph_->entry_block();
- HValueMap* entry_map = new(zone()) HValueMap(zone());
- GvnBasicBlockState* current =
- GvnBasicBlockState::CreateEntry(zone(), entry_block, entry_map);
-
- while (current != NULL) {
- HBasicBlock* block = current->block();
- HValueMap* map = current->map();
- HSideEffectMap* dominators = current->dominators();
-
- TRACE_GVN_2("Analyzing block B%d%s\n",
- block->block_id(),
- block->IsLoopHeader() ? " (loop header)" : "");
-
-    // If this is a loop header, kill everything killed by the loop.
- if (block->IsLoopHeader()) {
- map->Kill(loop_side_effects_[block->block_id()]);
- }
-
- // Go through all instructions of the current block.
- HInstruction* instr = block->first();
- while (instr != NULL) {
- HInstruction* next = instr->next();
- GVNFlagSet flags = instr->ChangesFlags();
- if (!flags.IsEmpty()) {
- // Clear all instructions in the map that are affected by side effects.
- // Store instruction as the dominating one for tracked side effects.
- map->Kill(flags);
- dominators->Store(flags, instr);
- TRACE_GVN_2("Instruction %d %s\n", instr->id(),
- *GetGVNFlagsString(flags));
- }
- if (instr->CheckFlag(HValue::kUseGVN)) {
- ASSERT(!instr->HasObservableSideEffects());
- HValue* other = map->Lookup(instr);
- if (other != NULL) {
- ASSERT(instr->Equals(other) && other->Equals(instr));
- TRACE_GVN_4("Replacing value %d (%s) with value %d (%s)\n",
- instr->id(),
- instr->Mnemonic(),
- other->id(),
- other->Mnemonic());
- if (instr->HasSideEffects()) removed_side_effects_ = true;
- instr->DeleteAndReplaceWith(other);
- } else {
- map->Add(instr, zone());
- }
- }
- if (instr->IsLinked() &&
- instr->CheckFlag(HValue::kTrackSideEffectDominators)) {
- for (int i = 0; i < kNumberOfTrackedSideEffects; i++) {
- HValue* other = dominators->at(i);
- GVNFlag changes_flag = HValue::ChangesFlagFromInt(i);
- GVNFlag depends_on_flag = HValue::DependsOnFlagFromInt(i);
- if (instr->DependsOnFlags().Contains(depends_on_flag) &&
- (other != NULL)) {
- TRACE_GVN_5("Side-effect #%d in %d (%s) is dominated by %d (%s)\n",
- i,
- instr->id(),
- instr->Mnemonic(),
- other->id(),
- other->Mnemonic());
- instr->SetSideEffectDominator(changes_flag, other);
- }
- }
- }
- instr = next;
- }
-
- HBasicBlock* dominator_block;
- GvnBasicBlockState* next =
- current->next_in_dominator_tree_traversal(zone(), &dominator_block);
-
- if (next != NULL) {
- HBasicBlock* dominated = next->block();
- HValueMap* successor_map = next->map();
- HSideEffectMap* successor_dominators = next->dominators();
-
-      // Kill everything killed on any path between this block and the
-      // dominated block. We don't have to traverse these paths if the
-      // value map and the dominators list are already empty. If the range
-      // of block ids (block_id, dominated_id) is empty, there are no such
-      // paths.
- if ((!successor_map->IsEmpty() || !successor_dominators->IsEmpty()) &&
- dominator_block->block_id() + 1 < dominated->block_id()) {
- visited_on_paths_.Clear();
- GVNFlagSet side_effects_on_all_paths =
- CollectSideEffectsOnPathsToDominatedBlock(dominator_block,
- dominated);
- successor_map->Kill(side_effects_on_all_paths);
- successor_dominators->Kill(side_effects_on_all_paths);
- }
- }
- current = next;
- }
-}
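
// Editorial sketch: the iteration above computes the same result as this
// (stack-unsafe) recursion over the dominator tree:
//
//   void AnalyzeRec(HBasicBlock* block, HValueMap* map) {
//     if (block->IsLoopHeader()) map->Kill(loop_side_effects_[id]);
//     // ... process block's instructions, killing/adding map entries ...
//     for (each dominated block d of block) {
//       HValueMap* m = (d is the last child) ? map : map->Copy(zone());
//       m->Kill(side effects on all paths from block to d);
//       AnalyzeRec(d, m);
//     }
//   }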
-
-
void HInferRepresentation::AddToWorklist(HValue* current) {
if (current->representation().IsTagged()) return;
if (!current->CheckFlag(HValue::kFlexibleRepresentation)) return;
@@ -3788,23 +2933,6 @@ void HInferRepresentation::Analyze() {
}
// Use the phi reachability information from step 2 to
- // push information about values which can't be converted to integer
- // without deoptimization through the phi use-def chains, avoiding
- // unnecessary deoptimizations later.
- for (int i = 0; i < phi_count; ++i) {
- HPhi* phi = phi_list->at(i);
- bool cti = phi->AllOperandsConvertibleToInteger();
- if (cti) continue;
-
- for (BitVector::Iterator it(connected_phis.at(i));
- !it.Done();
- it.Advance()) {
- HPhi* phi = phi_list->at(it.Current());
- phi->set_is_convertible_to_integer(false);
- }
- }
-
- // Use the phi reachability information from step 2 to
// sum up the non-phi use counts of all connected phis.
for (int i = 0; i < phi_count; ++i) {
HPhi* phi = phi_list->at(i);
@@ -4011,8 +3139,8 @@ void HGraph::InsertRepresentationChangeForUse(HValue* value,
// change instructions for them.
HInstruction* new_value = NULL;
bool is_truncating = use_value->CheckFlag(HValue::kTruncatingToInt32);
- bool deoptimize_on_undefined =
- use_value->CheckFlag(HValue::kDeoptimizeOnUndefined);
+ bool allow_undefined_as_nan =
+ use_value->CheckFlag(HValue::kAllowUndefinedAsNaN);
if (value->IsConstant()) {
HConstant* constant = HConstant::cast(value);
// Try to create a new copy of the constant with the new representation.
@@ -4023,7 +3151,7 @@ void HGraph::InsertRepresentationChangeForUse(HValue* value,
if (new_value == NULL) {
new_value = new(zone()) HChange(value, to,
- is_truncating, deoptimize_on_undefined);
+ is_truncating, allow_undefined_as_nan);
}
new_value->InsertBefore(next);
@@ -4080,9 +3208,8 @@ void HGraph::InsertRepresentationChanges() {
HValue* use = it.value();
Representation input_representation =
use->RequiredInputRepresentation(it.index());
- if ((input_representation.IsInteger32() &&
- !use->CheckFlag(HValue::kTruncatingToInt32)) ||
- input_representation.IsDouble()) {
+ if (!input_representation.IsInteger32() ||
+ !use->CheckFlag(HValue::kTruncatingToInt32)) {
if (FLAG_trace_representation) {
PrintF("#%d Phi is not truncating because of #%d %s\n",
phi->id(), it.value()->id(), it.value()->Mnemonic());
@@ -4130,8 +3257,8 @@ void HGraph::InsertRepresentationChanges() {
void HGraph::RecursivelyMarkPhiDeoptimizeOnUndefined(HPhi* phi) {
- if (phi->CheckFlag(HValue::kDeoptimizeOnUndefined)) return;
- phi->SetFlag(HValue::kDeoptimizeOnUndefined);
+ if (!phi->CheckFlag(HValue::kAllowUndefinedAsNaN)) return;
+ phi->ClearFlag(HValue::kAllowUndefinedAsNaN);
for (int i = 0; i < phi->OperandCount(); ++i) {
HValue* input = phi->OperandAt(i);
if (input->IsPhi()) {
@@ -4151,12 +3278,11 @@ void HGraph::MarkDeoptimizeOnUndefined() {
// if one of its uses has this flag set.
for (int i = 0; i < phi_list()->length(); i++) {
HPhi* phi = phi_list()->at(i);
- if (phi->representation().IsDouble()) {
- for (HUseIterator it(phi->uses()); !it.Done(); it.Advance()) {
- if (it.value()->CheckFlag(HValue::kDeoptimizeOnUndefined)) {
- RecursivelyMarkPhiDeoptimizeOnUndefined(phi);
- break;
- }
+ for (HUseIterator it(phi->uses()); !it.Done(); it.Advance()) {
+ HValue* use_value = it.value();
+ if (!use_value->CheckFlag(HValue::kAllowUndefinedAsNaN)) {
+ RecursivelyMarkPhiDeoptimizeOnUndefined(phi);
+ break;
}
}
}
@@ -4399,7 +3525,9 @@ void HGraph::ComputeMinusZeroChecks() {
Representation from = change->value()->representation();
ASSERT(from.Equals(change->from()));
if (from.IsInteger32()) {
- ASSERT(change->to().IsTagged() || change->to().IsDouble());
+ ASSERT(change->to().IsTagged() ||
+ change->to().IsDouble() ||
+ change->to().IsSmi());
ASSERT(visited.IsEmpty());
PropagateMinusZeroChecks(change->value(), &visited);
visited.Clear();
@@ -4414,11 +3542,9 @@ void HGraph::ComputeMinusZeroChecks() {
// a (possibly inlined) function.
FunctionState::FunctionState(HOptimizedGraphBuilder* owner,
CompilationInfo* info,
- TypeFeedbackOracle* oracle,
InliningKind inlining_kind)
: owner_(owner),
compilation_info_(info),
- oracle_(oracle),
call_context_(NULL),
inlining_kind_(inlining_kind),
function_return_(NULL),
@@ -4431,18 +3557,16 @@ FunctionState::FunctionState(HOptimizedGraphBuilder* owner,
if (owner->ast_context()->IsTest()) {
HBasicBlock* if_true = owner->graph()->CreateBasicBlock();
HBasicBlock* if_false = owner->graph()->CreateBasicBlock();
- if_true->MarkAsInlineReturnTarget();
- if_false->MarkAsInlineReturnTarget();
+ if_true->MarkAsInlineReturnTarget(owner->current_block());
+ if_false->MarkAsInlineReturnTarget(owner->current_block());
TestContext* outer_test_context = TestContext::cast(owner->ast_context());
Expression* cond = outer_test_context->condition();
- TypeFeedbackOracle* outer_oracle = outer_test_context->oracle();
// The AstContext constructor pushed on the context stack. This newed
// instance is the reason that AstContext can't be BASE_EMBEDDED.
- test_context_ =
- new TestContext(owner, cond, outer_oracle, if_true, if_false);
+ test_context_ = new TestContext(owner, cond, if_true, if_false);
} else {
function_return_ = owner->graph()->CreateBasicBlock();
- function_return()->MarkAsInlineReturnTarget();
+ function_return()->MarkAsInlineReturnTarget(owner->current_block());
}
// Set this after possibly allocating a new TestContext above.
call_context_ = owner->ast_context();
@@ -4673,8 +3797,7 @@ void TestContext::BuildBranch(HValue* value) {
}
HBasicBlock* empty_true = builder->graph()->CreateBasicBlock();
HBasicBlock* empty_false = builder->graph()->CreateBasicBlock();
- TypeFeedbackId test_id = condition()->test_id();
- ToBooleanStub::Types expected(oracle()->ToBooleanTypes(test_id));
+ ToBooleanStub::Types expected(condition()->to_boolean_types());
HBranch* test = new(zone()) HBranch(value, empty_true, empty_false, expected);
builder->current_block()->Finish(test);
@@ -4729,7 +3852,7 @@ void HOptimizedGraphBuilder::VisitForTypeOf(Expression* expr) {
void HOptimizedGraphBuilder::VisitForControl(Expression* expr,
HBasicBlock* true_block,
HBasicBlock* false_block) {
- TestContext for_test(this, expr, oracle(), true_block, false_block);
+ TestContext for_test(this, expr, true_block, false_block);
Visit(expr);
}
@@ -4862,6 +3985,11 @@ bool HGraph::Optimize(SmartArrayPointer<char>* bailout_reason) {
Verify(true);
#endif
+ if (FLAG_analyze_environment_liveness) {
+ EnvironmentSlotLivenessAnalyzer esla(this);
+ esla.AnalyzeAndTrim();
+ }
+
PropagateDeoptimizingMark();
if (!CheckConstPhiUses()) {
*bailout_reason = SmartArrayPointer<char>(StrDup(
@@ -4999,7 +4127,7 @@ class BoundsCheckKey : public ZoneObject {
static BoundsCheckKey* Create(Zone* zone,
HBoundsCheck* check,
int32_t* offset) {
- if (!check->index()->representation().IsInteger32()) return NULL;
+ if (!check->index()->representation().IsSmiOrInteger32()) return NULL;
HValue* index_base = NULL;
HConstant* constant = NULL;
@@ -5095,7 +4223,7 @@ class BoundsCheckBbData: public ZoneObject {
// returns false, otherwise it returns true.
bool CoverCheck(HBoundsCheck* new_check,
int32_t new_offset) {
- ASSERT(new_check->index()->representation().IsInteger32());
+ ASSERT(new_check->index()->representation().IsSmiOrInteger32());
bool keep_new_check = false;
if (new_offset > upper_offset_) {
@@ -5204,8 +4332,8 @@ class BoundsCheckBbData: public ZoneObject {
HValue* index_context = IndexContext(*add, check);
if (index_context == NULL) return false;
- HConstant* new_constant = new(BasicBlock()->zone())
- HConstant(new_offset, Representation::Integer32());
+ HConstant* new_constant = new(BasicBlock()->zone()) HConstant(
+ new_offset, representation);
if (*add == NULL) {
new_constant->InsertBefore(check);
(*add) = HAdd::New(
@@ -5337,7 +4465,7 @@ void HGraph::EliminateRedundantBoundsChecks() {
static void DehoistArrayIndex(ArrayInstructionInterface* array_operation) {
HValue* index = array_operation->GetKey()->ActualValue();
- if (!index->representation().IsInteger32()) return;
+ if (!index->representation().IsSmiOrInteger32()) return;
HConstant* constant;
HValue* subexpression;
@@ -5449,7 +4577,6 @@ void HGraph::MarkLive(HValue* ref, HValue* instr, ZoneList<HValue*>* worklist) {
if (FLAG_trace_dead_code_elimination) {
HeapStringAllocator allocator;
StringStream stream(&allocator);
- ALLOW_HANDLE_DEREF(isolate(), "debug mode printing");
if (ref != NULL) {
ref->PrintTo(&stream);
} else {
@@ -5738,9 +4865,8 @@ void HOptimizedGraphBuilder::VisitContinueStatement(
ASSERT(current_block() != NULL);
ASSERT(current_block()->HasPredecessor());
int drop_extra = 0;
- HBasicBlock* continue_block = break_scope()->Get(stmt->target(),
- CONTINUE,
- &drop_extra);
+ HBasicBlock* continue_block = break_scope()->Get(
+ stmt->target(), BreakAndContinueScope::CONTINUE, &drop_extra);
Drop(drop_extra);
current_block()->Goto(continue_block);
set_current_block(NULL);
@@ -5752,9 +4878,8 @@ void HOptimizedGraphBuilder::VisitBreakStatement(BreakStatement* stmt) {
ASSERT(current_block() != NULL);
ASSERT(current_block()->HasPredecessor());
int drop_extra = 0;
- HBasicBlock* break_block = break_scope()->Get(stmt->target(),
- BREAK,
- &drop_extra);
+ HBasicBlock* break_block = break_scope()->Get(
+ stmt->target(), BreakAndContinueScope::BREAK, &drop_extra);
Drop(drop_extra);
current_block()->Goto(break_block);
set_current_block(NULL);
@@ -5845,6 +4970,7 @@ void HOptimizedGraphBuilder::VisitSwitchStatement(SwitchStatement* stmt) {
ASSERT(!HasStackOverflow());
ASSERT(current_block() != NULL);
ASSERT(current_block()->HasPredecessor());
+
// We only optimize switch statements with smi-literal smi comparisons,
// with a bounded number of clauses.
const int kCaseClauseLimit = 128;
@@ -5854,6 +4980,11 @@ void HOptimizedGraphBuilder::VisitSwitchStatement(SwitchStatement* stmt) {
return Bailout("SwitchStatement: too many clauses");
}
+ ASSERT(stmt->switch_type() != SwitchStatement::UNKNOWN_SWITCH);
+ if (stmt->switch_type() == SwitchStatement::GENERIC_SWITCH) {
+ return Bailout("SwitchStatement: mixed or non-literal switch labels");
+ }
+
HValue* context = environment()->LookupContext();
CHECK_ALIVE(VisitForValue(stmt->tag()));
@@ -5861,34 +4992,11 @@ void HOptimizedGraphBuilder::VisitSwitchStatement(SwitchStatement* stmt) {
HValue* tag_value = Pop();
HBasicBlock* first_test_block = current_block();
- SwitchType switch_type = UNKNOWN_SWITCH;
-
- // 1. Extract clause type
- for (int i = 0; i < clause_count; ++i) {
- CaseClause* clause = clauses->at(i);
- if (clause->is_default()) continue;
-
- if (switch_type == UNKNOWN_SWITCH) {
- if (clause->label()->IsSmiLiteral()) {
- switch_type = SMI_SWITCH;
- } else if (clause->label()->IsStringLiteral()) {
- switch_type = STRING_SWITCH;
- } else {
- return Bailout("SwitchStatement: non-literal switch label");
- }
- } else if ((switch_type == STRING_SWITCH &&
- !clause->label()->IsStringLiteral()) ||
- (switch_type == SMI_SWITCH &&
- !clause->label()->IsSmiLiteral())) {
- return Bailout("SwitchStatement: mixed label types are not supported");
- }
- }
-
HUnaryControlInstruction* string_check = NULL;
HBasicBlock* not_string_block = NULL;
  // Test the switch's tag value if all clauses are string literals.
- if (switch_type == STRING_SWITCH) {
+ if (stmt->switch_type() == SwitchStatement::STRING_SWITCH) {
string_check = new(zone()) HIsStringAndBranch(tag_value);
first_test_block = graph()->CreateBasicBlock();
not_string_block = graph()->CreateBasicBlock();
@@ -5900,7 +5008,7 @@ void HOptimizedGraphBuilder::VisitSwitchStatement(SwitchStatement* stmt) {
set_current_block(first_test_block);
}
- // 2. Build all the tests, with dangling true branches
+ // 1. Build all the tests, with dangling true branches
BailoutId default_id = BailoutId::None();
for (int i = 0; i < clause_count; ++i) {
CaseClause* clause = clauses->at(i);
@@ -5908,9 +5016,6 @@ void HOptimizedGraphBuilder::VisitSwitchStatement(SwitchStatement* stmt) {
default_id = clause->EntryId();
continue;
}
- if (switch_type == SMI_SWITCH) {
- clause->RecordTypeFeedback(oracle());
- }
// Generate a compare and branch.
CHECK_ALIVE(VisitForValue(clause->label()));
@@ -5921,13 +5026,9 @@ void HOptimizedGraphBuilder::VisitSwitchStatement(SwitchStatement* stmt) {
HControlInstruction* compare;
- if (switch_type == SMI_SWITCH) {
+ if (stmt->switch_type() == SwitchStatement::SMI_SWITCH) {
if (!clause->IsSmiCompare()) {
-      // Finish with deoptimize and add uses of environment values to
- // account for invisible uses.
- current_block()->FinishExitWithDeoptimization(HDeoptimize::kUseAll);
- set_current_block(NULL);
- break;
+ AddSoftDeoptimize();
}
HCompareIDAndBranch* compare_ =
@@ -5951,7 +5052,7 @@ void HOptimizedGraphBuilder::VisitSwitchStatement(SwitchStatement* stmt) {
}
// Save the current block to use for the default or to join with the
- // exit. This block is NULL if we deoptimized.
+ // exit.
HBasicBlock* last_block = current_block();
if (not_string_block != NULL) {
@@ -5959,7 +5060,7 @@ void HOptimizedGraphBuilder::VisitSwitchStatement(SwitchStatement* stmt) {
last_block = CreateJoin(last_block, not_string_block, join_id);
}
- // 3. Loop over the clauses and the linked list of tests in lockstep,
+ // 2. Loop over the clauses and the linked list of tests in lockstep,
// translating the clause bodies.
HBasicBlock* curr_test_block = first_test_block;
HBasicBlock* fall_through_block = NULL;
@@ -6246,7 +5347,7 @@ void HOptimizedGraphBuilder::VisitForInStatement(ForInStatement* stmt) {
return Bailout("ForInStatement optimization is disabled");
}
- if (!oracle()->IsForInFastCase(stmt)) {
+ if (stmt->for_in_type() != ForInStatement::FAST_FOR_IN) {
return Bailout("ForInStatement is not fast case");
}
@@ -6272,8 +5373,7 @@ void HOptimizedGraphBuilder::VisitForInStatement(ForInStatement* stmt) {
HInstruction* enum_length = AddInstruction(new(zone()) HMapEnumLength(map));
- HInstruction* start_index = AddInstruction(new(zone()) HConstant(
- Handle<Object>(Smi::FromInt(0), isolate()), Representation::Integer32()));
+ HInstruction* start_index = AddInstruction(new(zone()) HConstant(0));
Push(map);
Push(array);
@@ -6359,6 +5459,14 @@ void HOptimizedGraphBuilder::VisitForInStatement(ForInStatement* stmt) {
}
+void HOptimizedGraphBuilder::VisitForOfStatement(ForOfStatement* stmt) {
+ ASSERT(!HasStackOverflow());
+ ASSERT(current_block() != NULL);
+ ASSERT(current_block()->HasPredecessor());
+ return Bailout("ForOfStatement");
+}
+
+
void HOptimizedGraphBuilder::VisitTryCatchStatement(TryCatchStatement* stmt) {
ASSERT(!HasStackOverflow());
ASSERT(current_block() != NULL);
@@ -6387,8 +5495,7 @@ void HOptimizedGraphBuilder::VisitDebuggerStatement(DebuggerStatement* stmt) {
static Handle<SharedFunctionInfo> SearchSharedFunctionInfo(
Code* unoptimized_code, FunctionLiteral* expr) {
int start_position = expr->start_position();
- RelocIterator it(unoptimized_code);
- for (;!it.done(); it.next()) {
+ for (RelocIterator it(unoptimized_code); !it.done(); it.next()) {
RelocInfo* rinfo = it.rinfo();
if (rinfo->rmode() != RelocInfo::EMBEDDED_OBJECT) continue;
Object* obj = rinfo->target_object();
@@ -6409,8 +5516,7 @@ void HOptimizedGraphBuilder::VisitFunctionLiteral(FunctionLiteral* expr) {
ASSERT(current_block() != NULL);
ASSERT(current_block()->HasPredecessor());
Handle<SharedFunctionInfo> shared_info =
- SearchSharedFunctionInfo(info()->shared_info()->code(),
- expr);
+ SearchSharedFunctionInfo(info()->shared_info()->code(), expr);
if (shared_info.is_null()) {
shared_info = Compiler::BuildFunctionInfo(expr, info()->script());
}
@@ -6553,7 +5659,7 @@ void HOptimizedGraphBuilder::VisitVariableProxy(VariableProxy* expr) {
case Variable::PARAMETER:
case Variable::LOCAL: {
- HValue* value = environment()->Lookup(variable);
+ HValue* value = LookupAndMakeLive(variable);
if (value == graph()->GetConstantHole()) {
ASSERT(IsDeclaredVariableMode(variable->mode()) &&
variable->mode() != VAR);
@@ -6690,6 +5796,12 @@ static bool IsFastLiteral(Handle<JSObject> boilerplate,
int* max_properties,
int* data_size,
int* pointer_size) {
+ if (boilerplate->map()->is_deprecated()) {
+ Handle<Object> result =
+ JSObject::TryMigrateInstance(boilerplate);
+ if (result->IsSmi()) return false;
+ }
+
ASSERT(max_depth >= 0 && *max_properties >= 0);
if (max_depth == 0) return false;
@@ -6836,7 +5948,6 @@ void HOptimizedGraphBuilder::VisitObjectLiteral(ObjectLiteral* expr) {
case ObjectLiteral::Property::COMPUTED:
if (key->handle()->IsInternalizedString()) {
if (property->emit_store()) {
- property->RecordTypeFeedback(oracle());
CHECK_ALIVE(VisitForValue(value));
HValue* value = Pop();
Handle<Map> map = property->GetReceiverType();
@@ -7007,18 +6118,11 @@ void HOptimizedGraphBuilder::VisitArrayLiteral(ArrayLiteral* expr) {
elements = AddLoadElements(literal);
- HValue* key = AddInstruction(
- new(zone()) HConstant(Handle<Object>(Smi::FromInt(i), isolate()),
- Representation::Integer32()));
+ HValue* key = AddInstruction(new(zone()) HConstant(i));
switch (boilerplate_elements_kind) {
case FAST_SMI_ELEMENTS:
case FAST_HOLEY_SMI_ELEMENTS:
- if (!value->type().IsSmi()) {
- // Smi-only arrays need a smi check.
- AddInstruction(new(zone()) HCheckSmi(value));
- // Fall through.
- }
case FAST_ELEMENTS:
case FAST_HOLEY_ELEMENTS:
case FAST_DOUBLE_ELEMENTS:
@@ -7064,20 +6168,6 @@ static bool ComputeLoadStoreField(Handle<Map> type,
}
-static int ComputeLoadStoreFieldIndex(Handle<Map> type,
- LookupResult* lookup) {
- ASSERT(lookup->IsField() || lookup->IsTransitionToField(*type));
- if (lookup->IsField()) {
- return lookup->GetLocalFieldIndexFromMap(*type);
- } else {
- Map* transition = lookup->GetTransitionMapFromMap(*type);
- int descriptor = transition->LastAdded();
- int index = transition->instance_descriptors()->GetFieldIndex(descriptor);
- return index - type->inobject_properties();
- }
-}
-
-
static Representation ComputeLoadStoreRepresentation(Handle<Map> type,
LookupResult* lookup) {
if (lookup->IsField()) {
@@ -7142,43 +6232,37 @@ HInstruction* HOptimizedGraphBuilder::BuildStoreNamedField(
zone()));
}
- int index = ComputeLoadStoreFieldIndex(map, lookup);
- bool is_in_object = index < 0;
+ HObjectAccess field_access = HObjectAccess::ForField(map, lookup, name);
Representation representation = ComputeLoadStoreRepresentation(map, lookup);
- int offset = index * kPointerSize;
- if (index < 0) {
- // Negative property indices are in-object properties, indexed
- // from the end of the fixed part of the object.
- offset += map->instance_size();
- } else {
- offset += FixedArray::kHeaderSize;
- }
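  // Editorial note, with hypothetical numbers: under the removed scheme,
  // with kPointerSize == 4 and map->instance_size() == 16, index -2 named
  // an in-object field at offset -2 * 4 + 16 == 8, while index 1 named a
  // slot in the out-of-object properties array at offset
  // 1 * 4 + FixedArray::kHeaderSize. HObjectAccess::ForField (above) now
  // encapsulates this computation.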
bool transition_to_field = lookup->IsTransitionToField(*map);
+
+  HStoreNamedField* instr;
if (FLAG_track_double_fields && representation.IsDouble()) {
if (transition_to_field) {
+ // The store requires a mutable HeapNumber to be allocated.
NoObservableSideEffectsScope no_side_effects(this);
HInstruction* heap_number_size = AddInstruction(new(zone()) HConstant(
HeapNumber::kSize, Representation::Integer32()));
HInstruction* double_box = AddInstruction(new(zone()) HAllocate(
environment()->LookupContext(), heap_number_size,
HType::HeapNumber(), HAllocate::CAN_ALLOCATE_IN_NEW_SPACE));
- BuildStoreMap(double_box, isolate()->factory()->heap_number_map());
- AddInstruction(new(zone()) HStoreNamedField(
- double_box, name, value, true,
- Representation::Double(), HeapNumber::kValueOffset));
- value = double_box;
- representation = Representation::Tagged();
+ AddStoreMapConstant(double_box, isolate()->factory()->heap_number_map());
+ AddStore(double_box, HObjectAccess::ForHeapNumberValue(),
+ value, Representation::Double());
+ instr = new(zone()) HStoreNamedField(object, field_access, double_box);
} else {
- HInstruction* double_box = AddInstruction(new(zone()) HLoadNamedField(
- object, is_in_object, Representation::Tagged(), offset));
+ // Already holds a HeapNumber; load the box and write its value field.
+ HInstruction* double_box = AddLoad(object, field_access);
double_box->set_type(HType::HeapNumber());
- return new(zone()) HStoreNamedField(
- double_box, name, value, true,
- Representation::Double(), HeapNumber::kValueOffset);
+ instr = new(zone()) HStoreNamedField(double_box,
+ HObjectAccess::ForHeapNumberValue(), value, Representation::Double());
}
+ } else {
+ // This is a non-double store.
+ instr = new(zone()) HStoreNamedField(
+ object, field_access, value, representation);
}
- HStoreNamedField* instr = new(zone()) HStoreNamedField(
- object, name, value, is_in_object, representation, offset);
+
if (transition_to_field) {
Handle<Map> transition(lookup->GetTransitionMapFromMap(*map));
instr->set_transition(transition);
@@ -7248,9 +6332,10 @@ bool HOptimizedGraphBuilder::HandlePolymorphicArrayLengthLoad(
BuildCheckNonSmi(object);
HInstruction* typecheck =
- AddInstruction(HCheckMaps::New(object, types, zone()));
- HInstruction* instr =
- HLoadNamedField::NewArrayLength(zone(), object, typecheck);
+ AddInstruction(HCheckMaps::New(object, types, zone()));
+ HInstruction* instr = new(zone())
+ HLoadNamedField(object, HObjectAccess::ForArrayLength(), typecheck);
+
instr->set_position(expr->position());
ast_context()->ReturnInstruction(instr, expr->id());
return true;
@@ -7270,53 +6355,42 @@ void HOptimizedGraphBuilder::HandlePolymorphicLoadNamedField(Property* expr,
  // Use a monomorphic load if the property lookup yields the same field
  // index for all maps. Requires a special map check on the set of all
  // handled maps.
HInstruction* instr = NULL;
- if (types->length() > 0 && types->length() <= kMaxLoadPolymorphism) {
- LookupResult lookup(isolate());
- int previous_field_offset = 0;
- bool previous_field_is_in_object = false;
- Representation representation = Representation::None();
- int count;
- for (count = 0; count < types->length(); ++count) {
- Handle<Map> map = types->at(count);
- if (!ComputeLoadStoreField(map, name, &lookup, false)) break;
-
- int index = ComputeLoadStoreFieldIndex(map, &lookup);
- Representation new_representation =
- ComputeLoadStoreRepresentation(map, &lookup);
- bool is_in_object = index < 0;
- int offset = index * kPointerSize;
-
- if (index < 0) {
- // Negative property indices are in-object properties, indexed
- // from the end of the fixed part of the object.
- offset += map->instance_size();
- } else {
- offset += FixedArray::kHeaderSize;
- }
-
- if (count == 0) {
- previous_field_offset = offset;
- previous_field_is_in_object = is_in_object;
- representation = new_representation;
- } else if (offset != previous_field_offset ||
- is_in_object != previous_field_is_in_object ||
- (FLAG_track_fields &&
- !representation.IsCompatibleForLoad(new_representation))) {
- break;
- }
-
- representation = representation.generalize(new_representation);
- }
-
- if (count == types->length()) {
- AddInstruction(HCheckMaps::New(object, types, zone()));
- instr = DoBuildLoadNamedField(
- object, previous_field_is_in_object,
- representation, previous_field_offset);
+ LookupResult lookup(isolate());
+ int count;
+ Representation representation = Representation::None();
+ HObjectAccess access = HObjectAccess::ForMap(); // initial value unused.
+ for (count = 0;
+ count < types->length() && count < kMaxLoadPolymorphism;
+ ++count) {
+ Handle<Map> map = types->at(count);
+ if (!ComputeLoadStoreField(map, name, &lookup, false)) break;
+
+ HObjectAccess new_access = HObjectAccess::ForField(map, &lookup, name);
+ Representation new_representation =
+ ComputeLoadStoreRepresentation(map, &lookup);
+
+ if (count == 0) {
+ // First time through the loop; set access and representation.
+ access = new_access;
+ representation = new_representation;
+ } else if (!representation.IsCompatibleForLoad(new_representation)) {
+ // Representations did not match.
+ break;
+ } else if (access.offset() != new_access.offset()) {
+ // Offsets did not match.
+ break;
+ } else if (access.IsInobject() != new_access.IsInobject()) {
+ // In-objectness did not match.
+ break;
}
}
- if (instr == NULL) {
+ if (count == types->length()) {
+ // Everything matched; can use monomorphic load.
+ AddInstruction(HCheckMaps::New(object, types, zone()));
+ instr = BuildLoadNamedField(object, access, representation);
+ } else {
+ // Something did not match; must use a polymorphic load.
HValue* context = environment()->LookupContext();
instr = new(zone()) HLoadNamedFieldPolymorphic(
context, object, types, name, zone());
@@ -7407,7 +6481,6 @@ void HOptimizedGraphBuilder::HandlePolymorphicStoreNamedField(
void HOptimizedGraphBuilder::HandlePropertyAssignment(Assignment* expr) {
Property* prop = expr->target()->AsProperty();
ASSERT(prop != NULL);
- expr->RecordTypeFeedback(oracle(), zone());
CHECK_ALIVE(VisitForValue(prop->obj()));
if (prop->key()->IsPropertyName()) {
@@ -7553,7 +6626,7 @@ void HOptimizedGraphBuilder::HandleCompoundAssignment(Assignment* expr) {
if (var->mode() == CONST) {
return Bailout("unsupported const compound assignment");
}
- Bind(var, Top());
+ BindIfLive(var, Top());
break;
case Variable::CONTEXT: {
@@ -7605,8 +6678,6 @@ void HOptimizedGraphBuilder::HandleCompoundAssignment(Assignment* expr) {
return ast_context()->ReturnValue(Pop());
} else if (prop != NULL) {
- prop->RecordTypeFeedback(oracle(), zone());
-
if (prop->key()->IsPropertyName()) {
// Named property.
CHECK_ALIVE(VisitForValue(prop->obj()));
@@ -7688,7 +6759,6 @@ void HOptimizedGraphBuilder::HandleCompoundAssignment(Assignment* expr) {
Push(load);
if (has_side_effects) AddSimulate(prop->LoadId(), REMOVABLE_SIMULATE);
-
CHECK_ALIVE(VisitForValue(expr->value()));
HValue* right = Pop();
HValue* left = Pop();
@@ -7699,7 +6769,6 @@ void HOptimizedGraphBuilder::HandleCompoundAssignment(Assignment* expr) {
AddSimulate(operation->id(), REMOVABLE_SIMULATE);
}
- expr->RecordTypeFeedback(oracle(), zone());
HandleKeyedElementAccess(obj, key, instr, expr, expr->AssignmentId(),
RelocInfo::kNoPosition,
true, // is_store
@@ -7782,7 +6851,7 @@ void HOptimizedGraphBuilder::VisitAssignment(Assignment* expr) {
// permitted.
CHECK_ALIVE(VisitForValue(expr->value(), ARGUMENTS_ALLOWED));
HValue* value = Pop();
- Bind(var, value);
+ BindIfLive(var, value);
return ast_context()->ReturnValue(value);
}
@@ -7873,40 +6942,22 @@ void HOptimizedGraphBuilder::VisitThrow(Throw* expr) {
}
-HLoadNamedField* HOptimizedGraphBuilder::BuildLoadNamedField(
+HLoadNamedField* HGraphBuilder::BuildLoadNamedField(
HValue* object,
- Handle<Map> map,
- LookupResult* lookup) {
- int index = lookup->GetLocalFieldIndexFromMap(*map);
- // Negative property indices are in-object properties, indexed from the end of
- // the fixed part of the object. Non-negative property indices are in the
- // properties array.
- int inobject = index < 0;
- Representation representation = lookup->representation();
- int offset = inobject
- ? index * kPointerSize + map->instance_size()
- : index * kPointerSize + FixedArray::kHeaderSize;
- return DoBuildLoadNamedField(object, inobject, representation, offset);
-}
-
-
-HLoadNamedField* HGraphBuilder::DoBuildLoadNamedField(
- HValue* object,
- bool inobject,
- Representation representation,
- int offset) {
+ HObjectAccess access,
+ Representation representation) {
bool load_double = false;
if (representation.IsDouble()) {
representation = Representation::Tagged();
load_double = FLAG_track_double_fields;
}
HLoadNamedField* field =
- new(zone()) HLoadNamedField(object, inobject, representation, offset);
+ new(zone()) HLoadNamedField(object, access, NULL, representation);
if (load_double) {
AddInstruction(field);
field->set_type(HType::HeapNumber());
- return new(zone()) HLoadNamedField(
- field, true, Representation::Double(), HeapNumber::kValueOffset);
+ return new(zone()) HLoadNamedField(field,
+ HObjectAccess::ForHeapNumberValue(), NULL, Representation::Double());
}
return field;
}
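
// Editorial sketch of the two-level load built above when double fields
// are tracked: the first HLoadNamedField yields the tagged HeapNumber box
// and the second reads its value slot unboxed:
//
//   box   = LoadNamedField(object, access)             // tagged box
//   value = LoadNamedField(box, ForHeapNumberValue())  // double payload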
@@ -7947,7 +6998,8 @@ HInstruction* HOptimizedGraphBuilder::BuildLoadNamedMonomorphic(
if (name->Equals(isolate()->heap()->length_string())) {
if (map->instance_type() == JS_ARRAY_TYPE) {
AddCheckMapsWithTransitions(object, map);
- return HLoadNamedField::NewArrayLength(zone(), object, object);
+ return new(zone()) HLoadNamedField(object,
+ HObjectAccess::ForArrayLength());
}
}
@@ -7955,7 +7007,9 @@ HInstruction* HOptimizedGraphBuilder::BuildLoadNamedMonomorphic(
map->LookupDescriptor(NULL, *name, &lookup);
if (lookup.IsField()) {
AddCheckMap(object, map);
- return BuildLoadNamedField(object, map, &lookup);
+ return BuildLoadNamedField(object,
+ HObjectAccess::ForField(map, &lookup, name),
+ ComputeLoadStoreRepresentation(map, &lookup));
}
// Handle a load of a constant known function.
@@ -7974,9 +7028,11 @@ HInstruction* HOptimizedGraphBuilder::BuildLoadNamedMonomorphic(
AddCheckMap(object, map);
AddInstruction(
new(zone()) HCheckPrototypeMaps(prototype, holder, zone()));
- HValue* holder_value = AddInstruction(
- new(zone()) HConstant(holder, Representation::Tagged()));
- return BuildLoadNamedField(holder_value, holder_map, &lookup);
+ HValue* holder_value = AddInstruction(new(zone())
+ HConstant(holder, Representation::Tagged()));
+ return BuildLoadNamedField(holder_value,
+ HObjectAccess::ForField(holder_map, &lookup, name),
+ ComputeLoadStoreRepresentation(map, &lookup));
}
// Handle a load of a constant function somewhere in the prototype chain.
@@ -8170,6 +7226,7 @@ HValue* HOptimizedGraphBuilder::HandlePolymorphicElementAccess(
// If only one map is left after transitioning, handle this case
// monomorphically.
+ ASSERT(num_untransitionable_maps >= 1);
if (num_untransitionable_maps == 1) {
HInstruction* instr = NULL;
if (untransitionable_map->has_slow_elements_kind()) {
@@ -8252,11 +7309,11 @@ HValue* HOptimizedGraphBuilder::HandlePolymorphicElementAccess(
current_block()->Finish(typecheck);
set_current_block(if_jsarray);
- HInstruction* length;
- length = AddInstruction(
- HLoadNamedField::NewArrayLength(zone(), object, typecheck,
- HType::Smi()));
- checked_key = AddBoundsCheck(key, length, ALLOW_SMI_KEY);
+ HInstruction* length = AddLoad(object, HObjectAccess::ForArrayLength(),
+ typecheck, Representation::Smi());
+ length->set_type(HType::Smi());
+
+ checked_key = AddBoundsCheck(key, length);
access = AddInstruction(BuildFastElementAccess(
elements, checked_key, val, elements_kind_branch,
elements_kind, is_store, NEVER_RETURN_HOLE, STANDARD_STORE));
@@ -8274,7 +7331,7 @@ HValue* HOptimizedGraphBuilder::HandlePolymorphicElementAccess(
set_current_block(if_fastobject);
length = AddInstruction(new(zone()) HFixedArrayBaseLength(elements));
- checked_key = AddBoundsCheck(key, length, ALLOW_SMI_KEY);
+ checked_key = AddBoundsCheck(key, length);
access = AddInstruction(BuildFastElementAccess(
elements, checked_key, val, elements_kind_branch,
elements_kind, is_store, NEVER_RETURN_HOLE, STANDARD_STORE));
@@ -8412,9 +7469,7 @@ bool HOptimizedGraphBuilder::TryArgumentsAccess(Property* expr) {
// Number of arguments without receiver.
int argument_count = environment()->
arguments_environment()->parameter_count() - 1;
- result = new(zone()) HConstant(
- Handle<Object>(Smi::FromInt(argument_count), isolate()),
- Representation::Integer32());
+ result = new(zone()) HConstant(argument_count);
}
} else {
Push(graph()->GetArgumentsObject());
@@ -8437,8 +7492,7 @@ bool HOptimizedGraphBuilder::TryArgumentsAccess(Property* expr) {
int argument_count = environment()->
arguments_environment()->parameter_count() - 1;
HInstruction* length = AddInstruction(new(zone()) HConstant(
- Handle<Object>(Smi::FromInt(argument_count), isolate()),
- Representation::Integer32()));
+ argument_count));
HInstruction* checked_key = AddBoundsCheck(key, length);
result = new(zone()) HAccessArgumentsAt(elements, length, checked_key);
}
@@ -8452,7 +7506,6 @@ void HOptimizedGraphBuilder::VisitProperty(Property* expr) {
ASSERT(!HasStackOverflow());
ASSERT(current_block() != NULL);
ASSERT(current_block()->HasPredecessor());
- expr->RecordTypeFeedback(oracle(), zone());
if (TryArgumentsAccess(expr)) return;
@@ -8943,19 +7996,15 @@ bool HOptimizedGraphBuilder::TryInline(CallKind call_kind,
// After this point, we've made a decision to inline this function (so
// TryInline should always return true).
- // Save the pending call context and type feedback oracle. Set up new ones
- // for the inlined function.
+ // Type-check the inlined function.
ASSERT(target_shared->has_deoptimization_support());
- Handle<Code> unoptimized_code(target_shared->code());
- TypeFeedbackOracle target_oracle(
- unoptimized_code,
- Handle<Context>(target->context()->native_context()),
- isolate(),
- zone());
+ AstTyper::Type(&target_info);
+
+ // Save the pending call context. Set up new one for the inlined function.
// The function state is new-allocated because we need to delete it
// in two different places.
FunctionState* target_state = new FunctionState(
- this, &target_info, &target_oracle, inlining_kind);
+ this, &target_info, inlining_kind);
HConstant* undefined = graph()->GetConstantUndefined();
bool undefined_receiver = HEnvironment::UseUndefinedReceiver(
@@ -9002,7 +8051,8 @@ bool HOptimizedGraphBuilder::TryInline(CallKind call_kind,
function_state()->inlining_kind(),
function->scope()->arguments(),
arguments_values,
- undefined_receiver);
+ undefined_receiver,
+ zone());
function_state()->set_entry(enter_inlined);
AddInstruction(enter_inlined);
@@ -9029,6 +8079,7 @@ bool HOptimizedGraphBuilder::TryInline(CallKind call_kind,
// Update inlined nodes count.
inlined_count_ += nodes_added;
+ Handle<Code> unoptimized_code(target_shared->code());
ASSERT(unoptimized_code->kind() == Code::FUNCTION);
Handle<TypeFeedbackInfo> type_info(
TypeFeedbackInfo::cast(unoptimized_code->type_feedback_info()));
@@ -9081,6 +8132,8 @@ bool HOptimizedGraphBuilder::TryInline(CallKind call_kind,
HBasicBlock* if_true = inlined_test_context()->if_true();
HBasicBlock* if_false = inlined_test_context()->if_false();
+ HEnterInlined* entry = function_state()->entry();
+
// Pop the return test context from the expression context stack.
ASSERT(ast_context() == inlined_test_context());
ClearInlinedTestContext();
@@ -9088,11 +8141,13 @@ bool HOptimizedGraphBuilder::TryInline(CallKind call_kind,
// Forward to the real test context.
if (if_true->HasPredecessor()) {
+ entry->RegisterReturnTarget(if_true, zone());
if_true->SetJoinId(ast_id);
HBasicBlock* true_target = TestContext::cast(ast_context())->if_true();
if_true->Goto(true_target, function_state());
}
if (if_false->HasPredecessor()) {
+ entry->RegisterReturnTarget(if_false, zone());
if_false->SetJoinId(ast_id);
HBasicBlock* false_target = TestContext::cast(ast_context())->if_false();
if_false->Goto(false_target, function_state());
@@ -9101,6 +8156,7 @@ bool HOptimizedGraphBuilder::TryInline(CallKind call_kind,
return true;
} else if (function_return()->HasPredecessor()) {
+ function_state()->entry()->RegisterReturnTarget(function_return(), zone());
function_return()->SetJoinId(ast_id);
set_current_block(function_return());
} else {
@@ -9245,7 +8301,8 @@ bool HOptimizedGraphBuilder::TryInlineBuiltinMethodCall(
HValue* context = environment()->LookupContext();
ASSERT(!expr->holder().is_null());
AddInstruction(new(zone()) HCheckPrototypeMaps(
- oracle()->GetPrototypeForPrimitiveCheck(STRING_CHECK),
+ Call::GetPrototypeForPrimitiveCheck(STRING_CHECK,
+ expr->holder()->GetIsolate()),
expr->holder(),
zone()));
HInstruction* char_code =
@@ -9311,10 +8368,8 @@ bool HOptimizedGraphBuilder::TryInlineBuiltinMethodCall(
result =
HUnaryMathOperation::New(zone(), context, left, kMathPowHalf);
} else if (exponent == -0.5) {
- HConstant* double_one =
- new(zone()) HConstant(Handle<Object>(Smi::FromInt(1),
- isolate()),
- Representation::Double());
+ HConstant* double_one = new(zone()) HConstant(
+ 1, Representation::Double());
AddInstruction(double_one);
HInstruction* sqrt =
HUnaryMathOperation::New(zone(), context, left, kMathPowHalf);
@@ -9407,7 +8462,7 @@ bool HOptimizedGraphBuilder::TryCallApply(Call* expr) {
VariableProxy* arg_two = args->at(1)->AsVariableProxy();
if (arg_two == NULL || !arg_two->var()->IsStackAllocated()) return false;
- HValue* arg_two_value = environment()->Lookup(arg_two->var());
+ HValue* arg_two_value = LookupAndMakeLive(arg_two->var());
if (!arg_two_value->CheckFlag(HValue::kIsArguments)) return false;
// Found pattern f.apply(receiver, arguments).
@@ -9560,8 +8615,6 @@ void HOptimizedGraphBuilder::VisitCall(Call* expr) {
}
// Named function call.
- expr->RecordTypeFeedback(oracle(), CALL_AS_METHOD);
-
if (TryCallApply(expr)) return;
CHECK_ALIVE(VisitForValue(prop->obj()));
@@ -9627,7 +8680,6 @@ void HOptimizedGraphBuilder::VisitCall(Call* expr) {
}
} else {
- expr->RecordTypeFeedback(oracle(), CALL_AS_FUNCTION);
VariableProxy* proxy = expr->expression()->AsVariableProxy();
bool global_call = proxy != NULL && proxy->var()->IsUnallocated();
@@ -9755,7 +8807,8 @@ void HOptimizedGraphBuilder::VisitCall(Call* expr) {
static bool IsAllocationInlineable(Handle<JSFunction> constructor) {
return constructor->has_initial_map() &&
constructor->initial_map()->instance_type() == JS_OBJECT_TYPE &&
- constructor->initial_map()->instance_size() < HAllocateObject::kMaxSize;
+ constructor->initial_map()->instance_size() < HAllocate::kMaxInlineSize &&
+ constructor->initial_map()->InitialPropertiesLength() == 0;
}
@@ -9763,9 +8816,9 @@ void HOptimizedGraphBuilder::VisitCallNew(CallNew* expr) {
ASSERT(!HasStackOverflow());
ASSERT(current_block() != NULL);
ASSERT(current_block()->HasPredecessor());
- expr->RecordTypeFeedback(oracle());
int argument_count = expr->arguments()->length() + 1; // Plus constructor.
HValue* context = environment()->LookupContext();
+ Factory* factory = isolate()->factory();
if (FLAG_inline_construct &&
expr->IsMonomorphic() &&
@@ -9785,20 +8838,84 @@ void HOptimizedGraphBuilder::VisitCallNew(CallNew* expr) {
constructor->shared()->CompleteInobjectSlackTracking();
}
- // Replace the constructor function with a newly allocated receiver.
- HInstruction* receiver = new(zone()) HAllocateObject(context, constructor);
- // Index of the receiver from the top of the expression stack.
+ // Calculate instance size from initial map of constructor.
+ ASSERT(constructor->has_initial_map());
+ Handle<Map> initial_map(constructor->initial_map());
+ int instance_size = initial_map->instance_size();
+ ASSERT(initial_map->InitialPropertiesLength() == 0);
+
+ // Allocate an instance of the implicit receiver object.
+ HValue* size_in_bytes =
+ AddInstruction(new(zone()) HConstant(instance_size,
+ Representation::Integer32()));
+
+ HAllocate::Flags flags = HAllocate::DefaultFlags();
+ if (FLAG_pretenuring_call_new &&
+ isolate()->heap()->ShouldGloballyPretenure()) {
+ flags = static_cast<HAllocate::Flags>(
+ flags | HAllocate::CAN_ALLOCATE_IN_OLD_POINTER_SPACE);
+ }
+
+ HInstruction* receiver =
+ AddInstruction(new(zone()) HAllocate(context,
+ size_in_bytes,
+ HType::JSObject(),
+ flags));
+ HAllocate::cast(receiver)->set_known_initial_map(initial_map);
+
+ // Load the initial map from the constructor.
+ HValue* constructor_value =
+ AddInstruction(new(zone()) HConstant(constructor,
+ Representation::Tagged()));
+ HValue* initial_map_value =
+ AddLoad(constructor_value, HObjectAccess::ForJSObjectOffset(
+ JSFunction::kPrototypeOrInitialMapOffset));
+
+ // Initialize map and fields of the newly allocated object.
+ { NoObservableSideEffectsScope no_effects(this);
+ ASSERT(initial_map->instance_type() == JS_OBJECT_TYPE);
+ AddStore(receiver,
+ HObjectAccess::ForJSObjectOffset(JSObject::kMapOffset),
+ initial_map_value);
+ HValue* empty_fixed_array =
+ AddInstruction(new(zone()) HConstant(factory->empty_fixed_array(),
+ Representation::Tagged()));
+ AddStore(receiver,
+ HObjectAccess::ForJSObjectOffset(JSObject::kPropertiesOffset),
+ empty_fixed_array);
+ AddStore(receiver,
+ HObjectAccess::ForJSObjectOffset(JSObject::kElementsOffset),
+ empty_fixed_array);
+ if (initial_map->inobject_properties() != 0) {
+ HConstant* undefined = graph()->GetConstantUndefined();
+ for (int i = 0; i < initial_map->inobject_properties(); i++) {
+ int property_offset = JSObject::kHeaderSize + i * kPointerSize;
+ AddStore(receiver,
+ HObjectAccess::ForJSObjectOffset(property_offset),
+ undefined);
+ }
+ }
+ }
+
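  // Editorial sketch of the receiver produced above (offsets follow the
  // standard JSObject layout):
  //   [kMapOffset]                     initial_map_value
  //   [kPropertiesOffset]              empty_fixed_array
  //   [kElementsOffset]                empty_fixed_array
  //   [kHeaderSize + i * kPointerSize] undefined, for each in-object slot i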
+ // Replace the constructor function with a newly allocated receiver using
+ // the index of the receiver from the top of the expression stack.
const int receiver_index = argument_count - 1;
- AddInstruction(receiver);
ASSERT(environment()->ExpressionStackAt(receiver_index) == function);
environment()->SetExpressionStackAt(receiver_index, receiver);
if (TryInlineConstruct(expr, receiver)) return;
- // TODO(mstarzinger): For now we remove the previous HAllocateObject and
- // add HPushArgument for the arguments in case inlining failed. What we
- // actually should do is emit HInvokeFunction on the constructor instead
- // of using HCallNew as a fallback.
+ // TODO(mstarzinger): For now we remove the previous HAllocate and all
+ // corresponding instructions and instead add HPushArgument for the
+  // arguments in case inlining failed. What we should actually do is have
+  // inlining try to build a subgraph without mutating the parent graph.
+ HInstruction* instr = current_block()->last();
+ while (instr != initial_map_value) {
+ HInstruction* prev_instr = instr->previous();
+ instr->DeleteAndReplaceWith(NULL);
+ instr = prev_instr;
+ }
+ initial_map_value->DeleteAndReplaceWith(NULL);
receiver->DeleteAndReplaceWith(NULL);
check->DeleteAndReplaceWith(NULL);
environment()->SetExpressionStackAt(receiver_index, function);
@@ -9818,19 +8935,7 @@ void HOptimizedGraphBuilder::VisitCallNew(CallNew* expr) {
CHECK_ALIVE(VisitArgumentList(expr->arguments()));
HCallNew* call;
if (use_call_new_array) {
- // TODO(mvstanton): It would be better to use the already created global
- // property cell that is shared by full code gen. That way, any transition
- // information that happened after crankshaft won't be lost. The right
- // way to do that is to begin passing the cell to the type feedback oracle
- // instead of just the value in the cell. Do this in a follow-up checkin.
- Handle<Object> feedback = oracle()->GetInfo(expr->CallNewFeedbackId());
- ASSERT(feedback->IsSmi());
- Handle<JSGlobalPropertyCell> cell =
- isolate()->factory()->NewJSGlobalPropertyCell(feedback);
-
- // TODO(mvstanton): Here we should probably insert code to check if the
- // type cell elements kind is different from when we compiled, and deopt
-    // in that case. Do this in a follow-up checkin.
+ Handle<JSGlobalPropertyCell> cell = expr->allocation_info_cell();
call = new(zone()) HCallNewArray(context, constructor, argument_count,
cell);
} else {
@@ -9912,6 +9017,7 @@ void HOptimizedGraphBuilder::VisitUnaryOperation(UnaryOperation* expr) {
}
}
+
void HOptimizedGraphBuilder::VisitDelete(UnaryOperation* expr) {
Property* prop = expr->expression()->AsProperty();
VariableProxy* proxy = expr->expression()->AsVariableProxy();
@@ -9968,7 +9074,7 @@ void HOptimizedGraphBuilder::VisitSub(UnaryOperation* expr) {
HValue* context = environment()->LookupContext();
HInstruction* instr =
HMul::New(zone(), context, value, graph()->GetConstantMinus1());
- TypeInfo info = oracle()->UnaryType(expr);
+ TypeInfo info = expr->type();
Representation rep = ToRepresentation(info);
if (info.IsUninitialized()) {
AddSoftDeoptimize();
@@ -9985,7 +9091,7 @@ void HOptimizedGraphBuilder::VisitSub(UnaryOperation* expr) {
void HOptimizedGraphBuilder::VisitBitNot(UnaryOperation* expr) {
CHECK_ALIVE(VisitForValue(expr->expression()));
HValue* value = Pop();
- TypeInfo info = oracle()->UnaryType(expr);
+ TypeInfo info = expr->type();
if (info.IsUninitialized()) {
AddSoftDeoptimize();
}
@@ -10042,7 +9148,7 @@ HInstruction* HOptimizedGraphBuilder::BuildIncrement(
bool returns_original_input,
CountOperation* expr) {
// The input to the count operation is on top of the expression stack.
- TypeInfo info = oracle()->IncrementType(expr);
+ TypeInfo info = expr->type();
Representation rep = ToRepresentation(info);
if (rep.IsTagged()) {
rep = Representation::Integer32();
@@ -10118,7 +9224,7 @@ void HOptimizedGraphBuilder::VisitCountOperation(CountOperation* expr) {
case Variable::PARAMETER:
case Variable::LOCAL:
- Bind(var, after);
+ BindIfLive(var, after);
break;
case Variable::CONTEXT: {
@@ -10156,7 +9262,6 @@ void HOptimizedGraphBuilder::VisitCountOperation(CountOperation* expr) {
} else {
// Argument of the count operation is a property.
ASSERT(prop != NULL);
- prop->RecordTypeFeedback(oracle(), zone());
if (prop->key()->IsPropertyName()) {
// Named property.
@@ -10239,7 +9344,6 @@ void HOptimizedGraphBuilder::VisitCountOperation(CountOperation* expr) {
after = BuildIncrement(returns_original_input, expr);
input = environment()->ExpressionStackAt(0);
- expr->RecordTypeFeedback(oracle(), zone());
HandleKeyedElementAccess(obj, key, after, expr, expr->AssignmentId(),
RelocInfo::kNoPosition,
true, // is_store
@@ -10274,7 +9378,7 @@ HInstruction* HOptimizedGraphBuilder::BuildStringCharCodeAt(
if (i < 0 || i >= s->length()) {
return new(zone()) HConstant(OS::nan_value(), Representation::Double());
}
- return new(zone()) HConstant(s->Get(i), Representation::Integer32());
+ return new(zone()) HConstant(s->Get(i));
}
}
BuildCheckNonSmi(string);
@@ -10348,8 +9452,11 @@ HInstruction* HOptimizedGraphBuilder::BuildBinaryOperation(
HValue* left,
HValue* right) {
HValue* context = environment()->LookupContext();
- TypeInfo left_info, right_info, result_info, combined_info;
- oracle()->BinaryType(expr, &left_info, &right_info, &result_info);
+ TypeInfo left_info = expr->left_type();
+ TypeInfo right_info = expr->right_type();
+ TypeInfo result_info = expr->result_type();
+ bool has_fixed_right_arg = expr->has_fixed_right_arg();
+ int fixed_right_arg_value = expr->fixed_right_arg_value();
Representation left_rep = ToRepresentation(left_info);
Representation right_rep = ToRepresentation(right_info);
Representation result_rep = ToRepresentation(result_info);
@@ -10379,7 +9486,12 @@ HInstruction* HOptimizedGraphBuilder::BuildBinaryOperation(
instr = HMul::New(zone(), context, left, right);
break;
case Token::MOD:
- instr = HMod::New(zone(), context, left, right);
+ instr = HMod::New(zone(),
+ context,
+ left,
+ right,
+ has_fixed_right_arg,
+ fixed_right_arg_value);
break;
case Token::DIV:
instr = HDiv::New(zone(), context, left, right);
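
Note: threading has_fixed_right_arg / fixed_right_arg_value into HMod lets
the backend specialize `left % right` when feedback pinned the right operand
to a single value. A hedged illustration of the payoff (not the actual
lowering in this patch):

    // Sketch: with the divisor pinned to 8, the optimized code needs only
    // a guard plus, for non-negative smis, a mask.
    //   if (right != 8) deopt;    // guard on the recorded operand
    //   result = left & 7;        // left % 8 when left >= 0
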
@@ -10507,8 +9619,7 @@ void HOptimizedGraphBuilder::VisitLogicalExpression(BinaryOperation* expr) {
// We need an extra block to maintain edge-split form.
HBasicBlock* empty_block = graph()->CreateBasicBlock();
HBasicBlock* eval_right = graph()->CreateBasicBlock();
- TypeFeedbackId test_id = expr->left()->test_id();
- ToBooleanStub::Types expected(oracle()->ToBooleanTypes(test_id));
+ ToBooleanStub::Types expected(expr->left()->to_boolean_types());
HBranch* test = is_logical_and
? new(zone()) HBranch(left_value, eval_right, empty_block, expected)
: new(zone()) HBranch(left_value, empty_block, eval_right, expected);
@@ -10676,16 +9787,17 @@ void HOptimizedGraphBuilder::VisitCompareOperation(CompareOperation* expr) {
return ast_context()->ReturnControl(instr, expr->id());
}
- TypeInfo left_type, right_type, overall_type_info;
- oracle()->CompareType(expr, &left_type, &right_type, &overall_type_info);
- Representation combined_rep = ToRepresentation(overall_type_info);
+ TypeInfo left_type = expr->left_type();
+ TypeInfo right_type = expr->right_type();
+ TypeInfo overall_type = expr->overall_type();
+ Representation combined_rep = ToRepresentation(overall_type);
Representation left_rep = ToRepresentation(left_type);
Representation right_rep = ToRepresentation(right_type);
// Check if this expression was ever executed according to type feedback.
// Note that for the special typeof/null/undefined cases we get unknown here.
- if (overall_type_info.IsUninitialized()) {
+ if (overall_type.IsUninitialized()) {
AddSoftDeoptimize();
- overall_type_info = left_type = right_type = TypeInfo::Unknown();
+ overall_type = left_type = right_type = TypeInfo::Unknown();
}
CHECK_ALIVE(VisitForValue(expr->left()));
@@ -10757,12 +9869,12 @@ void HOptimizedGraphBuilder::VisitCompareOperation(CompareOperation* expr) {
HIn* result = new(zone()) HIn(context, left, right);
result->set_position(expr->position());
return ast_context()->ReturnInstruction(result, expr->id());
- } else if (overall_type_info.IsNonPrimitive()) {
+ } else if (overall_type.IsNonPrimitive()) {
switch (op) {
case Token::EQ:
case Token::EQ_STRICT: {
// Can we get away with map check and not instance type check?
- Handle<Map> map = oracle()->GetCompareMap(expr);
+ Handle<Map> map = expr->map();
if (!map.is_null()) {
AddCheckMapsWithTransitions(left, map);
AddCheckMapsWithTransitions(right, map);
@@ -10784,7 +9896,7 @@ void HOptimizedGraphBuilder::VisitCompareOperation(CompareOperation* expr) {
default:
return Bailout("Unsupported non-primitive compare");
}
- } else if (overall_type_info.IsInternalizedString() &&
+ } else if (overall_type.IsInternalizedString() &&
Token::IsEqualityOp(op)) {
BuildCheckNonSmi(left);
AddInstruction(HCheckInstanceType::NewIsInternalizedString(left, zone()));
@@ -10819,22 +9931,22 @@ void HOptimizedGraphBuilder::HandleLiteralCompareNil(CompareOperation* expr,
ASSERT(!HasStackOverflow());
ASSERT(current_block() != NULL);
ASSERT(current_block()->HasPredecessor());
- EqualityKind kind =
- expr->op() == Token::EQ_STRICT ? kStrictEquality : kNonStrictEquality;
HIfContinuation continuation;
- TypeFeedbackId id = expr->CompareOperationFeedbackId();
CompareNilICStub::Types types;
- if (kind == kStrictEquality) {
- types.Add((nil == kNullValue) ? CompareNilICStub::NULL_TYPE :
- CompareNilICStub::UNDEFINED);
- } else {
- types = CompareNilICStub::Types(oracle()->CompareNilTypes(id));
- if (types.IsEmpty()) {
- types = CompareNilICStub::Types::FullCompare();
- }
+ if (expr->op() == Token::EQ_STRICT) {
+ IfBuilder if_nil(this);
+ if_nil.If<HCompareObjectEqAndBranch>(
+ value, (nil == kNullValue) ? graph()->GetConstantNull()
+ : graph()->GetConstantUndefined());
+ if_nil.Then();
+ if_nil.Else();
+ if_nil.CaptureContinuation(&continuation);
+ return ast_context()->ReturnContinuation(&continuation, expr->id());
}
- Handle<Map> map_handle(oracle()->CompareNilMonomorphicReceiverType(id));
- BuildCompareNil(value, kind, types, map_handle,
+ types = CompareNilICStub::Types(expr->compare_nil_types());
+ if (types.IsEmpty()) types = CompareNilICStub::Types::FullCompare();
+ Handle<Map> map_handle = expr->map();
+ BuildCompareNil(value, types, map_handle,
expr->position(), &continuation);
return ast_context()->ReturnContinuation(&continuation, expr->id());
}
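
Note: for strict equality the stub machinery is unnecessary, since
`x === null` (or `=== undefined`) can only be true for that exact oddball,
so the builder emits a direct object-equality branch. A minimal sketch of
the IfBuilder idiom used above:

    // Sketch: IfBuilder captures a two-way continuation that the AST
    // context consumes directly.
    IfBuilder if_nil(this);
    if_nil.If<HCompareObjectEqAndBranch>(value, graph()->GetConstantNull());
    if_nil.Then();                          // value was null
    if_nil.Else();                          // anything else
    if_nil.CaptureContinuation(&continuation);
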
@@ -10867,8 +9979,7 @@ HInstruction* HOptimizedGraphBuilder::BuildFastLiteral(
HAllocate::Flags flags = HAllocate::CAN_ALLOCATE_IN_NEW_SPACE;
// TODO(hpayer): add support for old data space
- if (FLAG_pretenure_literals &&
- isolate()->heap()->ShouldGloballyPretenure() &&
+ if (isolate()->heap()->ShouldGloballyPretenure() &&
data_size == 0) {
flags = static_cast<HAllocate::Flags>(
flags | HAllocate::CAN_ALLOCATE_IN_OLD_POINTER_SPACE);
@@ -10896,15 +10007,7 @@ void HOptimizedGraphBuilder::BuildEmitDeepCopy(
int* offset,
AllocationSiteMode mode) {
Zone* zone = this->zone();
- Factory* factory = isolate()->factory();
-
- HInstruction* original_boilerplate = AddInstruction(new(zone) HConstant(
- original_boilerplate_object, Representation::Tagged()));
-
- bool create_allocation_site_info = mode == TRACK_ALLOCATION_SITE &&
- boilerplate_object->map()->CanTrackAllocationSite();
- // Only elements backing stores for non-COW arrays need to be copied.
Handle<FixedArrayBase> elements(boilerplate_object->elements());
Handle<FixedArrayBase> original_elements(
original_boilerplate_object->elements());
@@ -10918,138 +10021,36 @@ void HOptimizedGraphBuilder::BuildEmitDeepCopy(
elements->map() != isolate()->heap()->fixed_cow_array_map()) ?
elements->Size() : 0;
int elements_offset = *offset + object_size;
- if (create_allocation_site_info) {
- elements_offset += AllocationSiteInfo::kSize;
- *offset += AllocationSiteInfo::kSize;
- }
*offset += object_size + elements_size;
- HValue* object_elements = BuildCopyObjectHeader(boilerplate_object, target,
+ // Copy object elements if non-COW.
+ HValue* object_elements = BuildEmitObjectHeader(boilerplate_object, target,
object_offset, elements_offset, elements_size);
+ if (object_elements != NULL) {
+ BuildEmitElements(elements, original_elements, kind, object_elements,
+ target, offset);
+ }
// Copy in-object properties.
HValue* object_properties =
AddInstruction(new(zone) HInnerAllocatedObject(target, object_offset));
+ BuildEmitInObjectProperties(boilerplate_object, original_boilerplate_object,
+ object_properties, target, offset);
- Handle<DescriptorArray> descriptors(
- boilerplate_object->map()->instance_descriptors());
- int limit = boilerplate_object->map()->NumberOfOwnDescriptors();
-
- int copied_fields = 0;
- for (int i = 0; i < limit; i++) {
- PropertyDetails details = descriptors->GetDetails(i);
- if (details.type() != FIELD) continue;
- copied_fields++;
- int index = descriptors->GetFieldIndex(i);
- int property_offset = boilerplate_object->GetInObjectPropertyOffset(index);
- Handle<Name> name(descriptors->GetKey(i));
- Handle<Object> value =
- Handle<Object>(boilerplate_object->InObjectPropertyAt(index),
- isolate());
- if (value->IsJSObject()) {
- Handle<JSObject> value_object = Handle<JSObject>::cast(value);
- Handle<JSObject> original_value_object = Handle<JSObject>::cast(
- Handle<Object>(original_boilerplate_object->InObjectPropertyAt(index),
- isolate()));
- HInstruction* value_instruction =
- AddInstruction(new(zone) HInnerAllocatedObject(target, *offset));
- AddInstruction(new(zone) HStoreNamedField(
- object_properties, name, value_instruction, true,
- Representation::Tagged(), property_offset));
- BuildEmitDeepCopy(value_object, original_value_object, target,
- offset, DONT_TRACK_ALLOCATION_SITE);
- } else {
- Representation representation = details.representation();
- HInstruction* value_instruction = AddInstruction(new(zone) HConstant(
- value, Representation::Tagged()));
- if (representation.IsDouble()) {
- HInstruction* double_box =
- AddInstruction(new(zone) HInnerAllocatedObject(target, *offset));
- BuildStoreMap(double_box, factory->heap_number_map());
- AddInstruction(new(zone) HStoreNamedField(
- double_box, name, value_instruction, true,
- Representation::Double(), HeapNumber::kValueOffset));
- value_instruction = double_box;
- *offset += HeapNumber::kSize;
- }
- AddInstruction(new(zone) HStoreNamedField(
- object_properties, name, value_instruction, true,
- Representation::Tagged(), property_offset));
- }
- }
-
- int inobject_properties = boilerplate_object->map()->inobject_properties();
- HInstruction* value_instruction = AddInstruction(new(zone) HConstant(
- factory->one_pointer_filler_map(), Representation::Tagged()));
- for (int i = copied_fields; i < inobject_properties; i++) {
- AddInstruction(new(zone) HStoreNamedField(
- object_properties, factory->unknown_field_string(), value_instruction,
- true, Representation::Tagged(),
- boilerplate_object->GetInObjectPropertyOffset(i)));
- }
-
- // Build Allocation Site Info if desired
- if (create_allocation_site_info) {
+ // Create allocation site info.
+ if (mode == TRACK_ALLOCATION_SITE &&
+ boilerplate_object->map()->CanTrackAllocationSite()) {
+ elements_offset += AllocationSiteInfo::kSize;
+ *offset += AllocationSiteInfo::kSize;
+ HInstruction* original_boilerplate = AddInstruction(new(zone) HConstant(
+ original_boilerplate_object, Representation::Tagged()));
BuildCreateAllocationSiteInfo(target, JSArray::kSize, original_boilerplate);
}
-
- if (object_elements != NULL) {
- HInstruction* boilerplate_elements = AddInstruction(new(zone) HConstant(
- elements, Representation::Tagged()));
-
- int elements_length = elements->length();
- HValue* object_elements_length =
- AddInstruction(new(zone) HConstant(
- elements_length, Representation::Integer32()));
-
- BuildInitializeElements(object_elements, kind, object_elements_length);
-
- // Copy elements backing store content.
- if (elements->IsFixedDoubleArray()) {
- for (int i = 0; i < elements_length; i++) {
- HValue* key_constant =
- AddInstruction(new(zone) HConstant(i, Representation::Integer32()));
- HInstruction* value_instruction =
- AddInstruction(new(zone) HLoadKeyed(
- boilerplate_elements, key_constant, NULL, kind));
- AddInstruction(new(zone) HStoreKeyed(
- object_elements, key_constant, value_instruction, kind));
- }
- } else if (elements->IsFixedArray()) {
- Handle<FixedArray> fast_elements = Handle<FixedArray>::cast(elements);
- Handle<FixedArray> original_fast_elements =
- Handle<FixedArray>::cast(original_elements);
- for (int i = 0; i < elements_length; i++) {
- Handle<Object> value(fast_elements->get(i), isolate());
- HValue* key_constant =
- AddInstruction(new(zone) HConstant(i, Representation::Integer32()));
- if (value->IsJSObject()) {
- Handle<JSObject> value_object = Handle<JSObject>::cast(value);
- Handle<JSObject> original_value_object = Handle<JSObject>::cast(
- Handle<Object>(original_fast_elements->get(i), isolate()));
- HInstruction* value_instruction =
- AddInstruction(new(zone) HInnerAllocatedObject(target, *offset));
- AddInstruction(new(zone) HStoreKeyed(
- object_elements, key_constant, value_instruction, kind));
- BuildEmitDeepCopy(value_object, original_value_object, target,
- offset, DONT_TRACK_ALLOCATION_SITE);
- } else {
- HInstruction* value_instruction =
- AddInstruction(new(zone) HLoadKeyed(
- boilerplate_elements, key_constant, NULL, kind));
- AddInstruction(new(zone) HStoreKeyed(
- object_elements, key_constant, value_instruction, kind));
- }
- }
- } else {
- UNREACHABLE();
- }
- }
}
-HValue* HOptimizedGraphBuilder::BuildCopyObjectHeader(
+HValue* HOptimizedGraphBuilder::BuildEmitObjectHeader(
Handle<JSObject> boilerplate_object,
HInstruction* target,
int object_offset,
@@ -11057,13 +10058,12 @@ HValue* HOptimizedGraphBuilder::BuildCopyObjectHeader(
int elements_size) {
ASSERT(boilerplate_object->properties()->length() == 0);
Zone* zone = this->zone();
- Factory* factory = isolate()->factory();
HValue* result = NULL;
HValue* object_header =
AddInstruction(new(zone) HInnerAllocatedObject(target, object_offset));
Handle<Map> boilerplate_object_map(boilerplate_object->map());
- BuildStoreMap(object_header, boilerplate_object_map);
+ AddStoreMapConstant(object_header, boilerplate_object_map);
HInstruction* elements;
if (elements_size == 0) {
@@ -11076,23 +10076,15 @@ HValue* HOptimizedGraphBuilder::BuildCopyObjectHeader(
target, elements_offset));
result = elements;
}
- HInstruction* elements_store = AddInstruction(new(zone) HStoreNamedField(
- object_header,
- factory->elements_field_string(),
- elements,
- true, Representation::Tagged(), JSObject::kElementsOffset));
- elements_store->SetGVNFlag(kChangesElementsPointer);
+ AddStore(object_header, HObjectAccess::ForElementsPointer(), elements);
Handle<Object> properties_field =
Handle<Object>(boilerplate_object->properties(), isolate());
ASSERT(*properties_field == isolate()->heap()->empty_fixed_array());
HInstruction* properties = AddInstruction(new(zone) HConstant(
properties_field, Representation::None()));
- AddInstruction(new(zone) HStoreNamedField(object_header,
- factory->empty_string(),
- properties, true,
- Representation::Tagged(),
- JSObject::kPropertiesOffset));
+ HObjectAccess access = HObjectAccess::ForPropertiesPointer();
+ AddStore(object_header, access, properties);
if (boilerplate_object->IsJSArray()) {
Handle<JSArray> boilerplate_array =
@@ -11101,22 +10093,178 @@ HValue* HOptimizedGraphBuilder::BuildCopyObjectHeader(
Handle<Object>(boilerplate_array->length(), isolate());
HInstruction* length = AddInstruction(new(zone) HConstant(
length_field, Representation::None()));
+
ASSERT(boilerplate_array->length()->IsSmi());
Representation representation =
IsFastElementsKind(boilerplate_array->GetElementsKind())
? Representation::Smi() : Representation::Tagged();
- HInstruction* length_store = AddInstruction(new(zone) HStoreNamedField(
- object_header,
- factory->length_field_string(),
- length,
- true, representation, JSArray::kLengthOffset));
- length_store->SetGVNFlag(kChangesArrayLengths);
+ AddStore(object_header, HObjectAccess::ForArrayLength(),
+ length, representation);
}
return result;
}
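
Note: AddStore plus HObjectAccess replaces hand-rolled HStoreNamedField
construction; the access descriptor carries the field offset and the GVN
side-effect flags that callers previously had to set by hand. A
before/after sketch, condensed from this hunk:

    // Before: build the store, then remember to set the GVN flag.
    HInstruction* store = AddInstruction(new(zone) HStoreNamedField(
        object_header, factory->elements_field_string(), elements,
        true, Representation::Tagged(), JSObject::kElementsOffset));
    store->SetGVNFlag(kChangesElementsPointer);

    // After: the access descriptor supplies offset, name, and flags.
    AddStore(object_header, HObjectAccess::ForElementsPointer(), elements);
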
+void HOptimizedGraphBuilder::BuildEmitInObjectProperties(
+ Handle<JSObject> boilerplate_object,
+ Handle<JSObject> original_boilerplate_object,
+ HValue* object_properties,
+ HInstruction* target,
+ int* offset) {
+ Zone* zone = this->zone();
+ Handle<DescriptorArray> descriptors(
+ boilerplate_object->map()->instance_descriptors());
+ int limit = boilerplate_object->map()->NumberOfOwnDescriptors();
+
+ int copied_fields = 0;
+ for (int i = 0; i < limit; i++) {
+ PropertyDetails details = descriptors->GetDetails(i);
+ if (details.type() != FIELD) continue;
+ copied_fields++;
+ int index = descriptors->GetFieldIndex(i);
+ int property_offset = boilerplate_object->GetInObjectPropertyOffset(index);
+ Handle<Name> name(descriptors->GetKey(i));
+ Handle<Object> value =
+ Handle<Object>(boilerplate_object->InObjectPropertyAt(index),
+ isolate());
+
+ // The access for the store depends on the type of the boilerplate.
+ HObjectAccess access = boilerplate_object->IsJSArray() ?
+ HObjectAccess::ForJSArrayOffset(property_offset) :
+ HObjectAccess::ForJSObjectOffset(property_offset);
+
+ if (value->IsJSObject()) {
+ Handle<JSObject> value_object = Handle<JSObject>::cast(value);
+ Handle<JSObject> original_value_object = Handle<JSObject>::cast(
+ Handle<Object>(original_boilerplate_object->InObjectPropertyAt(index),
+ isolate()));
+ HInstruction* value_instruction =
+ AddInstruction(new(zone) HInnerAllocatedObject(target, *offset));
+
+ AddStore(object_properties, access, value_instruction);
+
+ BuildEmitDeepCopy(value_object, original_value_object, target,
+ offset, DONT_TRACK_ALLOCATION_SITE);
+ } else {
+ Representation representation = details.representation();
+ HInstruction* value_instruction = AddInstruction(new(zone) HConstant(
+ value, Representation::Tagged()));
+
+ if (representation.IsDouble()) {
+ // Allocate a HeapNumber box and store the value into it.
+ HInstruction* double_box =
+ AddInstruction(new(zone) HInnerAllocatedObject(target, *offset));
+ AddStoreMapConstant(double_box,
+ isolate()->factory()->heap_number_map());
+ AddStore(double_box, HObjectAccess::ForHeapNumberValue(),
+ value_instruction, Representation::Double());
+ value_instruction = double_box;
+ *offset += HeapNumber::kSize;
+ }
+
+ AddStore(object_properties, access, value_instruction);
+ }
+ }
+
+ int inobject_properties = boilerplate_object->map()->inobject_properties();
+ HInstruction* value_instruction = AddInstruction(new(zone)
+ HConstant(isolate()->factory()->one_pointer_filler_map(),
+ Representation::Tagged()));
+ for (int i = copied_fields; i < inobject_properties; i++) {
+ ASSERT(boilerplate_object->IsJSObject());
+ int property_offset = boilerplate_object->GetInObjectPropertyOffset(i);
+ HObjectAccess access = HObjectAccess::ForJSObjectOffset(property_offset);
+ AddStore(object_properties, access, value_instruction);
+ }
+}
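
Note: a double-valued field cannot be written raw into a tagged property
slot, so the copy allocates a HeapNumber "box" inside the same allocation
and stores a pointer to it. A comment sketch of the layout being built:

    // Sketch of the inner allocation at *offset:
    //   [ heap_number_map ]   <- written by AddStoreMapConstant
    //   [ 64-bit double   ]   <- HObjectAccess::ForHeapNumberValue()
    // *offset then advances by HeapNumber::kSize so subsequent nested
    // copies land after the box.
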
+
+
+void HOptimizedGraphBuilder::BuildEmitElements(
+ Handle<FixedArrayBase> elements,
+ Handle<FixedArrayBase> original_elements,
+ ElementsKind kind,
+ HValue* object_elements,
+ HInstruction* target,
+ int* offset) {
+ Zone* zone = this->zone();
+
+ int elements_length = elements->length();
+ HValue* object_elements_length =
+ AddInstruction(new(zone) HConstant(elements_length));
+
+ BuildInitializeElementsHeader(object_elements, kind, object_elements_length);
+
+ // Copy elements backing store content.
+ if (elements->IsFixedDoubleArray()) {
+ BuildEmitFixedDoubleArray(elements, kind, object_elements);
+ } else if (elements->IsFixedArray()) {
+ BuildEmitFixedArray(elements, original_elements, kind, object_elements,
+ target, offset);
+ } else {
+ UNREACHABLE();
+ }
+}
+
+
+void HOptimizedGraphBuilder::BuildEmitFixedDoubleArray(
+ Handle<FixedArrayBase> elements,
+ ElementsKind kind,
+ HValue* object_elements) {
+ Zone* zone = this->zone();
+ HInstruction* boilerplate_elements = AddInstruction(new(zone) HConstant(
+ elements, Representation::Tagged()));
+ int elements_length = elements->length();
+ for (int i = 0; i < elements_length; i++) {
+ HValue* key_constant = AddInstruction(new(zone) HConstant(i));
+ HInstruction* value_instruction =
+ AddInstruction(new(zone) HLoadKeyed(
+ boilerplate_elements, key_constant, NULL, kind, ALLOW_RETURN_HOLE));
+ HInstruction* store = AddInstruction(new(zone) HStoreKeyed(
+ object_elements, key_constant, value_instruction, kind));
+ store->SetFlag(HValue::kAllowUndefinedAsNaN);
+ }
+}
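
Note: the ALLOW_RETURN_HOLE load mode and the kAllowUndefinedAsNaN store
flag appear to exist for holey double arrays: a hole read out of the
boilerplate must survive the copy unchanged rather than trigger a hole
check or a deopt. A comment sketch of the invariant:

    // Sketch: copying [1.5, /* hole */, 3.0] must preserve the hole's
    // NaN bit pattern exactly; the flags keep the load and store from
    // inserting hole checks on the copy path.
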
+
+
+void HOptimizedGraphBuilder::BuildEmitFixedArray(
+ Handle<FixedArrayBase> elements,
+ Handle<FixedArrayBase> original_elements,
+ ElementsKind kind,
+ HValue* object_elements,
+ HInstruction* target,
+ int* offset) {
+ Zone* zone = this->zone();
+ HInstruction* boilerplate_elements = AddInstruction(new(zone) HConstant(
+ elements, Representation::Tagged()));
+ int elements_length = elements->length();
+ Handle<FixedArray> fast_elements = Handle<FixedArray>::cast(elements);
+ Handle<FixedArray> original_fast_elements =
+ Handle<FixedArray>::cast(original_elements);
+ for (int i = 0; i < elements_length; i++) {
+ Handle<Object> value(fast_elements->get(i), isolate());
+ HValue* key_constant = AddInstruction(new(zone) HConstant(i));
+ if (value->IsJSObject()) {
+ Handle<JSObject> value_object = Handle<JSObject>::cast(value);
+ Handle<JSObject> original_value_object = Handle<JSObject>::cast(
+ Handle<Object>(original_fast_elements->get(i), isolate()));
+ HInstruction* value_instruction =
+ AddInstruction(new(zone) HInnerAllocatedObject(target, *offset));
+ AddInstruction(new(zone) HStoreKeyed(
+ object_elements, key_constant, value_instruction, kind));
+ BuildEmitDeepCopy(value_object, original_value_object, target,
+ offset, DONT_TRACK_ALLOCATION_SITE);
+ } else {
+ HInstruction* value_instruction =
+ AddInstruction(new(zone) HLoadKeyed(
+ boilerplate_elements, key_constant, NULL, kind,
+ ALLOW_RETURN_HOLE));
+ AddInstruction(new(zone) HStoreKeyed(
+ object_elements, key_constant, value_instruction, kind));
+ }
+ }
+}
+
void HOptimizedGraphBuilder::VisitThisFunction(ThisFunction* expr) {
ASSERT(!HasStackOverflow());
ASSERT(current_block() != NULL);
@@ -11201,7 +10349,7 @@ void HOptimizedGraphBuilder::VisitFunctionDeclaration(
case Variable::LOCAL: {
CHECK_ALIVE(VisitForValue(declaration->fun()));
HValue* value = Pop();
- environment()->Bind(variable, value);
+ BindIfLive(variable, value);
break;
}
case Variable::CONTEXT: {
@@ -11459,9 +10607,6 @@ void HOptimizedGraphBuilder::GenerateTwoByteSeqStringSetChar(
HValue* value = Pop();
HValue* index = Pop();
HValue* string = Pop();
- HValue* context = environment()->LookupContext();
- HInstruction* char_code = BuildStringCharCodeAt(context, string, index);
- AddInstruction(char_code);
HSeqStringSetChar* result = new(zone()) HSeqStringSetChar(
String::TWO_BYTE_ENCODING, string, index, value);
return ast_context()->ReturnInstruction(result, call->id());
@@ -11497,13 +10642,8 @@ void HOptimizedGraphBuilder::GenerateSetValueOf(CallRuntime* call) {
// Create in-object property store to kValueOffset.
set_current_block(if_js_value);
- Handle<String> name = isolate()->factory()->undefined_string();
- AddInstruction(new(zone()) HStoreNamedField(object,
- name,
- value,
- true, // in-object store.
- Representation::Tagged(),
- JSValue::kValueOffset));
+ AddStore(object,
+ HObjectAccess::ForJSObjectOffset(JSValue::kValueOffset), value);
if_js_value->Goto(join);
join->SetJoinId(call->id());
set_current_block(join);
@@ -11790,8 +10930,8 @@ void HOptimizedGraphBuilder::GenerateFastAsciiArrayJoin(CallRuntime* call) {
// Support for generators.
-void HOptimizedGraphBuilder::GenerateGeneratorSend(CallRuntime* call) {
- return Bailout("inlined runtime function: GeneratorSend");
+void HOptimizedGraphBuilder::GenerateGeneratorNext(CallRuntime* call) {
+ return Bailout("inlined runtime function: GeneratorNext");
}
@@ -12120,14 +11260,16 @@ void HTracer::TraceCompilation(CompilationInfo* info) {
void HTracer::TraceLithium(const char* name, LChunk* chunk) {
ASSERT(!FLAG_parallel_recompilation);
- ALLOW_HANDLE_DEREF(chunk->isolate(), "debug output");
+ AllowHandleDereference allow_deref;
+ AllowDeferredHandleDereference allow_deferred_deref;
Trace(name, chunk->graph(), chunk);
}
void HTracer::TraceHydrogen(const char* name, HGraph* graph) {
ASSERT(!FLAG_parallel_recompilation);
- ALLOW_HANDLE_DEREF(graph->isolate(), "debug output");
+ AllowHandleDereference allow_deref;
+ AllowDeferredHandleDereference allow_deferred_deref;
Trace(name, graph, NULL);
}
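
Note: the old ALLOW_HANDLE_DEREF macro (isolate plus reason string) is
replaced by RAII scope objects whose constructors grant handle dereference
for the enclosing scope and whose destructors restore the previous state.
A minimal sketch of the idiom:

    {
      // Sketch: debug builds check every handle dereference; these
      // scopes assert permission for this block only.
      AllowHandleDereference allow_deref;
      AllowDeferredHandleDereference allow_deferred_deref;
      Trace(name, graph, NULL);
    }   // permission reverts here
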
diff --git a/deps/v8/src/hydrogen.h b/deps/v8/src/hydrogen.h
index b053fc71c5..ad89e505a3 100644
--- a/deps/v8/src/hydrogen.h
+++ b/deps/v8/src/hydrogen.h
@@ -34,7 +34,6 @@
#include "ast.h"
#include "compiler.h"
#include "hydrogen-instructions.h"
-#include "type-info.h"
#include "zone.h"
#include "scopes.h"
@@ -67,7 +66,6 @@ class HBasicBlock: public ZoneObject {
HInstruction* first() const { return first_; }
HInstruction* last() const { return last_; }
void set_last(HInstruction* instr) { last_ = instr; }
- HInstruction* GetLastInstruction();
HControlInstruction* end() const { return end_; }
HLoopInformation* loop_information() const { return loop_information_; }
const ZoneList<HBasicBlock*>* predecessors() const { return &predecessors_; }
@@ -110,9 +108,13 @@ class HBasicBlock: public ZoneObject {
int LoopNestingDepth() const;
void SetInitialEnvironment(HEnvironment* env);
- void ClearEnvironment() { last_environment_ = NULL; }
+ void ClearEnvironment() {
+ ASSERT(IsFinished());
+ ASSERT(end()->SuccessorCount() == 0);
+ last_environment_ = NULL;
+ }
bool HasEnvironment() const { return last_environment_ != NULL; }
- void UpdateEnvironment(HEnvironment* env) { last_environment_ = env; }
+ void UpdateEnvironment(HEnvironment* env);
HBasicBlock* parent_loop_header() const { return parent_loop_header_; }
void set_parent_loop_header(HBasicBlock* block) {
@@ -156,7 +158,11 @@ class HBasicBlock: public ZoneObject {
// Simulate (caller's environment)
// Goto (target block)
bool IsInlineReturnTarget() const { return is_inline_return_target_; }
- void MarkAsInlineReturnTarget() { is_inline_return_target_ = true; }
+ void MarkAsInlineReturnTarget(HBasicBlock* inlined_entry_block) {
+ is_inline_return_target_ = true;
+ inlined_entry_block_ = inlined_entry_block;
+ }
+ HBasicBlock* inlined_entry_block() { return inlined_entry_block_; }
bool IsDeoptimizing() const { return is_deoptimizing_; }
void MarkAsDeoptimizing() { is_deoptimizing_ = true; }
@@ -199,10 +205,12 @@ class HBasicBlock: public ZoneObject {
int last_instruction_index_;
ZoneList<int> deleted_phis_;
HBasicBlock* parent_loop_header_;
- bool is_inline_return_target_;
- bool is_deoptimizing_;
- bool dominates_loop_successors_;
- bool is_osr_entry_;
+ // For blocks marked as inline return target: the block with HEnterInlined.
+ HBasicBlock* inlined_entry_block_;
+ bool is_inline_return_target_ : 1;
+ bool is_deoptimizing_ : 1;
+ bool dominates_loop_successors_ : 1;
+ bool is_osr_entry_ : 1;
};
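
Note: turning the four bools into one-bit bitfields packs the flags into a
single byte, offsetting the new inlined_entry_block_ pointer in a class
allocated once per basic block. A tiny illustration, under typical ABI
assumptions:

    // Sketch: four plain bools take at least four bytes; four 1-bit
    // fields of the same underlying type usually share one byte.
    struct Plain  { bool a, b, c, d; };           // sizeof >= 4
    struct Packed { bool a:1, b:1, c:1, d:1; };   // sizeof == 1, typically
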
@@ -286,6 +294,7 @@ class HGraph: public ZoneObject {
void RestoreActualValues();
void DeadCodeElimination(const char *phase_name);
void PropagateDeoptimizingMark();
+ void AnalyzeAndPruneEnvironmentLiveness();
// Returns false if there are phi-uses of the arguments-object
// which are not supported by the optimizing compiler.
@@ -303,8 +312,6 @@ class HGraph: public ZoneObject {
HConstant* GetConstantUndefined() const { return undefined_constant_.get(); }
HConstant* GetConstant0();
HConstant* GetConstant1();
- HConstant* GetConstantSmi0();
- HConstant* GetConstantSmi1();
HConstant* GetConstantMinus1();
HConstant* GetConstantTrue();
HConstant* GetConstantFalse();
@@ -363,6 +370,13 @@ class HGraph: public ZoneObject {
return type_change_checksum_;
}
+ void update_maximum_environment_size(int environment_size) {
+ if (environment_size > maximum_environment_size_) {
+ maximum_environment_size_ = environment_size;
+ }
+ }
+ int maximum_environment_size() { return maximum_environment_size_; }
+
bool use_optimistic_licm() {
return use_optimistic_licm_;
}
@@ -403,10 +417,8 @@ class HGraph: public ZoneObject {
}
private:
- HConstant* GetConstantInt32(SetOncePointer<HConstant>* pointer,
- int32_t integer_value);
- HConstant* GetConstantSmi(SetOncePointer<HConstant>* pointer,
- int32_t integer_value);
+ HConstant* GetConstant(SetOncePointer<HConstant>* pointer,
+ int32_t integer_value);
void MarkLive(HValue* ref, HValue* instr, ZoneList<HValue*>* worklist);
void MarkLiveInstructions();
@@ -439,8 +451,6 @@ class HGraph: public ZoneObject {
SetOncePointer<HConstant> undefined_constant_;
SetOncePointer<HConstant> constant_0_;
SetOncePointer<HConstant> constant_1_;
- SetOncePointer<HConstant> constant_smi_0_;
- SetOncePointer<HConstant> constant_smi_1_;
SetOncePointer<HConstant> constant_minus1_;
SetOncePointer<HConstant> constant_true_;
SetOncePointer<HConstant> constant_false_;
@@ -460,6 +470,7 @@ class HGraph: public ZoneObject {
bool has_soft_deoptimize_;
bool depends_on_empty_array_proto_elements_;
int type_change_checksum_;
+ int maximum_environment_size_;
DISALLOW_COPY_AND_ASSIGN(HGraph);
};
@@ -521,6 +532,10 @@ class HEnvironment: public ZoneObject {
return parameter_count() + specials_count() + local_count();
}
+ int first_local_index() const {
+ return parameter_count() + specials_count();
+ }
+
void Bind(Variable* variable, HValue* value) {
Bind(IndexFor(variable), value);
}
@@ -618,6 +633,22 @@ class HEnvironment: public ZoneObject {
values_[index] = value;
}
+ // Map a variable to an environment index. Parameter indices are shifted
+ // by 1 (receiver is parameter index -1 but environment index 0).
+ // Stack-allocated local indices are shifted by the number of parameters.
+ int IndexFor(Variable* variable) const {
+ ASSERT(variable->IsStackAllocated());
+ int shift = variable->IsParameter()
+ ? 1
+ : parameter_count_ + specials_count_;
+ return variable->index() + shift;
+ }
+
+ bool is_local_index(int i) const {
+ return i >= first_local_index() &&
+ i < first_expression_index();
+ }
+
void PrintTo(StringStream* stream);
void PrintToStd();
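
Note: a worked example of the mapping, assuming parameter_count_ counts the
receiver (as the shift implies) and a single special slot:

    // receiver : parameter index -1 -> environment index 0
    // param 0  : parameter index  0 -> environment index 1
    // local 0  : local index      0 -> environment index
    //                                  parameter_count_ + specials_count_
    // so is_local_index() covers exactly the [locals] window of the
    // layout [parameters][specials][locals][temporaries].
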
@@ -645,17 +676,6 @@ class HEnvironment: public ZoneObject {
void Initialize(int parameter_count, int local_count, int stack_height);
void Initialize(const HEnvironment* other);
- // Map a variable to an environment index. Parameter indices are shifted
- // by 1 (receiver is parameter index -1 but environment index 0).
- // Stack-allocated local indices are shifted by the number of parameters.
- int IndexFor(Variable* variable) const {
- ASSERT(variable->IsStackAllocated());
- int shift = variable->IsParameter()
- ? 1
- : parameter_count_ + specials_count_;
- return variable->index() + shift;
- }
-
Handle<JSFunction> closure_;
// Value array [parameters] [specials] [locals] [temporaries].
ZoneList<HValue*> values_;
@@ -798,12 +818,10 @@ class TestContext: public AstContext {
public:
TestContext(HOptimizedGraphBuilder* owner,
Expression* condition,
- TypeFeedbackOracle* oracle,
HBasicBlock* if_true,
HBasicBlock* if_false)
: AstContext(owner, Expression::kTest),
condition_(condition),
- oracle_(oracle),
if_true_(if_true),
if_false_(if_false) {
}
@@ -820,7 +838,6 @@ class TestContext: public AstContext {
}
Expression* condition() const { return condition_; }
- TypeFeedbackOracle* oracle() const { return oracle_; }
HBasicBlock* if_true() const { return if_true_; }
HBasicBlock* if_false() const { return if_false_; }
@@ -830,7 +847,6 @@ class TestContext: public AstContext {
void BuildBranch(HValue* value);
Expression* condition_;
- TypeFeedbackOracle* oracle_;
HBasicBlock* if_true_;
HBasicBlock* if_false_;
};
@@ -840,12 +856,10 @@ class FunctionState {
public:
FunctionState(HOptimizedGraphBuilder* owner,
CompilationInfo* info,
- TypeFeedbackOracle* oracle,
InliningKind inlining_kind);
~FunctionState();
CompilationInfo* compilation_info() { return compilation_info_; }
- TypeFeedbackOracle* oracle() { return oracle_; }
AstContext* call_context() { return call_context_; }
InliningKind inlining_kind() const { return inlining_kind_; }
HBasicBlock* function_return() { return function_return_; }
@@ -871,7 +885,6 @@ class FunctionState {
HOptimizedGraphBuilder* owner_;
CompilationInfo* compilation_info_;
- TypeFeedbackOracle* oracle_;
// During function inlining, expression context of the call being
// inlined. NULL when not inlining.
@@ -966,11 +979,7 @@ class HGraphBuilder {
HInstruction* AddInstruction(HInstruction* instr);
void AddSimulate(BailoutId id,
RemovableSimulate removable = FIXED_SIMULATE);
- HBoundsCheck* AddBoundsCheck(
- HValue* index,
- HValue* length,
- BoundsCheckKeyMode key_mode = DONT_ALLOW_SMI_KEY,
- Representation r = Representation::None());
+ HBoundsCheck* AddBoundsCheck(HValue* index, HValue* length);
HReturn* AddReturn(HValue* value);
@@ -992,11 +1001,6 @@ class HGraphBuilder {
HValue* BuildCheckMap(HValue* obj, Handle<Map> map);
// Building common constructs
- HLoadNamedField* DoBuildLoadNamedField(HValue* object,
- bool inobject,
- Representation representation,
- int offset);
-
HInstruction* BuildExternalArrayElementAccess(
HValue* external_elements,
HValue* checked_key,
@@ -1036,11 +1040,26 @@ class HGraphBuilder {
ElementsKind elements_kind,
bool is_store,
LoadKeyedHoleMode load_mode,
- KeyedAccessStoreMode store_mode,
- Representation checked_index_representation = Representation::None());
+ KeyedAccessStoreMode store_mode);
+
+ HLoadNamedField* AddLoad(
+ HValue *object,
+ HObjectAccess access,
+ HValue *typecheck = NULL,
+ Representation representation = Representation::Tagged());
+
+ HLoadNamedField* BuildLoadNamedField(
+ HValue* object,
+ HObjectAccess access,
+ Representation representation);
+
+ HStoreNamedField* AddStore(
+ HValue *object,
+ HObjectAccess access,
+ HValue *val,
+ Representation representation = Representation::Tagged());
- HInstruction* BuildStoreMap(HValue* object, HValue* map);
- HInstruction* BuildStoreMap(HValue* object, Handle<Map> map);
+ HStoreNamedField* AddStoreMapConstant(HValue *object, Handle<Map>);
HLoadNamedField* AddLoadElements(HValue *object, HValue *typecheck = NULL);
@@ -1198,8 +1217,7 @@ class HGraphBuilder {
HValue* BeginBody(
HValue* initial,
HValue* terminating,
- Token::Value token,
- Representation input_representation = Representation::Integer32());
+ Token::Value token);
void EndBody();
private:
@@ -1241,7 +1259,11 @@ class HGraphBuilder {
JSArrayBuilder(HGraphBuilder* builder,
ElementsKind kind,
HValue* allocation_site_payload,
- AllocationSiteMode mode);
+ bool disable_allocation_sites);
+
+ JSArrayBuilder(HGraphBuilder* builder,
+ ElementsKind kind,
+ HValue* constructor_function);
HValue* AllocateEmptyArray();
HValue* AllocateArray(HValue* capacity, HValue* length_field,
@@ -1264,6 +1286,7 @@ class HGraphBuilder {
}
HValue* EmitMapCode(HValue* context);
+ HValue* EmitInternalMapCode();
HValue* EstablishEmptyArrayAllocationSize();
HValue* EstablishAllocationSize(HValue* length_node);
HValue* AllocateArray(HValue* size_in_bytes, HValue* capacity,
@@ -1273,6 +1296,7 @@ class HGraphBuilder {
ElementsKind kind_;
AllocationSiteMode mode_;
HValue* allocation_site_payload_;
+ HValue* constructor_function_;
HInnerAllocatedObject* elements_location_;
};
@@ -1280,13 +1304,13 @@ class HGraphBuilder {
ElementsKind kind,
HValue* capacity);
- void BuildInitializeElements(HValue* elements,
- ElementsKind kind,
- HValue* capacity);
+ void BuildInitializeElementsHeader(HValue* elements,
+ ElementsKind kind,
+ HValue* capacity);
- HValue* BuildAllocateAndInitializeElements(HValue* context,
- ElementsKind kind,
- HValue* capacity);
+ HValue* BuildAllocateElementsAndInitializeElementsHeader(HValue* context,
+ ElementsKind kind,
+ HValue* capacity);
// array must have been allocated with enough room for
// 1) the JSArray, 2) a AllocationSiteInfo if mode requires it,
@@ -1326,7 +1350,6 @@ class HGraphBuilder {
void BuildCompareNil(
HValue* value,
- EqualityKind kind,
CompareNilICStub::Types types,
Handle<Map> map,
int position,
@@ -1350,9 +1373,6 @@ class HGraphBuilder {
class HOptimizedGraphBuilder: public HGraphBuilder, public AstVisitor {
public:
- enum BreakType { BREAK, CONTINUE };
- enum SwitchType { UNKNOWN_SWITCH, SMI_SWITCH, STRING_SWITCH };
-
// A class encapsulating (lazily-allocated) break and continue blocks for
// a breakable statement. Separated from BreakAndContinueScope so that it
// can have a separate lifetime.
@@ -1397,6 +1417,7 @@ class HOptimizedGraphBuilder: public HGraphBuilder, public AstVisitor {
BreakAndContinueScope* next() { return next_; }
// Search the break stack for a break or continue target.
+ enum BreakType { BREAK, CONTINUE };
HBasicBlock* Get(BreakableStatement* stmt, BreakType type, int* drop_extra);
private:
@@ -1405,7 +1426,7 @@ class HOptimizedGraphBuilder: public HGraphBuilder, public AstVisitor {
BreakAndContinueScope* next_;
};
- HOptimizedGraphBuilder(CompilationInfo* info, TypeFeedbackOracle* oracle);
+ explicit HOptimizedGraphBuilder(CompilationInfo* info);
virtual bool BuildGraph();
@@ -1423,8 +1444,6 @@ class HOptimizedGraphBuilder: public HGraphBuilder, public AstVisitor {
HBasicBlock* second,
BailoutId join_id);
- TypeFeedbackOracle* oracle() const { return function_state()->oracle(); }
-
FunctionState* function_state() const { return function_state_; }
void VisitDeclarations(ZoneList<Declaration*>* declarations);
@@ -1535,6 +1554,45 @@ class HOptimizedGraphBuilder: public HGraphBuilder, public AstVisitor {
HValue* Top() const { return environment()->Top(); }
void Drop(int n) { environment()->Drop(n); }
void Bind(Variable* var, HValue* value) { environment()->Bind(var, value); }
+ bool IsEligibleForEnvironmentLivenessAnalysis(Variable* var,
+ int index,
+ HValue* value,
+ HEnvironment* env) {
+ if (!FLAG_analyze_environment_liveness) return false;
+ // |this| and |arguments| are always live; zapping parameters isn't
+ // safe because function.arguments can inspect them at any time.
+ return !var->is_this() &&
+ !var->is_arguments() &&
+ !value->IsArgumentsObject() &&
+ env->is_local_index(index);
+ }
+ void BindIfLive(Variable* var, HValue* value) {
+ HEnvironment* env = environment();
+ int index = env->IndexFor(var);
+ env->Bind(index, value);
+ if (IsEligibleForEnvironmentLivenessAnalysis(var, index, value, env)) {
+ HEnvironmentMarker* bind =
+ new(zone()) HEnvironmentMarker(HEnvironmentMarker::BIND, index);
+ AddInstruction(bind);
+#ifdef DEBUG
+ bind->set_closure(env->closure());
+#endif
+ }
+ }
+ HValue* LookupAndMakeLive(Variable* var) {
+ HEnvironment* env = environment();
+ int index = env->IndexFor(var);
+ HValue* value = env->Lookup(index);
+ if (IsEligibleForEnvironmentLivenessAnalysis(var, index, value, env)) {
+ HEnvironmentMarker* lookup =
+ new(zone()) HEnvironmentMarker(HEnvironmentMarker::LOOKUP, index);
+ AddInstruction(lookup);
+#ifdef DEBUG
+ lookup->set_closure(env->closure());
+#endif
+ }
+ return value;
+ }
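
Note: BindIfLive and LookupAndMakeLive thread HEnvironmentMarker
instructions into the graph so the new liveness pass
(AnalyzeAndPruneEnvironmentLiveness) can see, per environment slot, where a
value is bound and where it is last read; a BIND never reached by a LOOKUP
can be zapped, shrinking the state that deopts must keep alive. A comment
sketch of the pairing:

    // Sketch, per environment slot index:
    //   x = f();    // BIND(index)   -- definition
    //   use(x);     // LOOKUP(index) -- keeps the BIND live
    //   x = g();    // BIND(index)   -- if no LOOKUP follows, the slot
    //               //                  can be zapped past this point.
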
// The value of the arguments object is allowed in some but not most value
// contexts. (It's allowed in all effect contexts and disallowed in all
@@ -1692,9 +1750,6 @@ class HOptimizedGraphBuilder: public HGraphBuilder, public AstVisitor {
bool is_store,
bool* has_side_effects);
- HLoadNamedField* BuildLoadNamedField(HValue* object,
- Handle<Map> map,
- LookupResult* result);
HInstruction* BuildLoadNamedGeneric(HValue* object,
Handle<String> name,
Property* expr);
@@ -1750,13 +1805,37 @@ class HOptimizedGraphBuilder: public HGraphBuilder, public AstVisitor {
int* offset,
AllocationSiteMode mode);
- MUST_USE_RESULT HValue* BuildCopyObjectHeader(
+ MUST_USE_RESULT HValue* BuildEmitObjectHeader(
      Handle<JSObject> boilerplate_object,
HInstruction* target,
int object_offset,
int elements_offset,
int elements_size);
+ void BuildEmitInObjectProperties(Handle<JSObject> boilerplate_object,
+ Handle<JSObject> original_boilerplate_object,
+ HValue* object_properties,
+ HInstruction* target,
+ int* offset);
+
+ void BuildEmitElements(Handle<FixedArrayBase> elements,
+ Handle<FixedArrayBase> original_elements,
+ ElementsKind kind,
+ HValue* object_elements,
+ HInstruction* target,
+ int* offset);
+
+ void BuildEmitFixedDoubleArray(Handle<FixedArrayBase> elements,
+ ElementsKind kind,
+ HValue* object_elements);
+
+ void BuildEmitFixedArray(Handle<FixedArrayBase> elements,
+ Handle<FixedArrayBase> original_elements,
+ ElementsKind kind,
+ HValue* object_elements,
+ HInstruction* target,
+ int* offset);
+
void AddCheckPrototypeMaps(Handle<JSObject> holder,
Handle<Map> receiver_map);
@@ -1798,90 +1877,6 @@ class HOptimizedGraphBuilder: public HGraphBuilder, public AstVisitor {
Zone* AstContext::zone() const { return owner_->zone(); }
-class HValueMap: public ZoneObject {
- public:
- explicit HValueMap(Zone* zone)
- : array_size_(0),
- lists_size_(0),
- count_(0),
- present_flags_(0),
- array_(NULL),
- lists_(NULL),
- free_list_head_(kNil) {
- ResizeLists(kInitialSize, zone);
- Resize(kInitialSize, zone);
- }
-
- void Kill(GVNFlagSet flags);
-
- void Add(HValue* value, Zone* zone) {
- present_flags_.Add(value->gvn_flags());
- Insert(value, zone);
- }
-
- HValue* Lookup(HValue* value) const;
-
- HValueMap* Copy(Zone* zone) const {
- return new(zone) HValueMap(zone, this);
- }
-
- bool IsEmpty() const { return count_ == 0; }
-
- private:
- // A linked list of HValue* values. Stored in arrays.
- struct HValueMapListElement {
- HValue* value;
- int next; // Index in the array of the next list element.
- };
- static const int kNil = -1; // The end of a linked list
-
- // Must be a power of 2.
- static const int kInitialSize = 16;
-
- HValueMap(Zone* zone, const HValueMap* other);
-
- void Resize(int new_size, Zone* zone);
- void ResizeLists(int new_size, Zone* zone);
- void Insert(HValue* value, Zone* zone);
- uint32_t Bound(uint32_t value) const { return value & (array_size_ - 1); }
-
- int array_size_;
- int lists_size_;
- int count_; // The number of values stored in the HValueMap.
- GVNFlagSet present_flags_; // All flags that are in any value in the
- // HValueMap.
- HValueMapListElement* array_; // Primary store - contains the first value
- // with a given hash. Colliding elements are stored in linked lists.
- HValueMapListElement* lists_; // The linked lists containing hash collisions.
- int free_list_head_; // Unused elements in lists_ are on the free list.
-};
-
-
-class HSideEffectMap BASE_EMBEDDED {
- public:
- HSideEffectMap();
- explicit HSideEffectMap(HSideEffectMap* other);
- HSideEffectMap& operator= (const HSideEffectMap& other);
-
- void Kill(GVNFlagSet flags);
-
- void Store(GVNFlagSet flags, HInstruction* instr);
-
- bool IsEmpty() const { return count_ == 0; }
-
- inline HInstruction* operator[](int i) const {
- ASSERT(0 <= i);
- ASSERT(i < kNumberOfTrackedSideEffects);
- return data_[i];
- }
- inline HInstruction* at(int i) const { return operator[](i); }
-
- private:
- int count_;
- HInstruction* data_[kNumberOfTrackedSideEffects];
-};
-
-
class HStatistics: public Malloced {
public:
HStatistics()
diff --git a/deps/v8/src/ia32/assembler-ia32-inl.h b/deps/v8/src/ia32/assembler-ia32-inl.h
index cccacf7820..2a0c920936 100644
--- a/deps/v8/src/ia32/assembler-ia32-inl.h
+++ b/deps/v8/src/ia32/assembler-ia32-inl.h
@@ -333,8 +333,7 @@ Immediate::Immediate(Handle<Object> handle) {
#ifdef DEBUG
Isolate* isolate = Isolate::Current();
#endif
- ALLOW_HANDLE_DEREF(isolate,
- "using and embedding raw address, heap object check");
+ AllowDeferredHandleDereference using_raw_address;
// Verify all Objects referred by code are NOT in new space.
Object* obj = *handle;
ASSERT(!isolate->heap()->InNewSpace(obj));
@@ -368,7 +367,7 @@ void Assembler::emit(uint32_t x) {
void Assembler::emit(Handle<Object> handle) {
- ALLOW_HANDLE_DEREF(isolate(), "heap object check");
+ AllowDeferredHandleDereference heap_object_check;
// Verify all Objects referred by code are NOT in new space.
Object* obj = *handle;
ASSERT(!isolate()->heap()->InNewSpace(obj));
@@ -395,7 +394,7 @@ void Assembler::emit(uint32_t x, RelocInfo::Mode rmode, TypeFeedbackId id) {
void Assembler::emit(Handle<Code> code,
RelocInfo::Mode rmode,
TypeFeedbackId id) {
- ALLOW_HANDLE_DEREF(isolate(), "embedding raw address");
+ AllowDeferredHandleDereference embedding_raw_address;
emit(reinterpret_cast<intptr_t>(code.location()), rmode, id);
}
diff --git a/deps/v8/src/ia32/assembler-ia32.cc b/deps/v8/src/ia32/assembler-ia32.cc
index 7b32f1b1de..c0b2abd512 100644
--- a/deps/v8/src/ia32/assembler-ia32.cc
+++ b/deps/v8/src/ia32/assembler-ia32.cc
@@ -2351,7 +2351,7 @@ void Assembler::movd(const Operand& dst, XMMRegister src) {
void Assembler::extractps(Register dst, XMMRegister src, byte imm8) {
- ASSERT(CpuFeatures::IsSupported(SSE4_1));
+ ASSERT(IsEnabled(SSE4_1));
ASSERT(is_uint8(imm8));
EnsureSpace ensure_space(this);
EMIT(0x66);
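
Note: the assert tightens from "the CPU supports SSE4_1" to "SSE4_1 is
enabled on this assembler", which in this V8 vintage also requires an
active CpuFeatureScope; that catches emitting SSE4.1 instructions outside a
guarded region even on capable hardware. A hedged usage sketch
(CpuFeatureScope per the contemporary API):

    // Sketch: guard the emission site; the scope flips the assembler's
    // enabled-feature bit that IsEnabled() checks.
    if (CpuFeatures::IsSupported(SSE4_1)) {
      CpuFeatureScope scope(masm, SSE4_1);
      __ extractps(eax, xmm0, 0x1);   // asserts IsEnabled(SSE4_1)
    }
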
diff --git a/deps/v8/src/ia32/assembler-ia32.h b/deps/v8/src/ia32/assembler-ia32.h
index 8ded78558b..5d11452890 100644
--- a/deps/v8/src/ia32/assembler-ia32.h
+++ b/deps/v8/src/ia32/assembler-ia32.h
@@ -411,7 +411,7 @@ class Operand BASE_EMBEDDED {
}
static Operand Cell(Handle<JSGlobalPropertyCell> cell) {
- ALLOW_HANDLE_DEREF(Isolate::Current(), "embedding raw address");
+ AllowDeferredHandleDereference embedding_raw_address;
return Operand(reinterpret_cast<int32_t>(cell.location()),
RelocInfo::GLOBAL_PROPERTY_CELL);
}
diff --git a/deps/v8/src/ia32/builtins-ia32.cc b/deps/v8/src/ia32/builtins-ia32.cc
index 2b45d7654d..bf4ee949ed 100644
--- a/deps/v8/src/ia32/builtins-ia32.cc
+++ b/deps/v8/src/ia32/builtins-ia32.cc
@@ -486,6 +486,10 @@ static void Generate_JSEntryTrampolineHelper(MacroAssembler* masm,
// Invoke the code.
if (is_construct) {
+ // No type feedback cell is available.
+ Handle<Object> undefined_sentinel(
+ masm->isolate()->heap()->undefined_value(), masm->isolate());
+ __ mov(ebx, Immediate(undefined_sentinel));
CallConstructStub stub(NO_CALL_FUNCTION_FLAGS);
__ CallStub(&stub);
} else {
@@ -1455,14 +1459,20 @@ void Builtins::Generate_InternalArrayCode(MacroAssembler* masm) {
// Run the native code for the InternalArray function called as a normal
// function.
- ArrayNativeCode(masm, false, &generic_array_code);
-
- // Jump to the generic internal array code in case the specialized code cannot
- // handle the construction.
- __ bind(&generic_array_code);
- Handle<Code> array_code =
- masm->isolate()->builtins()->InternalArrayCodeGeneric();
- __ jmp(array_code, RelocInfo::CODE_TARGET);
+ if (FLAG_optimize_constructed_arrays) {
+ // Tail call a stub.
+ InternalArrayConstructorStub stub(masm->isolate());
+ __ TailCallStub(&stub);
+ } else {
+ ArrayNativeCode(masm, false, &generic_array_code);
+
+ // Jump to the generic internal array code in case the specialized code
+ // cannot handle the construction.
+ __ bind(&generic_array_code);
+ Handle<Code> array_code =
+ masm->isolate()->builtins()->InternalArrayCodeGeneric();
+ __ jmp(array_code, RelocInfo::CODE_TARGET);
+ }
}
@@ -1488,14 +1498,24 @@ void Builtins::Generate_ArrayCode(MacroAssembler* masm) {
}
// Run the native code for the Array function called as a normal function.
- ArrayNativeCode(masm, false, &generic_array_code);
-
- // Jump to the generic array code in case the specialized code cannot handle
- // the construction.
- __ bind(&generic_array_code);
- Handle<Code> array_code =
- masm->isolate()->builtins()->ArrayCodeGeneric();
- __ jmp(array_code, RelocInfo::CODE_TARGET);
+ if (FLAG_optimize_constructed_arrays) {
+ // Tail call a stub.
+ Handle<Object> undefined_sentinel(
+ masm->isolate()->heap()->undefined_value(),
+ masm->isolate());
+ __ mov(ebx, Immediate(undefined_sentinel));
+ ArrayConstructorStub stub(masm->isolate());
+ __ TailCallStub(&stub);
+ } else {
+ ArrayNativeCode(masm, false, &generic_array_code);
+
+ // Jump to the generic array code in case the specialized code
+ // cannot handle the construction.
+ __ bind(&generic_array_code);
+ Handle<Code> array_code =
+ masm->isolate()->builtins()->ArrayCodeGeneric();
+ __ jmp(array_code, RelocInfo::CODE_TARGET);
+ }
}
diff --git a/deps/v8/src/ia32/code-stubs-ia32.cc b/deps/v8/src/ia32/code-stubs-ia32.cc
index 507aeb6772..ad1c65db2b 100644
--- a/deps/v8/src/ia32/code-stubs-ia32.cc
+++ b/deps/v8/src/ia32/code-stubs-ia32.cc
@@ -30,7 +30,6 @@
#if defined(V8_TARGET_ARCH_IA32)
#include "bootstrapper.h"
-#include "builtins-decls.h"
#include "code-stubs.h"
#include "isolate.h"
#include "jsregexp.h"
@@ -50,7 +49,6 @@ void FastCloneShallowArrayStub::InitializeInterfaceDescriptor(
static Register registers[] = { eax, ebx, ecx };
descriptor->register_param_count_ = 3;
descriptor->register_params_ = registers;
- descriptor->stack_parameter_count_ = NULL;
descriptor->deoptimization_handler_ =
Runtime::FunctionForId(Runtime::kCreateArrayLiteralShallow)->entry;
}
@@ -62,7 +60,6 @@ void FastCloneShallowObjectStub::InitializeInterfaceDescriptor(
static Register registers[] = { eax, ebx, ecx, edx };
descriptor->register_param_count_ = 4;
descriptor->register_params_ = registers;
- descriptor->stack_parameter_count_ = NULL;
descriptor->deoptimization_handler_ =
Runtime::FunctionForId(Runtime::kCreateObjectLiteralShallow)->entry;
}
@@ -74,7 +71,6 @@ void KeyedLoadFastElementStub::InitializeInterfaceDescriptor(
static Register registers[] = { edx, ecx };
descriptor->register_param_count_ = 2;
descriptor->register_params_ = registers;
- descriptor->stack_parameter_count_ = NULL;
descriptor->deoptimization_handler_ =
FUNCTION_ADDR(KeyedLoadIC_MissFromStubFailure);
}
@@ -86,7 +82,6 @@ void LoadFieldStub::InitializeInterfaceDescriptor(
static Register registers[] = { edx };
descriptor->register_param_count_ = 1;
descriptor->register_params_ = registers;
- descriptor->stack_parameter_count_ = NULL;
descriptor->deoptimization_handler_ = NULL;
}
@@ -97,7 +92,6 @@ void KeyedLoadFieldStub::InitializeInterfaceDescriptor(
static Register registers[] = { edx };
descriptor->register_param_count_ = 1;
descriptor->register_params_ = registers;
- descriptor->stack_parameter_count_ = NULL;
descriptor->deoptimization_handler_ = NULL;
}
@@ -143,7 +137,29 @@ static void InitializeArrayConstructorDescriptor(
descriptor->register_params_ = registers;
descriptor->function_mode_ = JS_FUNCTION_STUB_MODE;
descriptor->deoptimization_handler_ =
- FUNCTION_ADDR(ArrayConstructor_StubFailure);
+ Runtime::FunctionForId(Runtime::kArrayConstructor)->entry;
+}
+
+
+static void InitializeInternalArrayConstructorDescriptor(
+ Isolate* isolate,
+ CodeStubInterfaceDescriptor* descriptor,
+ int constant_stack_parameter_count) {
+ // register state
+ // eax -- number of arguments
+ // edi -- constructor function
+ static Register registers[] = { edi };
+ descriptor->register_param_count_ = 1;
+
+ if (constant_stack_parameter_count != 0) {
+ // The stack parameter count covers the constructor pointer and one argument.
+ descriptor->stack_parameter_count_ = &eax;
+ }
+ descriptor->hint_stack_parameter_count_ = constant_stack_parameter_count;
+ descriptor->register_params_ = registers;
+ descriptor->function_mode_ = JS_FUNCTION_STUB_MODE;
+ descriptor->deoptimization_handler_ =
+ Runtime::FunctionForId(Runtime::kInternalArrayConstructor)->entry;
}
@@ -168,6 +184,27 @@ void ArrayNArgumentsConstructorStub::InitializeInterfaceDescriptor(
}
+void InternalArrayNoArgumentConstructorStub::InitializeInterfaceDescriptor(
+ Isolate* isolate,
+ CodeStubInterfaceDescriptor* descriptor) {
+ InitializeInternalArrayConstructorDescriptor(isolate, descriptor, 0);
+}
+
+
+void InternalArraySingleArgumentConstructorStub::InitializeInterfaceDescriptor(
+ Isolate* isolate,
+ CodeStubInterfaceDescriptor* descriptor) {
+ InitializeInternalArrayConstructorDescriptor(isolate, descriptor, 1);
+}
+
+
+void InternalArrayNArgumentsConstructorStub::InitializeInterfaceDescriptor(
+ Isolate* isolate,
+ CodeStubInterfaceDescriptor* descriptor) {
+ InitializeInternalArrayConstructorDescriptor(isolate, descriptor, -1);
+}
+
+
void CompareNilICStub::InitializeInterfaceDescriptor(
Isolate* isolate,
CodeStubInterfaceDescriptor* descriptor) {
@@ -176,8 +213,20 @@ void CompareNilICStub::InitializeInterfaceDescriptor(
descriptor->register_params_ = registers;
descriptor->deoptimization_handler_ =
FUNCTION_ADDR(CompareNilIC_Miss);
- descriptor->miss_handler_ =
- ExternalReference(IC_Utility(IC::kCompareNilIC_Miss), isolate);
+ descriptor->SetMissHandler(
+ ExternalReference(IC_Utility(IC::kCompareNilIC_Miss), isolate));
+}
+
+void ToBooleanStub::InitializeInterfaceDescriptor(
+ Isolate* isolate,
+ CodeStubInterfaceDescriptor* descriptor) {
+ static Register registers[] = { eax };
+ descriptor->register_param_count_ = 1;
+ descriptor->register_params_ = registers;
+ descriptor->deoptimization_handler_ =
+ FUNCTION_ADDR(ToBooleanIC_Miss);
+ descriptor->SetMissHandler(
+ ExternalReference(IC_Utility(IC::kToBooleanIC_Miss), isolate));
}
@@ -200,7 +249,7 @@ void HydrogenCodeStub::GenerateLightweightMiss(MacroAssembler* masm) {
for (int i = 0; i < param_count; ++i) {
__ push(descriptor->register_params_[i]);
}
- ExternalReference miss = descriptor->miss_handler_;
+ ExternalReference miss = descriptor->miss_handler();
__ CallExternalReference(miss, descriptor->register_param_count_);
}
@@ -469,116 +518,6 @@ void FastNewBlockContextStub::Generate(MacroAssembler* masm) {
}
-// The stub expects its argument on the stack and returns its result in tos_:
-// zero for false, and a non-zero value for true.
-void ToBooleanStub::Generate(MacroAssembler* masm) {
- // This stub overrides SometimesSetsUpAFrame() to return false. That means
- // we cannot call anything that could cause a GC from this stub.
- Label patch;
- Factory* factory = masm->isolate()->factory();
- const Register argument = eax;
- const Register map = edx;
-
- if (!types_.IsEmpty()) {
- __ mov(argument, Operand(esp, 1 * kPointerSize));
- }
-
- // undefined -> false
- CheckOddball(masm, UNDEFINED, Heap::kUndefinedValueRootIndex, false);
-
- // Boolean -> its value
- CheckOddball(masm, BOOLEAN, Heap::kFalseValueRootIndex, false);
- CheckOddball(masm, BOOLEAN, Heap::kTrueValueRootIndex, true);
-
- // 'null' -> false.
- CheckOddball(masm, NULL_TYPE, Heap::kNullValueRootIndex, false);
-
- if (types_.Contains(SMI)) {
- // Smis: 0 -> false, all other -> true
- Label not_smi;
- __ JumpIfNotSmi(argument, &not_smi, Label::kNear);
- // argument contains the correct return value already.
- if (!tos_.is(argument)) {
- __ mov(tos_, argument);
- }
- __ ret(1 * kPointerSize);
- __ bind(&not_smi);
- } else if (types_.NeedsMap()) {
- // If we need a map later and have a Smi -> patch.
- __ JumpIfSmi(argument, &patch, Label::kNear);
- }
-
- if (types_.NeedsMap()) {
- __ mov(map, FieldOperand(argument, HeapObject::kMapOffset));
-
- if (types_.CanBeUndetectable()) {
- __ test_b(FieldOperand(map, Map::kBitFieldOffset),
- 1 << Map::kIsUndetectable);
- // Undetectable -> false.
- Label not_undetectable;
- __ j(zero, &not_undetectable, Label::kNear);
- __ Set(tos_, Immediate(0));
- __ ret(1 * kPointerSize);
- __ bind(&not_undetectable);
- }
- }
-
- if (types_.Contains(SPEC_OBJECT)) {
- // spec object -> true.
- Label not_js_object;
- __ CmpInstanceType(map, FIRST_SPEC_OBJECT_TYPE);
- __ j(below, &not_js_object, Label::kNear);
- // argument contains the correct return value already.
- if (!tos_.is(argument)) {
- __ Set(tos_, Immediate(1));
- }
- __ ret(1 * kPointerSize);
- __ bind(&not_js_object);
- }
-
- if (types_.Contains(STRING)) {
- // String value -> false iff empty.
- Label not_string;
- __ CmpInstanceType(map, FIRST_NONSTRING_TYPE);
- __ j(above_equal, &not_string, Label::kNear);
- __ mov(tos_, FieldOperand(argument, String::kLengthOffset));
- __ ret(1 * kPointerSize); // the string length is OK as the return value
- __ bind(&not_string);
- }
-
- if (types_.Contains(SYMBOL)) {
- // Symbol value -> true.
- Label not_symbol;
- __ CmpInstanceType(map, SYMBOL_TYPE);
- __ j(not_equal, &not_symbol, Label::kNear);
- __ bind(&not_symbol);
- }
-
- if (types_.Contains(HEAP_NUMBER)) {
- // heap number -> false iff +0, -0, or NaN.
- Label not_heap_number, false_result;
- __ cmp(map, factory->heap_number_map());
- __ j(not_equal, &not_heap_number, Label::kNear);
- __ fldz();
- __ fld_d(FieldOperand(argument, HeapNumber::kValueOffset));
- __ FCmp();
- __ j(zero, &false_result, Label::kNear);
- // argument contains the correct return value already.
- if (!tos_.is(argument)) {
- __ Set(tos_, Immediate(1));
- }
- __ ret(1 * kPointerSize);
- __ bind(&false_result);
- __ Set(tos_, Immediate(0));
- __ ret(1 * kPointerSize);
- __ bind(&not_heap_number);
- }
-
- __ bind(&patch);
- GenerateTypeTransition(masm);
-}
-
-
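The hand-written ToBooleanStub above, together with its CheckOddball and
GenerateTypeTransition helpers removed below, gives way to a ToBooleanIC
(its miss handler, kToBooleanIC_Miss, appears at the top of this file).
The truth table the stub encoded is ES5's ToBoolean conversion. A minimal
C++ sketch of the same decision, over a hypothetical tagged Value type
rather than real V8 objects:

#include <cmath>
#include <cstdint>
#include <string>

// Hypothetical tagged value, for illustration only (not a V8 type).
struct Value {
  enum Kind { kUndefined, kNull, kBoolean, kSmi, kString, kNumber, kObject };
  Kind kind = kUndefined;
  bool boolean = false;
  int32_t smi = 0;
  std::string string;
  double number = 0.0;
  bool undetectable = false;  // e.g. document.all
};

// ES5 9.2 ToBoolean, mirroring the cases the removed stub checked.
bool ToBoolean(const Value& v) {
  switch (v.kind) {
    case Value::kUndefined:
    case Value::kNull:     return false;              // oddballs -> false
    case Value::kBoolean:  return v.boolean;          // booleans -> themselves
    case Value::kSmi:      return v.smi != 0;         // 0 -> false
    case Value::kString:   return !v.string.empty();  // "" -> false
    case Value::kNumber:                              // +0, -0, NaN -> false
      return v.number != 0.0 && !std::isnan(v.number);
    case Value::kObject:   return !v.undetectable;    // spec objects -> true
  }
  return true;
}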
void StoreBufferOverflowStub::Generate(MacroAssembler* masm) {
// We don't allow a GC during a store buffer overflow so there is no need to
// store the registers in any particular way, but we do have to store and
@@ -614,44 +553,6 @@ void StoreBufferOverflowStub::Generate(MacroAssembler* masm) {
}
-void ToBooleanStub::CheckOddball(MacroAssembler* masm,
- Type type,
- Heap::RootListIndex value,
- bool result) {
- const Register argument = eax;
- if (types_.Contains(type)) {
- // If we see an expected oddball, return its ToBoolean value in tos_.
- Label different_value;
- __ CompareRoot(argument, value);
- __ j(not_equal, &different_value, Label::kNear);
- if (!result) {
- // If we have to return zero, there is no way around clearing tos_.
- __ Set(tos_, Immediate(0));
- } else if (!tos_.is(argument)) {
- // If we have to return non-zero, we can re-use the argument if it is the
- // same register as the result, because we never see Smi-zero here.
- __ Set(tos_, Immediate(1));
- }
- __ ret(1 * kPointerSize);
- __ bind(&different_value);
- }
-}
-
-
-void ToBooleanStub::GenerateTypeTransition(MacroAssembler* masm) {
- __ pop(ecx); // Get return address, operand is now on top of stack.
- __ push(Immediate(Smi::FromInt(tos_.code())));
- __ push(Immediate(Smi::FromInt(types_.ToByte())));
- __ push(ecx); // Push return address.
- // Patch the caller to an appropriate specialized stub and return the
- // operation result to the caller of the stub.
- __ TailCallExternalReference(
- ExternalReference(IC_Utility(IC::kToBoolean_Patch), masm->isolate()),
- 3,
- 1);
-}
-
-
class FloatingPointHelper : public AllStatic {
public:
enum ArgLocation {
@@ -707,12 +608,6 @@ class FloatingPointHelper : public AllStatic {
// Expects operands in edx, eax.
static void LoadSSE2Smis(MacroAssembler* masm, Register scratch);
- // Checks that the two floating point numbers loaded into xmm0 and xmm1
- // have int32 values.
- static void CheckSSE2OperandsAreInt32(MacroAssembler* masm,
- Label* non_int32,
- Register scratch);
-
// Checks that |operand| has an int32 value. If |int32_result| is different
// from |scratch|, it will contain that int32 value.
static void CheckSSE2OperandIsInt32(MacroAssembler* masm,
@@ -1611,7 +1506,7 @@ static void BinaryOpStub_GenerateSmiCode(
void BinaryOpStub::GenerateSmiStub(MacroAssembler* masm) {
- Label call_runtime;
+ Label right_arg_changed, call_runtime;
switch (op_) {
case Token::ADD:
@@ -1632,6 +1527,13 @@ void BinaryOpStub::GenerateSmiStub(MacroAssembler* masm) {
UNREACHABLE();
}
+ if (op_ == Token::MOD && has_fixed_right_arg_) {
+ // It is guaranteed that the value will fit into a Smi, because if it
+ // didn't, we wouldn't be here; see BinaryOp_Patch.
+ __ cmp(eax, Immediate(Smi::FromInt(fixed_right_arg_value())));
+ __ j(not_equal, &right_arg_changed);
+ }
+
if (result_type_ == BinaryOpIC::UNINITIALIZED ||
result_type_ == BinaryOpIC::SMI) {
BinaryOpStub_GenerateSmiCode(
@@ -1643,6 +1545,7 @@ void BinaryOpStub::GenerateSmiStub(MacroAssembler* masm) {
// Code falls through if the result is not returned as either a smi or heap
// number.
+ __ bind(&right_arg_changed);
switch (op_) {
case Token::ADD:
case Token::SUB:
@@ -1745,8 +1648,7 @@ void BinaryOpStub::GenerateInt32Stub(MacroAssembler* masm) {
case Token::MUL:
case Token::DIV:
case Token::MOD: {
- Label not_floats;
- Label not_int32;
+ Label not_floats, not_int32, right_arg_changed;
if (CpuFeatures::IsSupported(SSE2)) {
CpuFeatureScope use_sse2(masm, SSE2);
// It could be that only SMIs have been seen at either the left
@@ -1762,8 +1664,15 @@ void BinaryOpStub::GenerateInt32Stub(MacroAssembler* masm) {
__ JumpIfNotSmi(eax, &not_int32);
}
FloatingPointHelper::LoadSSE2Operands(masm, &not_floats);
- FloatingPointHelper::CheckSSE2OperandsAreInt32(masm, &not_int32, ecx);
+ FloatingPointHelper::CheckSSE2OperandIsInt32(
+ masm, &not_int32, xmm0, ebx, ecx, xmm2);
+ FloatingPointHelper::CheckSSE2OperandIsInt32(
+ masm, &not_int32, xmm1, edi, ecx, xmm2);
if (op_ == Token::MOD) {
+ if (has_fixed_right_arg_) {
+ __ cmp(edi, Immediate(fixed_right_arg_value()));
+ __ j(not_equal, &right_arg_changed);
+ }
GenerateRegisterArgsPush(masm);
__ InvokeBuiltin(Builtins::MOD, JUMP_FUNCTION);
} else {
@@ -1816,6 +1725,7 @@ void BinaryOpStub::GenerateInt32Stub(MacroAssembler* masm) {
__ bind(&not_floats);
__ bind(&not_int32);
+ __ bind(&right_arg_changed);
GenerateTypeTransition(masm);
break;
}
@@ -2907,14 +2817,6 @@ void FloatingPointHelper::LoadSSE2Smis(MacroAssembler* masm,
}
-void FloatingPointHelper::CheckSSE2OperandsAreInt32(MacroAssembler* masm,
- Label* non_int32,
- Register scratch) {
- CheckSSE2OperandIsInt32(masm, non_int32, xmm0, scratch, scratch, xmm2);
- CheckSSE2OperandIsInt32(masm, non_int32, xmm1, scratch, scratch, xmm2);
-}
-
-
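CheckSSE2OperandsAreInt32 is folded away; each caller now checks its
operands individually with CheckSSE2OperandIsInt32, which is a
truncate-and-compare round trip (cvttsd2si, cvtsi2sd, ucomisd). The same
predicate in portable C++, as a sketch:

#include <cstdint>
#include <limits>

// True iff d holds an exact int32 value. NaN and out-of-range doubles
// fail because the round trip cannot reproduce them. Note that -0.0
// passes this test; code that must distinguish -0 checks it separately.
bool IsInt32Double(double d) {
  if (!(d >= std::numeric_limits<int32_t>::min() &&
        d <= std::numeric_limits<int32_t>::max())) {
    return false;  // also rejects NaN: comparisons with NaN are false
  }
  int32_t i = static_cast<int32_t>(d);   // cvttsd2si
  return static_cast<double>(i) == d;    // cvtsi2sd + ucomisd
}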
void FloatingPointHelper::CheckSSE2OperandIsInt32(MacroAssembler* masm,
Label* non_int32,
XMMRegister operand,
@@ -3468,6 +3370,8 @@ void ArgumentsAccessStub::GenerateNewNonStrictSlow(MacroAssembler* masm) {
void ArgumentsAccessStub::GenerateNewNonStrictFast(MacroAssembler* masm) {
+ Isolate* isolate = masm->isolate();
+
// esp[0] : return address
// esp[4] : number of parameters (tagged)
// esp[8] : receiver displacement
@@ -3599,7 +3503,7 @@ void ArgumentsAccessStub::GenerateNewNonStrictFast(MacroAssembler* masm) {
__ j(zero, &skip_parameter_map);
__ mov(FieldOperand(edi, FixedArray::kMapOffset),
- Immediate(FACTORY->non_strict_arguments_elements_map()));
+ Immediate(isolate->factory()->non_strict_arguments_elements_map()));
__ lea(eax, Operand(ebx, reinterpret_cast<intptr_t>(Smi::FromInt(2))));
__ mov(FieldOperand(edi, FixedArray::kLengthOffset), eax);
__ mov(FieldOperand(edi, FixedArray::kHeaderSize + 0 * kPointerSize), esi);
@@ -3620,7 +3524,7 @@ void ArgumentsAccessStub::GenerateNewNonStrictFast(MacroAssembler* masm) {
__ mov(ebx, Immediate(Smi::FromInt(Context::MIN_CONTEXT_SLOTS)));
__ add(ebx, Operand(esp, 4 * kPointerSize));
__ sub(ebx, eax);
- __ mov(ecx, FACTORY->the_hole_value());
+ __ mov(ecx, isolate->factory()->the_hole_value());
__ mov(edx, edi);
__ lea(edi, Operand(edi, eax, times_2, kParameterMapHeaderSize));
// eax = loop variable (tagged)
@@ -3655,7 +3559,7 @@ void ArgumentsAccessStub::GenerateNewNonStrictFast(MacroAssembler* masm) {
// esp[16] = address of receiver argument
// Copy arguments header and remaining slots (if there are any).
__ mov(FieldOperand(edi, FixedArray::kMapOffset),
- Immediate(FACTORY->fixed_array_map()));
+ Immediate(isolate->factory()->fixed_array_map()));
__ mov(FieldOperand(edi, FixedArray::kLengthOffset), ecx);
Label arguments_loop, arguments_test;
@@ -3691,6 +3595,8 @@ void ArgumentsAccessStub::GenerateNewNonStrictFast(MacroAssembler* masm) {
void ArgumentsAccessStub::GenerateNewStrict(MacroAssembler* masm) {
+ Isolate* isolate = masm->isolate();
+
// esp[0] : return address
// esp[4] : number of parameters
// esp[8] : receiver displacement
@@ -3761,7 +3667,7 @@ void ArgumentsAccessStub::GenerateNewStrict(MacroAssembler* masm) {
__ lea(edi, Operand(eax, Heap::kArgumentsObjectSizeStrict));
__ mov(FieldOperand(eax, JSObject::kElementsOffset), edi);
__ mov(FieldOperand(edi, FixedArray::kMapOffset),
- Immediate(FACTORY->fixed_array_map()));
+ Immediate(isolate->factory()->fixed_array_map()));
__ mov(FieldOperand(edi, FixedArray::kLengthOffset), ecx);
// Untag the length for the loop below.
@@ -4778,7 +4684,6 @@ static void GenerateRecordCallTargetNoArray(MacroAssembler* masm) {
// megamorphic.
// ebx : cache cell for call target
// edi : the function to call
- ASSERT(!FLAG_optimize_constructed_arrays);
Isolate* isolate = masm->isolate();
Label initialize, done;
@@ -7860,14 +7765,16 @@ void ProfileEntryHookStub::MaybeCallEntryHook(MacroAssembler* masm) {
void ProfileEntryHookStub::Generate(MacroAssembler* masm) {
// Ecx is the only volatile register we must save.
+ const int kNumSavedRegisters = 1;
__ push(ecx);
// Calculate and push the original stack pointer.
- __ lea(eax, Operand(esp, kPointerSize));
+ __ lea(eax, Operand(esp, (kNumSavedRegisters + 1) * kPointerSize));
__ push(eax);
- // Calculate and push the function address.
- __ mov(eax, Operand(eax, 0));
+ // Retrieve our return address and use it to calculate the calling
+ // function's address.
+ __ mov(eax, Operand(esp, (kNumSavedRegisters + 1) * kPointerSize));
__ sub(eax, Immediate(Assembler::kCallInstructionLength));
__ push(eax);
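With one saved register (ecx) plus the hook's own return address on the
stack, the caller's stack pointer is esp + 2 * kPointerSize, and the
calling function's address is the return address minus the length of the
call instruction (5 bytes on ia32: opcode E8 plus a 32-bit displacement).
A sketch of the same arithmetic using GCC/Clang builtins, for
illustration only:

#include <cstdint>

// Assumes GCC/Clang. A near call on ia32 is 5 bytes long.
constexpr uintptr_t kCallInstructionLength = 5;

void EntryHook() {
  // The address the 'call EntryHook' was issued from is the return
  // address minus the size of the call instruction itself.
  uintptr_t return_address =
      reinterpret_cast<uintptr_t>(__builtin_return_address(0));
  uintptr_t call_site = return_address - kCallInstructionLength;
  (void)call_site;  // a real hook would record this
}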
@@ -7964,8 +7871,12 @@ static void ArrayConstructorStubAheadOfTimeHelper(Isolate* isolate) {
TERMINAL_FAST_ELEMENTS_KIND);
for (int i = 0; i <= to_index; ++i) {
ElementsKind kind = GetFastElementsKindFromSequenceIndex(i);
- T stub(kind);
+ T stub(kind, false);
stub.GetCode(isolate)->set_is_pregenerated(true);
+ if (AllocationSiteInfo::GetMode(kind) != DONT_TRACK_ALLOCATION_SITE) {
+ T stub1(kind, true);
+ stub1.GetCode(isolate)->set_is_pregenerated(true);
+ }
}
}
@@ -7980,6 +7891,21 @@ void ArrayConstructorStubBase::GenerateStubsAheadOfTime(Isolate* isolate) {
}
+void InternalArrayConstructorStubBase::GenerateStubsAheadOfTime(
+ Isolate* isolate) {
+ ElementsKind kinds[2] = { FAST_ELEMENTS, FAST_HOLEY_ELEMENTS };
+ for (int i = 0; i < 2; i++) {
+ // For internal arrays we only need the zero-, one- and N-argument stubs.
+ InternalArrayNoArgumentConstructorStub stubh1(kinds[i]);
+ stubh1.GetCode(isolate)->set_is_pregenerated(true);
+ InternalArraySingleArgumentConstructorStub stubh2(kinds[i]);
+ stubh2.GetCode(isolate)->set_is_pregenerated(true);
+ InternalArrayNArgumentsConstructorStub stubh3(kinds[i]);
+ stubh3.GetCode(isolate)->set_is_pregenerated(true);
+ }
+}
+
+
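Two pregeneration patterns appear here: regular array-constructor stubs
now come in pairs (the second with allocation-site feedback disabled)
whenever the elements kind is tracked, while internal arrays get a fixed
zero/one/N-argument trio per kind. The shape of the variant loop, reduced
to a sketch with hypothetical types (the tracking rule shown is
illustrative, not V8's exact policy):

#include <vector>

enum ElementsKind { FAST_ELEMENTS, FAST_HOLEY_ELEMENTS };
enum Mode { DONT_TRACK_ALLOCATION_SITE, TRACK_ALLOCATION_SITE };

// Illustrative stand-in for AllocationSiteInfo::GetMode().
Mode AllocationModeFor(ElementsKind kind) {
  return kind == FAST_ELEMENTS ? TRACK_ALLOCATION_SITE
                               : DONT_TRACK_ALLOCATION_SITE;
}

struct StubKey { ElementsKind kind; bool disable_allocation_sites; };

std::vector<StubKey> PregenerateArrayStubs() {
  std::vector<StubKey> stubs;
  const ElementsKind kinds[] = { FAST_ELEMENTS, FAST_HOLEY_ELEMENTS };
  for (ElementsKind kind : kinds) {
    stubs.push_back({ kind, false });     // T stub(kind, false)
    if (AllocationModeFor(kind) != DONT_TRACK_ALLOCATION_SITE) {
      stubs.push_back({ kind, true });    // T stub1(kind, true)
    }
  }
  return stubs;
}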
void ArrayConstructorStub::Generate(MacroAssembler* masm) {
// ----------- S t a t e -------------
// -- eax : argc (only if argument_count_ == ANY)
@@ -8065,6 +7991,107 @@ void ArrayConstructorStub::Generate(MacroAssembler* masm) {
}
+void InternalArrayConstructorStub::GenerateCase(
+ MacroAssembler* masm, ElementsKind kind) {
+ Label not_zero_case, not_one_case;
+ Label normal_sequence;
+
+ __ test(eax, eax);
+ __ j(not_zero, &not_zero_case);
+ InternalArrayNoArgumentConstructorStub stub0(kind);
+ __ TailCallStub(&stub0);
+
+ __ bind(&not_zero_case);
+ __ cmp(eax, 1);
+ __ j(greater, &not_one_case);
+
+ if (IsFastPackedElementsKind(kind)) {
+ // We might need to create a holey array; look at the first argument.
+ __ mov(ecx, Operand(esp, kPointerSize));
+ __ test(ecx, ecx);
+ __ j(zero, &normal_sequence);
+
+ InternalArraySingleArgumentConstructorStub
+ stub1_holey(GetHoleyElementsKind(kind));
+ __ TailCallStub(&stub1_holey);
+ }
+
+ __ bind(&normal_sequence);
+ InternalArraySingleArgumentConstructorStub stub1(kind);
+ __ TailCallStub(&stub1);
+
+ __ bind(&not_one_case);
+ InternalArrayNArgumentsConstructorStub stubN(kind);
+ __ TailCallStub(&stubN);
+}
+
+
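GenerateCase mirrors how the Array constructor behaves at the JS level:
no arguments yields an empty array, one numeric argument is a length (and
a non-zero length on a packed kind must fall back to the holey variant,
since the slots start out as holes), and anything else copies the
arguments. A hedged C++ rendering, with a hypothetical ArrayPlan result
type:

#include <cstddef>
#include <vector>

enum ElementsKind { FAST_ELEMENTS, FAST_HOLEY_ELEMENTS };

// Hypothetical summary of the stub selection: chosen kind plus length.
struct ArrayPlan { ElementsKind kind; size_t length; };

ArrayPlan PlanInternalArray(ElementsKind kind, const std::vector<int>& args) {
  if (args.empty()) return { kind, 0 };          // stub0: no arguments
  if (args.size() == 1) {                        // stub1: length argument
    size_t length = static_cast<size_t>(args[0]);
    // A non-zero length on a packed kind means uninitialized slots,
    // so the holey variant is required (stub1_holey above).
    if (kind == FAST_ELEMENTS && length != 0) {
      return { FAST_HOLEY_ELEMENTS, length };
    }
    return { kind, length };
  }
  return { kind, args.size() };                  // stubN: n arguments
}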
+void InternalArrayConstructorStub::Generate(MacroAssembler* masm) {
+ // ----------- S t a t e -------------
+ // -- eax : argc
+ // -- ebx : type info cell
+ // -- edi : constructor
+ // -- esp[0] : return address
+ // -- esp[4] : last argument
+ // -----------------------------------
+
+ if (FLAG_debug_code) {
+ // The array construct code is only set for the global and natives
+ // builtin Array functions, which always have maps.
+
+ // Initial map for the builtin Array function should be a map.
+ __ mov(ecx, FieldOperand(edi, JSFunction::kPrototypeOrInitialMapOffset));
+ // The following test will catch both a NULL pointer and a Smi.
+ __ test(ecx, Immediate(kSmiTagMask));
+ __ Assert(not_zero, "Unexpected initial map for Array function");
+ __ CmpObjectType(ecx, MAP_TYPE, ecx);
+ __ Assert(equal, "Unexpected initial map for Array function");
+ }
+
+ if (FLAG_optimize_constructed_arrays) {
+ // Figure out the right elements kind
+ __ mov(ecx, FieldOperand(edi, JSFunction::kPrototypeOrInitialMapOffset));
+
+ // Load the map's "bit field 2" into ecx. We only need the first byte,
+ // but the following masking takes care of that anyway.
+ __ mov(ecx, FieldOperand(ecx, Map::kBitField2Offset));
+ // Retrieve elements_kind from bit field 2.
+ __ and_(ecx, Map::kElementsKindMask);
+ __ shr(ecx, Map::kElementsKindShift);
+
+ if (FLAG_debug_code) {
+ Label done;
+ __ cmp(ecx, Immediate(FAST_ELEMENTS));
+ __ j(equal, &done);
+ __ cmp(ecx, Immediate(FAST_HOLEY_ELEMENTS));
+ __ Assert(equal,
+ "Invalid ElementsKind for InternalArray or InternalPackedArray");
+ __ bind(&done);
+ }
+
+ Label fast_elements_case;
+ __ cmp(ecx, Immediate(FAST_ELEMENTS));
+ __ j(equal, &fast_elements_case);
+ GenerateCase(masm, FAST_HOLEY_ELEMENTS);
+
+ __ bind(&fast_elements_case);
+ GenerateCase(masm, FAST_ELEMENTS);
+ } else {
+ Label generic_constructor;
+ // Run the native code for the Array function called as constructor.
+ ArrayNativeCode(masm, true, &generic_constructor);
+
+ // Jump to the generic construct code in case the specialized code cannot
+ // handle the construction.
+ __ bind(&generic_constructor);
+ Handle<Code> generic_construct_stub =
+ masm->isolate()->builtins()->JSConstructStubGeneric();
+ __ jmp(generic_construct_stub, RelocInfo::CODE_TARGET);
+ }
+}
+
+
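The elements kind is packed into the map's "bit field 2" byte, so reading
it is a mask followed by a shift, exactly the and_/shr pair above. A
sketch of the decode; the mask and shift values here are illustrative,
the real ones are Map::kElementsKindMask and Map::kElementsKindShift:

#include <cstdint>

// Illustrative constants; V8 derives the real values from the number
// of elements kinds.
constexpr uint32_t kElementsKindShift = 3;
constexpr uint32_t kElementsKindMask = 0x1Fu << kElementsKindShift;

uint32_t ElementsKindFromBitField2(uint32_t bit_field2) {
  // Mask first, then shift down, matching the generated and_/shr pair.
  return (bit_field2 & kElementsKindMask) >> kElementsKindShift;
}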
#undef __
} } // namespace v8::internal
diff --git a/deps/v8/src/ia32/codegen-ia32.cc b/deps/v8/src/ia32/codegen-ia32.cc
index 7663c6a7fd..d562238893 100644
--- a/deps/v8/src/ia32/codegen-ia32.cc
+++ b/deps/v8/src/ia32/codegen-ia32.cc
@@ -1057,50 +1057,6 @@ void StringCharLoadGenerator::Generate(MacroAssembler* masm,
}
-void SeqStringSetCharGenerator::Generate(MacroAssembler* masm,
- String::Encoding encoding,
- Register string,
- Register index,
- Register value) {
- if (FLAG_debug_code) {
- __ test(index, Immediate(kSmiTagMask));
- __ Check(zero, "Non-smi index");
- __ test(value, Immediate(kSmiTagMask));
- __ Check(zero, "Non-smi value");
-
- __ cmp(index, FieldOperand(string, String::kLengthOffset));
- __ Check(less, "Index is too large");
-
- __ cmp(index, Immediate(Smi::FromInt(0)));
- __ Check(greater_equal, "Index is negative");
-
- __ push(value);
- __ mov(value, FieldOperand(string, HeapObject::kMapOffset));
- __ movzx_b(value, FieldOperand(value, Map::kInstanceTypeOffset));
-
- __ and_(value, Immediate(kStringRepresentationMask | kStringEncodingMask));
- static const uint32_t one_byte_seq_type = kSeqStringTag | kOneByteStringTag;
- static const uint32_t two_byte_seq_type = kSeqStringTag | kTwoByteStringTag;
- __ cmp(value, Immediate(encoding == String::ONE_BYTE_ENCODING
- ? one_byte_seq_type : two_byte_seq_type));
- __ Check(equal, "Unexpected string type");
- __ pop(value);
- }
-
- __ SmiUntag(value);
- STATIC_ASSERT(kSmiTagSize == 1 && kSmiTag == 0);
- if (encoding == String::ONE_BYTE_ENCODING) {
- __ SmiUntag(index);
- __ mov_b(FieldOperand(string, index, times_1, SeqString::kHeaderSize),
- value);
- } else {
- // No need to untag a smi for two-byte addressing.
- __ mov_w(FieldOperand(string, index, times_1, SeqString::kHeaderSize),
- value);
- }
-}
-
-
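SeqStringSetCharGenerator disappears as a shared helper; its debug checks
move into FullCodeGenerator::EmitSeqStringSetCharCheck (later in this
patch) and the store itself is emitted inline. The store is a byte move
for one-byte strings and a 16-bit move for two-byte strings; in plain
memory terms, as a sketch:

#include <cstdint>
#include <cstring>

// Writing a code unit into a sequential string's payload, where
// 'payload' stands for the bytes following SeqString::kHeaderSize.
void SetChar(bool one_byte, uint8_t* payload, uint32_t index,
             uint16_t value) {
  if (one_byte) {
    payload[index] = static_cast<uint8_t>(value);           // mov_b, times_1
  } else {
    // memcpy avoids alignment assumptions a raw uint16_t* store makes.
    std::memcpy(payload + 2 * index, &value, sizeof(value));  // mov_w
  }
}

The inline full-codegen version exploits the fact that a smi-tagged index
(value << 1) is already the byte offset for a two-byte string, which is
why only the one-byte path untags the index.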
static Operand ExpConstant(int index) {
return Operand::StaticVariable(ExternalReference::math_exp_constants(index));
}
diff --git a/deps/v8/src/ia32/codegen-ia32.h b/deps/v8/src/ia32/codegen-ia32.h
index 5137274145..6db381e47e 100644
--- a/deps/v8/src/ia32/codegen-ia32.h
+++ b/deps/v8/src/ia32/codegen-ia32.h
@@ -43,7 +43,7 @@ class CompilationInfo;
class CodeGenerator {
public:
// Printing of AST, etc. as requested by flags.
- static void MakeCodePrologue(CompilationInfo* info);
+ static void MakeCodePrologue(CompilationInfo* info, const char* kind);
// Allocate and install the code.
static Handle<Code> MakeCodeEpilogue(MacroAssembler* masm,
diff --git a/deps/v8/src/ia32/deoptimizer-ia32.cc b/deps/v8/src/ia32/deoptimizer-ia32.cc
index 9f3c4e97f6..16befa910c 100644
--- a/deps/v8/src/ia32/deoptimizer-ia32.cc
+++ b/deps/v8/src/ia32/deoptimizer-ia32.cc
@@ -118,7 +118,7 @@ void Deoptimizer::DeoptimizeFunctionWithPreparedFunctionList(
JSFunction* function) {
Isolate* isolate = function->GetIsolate();
HandleScope scope(isolate);
- AssertNoAllocation no_allocation;
+ DisallowHeapAllocation nha;
ASSERT(function->IsOptimized());
ASSERT(function->FunctionsInFunctionListShareSameCode());
diff --git a/deps/v8/src/ia32/full-codegen-ia32.cc b/deps/v8/src/ia32/full-codegen-ia32.cc
index 1bc72ec314..c77faaad80 100644
--- a/deps/v8/src/ia32/full-codegen-ia32.cc
+++ b/deps/v8/src/ia32/full-codegen-ia32.cc
@@ -641,9 +641,8 @@ void FullCodeGenerator::DoTest(Expression* condition,
Label* if_true,
Label* if_false,
Label* fall_through) {
- ToBooleanStub stub(result_register());
- __ push(result_register());
- __ CallStub(&stub, condition->test_id());
+ Handle<Code> ic = ToBooleanStub::GetUninitialized(isolate());
+ CallIC(ic, RelocInfo::CODE_TARGET, condition->test_id());
__ test(result_register(), result_register());
// The stub returns nonzero for true.
Split(not_zero, if_true, if_false, fall_through);
@@ -1034,9 +1033,8 @@ void FullCodeGenerator::VisitForInStatement(ForInStatement* stmt) {
ForIn loop_statement(this, stmt);
increment_loop_depth();
- // Get the object to enumerate over. Both SpiderMonkey and JSC
- // ignore null and undefined in contrast to the specification; see
- // ECMA-262 section 12.6.4.
+ // Get the object to enumerate over. If the object is null or undefined, skip
+ // over the loop. See ECMA-262 version 5, section 12.6.4.
VisitForAccumulatorValue(stmt->enumerable());
__ cmp(eax, isolate()->factory()->undefined_value());
__ j(equal, &exit);
@@ -1199,6 +1197,64 @@ void FullCodeGenerator::VisitForInStatement(ForInStatement* stmt) {
}
+void FullCodeGenerator::VisitForOfStatement(ForOfStatement* stmt) {
+ Comment cmnt(masm_, "[ ForOfStatement");
+ SetStatementPosition(stmt);
+
+ Iteration loop_statement(this, stmt);
+ increment_loop_depth();
+
+ // var iterator = iterable[@@iterator]()
+ VisitForAccumulatorValue(stmt->assign_iterator());
+
+ // As with for-in, skip the loop if the iterator is null or undefined.
+ __ CompareRoot(eax, Heap::kUndefinedValueRootIndex);
+ __ j(equal, loop_statement.break_label());
+ __ CompareRoot(eax, Heap::kNullValueRootIndex);
+ __ j(equal, loop_statement.break_label());
+
+ // Convert the iterator to a JS object.
+ Label convert, done_convert;
+ __ JumpIfSmi(eax, &convert);
+ __ CmpObjectType(eax, FIRST_SPEC_OBJECT_TYPE, ecx);
+ __ j(above_equal, &done_convert);
+ __ bind(&convert);
+ __ push(eax);
+ __ InvokeBuiltin(Builtins::TO_OBJECT, CALL_FUNCTION);
+ __ bind(&done_convert);
+
+ // Loop entry.
+ __ bind(loop_statement.continue_label());
+
+ // result = iterator.next()
+ VisitForEffect(stmt->next_result());
+
+ // if (result.done) break;
+ Label result_not_done;
+ VisitForControl(stmt->result_done(),
+ loop_statement.break_label(),
+ &result_not_done,
+ &result_not_done);
+ __ bind(&result_not_done);
+
+ // each = result.value
+ VisitForEffect(stmt->assign_each());
+
+ // Generate code for the body of the loop.
+ Visit(stmt->body());
+
+ // Check stack before looping.
+ PrepareForBailoutForId(stmt->BackEdgeId(), NO_REGISTERS);
+ EmitBackEdgeBookkeeping(stmt, loop_statement.continue_label());
+ __ jmp(loop_statement.continue_label());
+
+ // Exit and decrement the loop depth.
+ PrepareForBailoutForId(stmt->ExitId(), NO_REGISTERS);
+ __ bind(loop_statement.break_label());
+ decrement_loop_depth();
+}
+
+
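The new VisitForOfStatement lowers for-of to the iterator protocol:
evaluate iterable[@@iterator](), coerce it to an object, then loop
calling next() until result.done, binding result.value each time around.
The same control flow in C++, with a deliberately simplified,
hypothetical Iterator interface:

#include <functional>

// Hypothetical protocol types, for illustration only.
struct IterResult { bool done; int value; };
struct Iterator { std::function<IterResult()> next; };

void ForOf(Iterator it, const std::function<void(int)>& body) {
  for (;;) {
    IterResult result = it.next();   // result = iterator.next()
    if (result.done) break;          // if (result.done) break;
    body(result.value);              // each = result.value; <loop body>
  }
}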
void FullCodeGenerator::EmitNewClosure(Handle<SharedFunctionInfo> info,
bool pretenure) {
// Use the fast case closure allocation code that allocates in new
@@ -1932,10 +1988,10 @@ void FullCodeGenerator::VisitYield(Yield* expr) {
// [sp + 1 * kPointerSize] iter
// [sp + 0 * kPointerSize] g
- Label l_catch, l_try, l_resume, l_send, l_call, l_loop;
+ Label l_catch, l_try, l_resume, l_next, l_call, l_loop;
// Initial send value is undefined.
__ mov(eax, isolate()->factory()->undefined_value());
- __ jmp(&l_send);
+ __ jmp(&l_next);
// catch (e) { receiver = iter; f = iter.throw; arg = e; goto l_call; }
__ bind(&l_catch);
@@ -1964,14 +2020,14 @@ void FullCodeGenerator::VisitYield(Yield* expr) {
__ bind(&l_resume); // received in eax
__ PopTryHandler();
- // receiver = iter; f = iter.send; arg = received;
- __ bind(&l_send);
+ // receiver = iter; f = iter.next; arg = received;
+ __ bind(&l_next);
__ mov(edx, Operand(esp, 1 * kPointerSize)); // iter
__ push(edx); // iter
__ push(eax); // received
- __ mov(ecx, isolate()->factory()->send_string()); // "send"
- Handle<Code> send_ic = isolate()->builtins()->LoadIC_Initialize();
- CallIC(send_ic); // iter.send in eax
+ __ mov(ecx, isolate()->factory()->next_string()); // "next"
+ Handle<Code> next_ic = isolate()->builtins()->LoadIC_Initialize();
+ CallIC(next_ic); // iter.next in eax
// result = f.call(receiver, arg);
__ bind(&l_call);
@@ -2003,9 +2059,8 @@ void FullCodeGenerator::VisitYield(Yield* expr) {
__ mov(ecx, isolate()->factory()->done_string()); // "done"
Handle<Code> done_ic = isolate()->builtins()->LoadIC_Initialize();
CallIC(done_ic); // result.done in eax
- ToBooleanStub stub(eax);
- __ push(eax);
- __ CallStub(&stub);
+ Handle<Code> bool_ic = ToBooleanStub::GetUninitialized(isolate());
+ CallIC(bool_ic);
__ test(eax, eax);
__ j(zero, &l_try);
@@ -2074,7 +2129,7 @@ void FullCodeGenerator::EmitGeneratorResume(Expression *generator,
// If we are sending a value and there is no operand stack, we can jump back
// in directly.
- if (resume_mode == JSGeneratorObject::SEND) {
+ if (resume_mode == JSGeneratorObject::NEXT) {
Label slow_resume;
__ cmp(edx, Immediate(0));
__ j(not_zero, &slow_resume);
@@ -2925,7 +2980,7 @@ void FullCodeGenerator::EmitIsStringWrapperSafeForDefaultValueOf(
// Check for fast case object. Return false for slow case objects.
__ mov(ecx, FieldOperand(eax, JSObject::kPropertiesOffset));
__ mov(ecx, FieldOperand(ecx, HeapObject::kMapOffset));
- __ cmp(ecx, FACTORY->hash_table_map());
+ __ cmp(ecx, isolate()->factory()->hash_table_map());
__ j(equal, if_false);
// Look for valueOf string in the descriptor array, and indicate false if
@@ -2954,7 +3009,7 @@ void FullCodeGenerator::EmitIsStringWrapperSafeForDefaultValueOf(
__ jmp(&entry);
__ bind(&loop);
__ mov(edx, FieldOperand(ebx, 0));
- __ cmp(edx, FACTORY->value_of_string());
+ __ cmp(edx, isolate()->factory()->value_of_string());
__ j(equal, if_false);
__ add(ebx, Immediate(DescriptorArray::kDescriptorSize * kPointerSize));
__ bind(&entry);
@@ -3373,19 +3428,57 @@ void FullCodeGenerator::EmitDateField(CallRuntime* expr) {
}
+void FullCodeGenerator::EmitSeqStringSetCharCheck(Register string,
+ Register index,
+ Register value,
+ uint32_t encoding_mask) {
+ __ test(index, Immediate(kSmiTagMask));
+ __ Check(zero, "Non-smi index");
+ __ test(value, Immediate(kSmiTagMask));
+ __ Check(zero, "Non-smi value");
+
+ __ cmp(index, FieldOperand(string, String::kLengthOffset));
+ __ Check(less, "Index is too large");
+
+ __ cmp(index, Immediate(Smi::FromInt(0)));
+ __ Check(greater_equal, "Index is negative");
+
+ __ push(value);
+ __ mov(value, FieldOperand(string, HeapObject::kMapOffset));
+ __ movzx_b(value, FieldOperand(value, Map::kInstanceTypeOffset));
+
+ __ and_(value, Immediate(kStringRepresentationMask | kStringEncodingMask));
+ __ cmp(value, Immediate(encoding_mask));
+ __ Check(equal, "Unexpected string type");
+ __ pop(value);
+}
+
+
void FullCodeGenerator::EmitOneByteSeqStringSetChar(CallRuntime* expr) {
ZoneList<Expression*>* args = expr->arguments();
ASSERT_EQ(3, args->length());
+ Register string = eax;
+ Register index = ebx;
+ Register value = ecx;
+
VisitForStackValue(args->at(1)); // index
VisitForStackValue(args->at(2)); // value
- __ pop(ecx);
- __ pop(ebx);
+ __ pop(value);
+ __ pop(index);
VisitForAccumulatorValue(args->at(0)); // string
- static const String::Encoding encoding = String::ONE_BYTE_ENCODING;
- SeqStringSetCharGenerator::Generate(masm_, encoding, eax, ebx, ecx);
- context()->Plug(eax);
+
+ if (FLAG_debug_code) {
+ static const uint32_t one_byte_seq_type = kSeqStringTag | kOneByteStringTag;
+ EmitSeqStringSetCharCheck(string, index, value, one_byte_seq_type);
+ }
+
+ __ SmiUntag(value);
+ __ SmiUntag(index);
+ __ mov_b(FieldOperand(string, index, times_1, SeqOneByteString::kHeaderSize),
+ value);
+ context()->Plug(string);
}
@@ -3393,15 +3486,26 @@ void FullCodeGenerator::EmitTwoByteSeqStringSetChar(CallRuntime* expr) {
ZoneList<Expression*>* args = expr->arguments();
ASSERT_EQ(3, args->length());
+ Register string = eax;
+ Register index = ebx;
+ Register value = ecx;
+
VisitForStackValue(args->at(1)); // index
VisitForStackValue(args->at(2)); // value
- __ pop(ecx);
- __ pop(ebx);
+ __ pop(value);
+ __ pop(index);
VisitForAccumulatorValue(args->at(0)); // string
- static const String::Encoding encoding = String::TWO_BYTE_ENCODING;
- SeqStringSetCharGenerator::Generate(masm_, encoding, eax, ebx, ecx);
- context()->Plug(eax);
+ if (FLAG_debug_code) {
+ static const uint32_t two_byte_seq_type = kSeqStringTag | kTwoByteStringTag;
+ EmitSeqStringSetCharCheck(string, index, value, two_byte_seq_type);
+ }
+
+ __ SmiUntag(value);
+ // No need to untag a smi for two-byte addressing.
+ __ mov_w(FieldOperand(string, index, times_1, SeqTwoByteString::kHeaderSize),
+ value);
+ context()->Plug(string);
}
@@ -4664,18 +4768,14 @@ void FullCodeGenerator::EmitLiteralCompareNil(CompareOperation* expr,
VisitForAccumulatorValue(sub_expr);
PrepareForBailoutBeforeSplit(expr, true, if_true, if_false);
- EqualityKind kind = expr->op() == Token::EQ_STRICT
- ? kStrictEquality : kNonStrictEquality;
Handle<Object> nil_value = nil == kNullValue
? isolate()->factory()->null_value()
: isolate()->factory()->undefined_value();
- if (kind == kStrictEquality) {
+ if (expr->op() == Token::EQ_STRICT) {
__ cmp(eax, nil_value);
Split(equal, if_true, if_false, fall_through);
} else {
- Handle<Code> ic = CompareNilICStub::GetUninitialized(isolate(),
- kNonStrictEquality,
- nil);
+ Handle<Code> ic = CompareNilICStub::GetUninitialized(isolate(), nil);
CallIC(ic, RelocInfo::CODE_TARGET, expr->CompareOperationFeedbackId());
__ test(eax, eax);
Split(not_zero, if_true, if_false, fall_through);
diff --git a/deps/v8/src/ia32/ic-ia32.cc b/deps/v8/src/ia32/ic-ia32.cc
index e05031b8e7..95c7c029d6 100644
--- a/deps/v8/src/ia32/ic-ia32.cc
+++ b/deps/v8/src/ia32/ic-ia32.cc
@@ -92,7 +92,8 @@ static void GenerateNameDictionaryReceiverCheck(MacroAssembler* masm,
__ j(not_zero, miss);
__ mov(r0, FieldOperand(receiver, JSObject::kPropertiesOffset));
- __ CheckMap(r0, FACTORY->hash_table_map(), miss, DONT_DO_SMI_CHECK);
+ __ CheckMap(r0, masm->isolate()->factory()->hash_table_map(), miss,
+ DONT_DO_SMI_CHECK);
}
@@ -270,7 +271,7 @@ static void GenerateFastArrayLoad(MacroAssembler* masm,
if (not_fast_array != NULL) {
// Check that the object is in fast mode and writable.
__ CheckMap(scratch,
- FACTORY->fixed_array_map(),
+ masm->isolate()->factory()->fixed_array_map(),
not_fast_array,
DONT_DO_SMI_CHECK);
} else {
@@ -282,7 +283,7 @@ static void GenerateFastArrayLoad(MacroAssembler* masm,
// Fast case: Do the load.
STATIC_ASSERT((kPointerSize == 4) && (kSmiTagSize == 1) && (kSmiTag == 0));
__ mov(scratch, FieldOperand(scratch, key, times_2, FixedArray::kHeaderSize));
- __ cmp(scratch, Immediate(FACTORY->the_hole_value()));
+ __ cmp(scratch, Immediate(masm->isolate()->factory()->the_hole_value()));
// In case the loaded value is the_hole we have to consult GetProperty
// to ensure the prototype chain is searched.
__ j(equal, out_of_range);
@@ -1353,6 +1354,23 @@ void LoadIC::GenerateMiss(MacroAssembler* masm) {
}
+void LoadIC::GenerateRuntimeGetProperty(MacroAssembler* masm) {
+ // ----------- S t a t e -------------
+ // -- ecx : key
+ // -- edx : receiver
+ // -- esp[0] : return address
+ // -----------------------------------
+
+ __ pop(ebx);
+ __ push(edx); // receiver
+ __ push(ecx); // name
+ __ push(ebx); // return address
+
+ // Perform tail call to the entry.
+ __ TailCallRuntime(Runtime::kGetProperty, 2, 1);
+}
+
+
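The new runtime fallback uses the standard ia32 tail-call shuffle: the IC
receives its arguments in registers, but Runtime::kGetProperty expects
them on the stack beneath the return address, so the stub pops the return
address, pushes the arguments, and pushes it back. Modelled as a sketch
with a vector standing in for the stack (push_back pushes):

#include <cstdint>
#include <vector>

// Conceptual model of the shuffle above: edx (receiver) and ecx (name)
// end up under the return address before the tail call.
void PushArgsUnderReturnAddress(std::vector<uintptr_t>& stack,
                                uintptr_t receiver, uintptr_t name) {
  uintptr_t return_address = stack.back();  // __ pop(ebx)
  stack.pop_back();
  stack.push_back(receiver);                // __ push(edx)
  stack.push_back(name);                    // __ push(ecx)
  stack.push_back(return_address);          // __ push(ebx)
}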
void KeyedLoadIC::GenerateMiss(MacroAssembler* masm, ICMissMode miss_mode) {
// ----------- S t a t e -------------
// -- ecx : key
diff --git a/deps/v8/src/ia32/lithium-codegen-ia32.cc b/deps/v8/src/ia32/lithium-codegen-ia32.cc
index b6244af412..7d685bff32 100644
--- a/deps/v8/src/ia32/lithium-codegen-ia32.cc
+++ b/deps/v8/src/ia32/lithium-codegen-ia32.cc
@@ -590,7 +590,7 @@ int LCodeGen::ToInteger32(LConstantOperand* op) const {
Handle<Object> LCodeGen::ToHandle(LConstantOperand* op) const {
HConstant* constant = chunk_->LookupConstant(op);
- ASSERT(chunk_->LookupLiteralRepresentation(op).IsTagged());
+ ASSERT(chunk_->LookupLiteralRepresentation(op).IsSmiOrTagged());
return constant->handle();
}
@@ -603,7 +603,12 @@ double LCodeGen::ToDouble(LConstantOperand* op) const {
bool LCodeGen::IsInteger32(LConstantOperand* op) const {
- return chunk_->LookupLiteralRepresentation(op).IsInteger32();
+ return chunk_->LookupLiteralRepresentation(op).IsSmiOrInteger32();
+}
+
+
+bool LCodeGen::IsSmi(LConstantOperand* op) const {
+ return chunk_->LookupLiteralRepresentation(op).IsSmi();
}
@@ -1026,8 +1031,7 @@ void LCodeGen::PopulateDeoptimizationData(Handle<Code> code) {
Handle<FixedArray> literals =
factory()->NewFixedArray(deoptimization_literals_.length(), TENURED);
- { ALLOW_HANDLE_DEREF(isolate(),
- "copying a ZoneList of handles into a FixedArray");
+ { AllowDeferredHandleDereference copy_handles;
for (int i = 0; i < deoptimization_literals_.length(); i++) {
literals->set(i, *deoptimization_literals_[i]);
}
@@ -1230,110 +1234,115 @@ void LCodeGen::DoUnknownOSRValue(LUnknownOSRValue* instr) {
void LCodeGen::DoModI(LModI* instr) {
- if (instr->hydrogen()->HasPowerOf2Divisor()) {
- Register dividend = ToRegister(instr->left());
-
- int32_t divisor =
- HConstant::cast(instr->hydrogen()->right())->Integer32Value();
+ HMod* hmod = instr->hydrogen();
+ HValue* left = hmod->left();
+ HValue* right = hmod->right();
+ if (hmod->HasPowerOf2Divisor()) {
+ // TODO(svenpanne) We should really do the strength reduction on the
+ // Hydrogen level.
+ Register left_reg = ToRegister(instr->left());
+ ASSERT(left_reg.is(ToRegister(instr->result())));
- if (divisor < 0) divisor = -divisor;
+ // Note: The code below even works when right contains kMinInt.
+ int32_t divisor = Abs(right->GetInteger32Constant());
- Label positive_dividend, done;
- __ test(dividend, Operand(dividend));
- __ j(not_sign, &positive_dividend, Label::kNear);
- __ neg(dividend);
- __ and_(dividend, divisor - 1);
- __ neg(dividend);
- if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) {
- __ j(not_zero, &done, Label::kNear);
- DeoptimizeIf(no_condition, instr->environment());
- } else {
+ Label left_is_not_negative, done;
+ if (left->CanBeNegative()) {
+ __ test(left_reg, Operand(left_reg));
+ __ j(not_sign, &left_is_not_negative, Label::kNear);
+ __ neg(left_reg);
+ __ and_(left_reg, divisor - 1);
+ __ neg(left_reg);
+ if (hmod->CheckFlag(HValue::kBailoutOnMinusZero)) {
+ DeoptimizeIf(zero, instr->environment());
+ }
__ jmp(&done, Label::kNear);
}
- __ bind(&positive_dividend);
- __ and_(dividend, divisor - 1);
+
+ __ bind(&left_is_not_negative);
+ __ and_(left_reg, divisor - 1);
__ bind(&done);
- } else {
- Label done, remainder_eq_dividend, slow, both_positive;
+
+ } else if (hmod->has_fixed_right_arg()) {
Register left_reg = ToRegister(instr->left());
+ ASSERT(left_reg.is(ToRegister(instr->result())));
Register right_reg = ToRegister(instr->right());
- Register result_reg = ToRegister(instr->result());
+ int32_t divisor = hmod->fixed_right_arg_value();
+ ASSERT(IsPowerOf2(divisor));
+
+ // Check if our assumption of a fixed right operand still holds.
+ __ cmp(right_reg, Immediate(divisor));
+ DeoptimizeIf(not_equal, instr->environment());
+
+ Label left_is_not_negative, done;
+ if (left->CanBeNegative()) {
+ __ test(left_reg, Operand(left_reg));
+ __ j(not_sign, &left_is_not_negative, Label::kNear);
+ __ neg(left_reg);
+ __ and_(left_reg, divisor - 1);
+ __ neg(left_reg);
+ if (hmod->CheckFlag(HValue::kBailoutOnMinusZero)) {
+ DeoptimizeIf(zero, instr->environment());
+ }
+ __ jmp(&done, Label::kNear);
+ }
+
+ __ bind(&left_is_not_negative);
+ __ and_(left_reg, divisor - 1);
+ __ bind(&done);
+
+ } else {
+ Register left_reg = ToRegister(instr->left());
ASSERT(left_reg.is(eax));
- ASSERT(result_reg.is(edx));
+ Register right_reg = ToRegister(instr->right());
ASSERT(!right_reg.is(eax));
ASSERT(!right_reg.is(edx));
+ Register result_reg = ToRegister(instr->result());
+ ASSERT(result_reg.is(edx));
- // Check for x % 0.
- if (instr->hydrogen()->CheckFlag(HValue::kCanBeDivByZero)) {
+ Label done;
+ // Check for x % 0; idiv would signal a divide error. We have to
+ // deopt in this case because we can't return a NaN.
+ if (right->CanBeZero()) {
__ test(right_reg, Operand(right_reg));
DeoptimizeIf(zero, instr->environment());
}
- __ test(left_reg, Operand(left_reg));
- __ j(zero, &remainder_eq_dividend, Label::kNear);
- __ j(sign, &slow, Label::kNear);
-
- __ test(right_reg, Operand(right_reg));
- __ j(not_sign, &both_positive, Label::kNear);
- // The sign of the divisor doesn't matter.
- __ neg(right_reg);
-
- __ bind(&both_positive);
- // If the dividend is smaller than the nonnegative
- // divisor, the dividend is the result.
- __ cmp(left_reg, Operand(right_reg));
- __ j(less, &remainder_eq_dividend, Label::kNear);
-
- // Check if the divisor is a PowerOfTwo integer.
- Register scratch = ToRegister(instr->temp());
- __ mov(scratch, right_reg);
- __ sub(Operand(scratch), Immediate(1));
- __ test(scratch, Operand(right_reg));
- __ j(not_zero, &slow, Label::kNear);
- __ and_(left_reg, Operand(scratch));
- __ jmp(&remainder_eq_dividend, Label::kNear);
-
- // Slow case, using idiv instruction.
- __ bind(&slow);
-
- // Check for (kMinInt % -1).
- if (instr->hydrogen()->CheckFlag(HValue::kCanOverflow)) {
- Label left_not_min_int;
+ // Check for kMinInt % -1; idiv would signal a divide error. We
+ // have to deopt if we care about -0, because we can't return that.
+ if (left->RangeCanInclude(kMinInt) && right->RangeCanInclude(-1)) {
+ Label no_overflow_possible;
__ cmp(left_reg, kMinInt);
- __ j(not_zero, &left_not_min_int, Label::kNear);
+ __ j(not_equal, &no_overflow_possible, Label::kNear);
__ cmp(right_reg, -1);
- DeoptimizeIf(zero, instr->environment());
- __ bind(&left_not_min_int);
+ if (hmod->CheckFlag(HValue::kBailoutOnMinusZero)) {
+ DeoptimizeIf(equal, instr->environment());
+ } else {
+ __ j(not_equal, &no_overflow_possible, Label::kNear);
+ __ Set(result_reg, Immediate(0));
+ __ jmp(&done, Label::kNear);
+ }
+ __ bind(&no_overflow_possible);
}
- // Sign extend to edx.
+ // Sign extend dividend in eax into edx:eax.
__ cdq();
- // Check for (0 % -x) that will produce negative zero.
- if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) {
+ // If we care about -0, test if the dividend is <0 and the result is 0.
+ if (left->CanBeNegative() &&
+ hmod->CanBeZero() &&
+ hmod->CheckFlag(HValue::kBailoutOnMinusZero)) {
Label positive_left;
- Label done;
__ test(left_reg, Operand(left_reg));
__ j(not_sign, &positive_left, Label::kNear);
__ idiv(right_reg);
-
- // Test the remainder for 0, because then the result would be -0.
__ test(result_reg, Operand(result_reg));
- __ j(not_zero, &done, Label::kNear);
-
- DeoptimizeIf(no_condition, instr->environment());
+ DeoptimizeIf(zero, instr->environment());
+ __ jmp(&done, Label::kNear);
__ bind(&positive_left);
- __ idiv(right_reg);
- __ bind(&done);
- } else {
- __ idiv(right_reg);
}
- __ jmp(&done, Label::kNear);
-
- __ bind(&remainder_eq_dividend);
- __ mov(result_reg, left_reg);
-
+ __ idiv(right_reg);
__ bind(&done);
}
}
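Both power-of-two branches of the rewritten DoModI reduce x % 2^k to a
mask, with a negate/mask/negate sequence for negative dividends because
JS '%' truncates: the result carries the sign of the dividend. The
fixed-right-arg variant additionally re-checks that the divisor is still
the recorded constant and deoptimizes if not. A portable sketch of the
masking arithmetic:

#include <cassert>
#include <cstdint>

// x % m for a power-of-two m, truncated-division semantics (the result
// carries the dividend's sign), mirroring the neg/and_/neg sequence.
// Caveat: in C++, -x overflows for INT32_MIN; the machine 'neg' simply
// wraps, and the masked result still comes out right.
int32_t ModPowerOf2(int32_t x, int32_t m) {
  assert(m > 0 && (m & (m - 1)) == 0);
  int32_t mask = m - 1;
  if (x < 0) return -((-x) & mask);
  return x & mask;
}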
@@ -1342,8 +1351,7 @@ void LCodeGen::DoModI(LModI* instr) {
void LCodeGen::DoDivI(LDivI* instr) {
if (!instr->is_flooring() && instr->hydrogen()->HasPowerOf2Divisor()) {
Register dividend = ToRegister(instr->left());
- int32_t divisor =
- HConstant::cast(instr->hydrogen()->right())->Integer32Value();
+ int32_t divisor = instr->hydrogen()->right()->GetInteger32Constant();
int32_t test_value = 0;
int32_t power = 0;
@@ -1366,10 +1374,26 @@ void LCodeGen::DoDivI(LDivI* instr) {
}
if (test_value != 0) {
- // Deoptimize if remainder is not 0.
- __ test(dividend, Immediate(test_value));
- DeoptimizeIf(not_zero, instr->environment());
- __ sar(dividend, power);
+ if (instr->hydrogen()->CheckFlag(
+ HInstruction::kAllUsesTruncatingToInt32)) {
+ Label done, negative;
+ __ cmp(dividend, 0);
+ __ j(less, &negative, Label::kNear);
+ __ sar(dividend, power);
+ __ jmp(&done, Label::kNear);
+
+ __ bind(&negative);
+ __ neg(dividend);
+ __ sar(dividend, power);
+ if (divisor > 0) __ neg(dividend);
+ __ bind(&done);
+ return; // Don't fall through to "__ neg" below.
+ } else {
+ // Deoptimize if remainder is not 0.
+ __ test(dividend, Immediate(test_value));
+ DeoptimizeIf(not_zero, instr->environment());
+ __ sar(dividend, power);
+ }
}
if (divisor < 0) __ neg(dividend);
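When every use truncates to int32, division by a power of two no longer
has to deopt on a non-zero remainder: an arithmetic shift gives floored
division for non-negative dividends, and negating around the shift turns
that into truncation toward zero for negative ones. A sketch of
truncating division by plus or minus 2^power (the emitted code folds the
divisor's sign handling into the branches above):

#include <cstdint>

// x / (divisor_negative ? -(1 << power) : (1 << power)), truncating
// toward zero. As with the modulus sketch, -x on INT32_MIN overflows
// in C++ while the hardware 'neg' wraps.
int32_t DivPowerOf2Truncating(int32_t x, int power, bool divisor_negative) {
  int32_t q = (x < 0) ? -((-x) >> power)   // neg, sar, neg
                      : (x >> power);      // sar alone
  return divisor_negative ? -q : q;
}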
@@ -1416,11 +1440,7 @@ void LCodeGen::DoDivI(LDivI* instr) {
__ cdq();
__ idiv(right_reg);
- if (!instr->is_flooring()) {
- // Deoptimize if remainder is not 0.
- __ test(edx, Operand(edx));
- DeoptimizeIf(not_zero, instr->environment());
- } else {
+ if (instr->is_flooring()) {
Label done;
__ test(edx, edx);
__ j(zero, &done, Label::kNear);
@@ -1428,6 +1448,11 @@ void LCodeGen::DoDivI(LDivI* instr) {
__ sar(edx, 31);
__ add(eax, edx);
__ bind(&done);
+ } else if (!instr->hydrogen()->CheckFlag(
+ HInstruction::kAllUsesTruncatingToInt32)) {
+ // Deoptimize if remainder is not 0.
+ __ test(edx, Operand(edx));
+ DeoptimizeIf(not_zero, instr->environment());
}
}
@@ -1746,7 +1771,11 @@ void LCodeGen::DoSubI(LSubI* instr) {
void LCodeGen::DoConstantI(LConstantI* instr) {
- ASSERT(instr->result()->IsRegister());
+ __ Set(ToRegister(instr->result()), Immediate(instr->value()));
+}
+
+
+void LCodeGen::DoConstantS(LConstantS* instr) {
__ Set(ToRegister(instr->result()), Immediate(instr->value()));
}
@@ -1801,7 +1830,7 @@ void LCodeGen::DoConstantD(LConstantD* instr) {
void LCodeGen::DoConstantT(LConstantT* instr) {
Register reg = ToRegister(instr->result());
Handle<Object> handle = instr->value();
- ALLOW_HANDLE_DEREF(isolate(), "smi check");
+ AllowDeferredHandleDereference smi_check;
if (handle->IsHeapObject()) {
__ LoadHeapObject(reg, Handle<HeapObject>::cast(handle));
} else {
@@ -1896,11 +1925,32 @@ void LCodeGen::DoDateField(LDateField* instr) {
void LCodeGen::DoSeqStringSetChar(LSeqStringSetChar* instr) {
- SeqStringSetCharGenerator::Generate(masm(),
- instr->encoding(),
- ToRegister(instr->string()),
- ToRegister(instr->index()),
- ToRegister(instr->value()));
+ Register string = ToRegister(instr->string());
+ Register index = ToRegister(instr->index());
+ Register value = ToRegister(instr->value());
+ String::Encoding encoding = instr->encoding();
+
+ if (FLAG_debug_code) {
+ __ push(value);
+ __ mov(value, FieldOperand(string, HeapObject::kMapOffset));
+ __ movzx_b(value, FieldOperand(value, Map::kInstanceTypeOffset));
+
+ __ and_(value, Immediate(kStringRepresentationMask | kStringEncodingMask));
+ static const uint32_t one_byte_seq_type = kSeqStringTag | kOneByteStringTag;
+ static const uint32_t two_byte_seq_type = kSeqStringTag | kTwoByteStringTag;
+ __ cmp(value, Immediate(encoding == String::ONE_BYTE_ENCODING
+ ? one_byte_seq_type : two_byte_seq_type));
+ __ Check(equal, "Unexpected string type");
+ __ pop(value);
+ }
+
+ if (encoding == String::ONE_BYTE_ENCODING) {
+ __ mov_b(FieldOperand(string, index, times_1, SeqString::kHeaderSize),
+ value);
+ } else {
+ __ mov_w(FieldOperand(string, index, times_2, SeqString::kHeaderSize),
+ value);
+ }
}
@@ -2098,14 +2148,16 @@ void LCodeGen::EmitBranch(int left_block, int right_block, Condition cc) {
void LCodeGen::DoBranch(LBranch* instr) {
int true_block = chunk_->LookupDestination(instr->true_block_id());
int false_block = chunk_->LookupDestination(instr->false_block_id());
- CpuFeatureScope scope(masm(), SSE2);
Representation r = instr->hydrogen()->value()->representation();
- if (r.IsInteger32()) {
+ if (r.IsSmiOrInteger32()) {
+ ASSERT(!info()->IsStub());
Register reg = ToRegister(instr->value());
__ test(reg, Operand(reg));
EmitBranch(true_block, false_block, not_zero);
} else if (r.IsDouble()) {
+ ASSERT(!info()->IsStub());
+ CpuFeatureScope scope(masm(), SSE2);
XMMRegister reg = ToDoubleRegister(instr->value());
__ xorps(xmm0, xmm0);
__ ucomisd(reg, xmm0);
@@ -2115,9 +2167,11 @@ void LCodeGen::DoBranch(LBranch* instr) {
Register reg = ToRegister(instr->value());
HType type = instr->hydrogen()->value()->type();
if (type.IsBoolean()) {
+ ASSERT(!info()->IsStub());
__ cmp(reg, factory()->true_value());
EmitBranch(true_block, false_block, equal);
} else if (type.IsSmi()) {
+ ASSERT(!info()->IsStub());
__ test(reg, Operand(reg));
EmitBranch(true_block, false_block, not_equal);
} else {
@@ -2201,8 +2255,15 @@ void LCodeGen::DoBranch(LBranch* instr) {
__ cmp(FieldOperand(reg, HeapObject::kMapOffset),
factory()->heap_number_map());
__ j(not_equal, &not_heap_number, Label::kNear);
- __ xorps(xmm0, xmm0);
- __ ucomisd(xmm0, FieldOperand(reg, HeapNumber::kValueOffset));
+ if (CpuFeatures::IsSafeForSnapshot(SSE2)) {
+ CpuFeatureScope scope(masm(), SSE2);
+ __ xorps(xmm0, xmm0);
+ __ ucomisd(xmm0, FieldOperand(reg, HeapNumber::kValueOffset));
+ } else {
+ __ fldz();
+ __ fld_d(FieldOperand(reg, HeapNumber::kValueOffset));
+ __ FCmp();
+ }
__ j(zero, false_label);
__ jmp(true_label);
__ bind(&not_heap_number);
@@ -2279,9 +2340,19 @@ void LCodeGen::DoCmpIDAndBranch(LCmpIDAndBranch* instr) {
__ j(parity_even, chunk_->GetAssemblyLabel(false_block));
} else {
if (right->IsConstantOperand()) {
- __ cmp(ToRegister(left), ToInteger32Immediate(right));
+ int32_t const_value = ToInteger32(LConstantOperand::cast(right));
+ if (instr->hydrogen_value()->representation().IsSmi()) {
+ __ cmp(ToOperand(left), Immediate(Smi::FromInt(const_value)));
+ } else {
+ __ cmp(ToOperand(left), Immediate(const_value));
+ }
} else if (left->IsConstantOperand()) {
- __ cmp(ToOperand(right), ToInteger32Immediate(left));
+ int32_t const_value = ToInteger32(LConstantOperand::cast(left));
+ if (instr->hydrogen_value()->representation().IsSmi()) {
+ __ cmp(ToOperand(right), Immediate(Smi::FromInt(const_value)));
+ } else {
+ __ cmp(ToOperand(right), Immediate(const_value));
+ }
// We transposed the operands. Reverse the condition.
cc = ReverseCondition(cc);
} else {
@@ -2299,10 +2370,11 @@ void LCodeGen::DoCmpObjectEqAndBranch(LCmpObjectEqAndBranch* instr) {
int true_block = chunk_->LookupDestination(instr->true_block_id());
if (instr->right()->IsConstantOperand()) {
- __ cmp(left, ToHandle(LConstantOperand::cast(instr->right())));
+ Handle<Object> right = ToHandle(LConstantOperand::cast(instr->right()));
+ __ CmpObject(left, right);
} else {
Operand right = ToOperand(instr->right());
- __ cmp(left, Operand(right));
+ __ cmp(left, right);
}
EmitBranch(true_block, false_block, equal);
}
@@ -2940,7 +3012,8 @@ void LCodeGen::DoStoreContextSlot(LStoreContextSlot* instr) {
void LCodeGen::DoLoadNamedField(LLoadNamedField* instr) {
- int offset = instr->hydrogen()->offset();
+ HObjectAccess access = instr->hydrogen()->access();
+ int offset = access.offset();
Register object = ToRegister(instr->object());
if (FLAG_track_double_fields &&
instr->hydrogen()->representation().IsDouble()) {
@@ -2956,7 +3029,7 @@ void LCodeGen::DoLoadNamedField(LLoadNamedField* instr) {
}
Register result = ToRegister(instr->result());
- if (instr->hydrogen()->is_in_object()) {
+ if (access.IsInobject()) {
__ mov(result, FieldOperand(object, offset));
} else {
__ mov(result, FieldOperand(object, JSObject::kPropertiesOffset));
@@ -3010,7 +3083,7 @@ void LCodeGen::EmitPushTaggedOperand(LOperand* operand) {
ASSERT(!operand->IsDoubleRegister());
if (operand->IsConstantOperand()) {
Handle<Object> object = ToHandle(LConstantOperand::cast(operand));
- ALLOW_HANDLE_DEREF(isolate(), "smi check");
+ AllowDeferredHandleDereference smi_check;
if (object->IsSmi()) {
__ Push(Handle<Smi>::cast(object));
} else {
@@ -3064,7 +3137,7 @@ void LCodeGen::DoLoadNamedFieldPolymorphic(LLoadNamedFieldPolymorphic* instr) {
bool last = (i == map_count - 1);
Handle<Map> map = instr->hydrogen()->types()->at(i);
Label check_passed;
- __ CompareMap(object, map, &check_passed, ALLOW_ELEMENT_TRANSITION_MAPS);
+ __ CompareMap(object, map, &check_passed);
if (last && !need_generic) {
DeoptimizeIf(not_equal, instr->environment());
__ bind(&check_passed);
@@ -3337,7 +3410,7 @@ Operand LCodeGen::BuildFastArrayOperand(
+ offset);
} else {
// Take the tag bit into account while computing the shift size.
- if (key_representation.IsTagged() && (shift_size >= 1)) {
+ if (key_representation.IsSmi() && (shift_size >= 1)) {
shift_size -= kSmiTagSize;
}
ScaleFactor scale_factor = static_cast<ScaleFactor>(shift_size);
@@ -3907,7 +3980,10 @@ void LCodeGen::DoPower(LPower* instr) {
ASSERT(ToDoubleRegister(instr->left()).is(xmm2));
ASSERT(ToDoubleRegister(instr->result()).is(xmm3));
- if (exponent_type.IsTagged()) {
+ if (exponent_type.IsSmi()) {
+ MathPowStub stub(MathPowStub::TAGGED);
+ __ CallStub(&stub);
+ } else if (exponent_type.IsTagged()) {
Label no_deopt;
__ JumpIfSmi(eax, &no_deopt);
__ CmpObjectType(eax, HEAP_NUMBER_TYPE, ecx);
@@ -4183,14 +4259,17 @@ void LCodeGen::DoCallNewArray(LCallNewArray* instr) {
__ Set(eax, Immediate(instr->arity()));
__ mov(ebx, instr->hydrogen()->property_cell());
ElementsKind kind = instr->hydrogen()->elements_kind();
+ bool disable_allocation_sites =
+ (AllocationSiteInfo::GetMode(kind) == TRACK_ALLOCATION_SITE);
+
if (instr->arity() == 0) {
- ArrayNoArgumentConstructorStub stub(kind);
+ ArrayNoArgumentConstructorStub stub(kind, disable_allocation_sites);
CallCode(stub.GetCode(isolate()), RelocInfo::CONSTRUCT_CALL, instr);
} else if (instr->arity() == 1) {
- ArraySingleArgumentConstructorStub stub(kind);
+ ArraySingleArgumentConstructorStub stub(kind, disable_allocation_sites);
CallCode(stub.GetCode(isolate()), RelocInfo::CONSTRUCT_CALL, instr);
} else {
- ArrayNArgumentsConstructorStub stub(kind);
+ ArrayNArgumentsConstructorStub stub(kind, disable_allocation_sites);
CallCode(stub.GetCode(isolate()), RelocInfo::CONSTRUCT_CALL, instr);
}
}
@@ -4212,23 +4291,17 @@ void LCodeGen::DoStoreNamedField(LStoreNamedField* instr) {
Representation representation = instr->representation();
Register object = ToRegister(instr->object());
-
- int offset = instr->offset();
+ HObjectAccess access = instr->hydrogen()->access();
+ int offset = access.offset();
Handle<Map> transition = instr->transition();
if (FLAG_track_fields && representation.IsSmi()) {
if (instr->value()->IsConstantOperand()) {
LConstantOperand* operand_value = LConstantOperand::cast(instr->value());
- if (!IsInteger32(operand_value)) {
+ if (!IsSmi(operand_value)) {
DeoptimizeIf(no_condition, instr->environment());
}
- } else {
- Register value = ToRegister(instr->value());
- __ SmiTag(value);
- if (!instr->hydrogen()->value()->range()->IsInSmiRange()) {
- DeoptimizeIf(overflow, instr->environment());
- }
}
} else if (FLAG_track_heap_object_fields && representation.IsHeapObject()) {
if (instr->value()->IsConstantOperand()) {
@@ -4245,7 +4318,7 @@ void LCodeGen::DoStoreNamedField(LStoreNamedField* instr) {
}
} else if (FLAG_track_double_fields && representation.IsDouble()) {
ASSERT(transition.is_null());
- ASSERT(instr->is_in_object());
+ ASSERT(access.IsInobject());
ASSERT(!instr->hydrogen()->NeedsWriteBarrier());
if (CpuFeatures::IsSupported(SSE2)) {
CpuFeatureScope scope(masm(), SSE2);
@@ -4285,7 +4358,7 @@ void LCodeGen::DoStoreNamedField(LStoreNamedField* instr) {
type.IsHeapObject() ? OMIT_SMI_CHECK : INLINE_SMI_CHECK;
Register write_register = object;
- if (!instr->is_in_object()) {
+ if (!access.IsInobject()) {
write_register = ToRegister(instr->temp());
__ mov(write_register,
FieldOperand(object, JSObject::kPropertiesOffset));
@@ -4293,12 +4366,7 @@ void LCodeGen::DoStoreNamedField(LStoreNamedField* instr) {
if (instr->value()->IsConstantOperand()) {
LConstantOperand* operand_value = LConstantOperand::cast(instr->value());
- if (IsInteger32(operand_value)) {
- // In lithium register preparation, we made sure that the constant integer
- // operand fits into smi range.
- Smi* smi_value = Smi::FromInt(ToInteger32(operand_value));
- __ mov(FieldOperand(write_register, offset), Immediate(smi_value));
- } else if (operand_value->IsRegister()) {
+ if (operand_value->IsRegister()) {
__ mov(FieldOperand(write_register, offset), ToRegister(operand_value));
} else {
Handle<Object> handle_value = ToHandle(operand_value);
@@ -4311,7 +4379,7 @@ void LCodeGen::DoStoreNamedField(LStoreNamedField* instr) {
if (instr->hydrogen()->NeedsWriteBarrier()) {
Register value = ToRegister(instr->value());
- Register temp = instr->is_in_object() ? ToRegister(instr->temp()) : object;
+ Register temp = access.IsInobject() ? ToRegister(instr->temp()) : object;
// Update the write barrier for the object for in-object properties.
__ RecordWriteField(write_register,
offset,
@@ -4343,7 +4411,7 @@ void LCodeGen::DoBoundsCheck(LBoundsCheck* instr) {
if (instr->index()->IsConstantOperand()) {
int constant_index =
ToInteger32(LConstantOperand::cast(instr->index()));
- if (instr->hydrogen()->length()->representation().IsTagged()) {
+ if (instr->hydrogen()->length()->representation().IsSmi()) {
__ cmp(ToOperand(instr->length()),
Immediate(Smi::FromInt(constant_index)));
} else {
@@ -4769,6 +4837,16 @@ void LCodeGen::DoInteger32ToDouble(LInteger32ToDouble* instr) {
}
+void LCodeGen::DoInteger32ToSmi(LInteger32ToSmi* instr) {
+ Register input = ToRegister(instr->value());
+ __ SmiTag(input);
+ if (!instr->hydrogen()->value()->HasRange() ||
+ !instr->hydrogen()->value()->range()->IsInSmiRange()) {
+ DeoptimizeIf(overflow, instr->environment());
+ }
+}
+
+
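On 32-bit targets a Smi is the integer shifted left by one with tag bit
zero, so a value only fits if it lies in 31 signed bits; the new
DoInteger32ToSmi tags unconditionally and deopts on overflow unless range
analysis has already proven the value fits. Sketch:

#include <cstdint>

// Smi tagging on a 32-bit V8: value << 1, tag bit = 0. Returns false
// where the generated code would deoptimize (value outside 31 signed
// bits, i.e. the shift would change the sign).
bool SmiTag(int32_t value, int32_t* tagged) {
  if (value < -(1 << 30) || value > (1 << 30) - 1) return false;
  *tagged = value << 1;
  return true;
}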
void LCodeGen::DoUint32ToDouble(LUint32ToDouble* instr) {
CpuFeatureScope scope(masm(), SSE2);
LOperand* input = instr->value();
@@ -5026,46 +5104,55 @@ void LCodeGen::DoSmiTag(LSmiTag* instr) {
void LCodeGen::DoSmiUntag(LSmiUntag* instr) {
LOperand* input = instr->value();
+ Register result = ToRegister(input);
ASSERT(input->IsRegister() && input->Equals(instr->result()));
if (instr->needs_check()) {
- __ test(ToRegister(input), Immediate(kSmiTagMask));
+ __ test(result, Immediate(kSmiTagMask));
DeoptimizeIf(not_zero, instr->environment());
} else {
- __ AssertSmi(ToRegister(input));
+ __ AssertSmi(result);
}
- __ SmiUntag(ToRegister(input));
+ __ SmiUntag(result);
}
void LCodeGen::EmitNumberUntagDNoSSE2(Register input_reg,
Register temp_reg,
- bool deoptimize_on_undefined,
+ bool allow_undefined_as_nan,
bool deoptimize_on_minus_zero,
LEnvironment* env,
NumberUntagDMode mode) {
Label load_smi, done;
- if (mode == NUMBER_CANDIDATE_IS_ANY_TAGGED) {
+ STATIC_ASSERT(NUMBER_CANDIDATE_IS_ANY_TAGGED_CONVERT_HOLE >
+ NUMBER_CANDIDATE_IS_ANY_TAGGED);
+ if (mode >= NUMBER_CANDIDATE_IS_ANY_TAGGED) {
// Smi check.
__ JumpIfSmi(input_reg, &load_smi, Label::kNear);
// Heap number map check.
__ cmp(FieldOperand(input_reg, HeapObject::kMapOffset),
factory()->heap_number_map());
- if (deoptimize_on_undefined) {
+ if (!allow_undefined_as_nan) {
DeoptimizeIf(not_equal, env);
} else {
- Label heap_number;
+ Label heap_number, convert;
__ j(equal, &heap_number, Label::kNear);
+ // Convert undefined (or hole) to NaN.
__ cmp(input_reg, factory()->undefined_value());
+ if (mode == NUMBER_CANDIDATE_IS_ANY_TAGGED_CONVERT_HOLE) {
+ __ j(equal, &convert, Label::kNear);
+ __ cmp(input_reg, factory()->the_hole_value());
+ }
DeoptimizeIf(not_equal, env);
- // Convert undefined to NaN.
+ __ bind(&convert);
ExternalReference nan =
ExternalReference::address_of_canonical_non_hole_nan();
__ fld_d(Operand::StaticVariable(nan));
__ jmp(&done, Label::kNear);
+
__ bind(&heap_number);
}
// Heap number to x87 conversion.
@@ -5086,16 +5173,6 @@ void LCodeGen::EmitNumberUntagDNoSSE2(Register input_reg,
DeoptimizeIf(not_zero, env);
}
__ jmp(&done, Label::kNear);
- } else if (mode == NUMBER_CANDIDATE_IS_SMI_OR_HOLE) {
- __ test(input_reg, Immediate(kSmiTagMask));
- DeoptimizeIf(not_equal, env);
- } else if (mode == NUMBER_CANDIDATE_IS_SMI_CONVERT_HOLE) {
- __ test(input_reg, Immediate(kSmiTagMask));
- __ j(zero, &load_smi);
- ExternalReference hole_nan_reference =
- ExternalReference::address_of_the_hole_nan();
- __ fld_d(Operand::StaticVariable(hole_nan_reference));
- __ jmp(&done, Label::kNear);
} else {
ASSERT(mode == NUMBER_CANDIDATE_IS_SMI);
}
@@ -5113,29 +5190,36 @@ void LCodeGen::EmitNumberUntagDNoSSE2(Register input_reg,
void LCodeGen::EmitNumberUntagD(Register input_reg,
Register temp_reg,
XMMRegister result_reg,
- bool deoptimize_on_undefined,
+ bool allow_undefined_as_nan,
bool deoptimize_on_minus_zero,
LEnvironment* env,
NumberUntagDMode mode) {
Label load_smi, done;
- if (mode == NUMBER_CANDIDATE_IS_ANY_TAGGED) {
+ STATIC_ASSERT(NUMBER_CANDIDATE_IS_ANY_TAGGED_CONVERT_HOLE >
+ NUMBER_CANDIDATE_IS_ANY_TAGGED);
+ if (mode >= NUMBER_CANDIDATE_IS_ANY_TAGGED) {
// Smi check.
__ JumpIfSmi(input_reg, &load_smi, Label::kNear);
// Heap number map check.
__ cmp(FieldOperand(input_reg, HeapObject::kMapOffset),
factory()->heap_number_map());
- if (deoptimize_on_undefined) {
+ if (!allow_undefined_as_nan) {
DeoptimizeIf(not_equal, env);
} else {
- Label heap_number;
+ Label heap_number, convert;
__ j(equal, &heap_number, Label::kNear);
+ // Convert undefined (or hole) to NaN.
__ cmp(input_reg, factory()->undefined_value());
+ if (mode == NUMBER_CANDIDATE_IS_ANY_TAGGED_CONVERT_HOLE) {
+ __ j(equal, &convert, Label::kNear);
+ __ cmp(input_reg, factory()->the_hole_value());
+ }
DeoptimizeIf(not_equal, env);
- // Convert undefined to NaN.
+ __ bind(&convert);
ExternalReference nan =
ExternalReference::address_of_canonical_non_hole_nan();
__ movdbl(result_reg, Operand::StaticVariable(nan));
@@ -5155,16 +5239,6 @@ void LCodeGen::EmitNumberUntagD(Register input_reg,
DeoptimizeIf(not_zero, env);
}
__ jmp(&done, Label::kNear);
- } else if (mode == NUMBER_CANDIDATE_IS_SMI_OR_HOLE) {
- __ test(input_reg, Immediate(kSmiTagMask));
- DeoptimizeIf(not_equal, env);
- } else if (mode == NUMBER_CANDIDATE_IS_SMI_CONVERT_HOLE) {
- __ test(input_reg, Immediate(kSmiTagMask));
- __ j(zero, &load_smi);
- ExternalReference hole_nan_reference =
- ExternalReference::address_of_the_hole_nan();
- __ movdbl(result_reg, Operand::StaticVariable(hole_nan_reference));
- __ jmp(&done, Label::kNear);
} else {
ASSERT(mode == NUMBER_CANDIDATE_IS_SMI);
}
@@ -5301,15 +5375,20 @@ void LCodeGen::DoDeferredTaggedToINoSSE2(LTaggedToINoSSE2* instr) {
// Heap number map check.
__ cmp(FieldOperand(input_reg, HeapObject::kMapOffset),
factory()->heap_number_map());
- __ j(equal, &heap_number, Label::kNear);
- // Check for undefined. Undefined is converted to zero for truncating
- // conversions.
- __ cmp(input_reg, factory()->undefined_value());
- __ RecordComment("Deferred TaggedToI: cannot truncate");
- DeoptimizeIf(not_equal, instr->environment());
- __ xor_(result_reg, result_reg);
- __ jmp(&done, Label::kFar);
- __ bind(&heap_number);
+ if (instr->truncating()) {
+ __ j(equal, &heap_number, Label::kNear);
+ // Check for undefined. Undefined is converted to zero for truncating
+ // conversions.
+ __ cmp(input_reg, factory()->undefined_value());
+ __ RecordComment("Deferred TaggedToI: cannot truncate");
+ DeoptimizeIf(not_equal, instr->environment());
+ __ xor_(result_reg, result_reg);
+ __ jmp(&done, Label::kFar);
+ __ bind(&heap_number);
+ } else {
+ // Deoptimize if we don't have a heap number.
+ DeoptimizeIf(not_equal, instr->environment());
+ }
// Surprisingly, all of this crazy bit manipulation is considerably
// faster than using the built-in x86 CPU conversion functions (about 6x).
@@ -5464,20 +5543,12 @@ void LCodeGen::DoNumberUntagD(LNumberUntagD* instr) {
NumberUntagDMode mode = NUMBER_CANDIDATE_IS_ANY_TAGGED;
HValue* value = instr->hydrogen()->value();
- if (value->type().IsSmi()) {
- if (value->IsLoadKeyed()) {
- HLoadKeyed* load = HLoadKeyed::cast(value);
- if (load->UsesMustHandleHole()) {
- if (load->hole_mode() == ALLOW_RETURN_HOLE) {
- mode = NUMBER_CANDIDATE_IS_SMI_CONVERT_HOLE;
- } else {
- mode = NUMBER_CANDIDATE_IS_SMI_OR_HOLE;
- }
- } else {
- mode = NUMBER_CANDIDATE_IS_SMI;
- }
- } else {
- mode = NUMBER_CANDIDATE_IS_SMI;
+ if (value->representation().IsSmi()) {
+ mode = NUMBER_CANDIDATE_IS_SMI;
+ } else if (value->IsLoadKeyed()) {
+ HLoadKeyed* load = HLoadKeyed::cast(value);
+ if (load->UsesMustHandleHole()) {
+ mode = NUMBER_CANDIDATE_IS_ANY_TAGGED_CONVERT_HOLE;
}
}
@@ -5487,14 +5558,14 @@ void LCodeGen::DoNumberUntagD(LNumberUntagD* instr) {
EmitNumberUntagD(input_reg,
temp_reg,
result_reg,
- instr->hydrogen()->deoptimize_on_undefined(),
+ instr->hydrogen()->allow_undefined_as_nan(),
deoptimize_on_minus_zero,
instr->environment(),
mode);
} else {
EmitNumberUntagDNoSSE2(input_reg,
temp_reg,
- instr->hydrogen()->deoptimize_on_undefined(),
+ instr->hydrogen()->allow_undefined_as_nan(),
deoptimize_on_minus_zero,
instr->environment(),
mode);
@@ -5621,6 +5692,41 @@ void LCodeGen::DoDoubleToI(LDoubleToI* instr) {
}
+void LCodeGen::DoDoubleToSmi(LDoubleToSmi* instr) {
+ LOperand* input = instr->value();
+ ASSERT(input->IsDoubleRegister());
+ LOperand* result = instr->result();
+ ASSERT(result->IsRegister());
+ CpuFeatureScope scope(masm(), SSE2);
+
+ XMMRegister input_reg = ToDoubleRegister(input);
+ Register result_reg = ToRegister(result);
+
+ Label done;
+ __ cvttsd2si(result_reg, Operand(input_reg));
+ __ cvtsi2sd(xmm0, Operand(result_reg));
+ __ ucomisd(xmm0, input_reg);
+ DeoptimizeIf(not_equal, instr->environment());
+ DeoptimizeIf(parity_even, instr->environment()); // NaN.
+
+ if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) {
+ // The integer converted back is equal to the original. We
+ // only have to test if we got -0 as an input.
+ __ test(result_reg, Operand(result_reg));
+ __ j(not_zero, &done, Label::kNear);
+ __ movmskpd(result_reg, input_reg);
+ // Bit 0 contains the sign of the double in input_reg.
+ // If input was positive, we are ok and return 0, otherwise
+ // deoptimize.
+ __ and_(result_reg, 1);
+ DeoptimizeIf(not_zero, instr->environment());
+ __ bind(&done);
+ }
+ __ SmiTag(result_reg);
+ DeoptimizeIf(overflow, instr->environment());
+}
+
+
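
    DoDoubleToSmi above uses the truncate/convert-back/compare idiom:
    cvttsd2si truncates, cvtsi2sd converts back, ucomisd deoptimizes on any
    mismatch or NaN, -0 is caught via the sign bit (movmskpd), and the final
    SmiTag deoptimizes on overflow. A rough scalar equivalent, as a sketch
    rather than the real lithium contract:

        #include <cmath>
        #include <cstdint>

        // Returns false where DoDoubleToSmi would deoptimize.
        bool DoubleToSmi(double input, int32_t* out) {
          if (std::isnan(input) ||
              input < static_cast<double>(INT32_MIN) ||
              input > static_cast<double>(INT32_MAX)) {
            return false;                                   // ucomisd mismatch/NaN
          }
          int32_t truncated = static_cast<int32_t>(input);  // cvttsd2si
          if (static_cast<double>(truncated) != input) return false;  // inexact
          if (truncated == 0 && std::signbit(input)) {
            return false;   // -0 (only when kBailoutOnMinusZero is set)
          }
          if (truncated > 1073741823 || truncated < -1073741824) {
            return false;                                   // SmiTag overflow
          }
          *out = static_cast<int32_t>(static_cast<uint32_t>(truncated) << 1);
          return true;
        }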
void LCodeGen::DoCheckSmi(LCheckSmi* instr) {
LOperand* input = instr->value();
__ test(ToOperand(input), Immediate(kSmiTagMask));
@@ -5697,10 +5803,9 @@ void LCodeGen::DoCheckFunction(LCheckFunction* instr) {
void LCodeGen::DoCheckMapCommon(Register reg,
Handle<Map> map,
- CompareMapMode mode,
LInstruction* instr) {
Label success;
- __ CompareMap(reg, map, &success, mode);
+ __ CompareMap(reg, map, &success);
DeoptimizeIf(not_equal, instr->environment());
__ bind(&success);
}
@@ -5715,11 +5820,11 @@ void LCodeGen::DoCheckMaps(LCheckMaps* instr) {
SmallMapList* map_set = instr->hydrogen()->map_set();
for (int i = 0; i < map_set->length() - 1; i++) {
Handle<Map> map = map_set->at(i);
- __ CompareMap(reg, map, &success, REQUIRE_EXACT_MAP);
+ __ CompareMap(reg, map, &success);
__ j(equal, &success);
}
Handle<Map> map = map_set->last();
- DoCheckMapCommon(reg, map, REQUIRE_EXACT_MAP, instr);
+ DoCheckMapCommon(reg, map, instr);
__ bind(&success);
}
@@ -5911,101 +6016,12 @@ void LCodeGen::DoCheckPrototypeMaps(LCheckPrototypeMaps* instr) {
} else {
for (int i = 0; i < prototypes->length(); i++) {
__ LoadHeapObject(reg, prototypes->at(i));
- DoCheckMapCommon(reg, maps->at(i), ALLOW_ELEMENT_TRANSITION_MAPS, instr);
+ DoCheckMapCommon(reg, maps->at(i), instr);
}
}
}
-void LCodeGen::DoAllocateObject(LAllocateObject* instr) {
- class DeferredAllocateObject: public LDeferredCode {
- public:
- DeferredAllocateObject(LCodeGen* codegen, LAllocateObject* instr)
- : LDeferredCode(codegen), instr_(instr) { }
- virtual void Generate() { codegen()->DoDeferredAllocateObject(instr_); }
- virtual LInstruction* instr() { return instr_; }
- private:
- LAllocateObject* instr_;
- };
-
- DeferredAllocateObject* deferred =
- new(zone()) DeferredAllocateObject(this, instr);
-
- Register result = ToRegister(instr->result());
- Register scratch = ToRegister(instr->temp());
- Handle<JSFunction> constructor = instr->hydrogen()->constructor();
- Handle<Map> initial_map = instr->hydrogen()->constructor_initial_map();
- int instance_size = initial_map->instance_size();
- ASSERT(initial_map->pre_allocated_property_fields() +
- initial_map->unused_property_fields() -
- initial_map->inobject_properties() == 0);
-
- __ Allocate(instance_size, result, no_reg, scratch, deferred->entry(),
- TAG_OBJECT);
-
- __ bind(deferred->exit());
- if (FLAG_debug_code) {
- Label is_in_new_space;
- __ JumpIfInNewSpace(result, scratch, &is_in_new_space);
- __ Abort("Allocated object is not in new-space");
- __ bind(&is_in_new_space);
- }
-
- // Load the initial map.
- Register map = scratch;
- __ LoadHeapObject(scratch, constructor);
- __ mov(map, FieldOperand(scratch, JSFunction::kPrototypeOrInitialMapOffset));
-
- if (FLAG_debug_code) {
- __ AssertNotSmi(map);
- __ cmpb(FieldOperand(map, Map::kInstanceSizeOffset),
- instance_size >> kPointerSizeLog2);
- __ Assert(equal, "Unexpected instance size");
- __ cmpb(FieldOperand(map, Map::kPreAllocatedPropertyFieldsOffset),
- initial_map->pre_allocated_property_fields());
- __ Assert(equal, "Unexpected pre-allocated property fields count");
- __ cmpb(FieldOperand(map, Map::kUnusedPropertyFieldsOffset),
- initial_map->unused_property_fields());
- __ Assert(equal, "Unexpected unused property fields count");
- __ cmpb(FieldOperand(map, Map::kInObjectPropertiesOffset),
- initial_map->inobject_properties());
- __ Assert(equal, "Unexpected in-object property fields count");
- }
-
- // Initialize map and fields of the newly allocated object.
- ASSERT(initial_map->instance_type() == JS_OBJECT_TYPE);
- __ mov(FieldOperand(result, JSObject::kMapOffset), map);
- __ mov(scratch, factory()->empty_fixed_array());
- __ mov(FieldOperand(result, JSObject::kElementsOffset), scratch);
- __ mov(FieldOperand(result, JSObject::kPropertiesOffset), scratch);
- if (initial_map->inobject_properties() != 0) {
- __ mov(scratch, factory()->undefined_value());
- for (int i = 0; i < initial_map->inobject_properties(); i++) {
- int property_offset = JSObject::kHeaderSize + i * kPointerSize;
- __ mov(FieldOperand(result, property_offset), scratch);
- }
- }
-}
-
-
-void LCodeGen::DoDeferredAllocateObject(LAllocateObject* instr) {
- Register result = ToRegister(instr->result());
- Handle<Map> initial_map = instr->hydrogen()->constructor_initial_map();
- int instance_size = initial_map->instance_size();
-
- // TODO(3095996): Get rid of this. For now, we need to make the
- // result register contain a valid pointer because it is already
- // contained in the register pointer map.
- __ Set(result, Immediate(0));
-
- PushSafepointRegistersScope scope(this);
- __ push(Immediate(Smi::FromInt(instance_size)));
- CallRuntimeFromDeferred(
- Runtime::kAllocateInNewSpace, 1, instr, instr->context());
- __ StoreToSafepointRegisterSlot(result, eax);
-}
-
-
void LCodeGen::DoAllocate(LAllocate* instr) {
class DeferredAllocate: public LDeferredCode {
public:
@@ -6029,8 +6045,12 @@ void LCodeGen::DoAllocate(LAllocate* instr) {
flags = static_cast<AllocationFlags>(flags | DOUBLE_ALIGNMENT);
}
if (instr->hydrogen()->CanAllocateInOldPointerSpace()) {
+ ASSERT(!instr->hydrogen()->CanAllocateInOldDataSpace());
flags = static_cast<AllocationFlags>(flags | PRETENURE_OLD_POINTER_SPACE);
+ } else if (instr->hydrogen()->CanAllocateInOldDataSpace()) {
+ flags = static_cast<AllocationFlags>(flags | PRETENURE_OLD_DATA_SPACE);
}
+
if (instr->size()->IsConstantOperand()) {
int32_t size = ToInteger32(LConstantOperand::cast(instr->size()));
__ Allocate(size, result, temp, no_reg, deferred->entry(), flags);
@@ -6063,8 +6083,12 @@ void LCodeGen::DoDeferredAllocate(LAllocate* instr) {
}
if (instr->hydrogen()->CanAllocateInOldPointerSpace()) {
+ ASSERT(!instr->hydrogen()->CanAllocateInOldDataSpace());
CallRuntimeFromDeferred(
Runtime::kAllocateInOldPointerSpace, 1, instr, instr->context());
+ } else if (instr->hydrogen()->CanAllocateInOldDataSpace()) {
+ CallRuntimeFromDeferred(
+ Runtime::kAllocateInOldDataSpace, 1, instr, instr->context());
} else {
CallRuntimeFromDeferred(
Runtime::kAllocateInNewSpace, 1, instr, instr->context());
diff --git a/deps/v8/src/ia32/lithium-codegen-ia32.h b/deps/v8/src/ia32/lithium-codegen-ia32.h
index 9f8d4fd363..647dd0e4c0 100644
--- a/deps/v8/src/ia32/lithium-codegen-ia32.h
+++ b/deps/v8/src/ia32/lithium-codegen-ia32.h
@@ -111,9 +111,13 @@ class LCodeGen BASE_EMBEDDED {
bool IsX87TopOfStack(LOperand* op) const;
bool IsInteger32(LConstantOperand* op) const;
+ bool IsSmi(LConstantOperand* op) const;
Immediate ToInteger32Immediate(LOperand* op) const {
return Immediate(ToInteger32(LConstantOperand::cast(op)));
}
+ Immediate ToSmiImmediate(LOperand* op) const {
+ return Immediate(Smi::FromInt(ToInteger32(LConstantOperand::cast(op))));
+ }
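
    ToSmiImmediate works because, on ia32, a smi is the 31-bit payload shifted
    left by one with a zero tag bit, so Smi::FromInt(n) is representable as
    the plain immediate n << 1. A self-contained sketch of that encoding (the
    constants are assumptions for illustration, not taken from v8.h):

        #include <cassert>
        #include <cstdint>

        const int kSmiTagSize = 1;  // assumed: 32-bit V8 uses a one-bit smi tag
        const int kSmiTag = 0;

        int32_t SmiFromInt(int32_t value) {          // stand-in for Smi::FromInt
          return static_cast<int32_t>(static_cast<uint32_t>(value) << kSmiTagSize);
        }

        bool IsSmi(int32_t tagged) { return (tagged & 1) == kSmiTag; }

        int main() {
          assert(SmiFromInt(21) == 42);  // the emitted immediate is value * 2
          assert(IsSmi(SmiFromInt(21)));
          return 0;
        }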
// Support for non-sse2 (x87) floating point stack handling.
// These functions maintain the depth of the stack (either 0 or 1)
@@ -155,13 +159,11 @@ class LCodeGen BASE_EMBEDDED {
void DoDeferredRandom(LRandom* instr);
void DoDeferredStringCharCodeAt(LStringCharCodeAt* instr);
void DoDeferredStringCharFromCode(LStringCharFromCode* instr);
- void DoDeferredAllocateObject(LAllocateObject* instr);
void DoDeferredAllocate(LAllocate* instr);
void DoDeferredInstanceOfKnownGlobal(LInstanceOfKnownGlobal* instr,
Label* map_check);
- void DoCheckMapCommon(Register reg, Handle<Map> map,
- CompareMapMode mode, LInstruction* instr);
+ void DoCheckMapCommon(Register reg, Handle<Map> map, LInstruction* instr);
// Parallel move support.
void DoParallelMove(LParallelMove* move);
@@ -328,7 +330,7 @@ class LCodeGen BASE_EMBEDDED {
Register input,
Register temp,
XMMRegister result,
- bool deoptimize_on_undefined,
+ bool allow_undefined_as_nan,
bool deoptimize_on_minus_zero,
LEnvironment* env,
NumberUntagDMode mode = NUMBER_CANDIDATE_IS_ANY_TAGGED);
@@ -336,7 +338,7 @@ class LCodeGen BASE_EMBEDDED {
void EmitNumberUntagDNoSSE2(
Register input,
Register temp,
- bool deoptimize_on_undefined,
+ bool allow_undefined_as_nan,
bool deoptimize_on_minus_zero,
LEnvironment* env,
NumberUntagDMode mode = NUMBER_CANDIDATE_IS_ANY_TAGGED);
diff --git a/deps/v8/src/ia32/lithium-gap-resolver-ia32.cc b/deps/v8/src/ia32/lithium-gap-resolver-ia32.cc
index 6c7e375ad6..3da8f320d0 100644
--- a/deps/v8/src/ia32/lithium-gap-resolver-ia32.cc
+++ b/deps/v8/src/ia32/lithium-gap-resolver-ia32.cc
@@ -306,7 +306,9 @@ void LGapResolver::EmitMove(int index) {
LConstantOperand* constant_source = LConstantOperand::cast(source);
if (destination->IsRegister()) {
Register dst = cgen_->ToRegister(destination);
- if (cgen_->IsInteger32(constant_source)) {
+ if (cgen_->IsSmi(constant_source)) {
+ __ Set(dst, cgen_->ToSmiImmediate(constant_source));
+ } else if (cgen_->IsInteger32(constant_source)) {
__ Set(dst, cgen_->ToInteger32Immediate(constant_source));
} else {
__ LoadObject(dst, cgen_->ToHandle(constant_source));
@@ -314,7 +316,9 @@ void LGapResolver::EmitMove(int index) {
} else {
ASSERT(destination->IsStackSlot());
Operand dst = cgen_->ToOperand(destination);
- if (cgen_->IsInteger32(constant_source)) {
+ if (cgen_->IsSmi(constant_source)) {
+ __ Set(dst, cgen_->ToSmiImmediate(constant_source));
+ } else if (cgen_->IsInteger32(constant_source)) {
__ Set(dst, cgen_->ToInteger32Immediate(constant_source));
} else {
Register tmp = EnsureTempRegister();
diff --git a/deps/v8/src/ia32/lithium-ia32.cc b/deps/v8/src/ia32/lithium-ia32.cc
index dec5697f87..325ed2c7fd 100644
--- a/deps/v8/src/ia32/lithium-ia32.cc
+++ b/deps/v8/src/ia32/lithium-ia32.cc
@@ -416,8 +416,7 @@ LOperand* LPlatformChunk::GetNextSpillSlot(bool is_double) {
void LStoreNamedField::PrintDataTo(StringStream* stream) {
object()->PrintTo(stream);
- stream->Add(".");
- stream->Add(*String::cast(*name())->ToCString());
+ hydrogen()->access().PrintTo(stream);
stream->Add(" <- ");
value()->PrintTo(stream);
}
@@ -453,7 +452,14 @@ void LStoreKeyed::PrintDataTo(StringStream* stream) {
} else {
stream->Add("] <- ");
}
- value()->PrintTo(stream);
+
+ if (value() == NULL) {
+ ASSERT(hydrogen()->IsConstantHoleStore() &&
+ hydrogen()->value()->representation().IsDouble());
+ stream->Add("<the hole(nan)>");
+ } else {
+ value()->PrintTo(stream);
+ }
}
@@ -758,6 +764,12 @@ LInstruction* LChunkBuilder::DoDummyUse(HDummyUse* instr) {
}
+LInstruction* LChunkBuilder::DoEnvironmentMarker(HEnvironmentMarker* instr) {
+ UNREACHABLE();
+ return NULL;
+}
+
+
LInstruction* LChunkBuilder::DoSoftDeoptimize(HSoftDeoptimize* instr) {
return AssignEnvironment(new(zone()) LDeoptimize);
}
@@ -770,9 +782,9 @@ LInstruction* LChunkBuilder::DoDeoptimize(HDeoptimize* instr) {
LInstruction* LChunkBuilder::DoShift(Token::Value op,
HBitwiseBinaryOperation* instr) {
- if (instr->representation().IsTagged()) {
- ASSERT(instr->left()->representation().IsTagged());
- ASSERT(instr->right()->representation().IsTagged());
+ if (instr->representation().IsSmiOrTagged()) {
+ ASSERT(instr->left()->representation().IsSmiOrTagged());
+ ASSERT(instr->right()->representation().IsSmiOrTagged());
LOperand* context = UseFixed(instr->context(), esi);
LOperand* left = UseFixed(instr->left(), edx);
@@ -841,8 +853,8 @@ LInstruction* LChunkBuilder::DoArithmeticT(Token::Value op,
op == Token::SUB);
HValue* left = instr->left();
HValue* right = instr->right();
- ASSERT(left->representation().IsTagged());
- ASSERT(right->representation().IsTagged());
+ ASSERT(left->representation().IsSmiOrTagged());
+ ASSERT(right->representation().IsSmiOrTagged());
LOperand* context = UseFixed(instr->context(), esi);
LOperand* left_operand = UseFixed(left, edx);
LOperand* right_operand = UseFixed(right, eax);
@@ -1392,9 +1404,9 @@ LInstruction* LChunkBuilder::DoBitwise(HBitwise* instr) {
LOperand* right = UseOrConstantAtStart(instr->BetterRightOperand());
return DefineSameAsFirst(new(zone()) LBitI(left, right));
} else {
- ASSERT(instr->representation().IsTagged());
- ASSERT(instr->left()->representation().IsTagged());
- ASSERT(instr->right()->representation().IsTagged());
+ ASSERT(instr->representation().IsSmiOrTagged());
+ ASSERT(instr->left()->representation().IsSmiOrTagged());
+ ASSERT(instr->right()->representation().IsSmiOrTagged());
LOperand* context = UseFixed(instr->context(), esi);
LOperand* left = UseFixed(instr->left(), edx);
@@ -1435,7 +1447,7 @@ LInstruction* LChunkBuilder::DoDiv(HDiv* instr) {
LDivI* result = new(zone()) LDivI(dividend, divisor, temp);
return AssignEnvironment(DefineFixed(result, eax));
} else {
- ASSERT(instr->representation().IsTagged());
+ ASSERT(instr->representation().IsSmiOrTagged());
return DoArithmeticT(Token::DIV, instr);
}
}
@@ -1512,43 +1524,54 @@ LInstruction* LChunkBuilder::DoMathFloorOfDiv(HMathFloorOfDiv* instr) {
LInstruction* LChunkBuilder::DoMod(HMod* instr) {
+ HValue* left = instr->left();
+ HValue* right = instr->right();
if (instr->representation().IsInteger32()) {
- ASSERT(instr->left()->representation().IsInteger32());
- ASSERT(instr->right()->representation().IsInteger32());
-
- LInstruction* result;
+ ASSERT(left->representation().IsInteger32());
+ ASSERT(right->representation().IsInteger32());
if (instr->HasPowerOf2Divisor()) {
- ASSERT(!instr->CheckFlag(HValue::kCanBeDivByZero));
- LOperand* value = UseRegisterAtStart(instr->left());
- LModI* mod =
- new(zone()) LModI(value, UseOrConstant(instr->right()), NULL);
- result = DefineSameAsFirst(mod);
+ ASSERT(!right->CanBeZero());
+ LModI* mod = new(zone()) LModI(UseRegisterAtStart(left),
+ UseOrConstant(right),
+ NULL);
+ LInstruction* result = DefineSameAsFirst(mod);
+ return (left->CanBeNegative() &&
+ instr->CheckFlag(HValue::kBailoutOnMinusZero))
+ ? AssignEnvironment(result)
+ : result;
+ } else if (instr->has_fixed_right_arg()) {
+ LModI* mod = new(zone()) LModI(UseRegister(left),
+ UseRegisterAtStart(right),
+ NULL);
+ return AssignEnvironment(DefineSameAsFirst(mod));
} else {
- // The temporary operand is necessary to ensure that right is
- // not allocated into edx.
- LOperand* temp = FixedTemp(edx);
- LOperand* value = UseFixed(instr->left(), eax);
- LOperand* divisor = UseRegister(instr->right());
- LModI* mod = new(zone()) LModI(value, divisor, temp);
- result = DefineFixed(mod, edx);
+ // The temporary operand is necessary to ensure that right is not
+ // allocated into edx.
+ LModI* mod = new(zone()) LModI(UseFixed(left, eax),
+ UseRegister(right),
+ FixedTemp(edx));
+ LInstruction* result = DefineFixed(mod, edx);
+ return (right->CanBeZero() ||
+ (left->RangeCanInclude(kMinInt) &&
+ right->RangeCanInclude(-1) &&
+ instr->CheckFlag(HValue::kBailoutOnMinusZero)) ||
+ (left->CanBeNegative() &&
+ instr->CanBeZero() &&
+ instr->CheckFlag(HValue::kBailoutOnMinusZero)))
+ ? AssignEnvironment(result)
+ : result;
}
-
- return (instr->CheckFlag(HValue::kBailoutOnMinusZero) ||
- instr->CheckFlag(HValue::kCanBeDivByZero) ||
- instr->CheckFlag(HValue::kCanOverflow))
- ? AssignEnvironment(result)
- : result;
- } else if (instr->representation().IsTagged()) {
+ } else if (instr->representation().IsSmiOrTagged()) {
return DoArithmeticT(Token::MOD, instr);
} else {
ASSERT(instr->representation().IsDouble());
- // We call a C function for double modulo. It can't trigger a GC.
- // We need to use fixed result register for the call.
+ // We call a C function for double modulo. It can't trigger a GC. We need
+ // to use a fixed result register for the call.
// TODO(fschneider): Allow any register as input registers.
- LOperand* left = UseFixedDouble(instr->left(), xmm2);
- LOperand* right = UseFixedDouble(instr->right(), xmm1);
- LArithmeticD* result = new(zone()) LArithmeticD(Token::MOD, left, right);
- return MarkAsCall(DefineFixedDouble(result, xmm1), instr);
+ LArithmeticD* mod = new(zone()) LArithmeticD(Token::MOD,
+ UseFixedDouble(left, xmm2),
+ UseFixedDouble(right, xmm1));
+ return MarkAsCall(DefineFixedDouble(mod, xmm1), instr);
}
}
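
    The rewritten DoMod makes the deopt conditions explicit per strategy: the
    power-of-2 path only needs an environment when a negative left operand
    could produce -0, while the generic idiv path additionally bails out when
    the divisor can be zero or when the kMinInt / -1 and negative-zero corner
    cases are observable. The -0 case is the subtle one, since JavaScript's %
    is sign-preserving; a small demonstration:

        #include <cmath>
        #include <cstdio>

        int main() {
          double r = std::fmod(-4.0, 2.0);  // JS: -4 % 2 evaluates to -0
          // An int32 result cannot carry the sign of zero, hence the bailout
          // when the left operand may be negative and -0 is observable.
          std::printf("%g signbit=%d\n", r, std::signbit(r));  // -0 signbit=1
          return 0;
        }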
@@ -1572,7 +1595,7 @@ LInstruction* LChunkBuilder::DoMul(HMul* instr) {
} else if (instr->representation().IsDouble()) {
return DoArithmeticD(Token::MUL, instr);
} else {
- ASSERT(instr->representation().IsTagged());
+ ASSERT(instr->representation().IsSmiOrTagged());
return DoArithmeticT(Token::MUL, instr);
}
}
@@ -1593,7 +1616,7 @@ LInstruction* LChunkBuilder::DoSub(HSub* instr) {
} else if (instr->representation().IsDouble()) {
return DoArithmeticD(Token::SUB, instr);
} else {
- ASSERT(instr->representation().IsTagged());
+ ASSERT(instr->representation().IsSmiOrTagged());
return DoArithmeticT(Token::SUB, instr);
}
}
@@ -1625,7 +1648,7 @@ LInstruction* LChunkBuilder::DoAdd(HAdd* instr) {
} else if (instr->representation().IsDouble()) {
return DoArithmeticD(Token::ADD, instr);
} else {
- ASSERT(instr->representation().IsTagged());
+ ASSERT(instr->representation().IsSmiOrTagged());
return DoArithmeticT(Token::ADD, instr);
}
}
@@ -1669,7 +1692,7 @@ LInstruction* LChunkBuilder::DoPower(HPower* instr) {
LInstruction* LChunkBuilder::DoRandom(HRandom* instr) {
ASSERT(instr->representation().IsDouble());
- ASSERT(instr->global_object()->representation().IsTagged());
+ ASSERT(instr->global_object()->representation().IsSmiOrTagged());
LOperand* global_object = UseFixed(instr->global_object(), eax);
LRandom* result = new(zone()) LRandom(global_object);
return MarkAsCall(DefineFixedDouble(result, xmm1), instr);
@@ -1677,8 +1700,8 @@ LInstruction* LChunkBuilder::DoRandom(HRandom* instr) {
LInstruction* LChunkBuilder::DoCompareGeneric(HCompareGeneric* instr) {
- ASSERT(instr->left()->representation().IsTagged());
- ASSERT(instr->right()->representation().IsTagged());
+ ASSERT(instr->left()->representation().IsSmiOrTagged());
+ ASSERT(instr->right()->representation().IsSmiOrTagged());
LOperand* context = UseFixed(instr->context(), esi);
LOperand* left = UseFixed(instr->left(), edx);
LOperand* right = UseFixed(instr->right(), eax);
@@ -1690,9 +1713,10 @@ LInstruction* LChunkBuilder::DoCompareGeneric(HCompareGeneric* instr) {
LInstruction* LChunkBuilder::DoCompareIDAndBranch(
HCompareIDAndBranch* instr) {
Representation r = instr->representation();
- if (r.IsInteger32()) {
- ASSERT(instr->left()->representation().IsInteger32());
- ASSERT(instr->right()->representation().IsInteger32());
+ if (r.IsSmiOrInteger32()) {
+ ASSERT(instr->left()->representation().IsSmiOrInteger32());
+ ASSERT(instr->left()->representation().Equals(
+ instr->right()->representation()));
LOperand* left = UseRegisterOrConstantAtStart(instr->left());
LOperand* right = UseOrConstantAtStart(instr->right());
return new(zone()) LCmpIDAndBranch(left, right);
@@ -1730,7 +1754,7 @@ LInstruction* LChunkBuilder::DoCompareConstantEqAndBranch(
LInstruction* LChunkBuilder::DoIsObjectAndBranch(HIsObjectAndBranch* instr) {
- ASSERT(instr->value()->representation().IsTagged());
+ ASSERT(instr->value()->representation().IsSmiOrTagged());
LOperand* temp = TempRegister();
return new(zone()) LIsObjectAndBranch(UseRegister(instr->value()), temp);
}
@@ -1751,7 +1775,7 @@ LInstruction* LChunkBuilder::DoIsSmiAndBranch(HIsSmiAndBranch* instr) {
LInstruction* LChunkBuilder::DoIsUndetectableAndBranch(
HIsUndetectableAndBranch* instr) {
- ASSERT(instr ->value()->representation().IsTagged());
+ ASSERT(instr->value()->representation().IsTagged());
return new(zone()) LIsUndetectableAndBranch(
UseRegisterAtStart(instr->value()), TempRegister());
}
@@ -1845,7 +1869,6 @@ LInstruction* LChunkBuilder::DoSeqStringSetChar(HSeqStringSetChar* instr) {
LOperand* string = UseRegister(instr->string());
LOperand* index = UseRegister(instr->index());
ASSERT(ecx.is_byte_register());
- // TODO(titzer): the machine code for this instruction overwrites ecx! fix!
LOperand* value = UseFixed(instr->value(), ecx);
LSeqStringSetChar* result =
new(zone()) LSeqStringSetChar(instr->encoding(), string, index, value);
@@ -1908,6 +1931,13 @@ LInstruction* LChunkBuilder::DoForceRepresentation(HForceRepresentation* bad) {
LInstruction* LChunkBuilder::DoChange(HChange* instr) {
Representation from = instr->from();
Representation to = instr->to();
+ if (from.IsSmi()) {
+ if (to.IsTagged()) {
+ LOperand* value = UseRegister(instr->value());
+ return DefineSameAsFirst(new(zone()) LDummyUse(value));
+ }
+ from = Representation::Tagged();
+ }
// Only mark conversions that might need to allocate as calling rather than
// all changes. This makes simple, non-allocating conversion not have to force
// building a stack frame.
@@ -1925,6 +1955,13 @@ LInstruction* LChunkBuilder::DoChange(HChange* instr) {
} else {
return AssignEnvironment(DefineX87TOS(res));
}
+ } else if (to.IsSmi()) {
+ HValue* val = instr->value();
+ LOperand* value = UseRegister(val);
+ if (val->type().IsSmi()) {
+ return DefineSameAsFirst(new(zone()) LDummyUse(value));
+ }
+ return AssignEnvironment(DefineSameAsFirst(new(zone()) LCheckSmi(value)));
} else {
ASSERT(to.IsInteger32());
if (instr->value()->type().IsSmi()) {
@@ -1961,6 +1998,10 @@ LInstruction* LChunkBuilder::DoChange(HChange* instr) {
LUnallocated* result_temp = TempRegister();
LNumberTagD* result = new(zone()) LNumberTagD(value, temp);
return AssignPointerMap(Define(result, result_temp));
+ } else if (to.IsSmi()) {
+ LOperand* value = UseRegister(instr->value());
+ return AssignEnvironment(
+ DefineAsRegister(new(zone()) LDoubleToSmi(value)));
} else {
ASSERT(to.IsInteger32());
bool truncating = instr->CanTruncateToInt32();
@@ -1985,6 +2026,15 @@ LInstruction* LChunkBuilder::DoChange(HChange* instr) {
LNumberTagI* result = new(zone()) LNumberTagI(value);
return AssignEnvironment(AssignPointerMap(DefineSameAsFirst(result)));
}
+ } else if (to.IsSmi()) {
+ HValue* val = instr->value();
+ LOperand* value = UseRegister(val);
+ LInstruction* result =
+ DefineSameAsFirst(new(zone()) LInteger32ToSmi(value));
+ if (val->HasRange() && val->range()->IsInSmiRange()) {
+ return result;
+ }
+ return AssignEnvironment(result);
} else {
ASSERT(to.IsDouble());
if (instr->value()->CheckFlag(HInstruction::kUint32)) {
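
    The new to.IsSmi() branches in DoChange above reduce to tag manipulation:
    tagged-to-smi is just an LCheckSmi on the value (or an LDummyUse when the
    type is already known), and int32-to-smi is a left shift that only needs
    an environment when the value's range may leave the 31-bit smi range. A
    sketch of the int32 case:

        #include <cstdint>

        // Returns false where LInteger32ToSmi would deoptimize; values whose
        // range is provably in [-2^30, 2^30 - 1] skip the check entirely.
        bool Int32ToSmi(int32_t v, int32_t* out) {
          if (v > 1073741823 || v < -1073741824) return false;
          *out = static_cast<int32_t>(static_cast<uint32_t>(v) << 1);
          return true;
        }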
@@ -2023,18 +2073,6 @@ LInstruction* LChunkBuilder::DoCheckPrototypeMaps(HCheckPrototypeMaps* instr) {
}
-LInstruction* LChunkBuilder::DoCheckSmi(HCheckSmi* instr) {
- LOperand* value = UseAtStart(instr->value());
- return AssignEnvironment(new(zone()) LCheckSmi(value));
-}
-
-
-LInstruction* LChunkBuilder::DoCheckSmiOrInt32(HCheckSmiOrInt32* instr) {
- LOperand* value = UseAtStart(instr->value());
- return AssignEnvironment(new(zone()) LCheckSmi(value));
-}
-
-
LInstruction* LChunkBuilder::DoCheckFunction(HCheckFunction* instr) {
// If the target is in new space, we'll emit a global cell compare and so
// want the value in a register. If the target gets promoted before we
@@ -2063,7 +2101,7 @@ LInstruction* LChunkBuilder::DoClampToUint8(HClampToUint8* instr) {
LOperand* reg = UseFixed(value, eax);
return DefineFixed(new(zone()) LClampIToUint8(reg), eax);
} else {
- ASSERT(input_rep.IsTagged());
+ ASSERT(input_rep.IsSmiOrTagged());
if (CpuFeatures::IsSupported(SSE2)) {
LOperand* reg = UseFixed(value, eax);
// Register allocator doesn't (yet) support allocation of double
@@ -2094,7 +2132,9 @@ LInstruction* LChunkBuilder::DoReturn(HReturn* instr) {
LInstruction* LChunkBuilder::DoConstant(HConstant* instr) {
Representation r = instr->representation();
- if (r.IsInteger32()) {
+ if (r.IsSmi()) {
+ return DefineAsRegister(new(zone()) LConstantS);
+ } else if (r.IsInteger32()) {
return DefineAsRegister(new(zone()) LConstantI);
} else if (r.IsDouble()) {
double value = instr->DoubleValue();
@@ -2222,7 +2262,7 @@ LInstruction* LChunkBuilder::DoLoadExternalArrayPointer(
LInstruction* LChunkBuilder::DoLoadKeyed(HLoadKeyed* instr) {
ASSERT(instr->key()->representation().IsInteger32() ||
- instr->key()->representation().IsTagged());
+ instr->key()->representation().IsSmi());
ElementsKind elements_kind = instr->elements_kind();
bool clobbers_key = ExternalArrayOpRequiresTemp(
instr->key()->representation(), elements_kind);
@@ -2291,7 +2331,7 @@ LInstruction* LChunkBuilder::DoStoreKeyed(HStoreKeyed* instr) {
if (!instr->is_external()) {
ASSERT(instr->elements()->representation().IsTagged());
ASSERT(instr->key()->representation().IsInteger32() ||
- instr->key()->representation().IsTagged());
+ instr->key()->representation().IsSmi());
if (instr->value()->representation().IsDouble()) {
LOperand* object = UseRegisterAtStart(instr->elements());
@@ -2304,7 +2344,7 @@ LInstruction* LChunkBuilder::DoStoreKeyed(HStoreKeyed* instr) {
LOperand* key = UseRegisterOrConstantAtStart(instr->key());
return new(zone()) LStoreKeyed(object, key, val);
} else {
- ASSERT(instr->value()->representation().IsTagged());
+ ASSERT(instr->value()->representation().IsSmiOrTagged());
bool needs_write_barrier = instr->NeedsWriteBarrier();
LOperand* obj = UseRegister(instr->elements());
@@ -2401,13 +2441,14 @@ LInstruction* LChunkBuilder::DoTrapAllocationMemento(
LInstruction* LChunkBuilder::DoStoreNamedField(HStoreNamedField* instr) {
+ bool is_in_object = instr->access().IsInobject();
bool needs_write_barrier = instr->NeedsWriteBarrier();
bool needs_write_barrier_for_map = !instr->transition().is_null() &&
instr->NeedsWriteBarrierForMap();
LOperand* obj;
if (needs_write_barrier) {
- obj = instr->is_in_object()
+ obj = is_in_object
? UseRegister(instr->object())
: UseTempRegister(instr->object());
} else {
@@ -2440,7 +2481,7 @@ LInstruction* LChunkBuilder::DoStoreNamedField(HStoreNamedField* instr) {
// We only need a scratch register if we have a write barrier or we
// have a store into the properties array (not in-object-property).
- LOperand* temp = (!instr->is_in_object() || needs_write_barrier ||
+ LOperand* temp = (!is_in_object || needs_write_barrier ||
needs_write_barrier_for_map) ? TempRegister() : NULL;
// We need a temporary register for write barrier of the map field.
@@ -2448,10 +2489,11 @@ LInstruction* LChunkBuilder::DoStoreNamedField(HStoreNamedField* instr) {
LStoreNamedField* result =
new(zone()) LStoreNamedField(obj, val, temp, temp_map);
- if ((FLAG_track_fields && instr->field_representation().IsSmi()) ||
- (FLAG_track_heap_object_fields &&
- instr->field_representation().IsHeapObject())) {
- return AssignEnvironment(result);
+ if (FLAG_track_heap_object_fields &&
+ instr->field_representation().IsHeapObject()) {
+ if (!instr->value()->type().IsHeapObject()) {
+ return AssignEnvironment(result);
+ }
}
return result;
}
@@ -2502,15 +2544,6 @@ LInstruction* LChunkBuilder::DoStringLength(HStringLength* instr) {
}
-LInstruction* LChunkBuilder::DoAllocateObject(HAllocateObject* instr) {
- info()->MarkAsDeferredCalling();
- LOperand* context = UseAny(instr->context());
- LOperand* temp = TempRegister();
- LAllocateObject* result = new(zone()) LAllocateObject(context, temp);
- return AssignPointerMap(DefineAsRegister(result));
-}
-
-
LInstruction* LChunkBuilder::DoAllocate(HAllocate* instr) {
info()->MarkAsDeferredCalling();
LOperand* context = UseAny(instr->context());
diff --git a/deps/v8/src/ia32/lithium-ia32.h b/deps/v8/src/ia32/lithium-ia32.h
index b32ead9138..e43672cdd9 100644
--- a/deps/v8/src/ia32/lithium-ia32.h
+++ b/deps/v8/src/ia32/lithium-ia32.h
@@ -44,7 +44,6 @@ class LCodeGen;
V(AccessArgumentsAt) \
V(AddI) \
V(Allocate) \
- V(AllocateObject) \
V(ApplyArguments) \
V(ArgumentsElements) \
V(ArgumentsLength) \
@@ -82,6 +81,7 @@ class LCodeGen;
V(CmpConstantEqAndBranch) \
V(ConstantD) \
V(ConstantI) \
+ V(ConstantS) \
V(ConstantT) \
V(Context) \
V(DebugBreak) \
@@ -90,6 +90,7 @@ class LCodeGen;
V(Deoptimize) \
V(DivI) \
V(DoubleToI) \
+ V(DoubleToSmi) \
V(DummyUse) \
V(ElementsKind) \
V(FixedArrayBaseLength) \
@@ -106,6 +107,7 @@ class LCodeGen;
V(InstanceSize) \
V(InstructionGap) \
V(Integer32ToDouble) \
+ V(Integer32ToSmi) \
V(Uint32ToDouble) \
V(InvokeFunction) \
V(IsConstructCallAndBranch) \
@@ -1150,6 +1152,15 @@ class LConstantI: public LTemplateInstruction<1, 0, 0> {
};
+class LConstantS: public LTemplateInstruction<1, 0, 0> {
+ public:
+ DECLARE_CONCRETE_INSTRUCTION(ConstantS, "constant-s")
+ DECLARE_HYDROGEN_ACCESSOR(Constant)
+
+ Smi* value() const { return Smi::FromInt(hydrogen()->Integer32Value()); }
+};
+
+
class LConstantD: public LTemplateInstruction<1, 0, 1> {
public:
explicit LConstantD(LOperand* temp) {
@@ -1593,7 +1604,7 @@ inline static bool ExternalArrayOpRequiresTemp(
// Operations that require the key to be divided by two to be converted into
// an index cannot fold the scale operation into a load and need an extra
// temp register to do the work.
- return key_representation.IsTagged() &&
+ return key_representation.IsSmi() &&
(elements_kind == EXTERNAL_BYTE_ELEMENTS ||
elements_kind == EXTERNAL_UNSIGNED_BYTE_ELEMENTS ||
elements_kind == EXTERNAL_PIXEL_ELEMENTS);
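
    The predicate flips from IsTagged to IsSmi because keys now arrive
    smi-tagged, i.e. already multiplied by two. For 2-byte external elements
    that tagged key is directly usable as a byte offset, but byte-sized
    element kinds need the key shifted back down, hence the extra temp
    register. In offset arithmetic:

        // Assumed smi encoding: key = index << 1.
        int ByteOffset16(int smi_key) { return smi_key; }       // index * 2, free
        int ByteOffset8(int smi_key)  { return smi_key >> 1; }  // needs an untag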
@@ -1998,6 +2009,19 @@ class LInteger32ToDouble: public LTemplateInstruction<1, 1, 0> {
};
+class LInteger32ToSmi: public LTemplateInstruction<1, 1, 0> {
+ public:
+ explicit LInteger32ToSmi(LOperand* value) {
+ inputs_[0] = value;
+ }
+
+ LOperand* value() { return inputs_[0]; }
+
+ DECLARE_CONCRETE_INSTRUCTION(Integer32ToSmi, "int32-to-smi")
+ DECLARE_HYDROGEN_ACCESSOR(Change)
+};
+
+
class LUint32ToDouble: public LTemplateInstruction<1, 1, 1> {
public:
explicit LUint32ToDouble(LOperand* value, LOperand* temp) {
@@ -2069,6 +2093,19 @@ class LDoubleToI: public LTemplateInstruction<1, 1, 1> {
};
+class LDoubleToSmi: public LTemplateInstruction<1, 1, 0> {
+ public:
+ explicit LDoubleToSmi(LOperand* value) {
+ inputs_[0] = value;
+ }
+
+ LOperand* value() { return inputs_[0]; }
+
+ DECLARE_CONCRETE_INSTRUCTION(DoubleToSmi, "double-to-smi")
+ DECLARE_HYDROGEN_ACCESSOR(UnaryOperation)
+};
+
+
// Truncating conversion from a tagged value to an int32.
class LTaggedToI: public LTemplateInstruction<1, 1, 1> {
public:
@@ -2183,9 +2220,6 @@ class LStoreNamedField: public LTemplateInstruction<0, 2, 2> {
virtual void PrintDataTo(StringStream* stream);
- Handle<Object> name() const { return hydrogen()->name(); }
- bool is_in_object() { return hydrogen()->is_in_object(); }
- int offset() { return hydrogen()->offset(); }
Handle<Map> transition() const { return hydrogen()->transition(); }
Representation representation() const {
return hydrogen()->field_representation();
@@ -2432,7 +2466,7 @@ class LCheckPrototypeMaps: public LTemplateInstruction<0, 0, 1> {
};
-class LCheckSmi: public LTemplateInstruction<0, 1, 0> {
+class LCheckSmi: public LTemplateInstruction<1, 1, 0> {
public:
explicit LCheckSmi(LOperand* value) {
inputs_[0] = value;
@@ -2517,21 +2551,6 @@ class LCheckNonSmi: public LTemplateInstruction<0, 1, 0> {
};
-class LAllocateObject: public LTemplateInstruction<1, 1, 1> {
- public:
- LAllocateObject(LOperand* context, LOperand* temp) {
- inputs_[0] = context;
- temps_[0] = temp;
- }
-
- LOperand* context() { return inputs_[0]; }
- LOperand* temp() { return temps_[0]; }
-
- DECLARE_CONCRETE_INSTRUCTION(AllocateObject, "allocate-object")
- DECLARE_HYDROGEN_ACCESSOR(AllocateObject)
-};
-
-
class LAllocate: public LTemplateInstruction<1, 2, 1> {
public:
LAllocate(LOperand* context, LOperand* size, LOperand* temp) {
diff --git a/deps/v8/src/ia32/macro-assembler-ia32.cc b/deps/v8/src/ia32/macro-assembler-ia32.cc
index 5c18cae461..38b02a52c4 100644
--- a/deps/v8/src/ia32/macro-assembler-ia32.cc
+++ b/deps/v8/src/ia32/macro-assembler-ia32.cc
@@ -580,38 +580,21 @@ void MacroAssembler::StoreNumberToDoubleElements(
void MacroAssembler::CompareMap(Register obj,
Handle<Map> map,
- Label* early_success,
- CompareMapMode mode) {
+ Label* early_success) {
cmp(FieldOperand(obj, HeapObject::kMapOffset), map);
- if (mode == ALLOW_ELEMENT_TRANSITION_MAPS) {
- ElementsKind kind = map->elements_kind();
- if (IsFastElementsKind(kind)) {
- bool packed = IsFastPackedElementsKind(kind);
- Map* current_map = *map;
- while (CanTransitionToMoreGeneralFastElementsKind(kind, packed)) {
- kind = GetNextMoreGeneralFastElementsKind(kind, packed);
- current_map = current_map->LookupElementsTransitionMap(kind);
- if (!current_map) break;
- j(equal, early_success, Label::kNear);
- cmp(FieldOperand(obj, HeapObject::kMapOffset),
- Handle<Map>(current_map));
- }
- }
- }
}
void MacroAssembler::CheckMap(Register obj,
Handle<Map> map,
Label* fail,
- SmiCheckType smi_check_type,
- CompareMapMode mode) {
+ SmiCheckType smi_check_type) {
if (smi_check_type == DO_SMI_CHECK) {
JumpIfSmi(obj, fail);
}
Label success;
- CompareMap(obj, map, &success, mode);
+ CompareMap(obj, map, &success);
j(not_equal, fail);
bind(&success);
}
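
    With ALLOW_ELEMENT_TRANSITION_MAPS gone, CompareMap degenerates to a
    single word comparison against the object's map, and CheckMap is just an
    optional smi guard in front of it. The contract, sketched with stand-in
    types:

        struct HeapObjectRep { const void* map; };  // hypothetical stand-in

        // Mirrors CheckMap after the simplification: smi check (optional),
        // then one exact map comparison; false means "jump to fail".
        bool CheckMapExact(const HeapObjectRep* obj, const void* expected_map,
                           bool do_smi_check, bool is_smi) {
          if (do_smi_check && is_smi) return false;
          return obj->map == expected_map;
        }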
@@ -2510,7 +2493,7 @@ int MacroAssembler::SafepointRegisterStackIndex(int reg_code) {
void MacroAssembler::LoadHeapObject(Register result,
Handle<HeapObject> object) {
- ALLOW_HANDLE_DEREF(isolate(), "embedding raw address");
+ AllowDeferredHandleDereference embedding_raw_address;
if (isolate()->heap()->InNewSpace(*object)) {
Handle<JSGlobalPropertyCell> cell =
isolate()->factory()->NewJSGlobalPropertyCell(object);
@@ -2521,8 +2504,20 @@ void MacroAssembler::LoadHeapObject(Register result,
}
+void MacroAssembler::CmpHeapObject(Register reg, Handle<HeapObject> object) {
+ AllowDeferredHandleDereference using_raw_address;
+ if (isolate()->heap()->InNewSpace(*object)) {
+ Handle<JSGlobalPropertyCell> cell =
+ isolate()->factory()->NewJSGlobalPropertyCell(object);
+ cmp(reg, Operand::Cell(cell));
+ } else {
+ cmp(reg, object);
+ }
+}
+
+
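
    CmpHeapObject mirrors LoadHeapObject's treatment of new-space objects: a
    scavenge can move them, so the code must not embed the raw address.
    Instead a global property cell, which lives in old space and is kept
    current by the GC, holds the object, and the comparison goes through the
    cell. As an indirection sketch:

        struct Cell { const void* value; };  // stand-in for JSGlobalPropertyCell

        // Old-space object: compare the embedded pointer directly.
        bool CmpDirect(const void* reg, const void* object) {
          return reg == object;
        }

        // New-space object: compare through the cell the GC keeps up to date.
        bool CmpThroughCell(const void* reg, const Cell* cell) {
          return reg == cell->value;  // cmp(reg, Operand::Cell(cell))
        }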
void MacroAssembler::PushHeapObject(Handle<HeapObject> object) {
- ALLOW_HANDLE_DEREF(isolate(), "using raw address");
+ AllowDeferredHandleDereference using_raw_address;
if (isolate()->heap()->InNewSpace(*object)) {
Handle<JSGlobalPropertyCell> cell =
isolate()->factory()->NewJSGlobalPropertyCell(object);
@@ -3032,7 +3027,7 @@ void MacroAssembler::EnsureNotWhite(
// Check for heap-number
mov(map, FieldOperand(value, HeapObject::kMapOffset));
- cmp(map, FACTORY->heap_number_map());
+ cmp(map, isolate()->factory()->heap_number_map());
j(not_equal, &not_heap_number, Label::kNear);
mov(length, Immediate(HeapNumber::kSize));
jmp(&is_data_object, Label::kNear);
diff --git a/deps/v8/src/ia32/macro-assembler-ia32.h b/deps/v8/src/ia32/macro-assembler-ia32.h
index e7a075d10d..8380507ec0 100644
--- a/deps/v8/src/ia32/macro-assembler-ia32.h
+++ b/deps/v8/src/ia32/macro-assembler-ia32.h
@@ -272,10 +272,11 @@ class MacroAssembler: public Assembler {
void LoadFromSafepointRegisterSlot(Register dst, Register src);
void LoadHeapObject(Register result, Handle<HeapObject> object);
+ void CmpHeapObject(Register reg, Handle<HeapObject> object);
void PushHeapObject(Handle<HeapObject> object);
void LoadObject(Register result, Handle<Object> object) {
- ALLOW_HANDLE_DEREF(isolate(), "heap object check");
+ AllowDeferredHandleDereference heap_object_check;
if (object->IsHeapObject()) {
LoadHeapObject(result, Handle<HeapObject>::cast(object));
} else {
@@ -283,6 +284,15 @@ class MacroAssembler: public Assembler {
}
}
+ void CmpObject(Register reg, Handle<Object> object) {
+ AllowDeferredHandleDereference heap_object_check;
+ if (object->IsHeapObject()) {
+ CmpHeapObject(reg, Handle<HeapObject>::cast(object));
+ } else {
+ cmp(reg, Immediate(object));
+ }
+ }
+
// ---------------------------------------------------------------------------
// JavaScript invokes
@@ -399,8 +409,7 @@ class MacroAssembler: public Assembler {
// sequences branches to early_success.
void CompareMap(Register obj,
Handle<Map> map,
- Label* early_success,
- CompareMapMode mode = REQUIRE_EXACT_MAP);
+ Label* early_success);
// Check if the map of an object is equal to a specified map and branch to
// label if not. Skip the smi check if not required (object is known to be a
@@ -409,8 +418,7 @@ class MacroAssembler: public Assembler {
void CheckMap(Register obj,
Handle<Map> map,
Label* fail,
- SmiCheckType smi_check_type,
- CompareMapMode mode = REQUIRE_EXACT_MAP);
+ SmiCheckType smi_check_type);
// Check if the map of an object is equal to a specified map and branch to a
// specified target if equal. Skip the smi check if not required (object is
diff --git a/deps/v8/src/ia32/regexp-macro-assembler-ia32.cc b/deps/v8/src/ia32/regexp-macro-assembler-ia32.cc
index d635fe1a8a..9a166d7d48 100644
--- a/deps/v8/src/ia32/regexp-macro-assembler-ia32.cc
+++ b/deps/v8/src/ia32/regexp-macro-assembler-ia32.cc
@@ -104,7 +104,7 @@ RegExpMacroAssemblerIA32::RegExpMacroAssemblerIA32(
int registers_to_save,
Zone* zone)
: NativeRegExpMacroAssembler(zone),
- masm_(new MacroAssembler(Isolate::Current(), NULL, kRegExpCodeSize)),
+ masm_(new MacroAssembler(zone->isolate(), NULL, kRegExpCodeSize)),
mode_(mode),
num_registers_(registers_to_save),
num_saved_registers_(registers_to_save),
@@ -209,86 +209,6 @@ void RegExpMacroAssemblerIA32::CheckCharacterLT(uc16 limit, Label* on_less) {
}
-void RegExpMacroAssemblerIA32::CheckCharacters(Vector<const uc16> str,
- int cp_offset,
- Label* on_failure,
- bool check_end_of_string) {
-#ifdef DEBUG
- // If input is ASCII, don't even bother calling here if the string to
- // match contains a non-ASCII character.
- if (mode_ == ASCII) {
- ASSERT(String::IsOneByte(str.start(), str.length()));
- }
-#endif
- int byte_length = str.length() * char_size();
- int byte_offset = cp_offset * char_size();
- if (check_end_of_string) {
- // Check that there are at least str.length() characters left in the input.
- __ cmp(edi, Immediate(-(byte_offset + byte_length)));
- BranchOrBacktrack(greater, on_failure);
- }
-
- if (on_failure == NULL) {
- // Instead of inlining a backtrack, (re)use the global backtrack target.
- on_failure = &backtrack_label_;
- }
-
- // Do one character test first to minimize loading for the case that
- // we don't match at all (loading more than one character introduces that
- // chance of reading unaligned and reading across cache boundaries).
- // If the first character matches, expect a larger chance of matching the
- // string, and start loading more characters at a time.
- if (mode_ == ASCII) {
- __ cmpb(Operand(esi, edi, times_1, byte_offset),
- static_cast<int8_t>(str[0]));
- } else {
- // Don't use 16-bit immediate. The size changing prefix throws off
- // pre-decoding.
- __ movzx_w(eax,
- Operand(esi, edi, times_1, byte_offset));
- __ cmp(eax, static_cast<int32_t>(str[0]));
- }
- BranchOrBacktrack(not_equal, on_failure);
-
- __ lea(ebx, Operand(esi, edi, times_1, 0));
- for (int i = 1, n = str.length(); i < n;) {
- if (mode_ == ASCII) {
- if (i <= n - 4) {
- int combined_chars =
- (static_cast<uint32_t>(str[i + 0]) << 0) |
- (static_cast<uint32_t>(str[i + 1]) << 8) |
- (static_cast<uint32_t>(str[i + 2]) << 16) |
- (static_cast<uint32_t>(str[i + 3]) << 24);
- __ cmp(Operand(ebx, byte_offset + i), Immediate(combined_chars));
- i += 4;
- } else {
- __ cmpb(Operand(ebx, byte_offset + i),
- static_cast<int8_t>(str[i]));
- i += 1;
- }
- } else {
- ASSERT(mode_ == UC16);
- if (i <= n - 2) {
- __ cmp(Operand(ebx, byte_offset + i * sizeof(uc16)),
- Immediate(*reinterpret_cast<const int*>(&str[i])));
- i += 2;
- } else {
- // Avoid a 16-bit immediate operation. It uses the length-changing
- // 0x66 prefix which causes pre-decoder misprediction and pipeline
- // stalls. See
- // "Intel(R) 64 and IA-32 Architectures Optimization Reference Manual"
- // (248966.pdf) section 3.4.2.3 "Length-Changing Prefixes (LCP)"
- __ movzx_w(eax,
- Operand(ebx, byte_offset + i * sizeof(uc16)));
- __ cmp(eax, static_cast<int32_t>(str[i]));
- i += 1;
- }
- }
- BranchOrBacktrack(not_equal, on_failure);
- }
-}
-
-
void RegExpMacroAssemblerIA32::CheckGreedyLoop(Label* on_equal) {
Label fallthrough;
__ cmp(edi, Operand(backtrack_stackpointer(), 0));
diff --git a/deps/v8/src/ia32/regexp-macro-assembler-ia32.h b/deps/v8/src/ia32/regexp-macro-assembler-ia32.h
index 6040d8058a..3933336007 100644
--- a/deps/v8/src/ia32/regexp-macro-assembler-ia32.h
+++ b/deps/v8/src/ia32/regexp-macro-assembler-ia32.h
@@ -52,10 +52,6 @@ class RegExpMacroAssemblerIA32: public NativeRegExpMacroAssembler {
Label* on_equal);
virtual void CheckCharacterGT(uc16 limit, Label* on_greater);
virtual void CheckCharacterLT(uc16 limit, Label* on_less);
- virtual void CheckCharacters(Vector<const uc16> str,
- int cp_offset,
- Label* on_failure,
- bool check_end_of_string);
// A "greedy loop" is a loop that is both greedy and with a simple
// body. It has a particularly simple implementation.
virtual void CheckGreedyLoop(Label* on_tos_equals_current_position);
diff --git a/deps/v8/src/ia32/stub-cache-ia32.cc b/deps/v8/src/ia32/stub-cache-ia32.cc
index 88ea4b2dc8..3906623a58 100644
--- a/deps/v8/src/ia32/stub-cache-ia32.cc
+++ b/deps/v8/src/ia32/stub-cache-ia32.cc
@@ -469,11 +469,12 @@ static void GenerateFastApiCall(MacroAssembler* masm,
// (first fast api call extra argument)
// -- esp[12] : api call data
// -- esp[16] : isolate
- // -- esp[20] : ReturnValue
- // -- esp[24] : last argument
+ // -- esp[20] : ReturnValue default value
+ // -- esp[24] : ReturnValue
+ // -- esp[28] : last argument
// -- ...
- // -- esp[(argc + 5) * 4] : first argument
- // -- esp[(argc + 6) * 4] : receiver
+ // -- esp[(argc + 6) * 4] : first argument
+ // -- esp[(argc + 7) * 4] : receiver
// -----------------------------------
// Get the function and setup the context.
Handle<JSFunction> function = optimization.constant_function();
@@ -495,9 +496,11 @@ static void GenerateFastApiCall(MacroAssembler* masm,
Immediate(reinterpret_cast<int>(masm->isolate())));
__ mov(Operand(esp, 5 * kPointerSize),
masm->isolate()->factory()->undefined_value());
+ __ mov(Operand(esp, 6 * kPointerSize),
+ masm->isolate()->factory()->undefined_value());
// Prepare arguments.
- STATIC_ASSERT(kFastApiCallArguments == 5);
+ STATIC_ASSERT(kFastApiCallArguments == 6);
__ lea(eax, Operand(esp, kFastApiCallArguments * kPointerSize));
const int kApiArgc = 1; // API function gets reference to the v8::Arguments.
@@ -783,7 +786,7 @@ void StubCompiler::GenerateStoreTransition(MacroAssembler* masm,
Label* slow) {
// Check that the map of the object hasn't changed.
__ CheckMap(receiver_reg, Handle<Map>(object->map()),
- miss_label, DO_SMI_CHECK, REQUIRE_EXACT_MAP);
+ miss_label, DO_SMI_CHECK);
// Perform global security token check if needed.
if (object->IsJSGlobalProxy()) {
@@ -859,7 +862,7 @@ void StubCompiler::GenerateStoreTransition(MacroAssembler* masm,
__ bind(&heap_number);
__ CheckMap(value_reg, masm->isolate()->factory()->heap_number_map(),
- miss_restore_name, DONT_DO_SMI_CHECK, REQUIRE_EXACT_MAP);
+ miss_restore_name, DONT_DO_SMI_CHECK);
if (CpuFeatures::IsSupported(SSE2)) {
CpuFeatureScope use_sse2(masm, SSE2);
__ movdbl(xmm0, FieldOperand(value_reg, HeapNumber::kValueOffset));
@@ -918,6 +921,8 @@ void StubCompiler::GenerateStoreTransition(MacroAssembler* masm,
// object and the number of in-object properties is not going to change.
index -= object->map()->inobject_properties();
+ SmiCheck smi_check = representation.IsTagged()
+ ? INLINE_SMI_CHECK : OMIT_SMI_CHECK;
// TODO(verwaest): Share this code as a code stub.
if (index < 0) {
// Set the property straight into the object.
@@ -940,7 +945,9 @@ void StubCompiler::GenerateStoreTransition(MacroAssembler* masm,
offset,
name_reg,
scratch1,
- kDontSaveFPRegs);
+ kDontSaveFPRegs,
+ EMIT_REMEMBERED_SET,
+ smi_check);
}
} else {
// Write to the properties array.
@@ -965,7 +972,9 @@ void StubCompiler::GenerateStoreTransition(MacroAssembler* masm,
offset,
name_reg,
receiver_reg,
- kDontSaveFPRegs);
+ kDontSaveFPRegs,
+ EMIT_REMEMBERED_SET,
+ smi_check);
}
}
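
    The smi_check computed above toggles the write barrier's inline fast
    path: a smi store can never create a pointer the remembered set has to
    track, so when field tracking already proves the value is a heap object
    the inline smi test is dead and is omitted; only a fully tagged
    representation keeps it. Roughly:

        #include <cstdint>

        // With INLINE_SMI_CHECK the barrier first tests the tag bit and
        // skips the remembered-set work for smis; with OMIT_SMI_CHECK that
        // test is known redundant and dropped.
        bool BarrierMayBeNeeded(int32_t tagged_value, bool inline_smi_check) {
          if (inline_smi_check && (tagged_value & 1) == 0) return false;  // smi
          return true;  // heap object: fall through to the barrier proper
        }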
@@ -988,7 +997,7 @@ void StubCompiler::GenerateStoreField(MacroAssembler* masm,
Label* miss_label) {
// Check that the map of the object hasn't changed.
__ CheckMap(receiver_reg, Handle<Map>(object->map()),
- miss_label, DO_SMI_CHECK, ALLOW_ELEMENT_TRANSITION_MAPS);
+ miss_label, DO_SMI_CHECK);
// Perform global security token check if needed.
if (object->IsJSGlobalProxy()) {
@@ -1039,7 +1048,7 @@ void StubCompiler::GenerateStoreField(MacroAssembler* masm,
__ jmp(&do_store);
__ bind(&heap_number);
__ CheckMap(value_reg, masm->isolate()->factory()->heap_number_map(),
- miss_label, DONT_DO_SMI_CHECK, REQUIRE_EXACT_MAP);
+ miss_label, DONT_DO_SMI_CHECK);
if (CpuFeatures::IsSupported(SSE2)) {
CpuFeatureScope use_sse2(masm, SSE2);
__ movdbl(xmm0, FieldOperand(value_reg, HeapNumber::kValueOffset));
@@ -1061,6 +1070,8 @@ void StubCompiler::GenerateStoreField(MacroAssembler* masm,
ASSERT(!FLAG_track_double_fields || !representation.IsDouble());
// TODO(verwaest): Share this code as a code stub.
+ SmiCheck smi_check = representation.IsTagged()
+ ? INLINE_SMI_CHECK : OMIT_SMI_CHECK;
if (index < 0) {
// Set the property straight into the object.
int offset = object->map()->instance_size() + (index * kPointerSize);
@@ -1074,7 +1085,9 @@ void StubCompiler::GenerateStoreField(MacroAssembler* masm,
offset,
name_reg,
scratch1,
- kDontSaveFPRegs);
+ kDontSaveFPRegs,
+ EMIT_REMEMBERED_SET,
+ smi_check);
}
} else {
// Write to the properties array.
@@ -1091,7 +1104,9 @@ void StubCompiler::GenerateStoreField(MacroAssembler* masm,
offset,
name_reg,
receiver_reg,
- kDontSaveFPRegs);
+ kDontSaveFPRegs,
+ EMIT_REMEMBERED_SET,
+ smi_check);
}
}
@@ -1187,8 +1202,7 @@ Register StubCompiler::CheckPrototypes(Handle<JSObject> object,
bool in_new_space = heap()->InNewSpace(*prototype);
Handle<Map> current_map(current->map());
if (!current.is_identical_to(first) || check == CHECK_ALL_MAPS) {
- __ CheckMap(reg, current_map, miss, DONT_DO_SMI_CHECK,
- ALLOW_ELEMENT_TRANSITION_MAPS);
+ __ CheckMap(reg, current_map, miss, DONT_DO_SMI_CHECK);
}
// Check access rights to the global object. This has to happen after
@@ -1229,8 +1243,7 @@ Register StubCompiler::CheckPrototypes(Handle<JSObject> object,
if (!holder.is_identical_to(first) || check == CHECK_ALL_MAPS) {
// Check the holder map.
- __ CheckMap(reg, Handle<Map>(holder->map()),
- miss, DONT_DO_SMI_CHECK, ALLOW_ELEMENT_TRANSITION_MAPS);
+ __ CheckMap(reg, Handle<Map>(holder->map()), miss, DONT_DO_SMI_CHECK);
}
// Perform security check for access to the global object.
@@ -1376,8 +1389,10 @@ void BaseLoadStubCompiler::GenerateLoadCallback(
} else {
__ push(Immediate(Handle<Object>(callback->data(), isolate())));
}
- __ push(Immediate(reinterpret_cast<int>(isolate())));
__ push(Immediate(isolate()->factory()->undefined_value())); // ReturnValue
+ // ReturnValue default value
+ __ push(Immediate(isolate()->factory()->undefined_value()));
+ __ push(Immediate(reinterpret_cast<int>(isolate())));
// Save a pointer to where we pushed the arguments pointer. This will be
// passed as the const ExecutableAccessorInfo& to the C++ callback.
@@ -1410,7 +1425,7 @@ void BaseLoadStubCompiler::GenerateLoadCallback(
__ CallApiFunctionAndReturn(getter_address,
kStackSpace,
returns_handle,
- 4);
+ 6);
}
@@ -2886,8 +2901,7 @@ Handle<Code> StoreStubCompiler::CompileStoreInterceptor(
Label miss;
// Check that the map of the object hasn't changed.
- __ CheckMap(receiver(), Handle<Map>(object->map()),
- &miss, DO_SMI_CHECK, ALLOW_ELEMENT_TRANSITION_MAPS);
+ __ CheckMap(receiver(), Handle<Map>(object->map()), &miss, DO_SMI_CHECK);
// Perform global security token check if needed.
if (object->IsJSGlobalProxy()) {
@@ -3169,145 +3183,6 @@ Handle<Code> BaseLoadStubCompiler::CompilePolymorphicIC(
}
-// Specialized stub for constructing objects from functions which only have
-// simple assignments of the form this.x = ...; in their body.
-Handle<Code> ConstructStubCompiler::CompileConstructStub(
- Handle<JSFunction> function) {
- // ----------- S t a t e -------------
- // -- eax : argc
- // -- edi : constructor
- // -- esp[0] : return address
- // -- esp[4] : last argument
- // -----------------------------------
- Label generic_stub_call;
-#ifdef ENABLE_DEBUGGER_SUPPORT
- // Check to see whether there are any break points in the function code. If
- // there are, jump to the generic constructor stub which calls the actual
- // code for the function, thereby hitting the break points.
- __ mov(ebx, FieldOperand(edi, JSFunction::kSharedFunctionInfoOffset));
- __ mov(ebx, FieldOperand(ebx, SharedFunctionInfo::kDebugInfoOffset));
- __ cmp(ebx, factory()->undefined_value());
- __ j(not_equal, &generic_stub_call);
-#endif
-
- // Load the initial map and verify that it is in fact a map.
- // edi: constructor
- __ mov(ebx, FieldOperand(edi, JSFunction::kPrototypeOrInitialMapOffset));
- // Both a NULL and a Smi are caught by this check.
- __ JumpIfSmi(ebx, &generic_stub_call);
- __ CmpObjectType(ebx, MAP_TYPE, ecx);
- __ j(not_equal, &generic_stub_call);
-
-#ifdef DEBUG
- // Cannot construct functions this way.
- // ebx: initial map
- __ CmpInstanceType(ebx, JS_FUNCTION_TYPE);
- __ Check(not_equal, "Function constructed by construct stub.");
-#endif
-
- // Now allocate the JSObject on the heap by moving the new space allocation
- // top forward.
- // ebx: initial map
- ASSERT(function->has_initial_map());
- int instance_size = function->initial_map()->instance_size();
-#ifdef DEBUG
- __ movzx_b(ecx, FieldOperand(ebx, Map::kInstanceSizeOffset));
- __ shl(ecx, kPointerSizeLog2);
- __ cmp(ecx, Immediate(instance_size));
- __ Check(equal, "Instance size of initial map changed.");
-#endif
- __ Allocate(instance_size, edx, ecx, no_reg, &generic_stub_call,
- NO_ALLOCATION_FLAGS);
-
- // Allocated the JSObject, now initialize the fields and add the heap tag.
- // ebx: initial map
- // edx: JSObject (untagged)
- __ mov(Operand(edx, JSObject::kMapOffset), ebx);
- __ mov(ebx, factory()->empty_fixed_array());
- __ mov(Operand(edx, JSObject::kPropertiesOffset), ebx);
- __ mov(Operand(edx, JSObject::kElementsOffset), ebx);
-
- // Push the allocated object to the stack. This is the object that will be
- // returned (after it is tagged).
- __ push(edx);
-
- // eax: argc
- // edx: JSObject (untagged)
- // Load the address of the first in-object property into edx.
- __ lea(edx, Operand(edx, JSObject::kHeaderSize));
- // Calculate the location of the first argument. The stack contains the
- // allocated object and the return address on top of the argc arguments.
- __ lea(ecx, Operand(esp, eax, times_4, 1 * kPointerSize));
-
- // Use edi for holding undefined which is used in several places below.
- __ mov(edi, factory()->undefined_value());
-
- // eax: argc
- // ecx: first argument
- // edx: first in-object property of the JSObject
- // edi: undefined
- // Fill the initialized properties with a constant value or a passed argument
- // depending on the this.x = ...; assignment in the function.
- Handle<SharedFunctionInfo> shared(function->shared());
- for (int i = 0; i < shared->this_property_assignments_count(); i++) {
- if (shared->IsThisPropertyAssignmentArgument(i)) {
- // Check if the argument assigned to the property is actually passed.
- // If argument is not passed the property is set to undefined,
- // otherwise find it on the stack.
- int arg_number = shared->GetThisPropertyAssignmentArgument(i);
- __ mov(ebx, edi);
- __ cmp(eax, arg_number);
- if (CpuFeatures::IsSupported(CMOV)) {
- CpuFeatureScope use_cmov(masm(), CMOV);
- __ cmov(above, ebx, Operand(ecx, arg_number * -kPointerSize));
- } else {
- Label not_passed;
- __ j(below_equal, &not_passed);
- __ mov(ebx, Operand(ecx, arg_number * -kPointerSize));
- __ bind(&not_passed);
- }
- // Store value in the property.
- __ mov(Operand(edx, i * kPointerSize), ebx);
- } else {
- // Set the property to the constant value.
- Handle<Object> constant(shared->GetThisPropertyAssignmentConstant(i),
- isolate());
- __ mov(Operand(edx, i * kPointerSize), Immediate(constant));
- }
- }
-
- // Fill the unused in-object property fields with undefined.
- for (int i = shared->this_property_assignments_count();
- i < function->initial_map()->inobject_properties();
- i++) {
- __ mov(Operand(edx, i * kPointerSize), edi);
- }
-
- // Move argc to ebx and retrieve and tag the JSObject to return.
- __ mov(ebx, eax);
- __ pop(eax);
- __ or_(eax, Immediate(kHeapObjectTag));
-
- // Remove caller arguments and receiver from the stack and return.
- __ pop(ecx);
- __ lea(esp, Operand(esp, ebx, times_pointer_size, 1 * kPointerSize));
- __ push(ecx);
- Counters* counters = isolate()->counters();
- __ IncrementCounter(counters->constructed_objects(), 1);
- __ IncrementCounter(counters->constructed_objects_stub(), 1);
- __ ret(0);
-
- // Jump to the generic stub in case the specialized code cannot handle the
- // construction.
- __ bind(&generic_stub_call);
- Handle<Code> code = isolate()->builtins()->JSConstructStubGeneric();
- __ jmp(code, RelocInfo::CODE_TARGET);
-
- // Return the generated code.
- return GetCode();
-}
-
-
#undef __
#define __ ACCESS_MASM(masm)
diff --git a/deps/v8/src/ic.cc b/deps/v8/src/ic.cc
index ea0c1fbbe1..94e8773a16 100644
--- a/deps/v8/src/ic.cc
+++ b/deps/v8/src/ic.cc
@@ -937,7 +937,13 @@ MaybeObject* LoadIC::Load(State state,
// Update inline cache and stub cache.
if (FLAG_use_ic) {
- UpdateCaches(&lookup, state, object, name);
+ if (!object->IsJSObject()) {
+ // TODO(jkummerow): It would be nice to support non-JSObjects in
+ // UpdateCaches, then we wouldn't need to go generic here.
+ set_target(*generic_stub());
+ } else {
+ UpdateCaches(&lookup, state, object, name);
+ }
}
PropertyAttributes attr;
@@ -991,7 +997,7 @@ bool IC::UpdatePolymorphicIC(State state,
int handler_to_overwrite = -1;
Handle<Map> new_receiver_map(receiver->map());
{
- AssertNoAllocation no_gc;
+ DisallowHeapAllocation no_gc;
target()->FindAllMaps(&receiver_maps);
int number_of_maps = receiver_maps.length();
number_of_valid_maps = number_of_maps;
@@ -1059,7 +1065,7 @@ void IC::CopyICToMegamorphicCache(Handle<String> name) {
MapHandleList receiver_maps;
CodeHandleList handlers;
{
- AssertNoAllocation no_gc;
+ DisallowHeapAllocation no_gc;
target()->FindAllMaps(&receiver_maps);
target()->FindAllCode(&handlers, receiver_maps.length());
}
@@ -1070,7 +1076,7 @@ void IC::CopyICToMegamorphicCache(Handle<String> name) {
bool IC::IsTransitionedMapOfMonomorphicTarget(Map* receiver_map) {
- AssertNoAllocation no_allocation;
+ DisallowHeapAllocation no_allocation;
Map* current_map = target()->FindFirstMap();
ElementsKind receiver_elements_kind = receiver_map->elements_kind();
@@ -1104,7 +1110,7 @@ void IC::PatchCache(State state,
if (target()->is_load_stub()) {
bool is_same_handler = false;
{
- AssertNoAllocation no_allocation;
+ DisallowHeapAllocation no_allocation;
Code* old_handler = target()->FindFirstCode();
is_same_handler = old_handler == *code;
}
@@ -1169,7 +1175,7 @@ static void GetReceiverMapsForStub(Handle<Code> stub,
break;
}
case POLYMORPHIC: {
- AssertNoAllocation no_allocation;
+ DisallowHeapAllocation no_allocation;
int mask = RelocInfo::ModeMask(RelocInfo::EMBEDDED_OBJECT);
for (RelocIterator it(*stub, mask); !it.done(); it.next()) {
RelocInfo* info = it.rinfo();
@@ -1535,6 +1541,10 @@ static bool LookupForWrite(Handle<JSObject> receiver,
Handle<Map> target(lookup->GetTransitionMapFromMap(receiver->map()));
Map::GeneralizeRepresentation(
target, target->LastAdded(), value->OptimalRepresentation());
+ // Lookup the transition again since the transition tree may have changed
+ // entirely by the migration above.
+ receiver->map()->LookupTransition(*holder, *name, lookup);
+ if (!lookup->IsTransition()) return false;
*state = MONOMORPHIC_PROTOTYPE_FAILURE;
}
return true;
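
    The added re-lookup guards against GeneralizeRepresentation rebuilding
    the transition tree out from under the cached LookupResult; the generic
    shape of the fix is "mutate, then look up again and bail out if the entry
    vanished". A minimal illustration with a std::map standing in for the
    transition tree:

        #include <map>
        #include <string>

        bool StillHasTransition(std::map<std::string, int>& transitions,
                                const std::string& name, int* out) {
          // ... an operation that may rebuild `transitions` happened here ...
          auto it = transitions.find(name);  // redo the lookup afterwards
          if (it == transitions.end()) return false;  // gone: treat as a miss
          *out = it->second;
          return true;
        }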
@@ -2224,7 +2234,7 @@ RUNTIME_FUNCTION(MaybeObject*, StoreIC_Miss) {
RUNTIME_FUNCTION(MaybeObject*, StoreIC_ArrayLength) {
- NoHandleAllocation nha(isolate);
+ SealHandleScope shs(isolate);
ASSERT(args.length() == 2);
JSArray* receiver = JSArray::cast(args[0]);
@@ -2252,7 +2262,7 @@ RUNTIME_FUNCTION(MaybeObject*, StoreIC_ArrayLength) {
// it is necessary to extend the properties array of a
// JSObject.
RUNTIME_FUNCTION(MaybeObject*, SharedStoreIC_ExtendStorage) {
- NoHandleAllocation na(isolate);
+ SealHandleScope shs(isolate);
ASSERT(args.length() == 3);
// Convert the parameters
@@ -2329,7 +2339,7 @@ RUNTIME_FUNCTION(MaybeObject*, KeyedStoreIC_MissFromStubFailure) {
RUNTIME_FUNCTION(MaybeObject*, StoreIC_Slow) {
- NoHandleAllocation na(isolate);
+ SealHandleScope shs(isolate);
ASSERT(args.length() == 3);
StoreIC ic(IC::NO_EXTRA_FRAME, isolate);
Code::ExtraICState extra_ic_state = ic.target()->extra_ic_state();
@@ -2347,7 +2357,7 @@ RUNTIME_FUNCTION(MaybeObject*, StoreIC_Slow) {
RUNTIME_FUNCTION(MaybeObject*, KeyedStoreIC_Slow) {
- NoHandleAllocation na(isolate);
+ SealHandleScope shs(isolate);
ASSERT(args.length() == 3);
KeyedStoreIC ic(IC::NO_EXTRA_FRAME, isolate);
Code::ExtraICState extra_ic_state = ic.target()->extra_ic_state();
@@ -2498,7 +2508,7 @@ RUNTIME_FUNCTION(MaybeObject*, UnaryOp_Patch) {
if (FLAG_trace_ic) {
PrintF("[UnaryOpIC in ");
JavaScriptFrame::PrintTop(isolate, stdout, false, true);
- PrintF(" (%s->%s)#%s @ %p]\n",
+ PrintF(" %s => %s #%s @ %p]\n",
UnaryOpIC::GetName(previous_type),
UnaryOpIC::GetName(type),
Token::Name(op),
@@ -2572,6 +2582,19 @@ static BinaryOpIC::TypeInfo InputState(BinaryOpIC::TypeInfo old_type,
}
+#ifdef DEBUG
+static void TraceBinaryOp(BinaryOpIC::TypeInfo left,
+ BinaryOpIC::TypeInfo right,
+ bool has_fixed_right_arg,
+ int32_t fixed_right_arg_value,
+ BinaryOpIC::TypeInfo result) {
+ PrintF("%s*%s", BinaryOpIC::GetName(left), BinaryOpIC::GetName(right));
+ if (has_fixed_right_arg) PrintF("{%d}", fixed_right_arg_value);
+ PrintF("->%s", BinaryOpIC::GetName(result));
+}
+#endif
+
+
RUNTIME_FUNCTION(MaybeObject*, BinaryOp_Patch) {
ASSERT(args.length() == 3);
@@ -2580,9 +2603,10 @@ RUNTIME_FUNCTION(MaybeObject*, BinaryOp_Patch) {
Handle<Object> right = args.at<Object>(1);
int key = args.smi_at(2);
Token::Value op = BinaryOpStub::decode_op_from_minor_key(key);
- BinaryOpIC::TypeInfo previous_left, previous_right, unused_previous_result;
+
+ BinaryOpIC::TypeInfo previous_left, previous_right, previous_result;
BinaryOpStub::decode_types_from_minor_key(
- key, &previous_left, &previous_right, &unused_previous_result);
+ key, &previous_left, &previous_right, &previous_result);
BinaryOpIC::TypeInfo new_left = InputState(previous_left, left, op);
BinaryOpIC::TypeInfo new_right = InputState(previous_right, right, op);
@@ -2597,43 +2621,60 @@ RUNTIME_FUNCTION(MaybeObject*, BinaryOp_Patch) {
BinaryOpIC::TypeInfo new_overall = Max(new_left, new_right);
BinaryOpIC::TypeInfo previous_overall = Max(previous_left, previous_right);
- if (new_overall == BinaryOpIC::SMI && previous_overall == BinaryOpIC::SMI) {
- if (op == Token::DIV ||
- op == Token::MUL ||
- op == Token::SHR ||
- kSmiValueSize == 32) {
- // Arithmetic on two Smi inputs has yielded a heap number.
- // That is the only way to get here from the Smi stub.
- // With 32-bit Smis, all overflows give heap numbers, but with
- // 31-bit Smis, most operations overflow to int32 results.
- result_type = BinaryOpIC::NUMBER;
- } else {
- // Other operations on SMIs that overflow yield int32s.
- result_type = BinaryOpIC::INT32;
+ bool previous_has_fixed_right_arg =
+ BinaryOpStub::decode_has_fixed_right_arg_from_minor_key(key);
+ int previous_fixed_right_arg_value =
+ BinaryOpStub::decode_fixed_right_arg_value_from_minor_key(key);
+
+ int32_t value;
+ bool new_has_fixed_right_arg =
+ op == Token::MOD &&
+ right->ToInt32(&value) &&
+ BinaryOpStub::can_encode_arg_value(value) &&
+ (previous_overall == BinaryOpIC::UNINITIALIZED ||
+ (previous_has_fixed_right_arg &&
+ previous_fixed_right_arg_value == value));
+ int32_t new_fixed_right_arg_value = new_has_fixed_right_arg ? value : 1;
+
+ if (previous_has_fixed_right_arg == new_has_fixed_right_arg) {
+ if (new_overall == BinaryOpIC::SMI && previous_overall == BinaryOpIC::SMI) {
+ if (op == Token::DIV ||
+ op == Token::MUL ||
+ op == Token::SHR ||
+ kSmiValueSize == 32) {
+ // Arithmetic on two Smi inputs has yielded a heap number.
+ // That is the only way to get here from the Smi stub.
+ // With 32-bit Smis, all overflows give heap numbers, but with
+ // 31-bit Smis, most operations overflow to int32 results.
+ result_type = BinaryOpIC::NUMBER;
+ } else {
+ // Other operations on SMIs that overflow yield int32s.
+ result_type = BinaryOpIC::INT32;
+ }
}
- }
- if (new_overall == BinaryOpIC::INT32 &&
- previous_overall == BinaryOpIC::INT32) {
- if (new_left == previous_left && new_right == previous_right) {
- result_type = BinaryOpIC::NUMBER;
+ if (new_overall == BinaryOpIC::INT32 &&
+ previous_overall == BinaryOpIC::INT32) {
+ if (new_left == previous_left && new_right == previous_right) {
+ result_type = BinaryOpIC::NUMBER;
+ }
}
}
- BinaryOpStub stub(key, new_left, new_right, result_type);
+ BinaryOpStub stub(key, new_left, new_right, result_type,
+ new_has_fixed_right_arg, new_fixed_right_arg_value);
Handle<Code> code = stub.GetCode(isolate);
if (!code.is_null()) {
#ifdef DEBUG
if (FLAG_trace_ic) {
PrintF("[BinaryOpIC in ");
JavaScriptFrame::PrintTop(isolate, stdout, false, true);
- PrintF(" ((%s+%s)->((%s+%s)->%s))#%s @ %p]\n",
- BinaryOpIC::GetName(previous_left),
- BinaryOpIC::GetName(previous_right),
- BinaryOpIC::GetName(new_left),
- BinaryOpIC::GetName(new_right),
- BinaryOpIC::GetName(result_type),
- Token::Name(op),
- static_cast<void*>(*code));
+ PrintF(" ");
+ TraceBinaryOp(previous_left, previous_right, previous_has_fixed_right_arg,
+ previous_fixed_right_arg_value, previous_result);
+ PrintF(" => ");
+ TraceBinaryOp(new_left, new_right, new_has_fixed_right_arg,
+ new_fixed_right_arg_value, result_type);
+ PrintF(" #%s @ %p]\n", Token::Name(op), static_cast<void*>(*code));
}
#endif
BinaryOpIC ic(isolate);
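The new fixed-right-arg feedback lets a MOD site that has only ever seen one encodable constant divisor compile a stub specialized to that divisor; any other right operand is a miss that repatches to a more generic stub. A standalone sketch of why that pays off (the power-of-two check and the miss protocol here are illustrative, not BinaryOpStub's actual layout):

    #include <cstdint>

    // Specialized x % C for the single C this site has observed. Returns
    // false on a miss so the caller can repatch to a generic stub.
    bool ModWithFixedRightArg(int32_t left, int32_t right,
                              int32_t fixed_right, int32_t* result) {
      if (right != fixed_right || fixed_right <= 0) return false;  // miss
      if ((fixed_right & (fixed_right - 1)) == 0 && left >= 0) {
        *result = left & (fixed_right - 1);  // power of two: mask, no idiv
      } else {
        *result = left % fixed_right;
      }
      return true;
    }

Going by the TraceBinaryOp format strings above, a MOD site whose divisor has always been 2 would render each state as something like SMI*SMI{2}->SMI on either side of the `=>` in the trace line.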
@@ -2880,7 +2921,7 @@ void CompareIC::UpdateCaches(Handle<Object> x, Handle<Object> y) {
// Used from ICCompareStub::GenerateMiss in code-stubs-<arch>.cc.
RUNTIME_FUNCTION(Code*, CompareIC_Miss) {
- NoHandleAllocation na(isolate);
+ SealHandleScope shs(isolate);
ASSERT(args.length() == 3);
CompareIC ic(isolate, static_cast<Token::Value>(args.smi_at(2)));
ic.UpdateCaches(args.at<Object>(0), args.at<Object>(1));
@@ -2892,7 +2933,7 @@ void CompareNilIC::Clear(Address address, Code* target) {
if (target->ic_state() == UNINITIALIZED) return;
Code::ExtraICState state = target->extended_extra_ic_state();
- CompareNilICStub stub(state, CompareNilICStub::CODE_STUB_IS_MISS);
+ CompareNilICStub stub(state, HydrogenCodeStub::UNINITIALIZED);
stub.ClearTypes();
Code* code = NULL;
@@ -2902,16 +2943,8 @@ void CompareNilIC::Clear(Address address, Code* target) {
}
-MaybeObject* CompareNilIC::DoCompareNilSlow(EqualityKind kind,
- NilValue nil,
+MaybeObject* CompareNilIC::DoCompareNilSlow(NilValue nil,
Handle<Object> object) {
- if (kind == kStrictEquality) {
- if (nil == kNullValue) {
- return Smi::FromInt(object->IsNull());
- } else {
- return Smi::FromInt(object->IsUndefined());
- }
- }
if (object->IsNull() || object->IsUndefined()) {
return Smi::FromInt(true);
}
@@ -2928,9 +2961,10 @@ MaybeObject* CompareNilIC::CompareNil(Handle<Object> object) {
// types must be supported as a result of the miss.
bool already_monomorphic = stub.IsMonomorphic();
+ CompareNilICStub::Types old_types = stub.GetTypes();
stub.Record(object);
+ old_types.TraceTransition(stub.GetTypes());
- EqualityKind kind = stub.GetKind();
NilValue nil = stub.GetNilValue();
// Find or create the specialized stub to support the new set of types.
@@ -2943,15 +2977,8 @@ MaybeObject* CompareNilIC::CompareNil(Handle<Object> object) {
} else {
code = stub.GetCode(isolate());
}
-
- patch(*code);
-
- return DoCompareNilSlow(kind, nil, object);
-}
-
-
-void CompareNilIC::patch(Code* code) {
- set_target(code);
+ set_target(*code);
+ return DoCompareNilSlow(nil, object);
}
@@ -2970,28 +2997,23 @@ RUNTIME_FUNCTION(MaybeObject*, Unreachable) {
}
-RUNTIME_FUNCTION(MaybeObject*, ToBoolean_Patch) {
- ASSERT(args.length() == 3);
-
- HandleScope scope(isolate);
- Handle<Object> object = args.at<Object>(0);
- Register tos = Register::from_code(args.smi_at(1));
- ToBooleanStub::Types old_types(args.smi_at(2));
-
- ToBooleanStub::Types new_types(old_types);
- bool to_boolean_value = new_types.Record(object);
- old_types.TraceTransition(new_types);
-
- ToBooleanStub stub(tos, new_types);
- Handle<Code> code = stub.GetCode(isolate);
- ToBooleanIC ic(isolate);
- ic.patch(*code);
+MaybeObject* ToBooleanIC::ToBoolean(Handle<Object> object,
+ Code::ExtraICState extra_ic_state) {
+ ToBooleanStub stub(extra_ic_state);
+ bool to_boolean_value = stub.Record(object);
+ Handle<Code> code = stub.GetCode(isolate());
+ set_target(*code);
return Smi::FromInt(to_boolean_value ? 1 : 0);
}
-void ToBooleanIC::patch(Code* code) {
- set_target(code);
+RUNTIME_FUNCTION(MaybeObject*, ToBooleanIC_Miss) {
+ ASSERT(args.length() == 1);
+ HandleScope scope(isolate);
+ Handle<Object> object = args.at<Object>(0);
+ ToBooleanIC ic(isolate);
+ Code::ExtraICState ic_state = ic.target()->extended_extra_ic_state();
+ return ic.ToBoolean(object, ic_state);
}
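With this change ToBoolean becomes a conventional miss handler: record the observed type in the stub's state, compile and install a stub covering the enlarged type set, and still answer the current request. A standalone model of that shape (the bitset stands in for the type set V8 encodes in Code::ExtraICState; names are illustrative):

    #include <bitset>

    struct ToBooleanSketch {
      std::bitset<8> seen;  // one bit per observed type kind

      bool Miss(int type_tag, bool truthiness) {
        seen.set(type_tag);    // widen the feedback...
        InstallStubFor(seen);  // ...regenerate and patch the call site...
        return truthiness;     // ...and still answer this call.
      }
      void InstallStubFor(const std::bitset<8>&) { /* GetCode + set_target */ }
    };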
diff --git a/deps/v8/src/ic.h b/deps/v8/src/ic.h
index a044f0cc9d..8c448eb7fd 100644
--- a/deps/v8/src/ic.h
+++ b/deps/v8/src/ic.h
@@ -62,7 +62,7 @@ namespace internal {
ICU(CompareIC_Miss) \
ICU(CompareNilIC_Miss) \
ICU(Unreachable) \
- ICU(ToBoolean_Patch)
+ ICU(ToBooleanIC_Miss)
//
// IC is the base class for LoadIC, StoreIC, CallIC, KeyedLoadIC,
// and KeyedStoreIC.
@@ -369,6 +369,7 @@ class LoadIC: public IC {
static void GenerateMiss(MacroAssembler* masm);
static void GenerateMegamorphic(MacroAssembler* masm);
static void GenerateNormal(MacroAssembler* masm);
+ static void GenerateRuntimeGetProperty(MacroAssembler* masm);
MUST_USE_RESULT MaybeObject* Load(State state,
Handle<Object> object,
@@ -378,8 +379,7 @@ class LoadIC: public IC {
virtual Code::Kind kind() const { return Code::LOAD_IC; }
virtual Handle<Code> generic_stub() const {
- UNREACHABLE();
- return Handle<Code>::null();
+ return isolate()->builtins()->LoadIC_Slow();
}
virtual Handle<Code> megamorphic_stub() {
@@ -789,19 +789,16 @@ class CompareNilIC: public IC {
static void Clear(Address address, Code* target);
- void patch(Code* code);
-
- static MUST_USE_RESULT MaybeObject* DoCompareNilSlow(EqualityKind kind,
- NilValue nil,
+ static MUST_USE_RESULT MaybeObject* DoCompareNilSlow(NilValue nil,
Handle<Object> object);
};
class ToBooleanIC: public IC {
public:
- explicit ToBooleanIC(Isolate* isolate) : IC(NO_EXTRA_FRAME, isolate) { }
+ explicit ToBooleanIC(Isolate* isolate) : IC(EXTRA_CALL_FRAME, isolate) { }
- void patch(Code* code);
+ MaybeObject* ToBoolean(Handle<Object> object, Code::ExtraICState state);
};
@@ -812,6 +809,7 @@ void PatchInlinedSmiCode(Address address, InlinedSmiCheck check);
DECLARE_RUNTIME_FUNCTION(MaybeObject*, KeyedLoadIC_MissFromStubFailure);
DECLARE_RUNTIME_FUNCTION(MaybeObject*, KeyedStoreIC_MissFromStubFailure);
DECLARE_RUNTIME_FUNCTION(MaybeObject*, CompareNilIC_Miss);
+DECLARE_RUNTIME_FUNCTION(MaybeObject*, ToBooleanIC_Miss);
} } // namespace v8::internal
diff --git a/deps/v8/src/incremental-marking.cc b/deps/v8/src/incremental-marking.cc
index bacbb93ad2..e19d6e28f6 100644
--- a/deps/v8/src/incremental-marking.cc
+++ b/deps/v8/src/incremental-marking.cc
@@ -54,7 +54,8 @@ IncrementalMarking::IncrementalMarking(Heap* heap)
should_hurry_(false),
marking_speed_(0),
allocated_(0),
- no_marking_scope_depth_(0) {
+ no_marking_scope_depth_(0),
+ unscanned_bytes_of_large_object_(0) {
}
@@ -241,6 +242,7 @@ class IncrementalMarkingMarkingVisitor
chunk->progress_bar());
int end_offset = Min(object_size,
start_offset + kProgressBarScanningChunk);
+ int already_scanned_offset = start_offset;
bool scan_until_end = false;
do {
VisitPointersWithAnchor(heap,
@@ -254,6 +256,8 @@ class IncrementalMarkingMarkingVisitor
chunk->set_progress_bar(start_offset);
if (start_offset < object_size) {
heap->incremental_marking()->marking_deque()->UnshiftGrey(object);
+ heap->incremental_marking()->NotifyIncompleteScanOfObject(
+ object_size - (start_offset - already_scanned_offset));
}
} else {
FixedArrayVisitor::Visit(map, object);
@@ -739,8 +743,9 @@ void IncrementalMarking::ProcessMarkingDeque(intptr_t bytes_to_process) {
if (map == filler_map) continue;
int size = obj->SizeFromMap(map);
- bytes_to_process -= size;
+ unscanned_bytes_of_large_object_ = 0;
VisitObject(map, obj, size);
+ bytes_to_process -= (size - unscanned_bytes_of_large_object_);
}
}
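The new unscanned_bytes_of_large_object_ counter fixes step accounting for large arrays scanned via the progress bar: previously a marking step was charged the full object size even when only one chunk was actually scanned. The arithmetic, in sketch form with illustrative sizes:

    #include <cstdint>

    // Budget charge per VisitObject call after the fix. For a 1 MB array
    // scanned in one 64 KB progress-bar chunk, NotifyIncompleteScanOfObject
    // reports 1 MB - 64 KB unscanned, so the step is charged only 64 KB.
    int64_t StepCharge(int64_t object_size, int64_t scanned_this_step) {
      int64_t unscanned = object_size - scanned_this_step;
      return object_size - unscanned;  // == scanned_this_step, the real work
    }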
diff --git a/deps/v8/src/incremental-marking.h b/deps/v8/src/incremental-marking.h
index 47d5a518bf..d47c300ef3 100644
--- a/deps/v8/src/incremental-marking.h
+++ b/deps/v8/src/incremental-marking.h
@@ -220,6 +220,10 @@ class IncrementalMarking {
void UncommitMarkingDeque();
+ void NotifyIncompleteScanOfObject(int unscanned_bytes) {
+ unscanned_bytes_of_large_object_ = unscanned_bytes;
+ }
+
private:
int64_t SpaceLeftInOldSpace();
@@ -274,6 +278,8 @@ class IncrementalMarking {
int no_marking_scope_depth_;
+ int unscanned_bytes_of_large_object_;
+
DISALLOW_IMPLICIT_CONSTRUCTORS(IncrementalMarking);
};
diff --git a/deps/v8/src/interpreter-irregexp.cc b/deps/v8/src/interpreter-irregexp.cc
index e678e6cf12..2fc9fd3025 100644
--- a/deps/v8/src/interpreter-irregexp.cc
+++ b/deps/v8/src/interpreter-irregexp.cc
@@ -618,7 +618,7 @@ RegExpImpl::IrregexpResult IrregexpInterpreter::Match(
int start_position) {
ASSERT(subject->IsFlat());
- AssertNoAllocation a;
+ DisallowHeapAllocation no_gc;
const byte* code_base = code_array->GetDataStartAddress();
uc16 previous_char = '\n';
String::FlatContent subject_content = subject->GetFlatContent();
diff --git a/deps/v8/src/isolate.cc b/deps/v8/src/isolate.cc
index 8ae0c74d0f..7cce14aa23 100644
--- a/deps/v8/src/isolate.cc
+++ b/deps/v8/src/isolate.cc
@@ -336,6 +336,9 @@ Isolate* Isolate::default_isolate_ = NULL;
Thread::LocalStorageKey Isolate::isolate_key_;
Thread::LocalStorageKey Isolate::thread_id_key_;
Thread::LocalStorageKey Isolate::per_isolate_thread_data_key_;
+#ifdef DEBUG
+Thread::LocalStorageKey PerThreadAssertScopeBase::thread_local_key;
+#endif // DEBUG
Mutex* Isolate::process_wide_mutex_ = OS::CreateMutex();
Isolate::ThreadDataTable* Isolate::thread_data_table_ = NULL;
Atomic32 Isolate::isolate_counter_ = 0;
@@ -392,6 +395,9 @@ void Isolate::EnsureDefaultIsolate() {
isolate_key_ = Thread::CreateThreadLocalKey();
thread_id_key_ = Thread::CreateThreadLocalKey();
per_isolate_thread_data_key_ = Thread::CreateThreadLocalKey();
+#ifdef DEBUG
+ PerThreadAssertScopeBase::thread_local_key = Thread::CreateThreadLocalKey();
+#endif // DEBUG
thread_data_table_ = new Isolate::ThreadDataTable();
default_isolate_ = new Isolate();
}
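The new PerThreadAssertScopeBase::thread_local_key gives each thread its own assert-scope state, which is what makes DisallowHeapAllocation nestable and thread-safe in debug builds. A rough standalone model of the mechanism, substituting C++ thread_local for Thread::CreateThreadLocalKey (a sketch, not V8's implementation):

    #include <cassert>
    #include <cstddef>

    thread_local bool g_allocation_allowed = true;  // per-thread scope state

    class DisallowHeapAllocationSketch {
     public:
      DisallowHeapAllocationSketch() : previous_(g_allocation_allowed) {
        g_allocation_allowed = false;
      }
      ~DisallowHeapAllocationSketch() { g_allocation_allowed = previous_; }
     private:
      bool previous_;  // saved so scopes nest correctly
    };

    void* AllocateSketch(std::size_t bytes) {
      assert(g_allocation_allowed && "heap allocation inside a no-GC scope");
      return ::operator new(bytes);
    }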
@@ -889,7 +895,7 @@ void Isolate::PrintStack(StringStream* accumulator) {
return;
}
// The MentionedObjectCache is not GC-proof at the moment.
- AssertNoAllocation nogc;
+ DisallowHeapAllocation no_gc;
ASSERT(StringStream::IsMentionedObjectCacheClear());
// Avoid printing anything if there are no frames.
@@ -974,7 +980,7 @@ bool Isolate::MayNamedAccess(JSObject* receiver, Object* key,
ASSERT(receiver->IsAccessCheckNeeded());
// The callers of this method are not expecting a GC.
- AssertNoAllocation no_gc;
+ DisallowHeapAllocation no_gc;
// Skip checks for hidden properties access. Note, we do not
// require existence of a context in this case.
@@ -1332,6 +1338,7 @@ void Isolate::DoThrow(Object* exception, MessageLocation* location) {
}
}
Handle<Object> message_obj = MessageHandler::MakeMessageObject(
+ this,
"uncaught_exception",
location,
HandleVector<Object>(&exception_arg, 1),
@@ -1780,9 +1787,6 @@ Isolate::Isolate()
memset(&js_spill_information_, 0, sizeof(js_spill_information_));
memset(code_kind_statistics_, 0,
sizeof(code_kind_statistics_[0]) * Code::NUMBER_OF_KINDS);
-
- compiler_thread_handle_deref_state_ = HandleDereferenceGuard::ALLOW;
- execution_thread_handle_deref_state_ = HandleDereferenceGuard::ALLOW;
#endif
#ifdef ENABLE_DEBUGGER_SUPPORT
@@ -2245,7 +2249,9 @@ bool Isolate::Init(Deserializer* des) {
stub.InitializeInterfaceDescriptor(
this, code_stub_interface_descriptor(CodeStub::FastCloneShallowArray));
CompareNilICStub::InitializeForIsolate(this);
+ ToBooleanStub::InitializeForIsolate(this);
ArrayConstructorStubBase::InstallDescriptors(this);
+ InternalArrayConstructorStubBase::InstallDescriptors(this);
}
if (FLAG_parallel_recompilation) optimizing_compiler_thread_.Start();
@@ -2404,34 +2410,6 @@ void Isolate::UnlinkDeferredHandles(DeferredHandles* deferred) {
}
-#ifdef DEBUG
-HandleDereferenceGuard::State Isolate::HandleDereferenceGuardState() {
- if (execution_thread_handle_deref_state_ == HandleDereferenceGuard::ALLOW &&
- compiler_thread_handle_deref_state_ == HandleDereferenceGuard::ALLOW) {
- // Short-cut to avoid polling thread id.
- return HandleDereferenceGuard::ALLOW;
- }
- if (FLAG_parallel_recompilation &&
- optimizing_compiler_thread()->IsOptimizerThread()) {
- return compiler_thread_handle_deref_state_;
- } else {
- return execution_thread_handle_deref_state_;
- }
-}
-
-
-void Isolate::SetHandleDereferenceGuardState(
- HandleDereferenceGuard::State state) {
- if (FLAG_parallel_recompilation &&
- optimizing_compiler_thread()->IsOptimizerThread()) {
- compiler_thread_handle_deref_state_ = state;
- } else {
- execution_thread_handle_deref_state_ = state;
- }
-}
-#endif
-
-
HStatistics* Isolate::GetHStatistics() {
if (hstatistics() == NULL) set_hstatistics(new HStatistics());
return hstatistics();
diff --git a/deps/v8/src/isolate.h b/deps/v8/src/isolate.h
index de7e35e591..76a5a41e70 100644
--- a/deps/v8/src/isolate.h
+++ b/deps/v8/src/isolate.h
@@ -31,6 +31,7 @@
#include "../include/v8-debug.h"
#include "allocation.h"
#include "apiutils.h"
+#include "assert-scope.h"
#include "atomicops.h"
#include "builtins.h"
#include "contexts.h"
@@ -994,10 +995,6 @@ class Isolate {
}
int* code_kind_statistics() { return code_kind_statistics_; }
-
- HandleDereferenceGuard::State HandleDereferenceGuardState();
-
- void SetHandleDereferenceGuardState(HandleDereferenceGuard::State state);
#endif
#if defined(V8_TARGET_ARCH_ARM) && !defined(__arm__) || \
@@ -1310,9 +1307,6 @@ class Isolate {
HistogramInfo heap_histograms_[LAST_TYPE + 1];
JSObject::SpillInformation js_spill_information_;
int code_kind_statistics_[Code::NUMBER_OF_KINDS];
-
- HandleDereferenceGuard::State compiler_thread_handle_deref_state_;
- HandleDereferenceGuard::State execution_thread_handle_deref_state_;
#endif
#ifdef ENABLE_DEBUGGER_SUPPORT
@@ -1486,7 +1480,6 @@ class PostponeInterruptsScope BASE_EMBEDDED {
// Temporary macros for accessing current isolate and its subobjects.
// They provide better readability, especially when used a lot in the code.
#define HEAP (v8::internal::Isolate::Current()->heap())
-#define FACTORY (v8::internal::Isolate::Current()->factory())
#define ISOLATE (v8::internal::Isolate::Current())
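Dropping the FACTORY macro is part of the same cleanup visible in jsregexp.cc and liveedit.cc below: each use of FACTORY hid a call to Isolate::Current(), a thread-local lookup, whereas callers now obtain an isolate explicitly (from a parameter or via GetIsolate()) and ask it for its factory once. The before/after shape, taken from the jsregexp.cc hunk:

    // Before: every use re-resolves the current isolate behind the macro.
    //   Handle<ByteArray> ba = FACTORY->NewByteArray(kSize, TENURED);
    // After: the isolate is resolved once and its factory used explicitly.
    //   Factory* factory = Isolate::Current()->factory();
    //   Handle<ByteArray> ba = factory->NewByteArray(kSize, TENURED);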
diff --git a/deps/v8/src/json-parser.h b/deps/v8/src/json-parser.h
index ddc3b736e3..152bd63716 100644
--- a/deps/v8/src/json-parser.h
+++ b/deps/v8/src/json-parser.h
@@ -106,7 +106,7 @@ class JsonParser BASE_EMBEDDED {
bool ParseJsonString(Handle<String> expected) {
int length = expected->length();
if (source_->length() - position_ - 1 > length) {
- AssertNoAllocation no_gc;
+ DisallowHeapAllocation no_gc;
String::FlatContent content = expected->GetFlatContent();
if (content.IsAscii()) {
ASSERT_EQ('"', c0_);
@@ -457,16 +457,6 @@ Handle<Object> JsonParser<seq_ascii>::ParseJsonObject() {
int length = properties.length();
for (int i = 0; i < length; i++) {
Handle<Object> value = properties[i];
- // If the target representation is double and the value is already
- // double, use the existing box.
- if (FLAG_track_double_fields && value->IsSmi()) {
- Representation representation =
- map->instance_descriptors()->GetDetails(i).representation();
- if (representation.IsDouble()) {
- value = factory()->NewHeapNumber(
- Handle<Smi>::cast(value)->value());
- }
- }
json_object->FastPropertyAtPut(i, *value);
}
}
diff --git a/deps/v8/src/json-stringifier.h b/deps/v8/src/json-stringifier.h
index b67a9f6b6a..31aebd6ddb 100644
--- a/deps/v8/src/json-stringifier.h
+++ b/deps/v8/src/json-stringifier.h
@@ -300,7 +300,7 @@ MaybeObject* BasicJsonStringifier::StringifyString(Isolate* isolate,
if (object->IsOneByteRepresentationUnderneath()) {
Handle<String> result =
isolate->factory()->NewRawOneByteString(worst_case_length);
- AssertNoAllocation no_alloc;
+ DisallowHeapAllocation no_gc;
return StringifyString_<SeqOneByteString>(
isolate,
object->GetFlatContent().ToOneByteVector(),
@@ -308,7 +308,7 @@ MaybeObject* BasicJsonStringifier::StringifyString(Isolate* isolate,
} else {
Handle<String> result =
isolate->factory()->NewRawTwoByteString(worst_case_length);
- AssertNoAllocation no_alloc;
+ DisallowHeapAllocation no_gc;
return StringifyString_<SeqTwoByteString>(
isolate,
object->GetFlatContent().ToUC16Vector(),
@@ -321,7 +321,7 @@ template <typename ResultType, typename Char>
MaybeObject* BasicJsonStringifier::StringifyString_(Isolate* isolate,
Vector<Char> vector,
Handle<String> result) {
- AssertNoAllocation no_allocation;
+ DisallowHeapAllocation no_gc;
int final_size = 0;
ResultType* dest = ResultType::cast(*result);
dest->Set(final_size++, '\"');
@@ -640,7 +640,7 @@ BasicJsonStringifier::Result BasicJsonStringifier::SerializeJSObject(
if (!name->IsString()) continue;
Handle<String> key = Handle<String>::cast(name);
PropertyDetails details = map->instance_descriptors()->GetDetails(i);
- if (details.IsDontEnum() || details.IsDeleted()) continue;
+ if (details.IsDontEnum()) continue;
Handle<Object> property;
if (details.type() == FIELD && *map == object->map()) {
property = Handle<Object>(
@@ -759,7 +759,7 @@ void BasicJsonStringifier::SerializeString_(Handle<String> string) {
// is a more pessimistic estimate, but faster to calculate.
if (((part_length_ - current_index_) >> 3) > length) {
- AssertNoAllocation no_allocation;
+ DisallowHeapAllocation no_gc;
Vector<const Char> vector = GetCharVector<Char>(string);
if (is_ascii) {
current_index_ += SerializeStringUnchecked_(
@@ -773,9 +773,16 @@ void BasicJsonStringifier::SerializeString_(Handle<String> string) {
length);
}
} else {
- String* string_location = *string;
- Vector<const Char> vector = GetCharVector<Char>(string);
+ String* string_location = NULL;
+ Vector<const Char> vector(NULL, 0);
for (int i = 0; i < length; i++) {
+ // If GC moved the string, we need to refresh the vector.
+ if (*string != string_location) {
+ DisallowHeapAllocation no_gc;
+ // This does not actually prevent the string from being relocated later.
+ vector = GetCharVector<Char>(string);
+ string_location = *string;
+ }
Char c = vector[i];
if (DoNotEscape(c)) {
Append_<is_ascii, Char>(c);
@@ -783,11 +790,6 @@ void BasicJsonStringifier::SerializeString_(Handle<String> string) {
Append_<is_ascii, uint8_t>(reinterpret_cast<const uint8_t*>(
&JsonEscapeTable[c * kJsonEscapeTableEntrySize]));
}
- // If GC moved the string, we need to refresh the vector.
- if (*string != string_location) {
- vector = GetCharVector<Char>(string);
- string_location = *string;
- }
}
}
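Moving the refresh to the top of the loop, with string_location starting at NULL, guarantees the character vector is re-acquired before its first use and after any allocation in Append_ that might have moved the string; the old code refreshed only after reading. The same shape in a standalone model, with an explicitly movable buffer standing in for a relocatable heap string (all names illustrative):

    #include <cstddef>
    #include <functional>
    #include <string>

    // `current` returns the buffer's present location, which may change
    // between iterations: the analogue of GC moving the string.
    void SerializeSketch(const std::function<const std::string*()>& current) {
      const std::string* location = nullptr;  // NULL start forces a fetch
      const char* view = nullptr;             // on the very first iteration
      const std::size_t length = current()->size();
      for (std::size_t i = 0; i < length; i++) {
        if (current() != location) {          // moved since last refresh?
          location = current();
          view = location->data();            // re-acquire before reading
        }
        char c = view[i];
        (void)c;                              // ... escape or append c ...
      }
    }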
@@ -825,17 +827,16 @@ Vector<const uc16> BasicJsonStringifier::GetCharVector(Handle<String> string) {
void BasicJsonStringifier::SerializeString(Handle<String> object) {
- FlattenString(object);
- String::FlatContent flat = object->GetFlatContent();
+ object = FlattenGetString(object);
if (is_ascii_) {
- if (flat.IsAscii()) {
+ if (object->IsOneByteRepresentation()) {
SerializeString_<true, uint8_t>(object);
} else {
ChangeEncoding();
SerializeString(object);
}
} else {
- if (flat.IsAscii()) {
+ if (object->IsOneByteRepresentation()) {
SerializeString_<false, uint8_t>(object);
} else {
SerializeString_<false, uc16>(object);
diff --git a/deps/v8/src/jsregexp.cc b/deps/v8/src/jsregexp.cc
index fd87a80539..7838c04a9e 100644
--- a/deps/v8/src/jsregexp.cc
+++ b/deps/v8/src/jsregexp.cc
@@ -270,7 +270,7 @@ static void SetAtomLastCapture(FixedArray* array,
String* subject,
int from,
int to) {
- NoHandleAllocation no_handles(array->GetIsolate());
+ SealHandleScope shs(array->GetIsolate());
RegExpImpl::SetLastCaptureCount(array, 2);
RegExpImpl::SetLastSubject(array, subject);
RegExpImpl::SetLastInput(array, subject);
@@ -290,7 +290,7 @@ int RegExpImpl::AtomExecRaw(Handle<JSRegExp> regexp,
ASSERT(index <= subject->length());
if (!subject->IsFlat()) FlattenString(subject);
- AssertNoAllocation no_heap_allocation; // ensure vectors stay valid
+ DisallowHeapAllocation no_gc; // ensure vectors stay valid
String* needle = String::cast(regexp->DataAt(JSRegExp::kAtomPatternIndex));
int needle_len = needle->length();
@@ -353,7 +353,7 @@ Handle<Object> RegExpImpl::AtomExec(Handle<JSRegExp> re,
if (res == RegExpImpl::RE_FAILURE) return isolate->factory()->null_value();
ASSERT_EQ(res, RegExpImpl::RE_SUCCESS);
- NoHandleAllocation no_handles(isolate);
+ SealHandleScope shs(isolate);
FixedArray* array = FixedArray::cast(last_match_info->elements());
SetAtomLastCapture(array, *subject, output_registers[0], output_registers[1]);
return last_match_info;
@@ -691,7 +691,7 @@ Handle<JSArray> RegExpImpl::SetLastMatchInfo(Handle<JSArray> last_match_info,
ASSERT(last_match_info->HasFastObjectElements());
int capture_register_count = (capture_count + 1) * 2;
last_match_info->EnsureSize(capture_register_count + kLastMatchOverhead);
- AssertNoAllocation no_gc;
+ DisallowHeapAllocation no_allocation;
FixedArray* array = FixedArray::cast(last_match_info->elements());
if (match != NULL) {
for (int i = 0; i < capture_register_count; i += 2) {
@@ -950,10 +950,10 @@ TextElement TextElement::CharClass(
int TextElement::length() {
- if (type == ATOM) {
+ if (text_type == ATOM) {
return data.u_atom->length();
} else {
- ASSERT(type == CHAR_CLASS);
+ ASSERT(text_type == CHAR_CLASS);
return 1;
}
}
@@ -1165,7 +1165,7 @@ RegExpEngine::CompilationResult RegExpCompiler::Assemble(
bool Trace::DeferredAction::Mentions(int that) {
- if (type() == ActionNode::CLEAR_CAPTURES) {
+ if (action_type() == ActionNode::CLEAR_CAPTURES) {
Interval range = static_cast<DeferredClearCaptures*>(this)->range();
return range.Contains(that);
} else {
@@ -1191,7 +1191,7 @@ bool Trace::GetStoredPosition(int reg, int* cp_offset) {
action != NULL;
action = action->next()) {
if (action->Mentions(reg)) {
- if (action->type() == ActionNode::STORE_POSITION) {
+ if (action->action_type() == ActionNode::STORE_POSITION) {
*cp_offset = static_cast<DeferredCapture*>(action)->cp_offset();
return true;
} else {
@@ -1209,7 +1209,7 @@ int Trace::FindAffectedRegisters(OutSet* affected_registers,
for (DeferredAction* action = actions_;
action != NULL;
action = action->next()) {
- if (action->type() == ActionNode::CLEAR_CAPTURES) {
+ if (action->action_type() == ActionNode::CLEAR_CAPTURES) {
Interval range = static_cast<DeferredClearCaptures*>(action)->range();
for (int i = range.from(); i <= range.to(); i++)
affected_registers->Set(i, zone);
@@ -1273,7 +1273,7 @@ void Trace::PerformDeferredActions(RegExpMacroAssembler* assembler,
action != NULL;
action = action->next()) {
if (action->Mentions(reg)) {
- switch (action->type()) {
+ switch (action->action_type()) {
case ActionNode::SET_REGISTER: {
Trace::DeferredSetRegister* psr =
static_cast<Trace::DeferredSetRegister*>(action);
@@ -1873,8 +1873,9 @@ static void EmitUseLookupTable(
for (int i = j; i < kSize; i++) {
templ[i] = bit;
}
+ Factory* factory = Isolate::Current()->factory();
// TODO(erikcorry): Cache these.
- Handle<ByteArray> ba = FACTORY->NewByteArray(kSize, TENURED);
+ Handle<ByteArray> ba = factory->NewByteArray(kSize, TENURED);
for (int i = 0; i < kSize; i++) {
ba->set(i, templ[i]);
}
@@ -2303,7 +2304,7 @@ int ActionNode::EatsAtLeast(int still_to_find,
int budget,
bool not_at_start) {
if (budget <= 0) return 0;
- if (type_ == POSITIVE_SUBMATCH_SUCCESS) return 0; // Rewinds input!
+ if (action_type_ == POSITIVE_SUBMATCH_SUCCESS) return 0; // Rewinds input!
return on_success()->EatsAtLeast(still_to_find,
budget - 1,
not_at_start);
@@ -2314,9 +2315,9 @@ void ActionNode::FillInBMInfo(int offset,
int budget,
BoyerMooreLookahead* bm,
bool not_at_start) {
- if (type_ == BEGIN_SUBMATCH) {
+ if (action_type_ == BEGIN_SUBMATCH) {
bm->SetRest(offset);
- } else if (type_ != POSITIVE_SUBMATCH_SUCCESS) {
+ } else if (action_type_ != POSITIVE_SUBMATCH_SUCCESS) {
on_success()->FillInBMInfo(offset, budget - 1, bm, not_at_start);
}
SaveBMInfo(bm, not_at_start, offset);
@@ -2332,7 +2333,7 @@ int AssertionNode::EatsAtLeast(int still_to_find,
// implies false. So lets just return the max answer (still_to_find) since
// that won't prevent us from preloading a lot of characters for the other
// branches in the node graph.
- if (type() == AT_START && not_at_start) return still_to_find;
+ if (assertion_type() == AT_START && not_at_start) return still_to_find;
return on_success()->EatsAtLeast(still_to_find,
budget - 1,
not_at_start);
@@ -2344,7 +2345,7 @@ void AssertionNode::FillInBMInfo(int offset,
BoyerMooreLookahead* bm,
bool not_at_start) {
// Match the behaviour of EatsAtLeast on this node.
- if (type() == AT_START && not_at_start) return;
+ if (assertion_type() == AT_START && not_at_start) return;
on_success()->FillInBMInfo(offset, budget - 1, bm, not_at_start);
SaveBMInfo(bm, not_at_start, offset);
}
@@ -2561,7 +2562,7 @@ void TextNode::GetQuickCheckDetails(QuickCheckDetails* details,
}
for (int k = 0; k < elms_->length(); k++) {
TextElement elm = elms_->at(k);
- if (elm.type == TextElement::ATOM) {
+ if (elm.text_type == TextElement::ATOM) {
Vector<const uc16> quarks = elm.data.u_atom->data();
for (int i = 0; i < characters && i < quarks.length(); i++) {
QuickCheckDetails::Position* pos =
@@ -2814,7 +2815,7 @@ RegExpNode* TextNode::FilterASCII(int depth, bool ignore_case) {
int element_count = elms_->length();
for (int i = 0; i < element_count; i++) {
TextElement elm = elms_->at(i);
- if (elm.type == TextElement::ATOM) {
+ if (elm.text_type == TextElement::ATOM) {
Vector<const uc16> quarks = elm.data.u_atom->data();
for (int j = 0; j < quarks.length(); j++) {
uint16_t c = quarks[j];
@@ -2830,7 +2831,7 @@ RegExpNode* TextNode::FilterASCII(int depth, bool ignore_case) {
copy[j] = converted;
}
} else {
- ASSERT(elm.type == TextElement::CHAR_CLASS);
+ ASSERT(elm.text_type == TextElement::CHAR_CLASS);
RegExpCharacterClass* cc = elm.data.u_char_class;
ZoneList<CharacterRange>* ranges = cc->ranges(zone());
if (!CharacterRange::IsCanonical(ranges)) {
@@ -3085,7 +3086,7 @@ void AssertionNode::EmitBoundaryCheck(RegExpCompiler* compiler, Trace* trace) {
if (lookahead->at(0)->is_non_word()) next_is_word_character = Trace::FALSE;
if (lookahead->at(0)->is_word()) next_is_word_character = Trace::TRUE;
}
- bool at_boundary = (type_ == AssertionNode::AT_BOUNDARY);
+ bool at_boundary = (assertion_type_ == AssertionNode::AT_BOUNDARY);
if (next_is_word_character == Trace::UNKNOWN) {
Label before_non_word;
Label before_word;
@@ -3148,7 +3149,7 @@ void AssertionNode::GetQuickCheckDetails(QuickCheckDetails* details,
RegExpCompiler* compiler,
int filled_in,
bool not_at_start) {
- if (type_ == AT_START && not_at_start) {
+ if (assertion_type_ == AT_START && not_at_start) {
details->set_cannot_match();
return;
}
@@ -3161,7 +3162,7 @@ void AssertionNode::GetQuickCheckDetails(QuickCheckDetails* details,
void AssertionNode::Emit(RegExpCompiler* compiler, Trace* trace) {
RegExpMacroAssembler* assembler = compiler->macro_assembler();
- switch (type_) {
+ switch (assertion_type_) {
case AT_END: {
Label ok;
assembler->CheckPosition(trace->cp_offset(), &ok);
@@ -3254,7 +3255,7 @@ void TextNode::TextEmitPass(RegExpCompiler* compiler,
for (int i = preloaded ? 0 : element_count - 1; i >= 0; i--) {
TextElement elm = elms_->at(i);
int cp_offset = trace->cp_offset() + elm.cp_offset;
- if (elm.type == TextElement::ATOM) {
+ if (elm.text_type == TextElement::ATOM) {
Vector<const uc16> quarks = elm.data.u_atom->data();
for (int j = preloaded ? 0 : quarks.length() - 1; j >= 0; j--) {
if (first_element_checked && i == 0 && j == 0) continue;
@@ -3292,7 +3293,7 @@ void TextNode::TextEmitPass(RegExpCompiler* compiler,
}
}
} else {
- ASSERT_EQ(elm.type, TextElement::CHAR_CLASS);
+ ASSERT_EQ(elm.text_type, TextElement::CHAR_CLASS);
if (pass == CHARACTER_CLASS_MATCH) {
if (first_element_checked && i == 0) continue;
if (DeterminedAlready(quick_check, elm.cp_offset)) continue;
@@ -3315,7 +3316,7 @@ void TextNode::TextEmitPass(RegExpCompiler* compiler,
int TextNode::Length() {
TextElement elm = elms_->last();
ASSERT(elm.cp_offset >= 0);
- if (elm.type == TextElement::ATOM) {
+ if (elm.text_type == TextElement::ATOM) {
return elm.cp_offset + elm.data.u_atom->data().length();
} else {
return elm.cp_offset + 1;
@@ -3421,7 +3422,7 @@ void TextNode::MakeCaseIndependent(bool is_ascii) {
int element_count = elms_->length();
for (int i = 0; i < element_count; i++) {
TextElement elm = elms_->at(i);
- if (elm.type == TextElement::CHAR_CLASS) {
+ if (elm.text_type == TextElement::CHAR_CLASS) {
RegExpCharacterClass* cc = elm.data.u_char_class;
// None of the standard character classes is different in the case
// independent case and it slows us down if we don't know that.
@@ -3438,7 +3439,7 @@ void TextNode::MakeCaseIndependent(bool is_ascii) {
int TextNode::GreedyLoopTextLength() {
TextElement elm = elms_->at(elms_->length() - 1);
- if (elm.type == TextElement::CHAR_CLASS) {
+ if (elm.text_type == TextElement::CHAR_CLASS) {
return elm.cp_offset + 1;
} else {
return elm.cp_offset + elm.data.u_atom->data().length();
@@ -3450,7 +3451,7 @@ RegExpNode* TextNode::GetSuccessorOfOmnivorousTextNode(
RegExpCompiler* compiler) {
if (elms_->length() != 1) return NULL;
TextElement elm = elms_->at(0);
- if (elm.type != TextElement::CHAR_CLASS) return NULL;
+ if (elm.text_type != TextElement::CHAR_CLASS) return NULL;
RegExpCharacterClass* node = elm.data.u_char_class;
ZoneList<CharacterRange>* ranges = node->ranges(zone());
if (!CharacterRange::IsCanonical(ranges)) {
@@ -3827,8 +3828,8 @@ bool BoyerMooreLookahead::EmitSkipInstructions(RegExpMacroAssembler* masm) {
return true;
}
- Handle<ByteArray> boolean_skip_table =
- FACTORY->NewByteArray(kSize, TENURED);
+ Factory* factory = Isolate::Current()->factory();
+ Handle<ByteArray> boolean_skip_table = factory->NewByteArray(kSize, TENURED);
int skip_distance = GetSkipTable(
min_lookahead, max_lookahead, boolean_skip_table);
ASSERT(skip_distance != 0);
@@ -4195,7 +4196,7 @@ void ActionNode::Emit(RegExpCompiler* compiler, Trace* trace) {
RecursionCheck rc(compiler);
- switch (type_) {
+ switch (action_type_) {
case STORE_POSITION: {
Trace::DeferredCapture
new_capture(data_.u_position_register.reg,
@@ -4525,7 +4526,7 @@ void DotPrinter::VisitText(TextNode* that) {
for (int i = 0; i < that->elements()->length(); i++) {
if (i > 0) stream()->Add(" ");
TextElement elm = that->elements()->at(i);
- switch (elm.type) {
+ switch (elm.text_type) {
case TextElement::ATOM: {
stream()->Add("'%w'", elm.data.u_atom->data());
break;
@@ -4572,7 +4573,7 @@ void DotPrinter::VisitEnd(EndNode* that) {
void DotPrinter::VisitAssertion(AssertionNode* that) {
stream()->Add(" n%p [", that);
- switch (that->type()) {
+ switch (that->assertion_type()) {
case AssertionNode::AT_END:
stream()->Add("label=\"$\", shape=septagon");
break;
@@ -4599,7 +4600,7 @@ void DotPrinter::VisitAssertion(AssertionNode* that) {
void DotPrinter::VisitAction(ActionNode* that) {
stream()->Add(" n%p [", that);
- switch (that->type_) {
+ switch (that->action_type_) {
case ActionNode::SET_REGISTER:
stream()->Add("label=\"$%i:=%i\", shape=octagon",
that->data_.u_store_register.reg,
@@ -5012,7 +5013,7 @@ RegExpNode* RegExpAssertion::ToNode(RegExpCompiler* compiler,
NodeInfo info;
Zone* zone = compiler->zone();
- switch (type()) {
+ switch (assertion_type()) {
case START_OF_LINE:
return AssertionNode::AfterNewline(on_success);
case START_OF_INPUT:
@@ -5714,7 +5715,7 @@ void TextNode::CalculateOffsets() {
for (int i = 0; i < element_count; i++) {
TextElement& elm = elements()->at(i);
elm.cp_offset = cp_offset;
- if (elm.type == TextElement::ATOM) {
+ if (elm.text_type == TextElement::ATOM) {
cp_offset += elm.data.u_atom->data().length();
} else {
cp_offset++;
@@ -5834,7 +5835,7 @@ void TextNode::FillInBMInfo(int initial_offset,
return;
}
TextElement text = elements()->at(i);
- if (text.type == TextElement::ATOM) {
+ if (text.text_type == TextElement::ATOM) {
RegExpAtom* atom = text.data.u_atom;
for (int j = 0; j < atom->length(); j++, offset++) {
if (offset >= bm->length()) {
@@ -5857,7 +5858,7 @@ void TextNode::FillInBMInfo(int initial_offset,
}
}
} else {
- ASSERT(text.type == TextElement::CHAR_CLASS);
+ ASSERT(text.text_type == TextElement::CHAR_CLASS);
RegExpCharacterClass* char_class = text.data.u_char_class;
ZoneList<CharacterRange>* ranges = char_class->ranges(zone());
if (char_class->is_negated()) {
@@ -5970,7 +5971,7 @@ void DispatchTableConstructor::AddInverse(ZoneList<CharacterRange>* ranges) {
void DispatchTableConstructor::VisitText(TextNode* that) {
TextElement elm = that->elements()->at(0);
- switch (elm.type) {
+ switch (elm.text_type) {
case TextElement::ATOM: {
uc16 c = elm.data.u_atom->data()[0];
AddRange(CharacterRange(c, c));
diff --git a/deps/v8/src/jsregexp.h b/deps/v8/src/jsregexp.h
index 625f1925e3..181a1b26b1 100644
--- a/deps/v8/src/jsregexp.h
+++ b/deps/v8/src/jsregexp.h
@@ -429,13 +429,13 @@ FOR_EACH_REG_EXP_TREE_TYPE(FORWARD_DECLARE)
class TextElement {
public:
- enum Type {UNINITIALIZED, ATOM, CHAR_CLASS};
- TextElement() : type(UNINITIALIZED) { }
- explicit TextElement(Type t) : type(t), cp_offset(-1) { }
+ enum TextType {UNINITIALIZED, ATOM, CHAR_CLASS};
+ TextElement() : text_type(UNINITIALIZED) { }
+ explicit TextElement(TextType t) : text_type(t), cp_offset(-1) { }
static TextElement Atom(RegExpAtom* atom);
static TextElement CharClass(RegExpCharacterClass* char_class);
int length();
- Type type;
+ TextType text_type;
union {
RegExpAtom* u_atom;
RegExpCharacterClass* u_char_class;
@@ -739,7 +739,7 @@ class SeqRegExpNode: public RegExpNode {
class ActionNode: public SeqRegExpNode {
public:
- enum Type {
+ enum ActionType {
SET_REGISTER,
INCREMENT_REGISTER,
STORE_POSITION,
@@ -780,7 +780,7 @@ class ActionNode: public SeqRegExpNode {
int budget,
BoyerMooreLookahead* bm,
bool not_at_start);
- Type type() { return type_; }
+ ActionType action_type() { return action_type_; }
// TODO(erikcorry): We should allow some action nodes in greedy loops.
virtual int GreedyLoopTextLength() { return kNodeIsTooComplexForGreedyLoops; }
@@ -813,10 +813,10 @@ class ActionNode: public SeqRegExpNode {
int range_to;
} u_clear_captures;
} data_;
- ActionNode(Type type, RegExpNode* on_success)
+ ActionNode(ActionType action_type, RegExpNode* on_success)
: SeqRegExpNode(on_success),
- type_(type) { }
- Type type_;
+ action_type_(action_type) { }
+ ActionType action_type_;
friend class DotPrinter;
};
@@ -876,7 +876,7 @@ class TextNode: public SeqRegExpNode {
class AssertionNode: public SeqRegExpNode {
public:
- enum AssertionNodeType {
+ enum AssertionType {
AT_END,
AT_START,
AT_BOUNDARY,
@@ -909,8 +909,7 @@ class AssertionNode: public SeqRegExpNode {
int budget,
BoyerMooreLookahead* bm,
bool not_at_start);
- AssertionNodeType type() { return type_; }
- void set_type(AssertionNodeType type) { type_ = type; }
+ AssertionType assertion_type() { return assertion_type_; }
private:
void EmitBoundaryCheck(RegExpCompiler* compiler, Trace* trace);
@@ -918,9 +917,9 @@ class AssertionNode: public SeqRegExpNode {
void BacktrackIfPrevious(RegExpCompiler* compiler,
Trace* trace,
IfPrevious backtrack_if_previous);
- AssertionNode(AssertionNodeType t, RegExpNode* on_success)
- : SeqRegExpNode(on_success), type_(t) { }
- AssertionNodeType type_;
+ AssertionNode(AssertionType t, RegExpNode* on_success)
+ : SeqRegExpNode(on_success), assertion_type_(t) { }
+ AssertionType assertion_type_;
};
@@ -1337,14 +1336,14 @@ class Trace {
class DeferredAction {
public:
- DeferredAction(ActionNode::Type type, int reg)
- : type_(type), reg_(reg), next_(NULL) { }
+ DeferredAction(ActionNode::ActionType action_type, int reg)
+ : action_type_(action_type), reg_(reg), next_(NULL) { }
DeferredAction* next() { return next_; }
bool Mentions(int reg);
int reg() { return reg_; }
- ActionNode::Type type() { return type_; }
+ ActionNode::ActionType action_type() { return action_type_; }
private:
- ActionNode::Type type_;
+ ActionNode::ActionType action_type_;
int reg_;
DeferredAction* next_;
friend class Trace;
diff --git a/deps/v8/src/lithium-allocator.cc b/deps/v8/src/lithium-allocator.cc
index 74132b3b76..1fd921f191 100644
--- a/deps/v8/src/lithium-allocator.cc
+++ b/deps/v8/src/lithium-allocator.cc
@@ -1057,7 +1057,7 @@ void LAllocator::ResolvePhis(HBasicBlock* block) {
LInstruction* branch =
InstructionAt(cur_block->last_instruction_index());
if (branch->HasPointerMap()) {
- if (phi->representation().IsTagged()) {
+ if (phi->representation().IsTagged() && !phi->type().IsSmi()) {
branch->pointer_map()->RecordPointer(phi_operand, zone());
} else if (!phi->representation().IsDouble()) {
branch->pointer_map()->RecordUntagged(phi_operand, zone());
@@ -1348,6 +1348,7 @@ void LAllocator::BuildLiveRanges() {
PrintF("Function: %s\n", CodeStub::MajorName(major_key, false));
} else {
ASSERT(chunk_->info()->IsOptimizing());
+ AllowHandleDereference allow_deref;
PrintF("Function: %s\n",
*chunk_->info()->function()->debug_name()->ToCString());
}
@@ -1640,7 +1641,7 @@ void LAllocator::TraceAlloc(const char* msg, ...) {
bool LAllocator::HasTaggedValue(int virtual_register) const {
HValue* value = graph_->LookupValue(virtual_register);
if (value == NULL) return false;
- return value->representation().IsTagged();
+ return value->representation().IsTagged() && !value->type().IsSmi();
}
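Both lithium-allocator changes rest on the same fact: a value whose type is statically Smi is an immediate integer, not a heap pointer, so the GC never needs it in a pointer map even though its representation is tagged. A sketch of the tagging scheme behind that (V8 encodes Smis with tag bit 0 and heap pointers with tag bit 1):

    #include <cstdint>

    inline bool IsSmiEncoded(intptr_t tagged) { return (tagged & 1) == 0; }

    // Mirrors the new HasTaggedValue test: record a value for the GC only
    // if it is tagged AND might actually point into the heap.
    inline bool NeedsGCRecord(bool representation_is_tagged, bool type_is_smi) {
      return representation_is_tagged && !type_is_smi;
    }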
diff --git a/deps/v8/src/lithium.cc b/deps/v8/src/lithium.cc
index 539f4eefba..2993c9aa73 100644
--- a/deps/v8/src/lithium.cc
+++ b/deps/v8/src/lithium.cc
@@ -419,9 +419,8 @@ Representation LChunk::LookupLiteralRepresentation(
LChunk* LChunk::NewChunk(HGraph* graph) {
- NoHandleAllocation no_handles(graph->isolate());
- AssertNoAllocation no_gc;
-
+ DisallowHandleAllocation no_handles;
+ DisallowHeapAllocation no_gc;
int values = graph->GetMaximumValueID();
CompilationInfo* info = graph->info();
if (values > LUnallocated::kMaxVirtualRegisters) {
@@ -455,10 +454,7 @@ Handle<Code> LChunk::Codegen() {
MarkEmptyBlocks();
if (generator.GenerateCode()) {
- if (FLAG_trace_codegen) {
- PrintF("Crankshaft Compiler - ");
- }
- CodeGenerator::MakeCodePrologue(info());
+ CodeGenerator::MakeCodePrologue(info(), "optimized");
Code::Flags flags = info()->flags();
Handle<Code> code =
CodeGenerator::MakeCodeEpilogue(&assembler, flags, info());
diff --git a/deps/v8/src/lithium.h b/deps/v8/src/lithium.h
index 388f5658db..170e5c89bc 100644
--- a/deps/v8/src/lithium.h
+++ b/deps/v8/src/lithium.h
@@ -558,7 +558,7 @@ class LEnvironment: public ZoneObject {
Representation representation,
bool is_uint32) {
values_.Add(operand, zone());
- if (representation.IsTagged()) {
+ if (representation.IsSmiOrTagged()) {
ASSERT(!is_uint32);
is_tagged_.Add(values_.length() - 1);
}
@@ -769,9 +769,8 @@ int StackSlotOffset(int index);
enum NumberUntagDMode {
NUMBER_CANDIDATE_IS_SMI,
- NUMBER_CANDIDATE_IS_SMI_OR_HOLE,
- NUMBER_CANDIDATE_IS_SMI_CONVERT_HOLE,
- NUMBER_CANDIDATE_IS_ANY_TAGGED
+ NUMBER_CANDIDATE_IS_ANY_TAGGED,
+ NUMBER_CANDIDATE_IS_ANY_TAGGED_CONVERT_HOLE
};
diff --git a/deps/v8/src/liveedit.cc b/deps/v8/src/liveedit.cc
index b28cd3e872..a01e502300 100644
--- a/deps/v8/src/liveedit.cc
+++ b/deps/v8/src/liveedit.cc
@@ -631,10 +631,10 @@ static Handle<Object> UnwrapJSValue(Handle<JSValue> jsValue) {
// Wraps any object into a OpaqueReference, that will hide the object
// from JavaScript.
static Handle<JSValue> WrapInJSValue(Handle<Object> object) {
- Handle<JSFunction> constructor =
- Isolate::Current()->opaque_reference_function();
+ Isolate* isolate = Isolate::Current();
+ Handle<JSFunction> constructor = isolate->opaque_reference_function();
Handle<JSValue> result =
- Handle<JSValue>::cast(FACTORY->NewJSObject(constructor));
+ Handle<JSValue>::cast(isolate->factory()->NewJSObject(constructor));
result->set_value(*object);
return result;
}
@@ -662,7 +662,8 @@ template<typename S>
class JSArrayBasedStruct {
public:
static S Create() {
- Handle<JSArray> array = FACTORY->NewJSArray(S::kSize_);
+ Factory* factory = Isolate::Current()->factory();
+ Handle<JSArray> array = factory->NewJSArray(S::kSize_);
return S(array);
}
static S cast(Object* object) {
@@ -1069,7 +1070,7 @@ static void ReplaceCodeObject(Handle<Code> original,
ASSERT(!heap->InNewSpace(*substitution));
- AssertNoAllocation no_allocations_please;
+ DisallowHeapAllocation no_allocation;
ReplacingVisitor visitor(*original, *substitution);
@@ -1144,7 +1145,7 @@ class LiteralFixer {
template<typename Visitor>
static void IterateJSFunctions(SharedFunctionInfo* shared_info,
Visitor* visitor) {
- AssertNoAllocation no_allocations_please;
+ DisallowHeapAllocation no_allocation;
HeapIterator iterator(shared_info->GetHeap());
for (HeapObject* obj = iterator.next(); obj != NULL;
@@ -1219,7 +1220,7 @@ static bool IsJSFunctionCode(Code* code) {
// Returns true if an instance of candidate were inlined into function's code.
static bool IsInlined(JSFunction* function, SharedFunctionInfo* candidate) {
- AssertNoAllocation no_gc;
+ DisallowHeapAllocation no_gc;
if (function->code()->kind() != Code::OPTIMIZED_FUNCTION) return false;
@@ -1257,7 +1258,7 @@ class DependentFunctionFilter : public OptimizedFunctionFilter {
static void DeoptimizeDependentFunctions(SharedFunctionInfo* function_info) {
- AssertNoAllocation no_allocation;
+ DisallowHeapAllocation no_allocation;
DependentFunctionFilter filter(function_info);
Deoptimizer::DeoptimizeAllFunctionsWith(function_info->GetIsolate(), &filter);
@@ -1293,7 +1294,7 @@ MaybeObject* LiveEdit::ReplaceFunctionCode(
if (shared_info->debug_info()->IsDebugInfo()) {
Handle<DebugInfo> debug_info(DebugInfo::cast(shared_info->debug_info()));
Handle<Code> new_original_code =
- FACTORY->CopyCode(compile_info_wrapper.GetFunctionCode());
+ isolate->factory()->CopyCode(compile_info_wrapper.GetFunctionCode());
debug_info->set_original_code(*new_original_code);
}
@@ -1460,12 +1461,13 @@ class RelocInfoBuffer {
static Handle<Code> PatchPositionsInCode(
Handle<Code> code,
Handle<JSArray> position_change_array) {
+ Isolate* isolate = code->GetIsolate();
RelocInfoBuffer buffer_writer(code->relocation_size(),
code->instruction_start());
{
- AssertNoAllocation no_allocations_please;
+ DisallowHeapAllocation no_allocation;
for (RelocIterator it(*code); !it.done(); it.next()) {
RelocInfo* rinfo = it.rinfo();
if (RelocInfo::IsPosition(rinfo->rmode())) {
@@ -1494,7 +1496,7 @@ static Handle<Code> PatchPositionsInCode(
// Relocation info section now has different size. We cannot simply
// rewrite it inside code object. Instead we have to create a new
// code object.
- Handle<Code> result(FACTORY->CopyCode(code, buffer));
+ Handle<Code> result(isolate->factory()->CopyCode(code, buffer));
return result;
}
}
@@ -1542,9 +1544,10 @@ MaybeObject* LiveEdit::PatchFunctionPositions(
static Handle<Script> CreateScriptCopy(Handle<Script> original) {
- Handle<String> original_source(String::cast(original->source()));
+ Isolate* isolate = original->GetIsolate();
- Handle<Script> copy = FACTORY->NewScript(original_source);
+ Handle<String> original_source(String::cast(original->source()));
+ Handle<Script> copy = isolate->factory()->NewScript(original_source);
copy->set_name(original->name());
copy->set_line_offset(original->line_offset());
@@ -2007,7 +2010,7 @@ Handle<JSArray> LiveEdit::CheckAndDropActivations(
if (error_message != NULL) {
// Add error message as an array extra element.
Vector<const char> vector_message(error_message, StrLength(error_message));
- Handle<String> str = FACTORY->NewStringFromAscii(vector_message);
+ Handle<String> str = isolate->factory()->NewStringFromAscii(vector_message);
SetElementNonStrict(result, len, str);
}
return result;
diff --git a/deps/v8/src/log-utils.cc b/deps/v8/src/log-utils.cc
index a44dca0765..f033172734 100644
--- a/deps/v8/src/log-utils.cc
+++ b/deps/v8/src/log-utils.cc
@@ -238,7 +238,7 @@ void LogMessageBuilder::Append(const char c) {
void LogMessageBuilder::Append(String* str) {
- AssertNoAllocation no_heap_allocation; // Ensure string stay valid.
+ DisallowHeapAllocation no_gc; // Ensure string stay valid.
int length = str->length();
for (int i = 0; i < length; i++) {
Append(static_cast<char>(str->Get(i)));
@@ -253,7 +253,7 @@ void LogMessageBuilder::AppendAddress(Address addr) {
void LogMessageBuilder::AppendDetailed(String* str, bool show_impl_info) {
if (str == NULL) return;
- AssertNoAllocation no_heap_allocation; // Ensure string stay valid.
+ DisallowHeapAllocation no_gc; // Ensure string stay valid.
int len = str->length();
if (len > 0x1000)
len = 0x1000;
diff --git a/deps/v8/src/log.cc b/deps/v8/src/log.cc
index 00fa432686..610a63b37a 100644
--- a/deps/v8/src/log.cc
+++ b/deps/v8/src/log.cc
@@ -1549,7 +1549,7 @@ static int EnumerateCompiledFunctions(Heap* heap,
Handle<SharedFunctionInfo>* sfis,
Handle<Code>* code_objects) {
HeapIterator iterator(heap);
- AssertNoAllocation no_alloc;
+ DisallowHeapAllocation no_gc;
int compiled_funcs_count = 0;
// Iterate the heap to find shared function info objects and record
@@ -1581,57 +1581,55 @@ static int EnumerateCompiledFunctions(Heap* heap,
void Logger::LogCodeObject(Object* object) {
- if (FLAG_log_code || FLAG_ll_prof || is_logging_code_events()) {
- Code* code_object = Code::cast(object);
- LogEventsAndTags tag = Logger::STUB_TAG;
- const char* description = "Unknown code from the snapshot";
- switch (code_object->kind()) {
- case Code::FUNCTION:
- case Code::OPTIMIZED_FUNCTION:
- return; // We log this later using LogCompiledFunctions.
- case Code::UNARY_OP_IC: // fall through
- case Code::BINARY_OP_IC: // fall through
- case Code::COMPARE_IC: // fall through
- case Code::COMPARE_NIL_IC: // fall through
- case Code::TO_BOOLEAN_IC: // fall through
- case Code::STUB:
- description =
- CodeStub::MajorName(CodeStub::GetMajorKey(code_object), true);
- if (description == NULL)
- description = "A stub from the snapshot";
- tag = Logger::STUB_TAG;
- break;
- case Code::BUILTIN:
- description = "A builtin from the snapshot";
- tag = Logger::BUILTIN_TAG;
- break;
- case Code::KEYED_LOAD_IC:
- description = "A keyed load IC from the snapshot";
- tag = Logger::KEYED_LOAD_IC_TAG;
- break;
- case Code::LOAD_IC:
- description = "A load IC from the snapshot";
- tag = Logger::LOAD_IC_TAG;
- break;
- case Code::STORE_IC:
- description = "A store IC from the snapshot";
- tag = Logger::STORE_IC_TAG;
- break;
- case Code::KEYED_STORE_IC:
- description = "A keyed store IC from the snapshot";
- tag = Logger::KEYED_STORE_IC_TAG;
- break;
- case Code::CALL_IC:
- description = "A call IC from the snapshot";
- tag = Logger::CALL_IC_TAG;
- break;
- case Code::KEYED_CALL_IC:
- description = "A keyed call IC from the snapshot";
- tag = Logger::KEYED_CALL_IC_TAG;
- break;
- }
- PROFILE(isolate_, CodeCreateEvent(tag, code_object, description));
+ Code* code_object = Code::cast(object);
+ LogEventsAndTags tag = Logger::STUB_TAG;
+ const char* description = "Unknown code from the snapshot";
+ switch (code_object->kind()) {
+ case Code::FUNCTION:
+ case Code::OPTIMIZED_FUNCTION:
+ return; // We log this later using LogCompiledFunctions.
+ case Code::UNARY_OP_IC: // fall through
+ case Code::BINARY_OP_IC: // fall through
+ case Code::COMPARE_IC: // fall through
+ case Code::COMPARE_NIL_IC: // fall through
+ case Code::TO_BOOLEAN_IC: // fall through
+ case Code::STUB:
+ description =
+ CodeStub::MajorName(CodeStub::GetMajorKey(code_object), true);
+ if (description == NULL)
+ description = "A stub from the snapshot";
+ tag = Logger::STUB_TAG;
+ break;
+ case Code::BUILTIN:
+ description = "A builtin from the snapshot";
+ tag = Logger::BUILTIN_TAG;
+ break;
+ case Code::KEYED_LOAD_IC:
+ description = "A keyed load IC from the snapshot";
+ tag = Logger::KEYED_LOAD_IC_TAG;
+ break;
+ case Code::LOAD_IC:
+ description = "A load IC from the snapshot";
+ tag = Logger::LOAD_IC_TAG;
+ break;
+ case Code::STORE_IC:
+ description = "A store IC from the snapshot";
+ tag = Logger::STORE_IC_TAG;
+ break;
+ case Code::KEYED_STORE_IC:
+ description = "A keyed store IC from the snapshot";
+ tag = Logger::KEYED_STORE_IC_TAG;
+ break;
+ case Code::CALL_IC:
+ description = "A call IC from the snapshot";
+ tag = Logger::CALL_IC_TAG;
+ break;
+ case Code::KEYED_CALL_IC:
+ description = "A keyed call IC from the snapshot";
+ tag = Logger::KEYED_CALL_IC_TAG;
+ break;
}
+ PROFILE(isolate_, CodeCreateEvent(tag, code_object, description));
}
@@ -1718,7 +1716,7 @@ void Logger::LogCodeObjects() {
heap->CollectAllGarbage(Heap::kMakeHeapIterableMask,
"Logger::LogCodeObjects");
HeapIterator iterator(heap);
- AssertNoAllocation no_alloc;
+ DisallowHeapAllocation no_gc;
for (HeapObject* obj = iterator.next(); obj != NULL; obj = iterator.next()) {
if (obj->IsCode()) LogCodeObject(obj);
}
@@ -1796,7 +1794,7 @@ void Logger::LogAccessorCallbacks() {
heap->CollectAllGarbage(Heap::kMakeHeapIterableMask,
"Logger::LogAccessorCallbacks");
HeapIterator iterator(heap);
- AssertNoAllocation no_alloc;
+ DisallowHeapAllocation no_gc;
for (HeapObject* obj = iterator.next(); obj != NULL; obj = iterator.next()) {
if (!obj->IsExecutableAccessorInfo()) continue;
ExecutableAccessorInfo* ai = ExecutableAccessorInfo::cast(obj);
diff --git a/deps/v8/src/mark-compact.cc b/deps/v8/src/mark-compact.cc
index 29d8616181..dc2db4b096 100644
--- a/deps/v8/src/mark-compact.cc
+++ b/deps/v8/src/mark-compact.cc
@@ -1002,6 +1002,10 @@ void CodeFlusher::ProcessJSFunctionCandidates() {
Code* code = shared->code();
MarkBit code_mark = Marking::MarkBitFrom(code);
if (!code_mark.Get()) {
+ if (FLAG_trace_code_flushing && shared->is_compiled()) {
+ SmartArrayPointer<char> name = shared->DebugName()->ToCString();
+ PrintF("[code-flushing clears: %s]\n", *name);
+ }
shared->set_code(lazy_compile);
candidate->set_code(lazy_compile);
} else {
@@ -1039,6 +1043,10 @@ void CodeFlusher::ProcessSharedFunctionInfoCandidates() {
Code* code = candidate->code();
MarkBit code_mark = Marking::MarkBitFrom(code);
if (!code_mark.Get()) {
+ if (FLAG_trace_code_flushing && candidate->is_compiled()) {
+ SmartArrayPointer<char> name = candidate->DebugName()->ToCString();
+ PrintF("[code-flushing clears: %s]\n", *name);
+ }
candidate->set_code(lazy_compile);
}
@@ -1122,6 +1130,11 @@ void CodeFlusher::EvictCandidate(SharedFunctionInfo* shared_info) {
// Make sure previous flushing decisions are revisited.
isolate_->heap()->incremental_marking()->RecordWrites(shared_info);
+ if (FLAG_trace_code_flushing) {
+ SmartArrayPointer<char> name = shared_info->DebugName()->ToCString();
+ PrintF("[code-flushing abandons function-info: %s]\n", *name);
+ }
+
SharedFunctionInfo* candidate = shared_function_info_candidates_head_;
SharedFunctionInfo* next_candidate;
if (candidate == shared_info) {
@@ -1153,6 +1166,11 @@ void CodeFlusher::EvictCandidate(JSFunction* function) {
isolate_->heap()->incremental_marking()->RecordWrites(function);
isolate_->heap()->incremental_marking()->RecordWrites(function->shared());
+ if (FLAG_trace_code_flushing) {
+ SmartArrayPointer<char> name = function->shared()->DebugName()->ToCString();
+ PrintF("[code-flushing abandons closure: %s]\n", *name);
+ }
+
JSFunction* candidate = jsfunction_candidates_head_;
JSFunction* next_candidate;
if (candidate == function) {
@@ -1183,6 +1201,11 @@ void CodeFlusher::EvictOptimizedCodeMap(SharedFunctionInfo* code_map_holder) {
// Make sure previous flushing decisions are revisited.
isolate_->heap()->incremental_marking()->RecordWrites(code_map_holder);
+ if (FLAG_trace_code_flushing) {
+ SmartArrayPointer<char> name = code_map_holder->DebugName()->ToCString();
+ PrintF("[code-flushing abandons code-map: %s]\n", *name);
+ }
+
SharedFunctionInfo* holder = optimized_code_map_holder_head_;
SharedFunctionInfo* next_holder;
if (holder == code_map_holder) {
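The new FLAG_trace_code_flushing output makes the flusher's decisions visible. Going by the four format strings added in this file, a traced run might log lines such as the following (function names hypothetical):

    [code-flushing clears: outerFunction]
    [code-flushing abandons closure: innerCallback]
    [code-flushing abandons function-info: helper]
    [code-flushing abandons code-map: helper]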
@@ -2074,22 +2097,16 @@ void MarkCompactCollector::MarkImplicitRefGroups() {
// marking stack have been marked, or are overflowed in the heap.
void MarkCompactCollector::EmptyMarkingDeque() {
while (!marking_deque_.IsEmpty()) {
- while (!marking_deque_.IsEmpty()) {
- HeapObject* object = marking_deque_.Pop();
- ASSERT(object->IsHeapObject());
- ASSERT(heap()->Contains(object));
- ASSERT(Marking::IsBlack(Marking::MarkBitFrom(object)));
-
- Map* map = object->map();
- MarkBit map_mark = Marking::MarkBitFrom(map);
- MarkObject(map, map_mark);
+ HeapObject* object = marking_deque_.Pop();
+ ASSERT(object->IsHeapObject());
+ ASSERT(heap()->Contains(object));
+ ASSERT(Marking::IsBlack(Marking::MarkBitFrom(object)));
- MarkCompactMarkingVisitor::IterateBody(map, object);
- }
+ Map* map = object->map();
+ MarkBit map_mark = Marking::MarkBitFrom(map);
+ MarkObject(map, map_mark);
- // Process encountered weak maps, mark objects only reachable by those
- // weak maps and repeat until fix-point is reached.
- ProcessWeakMaps();
+ MarkCompactMarkingVisitor::IterateBody(map, object);
}
}
@@ -2154,13 +2171,16 @@ void MarkCompactCollector::ProcessMarkingDeque() {
}
-void MarkCompactCollector::ProcessExternalMarking(RootMarkingVisitor* visitor) {
+// Mark all objects reachable (transitively) from objects on the marking
+// stack including references only considered in the atomic marking pause.
+void MarkCompactCollector::ProcessEphemeralMarking(ObjectVisitor* visitor) {
bool work_to_do = true;
ASSERT(marking_deque_.IsEmpty());
while (work_to_do) {
isolate()->global_handles()->IterateObjectGroups(
visitor, &IsUnmarkedHeapObjectWithHeap);
MarkImplicitRefGroups();
+ ProcessWeakMaps();
work_to_do = !marking_deque_.IsEmpty();
ProcessMarkingDeque();
}
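
ProcessEphemeralMarking() is a fixed-point loop: each pass may discover new work (object groups, implicit reference groups, and now weak maps), and it repeats until a pass leaves the marking deque empty. The same shape, reduced to a toy (illustrative only):

    #include <set>

    // Repeat a discovery step until it stops producing new elements, the
    // fixed-point structure of ProcessEphemeralMarking() (schematic only).
    template <typename T, typename Step>
    void FixedPoint(std::set<T>& marked, Step step) {
      bool work_to_do = true;
      while (work_to_do) {
        const auto before = marked.size();
        step(marked);                       // may insert new elements
        work_to_do = marked.size() != before;
      }
    }

    int main() {
      std::set<int> marked = {0};
      // Each step marks n+1 for every marked n below 5.
      FixedPoint(marked, [](std::set<int>& m) {
        std::set<int> add;
        for (int n : m) if (n < 5) add.insert(n + 1);
        m.insert(add.begin(), add.end());
      });
      // marked == {0,1,2,3,4,5}
    }
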
@@ -2237,12 +2257,12 @@ void MarkCompactCollector::MarkLiveObjects() {
// The objects reachable from the roots are marked, yet unreachable
// objects are unmarked. Mark objects reachable due to host
- // application specific logic.
- ProcessExternalMarking(&root_visitor);
+ // application specific logic or through Harmony weak maps.
+ ProcessEphemeralMarking(&root_visitor);
- // The objects reachable from the roots or object groups are marked,
- // yet unreachable objects are unmarked. Mark objects reachable
- // only from weak global handles.
+ // The objects reachable from the roots, weak maps or object groups
+ // are marked, yet unreachable objects are unmarked. Mark objects
+ // reachable only from weak global handles.
//
// First we identify nonlive weak handles and mark them as pending
// destruction.
@@ -2255,9 +2275,9 @@ void MarkCompactCollector::MarkLiveObjects() {
EmptyMarkingDeque();
}
- // Repeat host application specific marking to mark unmarked objects
- // reachable from the weak roots.
- ProcessExternalMarking(&root_visitor);
+ // Repeat host application specific and Harmony weak maps marking to
+ // mark unmarked objects reachable from the weak roots.
+ ProcessEphemeralMarking(&root_visitor);
AfterMarking();
}
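
The weak-map (ephemeron) rule that makes the repeated passes necessary: an entry's value is kept alive only if both the weak map and the key are alive, so one newly marked key can make further keys reachable. A compact model of that rule, with integers standing in for heap objects:

    #include <map>
    #include <set>

    // Ephemeron rule used by weak-map processing: a value is reachable only
    // if both the weak map and its key are reachable. Iterate until stable.
    void ProcessEphemerons(const std::map<int, int>& weak_map,
                           std::set<int>& live) {
      bool changed = true;
      while (changed) {
        changed = false;
        for (const auto& [key, value] : weak_map) {
          if (live.count(key) && !live.count(value)) {
            live.insert(value);   // live key => value becomes live
            changed = true;
          }
        }
      }
    }

    int main() {
      std::map<int, int> wm = {{1, 2}, {2, 3}, {4, 5}};
      std::set<int> live = {1};
      ProcessEphemerons(wm, live);
      // live == {1,2,3}; 5 stays dead because key 4 is dead.
    }
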
@@ -2478,7 +2498,7 @@ void MarkCompactCollector::ClearNonLiveMapTransitions(Map* map,
void MarkCompactCollector::ClearAndDeoptimizeDependentCode(Map* map) {
- AssertNoAllocation no_allocation_scope;
+ DisallowHeapAllocation no_allocation;
DependentCode* entries = map->dependent_code();
DependentCode::GroupStartIndexes starts(entries);
int number_of_entries = starts.number_of_entries();
@@ -2495,7 +2515,7 @@ void MarkCompactCollector::ClearAndDeoptimizeDependentCode(Map* map) {
void MarkCompactCollector::ClearNonLiveDependentCode(Map* map) {
- AssertNoAllocation no_allocation_scope;
+ DisallowHeapAllocation no_allocation;
DependentCode* entries = map->dependent_code();
DependentCode::GroupStartIndexes starts(entries);
int number_of_entries = starts.number_of_entries();
@@ -2529,6 +2549,7 @@ void MarkCompactCollector::ClearNonLiveDependentCode(Map* map) {
void MarkCompactCollector::ProcessWeakMaps() {
+ GCTracer::Scope gc_scope(tracer_, GCTracer::Scope::MC_WEAKMAP_PROCESS);
Object* weak_map_obj = encountered_weak_maps();
while (weak_map_obj != Smi::FromInt(0)) {
ASSERT(MarkCompactCollector::IsMarked(HeapObject::cast(weak_map_obj)));
@@ -2554,6 +2575,7 @@ void MarkCompactCollector::ProcessWeakMaps() {
void MarkCompactCollector::ClearWeakMaps() {
+ GCTracer::Scope gc_scope(tracer_, GCTracer::Scope::MC_WEAKMAP_CLEAR);
Object* weak_map_obj = encountered_weak_maps();
while (weak_map_obj != Smi::FromInt(0)) {
ASSERT(MarkCompactCollector::IsMarked(HeapObject::cast(weak_map_obj)));
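
GCTracer::Scope is an RAII timer: entering the scope starts the clock for the named phase, and leaving it (on any path, including early returns) records the elapsed time. A sketch of the same idiom, simplified to print rather than report into a tracer:

    #include <chrono>
    #include <cstdio>

    // RAII timing scope in the spirit of GCTracer::Scope: construction starts
    // the clock, destruction records the elapsed time for the named phase.
    class ScopeTimer {
     public:
      explicit ScopeTimer(const char* phase)
          : phase_(phase), start_(std::chrono::steady_clock::now()) {}
      ~ScopeTimer() {
        auto us = std::chrono::duration_cast<std::chrono::microseconds>(
            std::chrono::steady_clock::now() - start_).count();
        std::printf("%s: %lld us\n", phase_, static_cast<long long>(us));
      }
     private:
      const char* phase_;
      std::chrono::steady_clock::time_point start_;
    };

    void ProcessWeakMaps() {
      ScopeTimer scope("MC_WEAKMAP_PROCESS");  // times the whole function
      // ... marking work ...
    }

    int main() { ProcessWeakMaps(); }
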
diff --git a/deps/v8/src/mark-compact.h b/deps/v8/src/mark-compact.h
index 0f20440062..873534c2fa 100644
--- a/deps/v8/src/mark-compact.h
+++ b/deps/v8/src/mark-compact.h
@@ -841,14 +841,18 @@ class MarkCompactCollector {
// is marked.
void MarkImplicitRefGroups();
- // Mark all objects which are reachable due to host application
- // logic like object groups or implicit references' groups.
- void ProcessExternalMarking(RootMarkingVisitor* visitor);
-
// Mark objects reachable (transitively) from objects in the marking stack
// or overflowed in the heap.
void ProcessMarkingDeque();
+ // Mark objects reachable (transitively) from objects in the marking stack
+ // or overflowed in the heap. This respects references only considered in
+ // the final atomic marking pause including the following:
+ // - Processing of objects reachable through Harmony WeakMaps.
+ // - Objects reachable due to host application logic like object groups
+ // or implicit references' groups.
+ void ProcessEphemeralMarking(ObjectVisitor* visitor);
+
// Mark objects reachable (transitively) from objects in the marking
// stack. This function empties the marking stack, but may leave
// overflowed objects in the heap, in which case the marking stack's
diff --git a/deps/v8/src/marking-thread.cc b/deps/v8/src/marking-thread.cc
index ac64381268..574485abc7 100644
--- a/deps/v8/src/marking-thread.cc
+++ b/deps/v8/src/marking-thread.cc
@@ -52,6 +52,9 @@ Atomic32 MarkingThread::id_counter_ = -1;
void MarkingThread::Run() {
Isolate::SetIsolateThreadLocals(isolate_, NULL);
+ DisallowHeapAllocation no_allocation;
+ DisallowHandleAllocation no_handles;
+ DisallowHandleDereference no_deref;
while (true) {
start_marking_semaphore_->Wait();
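
The marking thread now pins three assert scopes for its whole lifetime, so any accidental allocation, handle creation, or handle dereference on that thread becomes an assertion failure in debug builds. The underlying mechanism is a thread-local counter bumped by an RAII guard; a minimal sketch with hypothetical names:

    #include <cassert>

    // Thread-local counting guard, like DisallowHeapAllocation: while any
    // guard is alive on this thread, the guarded operation must not happen.
    thread_local int g_no_allocation_depth = 0;

    struct DisallowAllocationScope {
      DisallowAllocationScope() { ++g_no_allocation_depth; }
      ~DisallowAllocationScope() { --g_no_allocation_depth; }
    };

    void Allocate() {
      assert(g_no_allocation_depth == 0 && "allocation forbidden here");
    }

    void MarkerThreadRun() {
      DisallowAllocationScope no_allocation;  // in force for the whole loop
      // ... marking loop; any Allocate() here would trip the assert ...
    }

    int main() {
      MarkerThreadRun();
      Allocate();  // fine outside the scope
    }
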
diff --git a/deps/v8/src/messages.cc b/deps/v8/src/messages.cc
index de18a4b1aa..9eae67a728 100644
--- a/deps/v8/src/messages.cc
+++ b/deps/v8/src/messages.cc
@@ -57,23 +57,25 @@ void MessageHandler::DefaultMessageReport(Isolate* isolate,
Handle<JSMessageObject> MessageHandler::MakeMessageObject(
+ Isolate* isolate,
const char* type,
MessageLocation* loc,
Vector< Handle<Object> > args,
Handle<String> stack_trace,
Handle<JSArray> stack_frames) {
- Handle<String> type_handle = FACTORY->InternalizeUtf8String(type);
+ Factory* factory = isolate->factory();
+ Handle<String> type_handle = factory->InternalizeUtf8String(type);
Handle<FixedArray> arguments_elements =
- FACTORY->NewFixedArray(args.length());
+ factory->NewFixedArray(args.length());
for (int i = 0; i < args.length(); i++) {
arguments_elements->set(i, *args[i]);
}
Handle<JSArray> arguments_handle =
- FACTORY->NewJSArrayWithElements(arguments_elements);
+ factory->NewJSArrayWithElements(arguments_elements);
int start = 0;
int end = 0;
- Handle<Object> script_handle = FACTORY->undefined_value();
+ Handle<Object> script_handle = factory->undefined_value();
if (loc) {
start = loc->start_pos();
end = loc->end_pos();
@@ -81,15 +83,15 @@ Handle<JSMessageObject> MessageHandler::MakeMessageObject(
}
Handle<Object> stack_trace_handle = stack_trace.is_null()
- ? Handle<Object>::cast(FACTORY->undefined_value())
+ ? Handle<Object>::cast(factory->undefined_value())
: Handle<Object>::cast(stack_trace);
Handle<Object> stack_frames_handle = stack_frames.is_null()
- ? Handle<Object>::cast(FACTORY->undefined_value())
+ ? Handle<Object>::cast(factory->undefined_value())
: Handle<Object>::cast(stack_frames);
Handle<JSMessageObject> message =
- FACTORY->NewJSMessageObject(type_handle,
+ factory->NewJSMessageObject(type_handle,
arguments_handle,
start,
end,
@@ -122,7 +124,7 @@ void MessageHandler::ReportMessage(Isolate* isolate,
v8::Local<v8::Message> api_message_obj = v8::Utils::MessageToLocal(message);
v8::Local<v8::Value> api_exception_obj = v8::Utils::ToLocal(exception_handle);
- v8::NeanderArray global_listeners(FACTORY->message_listeners());
+ v8::NeanderArray global_listeners(isolate->factory()->message_listeners());
int global_length = global_listeners.length();
if (global_length == 0) {
DefaultMessageReport(isolate, loc, message);
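
The messages.cc hunks remove this file's uses of the process-global FACTORY macro by threading Isolate* through MakeMessageObject() and deriving the factory from it, a prerequisite for multiple isolates coexisting cleanly. Schematically, with toy types rather than the V8 declarations:

    #include <string>

    // Schematic of the change: pass the isolate explicitly and derive the
    // factory from it, instead of reaching for a process-global FACTORY.
    struct Factory {
      std::string InternalizeUtf8String(const char* s) { return s; }
    };
    struct Isolate {
      Factory factory_;
      Factory* factory() { return &factory_; }
    };

    // Before: Handle<String> t = FACTORY->InternalizeUtf8String(type);
    // After:
    std::string MakeMessageObject(Isolate* isolate, const char* type) {
      Factory* factory = isolate->factory();
      return factory->InternalizeUtf8String(type);
    }

    int main() {
      Isolate isolate;
      return MakeMessageObject(&isolate, "unknown").empty() ? 1 : 0;
    }
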
diff --git a/deps/v8/src/messages.h b/deps/v8/src/messages.h
index 3361abe231..5d84e46caa 100644
--- a/deps/v8/src/messages.h
+++ b/deps/v8/src/messages.h
@@ -91,6 +91,7 @@ class MessageHandler {
public:
// Returns a message object for the API to use.
static Handle<JSMessageObject> MakeMessageObject(
+ Isolate* isolate,
const char* type,
MessageLocation* loc,
Vector< Handle<Object> > args,
diff --git a/deps/v8/src/messages.js b/deps/v8/src/messages.js
index 296965d37d..ce075ce5e5 100644
--- a/deps/v8/src/messages.js
+++ b/deps/v8/src/messages.js
@@ -543,11 +543,11 @@ function ScriptLineCount() {
* If sourceURL comment is available and script starts at zero returns sourceURL
* comment contents. Otherwise, script name is returned. See
* http://fbug.googlecode.com/svn/branches/firebug1.1/docs/ReleaseNotes_1.1.txt
- * for details on using //@ sourceURL comment to identify scritps that don't
- * have name.
+ * and Source Map Revision 3 proposal for details on using //# sourceURL and
+ * deprecated //@ sourceURL comment to identify scripts that don't have name.
*
- * @return {?string} script name if present, value for //@ sourceURL comment
- * otherwise.
+ * @return {?string} script name if present, value for //# sourceURL or
+ * deprecated //@ sourceURL comment otherwise.
*/
function ScriptNameOrSourceURL() {
if (this.line_offset > 0 || this.column_offset > 0) {
@@ -572,7 +572,7 @@ function ScriptNameOrSourceURL() {
this.cachedNameOrSourceURL = this.name;
if (sourceUrlPos > 4) {
var sourceUrlPattern =
- /\/\/@[\040\t]sourceURL=[\040\t]*([^\s\'\"]*)[\040\t]*$/gm;
+ /\/\/[#@][\040\t]sourceURL=[\040\t]*([^\s\'\"]*)[\040\t]*$/gm;
// Don't reuse lastMatchInfo here, so we create a new array with room
// for four captures (array with length one longer than the index
// of the fourth capture, where the numbering is zero-based).
@@ -906,8 +906,8 @@ function CallSiteGetPosition() {
function CallSiteIsConstructor() {
var receiver = this[CallSiteReceiverKey];
- var constructor =
- IS_OBJECT(receiver) ? %GetDataProperty(receiver, "constructor") : null;
+ var constructor = (receiver != null && IS_OBJECT(receiver))
+ ? %GetDataProperty(receiver, "constructor") : null;
if (!constructor) return false;
return this[CallSiteFunctionKey] === constructor;
}
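
The widened pattern accepts both the `//#` form from the Source Map Revision 3 proposal and the deprecated `//@` form. An equivalent check in C++, assuming std::regex and writing the JS `\040` escape as a literal space:

    #include <cassert>
    #include <regex>
    #include <string>

    // Same shape as the widened sourceURL pattern: //# or //@, exactly one
    // space or tab, then the URL up to trailing whitespace.
    std::string ExtractSourceURL(const std::string& line) {
      static const std::regex re(
          R"(//[#@][ \t]sourceURL=[ \t]*([^\s'"]*)[ \t]*$)");
      std::smatch m;
      return std::regex_search(line, m, re) ? m[1].str() : std::string();
    }

    int main() {
      assert(ExtractSourceURL("//# sourceURL=app.js") == "app.js");  // new
      assert(ExtractSourceURL("//@ sourceURL=app.js") == "app.js");  // deprecated
      assert(ExtractSourceURL("// sourceURL=app.js").empty());       // no marker
    }
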
diff --git a/deps/v8/src/mips/assembler-mips.cc b/deps/v8/src/mips/assembler-mips.cc
index 1b666ec6c0..eee79a2156 100644
--- a/deps/v8/src/mips/assembler-mips.cc
+++ b/deps/v8/src/mips/assembler-mips.cc
@@ -240,7 +240,7 @@ Operand::Operand(Handle<Object> handle) {
#ifdef DEBUG
Isolate* isolate = Isolate::Current();
#endif
- ALLOW_HANDLE_DEREF(isolate, "using and embedding raw address");
+ AllowDeferredHandleDereference using_raw_address;
rm_ = no_reg;
// Verify all Objects referred by code are NOT in new space.
Object* obj = *handle;
@@ -2198,7 +2198,7 @@ void Assembler::set_target_address_at(Address pc, Address target) {
bool in_range = (ipc ^ static_cast<uint32_t>(itarget) >>
(kImm26Bits + kImmFieldShift)) == 0;
uint32_t target_field =
- static_cast<uint32_t>(itarget & kJumpAddrMask) >>kImmFieldShift;
+ static_cast<uint32_t>(itarget & kJumpAddrMask) >> kImmFieldShift;
bool patched_jump = false;
#ifndef ALLOW_JAL_IN_BOUNDARY_REGION
diff --git a/deps/v8/src/mips/builtins-mips.cc b/deps/v8/src/mips/builtins-mips.cc
index 973b1bb0be..06273caf78 100644
--- a/deps/v8/src/mips/builtins-mips.cc
+++ b/deps/v8/src/mips/builtins-mips.cc
@@ -335,9 +335,9 @@ void ArrayNativeCode(MacroAssembler* masm, Label* call_generic_code) {
call_generic_code);
__ IncrementCounter(counters->array_function_native(), 1, a3, t0);
// Set up return value, remove receiver from stack and return.
- __ mov(v0, a2);
__ Addu(sp, sp, Operand(kPointerSize));
- __ Ret();
+ __ Ret(USE_DELAY_SLOT);
+ __ mov(v0, a2);
// Check for one argument. Bail out if argument is not smi or if it is
// negative.
@@ -378,9 +378,9 @@ void ArrayNativeCode(MacroAssembler* masm, Label* call_generic_code) {
__ IncrementCounter(counters->array_function_native(), 1, a2, t0);
// Set up return value, remove receiver and argument from stack and return.
- __ mov(v0, a3);
__ Addu(sp, sp, Operand(2 * kPointerSize));
- __ Ret();
+ __ Ret(USE_DELAY_SLOT);
+ __ mov(v0, a3);
// Handle construction of an array from a list of arguments.
__ bind(&argc_two_or_more);
@@ -434,8 +434,8 @@ void ArrayNativeCode(MacroAssembler* masm, Label* call_generic_code) {
// a3: JSArray
// sp[0]: receiver
__ Addu(sp, sp, Operand(kPointerSize));
+ __ Ret(USE_DELAY_SLOT);
__ mov(v0, a3);
- __ Ret();
__ bind(&has_non_smi_element);
// Double values are handled by the runtime.
@@ -498,15 +498,20 @@ void Builtins::Generate_InternalArrayCode(MacroAssembler* masm) {
// Run the native code for the InternalArray function called as a normal
// function.
- ArrayNativeCode(masm, &generic_array_code);
-
- // Jump to the generic array code if the specialized code cannot handle the
- // construction.
- __ bind(&generic_array_code);
-
- Handle<Code> array_code =
- masm->isolate()->builtins()->InternalArrayCodeGeneric();
- __ Jump(array_code, RelocInfo::CODE_TARGET);
+ if (FLAG_optimize_constructed_arrays) {
+ // Tail call a stub.
+ InternalArrayConstructorStub stub(masm->isolate());
+ __ TailCallStub(&stub);
+ } else {
+ ArrayNativeCode(masm, &generic_array_code);
+
+ // Jump to the generic array code if the specialized code cannot handle the
+ // construction.
+ __ bind(&generic_array_code);
+ Handle<Code> array_code =
+ masm->isolate()->builtins()->InternalArrayCodeGeneric();
+ __ Jump(array_code, RelocInfo::CODE_TARGET);
+ }
}
@@ -533,15 +538,24 @@ void Builtins::Generate_ArrayCode(MacroAssembler* masm) {
}
// Run the native code for the Array function called as a normal function.
- ArrayNativeCode(masm, &generic_array_code);
-
- // Jump to the generic array code if the specialized code cannot handle
- // the construction.
- __ bind(&generic_array_code);
-
- Handle<Code> array_code =
- masm->isolate()->builtins()->ArrayCodeGeneric();
- __ Jump(array_code, RelocInfo::CODE_TARGET);
+ if (FLAG_optimize_constructed_arrays) {
+ // Tail call a stub.
+ Handle<Object> undefined_sentinel(
+ masm->isolate()->heap()->undefined_value(),
+ masm->isolate());
+ __ li(a2, Operand(undefined_sentinel));
+ ArrayConstructorStub stub(masm->isolate());
+ __ TailCallStub(&stub);
+ } else {
+ ArrayNativeCode(masm, &generic_array_code);
+
+ // Jump to the generic array code if the specialized code cannot handle
+ // the construction.
+ __ bind(&generic_array_code);
+ Handle<Code> array_code =
+ masm->isolate()->builtins()->ArrayCodeGeneric();
+ __ Jump(array_code, RelocInfo::CODE_TARGET);
+ }
}
@@ -1358,15 +1372,17 @@ static void Generate_NotifyDeoptimizedHelper(MacroAssembler* masm,
Label with_tos_register, unknown_state;
__ Branch(&with_tos_register,
ne, t2, Operand(FullCodeGenerator::NO_REGISTERS));
+ __ Ret(USE_DELAY_SLOT);
+ // Safe to fill delay slot: Addu will emit one instruction.
__ Addu(sp, sp, Operand(1 * kPointerSize)); // Remove state.
- __ Ret();
__ bind(&with_tos_register);
__ lw(v0, MemOperand(sp, 1 * kPointerSize));
__ Branch(&unknown_state, ne, t2, Operand(FullCodeGenerator::TOS_REG));
+ __ Ret(USE_DELAY_SLOT);
+ // Safe to fill delay slot: Addu will emit one instruction.
__ Addu(sp, sp, Operand(2 * kPointerSize)); // Remove state.
- __ Ret();
__ bind(&unknown_state);
__ stop("no cases left");
diff --git a/deps/v8/src/mips/code-stubs-mips.cc b/deps/v8/src/mips/code-stubs-mips.cc
index 7c09bb3e93..3d0577eb1e 100644
--- a/deps/v8/src/mips/code-stubs-mips.cc
+++ b/deps/v8/src/mips/code-stubs-mips.cc
@@ -30,7 +30,6 @@
#if defined(V8_TARGET_ARCH_MIPS)
#include "bootstrapper.h"
-#include "builtins-decls.h"
#include "code-stubs.h"
#include "codegen.h"
#include "regexp-macro-assembler.h"
@@ -46,7 +45,6 @@ void FastCloneShallowArrayStub::InitializeInterfaceDescriptor(
static Register registers[] = { a3, a2, a1 };
descriptor->register_param_count_ = 3;
descriptor->register_params_ = registers;
- descriptor->stack_parameter_count_ = NULL;
descriptor->deoptimization_handler_ =
Runtime::FunctionForId(Runtime::kCreateArrayLiteralShallow)->entry;
}
@@ -58,7 +56,6 @@ void FastCloneShallowObjectStub::InitializeInterfaceDescriptor(
static Register registers[] = { a3, a2, a1, a0 };
descriptor->register_param_count_ = 4;
descriptor->register_params_ = registers;
- descriptor->stack_parameter_count_ = NULL;
descriptor->deoptimization_handler_ =
Runtime::FunctionForId(Runtime::kCreateObjectLiteralShallow)->entry;
}
@@ -81,7 +78,6 @@ void LoadFieldStub::InitializeInterfaceDescriptor(
static Register registers[] = { a0 };
descriptor->register_param_count_ = 1;
descriptor->register_params_ = registers;
- descriptor->stack_parameter_count_ = NULL;
descriptor->deoptimization_handler_ = NULL;
}
@@ -92,7 +88,6 @@ void KeyedLoadFieldStub::InitializeInterfaceDescriptor(
static Register registers[] = { a1 };
descriptor->register_param_count_ = 1;
descriptor->register_params_ = registers;
- descriptor->stack_parameter_count_ = NULL;
descriptor->deoptimization_handler_ = NULL;
}
@@ -128,8 +123,8 @@ void CompareNilICStub::InitializeInterfaceDescriptor(
descriptor->register_params_ = registers;
descriptor->deoptimization_handler_ =
FUNCTION_ADDR(CompareNilIC_Miss);
- descriptor->miss_handler_ =
- ExternalReference(IC_Utility(IC::kCompareNilIC_Miss), isolate);
+ descriptor->SetMissHandler(
+ ExternalReference(IC_Utility(IC::kCompareNilIC_Miss), isolate));
}
@@ -151,7 +146,29 @@ static void InitializeArrayConstructorDescriptor(
descriptor->register_params_ = registers;
descriptor->function_mode_ = JS_FUNCTION_STUB_MODE;
descriptor->deoptimization_handler_ =
- FUNCTION_ADDR(ArrayConstructor_StubFailure);
+ Runtime::FunctionForId(Runtime::kArrayConstructor)->entry;
+}
+
+
+static void InitializeInternalArrayConstructorDescriptor(
+ Isolate* isolate,
+ CodeStubInterfaceDescriptor* descriptor,
+ int constant_stack_parameter_count) {
+ // register state
+ // a0 -- number of arguments
+ // a1 -- constructor function
+ static Register registers[] = { a1 };
+ descriptor->register_param_count_ = 1;
+
+ if (constant_stack_parameter_count != 0) {
+ // Stack param count covers the constructor pointer and the single argument.
+ descriptor->stack_parameter_count_ = &a0;
+ }
+ descriptor->hint_stack_parameter_count_ = constant_stack_parameter_count;
+ descriptor->register_params_ = registers;
+ descriptor->function_mode_ = JS_FUNCTION_STUB_MODE;
+ descriptor->deoptimization_handler_ =
+ Runtime::FunctionForId(Runtime::kInternalArrayConstructor)->entry;
}
@@ -176,6 +193,40 @@ void ArrayNArgumentsConstructorStub::InitializeInterfaceDescriptor(
}
+void ToBooleanStub::InitializeInterfaceDescriptor(
+ Isolate* isolate,
+ CodeStubInterfaceDescriptor* descriptor) {
+ static Register registers[] = { a0 };
+ descriptor->register_param_count_ = 1;
+ descriptor->register_params_ = registers;
+ descriptor->deoptimization_handler_ =
+ FUNCTION_ADDR(ToBooleanIC_Miss);
+ descriptor->SetMissHandler(
+ ExternalReference(IC_Utility(IC::kToBooleanIC_Miss), isolate));
+}
+
+
+void InternalArrayNoArgumentConstructorStub::InitializeInterfaceDescriptor(
+ Isolate* isolate,
+ CodeStubInterfaceDescriptor* descriptor) {
+ InitializeInternalArrayConstructorDescriptor(isolate, descriptor, 0);
+}
+
+
+void InternalArraySingleArgumentConstructorStub::InitializeInterfaceDescriptor(
+ Isolate* isolate,
+ CodeStubInterfaceDescriptor* descriptor) {
+ InitializeInternalArrayConstructorDescriptor(isolate, descriptor, 1);
+}
+
+
+void InternalArrayNArgumentsConstructorStub::InitializeInterfaceDescriptor(
+ Isolate* isolate,
+ CodeStubInterfaceDescriptor* descriptor) {
+ InitializeInternalArrayConstructorDescriptor(isolate, descriptor, -1);
+}
+
+
#define __ ACCESS_MASM(masm)
static void EmitIdenticalObjectComparison(MacroAssembler* masm,
@@ -218,7 +269,7 @@ void HydrogenCodeStub::GenerateLightweightMiss(MacroAssembler* masm) {
for (int i = 0; i < param_count; ++i) {
__ push(descriptor->register_params_[i]);
}
- ExternalReference miss = descriptor->miss_handler_;
+ ExternalReference miss = descriptor->miss_handler();
__ CallExternalReference(miss, descriptor->register_param_count_);
}
@@ -297,8 +348,8 @@ void FastNewClosureStub::Generate(MacroAssembler* masm) {
__ Addu(a3, a3, Operand(Code::kHeaderSize - kHeapObjectTag));
// Return result. The argument function info has been popped already.
+ __ Ret(USE_DELAY_SLOT);
__ sw(a3, FieldMemOperand(v0, JSFunction::kCodeEntryOffset));
- __ Ret();
__ bind(&check_optimized);
@@ -918,9 +969,9 @@ void WriteInt32ToHeapNumberStub::Generate(MacroAssembler* masm) {
__ sw(scratch_, FieldMemOperand(the_heap_number_,
HeapNumber::kExponentOffset));
__ sll(scratch_, the_int_, 32 - shift_distance);
+ __ Ret(USE_DELAY_SLOT);
__ sw(scratch_, FieldMemOperand(the_heap_number_,
HeapNumber::kMantissaOffset));
- __ Ret();
__ bind(&max_negative_int);
// The max negative int32 is stored as a positive number in the mantissa of
@@ -932,9 +983,9 @@ void WriteInt32ToHeapNumberStub::Generate(MacroAssembler* masm) {
__ sw(scratch_,
FieldMemOperand(the_heap_number_, HeapNumber::kExponentOffset));
__ mov(scratch_, zero_reg);
+ __ Ret(USE_DELAY_SLOT);
__ sw(scratch_,
FieldMemOperand(the_heap_number_, HeapNumber::kMantissaOffset));
- __ Ret();
}
@@ -952,7 +1003,7 @@ static void EmitIdenticalObjectComparison(MacroAssembler* masm,
__ li(exp_mask_reg, Operand(HeapNumber::kExponentMask));
- // Test for NaN. Sadly, we can't just compare to factory->nan_value(),
+ // Test for NaN. Sadly, we can't just compare to Factory::nan_value(),
// so we do the second best thing - test it ourselves.
// They are both equal and they are not both Smis so both of them are not
// Smis. If it's not a heap number, then return equal.
@@ -972,6 +1023,8 @@ static void EmitIdenticalObjectComparison(MacroAssembler* masm,
__ Branch(&return_equal, ne, t4, Operand(ODDBALL_TYPE));
__ LoadRoot(t2, Heap::kUndefinedValueRootIndex);
__ Branch(&return_equal, ne, a0, Operand(t2));
+ ASSERT(is_int16(GREATER) && is_int16(LESS));
+ __ Ret(USE_DELAY_SLOT);
if (cc == le) {
// undefined <= undefined should fail.
__ li(v0, Operand(GREATER));
@@ -979,13 +1032,13 @@ static void EmitIdenticalObjectComparison(MacroAssembler* masm,
// undefined >= undefined should fail.
__ li(v0, Operand(LESS));
}
- __ Ret();
}
}
}
__ bind(&return_equal);
-
+ ASSERT(is_int16(GREATER) && is_int16(LESS));
+ __ Ret(USE_DELAY_SLOT);
if (cc == less) {
__ li(v0, Operand(GREATER)); // Things aren't less than themselves.
} else if (cc == greater) {
@@ -993,7 +1046,6 @@ static void EmitIdenticalObjectComparison(MacroAssembler* masm,
} else {
__ mov(v0, zero_reg); // Things are <=, >=, ==, === themselves.
}
- __ Ret();
// For less and greater we don't have to check for NaN since the result of
// x < x is false regardless. For the others here is some code to check
@@ -1024,13 +1076,14 @@ static void EmitIdenticalObjectComparison(MacroAssembler* masm,
if (cc != eq) {
// All-zero means Infinity means equal.
__ Ret(eq, v0, Operand(zero_reg));
+ ASSERT(is_int16(GREATER) && is_int16(LESS));
+ __ Ret(USE_DELAY_SLOT);
if (cc == le) {
__ li(v0, Operand(GREATER)); // NaN <= NaN should fail.
} else {
__ li(v0, Operand(LESS)); // NaN >= NaN should fail.
}
}
- __ Ret();
}
// No fall through here.
@@ -1405,12 +1458,14 @@ void ICCompareStub::GenerateGeneric(MacroAssembler* masm) {
__ bind(&nan);
// NaN comparisons always fail.
// Load whatever we need in v0 to make the comparison fail.
+ ASSERT(is_int16(GREATER) && is_int16(LESS));
+ __ Ret(USE_DELAY_SLOT);
if (cc == lt || cc == le) {
__ li(v0, Operand(GREATER));
} else {
__ li(v0, Operand(LESS));
}
- __ Ret();
+
__ bind(&not_smis);
// At this point we know we are dealing with two different objects,
@@ -1500,116 +1555,6 @@ void ICCompareStub::GenerateGeneric(MacroAssembler* masm) {
}
-// The stub expects its argument in the tos_ register and returns its result in
-// it, too: zero for false, and a non-zero value for true.
-void ToBooleanStub::Generate(MacroAssembler* masm) {
- Label patch;
- const Register map = t5.is(tos_) ? t3 : t5;
-
- // undefined -> false.
- CheckOddball(masm, UNDEFINED, Heap::kUndefinedValueRootIndex, false);
-
- // Boolean -> its value.
- CheckOddball(masm, BOOLEAN, Heap::kFalseValueRootIndex, false);
- CheckOddball(masm, BOOLEAN, Heap::kTrueValueRootIndex, true);
-
- // 'null' -> false.
- CheckOddball(masm, NULL_TYPE, Heap::kNullValueRootIndex, false);
-
- if (types_.Contains(SMI)) {
- // Smis: 0 -> false, all other -> true
- __ And(at, tos_, kSmiTagMask);
- // tos_ contains the correct return value already
- __ Ret(eq, at, Operand(zero_reg));
- } else if (types_.NeedsMap()) {
- // If we need a map later and have a Smi -> patch.
- __ JumpIfSmi(tos_, &patch);
- }
-
- if (types_.NeedsMap()) {
- __ lw(map, FieldMemOperand(tos_, HeapObject::kMapOffset));
-
- if (types_.CanBeUndetectable()) {
- __ lbu(at, FieldMemOperand(map, Map::kBitFieldOffset));
- __ And(at, at, Operand(1 << Map::kIsUndetectable));
- // Undetectable -> false.
- __ Movn(tos_, zero_reg, at);
- __ Ret(ne, at, Operand(zero_reg));
- }
- }
-
- if (types_.Contains(SPEC_OBJECT)) {
- // Spec object -> true.
- __ lbu(at, FieldMemOperand(map, Map::kInstanceTypeOffset));
- // tos_ contains the correct non-zero return value already.
- __ Ret(ge, at, Operand(FIRST_SPEC_OBJECT_TYPE));
- }
-
- if (types_.Contains(STRING)) {
- // String value -> false iff empty.
- __ lbu(at, FieldMemOperand(map, Map::kInstanceTypeOffset));
- Label skip;
- __ Branch(&skip, ge, at, Operand(FIRST_NONSTRING_TYPE));
- __ Ret(USE_DELAY_SLOT); // the string length is OK as the return value
- __ lw(tos_, FieldMemOperand(tos_, String::kLengthOffset));
- __ bind(&skip);
- }
-
- if (types_.Contains(HEAP_NUMBER)) {
- // Heap number -> false iff +0, -0, or NaN.
- Label not_heap_number;
- __ LoadRoot(at, Heap::kHeapNumberMapRootIndex);
- __ Branch(&not_heap_number, ne, map, Operand(at));
- Label zero_or_nan, number;
- __ ldc1(f2, FieldMemOperand(tos_, HeapNumber::kValueOffset));
- __ BranchF(&number, &zero_or_nan, ne, f2, kDoubleRegZero);
- // "tos_" is a register, and contains a non zero value by default.
- // Hence we only need to overwrite "tos_" with zero to return false for
- // FP_ZERO or FP_NAN cases. Otherwise, by default it returns true.
- __ bind(&zero_or_nan);
- __ mov(tos_, zero_reg);
- __ bind(&number);
- __ Ret();
- __ bind(&not_heap_number);
- }
-
- __ bind(&patch);
- GenerateTypeTransition(masm);
-}
-
-
-void ToBooleanStub::CheckOddball(MacroAssembler* masm,
- Type type,
- Heap::RootListIndex value,
- bool result) {
- if (types_.Contains(type)) {
- // If we see an expected oddball, return its ToBoolean value tos_.
- __ LoadRoot(at, value);
- __ Subu(at, at, tos_); // This is a check for equality for the movz below.
- // The value of a root is never NULL, so we can avoid loading a non-null
- // value into tos_ when we want to return 'true'.
- if (!result) {
- __ Movz(tos_, zero_reg, at);
- }
- __ Ret(eq, at, Operand(zero_reg));
- }
-}
-
-
-void ToBooleanStub::GenerateTypeTransition(MacroAssembler* masm) {
- __ Move(a3, tos_);
- __ li(a2, Operand(Smi::FromInt(tos_.code())));
- __ li(a1, Operand(Smi::FromInt(types_.ToByte())));
- __ Push(a3, a2, a1);
- // Patch the caller to an appropriate specialized stub and return the
- // operation result to the caller of the stub.
- __ TailCallExternalReference(
- ExternalReference(IC_Utility(IC::kToBoolean_Patch), masm->isolate()),
- 3,
- 1);
-}
-
-
void StoreBufferOverflowStub::Generate(MacroAssembler* masm) {
// We don't allow a GC during a store buffer overflow so there is no need to
// store the registers in any particular way, but we do have to store and
@@ -1784,6 +1729,7 @@ void UnaryOpStub::GenerateHeapNumberCodeSub(MacroAssembler* masm,
if (mode_ == UNARY_OVERWRITE) {
__ lw(a2, FieldMemOperand(a0, HeapNumber::kExponentOffset));
__ Xor(a2, a2, Operand(HeapNumber::kSignMask)); // Flip sign.
+ __ Ret(USE_DELAY_SLOT);
__ sw(a2, FieldMemOperand(a0, HeapNumber::kExponentOffset));
} else {
Label slow_allocate_heapnumber, heapnumber_allocated;
@@ -1805,9 +1751,9 @@ void UnaryOpStub::GenerateHeapNumberCodeSub(MacroAssembler* masm,
__ sw(a3, FieldMemOperand(a1, HeapNumber::kMantissaOffset));
__ Xor(a2, a2, Operand(HeapNumber::kSignMask)); // Flip sign.
__ sw(a2, FieldMemOperand(a1, HeapNumber::kExponentOffset));
+ __ Ret(USE_DELAY_SLOT);
__ mov(v0, a1);
}
- __ Ret();
}
@@ -1827,8 +1773,8 @@ void UnaryOpStub::GenerateHeapNumberCodeBitNot(
__ Branch(&try_float, lt, a2, Operand(zero_reg));
// Tag the result as a smi and we're done.
+ __ Ret(USE_DELAY_SLOT); // SmiTag emits one instruction in delay slot.
__ SmiTag(v0, a1);
- __ Ret();
// Try to store the result in a heap number.
__ bind(&try_float);
@@ -2027,8 +1973,8 @@ void BinaryOpStub_GenerateSmiSmiOperation(MacroAssembler* masm,
// Check that the signed result fits in a Smi.
__ Addu(scratch2, scratch1, Operand(0x40000000));
__ Branch(&not_smi_result, lt, scratch2, Operand(zero_reg));
+ __ Ret(USE_DELAY_SLOT); // SmiTag emits one instruction in delay slot.
__ SmiTag(v0, scratch1);
- __ Ret();
}
break;
case Token::MOD: {
@@ -2050,8 +1996,8 @@ void BinaryOpStub_GenerateSmiSmiOperation(MacroAssembler* masm,
// Check that the signed result fits in a Smi.
__ Addu(scratch1, scratch2, Operand(0x40000000));
__ Branch(&not_smi_result, lt, scratch1, Operand(zero_reg));
+ __ Ret(USE_DELAY_SLOT); // SmiTag emits one instruction in delay slot.
__ SmiTag(v0, scratch2);
- __ Ret();
}
break;
case Token::BIT_OR:
@@ -2085,8 +2031,8 @@ void BinaryOpStub_GenerateSmiSmiOperation(MacroAssembler* masm,
__ And(scratch1, v0, Operand(0xc0000000));
__ Branch(&not_smi_result, ne, scratch1, Operand(zero_reg));
// Smi tag result.
+ __ Ret(USE_DELAY_SLOT); // SmiTag emits one instruction in delay slot.
__ SmiTag(v0);
- __ Ret();
break;
case Token::SHL:
// Remove tags from operands.
@@ -2096,8 +2042,8 @@ void BinaryOpStub_GenerateSmiSmiOperation(MacroAssembler* masm,
// Check that the signed result fits in a Smi.
__ Addu(scratch2, scratch1, Operand(0x40000000));
__ Branch(&not_smi_result, lt, scratch2, Operand(zero_reg));
- __ SmiTag(v0, scratch1);
- __ Ret();
+ __ Ret(USE_DELAY_SLOT);
+ __ SmiTag(v0, scratch1); // SmiTag emits one instruction in delay slot.
break;
default:
UNREACHABLE();
@@ -2299,8 +2245,8 @@ void BinaryOpStub_GenerateFPOperation(MacroAssembler* masm,
// Check that the *signed* result fits in a smi.
__ Addu(a3, a2, Operand(0x40000000));
__ Branch(&result_not_a_smi, lt, a3, Operand(zero_reg));
+ __ Ret(USE_DELAY_SLOT); // SmiTag emits one instruction in delay slot.
__ SmiTag(v0, a2);
- __ Ret();
// Allocate new heap number for result.
__ bind(&result_not_a_smi);
@@ -2378,7 +2324,16 @@ void BinaryOpStub_GenerateSmiCode(
void BinaryOpStub::GenerateSmiStub(MacroAssembler* masm) {
- Label not_smis, call_runtime;
+ Label right_arg_changed, call_runtime;
+
+ if (op_ == Token::MOD && has_fixed_right_arg_) {
+ // It is guaranteed that the value will fit into a Smi, because if it

+ // didn't, we wouldn't be here; see BinaryOp_Patch.
+ __ Branch(&right_arg_changed,
+ ne,
+ a0,
+ Operand(Smi::FromInt(fixed_right_arg_value())));
+ }
if (result_type_ == BinaryOpIC::UNINITIALIZED ||
result_type_ == BinaryOpIC::SMI) {
@@ -2395,6 +2350,7 @@ void BinaryOpStub::GenerateSmiStub(MacroAssembler* masm) {
// Code falls through if the result is not returned as either a smi or heap
// number.
+ __ bind(&right_arg_changed);
GenerateTypeTransition(masm);
__ bind(&call_runtime);
@@ -2569,8 +2525,8 @@ void BinaryOpStub::GenerateInt32Stub(MacroAssembler* masm) {
__ bind(&not_zero);
// Tag the result and return.
- __ SmiTag(v0, scratch1);
- __ Ret();
+ __ Ret(USE_DELAY_SLOT);
+ __ SmiTag(v0, scratch1); // SmiTag emits one instruction.
} else {
// DIV just falls through to allocating a heap number.
}
@@ -2587,14 +2543,20 @@ void BinaryOpStub::GenerateInt32Stub(MacroAssembler* masm) {
scratch2,
&call_runtime,
mode_);
+ __ sdc1(f10,
+ FieldMemOperand(heap_number_result, HeapNumber::kValueOffset));
+ __ Ret(USE_DELAY_SLOT);
__ mov(v0, heap_number_result);
- __ sdc1(f10, FieldMemOperand(v0, HeapNumber::kValueOffset));
- __ Ret();
// A DIV operation expecting an integer result falls through
// to type transition.
} else {
+ if (has_fixed_right_arg_) {
+ __ Move(f16, fixed_right_arg_value());
+ __ BranchF(&transition, NULL, ne, f14, f16);
+ }
+
// We preserved a0 and a1 to be able to call runtime.
// Save the left value on the stack.
__ Push(t1, t0);
@@ -2704,8 +2666,8 @@ void BinaryOpStub::GenerateInt32Stub(MacroAssembler* masm) {
// If not try to return a heap number. (We know the result is an int32.)
__ Branch(&return_heap_number, lt, scratch1, Operand(zero_reg));
// Tag the result and return.
+ __ Ret(USE_DELAY_SLOT); // SmiTag emits one instruction in delay slot.
__ SmiTag(v0, a2);
- __ Ret();
__ bind(&return_heap_number);
heap_number_result = t1;
@@ -2728,9 +2690,10 @@ void BinaryOpStub::GenerateInt32Stub(MacroAssembler* masm) {
}
// Store the result.
+ __ sdc1(double_scratch,
+ FieldMemOperand(heap_number_result, HeapNumber::kValueOffset));
+ __ Ret(USE_DELAY_SLOT);
__ mov(v0, heap_number_result);
- __ sdc1(double_scratch, FieldMemOperand(v0, HeapNumber::kValueOffset));
- __ Ret();
break;
}
@@ -4168,8 +4131,8 @@ void ArgumentsAccessStub::GenerateReadElement(MacroAssembler* masm) {
__ subu(a3, a0, a1);
__ sll(t3, a3, kPointerSizeLog2 - kSmiTagSize);
__ Addu(a3, fp, Operand(t3));
+ __ Ret(USE_DELAY_SLOT);
__ lw(v0, MemOperand(a3, kDisplacement));
- __ Ret();
// Arguments adaptor case: Check index (a1) against actual arguments
// limit found in the arguments adaptor frame. Use unsigned
@@ -4182,8 +4145,8 @@ void ArgumentsAccessStub::GenerateReadElement(MacroAssembler* masm) {
__ subu(a3, a0, a1);
__ sll(t3, a3, kPointerSizeLog2 - kSmiTagSize);
__ Addu(a3, a2, Operand(t3));
+ __ Ret(USE_DELAY_SLOT);
__ lw(v0, MemOperand(a3, kDisplacement));
- __ Ret();
// Slow-case: Handle non-smi or out-of-bounds access to arguments
// by calling the runtime system.
@@ -5072,7 +5035,6 @@ static void GenerateRecordCallTargetNoArray(MacroAssembler* masm) {
// megamorphic.
// a1 : the function to call
// a2 : cache cell for call target
- ASSERT(!FLAG_optimize_constructed_arrays);
Label done;
ASSERT_EQ(*TypeFeedbackCells::MegamorphicSentinel(masm->isolate()),
@@ -6047,16 +6009,18 @@ void StringCompareStub::GenerateFlatAsciiStringEquals(MacroAssembler* masm,
__ lw(scratch2, FieldMemOperand(right, String::kLengthOffset));
__ Branch(&check_zero_length, eq, length, Operand(scratch2));
__ bind(&strings_not_equal);
+ ASSERT(is_int16(NOT_EQUAL));
+ __ Ret(USE_DELAY_SLOT);
__ li(v0, Operand(Smi::FromInt(NOT_EQUAL)));
- __ Ret();
// Check if the length is zero.
Label compare_chars;
__ bind(&check_zero_length);
STATIC_ASSERT(kSmiTag == 0);
__ Branch(&compare_chars, ne, length, Operand(zero_reg));
+ ASSERT(is_int16(EQUAL));
+ __ Ret(USE_DELAY_SLOT);
__ li(v0, Operand(Smi::FromInt(EQUAL)));
- __ Ret();
// Compare characters.
__ bind(&compare_chars);
@@ -6066,8 +6030,8 @@ void StringCompareStub::GenerateFlatAsciiStringEquals(MacroAssembler* masm,
&strings_not_equal);
// Characters are equal.
+ __ Ret(USE_DELAY_SLOT);
__ li(v0, Operand(Smi::FromInt(EQUAL)));
- __ Ret();
}
@@ -6585,14 +6549,15 @@ void ICCompareStub::GenerateSmis(MacroAssembler* masm) {
if (GetCondition() == eq) {
// For equality we do not care about the sign of the result.
+ __ Ret(USE_DELAY_SLOT);
__ Subu(v0, a0, a1);
} else {
// Untag before subtracting to avoid handling overflow.
__ SmiUntag(a1);
__ SmiUntag(a0);
+ __ Ret(USE_DELAY_SLOT);
__ Subu(v0, a1, a0);
}
- __ Ret();
__ bind(&miss);
GenerateMiss(masm);
@@ -6653,16 +6618,17 @@ void ICCompareStub::GenerateNumbers(MacroAssembler* masm) {
__ BranchF(&fpu_lt, NULL, lt, f0, f2);
// Otherwise it's greater, so just fall thru, and return.
+ ASSERT(is_int16(GREATER) && is_int16(EQUAL) && is_int16(LESS));
+ __ Ret(USE_DELAY_SLOT);
__ li(v0, Operand(GREATER));
- __ Ret();
__ bind(&fpu_eq);
+ __ Ret(USE_DELAY_SLOT);
__ li(v0, Operand(EQUAL));
- __ Ret();
__ bind(&fpu_lt);
+ __ Ret(USE_DELAY_SLOT);
__ li(v0, Operand(LESS));
- __ Ret();
__ bind(&unordered);
__ bind(&generic_stub);
@@ -6721,8 +6687,9 @@ void ICCompareStub::GenerateInternalizedStrings(MacroAssembler* masm) {
__ mov(v0, right);
// Internalized strings are compared by identity.
__ Ret(ne, left, Operand(right));
+ ASSERT(is_int16(EQUAL));
+ __ Ret(USE_DELAY_SLOT);
__ li(v0, Operand(Smi::FromInt(EQUAL)));
- __ Ret();
__ bind(&miss);
GenerateMiss(masm);
@@ -7606,8 +7573,8 @@ void StubFailureTrampolineStub::Generate(MacroAssembler* masm) {
}
masm->LeaveFrame(StackFrame::STUB_FAILURE_TRAMPOLINE);
__ sll(a1, a1, kPointerSizeLog2);
+ __ Ret(USE_DELAY_SLOT);
__ Addu(sp, sp, a1);
- __ Ret();
}
@@ -7750,6 +7717,10 @@ static void ArrayConstructorStubAheadOfTimeHelper(Isolate* isolate) {
ElementsKind kind = GetFastElementsKindFromSequenceIndex(i);
T stub(kind);
stub.GetCode(isolate)->set_is_pregenerated(true);
+ if (AllocationSiteInfo::GetMode(kind) != DONT_TRACK_ALLOCATION_SITE) {
+ T stub1(kind, true);
+ stub1.GetCode(isolate)->set_is_pregenerated(true);
+ }
}
}
@@ -7764,6 +7735,21 @@ void ArrayConstructorStubBase::GenerateStubsAheadOfTime(Isolate* isolate) {
}
+void InternalArrayConstructorStubBase::GenerateStubsAheadOfTime(
+ Isolate* isolate) {
+ ElementsKind kinds[2] = { FAST_ELEMENTS, FAST_HOLEY_ELEMENTS };
+ for (int i = 0; i < 2; i++) {
+ // For internal arrays we only need a few things.
+ InternalArrayNoArgumentConstructorStub stubh1(kinds[i]);
+ stubh1.GetCode(isolate)->set_is_pregenerated(true);
+ InternalArraySingleArgumentConstructorStub stubh2(kinds[i]);
+ stubh2.GetCode(isolate)->set_is_pregenerated(true);
+ InternalArrayNArgumentsConstructorStub stubh3(kinds[i]);
+ stubh3.GetCode(isolate)->set_is_pregenerated(true);
+ }
+}
+
+
void ArrayConstructorStub::Generate(MacroAssembler* masm) {
// ----------- S t a t e -------------
// -- a0 : argc (only if argument_count_ == ANY)
@@ -7849,6 +7835,102 @@ void ArrayConstructorStub::Generate(MacroAssembler* masm) {
}
+void InternalArrayConstructorStub::GenerateCase(
+ MacroAssembler* masm, ElementsKind kind) {
+ Label not_zero_case, not_one_case;
+ Label normal_sequence;
+
+ __ Branch(&not_zero_case, ne, a0, Operand(zero_reg));
+ InternalArrayNoArgumentConstructorStub stub0(kind);
+ __ TailCallStub(&stub0);
+
+ __ bind(&not_zero_case);
+ __ Branch(&not_one_case, gt, a0, Operand(1));
+
+ if (IsFastPackedElementsKind(kind)) {
+ // We might need to create a holey array;
+ // look at the first argument.
+ __ lw(at, MemOperand(sp, 0));
+ __ Branch(&normal_sequence, eq, at, Operand(zero_reg));
+
+ InternalArraySingleArgumentConstructorStub
+ stub1_holey(GetHoleyElementsKind(kind));
+ __ TailCallStub(&stub1_holey);
+ }
+
+ __ bind(&normal_sequence);
+ InternalArraySingleArgumentConstructorStub stub1(kind);
+ __ TailCallStub(&stub1);
+
+ __ bind(&not_one_case);
+ InternalArrayNArgumentsConstructorStub stubN(kind);
+ __ TailCallStub(&stubN);
+}
+
+
+void InternalArrayConstructorStub::Generate(MacroAssembler* masm) {
+ // ----------- S t a t e -------------
+ // -- a0 : argc
+ // -- a1 : constructor
+ // -- sp[0] : return address
+ // -- sp[4] : last argument
+ // -----------------------------------
+
+ if (FLAG_debug_code) {
+ // The array construct code is only set for the global and natives
+ // builtin Array functions which always have maps.
+
+ // Initial map for the builtin Array function should be a map.
+ __ lw(a3, FieldMemOperand(a1, JSFunction::kPrototypeOrInitialMapOffset));
+ // Will both indicate a NULL and a Smi.
+ __ And(at, a3, Operand(kSmiTagMask));
+ __ Assert(ne, "Unexpected initial map for Array function",
+ at, Operand(zero_reg));
+ __ GetObjectType(a3, a3, t0);
+ __ Assert(eq, "Unexpected initial map for Array function",
+ t0, Operand(MAP_TYPE));
+ }
+
+ if (FLAG_optimize_constructed_arrays) {
+ // Figure out the right elements kind.
+ __ lw(a3, FieldMemOperand(a1, JSFunction::kPrototypeOrInitialMapOffset));
+
+ // Load the map's "bit field 2" into a3. We only need the first byte,
+ // but the following bit field extraction takes care of that anyway.
+ __ lbu(a3, FieldMemOperand(a3, Map::kBitField2Offset));
+ // Retrieve elements_kind from bit field 2.
+ __ Ext(a3, a3, Map::kElementsKindShift, Map::kElementsKindBitCount);
+
+ if (FLAG_debug_code) {
+ Label done;
+ __ Branch(&done, eq, a3, Operand(FAST_ELEMENTS));
+ __ Assert(
+ eq, "Invalid ElementsKind for InternalArray or InternalPackedArray",
+ a3, Operand(FAST_HOLEY_ELEMENTS));
+ __ bind(&done);
+ }
+
+ Label fast_elements_case;
+ __ Branch(&fast_elements_case, eq, a3, Operand(FAST_ELEMENTS));
+ GenerateCase(masm, FAST_HOLEY_ELEMENTS);
+
+ __ bind(&fast_elements_case);
+ GenerateCase(masm, FAST_ELEMENTS);
+ } else {
+ Label generic_constructor;
+ // Run the native code for the Array function called as constructor.
+ ArrayNativeCode(masm, &generic_constructor);
+
+ // Jump to the generic construct code in case the specialized code cannot
+ // handle the construction.
+ __ bind(&generic_constructor);
+ Handle<Code> generic_construct_stub =
+ masm->isolate()->builtins()->JSConstructStubGeneric();
+ __ Jump(generic_construct_stub, RelocInfo::CODE_TARGET);
+ }
+}
+
+
#undef __
} } // namespace v8::internal
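
GenerateCase() above dispatches between packed and holey stubs, and the single-argument packed case falls back to the holey variant when the argument is nonzero, because `new Array(n)` with n > 0 creates an array of holes. The decision, extracted into plain C++ (schematic only):

    #include <cstdio>

    // Schematic of the dispatch in InternalArrayConstructorStub::GenerateCase:
    // a packed kind constructed with a nonzero length must become holey,
    // since none of the slots are filled in yet.
    enum class ElementsKind { FAST_ELEMENTS, FAST_HOLEY_ELEMENTS };

    ElementsKind ChooseKind(ElementsKind requested, int argc, int first_arg) {
      if (requested == ElementsKind::FAST_ELEMENTS &&
          argc == 1 && first_arg != 0) {
        return ElementsKind::FAST_HOLEY_ELEMENTS;  // new Array(n): holes
      }
      return requested;
    }

    int main() {
      // new Array(0) stays packed; new Array(3) becomes holey.
      std::printf("%d\n", ChooseKind(ElementsKind::FAST_ELEMENTS, 1, 0)
                              == ElementsKind::FAST_ELEMENTS);
      std::printf("%d\n", ChooseKind(ElementsKind::FAST_ELEMENTS, 1, 3)
                              == ElementsKind::FAST_HOLEY_ELEMENTS);
    }
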
diff --git a/deps/v8/src/mips/codegen-mips.cc b/deps/v8/src/mips/codegen-mips.cc
index e874a0872a..72eb00bca4 100644
--- a/deps/v8/src/mips/codegen-mips.cc
+++ b/deps/v8/src/mips/codegen-mips.cc
@@ -514,50 +514,6 @@ void StringCharLoadGenerator::Generate(MacroAssembler* masm,
}
-void SeqStringSetCharGenerator::Generate(MacroAssembler* masm,
- String::Encoding encoding,
- Register string,
- Register index,
- Register value) {
- if (FLAG_debug_code) {
- __ And(at, index, Operand(kSmiTagMask));
- __ Check(eq, "Non-smi index", at, Operand(zero_reg));
- __ And(at, value, Operand(kSmiTagMask));
- __ Check(eq, "Non-smi value", at, Operand(zero_reg));
-
- __ lw(at, FieldMemOperand(string, String::kLengthOffset));
- __ Check(lt, "Index is too large", index, Operand(at));
-
- __ Check(ge, "Index is negative", index, Operand(zero_reg));
-
- __ lw(at, FieldMemOperand(string, HeapObject::kMapOffset));
- __ lbu(at, FieldMemOperand(at, Map::kInstanceTypeOffset));
-
- __ And(at, at, Operand(kStringRepresentationMask | kStringEncodingMask));
- static const uint32_t one_byte_seq_type = kSeqStringTag | kOneByteStringTag;
- static const uint32_t two_byte_seq_type = kSeqStringTag | kTwoByteStringTag;
- __ Subu(at, at, Operand(encoding == String::ONE_BYTE_ENCODING
- ? one_byte_seq_type : two_byte_seq_type));
- __ Check(eq, "Unexpected string type", at, Operand(zero_reg));
- }
-
- __ Addu(at,
- string,
- Operand(SeqString::kHeaderSize - kHeapObjectTag));
- __ SmiUntag(value);
- STATIC_ASSERT(kSmiTagSize == 1 && kSmiTag == 0);
- if (encoding == String::ONE_BYTE_ENCODING) {
- __ SmiUntag(index);
- __ Addu(at, at, index);
- __ sb(value, MemOperand(at));
- } else {
- // No need to untag a smi for two-byte addressing.
- __ Addu(at, at, index);
- __ sh(value, MemOperand(at));
- }
-}
-
-
static MemOperand ExpConstant(int index, Register base) {
return MemOperand(base, index * kDoubleSize);
}
diff --git a/deps/v8/src/mips/codegen-mips.h b/deps/v8/src/mips/codegen-mips.h
index d429443a88..240b02ce44 100644
--- a/deps/v8/src/mips/codegen-mips.h
+++ b/deps/v8/src/mips/codegen-mips.h
@@ -53,7 +53,7 @@ class CodeGenerator: public AstVisitor {
static bool MakeCode(CompilationInfo* info);
// Printing of AST, etc. as requested by flags.
- static void MakeCodePrologue(CompilationInfo* info);
+ static void MakeCodePrologue(CompilationInfo* info, const char* kind);
// Allocate and install the code.
static Handle<Code> MakeCodeEpilogue(MacroAssembler* masm,
diff --git a/deps/v8/src/mips/deoptimizer-mips.cc b/deps/v8/src/mips/deoptimizer-mips.cc
index 31fad2bc45..6978cde52b 100644
--- a/deps/v8/src/mips/deoptimizer-mips.cc
+++ b/deps/v8/src/mips/deoptimizer-mips.cc
@@ -47,7 +47,7 @@ void Deoptimizer::DeoptimizeFunctionWithPreparedFunctionList(
JSFunction* function) {
Isolate* isolate = function->GetIsolate();
HandleScope scope(isolate);
- AssertNoAllocation no_allocation;
+ DisallowHeapAllocation nha;
ASSERT(function->IsOptimized());
ASSERT(function->FunctionsInFunctionListShareSameCode());
diff --git a/deps/v8/src/mips/full-codegen-mips.cc b/deps/v8/src/mips/full-codegen-mips.cc
index bdfa43b2e7..7368eada62 100644
--- a/deps/v8/src/mips/full-codegen-mips.cc
+++ b/deps/v8/src/mips/full-codegen-mips.cc
@@ -677,8 +677,9 @@ void FullCodeGenerator::DoTest(Expression* condition,
Label* if_true,
Label* if_false,
Label* fall_through) {
- ToBooleanStub stub(result_register());
- __ CallStub(&stub, condition->test_id());
+ __ mov(a0, result_register());
+ Handle<Code> ic = ToBooleanStub::GetUninitialized(isolate());
+ CallIC(ic, RelocInfo::CODE_TARGET, condition->test_id());
__ mov(at, zero_reg);
Split(ne, v0, Operand(at), if_true, if_false, fall_through);
}
@@ -1083,9 +1084,8 @@ void FullCodeGenerator::VisitForInStatement(ForInStatement* stmt) {
ForIn loop_statement(this, stmt);
increment_loop_depth();
- // Get the object to enumerate over. Both SpiderMonkey and JSC
- // ignore null and undefined in contrast to the specification; see
- // ECMA-262 section 12.6.4.
+ // Get the object to enumerate over. If the object is null or undefined, skip
+ // over the loop. See ECMA-262 version 5, section 12.6.4.
VisitForAccumulatorValue(stmt->enumerable());
__ mov(a0, result_register()); // Result as param to InvokeBuiltin below.
__ LoadRoot(at, Heap::kUndefinedValueRootIndex);
@@ -1259,6 +1259,67 @@ void FullCodeGenerator::VisitForInStatement(ForInStatement* stmt) {
}
+void FullCodeGenerator::VisitForOfStatement(ForOfStatement* stmt) {
+ Comment cmnt(masm_, "[ ForOfStatement");
+ SetStatementPosition(stmt);
+
+ Iteration loop_statement(this, stmt);
+ increment_loop_depth();
+
+ // var iterator = iterable[@@iterator]()
+ VisitForAccumulatorValue(stmt->assign_iterator());
+ __ mov(a0, v0);
+
+ // As with for-in, skip the loop if the iterator is null or undefined.
+ __ LoadRoot(at, Heap::kUndefinedValueRootIndex);
+ __ Branch(loop_statement.break_label(), eq, a0, Operand(at));
+ __ LoadRoot(at, Heap::kNullValueRootIndex);
+ __ Branch(loop_statement.break_label(), eq, a0, Operand(at));
+
+ // Convert the iterator to a JS object.
+ Label convert, done_convert;
+ __ JumpIfSmi(a0, &convert);
+ __ GetObjectType(a0, a1, a1);
+ __ Branch(&done_convert, ge, a1, Operand(FIRST_SPEC_OBJECT_TYPE));
+ __ bind(&convert);
+ __ push(a0);
+ __ InvokeBuiltin(Builtins::TO_OBJECT, CALL_FUNCTION);
+ __ mov(a0, v0);
+ __ bind(&done_convert);
+ __ push(a0);
+
+ // Loop entry.
+ __ bind(loop_statement.continue_label());
+
+ // result = iterator.next()
+ VisitForEffect(stmt->next_result());
+
+ // if (result.done) break;
+ Label result_not_done;
+ VisitForControl(stmt->result_done(),
+ loop_statement.break_label(),
+ &result_not_done,
+ &result_not_done);
+ __ bind(&result_not_done);
+
+ // each = result.value
+ VisitForEffect(stmt->assign_each());
+
+ // Generate code for the body of the loop.
+ Visit(stmt->body());
+
+ // Check stack before looping.
+ PrepareForBailoutForId(stmt->BackEdgeId(), NO_REGISTERS);
+ EmitBackEdgeBookkeeping(stmt, loop_statement.continue_label());
+ __ jmp(loop_statement.continue_label());
+
+ // Exit and decrement the loop depth.
+ PrepareForBailoutForId(stmt->ExitId(), NO_REGISTERS);
+ __ bind(loop_statement.break_label());
+ decrement_loop_depth();
+}
+
+
void FullCodeGenerator::EmitNewClosure(Handle<SharedFunctionInfo> info,
bool pretenure) {
// Use the fast case closure allocation code that allocates in new
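
The new VisitForOfStatement above emits exactly the iteration protocol spelled out in its comments: fetch the iterator, call next() on each pass, break when result.done is true, and bind result.value before the body. The same control flow in C++, with a stand-in for the ES iterator protocol:

    #include <cstdio>

    // The loop structure VisitForOfStatement generates, written out by hand.
    struct IterResult { bool done; int value; };

    struct CountingIterator {
      int i = 0;
      int limit = 0;
      IterResult next() { return {i >= limit, i++}; }
    };

    int main() {
      CountingIterator it{0, 3};          // iterator = iterable[@@iterator]()
      for (;;) {
        IterResult result = it.next();    // result = iterator.next()
        if (result.done) break;           // if (result.done) break;
        int each = result.value;          // each = result.value
        std::printf("%d\n", each);        // loop body
      }
    }
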
@@ -1975,10 +2036,10 @@ void FullCodeGenerator::VisitYield(Yield* expr) {
// [sp + 1 * kPointerSize] iter
// [sp + 0 * kPointerSize] g
- Label l_catch, l_try, l_resume, l_send, l_call, l_loop;
+ Label l_catch, l_try, l_resume, l_next, l_call, l_loop;
// Initial send value is undefined.
__ LoadRoot(a0, Heap::kUndefinedValueRootIndex);
- __ Branch(&l_send);
+ __ Branch(&l_next);
// catch (e) { receiver = iter; f = iter.throw; arg = e; goto l_call; }
__ bind(&l_catch);
@@ -1988,12 +2049,10 @@ void FullCodeGenerator::VisitYield(Yield* expr) {
__ push(a3); // iter
__ push(a0); // exception
__ mov(a0, a3); // iter
- __ push(a0); // push LoadIC state
__ LoadRoot(a2, Heap::kthrow_stringRootIndex); // "throw"
Handle<Code> throw_ic = isolate()->builtins()->LoadIC_Initialize();
CallIC(throw_ic); // iter.throw in a0
__ mov(a0, v0);
- __ Addu(sp, sp, Operand(kPointerSize)); // drop LoadIC state
__ jmp(&l_call);
// try { received = yield result.value }
@@ -2015,18 +2074,16 @@ void FullCodeGenerator::VisitYield(Yield* expr) {
__ bind(&l_resume); // received in a0
__ PopTryHandler();
- // receiver = iter; f = iter.send; arg = received;
- __ bind(&l_send);
+ // receiver = iter; f = iter.next; arg = received;
+ __ bind(&l_next);
__ lw(a3, MemOperand(sp, 1 * kPointerSize)); // iter
__ push(a3); // iter
__ push(a0); // received
__ mov(a0, a3); // iter
- __ push(a0); // push LoadIC state
- __ LoadRoot(a2, Heap::ksend_stringRootIndex); // "send"
- Handle<Code> send_ic = isolate()->builtins()->LoadIC_Initialize();
- CallIC(send_ic); // iter.send in a0
+ __ LoadRoot(a2, Heap::knext_stringRootIndex); // "next"
+ Handle<Code> next_ic = isolate()->builtins()->LoadIC_Initialize();
+ CallIC(next_ic); // iter.next in a0
__ mov(a0, v0);
- __ Addu(sp, sp, Operand(kPointerSize)); // drop LoadIC state
// result = f.call(receiver, arg);
__ bind(&l_call);
@@ -2056,13 +2113,12 @@ void FullCodeGenerator::VisitYield(Yield* expr) {
__ pop(a1); // result
__ push(a0); // result.value
__ mov(a0, a1); // result
- __ push(a0); // push LoadIC state
__ LoadRoot(a2, Heap::kdone_stringRootIndex); // "done"
Handle<Code> done_ic = isolate()->builtins()->LoadIC_Initialize();
CallIC(done_ic); // result.done in v0
- __ Addu(sp, sp, Operand(kPointerSize)); // drop LoadIC state
- ToBooleanStub stub(v0);
- __ CallStub(&stub);
+ __ mov(a0, v0);
+ Handle<Code> bool_ic = ToBooleanStub::GetUninitialized(isolate());
+ CallIC(bool_ic);
__ Branch(&l_try, eq, v0, Operand(zero_reg));
// result.value
@@ -2131,7 +2187,7 @@ void FullCodeGenerator::EmitGeneratorResume(Expression *generator,
// If we are sending a value and there is no operand stack, we can jump back
// in directly.
- if (resume_mode == JSGeneratorObject::SEND) {
+ if (resume_mode == JSGeneratorObject::NEXT) {
Label slow_resume;
__ Branch(&slow_resume, ne, a3, Operand(zero_reg));
__ lw(a3, FieldMemOperand(t0, JSFunction::kCodeEntryOffset));
@@ -3037,7 +3093,7 @@ void FullCodeGenerator::EmitIsStringWrapperSafeForDefaultValueOf(
// string "valueOf" the result is false.
// The use of t2 to store the valueOf string assumes that it is not otherwise
// used in the loop below.
- __ li(t2, Operand(FACTORY->value_of_string()));
+ __ li(t2, Operand(isolate()->factory()->value_of_string()));
__ jmp(&entry);
__ bind(&loop);
__ lw(a3, MemOperand(t0, 0));
@@ -3445,19 +3501,56 @@ void FullCodeGenerator::EmitDateField(CallRuntime* expr) {
}
+void FullCodeGenerator::EmitSeqStringSetCharCheck(Register string,
+ Register index,
+ Register value,
+ uint32_t encoding_mask) {
+ __ And(at, index, Operand(kSmiTagMask));
+ __ Check(eq, "Non-smi index", at, Operand(zero_reg));
+ __ And(at, value, Operand(kSmiTagMask));
+ __ Check(eq, "Non-smi value", at, Operand(zero_reg));
+
+ __ lw(at, FieldMemOperand(string, String::kLengthOffset));
+ __ Check(lt, "Index is too large", index, Operand(at));
+
+ __ Check(ge, "Index is negative", index, Operand(zero_reg));
+
+ __ lw(at, FieldMemOperand(string, HeapObject::kMapOffset));
+ __ lbu(at, FieldMemOperand(at, Map::kInstanceTypeOffset));
+
+ __ And(at, at, Operand(kStringRepresentationMask | kStringEncodingMask));
+ __ Subu(at, at, Operand(encoding_mask));
+ __ Check(eq, "Unexpected string type", at, Operand(zero_reg));
+}
+
+
void FullCodeGenerator::EmitOneByteSeqStringSetChar(CallRuntime* expr) {
ZoneList<Expression*>* args = expr->arguments();
ASSERT_EQ(3, args->length());
+ Register string = v0;
+ Register index = a1;
+ Register value = a2;
+
VisitForStackValue(args->at(1)); // index
VisitForStackValue(args->at(2)); // value
- __ pop(a2);
- __ pop(a1);
+ __ pop(value);
+ __ pop(index);
VisitForAccumulatorValue(args->at(0)); // string
- static const String::Encoding encoding = String::ONE_BYTE_ENCODING;
- SeqStringSetCharGenerator::Generate(masm_, encoding, v0, a1, a2);
- context()->Plug(v0);
+ if (FLAG_debug_code) {
+ static const uint32_t one_byte_seq_type = kSeqStringTag | kOneByteStringTag;
+ EmitSeqStringSetCharCheck(string, index, value, one_byte_seq_type);
+ }
+
+ __ SmiUntag(value, value);
+ __ Addu(at,
+ string,
+ Operand(SeqOneByteString::kHeaderSize - kHeapObjectTag));
+ __ SmiUntag(index);
+ __ Addu(at, at, index);
+ __ sb(value, MemOperand(at));
+ context()->Plug(string);
}
@@ -3465,15 +3558,29 @@ void FullCodeGenerator::EmitTwoByteSeqStringSetChar(CallRuntime* expr) {
ZoneList<Expression*>* args = expr->arguments();
ASSERT_EQ(3, args->length());
+ Register string = v0;
+ Register index = a1;
+ Register value = a2;
+
VisitForStackValue(args->at(1)); // index
VisitForStackValue(args->at(2)); // value
- __ pop(a2);
- __ pop(a1);
+ __ pop(value);
+ __ pop(index);
VisitForAccumulatorValue(args->at(0)); // string
- static const String::Encoding encoding = String::TWO_BYTE_ENCODING;
- SeqStringSetCharGenerator::Generate(masm_, encoding, v0, a1, a2);
- context()->Plug(v0);
+ if (FLAG_debug_code) {
+ static const uint32_t two_byte_seq_type = kSeqStringTag | kTwoByteStringTag;
+ EmitSeqStringSetCharCheck(string, index, value, two_byte_seq_type);
+ }
+
+ __ SmiUntag(value, value);
+ __ Addu(at,
+ string,
+ Operand(SeqTwoByteString::kHeaderSize - kHeapObjectTag));
+ __ Addu(at, at, index);
+ STATIC_ASSERT(kSmiTagSize == 1 && kSmiTag == 0);
+ __ sh(value, MemOperand(at));
+ context()->Plug(string);
}
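
Both setters untag the smi value, but only the one-byte path untags the index: with a one-bit smi tag (the STATIC_ASSERT above), a tagged index already equals index * 2, which is exactly the byte offset of a two-byte element, so the two-byte path adds it directly. A quick check of that identity:

    #include <cassert>
    #include <cstdint>

    // Why the two-byte path skips SmiUntag: a smi with a 1-bit tag encodes
    // n as n << 1, which is already the byte offset of the n-th two-byte
    // (UTF-16) element. One-byte strings must untag first.
    constexpr int kSmiTagSize = 1;

    int32_t SmiEncode(int32_t n) { return n << kSmiTagSize; }

    int main() {
      int32_t index = 5;
      int32_t smi = SmiEncode(index);
      assert(smi == index * 2);               // two-byte offset for free
      assert((smi >> kSmiTagSize) == index);  // one-byte path untags
    }
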
@@ -4692,19 +4799,15 @@ void FullCodeGenerator::EmitLiteralCompareNil(CompareOperation* expr,
VisitForAccumulatorValue(sub_expr);
PrepareForBailoutBeforeSplit(expr, true, if_true, if_false);
- EqualityKind kind = expr->op() == Token::EQ_STRICT
- ? kStrictEquality : kNonStrictEquality;
__ mov(a0, result_register());
- if (kind == kStrictEquality) {
+ if (expr->op() == Token::EQ_STRICT) {
Heap::RootListIndex nil_value = nil == kNullValue ?
Heap::kNullValueRootIndex :
Heap::kUndefinedValueRootIndex;
__ LoadRoot(a1, nil_value);
Split(eq, a0, Operand(a1), if_true, if_false, fall_through);
} else {
- Handle<Code> ic = CompareNilICStub::GetUninitialized(isolate(),
- kNonStrictEquality,
- nil);
+ Handle<Code> ic = CompareNilICStub::GetUninitialized(isolate(), nil);
CallIC(ic, RelocInfo::CODE_TARGET, expr->CompareOperationFeedbackId());
Split(ne, v0, Operand(zero_reg), if_true, if_false, fall_through);
}
diff --git a/deps/v8/src/mips/ic-mips.cc b/deps/v8/src/mips/ic-mips.cc
index 8b2b3254f1..c1b4e1e056 100644
--- a/deps/v8/src/mips/ic-mips.cc
+++ b/deps/v8/src/mips/ic-mips.cc
@@ -646,15 +646,11 @@ void KeyedCallIC::GenerateNormal(MacroAssembler* masm, int argc) {
}
-// Defined in ic.cc.
-Object* LoadIC_Miss(Arguments args);
-
void LoadIC::GenerateMegamorphic(MacroAssembler* masm) {
// ----------- S t a t e -------------
// -- a2 : name
// -- ra : return address
// -- a0 : receiver
- // -- sp[0] : receiver
// -----------------------------------
// Probe the stub cache.
@@ -674,7 +670,6 @@ void LoadIC::GenerateNormal(MacroAssembler* masm) {
// -- a2 : name
// -- lr : return address
// -- a0 : receiver
- // -- sp[0] : receiver
// -----------------------------------
Label miss;
@@ -695,7 +690,6 @@ void LoadIC::GenerateMiss(MacroAssembler* masm) {
// -- a2 : name
// -- ra : return address
// -- a0 : receiver
- // -- sp[0] : receiver
// -----------------------------------
Isolate* isolate = masm->isolate();
@@ -710,6 +704,20 @@ void LoadIC::GenerateMiss(MacroAssembler* masm) {
}
+void LoadIC::GenerateRuntimeGetProperty(MacroAssembler* masm) {
+ // ---------- S t a t e --------------
+ // -- a2 : name
+ // -- ra : return address
+ // -- a0 : receiver
+ // -----------------------------------
+
+ __ mov(a3, a0);
+ __ Push(a3, a2);
+
+ __ TailCallRuntime(Runtime::kGetProperty, 2, 1);
+}
+
+
static MemOperand GenerateMappedArgumentsLookup(MacroAssembler* masm,
Register object,
Register key,
@@ -883,9 +891,6 @@ void KeyedCallIC::GenerateNonStrictArguments(MacroAssembler* masm,
}
-Object* KeyedLoadIC_Miss(Arguments args);
-
-
void KeyedLoadIC::GenerateMiss(MacroAssembler* masm, ICMissMode miss_mode) {
// ---------- S t a t e --------------
// -- ra : return address
diff --git a/deps/v8/src/mips/lithium-codegen-mips.cc b/deps/v8/src/mips/lithium-codegen-mips.cc
index ae0d6283f4..1c8973fe7e 100644
--- a/deps/v8/src/mips/lithium-codegen-mips.cc
+++ b/deps/v8/src/mips/lithium-codegen-mips.cc
@@ -500,13 +500,18 @@ DoubleRegister LCodeGen::EmitLoadDoubleRegister(LOperand* op,
Handle<Object> LCodeGen::ToHandle(LConstantOperand* op) const {
HConstant* constant = chunk_->LookupConstant(op);
- ASSERT(chunk_->LookupLiteralRepresentation(op).IsTagged());
+ ASSERT(chunk_->LookupLiteralRepresentation(op).IsSmiOrTagged());
return constant->handle();
}
bool LCodeGen::IsInteger32(LConstantOperand* op) const {
- return chunk_->LookupLiteralRepresentation(op).IsInteger32();
+ return chunk_->LookupLiteralRepresentation(op).IsSmiOrInteger32();
+}
+
+
+bool LCodeGen::IsSmi(LConstantOperand* op) const {
+ return chunk_->LookupLiteralRepresentation(op).IsSmi();
}
@@ -516,6 +521,12 @@ int LCodeGen::ToInteger32(LConstantOperand* op) const {
}
+Smi* LCodeGen::ToSmi(LConstantOperand* op) const {
+ HConstant* constant = chunk_->LookupConstant(op);
+ return Smi::FromInt(constant->Integer32Value());
+}
+
+
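The new ToSmi()/IsSmi() helpers rely on the 32-bit smi encoding: a smi stores its integer in the upper 31 bits over a zero tag bit. A minimal sketch, assuming kSmiTagSize == 1 as on 32-bit MIPS (SmiFromInt, SmiValue, and HasSmiTag are illustrative names):

    #include <cstdint>

    static inline int32_t SmiFromInt(int32_t value) {
      return static_cast<int32_t>(static_cast<uint32_t>(value) << 1);
    }
    static inline int32_t SmiValue(int32_t smi) { return smi >> 1; }
    static inline bool HasSmiTag(int32_t word) { return (word & 1) == 0; }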
double LCodeGen::ToDouble(LConstantOperand* op) const {
HConstant* constant = chunk_->LookupConstant(op);
ASSERT(constant->HasDoubleValue());
@@ -916,8 +927,7 @@ void LCodeGen::PopulateDeoptimizationData(Handle<Code> code) {
Handle<FixedArray> literals =
factory()->NewFixedArray(deoptimization_literals_.length(), TENURED);
- { ALLOW_HANDLE_DEREF(isolate(),
- "copying a ZoneList of handles into a FixedArray");
+ { AllowDeferredHandleDereference copy_handles;
for (int i = 0; i < deoptimization_literals_.length(); i++) {
literals->set(i, *deoptimization_literals_[i]);
}
@@ -1135,59 +1145,74 @@ void LCodeGen::DoUnknownOSRValue(LUnknownOSRValue* instr) {
void LCodeGen::DoModI(LModI* instr) {
- Register scratch = scratch0();
- const Register left = ToRegister(instr->left());
- const Register result = ToRegister(instr->result());
+ HMod* hmod = instr->hydrogen();
+ HValue* left = hmod->left();
+ HValue* right = hmod->right();
+ if (hmod->HasPowerOf2Divisor()) {
+ const Register scratch = scratch0();
+ const Register left_reg = ToRegister(instr->left());
+ ASSERT(!left_reg.is(scratch));
+ const Register result_reg = ToRegister(instr->result());
+
+ // Note: The code below even works when right contains kMinInt.
+ int32_t divisor = Abs(right->GetInteger32Constant());
+
+ __ mov(scratch, left_reg);
+
+ Label left_is_not_negative, done;
+ if (left->CanBeNegative()) {
+ __ Branch(USE_DELAY_SLOT, &left_is_not_negative,
+ ge, left_reg, Operand(zero_reg));
+ __ subu(result_reg, zero_reg, left_reg);
+ __ And(result_reg, result_reg, divisor - 1);
+ if (hmod->CheckFlag(HValue::kBailoutOnMinusZero)) {
+ DeoptimizeIf(eq, instr->environment(), result_reg, Operand(zero_reg));
+ }
+ __ Branch(USE_DELAY_SLOT, &done);
+ __ subu(result_reg, zero_reg, result_reg);
+ }
- Label done;
+ __ bind(&left_is_not_negative);
+ __ And(result_reg, scratch, divisor - 1);
+ __ bind(&done);
- if (instr->hydrogen()->HasPowerOf2Divisor()) {
- Register scratch = scratch0();
- ASSERT(!left.is(scratch));
- __ mov(scratch, left);
- int32_t p2constant = HConstant::cast(
- instr->hydrogen()->right())->Integer32Value();
- ASSERT(p2constant != 0);
- // Result always takes the sign of the dividend (left).
- p2constant = abs(p2constant);
-
- Label positive_dividend;
- __ Branch(USE_DELAY_SLOT, &positive_dividend, ge, left, Operand(zero_reg));
- __ subu(result, zero_reg, left);
- __ And(result, result, p2constant - 1);
- if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) {
- DeoptimizeIf(eq, instr->environment(), result, Operand(zero_reg));
- }
- __ Branch(USE_DELAY_SLOT, &done);
- __ subu(result, zero_reg, result);
- __ bind(&positive_dividend);
- __ And(result, scratch, p2constant - 1);
} else {
+ // TODO(svenpanne) Add right->has_fixed_right_arg() case.
+
+ const Register scratch = scratch0();
+ const Register left_reg = ToRegister(instr->left());
+ const Register result_reg = ToRegister(instr->result());
+
// div runs in the background while we check for special cases.
- Register right = EmitLoadRegister(instr->right(), scratch);
- __ div(left, right);
+ Register right_reg = EmitLoadRegister(instr->right(), scratch);
+ __ div(left_reg, right_reg);
- // Check for x % 0.
- if (instr->hydrogen()->CheckFlag(HValue::kCanBeDivByZero)) {
- DeoptimizeIf(eq, instr->environment(), right, Operand(zero_reg));
+ Label done;
+    // Check for x % 0; we have to deopt in this case because we can't
+    // return a NaN.
+ if (right->CanBeZero()) {
+ DeoptimizeIf(eq, instr->environment(), right_reg, Operand(zero_reg));
}
- // Check for (kMinInt % -1).
- if (instr->hydrogen()->CheckFlag(HValue::kCanOverflow)) {
+    // Check for kMinInt % -1; we have to deopt if we care about -0, because
+    // we can't return that.
+ if (left->RangeCanInclude(kMinInt) && right->RangeCanInclude(-1)) {
Label left_not_min_int;
- __ Branch(&left_not_min_int, ne, left, Operand(kMinInt));
- DeoptimizeIf(eq, instr->environment(), right, Operand(-1));
+ __ Branch(&left_not_min_int, ne, left_reg, Operand(kMinInt));
+ // TODO(svenpanne) Don't deopt when we don't care about -0.
+ DeoptimizeIf(eq, instr->environment(), right_reg, Operand(-1));
__ bind(&left_not_min_int);
}
- __ Branch(USE_DELAY_SLOT, &done, ge, left, Operand(zero_reg));
- __ mfhi(result);
+ // TODO(svenpanne) Only emit the test/deopt if we have to.
+ __ Branch(USE_DELAY_SLOT, &done, ge, left_reg, Operand(zero_reg));
+ __ mfhi(result_reg);
- if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) {
- DeoptimizeIf(eq, instr->environment(), result, Operand(zero_reg));
+ if (hmod->CheckFlag(HValue::kBailoutOnMinusZero)) {
+ DeoptimizeIf(eq, instr->environment(), result_reg, Operand(zero_reg));
}
+ __ bind(&done);
}
- __ bind(&done);
}
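The rewritten DoModI splits cleanly in two. The power-of-2 path uses the mask trick while preserving JavaScript's sign-of-the-dividend semantics, and taking Abs() of the divisor is safe even for kMinInt because the mask is formed in unsigned arithmetic. The general path deopts when the divisor can be zero (the result would be NaN) and on kMinInt % -1 (the result would be -0). A plain C++ sketch of the power-of-2 lowering (ModPowerOf2 is an illustrative helper, not V8 code):

    #include <cstdint>

    static int32_t ModPowerOf2(int32_t left, int32_t divisor) {
      // |divisor| - 1, computed in unsigned space so divisor == kMinInt
      // cannot overflow; for a power of two this is the low-bit mask.
      uint32_t mask = (divisor < 0 ? 0u - static_cast<uint32_t>(divisor)
                                   : static_cast<uint32_t>(divisor)) - 1u;
      if (left >= 0) {
        return static_cast<int32_t>(static_cast<uint32_t>(left) & mask);
      }
      // Negative dividend: negate, mask, negate back. A zero result here is
      // exactly the -0 case the kBailoutOnMinusZero deopt guards against.
      uint32_t magnitude = (0u - static_cast<uint32_t>(left)) & mask;
      return -static_cast<int32_t>(magnitude);
    }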
@@ -1489,7 +1514,11 @@ void LCodeGen::DoSubI(LSubI* instr) {
void LCodeGen::DoConstantI(LConstantI* instr) {
- ASSERT(instr->result()->IsRegister());
+ __ li(ToRegister(instr->result()), Operand(instr->value()));
+}
+
+
+void LCodeGen::DoConstantS(LConstantS* instr) {
__ li(ToRegister(instr->result()), Operand(instr->value()));
}
@@ -1504,7 +1533,7 @@ void LCodeGen::DoConstantD(LConstantD* instr) {
void LCodeGen::DoConstantT(LConstantT* instr) {
Handle<Object> value = instr->value();
- ALLOW_HANDLE_DEREF(isolate(), "smi check");
+ AllowDeferredHandleDereference smi_check;
if (value->IsSmi()) {
__ li(ToRegister(instr->result()), Operand(value));
} else {
@@ -1600,11 +1629,35 @@ void LCodeGen::DoDateField(LDateField* instr) {
void LCodeGen::DoSeqStringSetChar(LSeqStringSetChar* instr) {
- SeqStringSetCharGenerator::Generate(masm(),
- instr->encoding(),
- ToRegister(instr->string()),
- ToRegister(instr->index()),
- ToRegister(instr->value()));
+ Register string = ToRegister(instr->string());
+ Register index = ToRegister(instr->index());
+ Register value = ToRegister(instr->value());
+ Register scratch = scratch0();
+ String::Encoding encoding = instr->encoding();
+
+ if (FLAG_debug_code) {
+ __ lw(at, FieldMemOperand(string, HeapObject::kMapOffset));
+ __ lbu(at, FieldMemOperand(at, Map::kInstanceTypeOffset));
+
+ __ And(at, at, Operand(kStringRepresentationMask | kStringEncodingMask));
+ static const uint32_t one_byte_seq_type = kSeqStringTag | kOneByteStringTag;
+ static const uint32_t two_byte_seq_type = kSeqStringTag | kTwoByteStringTag;
+ __ Subu(at, at, Operand(encoding == String::ONE_BYTE_ENCODING
+ ? one_byte_seq_type : two_byte_seq_type));
+ __ Check(eq, "Unexpected string type", at, Operand(zero_reg));
+ }
+
+ __ Addu(scratch,
+ string,
+ Operand(SeqString::kHeaderSize - kHeapObjectTag));
+ if (encoding == String::ONE_BYTE_ENCODING) {
+ __ Addu(at, scratch, index);
+ __ sb(value, MemOperand(at));
+ } else {
+ __ sll(at, index, 1);
+ __ Addu(at, scratch, at);
+ __ sh(value, MemOperand(at));
+ }
}
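The expanded DoSeqStringSetChar spells out the addressing: characters sit immediately after the sequential-string header, one byte each in one-byte strings and two bytes each in two-byte strings, which is what the sb versus sll+sh pairs implement. The same computation on the host side, as a sketch (payload stands for string + SeqString::kHeaderSize - kHeapObjectTag):

    #include <cstdint>
    #include <cstring>

    static void SeqStringSetChar(uint8_t* payload, bool one_byte_encoding,
                                 int index, uint16_t value) {
      if (one_byte_encoding) {
        payload[index] = static_cast<uint8_t>(value);  // matches __ sb
      } else {
        std::memcpy(payload + 2 * index, &value, 2);   // matches __ sll + sh
      }
    }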
@@ -1837,10 +1890,12 @@ void LCodeGen::DoBranch(LBranch* instr) {
int false_block = chunk_->LookupDestination(instr->false_block_id());
Representation r = instr->hydrogen()->value()->representation();
- if (r.IsInteger32()) {
+ if (r.IsInteger32() || r.IsSmi()) {
+ ASSERT(!info()->IsStub());
Register reg = ToRegister(instr->value());
EmitBranch(true_block, false_block, ne, reg, Operand(zero_reg));
} else if (r.IsDouble()) {
+ ASSERT(!info()->IsStub());
DoubleRegister reg = ToDoubleRegister(instr->value());
// Test the double value. Zero and NaN are false.
EmitBranchF(true_block, false_block, nue, reg, kDoubleRegZero);
@@ -1849,9 +1904,11 @@ void LCodeGen::DoBranch(LBranch* instr) {
Register reg = ToRegister(instr->value());
HType type = instr->hydrogen()->value()->type();
if (type.IsBoolean()) {
+ ASSERT(!info()->IsStub());
__ LoadRoot(at, Heap::kTrueValueRootIndex);
EmitBranch(true_block, false_block, eq, reg, Operand(at));
} else if (type.IsSmi()) {
+ ASSERT(!info()->IsStub());
EmitBranch(true_block, false_block, ne, reg, Operand(zero_reg));
} else {
Label* true_label = chunk_->GetAssemblyLabel(true_block);
@@ -2018,11 +2075,23 @@ void LCodeGen::DoCmpIDAndBranch(LCmpIDAndBranch* instr) {
Operand cmp_right = Operand(0);
if (right->IsConstantOperand()) {
- cmp_left = ToRegister(left);
- cmp_right = Operand(ToInteger32(LConstantOperand::cast(right)));
+ int32_t value = ToInteger32(LConstantOperand::cast(right));
+ if (instr->hydrogen_value()->representation().IsSmi()) {
+ cmp_left = ToRegister(left);
+ cmp_right = Operand(Smi::FromInt(value));
+ } else {
+ cmp_left = ToRegister(left);
+ cmp_right = Operand(value);
+ }
} else if (left->IsConstantOperand()) {
- cmp_left = ToRegister(right);
- cmp_right = Operand(ToInteger32(LConstantOperand::cast(left)));
+ int32_t value = ToInteger32(LConstantOperand::cast(left));
+ if (instr->hydrogen_value()->representation().IsSmi()) {
+ cmp_left = ToRegister(right);
+ cmp_right = Operand(Smi::FromInt(value));
+ } else {
+ cmp_left = ToRegister(right);
+ cmp_right = Operand(value);
+ }
// We transposed the operands. Reverse the condition.
cond = ReverseCondition(cond);
} else {
@@ -2537,8 +2606,8 @@ void LCodeGen::DoReturn(LReturn* instr) {
int no_frame_start = -1;
if (NeedsEagerFrame()) {
__ mov(sp, fp);
- __ Pop(ra, fp);
no_frame_start = masm_->pc_offset();
+ __ Pop(ra, fp);
}
if (instr->has_constant_parameter_count()) {
int parameter_count = ToInteger32(instr->constant_parameter_count());
@@ -2681,7 +2750,8 @@ void LCodeGen::DoStoreContextSlot(LStoreContextSlot* instr) {
void LCodeGen::DoLoadNamedField(LLoadNamedField* instr) {
- int offset = instr->hydrogen()->offset();
+ HObjectAccess access = instr->hydrogen()->access();
+ int offset = access.offset();
Register object = ToRegister(instr->object());
if (instr->hydrogen()->representation().IsDouble()) {
DoubleRegister result = ToDoubleRegister(instr->result());
@@ -2690,7 +2760,7 @@ void LCodeGen::DoLoadNamedField(LLoadNamedField* instr) {
}
Register result = ToRegister(instr->result());
- if (instr->hydrogen()->is_in_object()) {
+ if (access.IsInobject()) {
__ lw(result, FieldMemOperand(object, offset));
} else {
__ lw(result, FieldMemOperand(object, JSObject::kPropertiesOffset));
@@ -2758,9 +2828,7 @@ void LCodeGen::DoLoadNamedFieldPolymorphic(LLoadNamedFieldPolymorphic* instr) {
bool last = (i == map_count - 1);
Handle<Map> map = instr->hydrogen()->types()->at(i);
Label check_passed;
- __ CompareMapAndBranch(
- object_map, map, &check_passed,
- eq, &check_passed, ALLOW_ELEMENT_TRANSITION_MAPS);
+ __ CompareMapAndBranch(object_map, map, &check_passed, eq, &check_passed);
if (last && !need_generic) {
DeoptimizeIf(al, instr->environment());
__ bind(&check_passed);
@@ -2886,7 +2954,7 @@ void LCodeGen::DoLoadKeyedExternalArray(LLoadKeyed* instr) {
key = ToRegister(instr->key());
}
int element_size_shift = ElementsKindToShiftSize(elements_kind);
- int shift_size = (instr->hydrogen()->key()->representation().IsTagged())
+ int shift_size = (instr->hydrogen()->key()->representation().IsSmi())
? (element_size_shift - kSmiTagSize) : element_size_shift;
int additional_offset = instr->additional_index() << element_size_shift;
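This IsTagged() to IsSmi() switch, repeated across the keyed load/store paths below, keeps the same scaling rule: a smi key already carries a factor of two from its tag bit, so turning it into a byte offset needs one fewer shift than a raw int32 key. A sketch with illustrative 32-bit constants (KeyToByteOffset is not a V8 helper):

    #include <cstdint>

    static int32_t KeyToByteOffset(int32_t key, bool key_is_smi,
                                   int element_size_shift) {
      const int kSmiTagSize = 1;  // smi keys are value << 1
      int shift = key_is_smi ? element_size_shift - kSmiTagSize
                             : element_size_shift;
      return key << shift;
    }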
@@ -2960,7 +3028,7 @@ void LCodeGen::DoLoadKeyedFixedDoubleArray(LLoadKeyed* instr) {
Register scratch = scratch0();
int element_size_shift = ElementsKindToShiftSize(FAST_DOUBLE_ELEMENTS);
- int shift_size = (instr->hydrogen()->key()->representation().IsTagged())
+ int shift_size = (instr->hydrogen()->key()->representation().IsSmi())
? (element_size_shift - kSmiTagSize) : element_size_shift;
int constant_key = 0;
if (key_is_constant) {
@@ -3005,7 +3073,7 @@ void LCodeGen::DoLoadKeyedFixedArray(LLoadKeyed* instr) {
// representation for the key to be an integer, the input gets replaced
// during bounds check elimination with the index argument to the bounds
// check, which can be tagged, so that case must be handled here, too.
- if (instr->hydrogen()->key()->representation().IsTagged()) {
+ if (instr->hydrogen()->key()->representation().IsSmi()) {
__ sll(scratch, key, kPointerSizeLog2 - kSmiTagSize);
__ addu(scratch, elements, scratch);
} else {
@@ -3616,7 +3684,10 @@ void LCodeGen::DoPower(LPower* instr) {
ASSERT(ToDoubleRegister(instr->left()).is(f2));
ASSERT(ToDoubleRegister(instr->result()).is(f0));
- if (exponent_type.IsTagged()) {
+ if (exponent_type.IsSmi()) {
+ MathPowStub stub(MathPowStub::TAGGED);
+ __ CallStub(&stub);
+ } else if (exponent_type.IsTagged()) {
Label no_deopt;
__ JumpIfSmi(a2, &no_deopt);
__ lw(t3, FieldMemOperand(a2, HeapObject::kMapOffset));
@@ -3864,14 +3935,17 @@ void LCodeGen::DoCallNewArray(LCallNewArray* instr) {
__ li(a0, Operand(instr->arity()));
__ li(a2, Operand(instr->hydrogen()->property_cell()));
ElementsKind kind = instr->hydrogen()->elements_kind();
+ bool disable_allocation_sites =
+ (AllocationSiteInfo::GetMode(kind) == TRACK_ALLOCATION_SITE);
+
if (instr->arity() == 0) {
- ArrayNoArgumentConstructorStub stub(kind);
+ ArrayNoArgumentConstructorStub stub(kind, disable_allocation_sites);
CallCode(stub.GetCode(isolate()), RelocInfo::CONSTRUCT_CALL, instr);
} else if (instr->arity() == 1) {
- ArraySingleArgumentConstructorStub stub(kind);
+ ArraySingleArgumentConstructorStub stub(kind, disable_allocation_sites);
CallCode(stub.GetCode(isolate()), RelocInfo::CONSTRUCT_CALL, instr);
} else {
- ArrayNArgumentsConstructorStub stub(kind);
+ ArrayNArgumentsConstructorStub stub(kind, disable_allocation_sites);
CallCode(stub.GetCode(isolate()), RelocInfo::CONSTRUCT_CALL, instr);
}
}
@@ -3894,17 +3968,12 @@ void LCodeGen::DoStoreNamedField(LStoreNamedField* instr) {
Register object = ToRegister(instr->object());
Register scratch = scratch0();
- int offset = instr->offset();
+ HObjectAccess access = instr->hydrogen()->access();
+ int offset = access.offset();
Handle<Map> transition = instr->transition();
- if (FLAG_track_fields && representation.IsSmi()) {
- Register value = ToRegister(instr->value());
- __ SmiTagCheckOverflow(value, value, scratch);
- if (!instr->hydrogen()->value()->range()->IsInSmiRange()) {
- DeoptimizeIf(lt, instr->environment(), scratch, Operand(zero_reg));
- }
- } else if (FLAG_track_heap_object_fields && representation.IsHeapObject()) {
+ if (FLAG_track_heap_object_fields && representation.IsHeapObject()) {
Register value = ToRegister(instr->value());
if (!instr->hydrogen()->value()->type().IsHeapObject()) {
__ And(scratch, value, Operand(kSmiTagMask));
@@ -3912,7 +3981,7 @@ void LCodeGen::DoStoreNamedField(LStoreNamedField* instr) {
}
} else if (FLAG_track_double_fields && representation.IsDouble()) {
ASSERT(transition.is_null());
- ASSERT(instr->is_in_object());
+ ASSERT(access.IsInobject());
ASSERT(!instr->hydrogen()->NeedsWriteBarrier());
DoubleRegister value = ToDoubleRegister(instr->value());
__ sdc1(value, FieldMemOperand(object, offset));
@@ -3945,7 +4014,7 @@ void LCodeGen::DoStoreNamedField(LStoreNamedField* instr) {
HType type = instr->hydrogen()->value()->type();
SmiCheck check_needed =
type.IsHeapObject() ? OMIT_SMI_CHECK : INLINE_SMI_CHECK;
- if (instr->is_in_object()) {
+ if (access.IsInobject()) {
__ sw(value, FieldMemOperand(object, offset));
if (instr->hydrogen()->NeedsWriteBarrier()) {
// Update the write barrier for the object for in-object properties.
@@ -3996,7 +4065,7 @@ void LCodeGen::DoBoundsCheck(LBoundsCheck* instr) {
if (instr->index()->IsConstantOperand()) {
int constant_index =
ToInteger32(LConstantOperand::cast(instr->index()));
- if (instr->hydrogen()->length()->representation().IsTagged()) {
+ if (instr->hydrogen()->length()->representation().IsSmi()) {
__ li(at, Operand(Smi::FromInt(constant_index)));
} else {
__ li(at, Operand(constant_index));
@@ -4029,7 +4098,7 @@ void LCodeGen::DoStoreKeyedExternalArray(LStoreKeyed* instr) {
key = ToRegister(instr->key());
}
int element_size_shift = ElementsKindToShiftSize(elements_kind);
- int shift_size = (instr->hydrogen()->key()->representation().IsTagged())
+ int shift_size = (instr->hydrogen()->key()->representation().IsSmi())
? (element_size_shift - kSmiTagSize) : element_size_shift;
int additional_offset = instr->additional_index() << element_size_shift;
@@ -4107,7 +4176,7 @@ void LCodeGen::DoStoreKeyedFixedDoubleArray(LStoreKeyed* instr) {
key = ToRegister(instr->key());
}
int element_size_shift = ElementsKindToShiftSize(FAST_DOUBLE_ELEMENTS);
- int shift_size = (instr->hydrogen()->key()->representation().IsTagged())
+ int shift_size = (instr->hydrogen()->key()->representation().IsSmi())
? (element_size_shift - kSmiTagSize) : element_size_shift;
if (key_is_constant) {
__ Addu(scratch, elements, Operand((constant_key << element_size_shift) +
@@ -4157,7 +4226,7 @@ void LCodeGen::DoStoreKeyedFixedArray(LStoreKeyed* instr) {
// representation for the key to be an integer, the input gets replaced
// during bounds check elimination with the index argument to the bounds
// check, which can be tagged, so that case must be handled here, too.
- if (instr->hydrogen()->key()->representation().IsTagged()) {
+ if (instr->hydrogen()->key()->representation().IsSmi()) {
__ sll(scratch, key, kPointerSizeLog2 - kSmiTagSize);
__ addu(scratch, elements, scratch);
} else {
@@ -4407,6 +4476,21 @@ void LCodeGen::DoInteger32ToDouble(LInteger32ToDouble* instr) {
}
+void LCodeGen::DoInteger32ToSmi(LInteger32ToSmi* instr) {
+ LOperand* input = instr->value();
+ ASSERT(input->IsRegister());
+ LOperand* output = instr->result();
+ ASSERT(output->IsRegister());
+ Register scratch = scratch0();
+
+ __ SmiTagCheckOverflow(ToRegister(output), ToRegister(input), scratch);
+ if (!instr->hydrogen()->value()->HasRange() ||
+ !instr->hydrogen()->value()->range()->IsInSmiRange()) {
+ DeoptimizeIf(lt, instr->environment(), scratch, Operand(zero_reg));
+ }
+}
+
+
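DoInteger32ToSmi leans on SmiTagCheckOverflow, which tags by doubling the value and signals overflow through the scratch register's sign: the tag overflowed exactly when the shift flipped the sign bit, which is what the lt-against-zero deopt tests. A sketch (illustrative helper, not the MIPS macro):

    #include <cstdint>

    // Returns false where the generated code would deoptimize.
    static bool SmiTagCheckOverflow(int32_t value, int32_t* tagged) {
      int32_t shifted = static_cast<int32_t>(static_cast<uint32_t>(value) << 1);
      *tagged = shifted;
      return (shifted ^ value) >= 0;  // sign unchanged => fits in a smi
    }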
void LCodeGen::DoUint32ToDouble(LUint32ToDouble* instr) {
LOperand* input = instr->value();
LOperand* output = instr->result();
@@ -4618,7 +4702,7 @@ void LCodeGen::DoSmiUntag(LSmiUntag* instr) {
void LCodeGen::EmitNumberUntagD(Register input_reg,
DoubleRegister result_reg,
- bool deoptimize_on_undefined,
+ bool allow_undefined_as_nan,
bool deoptimize_on_minus_zero,
LEnvironment* env,
NumberUntagDMode mode) {
@@ -4626,23 +4710,30 @@ void LCodeGen::EmitNumberUntagD(Register input_reg,
Label load_smi, heap_number, done;
- if (mode == NUMBER_CANDIDATE_IS_ANY_TAGGED) {
+ STATIC_ASSERT(NUMBER_CANDIDATE_IS_ANY_TAGGED_CONVERT_HOLE >
+ NUMBER_CANDIDATE_IS_ANY_TAGGED);
+ if (mode >= NUMBER_CANDIDATE_IS_ANY_TAGGED) {
// Smi check.
__ UntagAndJumpIfSmi(scratch, input_reg, &load_smi);
// Heap number map check.
__ lw(scratch, FieldMemOperand(input_reg, HeapObject::kMapOffset));
__ LoadRoot(at, Heap::kHeapNumberMapRootIndex);
- if (deoptimize_on_undefined) {
+ if (!allow_undefined_as_nan) {
DeoptimizeIf(ne, env, scratch, Operand(at));
} else {
- Label heap_number;
+ Label heap_number, convert;
__ Branch(&heap_number, eq, scratch, Operand(at));
+ // Convert undefined (and hole) to NaN.
__ LoadRoot(at, Heap::kUndefinedValueRootIndex);
+ if (mode == NUMBER_CANDIDATE_IS_ANY_TAGGED_CONVERT_HOLE) {
+ __ Branch(&convert, eq, input_reg, Operand(at));
+ __ LoadRoot(at, Heap::kTheHoleValueRootIndex);
+ }
DeoptimizeIf(ne, env, input_reg, Operand(at));
- // Convert undefined to NaN.
+ __ bind(&convert);
__ LoadRoot(at, Heap::kNanValueRootIndex);
__ ldc1(result_reg, FieldMemOperand(at, HeapNumber::kValueOffset));
__ Branch(&done);
@@ -4658,14 +4749,6 @@ void LCodeGen::EmitNumberUntagD(Register input_reg,
DeoptimizeIf(eq, env, scratch, Operand(HeapNumber::kSignMask));
}
__ Branch(&done);
- } else if (mode == NUMBER_CANDIDATE_IS_SMI_OR_HOLE) {
- __ SmiUntag(scratch, input_reg);
- DeoptimizeIf(Ugreater_equal, env, scratch, Operand(zero_reg));
- } else if (mode == NUMBER_CANDIDATE_IS_SMI_CONVERT_HOLE) {
- __ UntagAndJumpIfSmi(scratch, input_reg, &load_smi);
- __ Move(result_reg,
- FixedDoubleArray::hole_nan_as_double());
- __ Branch(&done);
} else {
__ SmiUntag(scratch, input_reg);
ASSERT(mode == NUMBER_CANDIDATE_IS_SMI);
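The untagging logic above folds the old SMI_OR_HOLE and SMI_CONVERT_HOLE modes into a single ANY_TAGGED_CONVERT_HOLE mode, and the STATIC_ASSERT pins the enum order so that mode >= NUMBER_CANDIDATE_IS_ANY_TAGGED covers both tagged variants. The resulting decision table, sketched with illustrative tags (Kind and UntagToDouble are not V8 types):

    #include <limits>

    enum Kind { kSmi, kHeapNumber, kUndefined, kTheHole, kOther };

    // Returns false where the emitted code deoptimizes.
    static bool UntagToDouble(Kind kind, double payload,
                              bool allow_undefined_as_nan, bool convert_hole,
                              double* out) {
      const double nan = std::numeric_limits<double>::quiet_NaN();
      switch (kind) {
        case kSmi:        *out = payload; return true;  // untagged integer
        case kHeapNumber: *out = payload; return true;  // ldc1 from object
        case kUndefined:
          if (!allow_undefined_as_nan) return false;
          *out = nan; return true;
        case kTheHole:
          if (!allow_undefined_as_nan || !convert_hole) return false;
          *out = nan; return true;
        default:          return false;
      }
    }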
@@ -4798,24 +4881,18 @@ void LCodeGen::DoNumberUntagD(LNumberUntagD* instr) {
NumberUntagDMode mode = NUMBER_CANDIDATE_IS_ANY_TAGGED;
HValue* value = instr->hydrogen()->value();
if (value->type().IsSmi()) {
- if (value->IsLoadKeyed()) {
- HLoadKeyed* load = HLoadKeyed::cast(value);
- if (load->UsesMustHandleHole()) {
- if (load->hole_mode() == ALLOW_RETURN_HOLE) {
- mode = NUMBER_CANDIDATE_IS_SMI_CONVERT_HOLE;
- } else {
- mode = NUMBER_CANDIDATE_IS_SMI_OR_HOLE;
- }
- } else {
- mode = NUMBER_CANDIDATE_IS_SMI;
+ mode = NUMBER_CANDIDATE_IS_SMI;
+ } else if (value->IsLoadKeyed()) {
+ HLoadKeyed* load = HLoadKeyed::cast(value);
+ if (load->UsesMustHandleHole()) {
+ if (load->hole_mode() == ALLOW_RETURN_HOLE) {
+ mode = NUMBER_CANDIDATE_IS_ANY_TAGGED_CONVERT_HOLE;
}
- } else {
- mode = NUMBER_CANDIDATE_IS_SMI;
}
}
EmitNumberUntagD(input_reg, result_reg,
- instr->hydrogen()->deoptimize_on_undefined(),
+ instr->hydrogen()->allow_undefined_as_nan(),
instr->hydrogen()->deoptimize_on_minus_zero(),
instr->environment(),
mode);
@@ -4850,10 +4927,62 @@ void LCodeGen::DoDoubleToI(LDoubleToI* instr) {
// Deopt if the operation did not succeed (except_flag != 0).
DeoptimizeIf(ne, instr->environment(), except_flag, Operand(zero_reg));
+
+ if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) {
+ Label done;
+ __ Branch(&done, ne, result_reg, Operand(zero_reg));
+ __ mfc1(scratch1, double_input.high());
+ __ And(scratch1, scratch1, Operand(HeapNumber::kSignMask));
+ DeoptimizeIf(ne, instr->environment(), scratch1, Operand(zero_reg));
+ __ bind(&done);
+ }
}
}
+void LCodeGen::DoDoubleToSmi(LDoubleToSmi* instr) {
+ Register result_reg = ToRegister(instr->result());
+ Register scratch1 = scratch0();
+ Register scratch2 = ToRegister(instr->temp());
+ DoubleRegister double_input = ToDoubleRegister(instr->value());
+
+ if (instr->truncating()) {
+ Register scratch3 = ToRegister(instr->temp2());
+ FPURegister single_scratch = double_scratch0().low();
+ __ EmitECMATruncate(result_reg,
+ double_input,
+ single_scratch,
+ scratch1,
+ scratch2,
+ scratch3);
+ } else {
+ Register except_flag = scratch2;
+
+ __ EmitFPUTruncate(kRoundToMinusInf,
+ result_reg,
+ double_input,
+ scratch1,
+ double_scratch0(),
+ except_flag,
+ kCheckForInexactConversion);
+
+ // Deopt if the operation did not succeed (except_flag != 0).
+ DeoptimizeIf(ne, instr->environment(), except_flag, Operand(zero_reg));
+
+ if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) {
+ Label done;
+ __ Branch(&done, ne, result_reg, Operand(zero_reg));
+ __ mfc1(scratch1, double_input.high());
+ __ And(scratch1, scratch1, Operand(HeapNumber::kSignMask));
+ DeoptimizeIf(ne, instr->environment(), scratch1, Operand(zero_reg));
+ __ bind(&done);
+ }
+ }
+ __ SmiTagCheckOverflow(result_reg, result_reg, scratch1);
+ DeoptimizeIf(lt, instr->environment(), scratch1, Operand(zero_reg));
+}
+
+
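The new DoDoubleToSmi mirrors DoDoubleToI and then tags the result, so the non-truncating path can deopt three ways: an inexact conversion, a -0 input when minus zero must stay observable, and smi-tag overflow. Sketched in plain C++ (DoubleToSmiNonTruncating is an illustrative helper):

    #include <cmath>
    #include <cstdint>

    // Returns false where the generated code would deoptimize.
    static bool DoubleToSmiNonTruncating(double input,
                                         bool bailout_on_minus_zero,
                                         int32_t* tagged) {
      double rounded = std::floor(input);                   // kRoundToMinusInf
      if (rounded != input) return false;                   // inexact
      if (rounded < -2147483648.0 || rounded > 2147483647.0) return false;
      int32_t result = static_cast<int32_t>(rounded);
      if (result == 0 && bailout_on_minus_zero && std::signbit(input)) {
        return false;                                       // -0: sign bit set
      }
      int32_t shifted = static_cast<int32_t>(static_cast<uint32_t>(result) << 1);
      if ((shifted ^ result) < 0) return false;             // smi-tag overflow
      *tagged = shifted;
      return true;
    }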
void LCodeGen::DoCheckSmi(LCheckSmi* instr) {
LOperand* input = instr->value();
__ And(at, ToRegister(input), Operand(kSmiTagMask));
@@ -4910,7 +5039,7 @@ void LCodeGen::DoCheckInstanceType(LCheckInstanceType* instr) {
void LCodeGen::DoCheckFunction(LCheckFunction* instr) {
Register reg = ToRegister(instr->value());
Handle<JSFunction> target = instr->hydrogen()->target();
- ALLOW_HANDLE_DEREF(isolate(), "smi check");
+ AllowDeferredHandleDereference smi_check;
if (isolate()->heap()->InNewSpace(*target)) {
Register reg = ToRegister(instr->value());
Handle<JSGlobalPropertyCell> cell =
@@ -4928,10 +5057,9 @@ void LCodeGen::DoCheckFunction(LCheckFunction* instr) {
void LCodeGen::DoCheckMapCommon(Register map_reg,
Handle<Map> map,
- CompareMapMode mode,
LEnvironment* env) {
Label success;
- __ CompareMapAndBranch(map_reg, map, &success, eq, &success, mode);
+ __ CompareMapAndBranch(map_reg, map, &success, eq, &success);
DeoptimizeIf(al, env);
__ bind(&success);
}
@@ -4947,11 +5075,10 @@ void LCodeGen::DoCheckMaps(LCheckMaps* instr) {
__ lw(map_reg, FieldMemOperand(reg, HeapObject::kMapOffset));
for (int i = 0; i < map_set->length() - 1; i++) {
Handle<Map> map = map_set->at(i);
- __ CompareMapAndBranch(
- map_reg, map, &success, eq, &success, REQUIRE_EXACT_MAP);
+ __ CompareMapAndBranch(map_reg, map, &success, eq, &success);
}
Handle<Map> map = map_set->last();
- DoCheckMapCommon(map_reg, map, REQUIRE_EXACT_MAP, instr->environment());
+ DoCheckMapCommon(map_reg, map, instr->environment());
__ bind(&success);
}
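With the CompareMapMode parameter gone, DoCheckMaps is a plain chain of early-out compares; only if the last compare also fails does control fall into the unconditional deopt emitted by DoCheckMapCommon. Equivalent control flow, sketched:

    // Illustrative only: `maps` stands for the instruction's map set.
    static bool CheckMaps(const void* object_map,
                          const void* const* maps, int count) {
      for (int i = 0; i < count; i++) {
        if (object_map == maps[i]) return true;  // branch to &success
      }
      return false;                              // DeoptimizeIf(al, env)
    }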
@@ -5023,89 +5150,12 @@ void LCodeGen::DoCheckPrototypeMaps(LCheckPrototypeMaps* instr) {
for (int i = 0; i < prototypes->length(); i++) {
__ LoadHeapObject(prototype_reg, prototypes->at(i));
__ lw(map_reg, FieldMemOperand(prototype_reg, HeapObject::kMapOffset));
- DoCheckMapCommon(map_reg,
- maps->at(i),
- ALLOW_ELEMENT_TRANSITION_MAPS,
- instr->environment());
+ DoCheckMapCommon(map_reg, maps->at(i), instr->environment());
}
}
}
-void LCodeGen::DoAllocateObject(LAllocateObject* instr) {
- class DeferredAllocateObject: public LDeferredCode {
- public:
- DeferredAllocateObject(LCodeGen* codegen, LAllocateObject* instr)
- : LDeferredCode(codegen), instr_(instr) { }
- virtual void Generate() { codegen()->DoDeferredAllocateObject(instr_); }
- virtual LInstruction* instr() { return instr_; }
- private:
- LAllocateObject* instr_;
- };
-
- DeferredAllocateObject* deferred =
- new(zone()) DeferredAllocateObject(this, instr);
-
- Register result = ToRegister(instr->result());
- Register scratch = ToRegister(instr->temp());
- Register scratch2 = ToRegister(instr->temp2());
- Handle<JSFunction> constructor = instr->hydrogen()->constructor();
- Handle<Map> initial_map = instr->hydrogen()->constructor_initial_map();
- int instance_size = initial_map->instance_size();
- ASSERT(initial_map->pre_allocated_property_fields() +
- initial_map->unused_property_fields() -
- initial_map->inobject_properties() == 0);
-
- __ Allocate(instance_size, result, scratch, scratch2, deferred->entry(),
- TAG_OBJECT);
-
- __ bind(deferred->exit());
- if (FLAG_debug_code) {
- Label is_in_new_space;
- __ JumpIfInNewSpace(result, scratch, &is_in_new_space);
- __ Abort("Allocated object is not in new-space");
- __ bind(&is_in_new_space);
- }
-
- // Load the initial map.
- Register map = scratch;
- __ LoadHeapObject(map, constructor);
- __ lw(map, FieldMemOperand(map, JSFunction::kPrototypeOrInitialMapOffset));
-
- // Initialize map and fields of the newly allocated object.
- ASSERT(initial_map->instance_type() == JS_OBJECT_TYPE);
- __ sw(map, FieldMemOperand(result, JSObject::kMapOffset));
- __ LoadRoot(scratch, Heap::kEmptyFixedArrayRootIndex);
- __ sw(scratch, FieldMemOperand(result, JSObject::kElementsOffset));
- __ sw(scratch, FieldMemOperand(result, JSObject::kPropertiesOffset));
- if (initial_map->inobject_properties() != 0) {
- __ LoadRoot(scratch, Heap::kUndefinedValueRootIndex);
- for (int i = 0; i < initial_map->inobject_properties(); i++) {
- int property_offset = JSObject::kHeaderSize + i * kPointerSize;
- __ sw(scratch, FieldMemOperand(result, property_offset));
- }
- }
-}
-
-
-void LCodeGen::DoDeferredAllocateObject(LAllocateObject* instr) {
- Register result = ToRegister(instr->result());
- Handle<Map> initial_map = instr->hydrogen()->constructor_initial_map();
- int instance_size = initial_map->instance_size();
-
- // TODO(3095996): Get rid of this. For now, we need to make the
- // result register contain a valid pointer because it is already
- // contained in the register pointer map.
- __ mov(result, zero_reg);
-
- PushSafepointRegistersScope scope(this, Safepoint::kWithRegisters);
- __ li(a0, Operand(Smi::FromInt(instance_size)));
- __ push(a0);
- CallRuntimeFromDeferred(Runtime::kAllocateInNewSpace, 1, instr);
- __ StoreToSafepointRegisterSlot(v0, result);
-}
-
-
void LCodeGen::DoAllocate(LAllocate* instr) {
class DeferredAllocate: public LDeferredCode {
public:
@@ -5130,7 +5180,10 @@ void LCodeGen::DoAllocate(LAllocate* instr) {
flags = static_cast<AllocationFlags>(flags | DOUBLE_ALIGNMENT);
}
if (instr->hydrogen()->CanAllocateInOldPointerSpace()) {
+ ASSERT(!instr->hydrogen()->CanAllocateInOldDataSpace());
flags = static_cast<AllocationFlags>(flags | PRETENURE_OLD_POINTER_SPACE);
+ } else if (instr->hydrogen()->CanAllocateInOldDataSpace()) {
+ flags = static_cast<AllocationFlags>(flags | PRETENURE_OLD_DATA_SPACE);
}
if (instr->size()->IsConstantOperand()) {
int32_t size = ToInteger32(LConstantOperand::cast(instr->size()));
@@ -5169,11 +5222,12 @@ void LCodeGen::DoDeferredAllocate(LAllocate* instr) {
}
if (instr->hydrogen()->CanAllocateInOldPointerSpace()) {
- CallRuntimeFromDeferred(
- Runtime::kAllocateInOldPointerSpace, 1, instr);
+ ASSERT(!instr->hydrogen()->CanAllocateInOldDataSpace());
+ CallRuntimeFromDeferred(Runtime::kAllocateInOldPointerSpace, 1, instr);
+ } else if (instr->hydrogen()->CanAllocateInOldDataSpace()) {
+ CallRuntimeFromDeferred(Runtime::kAllocateInOldDataSpace, 1, instr);
} else {
- CallRuntimeFromDeferred(
- Runtime::kAllocateInNewSpace, 1, instr);
+ CallRuntimeFromDeferred(Runtime::kAllocateInNewSpace, 1, instr);
}
__ StoreToSafepointRegisterSlot(v0, result);
}
diff --git a/deps/v8/src/mips/lithium-codegen-mips.h b/deps/v8/src/mips/lithium-codegen-mips.h
index 90e7bf666b..a208c4009a 100644
--- a/deps/v8/src/mips/lithium-codegen-mips.h
+++ b/deps/v8/src/mips/lithium-codegen-mips.h
@@ -118,6 +118,7 @@ class LCodeGen BASE_EMBEDDED {
FloatRegister flt_scratch,
DoubleRegister dbl_scratch);
int ToInteger32(LConstantOperand* op) const;
+ Smi* ToSmi(LConstantOperand* op) const;
double ToDouble(LConstantOperand* op) const;
Operand ToOperand(LOperand* op);
MemOperand ToMemOperand(LOperand* op) const;
@@ -125,6 +126,7 @@ class LCodeGen BASE_EMBEDDED {
MemOperand ToHighMemOperand(LOperand* op) const;
bool IsInteger32(LConstantOperand* op) const;
+ bool IsSmi(LConstantOperand* op) const;
Handle<Object> ToHandle(LConstantOperand* op) const;
// Try to generate code for the entire chunk, but it may fail if the
@@ -149,13 +151,11 @@ class LCodeGen BASE_EMBEDDED {
void DoDeferredRandom(LRandom* instr);
void DoDeferredStringCharCodeAt(LStringCharCodeAt* instr);
void DoDeferredStringCharFromCode(LStringCharFromCode* instr);
- void DoDeferredAllocateObject(LAllocateObject* instr);
void DoDeferredAllocate(LAllocate* instr);
void DoDeferredInstanceOfKnownGlobal(LInstanceOfKnownGlobal* instr,
Label* map_check);
- void DoCheckMapCommon(Register map_reg, Handle<Map> map,
- CompareMapMode mode, LEnvironment* env);
+ void DoCheckMapCommon(Register map_reg, Handle<Map> map, LEnvironment* env);
// Parallel move support.
void DoParallelMove(LParallelMove* move);
@@ -342,7 +342,7 @@ class LCodeGen BASE_EMBEDDED {
void EmitCmpI(LOperand* left, LOperand* right);
void EmitNumberUntagD(Register input,
DoubleRegister result,
- bool deoptimize_on_undefined,
+ bool allow_undefined_as_nan,
bool deoptimize_on_minus_zero,
LEnvironment* env,
NumberUntagDMode mode);
diff --git a/deps/v8/src/mips/lithium-gap-resolver-mips.cc b/deps/v8/src/mips/lithium-gap-resolver-mips.cc
index 87efae5f4d..23a8f32f76 100644
--- a/deps/v8/src/mips/lithium-gap-resolver-mips.cc
+++ b/deps/v8/src/mips/lithium-gap-resolver-mips.cc
@@ -252,7 +252,9 @@ void LGapResolver::EmitMove(int index) {
LConstantOperand* constant_source = LConstantOperand::cast(source);
if (destination->IsRegister()) {
Register dst = cgen_->ToRegister(destination);
- if (cgen_->IsInteger32(constant_source)) {
+ if (cgen_->IsSmi(constant_source)) {
+ __ li(dst, Operand(cgen_->ToSmi(constant_source)));
+ } else if (cgen_->IsInteger32(constant_source)) {
__ li(dst, Operand(cgen_->ToInteger32(constant_source)));
} else {
__ LoadObject(dst, cgen_->ToHandle(constant_source));
@@ -260,7 +262,9 @@ void LGapResolver::EmitMove(int index) {
} else {
ASSERT(destination->IsStackSlot());
ASSERT(!in_cycle_); // Constant moves happen after all cycles are gone.
- if (cgen_->IsInteger32(constant_source)) {
+ if (cgen_->IsSmi(constant_source)) {
+ __ li(kLithiumScratchReg, Operand(cgen_->ToSmi(constant_source)));
+ } else if (cgen_->IsInteger32(constant_source)) {
__ li(kLithiumScratchReg,
Operand(cgen_->ToInteger32(constant_source)));
} else {
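The gap resolver gains a smi case ahead of the int32 case because a smi-representation constant must be materialized in its tagged form, not as its raw integer bits. The priority, sketched for 32-bit smis (MaterializeConstant is an illustrative helper):

    #include <cstdint>

    static int32_t MaterializeConstant(int32_t value, bool is_smi) {
      return is_smi ? static_cast<int32_t>(static_cast<uint32_t>(value) << 1)
                    : value;  // raw int32 bits
    }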
diff --git a/deps/v8/src/mips/lithium-mips.cc b/deps/v8/src/mips/lithium-mips.cc
index 5ca8cd9b96..ad39c618ea 100644
--- a/deps/v8/src/mips/lithium-mips.cc
+++ b/deps/v8/src/mips/lithium-mips.cc
@@ -373,8 +373,7 @@ void LAccessArgumentsAt::PrintDataTo(StringStream* stream) {
void LStoreNamedField::PrintDataTo(StringStream* stream) {
object()->PrintTo(stream);
- stream->Add(".");
- stream->Add(*String::cast(*name())->ToCString());
+ hydrogen()->access().PrintTo(stream);
stream->Add(" <- ");
value()->PrintTo(stream);
}
@@ -410,7 +409,14 @@ void LStoreKeyed::PrintDataTo(StringStream* stream) {
} else {
stream->Add("] <- ");
}
- value()->PrintTo(stream);
+
+ if (value() == NULL) {
+ ASSERT(hydrogen()->IsConstantHoleStore() &&
+ hydrogen()->value()->representation().IsDouble());
+ stream->Add("<the hole(nan)>");
+ } else {
+ value()->PrintTo(stream);
+ }
}
@@ -703,6 +709,12 @@ LInstruction* LChunkBuilder::DoDummyUse(HDummyUse* instr) {
}
+LInstruction* LChunkBuilder::DoEnvironmentMarker(HEnvironmentMarker* instr) {
+ UNREACHABLE();
+ return NULL;
+}
+
+
LInstruction* LChunkBuilder::DoSoftDeoptimize(HSoftDeoptimize* instr) {
return AssignEnvironment(new(zone()) LDeoptimize);
}
@@ -715,9 +727,9 @@ LInstruction* LChunkBuilder::DoDeoptimize(HDeoptimize* instr) {
LInstruction* LChunkBuilder::DoShift(Token::Value op,
HBitwiseBinaryOperation* instr) {
- if (instr->representation().IsTagged()) {
- ASSERT(instr->left()->representation().IsTagged());
- ASSERT(instr->right()->representation().IsTagged());
+ if (instr->representation().IsSmiOrTagged()) {
+ ASSERT(instr->left()->representation().IsSmiOrTagged());
+ ASSERT(instr->right()->representation().IsSmiOrTagged());
LOperand* left = UseFixed(instr->left(), a1);
LOperand* right = UseFixed(instr->right(), a0);
@@ -785,8 +797,8 @@ LInstruction* LChunkBuilder::DoArithmeticT(Token::Value op,
op == Token::SUB);
HValue* left = instr->left();
HValue* right = instr->right();
- ASSERT(left->representation().IsTagged());
- ASSERT(right->representation().IsTagged());
+ ASSERT(left->representation().IsSmiOrTagged());
+ ASSERT(right->representation().IsSmiOrTagged());
LOperand* left_operand = UseFixed(left, a1);
LOperand* right_operand = UseFixed(right, a0);
LArithmeticT* result =
@@ -1305,9 +1317,9 @@ LInstruction* LChunkBuilder::DoBitwise(HBitwise* instr) {
LOperand* right = UseOrConstantAtStart(instr->BetterRightOperand());
return DefineAsRegister(new(zone()) LBitI(left, right));
} else {
- ASSERT(instr->representation().IsTagged());
- ASSERT(instr->left()->representation().IsTagged());
- ASSERT(instr->right()->representation().IsTagged());
+ ASSERT(instr->representation().IsSmiOrTagged());
+ ASSERT(instr->left()->representation().IsSmiOrTagged());
+ ASSERT(instr->right()->representation().IsSmiOrTagged());
LOperand* left = UseFixed(instr->left(), a1);
LOperand* right = UseFixed(instr->right(), a0);
@@ -1352,43 +1364,45 @@ LInstruction* LChunkBuilder::DoMathFloorOfDiv(HMathFloorOfDiv* instr) {
LInstruction* LChunkBuilder::DoMod(HMod* instr) {
+ HValue* left = instr->left();
+ HValue* right = instr->right();
if (instr->representation().IsInteger32()) {
- ASSERT(instr->left()->representation().IsInteger32());
- ASSERT(instr->right()->representation().IsInteger32());
-
- LModI* mod;
+ ASSERT(left->representation().IsInteger32());
+ ASSERT(right->representation().IsInteger32());
if (instr->HasPowerOf2Divisor()) {
- ASSERT(!instr->CheckFlag(HValue::kCanBeDivByZero));
- LOperand* value = UseRegisterAtStart(instr->left());
- mod = new(zone()) LModI(value, UseOrConstant(instr->right()));
+ ASSERT(!right->CanBeZero());
+ LModI* mod = new(zone()) LModI(UseRegisterAtStart(left),
+ UseOrConstant(right));
+ LInstruction* result = DefineAsRegister(mod);
+ return (left->CanBeNegative() &&
+ instr->CheckFlag(HValue::kBailoutOnMinusZero))
+ ? AssignEnvironment(result)
+ : result;
} else {
- LOperand* dividend = UseRegister(instr->left());
- LOperand* divisor = UseRegister(instr->right());
- mod = new(zone()) LModI(dividend,
- divisor,
- TempRegister(),
- FixedTemp(f20),
- FixedTemp(f22));
+ LModI* mod = new(zone()) LModI(UseRegister(left),
+ UseRegister(right),
+ TempRegister(),
+ FixedTemp(f20),
+ FixedTemp(f22));
+ LInstruction* result = DefineAsRegister(mod);
+ return (right->CanBeZero() ||
+ (left->RangeCanInclude(kMinInt) &&
+ right->RangeCanInclude(-1)) ||
+ instr->CheckFlag(HValue::kBailoutOnMinusZero))
+ ? AssignEnvironment(result)
+ : result;
}
-
- if (instr->CheckFlag(HValue::kBailoutOnMinusZero) ||
- instr->CheckFlag(HValue::kCanBeDivByZero) ||
- instr->CheckFlag(HValue::kCanOverflow)) {
- return AssignEnvironment(DefineAsRegister(mod));
- } else {
- return DefineAsRegister(mod);
- }
- } else if (instr->representation().IsTagged()) {
+ } else if (instr->representation().IsSmiOrTagged()) {
return DoArithmeticT(Token::MOD, instr);
} else {
ASSERT(instr->representation().IsDouble());
- // We call a C function for double modulo. It can't trigger a GC.
- // We need to use fixed result register for the call.
+    // We call a C function for double modulo. It can't trigger a GC. We need
+    // to use a fixed result register for the call.
// TODO(fschneider): Allow any register as input registers.
- LOperand* left = UseFixedDouble(instr->left(), f2);
- LOperand* right = UseFixedDouble(instr->right(), f4);
- LArithmeticD* result = new(zone()) LArithmeticD(Token::MOD, left, right);
- return MarkAsCall(DefineFixedDouble(result, f2), instr);
+ LArithmeticD* mod = new(zone()) LArithmeticD(Token::MOD,
+ UseFixedDouble(left, f2),
+ UseFixedDouble(right, f4));
+ return MarkAsCall(DefineFixedDouble(mod, f2), instr);
}
}
@@ -1491,7 +1505,7 @@ LInstruction* LChunkBuilder::DoAdd(HAdd* instr) {
}
return DoArithmeticD(Token::ADD, instr);
} else {
- ASSERT(instr->representation().IsTagged());
+ ASSERT(instr->representation().IsSmiOrTagged());
return DoArithmeticT(Token::ADD, instr);
}
}
@@ -1555,9 +1569,10 @@ LInstruction* LChunkBuilder::DoCompareGeneric(HCompareGeneric* instr) {
LInstruction* LChunkBuilder::DoCompareIDAndBranch(
HCompareIDAndBranch* instr) {
Representation r = instr->representation();
- if (r.IsInteger32()) {
- ASSERT(instr->left()->representation().IsInteger32());
- ASSERT(instr->right()->representation().IsInteger32());
+ if (r.IsSmiOrInteger32()) {
+ ASSERT(instr->left()->representation().IsSmiOrInteger32());
+ ASSERT(instr->left()->representation().Equals(
+ instr->right()->representation()));
LOperand* left = UseRegisterOrConstantAtStart(instr->left());
LOperand* right = UseRegisterOrConstantAtStart(instr->right());
return new(zone()) LCmpIDAndBranch(left, right);
@@ -1760,12 +1775,26 @@ LInstruction* LChunkBuilder::DoForceRepresentation(HForceRepresentation* bad) {
LInstruction* LChunkBuilder::DoChange(HChange* instr) {
Representation from = instr->from();
Representation to = instr->to();
+ if (from.IsSmi()) {
+ if (to.IsTagged()) {
+ LOperand* value = UseRegister(instr->value());
+ return DefineSameAsFirst(new(zone()) LDummyUse(value));
+ }
+ from = Representation::Tagged();
+ }
if (from.IsTagged()) {
if (to.IsDouble()) {
info()->MarkAsDeferredCalling();
LOperand* value = UseRegister(instr->value());
LNumberUntagD* res = new(zone()) LNumberUntagD(value);
return AssignEnvironment(DefineAsRegister(res));
+ } else if (to.IsSmi()) {
+ HValue* val = instr->value();
+ LOperand* value = UseRegister(val);
+ if (val->type().IsSmi()) {
+ return DefineSameAsFirst(new(zone()) LDummyUse(value));
+ }
+ return AssignEnvironment(DefineSameAsFirst(new(zone()) LCheckSmi(value)));
} else {
ASSERT(to.IsInteger32());
LOperand* value = NULL;
@@ -1800,6 +1829,10 @@ LInstruction* LChunkBuilder::DoChange(HChange* instr) {
LNumberTagD* result = new(zone()) LNumberTagD(value, temp1, temp2);
Define(result, result_temp);
return AssignPointerMap(result);
+ } else if (to.IsSmi()) {
+ LOperand* value = UseRegister(instr->value());
+ return AssignEnvironment(DefineAsRegister(new(zone()) LDoubleToSmi(value,
+ TempRegister(), TempRegister())));
} else {
ASSERT(to.IsInteger32());
LOperand* value = UseRegister(instr->value());
@@ -1822,6 +1855,15 @@ LInstruction* LChunkBuilder::DoChange(HChange* instr) {
LNumberTagI* result = new(zone()) LNumberTagI(value);
return AssignEnvironment(AssignPointerMap(DefineAsRegister(result)));
}
+ } else if (to.IsSmi()) {
+ HValue* val = instr->value();
+ LOperand* value = UseRegister(val);
+ LInstruction* result =
+ DefineSameAsFirst(new(zone()) LInteger32ToSmi(value));
+ if (val->HasRange() && val->range()->IsInSmiRange()) {
+ return result;
+ }
+ return AssignEnvironment(result);
} else {
ASSERT(to.IsDouble());
if (instr->value()->CheckFlag(HInstruction::kUint32)) {
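The int32-to-smi lowering added here agrees with DoInteger32ToSmi on one rule: an environment (deopt point) is needed only when the value's known range can escape the 31-bit smi range. The decision, sketched with assumed 32-bit smi bounds:

    #include <cstdint>

    static bool Int32ToSmiNeedsDeopt(bool has_range, int32_t min, int32_t max) {
      const int32_t kSmiMin = -(1 << 30);      // 31-bit signed payload
      const int32_t kSmiMax = (1 << 30) - 1;
      return !(has_range && min >= kSmiMin && max <= kSmiMax);
    }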
@@ -1859,18 +1901,6 @@ LInstruction* LChunkBuilder::DoCheckPrototypeMaps(HCheckPrototypeMaps* instr) {
}
-LInstruction* LChunkBuilder::DoCheckSmi(HCheckSmi* instr) {
- LOperand* value = UseRegisterAtStart(instr->value());
- return AssignEnvironment(new(zone()) LCheckSmi(value));
-}
-
-
-LInstruction* LChunkBuilder::DoCheckSmiOrInt32(HCheckSmiOrInt32* instr) {
- LOperand* value = UseRegisterAtStart(instr->value());
- return AssignEnvironment(new(zone()) LCheckSmi(value));
-}
-
-
LInstruction* LChunkBuilder::DoCheckFunction(HCheckFunction* instr) {
LOperand* value = UseRegisterAtStart(instr->value());
return AssignEnvironment(new(zone()) LCheckFunction(value));
@@ -1894,7 +1924,7 @@ LInstruction* LChunkBuilder::DoClampToUint8(HClampToUint8* instr) {
} else if (input_rep.IsInteger32()) {
return DefineAsRegister(new(zone()) LClampIToUint8(reg));
} else {
- ASSERT(input_rep.IsTagged());
+ ASSERT(input_rep.IsSmiOrTagged());
// Register allocator doesn't (yet) support allocation of double
// temps. Reserve f22 explicitly.
LClampTToUint8* result = new(zone()) LClampTToUint8(reg, FixedTemp(f22));
@@ -1912,7 +1942,9 @@ LInstruction* LChunkBuilder::DoReturn(HReturn* instr) {
LInstruction* LChunkBuilder::DoConstant(HConstant* instr) {
Representation r = instr->representation();
- if (r.IsInteger32()) {
+ if (r.IsSmi()) {
+ return DefineAsRegister(new(zone()) LConstantS);
+ } else if (r.IsInteger32()) {
return DefineAsRegister(new(zone()) LConstantI);
} else if (r.IsDouble()) {
return DefineAsRegister(new(zone()) LConstantD);
@@ -2028,7 +2060,7 @@ LInstruction* LChunkBuilder::DoLoadExternalArrayPointer(
LInstruction* LChunkBuilder::DoLoadKeyed(HLoadKeyed* instr) {
ASSERT(instr->key()->representation().IsInteger32() ||
- instr->key()->representation().IsTagged());
+ instr->key()->representation().IsSmi());
ElementsKind elements_kind = instr->elements_kind();
LOperand* key = UseRegisterOrConstantAtStart(instr->key());
LLoadKeyed* result = NULL;
@@ -2038,7 +2070,7 @@ LInstruction* LChunkBuilder::DoLoadKeyed(HLoadKeyed* instr) {
if (instr->representation().IsDouble()) {
obj = UseTempRegister(instr->elements());
} else {
- ASSERT(instr->representation().IsTagged());
+ ASSERT(instr->representation().IsSmiOrTagged());
obj = UseRegisterAtStart(instr->elements());
}
result = new(zone()) LLoadKeyed(obj, key);
@@ -2088,7 +2120,7 @@ LInstruction* LChunkBuilder::DoStoreKeyed(HStoreKeyed* instr) {
key = UseRegisterOrConstantAtStart(instr->key());
val = UseTempRegister(instr->value());
} else {
- ASSERT(instr->value()->representation().IsTagged());
+ ASSERT(instr->value()->representation().IsSmiOrTagged());
object = UseTempRegister(instr->elements());
val = needs_write_barrier ? UseTempRegister(instr->value())
: UseRegisterAtStart(instr->value());
@@ -2168,13 +2200,14 @@ LInstruction* LChunkBuilder::DoTrapAllocationMemento(
LInstruction* LChunkBuilder::DoStoreNamedField(HStoreNamedField* instr) {
+ bool is_in_object = instr->access().IsInobject();
bool needs_write_barrier = instr->NeedsWriteBarrier();
bool needs_write_barrier_for_map = !instr->transition().is_null() &&
instr->NeedsWriteBarrierForMap();
LOperand* obj;
if (needs_write_barrier) {
- obj = instr->is_in_object()
+ obj = is_in_object
? UseRegister(instr->object())
: UseTempRegister(instr->object());
} else {
@@ -2198,10 +2231,11 @@ LInstruction* LChunkBuilder::DoStoreNamedField(HStoreNamedField* instr) {
LOperand* temp = needs_write_barrier_for_map ? TempRegister() : NULL;
LStoreNamedField* result = new(zone()) LStoreNamedField(obj, val, temp);
- if ((FLAG_track_fields && instr->field_representation().IsSmi()) ||
- (FLAG_track_heap_object_fields &&
- instr->field_representation().IsHeapObject())) {
- return AssignEnvironment(result);
+ if (FLAG_track_heap_object_fields &&
+ instr->field_representation().IsHeapObject()) {
+ if (!instr->value()->type().IsHeapObject()) {
+ return AssignEnvironment(result);
+ }
}
return result;
}
@@ -2245,14 +2279,6 @@ LInstruction* LChunkBuilder::DoStringLength(HStringLength* instr) {
}
-LInstruction* LChunkBuilder::DoAllocateObject(HAllocateObject* instr) {
- info()->MarkAsDeferredCalling();
- LAllocateObject* result =
- new(zone()) LAllocateObject(TempRegister(), TempRegister());
- return AssignPointerMap(DefineAsRegister(result));
-}
-
-
LInstruction* LChunkBuilder::DoAllocate(HAllocate* instr) {
info()->MarkAsDeferredCalling();
LOperand* size = instr->size()->IsConstant()
@@ -2342,7 +2368,7 @@ LInstruction* LChunkBuilder::DoAccessArgumentsAt(HAccessArgumentsAt* instr) {
index = UseOrConstant(instr->index());
} else {
length = UseTempRegister(instr->length());
- index = Use(instr->index());
+ index = UseRegisterAtStart(instr->index());
}
return DefineAsRegister(new(zone()) LAccessArgumentsAt(args, length, index));
}
diff --git a/deps/v8/src/mips/lithium-mips.h b/deps/v8/src/mips/lithium-mips.h
index 1abea90141..50feee0cb7 100644
--- a/deps/v8/src/mips/lithium-mips.h
+++ b/deps/v8/src/mips/lithium-mips.h
@@ -49,7 +49,6 @@ class LCodeGen;
#define LITHIUM_CONCRETE_INSTRUCTION_LIST(V) \
V(AccessArgumentsAt) \
V(AddI) \
- V(AllocateObject) \
V(Allocate) \
V(ApplyArguments) \
V(ArgumentsElements) \
@@ -87,6 +86,7 @@ class LCodeGen;
V(CmpT) \
V(ConstantD) \
V(ConstantI) \
+ V(ConstantS) \
V(ConstantT) \
V(Context) \
V(DebugBreak) \
@@ -95,6 +95,7 @@ class LCodeGen;
V(Deoptimize) \
V(DivI) \
V(DoubleToI) \
+ V(DoubleToSmi) \
V(DummyUse) \
V(ElementsKind) \
V(FixedArrayBaseLength) \
@@ -111,6 +112,7 @@ class LCodeGen;
V(InstanceSize) \
V(InstructionGap) \
V(Integer32ToDouble) \
+ V(Integer32ToSmi) \
V(Uint32ToDouble) \
V(InvokeFunction) \
V(IsConstructCallAndBranch) \
@@ -1151,6 +1153,15 @@ class LConstantI: public LTemplateInstruction<1, 0, 0> {
};
+class LConstantS: public LTemplateInstruction<1, 0, 0> {
+ public:
+ DECLARE_CONCRETE_INSTRUCTION(ConstantS, "constant-s")
+ DECLARE_HYDROGEN_ACCESSOR(Constant)
+
+ Smi* value() const { return Smi::FromInt(hydrogen()->Integer32Value()); }
+};
+
+
class LConstantD: public LTemplateInstruction<1, 0, 0> {
public:
DECLARE_CONCRETE_INSTRUCTION(ConstantD, "constant-d")
@@ -1901,6 +1912,19 @@ class LInteger32ToDouble: public LTemplateInstruction<1, 1, 0> {
};
+class LInteger32ToSmi: public LTemplateInstruction<1, 1, 0> {
+ public:
+ explicit LInteger32ToSmi(LOperand* value) {
+ inputs_[0] = value;
+ }
+
+ LOperand* value() { return inputs_[0]; }
+
+ DECLARE_CONCRETE_INSTRUCTION(Integer32ToSmi, "int32-to-smi")
+ DECLARE_HYDROGEN_ACCESSOR(Change)
+};
+
+
class LUint32ToDouble: public LTemplateInstruction<1, 1, 0> {
public:
explicit LUint32ToDouble(LOperand* value) {
@@ -1954,6 +1978,25 @@ class LNumberTagD: public LTemplateInstruction<1, 1, 2> {
};
+class LDoubleToSmi: public LTemplateInstruction<1, 1, 2> {
+ public:
+ LDoubleToSmi(LOperand* value, LOperand* temp, LOperand* temp2) {
+ inputs_[0] = value;
+ temps_[0] = temp;
+ temps_[1] = temp2;
+ }
+
+ LOperand* value() { return inputs_[0]; }
+ LOperand* temp() { return temps_[0]; }
+ LOperand* temp2() { return temps_[1]; }
+
+ DECLARE_CONCRETE_INSTRUCTION(DoubleToSmi, "double-to-smi")
+ DECLARE_HYDROGEN_ACCESSOR(UnaryOperation)
+
+ bool truncating() { return hydrogen()->CanTruncateToInt32(); }
+};
+
+
// Sometimes truncating conversion from a tagged value to an int32.
class LDoubleToI: public LTemplateInstruction<1, 1, 2> {
public:
@@ -2058,9 +2101,6 @@ class LStoreNamedField: public LTemplateInstruction<0, 2, 1> {
virtual void PrintDataTo(StringStream* stream);
- Handle<Object> name() const { return hydrogen()->name(); }
- bool is_in_object() { return hydrogen()->is_in_object(); }
- int offset() { return hydrogen()->offset(); }
Handle<Map> transition() const { return hydrogen()->transition(); }
Representation representation() const {
return hydrogen()->field_representation();
@@ -2293,7 +2333,7 @@ class LCheckPrototypeMaps: public LTemplateInstruction<0, 0, 2> {
};
-class LCheckSmi: public LTemplateInstruction<0, 1, 0> {
+class LCheckSmi: public LTemplateInstruction<1, 1, 0> {
public:
explicit LCheckSmi(LOperand* value) {
inputs_[0] = value;
@@ -2357,21 +2397,6 @@ class LClampTToUint8: public LTemplateInstruction<1, 1, 1> {
};
-class LAllocateObject: public LTemplateInstruction<1, 1, 2> {
- public:
- LAllocateObject(LOperand* temp, LOperand* temp2) {
- temps_[0] = temp;
- temps_[1] = temp2;
- }
-
- LOperand* temp() { return temps_[0]; }
- LOperand* temp2() { return temps_[1]; }
-
- DECLARE_CONCRETE_INSTRUCTION(AllocateObject, "allocate-object")
- DECLARE_HYDROGEN_ACCESSOR(AllocateObject)
-};
-
-
class LAllocate: public LTemplateInstruction<1, 2, 2> {
public:
LAllocate(LOperand* size, LOperand* temp1, LOperand* temp2) {
diff --git a/deps/v8/src/mips/macro-assembler-mips.cc b/deps/v8/src/mips/macro-assembler-mips.cc
index cea4bc4683..d55451b3ec 100644
--- a/deps/v8/src/mips/macro-assembler-mips.cc
+++ b/deps/v8/src/mips/macro-assembler-mips.cc
@@ -83,7 +83,7 @@ void MacroAssembler::StoreRoot(Register source,
void MacroAssembler::LoadHeapObject(Register result,
Handle<HeapObject> object) {
- ALLOW_HANDLE_DEREF(isolate(), "using raw address");
+ AllowDeferredHandleDereference using_raw_address;
if (isolate()->heap()->InNewSpace(*object)) {
Handle<JSGlobalPropertyCell> cell =
isolate()->factory()->NewJSGlobalPropertyCell(object);
@@ -2458,7 +2458,7 @@ void MacroAssembler::Jump(Handle<Code> code,
const Operand& rt,
BranchDelaySlot bd) {
ASSERT(RelocInfo::IsCodeTarget(rmode));
- ALLOW_HANDLE_DEREF(isolate(), "embedding raw address");
+ AllowDeferredHandleDereference embedding_raw_address;
Jump(reinterpret_cast<intptr_t>(code.location()), rmode, cond, rs, rt, bd);
}
@@ -2546,7 +2546,7 @@ int MacroAssembler::CallSize(Handle<Code> code,
Register rs,
const Operand& rt,
BranchDelaySlot bd) {
- ALLOW_HANDLE_DEREF(isolate(), "using raw address");
+ AllowDeferredHandleDereference using_raw_address;
return CallSize(reinterpret_cast<Address>(code.location()),
rmode, cond, rs, rt, bd);
}
@@ -2567,7 +2567,7 @@ void MacroAssembler::Call(Handle<Code> code,
SetRecordedAstId(ast_id);
rmode = RelocInfo::CODE_TARGET_WITH_ID;
}
- ALLOW_HANDLE_DEREF(isolate(), "embedding raw address");
+ AllowDeferredHandleDereference embedding_raw_address;
Call(reinterpret_cast<Address>(code.location()), rmode, cond, rs, rt, bd);
ASSERT_EQ(CallSize(code, rmode, ast_id, cond, rs, rt, bd),
SizeOfCodeGeneratedSince(&start));
@@ -3464,10 +3464,9 @@ void MacroAssembler::CompareMapAndBranch(Register obj,
Handle<Map> map,
Label* early_success,
Condition cond,
- Label* branch_to,
- CompareMapMode mode) {
+ Label* branch_to) {
lw(scratch, FieldMemOperand(obj, HeapObject::kMapOffset));
- CompareMapAndBranch(scratch, map, early_success, cond, branch_to, mode);
+ CompareMapAndBranch(scratch, map, early_success, cond, branch_to);
}
@@ -3475,25 +3474,8 @@ void MacroAssembler::CompareMapAndBranch(Register obj_map,
Handle<Map> map,
Label* early_success,
Condition cond,
- Label* branch_to,
- CompareMapMode mode) {
- Operand right = Operand(map);
- if (mode == ALLOW_ELEMENT_TRANSITION_MAPS) {
- ElementsKind kind = map->elements_kind();
- if (IsFastElementsKind(kind)) {
- bool packed = IsFastPackedElementsKind(kind);
- Map* current_map = *map;
- while (CanTransitionToMoreGeneralFastElementsKind(kind, packed)) {
- kind = GetNextMoreGeneralFastElementsKind(kind, packed);
- current_map = current_map->LookupElementsTransitionMap(kind);
- if (!current_map) break;
- Branch(early_success, eq, obj_map, right);
- right = Operand(Handle<Map>(current_map));
- }
- }
- }
-
- Branch(branch_to, cond, obj_map, right);
+ Label* branch_to) {
+ Branch(branch_to, cond, obj_map, Operand(map));
}
@@ -3501,13 +3483,12 @@ void MacroAssembler::CheckMap(Register obj,
Register scratch,
Handle<Map> map,
Label* fail,
- SmiCheckType smi_check_type,
- CompareMapMode mode) {
+ SmiCheckType smi_check_type) {
if (smi_check_type == DO_SMI_CHECK) {
JumpIfSmi(obj, fail);
}
Label success;
- CompareMapAndBranch(obj, scratch, map, &success, ne, fail, mode);
+ CompareMapAndBranch(obj, scratch, map, &success, ne, fail);
bind(&success);
}
@@ -3963,7 +3944,9 @@ void MacroAssembler::CallApiFunctionAndReturn(ExternalReference function,
// (4 bytes) will be placed. This is also built into the Simulator.
// Set up the pointer to the returned value (a0). It was allocated in
// EnterExitFrame.
- addiu(a0, fp, ExitFrameConstants::kStackSpaceOffset);
+ if (returns_handle) {
+ addiu(a0, fp, ExitFrameConstants::kStackSpaceOffset);
+ }
// Native call returns to the DirectCEntry stub which redirects to the
// return address pushed on stack (could have moved after GC).
diff --git a/deps/v8/src/mips/macro-assembler-mips.h b/deps/v8/src/mips/macro-assembler-mips.h
index 6511223aae..5e6bfbae43 100644
--- a/deps/v8/src/mips/macro-assembler-mips.h
+++ b/deps/v8/src/mips/macro-assembler-mips.h
@@ -289,7 +289,7 @@ class MacroAssembler: public Assembler {
void LoadHeapObject(Register dst, Handle<HeapObject> object);
void LoadObject(Register result, Handle<Object> object) {
- ALLOW_HANDLE_DEREF(isolate(), "heap object check");
+ AllowDeferredHandleDereference heap_object_check;
if (object->IsHeapObject()) {
LoadHeapObject(result, Handle<HeapObject>::cast(object));
} else {
@@ -1014,8 +1014,7 @@ class MacroAssembler: public Assembler {
Handle<Map> map,
Label* early_success,
Condition cond,
- Label* branch_to,
- CompareMapMode mode = REQUIRE_EXACT_MAP);
+ Label* branch_to);
// As above, but the map of the object is already loaded into the register
// which is preserved by the code generated.
@@ -1023,8 +1022,7 @@ class MacroAssembler: public Assembler {
Handle<Map> map,
Label* early_success,
Condition cond,
- Label* branch_to,
- CompareMapMode mode = REQUIRE_EXACT_MAP);
+ Label* branch_to);
// Check if the map of an object is equal to a specified map and branch to
// label if not. Skip the smi check if not required (object is known to be a
@@ -1034,8 +1032,7 @@ class MacroAssembler: public Assembler {
Register scratch,
Handle<Map> map,
Label* fail,
- SmiCheckType smi_check_type,
- CompareMapMode mode = REQUIRE_EXACT_MAP);
+ SmiCheckType smi_check_type);
void CheckMap(Register obj,
diff --git a/deps/v8/src/mips/regexp-macro-assembler-mips.cc b/deps/v8/src/mips/regexp-macro-assembler-mips.cc
index 7289296d56..2961519af2 100644
--- a/deps/v8/src/mips/regexp-macro-assembler-mips.cc
+++ b/deps/v8/src/mips/regexp-macro-assembler-mips.cc
@@ -122,7 +122,7 @@ RegExpMacroAssemblerMIPS::RegExpMacroAssemblerMIPS(
int registers_to_save,
Zone* zone)
: NativeRegExpMacroAssembler(zone),
- masm_(new MacroAssembler(Isolate::Current(), NULL, kRegExpCodeSize)),
+ masm_(new MacroAssembler(zone->isolate(), NULL, kRegExpCodeSize)),
mode_(mode),
num_registers_(registers_to_save),
num_saved_registers_(registers_to_save),
@@ -235,55 +235,6 @@ void RegExpMacroAssemblerMIPS::CheckCharacterLT(uc16 limit, Label* on_less) {
}
-void RegExpMacroAssemblerMIPS::CheckCharacters(Vector<const uc16> str,
- int cp_offset,
- Label* on_failure,
- bool check_end_of_string) {
- if (on_failure == NULL) {
- // Instead of inlining a backtrack for each test, (re)use the global
- // backtrack target.
- on_failure = &backtrack_label_;
- }
-
- if (check_end_of_string) {
- // Is last character of required match inside string.
- CheckPosition(cp_offset + str.length() - 1, on_failure);
- }
-
- __ Addu(a0, end_of_input_address(), Operand(current_input_offset()));
- if (cp_offset != 0) {
- int byte_offset = cp_offset * char_size();
- __ Addu(a0, a0, Operand(byte_offset));
- }
-
- // a0 : Address of characters to match against str.
- int stored_high_byte = 0;
- for (int i = 0; i < str.length(); i++) {
- if (mode_ == ASCII) {
- __ lbu(a1, MemOperand(a0, 0));
- __ addiu(a0, a0, char_size());
- ASSERT(str[i] <= String::kMaxOneByteCharCode);
- BranchOrBacktrack(on_failure, ne, a1, Operand(str[i]));
- } else {
- __ lhu(a1, MemOperand(a0, 0));
- __ addiu(a0, a0, char_size());
- uc16 match_char = str[i];
- int match_high_byte = (match_char >> 8);
- if (match_high_byte == 0) {
- BranchOrBacktrack(on_failure, ne, a1, Operand(str[i]));
- } else {
- if (match_high_byte != stored_high_byte) {
- __ li(a2, Operand(match_high_byte));
- stored_high_byte = match_high_byte;
- }
- __ Addu(a3, a2, Operand(match_char & 0xff));
- BranchOrBacktrack(on_failure, ne, a1, Operand(a3));
- }
- }
- }
-}
-
-
void RegExpMacroAssemblerMIPS::CheckGreedyLoop(Label* on_equal) {
Label backtrack_non_equal;
__ lw(a0, MemOperand(backtrack_stackpointer(), 0));
@@ -929,9 +880,8 @@ Handle<HeapObject> RegExpMacroAssemblerMIPS::GetCode(Handle<String> source) {
CodeDesc code_desc;
masm_->GetCode(&code_desc);
- Handle<Code> code = FACTORY->NewCode(code_desc,
- Code::ComputeFlags(Code::REGEXP),
- masm_->CodeObject());
+ Handle<Code> code = isolate()->factory()->NewCode(
+ code_desc, Code::ComputeFlags(Code::REGEXP), masm_->CodeObject());
LOG(Isolate::Current(), RegExpCodeCreateEvent(*code, *source));
return Handle<HeapObject>::cast(code);
}
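Both edits in this file share one theme of the upgrade: thread-local lookups (Isolate::Current(), the FACTORY macro) give way to isolates threaded explicitly through objects that already know their owner, here the Zone. A hedged sketch of the two shapes (stand-in types, not the V8 API):

    struct Isolate;  // one per VM instance

    // TLS form: a hidden thread-local read on every call, and the
    // dependency on a particular isolate never appears in signatures.
    Isolate* CurrentIsolate();  // think Isolate::Current()

    // Threaded form: owners carry their isolate, callees just ask.
    struct Zone {
      explicit Zone(Isolate* isolate) : isolate_(isolate) {}
      Isolate* isolate() const { return isolate_; }
     private:
      Isolate* isolate_;
    };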
diff --git a/deps/v8/src/mips/regexp-macro-assembler-mips.h b/deps/v8/src/mips/regexp-macro-assembler-mips.h
index 3ad64f9aeb..86ae4d45ee 100644
--- a/deps/v8/src/mips/regexp-macro-assembler-mips.h
+++ b/deps/v8/src/mips/regexp-macro-assembler-mips.h
@@ -55,10 +55,6 @@ class RegExpMacroAssemblerMIPS: public NativeRegExpMacroAssembler {
Label* on_equal);
virtual void CheckCharacterGT(uc16 limit, Label* on_greater);
virtual void CheckCharacterLT(uc16 limit, Label* on_less);
- virtual void CheckCharacters(Vector<const uc16> str,
- int cp_offset,
- Label* on_failure,
- bool check_end_of_string);
// A "greedy loop" is a loop that is both greedy and with a simple
// body. It has a particularly simple implementation.
virtual void CheckGreedyLoop(Label* on_tos_equals_current_position);
diff --git a/deps/v8/src/mips/simulator-mips.cc b/deps/v8/src/mips/simulator-mips.cc
index 18e78a5abd..d8a39ab30c 100644
--- a/deps/v8/src/mips/simulator-mips.cc
+++ b/deps/v8/src/mips/simulator-mips.cc
@@ -1387,12 +1387,19 @@ typedef double (*SimulatorRuntimeFPIntCall)(double darg0, int32_t arg0);
// This signature supports direct call in to API function native callback
// (refer to InvocationCallback in v8.h).
+// NOTE: the O32 ABI requires a0 to hold a special pointer when returning a
+// struct from the function (which is currently the case). This means we pass
+// the first argument in a1 instead of a0.
typedef v8::Handle<v8::Value> (*SimulatorRuntimeDirectApiCall)(int32_t arg0);
+// Here, we pass the first argument in a0, because this function
+// does not return a struct.
typedef void (*SimulatorRuntimeDirectApiCallNew)(int32_t arg0);
// This signature supports direct call to accessor getter callback.
+// See comment at SimulatorRuntimeDirectApiCall.
typedef v8::Handle<v8::Value> (*SimulatorRuntimeDirectGetterCall)(int32_t arg0,
int32_t arg1);
+// See comment at SimulatorRuntimeDirectApiCallNew.
typedef void (*SimulatorRuntimeDirectGetterCallNew)(int32_t arg0,
int32_t arg1);
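The register notes above encode a subtlety of the MIPS O32 calling convention: when the callee returns a struct by value (here, a v8::Handle), the caller passes a hidden pointer to the return slot in a0, shifting the first visible argument to a1. A toy dispatch sketch under that assumption (illustrative names, not the V8 API):

    enum Register { a0, a1, a2 };

    // Returns the simulated register holding the callback's first
    // visible argument under O32.
    Register FirstArgRegister(bool returns_struct) {
      // A struct return claims a0 for the hidden result pointer.
      return returns_struct ? a1 : a0;
    }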
@@ -1542,40 +1549,50 @@ void Simulator::SoftwareInterrupt(Instruction* instr) {
} else if (
redirection->type() == ExternalReference::DIRECT_API_CALL ||
redirection->type() == ExternalReference::DIRECT_API_CALL_NEW) {
- // See DirectCEntryStub::GenerateCall for explanation of register usage.
- if (::v8::internal::FLAG_trace_sim) {
- PrintF("Call to host function at %p args %08x\n",
- reinterpret_cast<void*>(external), arg1);
- }
if (redirection->type() == ExternalReference::DIRECT_API_CALL) {
+ // See comment at type definition of SimulatorRuntimeDirectApiCall
+ // for explanation of register usage.
+ if (::v8::internal::FLAG_trace_sim) {
+ PrintF("Call to host function at %p args %08x\n",
+ reinterpret_cast<void*>(external), arg1);
+ }
SimulatorRuntimeDirectApiCall target =
reinterpret_cast<SimulatorRuntimeDirectApiCall>(external);
v8::Handle<v8::Value> result = target(arg1);
*(reinterpret_cast<int*>(arg0)) = reinterpret_cast<int32_t>(*result);
set_register(v0, arg0);
} else {
+ if (::v8::internal::FLAG_trace_sim) {
+ PrintF("Call to host function at %p args %08x\n",
+ reinterpret_cast<void*>(external), arg0);
+ }
SimulatorRuntimeDirectApiCallNew target =
reinterpret_cast<SimulatorRuntimeDirectApiCallNew>(external);
- target(arg1);
+ target(arg0);
}
} else if (
redirection->type() == ExternalReference::DIRECT_GETTER_CALL ||
redirection->type() == ExternalReference::DIRECT_GETTER_CALL_NEW) {
- // See DirectCEntryStub::GenerateCall for explanation of register usage.
- if (::v8::internal::FLAG_trace_sim) {
- PrintF("Call to host function at %p args %08x %08x\n",
- reinterpret_cast<void*>(external), arg1, arg2);
- }
if (redirection->type() == ExternalReference::DIRECT_GETTER_CALL) {
+ // See comment at type definition of SimulatorRuntimeDirectGetterCall
+ // for explanation of register usage.
+ if (::v8::internal::FLAG_trace_sim) {
+ PrintF("Call to host function at %p args %08x %08x\n",
+ reinterpret_cast<void*>(external), arg1, arg2);
+ }
SimulatorRuntimeDirectGetterCall target =
reinterpret_cast<SimulatorRuntimeDirectGetterCall>(external);
v8::Handle<v8::Value> result = target(arg1, arg2);
*(reinterpret_cast<int*>(arg0)) = reinterpret_cast<int32_t>(*result);
set_register(v0, arg0);
} else {
+ if (::v8::internal::FLAG_trace_sim) {
+ PrintF("Call to host function at %p args %08x %08x\n",
+ reinterpret_cast<void*>(external), arg0, arg1);
+ }
SimulatorRuntimeDirectGetterCallNew target =
reinterpret_cast<SimulatorRuntimeDirectGetterCallNew>(external);
- target(arg1, arg2);
+ target(arg0, arg1);
}
} else {
SimulatorRuntimeCall target =
@@ -2074,7 +2091,7 @@ void Simulator::DecodeTypeRegister(Instruction* instr) {
set_fpu_register_double(fd_reg, fs / ft);
break;
case ABS_D:
- set_fpu_register_double(fd_reg, fs < 0 ? -fs : fs);
+ set_fpu_register_double(fd_reg, fabs(fs));
break;
case MOV_D:
set_fpu_register_double(fd_reg, fs);
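The ABS_D change above is not merely cosmetic: the ternary mishandles negative zero, since -0.0 < 0 is false and the sign bit survives, while fabs clears it unconditionally. A small standalone check:

    #include <cmath>
    #include <cstdio>

    int main() {
      double nz = -0.0;
      double ternary = nz < 0 ? -nz : nz;  // -0.0 < 0 is false: keeps -0.0
      double viafabs = std::fabs(nz);      // clears the sign bit: +0.0
      std::printf("%g %g\n", 1.0 / ternary, 1.0 / viafabs);  // -inf inf
      return 0;
    }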
diff --git a/deps/v8/src/mips/stub-cache-mips.cc b/deps/v8/src/mips/stub-cache-mips.cc
index 4a73be2dbe..be32744b2e 100644
--- a/deps/v8/src/mips/stub-cache-mips.cc
+++ b/deps/v8/src/mips/stub-cache-mips.cc
@@ -337,8 +337,8 @@ void StubCompiler::GenerateLoadArrayLength(MacroAssembler* masm,
__ Branch(miss_label, ne, scratch, Operand(JS_ARRAY_TYPE));
// Load length directly from the JS array.
+ __ Ret(USE_DELAY_SLOT);
__ lw(v0, FieldMemOperand(receiver, JSArray::kLengthOffset));
- __ Ret();
}
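This hunk, and the many like it below, reorder the return so the load sits in the MIPS branch delay slot: the instruction after a jump always executes, so Ret(USE_DELAY_SLOT) followed by the lw saves the nop a plain Ret would need to pad the slot. A toy emitter showing the two shapes (illustrative, not the V8 assembler); the DropAndRet(argc + 1) rewrites later in this file ride on the same trick.

    #include <cstdio>

    enum RetMode { NO_DELAY_SLOT, USE_DELAY_SLOT };

    // Toy "assembler" that prints what would be emitted.
    void EmitRet(RetMode mode) {
      std::puts("jr   ra");                         // return jump
      if (mode == NO_DELAY_SLOT) std::puts("nop");  // pad unclaimed slot
    }

    int main() {
      EmitRet(USE_DELAY_SLOT);
      std::puts("lw   v0, 8(a1)");  // executes in the delay slot
      return 0;
    }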
@@ -384,8 +384,8 @@ void StubCompiler::GenerateLoadStringLength(MacroAssembler* masm,
support_wrappers ? &check_wrapper : miss);
// Load length directly from the string.
+ __ Ret(USE_DELAY_SLOT);
__ lw(v0, FieldMemOperand(receiver, String::kLengthOffset));
- __ Ret();
if (support_wrappers) {
// Check if the object is a JSValue wrapper.
@@ -395,8 +395,8 @@ void StubCompiler::GenerateLoadStringLength(MacroAssembler* masm,
// Unwrap the value and check if the wrapped value is a string.
__ lw(scratch1, FieldMemOperand(receiver, JSValue::kValueOffset));
GenerateStringCheck(masm, scratch1, scratch2, scratch2, miss, miss);
+ __ Ret(USE_DELAY_SLOT);
__ lw(v0, FieldMemOperand(scratch1, String::kLengthOffset));
- __ Ret();
}
}
@@ -407,8 +407,8 @@ void StubCompiler::GenerateLoadFunctionPrototype(MacroAssembler* masm,
Register scratch2,
Label* miss_label) {
__ TryGetFunctionPrototype(receiver, scratch1, scratch2, miss_label);
+ __ Ret(USE_DELAY_SLOT);
__ mov(v0, scratch1);
- __ Ret();
}
@@ -453,7 +453,7 @@ void StubCompiler::GenerateStoreTransition(MacroAssembler* masm,
// Check that the map of the object hasn't changed.
__ CheckMap(receiver_reg, scratch1, Handle<Map>(object->map()), miss_label,
- DO_SMI_CHECK, REQUIRE_EXACT_MAP);
+ DO_SMI_CHECK);
// Perform global security token check if needed.
if (object->IsJSGlobalProxy()) {
@@ -571,6 +571,8 @@ void StubCompiler::GenerateStoreTransition(MacroAssembler* masm,
index -= object->map()->inobject_properties();
// TODO(verwaest): Share this code as a code stub.
+ SmiCheck smi_check = representation.IsTagged()
+ ? INLINE_SMI_CHECK : OMIT_SMI_CHECK;
if (index < 0) {
// Set the property straight into the object.
int offset = object->map()->instance_size() + (index * kPointerSize);
@@ -596,7 +598,9 @@ void StubCompiler::GenerateStoreTransition(MacroAssembler* masm,
name_reg,
scratch1,
kRAHasNotBeenSaved,
- kDontSaveFPRegs);
+ kDontSaveFPRegs,
+ EMIT_REMEMBERED_SET,
+ smi_check);
}
} else {
// Write to the properties array.
@@ -626,15 +630,17 @@ void StubCompiler::GenerateStoreTransition(MacroAssembler* masm,
name_reg,
receiver_reg,
kRAHasNotBeenSaved,
- kDontSaveFPRegs);
+ kDontSaveFPRegs,
+ EMIT_REMEMBERED_SET,
+ smi_check);
}
}
// Return the value (register v0).
ASSERT(value_reg.is(a0));
__ bind(&exit);
+ __ Ret(USE_DELAY_SLOT);
__ mov(v0, a0);
- __ Ret();
}
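The smi_check plumbing added above lets the store skip the pre-barrier smi test when the field's representation already rules smis out: a smi store never creates a pointer the remembered set must track, so only Tagged fields need the run-time test. A hedged sketch of the decision, assuming V8's 32-bit smi tagging:

    #include <cstdint>

    // On 32-bit V8 a smi has tag bit 0 == 0 (illustrative of the scheme).
    inline bool IsSmi(intptr_t value) { return (value & 1) == 0; }

    void RecordWrite(intptr_t stored, bool representation_is_tagged) {
      // Tagged fields may hold a smi: test and skip the barrier if so.
      if (representation_is_tagged && IsSmi(stored)) return;
      // Otherwise update the remembered set for the written slot.
      // ... barrier work elided ...
    }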
@@ -656,7 +662,7 @@ void StubCompiler::GenerateStoreField(MacroAssembler* masm,
// Check that the map of the object hasn't changed.
__ CheckMap(receiver_reg, scratch1, Handle<Map>(object->map()), miss_label,
- DO_SMI_CHECK, ALLOW_ELEMENT_TRANSITION_MAPS);
+ DO_SMI_CHECK);
// Perform global security token check if needed.
if (object->IsJSGlobalProxy()) {
@@ -709,12 +715,14 @@ void StubCompiler::GenerateStoreField(MacroAssembler* masm,
__ sdc1(f4, FieldMemOperand(scratch1, HeapNumber::kValueOffset));
// Return the value (register v0).
ASSERT(value_reg.is(a0));
+ __ Ret(USE_DELAY_SLOT);
__ mov(v0, a0);
- __ Ret();
return;
}
// TODO(verwaest): Share this code as a code stub.
+ SmiCheck smi_check = representation.IsTagged()
+ ? INLINE_SMI_CHECK : OMIT_SMI_CHECK;
if (index < 0) {
// Set the property straight into the object.
int offset = object->map()->instance_size() + (index * kPointerSize);
@@ -732,7 +740,9 @@ void StubCompiler::GenerateStoreField(MacroAssembler* masm,
name_reg,
scratch1,
kRAHasNotBeenSaved,
- kDontSaveFPRegs);
+ kDontSaveFPRegs,
+ EMIT_REMEMBERED_SET,
+ smi_check);
}
} else {
// Write to the properties array.
@@ -754,15 +764,17 @@ void StubCompiler::GenerateStoreField(MacroAssembler* masm,
name_reg,
receiver_reg,
kRAHasNotBeenSaved,
- kDontSaveFPRegs);
+ kDontSaveFPRegs,
+ EMIT_REMEMBERED_SET,
+ smi_check);
}
}
// Return the value (register v0).
ASSERT(value_reg.is(a0));
__ bind(&exit);
+ __ Ret(USE_DELAY_SLOT);
__ mov(v0, a0);
- __ Ret();
}
@@ -871,11 +883,12 @@ static void GenerateFastApiDirectCall(MacroAssembler* masm,
// -- sp[4] : callee JS function
// -- sp[8] : call data
// -- sp[12] : isolate
- // -- sp[16] : ReturnValue
- // -- sp[20] : last JS argument
+ // -- sp[16] : ReturnValue default value
+ // -- sp[20] : ReturnValue
+ // -- sp[24] : last JS argument
// -- ...
- // -- sp[(argc + 4) * 4] : first JS argument
- // -- sp[(argc + 5) * 4] : receiver
+ // -- sp[(argc + 5) * 4] : first JS argument
+ // -- sp[(argc + 6) * 4] : receiver
// -----------------------------------
// Get the function and setup the context.
Handle<JSFunction> function = optimization.constant_function();
@@ -893,15 +906,16 @@ static void GenerateFastApiDirectCall(MacroAssembler* masm,
}
__ li(t3, Operand(ExternalReference::isolate_address(masm->isolate())));
- // Store JS function, call data, isolate and ReturnValue.
+ // Store JS function, call data, isolate, ReturnValue default and ReturnValue.
__ sw(t1, MemOperand(sp, 1 * kPointerSize));
__ sw(t2, MemOperand(sp, 2 * kPointerSize));
__ sw(t3, MemOperand(sp, 3 * kPointerSize));
__ LoadRoot(t1, Heap::kUndefinedValueRootIndex);
__ sw(t1, MemOperand(sp, 4 * kPointerSize));
+ __ sw(t1, MemOperand(sp, 5 * kPointerSize));
// Prepare arguments.
- __ Addu(a2, sp, Operand(4 * kPointerSize));
+ __ Addu(a2, sp, Operand(5 * kPointerSize));
// Allocate the v8::Arguments structure in the arguments' space since
// it's not controlled by GC.
@@ -912,28 +926,31 @@ static void GenerateFastApiDirectCall(MacroAssembler* masm,
// NOTE: the O32 ABI requires a0 to hold a special pointer when returning a
// struct from the function (which is currently the case). This means we pass
- // the first argument in a1 instead of a0. TryCallApiFunctionAndReturn
- // will handle setting up a0.
+ // the first argument in a1 instead of a0, if returns_handle is true.
+ // CallApiFunctionAndReturn will set up a0.
+
+ Address function_address = v8::ToCData<Address>(api_call_info->callback());
+ bool returns_handle =
+ !CallbackTable::ReturnsVoid(masm->isolate(), function_address);
- // a1 = v8::Arguments&
+ Register first_arg = returns_handle ? a1 : a0;
+
+ // first_arg = v8::Arguments&
// Arguments is built at sp + 1 (sp is a reserved spot for ra).
- __ Addu(a1, sp, kPointerSize);
+ __ Addu(first_arg, sp, kPointerSize);
// v8::Arguments::implicit_args_
- __ sw(a2, MemOperand(a1, 0 * kPointerSize));
+ __ sw(a2, MemOperand(first_arg, 0 * kPointerSize));
// v8::Arguments::values_
__ Addu(t0, a2, Operand(argc * kPointerSize));
- __ sw(t0, MemOperand(a1, 1 * kPointerSize));
+ __ sw(t0, MemOperand(first_arg, 1 * kPointerSize));
// v8::Arguments::length_ = argc
__ li(t0, Operand(argc));
- __ sw(t0, MemOperand(a1, 2 * kPointerSize));
+ __ sw(t0, MemOperand(first_arg, 2 * kPointerSize));
// v8::Arguments::is_construct_call = 0
- __ sw(zero_reg, MemOperand(a1, 3 * kPointerSize));
+ __ sw(zero_reg, MemOperand(first_arg, 3 * kPointerSize));
const int kStackUnwindSpace = argc + kFastApiCallArguments + 1;
- Address function_address = v8::ToCData<Address>(api_call_info->callback());
- bool returns_handle =
- !CallbackTable::ReturnsVoid(masm->isolate(), function_address);
ApiFunction fun(function_address);
ExternalReference::Type type =
returns_handle ?
@@ -1249,8 +1266,7 @@ Register StubCompiler::CheckPrototypes(Handle<JSObject> object,
if (!current.is_identical_to(first) || check == CHECK_ALL_MAPS) {
Handle<Map> current_map(current->map());
// CheckMap implicitly loads the map of |reg| into |map_reg|.
- __ CheckMap(reg, map_reg, current_map, miss, DONT_DO_SMI_CHECK,
- ALLOW_ELEMENT_TRANSITION_MAPS);
+ __ CheckMap(reg, map_reg, current_map, miss, DONT_DO_SMI_CHECK);
} else {
__ lw(map_reg, FieldMemOperand(reg, HeapObject::kMapOffset));
}
@@ -1286,7 +1302,7 @@ Register StubCompiler::CheckPrototypes(Handle<JSObject> object,
if (!holder.is_identical_to(first) || check == CHECK_ALL_MAPS) {
// Check the holder map.
__ CheckMap(reg, scratch1, Handle<Map>(holder->map()), miss,
- DONT_DO_SMI_CHECK, ALLOW_ELEMENT_TRANSITION_MAPS);
+ DONT_DO_SMI_CHECK);
}
// Perform security check for access to the global object.
@@ -1421,23 +1437,31 @@ void BaseLoadStubCompiler::GenerateLoadCallback(
} else {
__ li(scratch3(), Handle<Object>(callback->data(), isolate()));
}
- __ Subu(sp, sp, 5 * kPointerSize);
- __ sw(reg, MemOperand(sp, 4 * kPointerSize));
+ __ Subu(sp, sp, 6 * kPointerSize);
+ __ sw(reg, MemOperand(sp, 5 * kPointerSize));
+ __ sw(scratch3(), MemOperand(sp, 4 * kPointerSize));
+ __ LoadRoot(scratch3(), Heap::kUndefinedValueRootIndex);
__ sw(scratch3(), MemOperand(sp, 3 * kPointerSize));
- __ li(scratch3(),
- Operand(ExternalReference::isolate_address(isolate())));
- __ LoadRoot(scratch4(), Heap::kUndefinedValueRootIndex);
__ sw(scratch3(), MemOperand(sp, 2 * kPointerSize));
+ __ li(scratch4(),
+ Operand(ExternalReference::isolate_address(isolate())));
__ sw(scratch4(), MemOperand(sp, 1 * kPointerSize));
__ sw(name(), MemOperand(sp, 0 * kPointerSize));
+ Address getter_address = v8::ToCData<Address>(callback->getter());
+ bool returns_handle =
+ !CallbackTable::ReturnsVoid(isolate(), getter_address);
+
+ Register first_arg = returns_handle ? a1 : a0;
+ Register second_arg = returns_handle ? a2 : a1;
+
__ mov(a2, scratch2()); // Saved in case scratch2 == a1.
- __ mov(a1, sp); // a1 (first argument - see note below) = Handle<Name>
+ __ mov(first_arg, sp); // (first argument - see note below) = Handle<Name>
// NOTE: the O32 ABI requires a0 to hold a special pointer when returning a
// struct from the function (which is currently the case). This means we pass
- // the arguments in a1-a2 instead of a0-a1. TryCallApiFunctionAndReturn
- // will handle setting up a0.
+ // the arguments in a1-a2 instead of a0-a1, if returns_handle is true.
+ // CallApiFunctionAndReturn will set up a0.
const int kApiStackSpace = 1;
FrameScope frame_scope(masm(), StackFrame::MANUAL);
@@ -1446,13 +1470,10 @@ void BaseLoadStubCompiler::GenerateLoadCallback(
// Create AccessorInfo instance on the stack above the exit frame with
// scratch2 (internal::Object** args_) as the data.
__ sw(a2, MemOperand(sp, kPointerSize));
- // a2 (second argument - see note above) = AccessorInfo&
- __ Addu(a2, sp, kPointerSize);
+ // (second argument - see note above) = AccessorInfo&
+ __ Addu(second_arg, sp, kPointerSize);
const int kStackUnwindSpace = kFastApiCallArguments + 1;
- Address getter_address = v8::ToCData<Address>(callback->getter());
- bool returns_handle =
- !CallbackTable::ReturnsVoid(isolate(), getter_address);
ApiFunction fun(getter_address);
ExternalReference::Type type =
returns_handle ?
@@ -1463,7 +1484,7 @@ void BaseLoadStubCompiler::GenerateLoadCallback(
__ CallApiFunctionAndReturn(ref,
kStackUnwindSpace,
returns_handle,
- 3);
+ 5);
}
@@ -1688,8 +1709,7 @@ Handle<Code> CallStubCompiler::CompileArrayPushCall(
if (argc == 0) {
// Nothing to do, just return the length.
__ lw(v0, FieldMemOperand(receiver, JSArray::kLengthOffset));
- __ Drop(argc + 1);
- __ Ret();
+ __ DropAndRet(argc + 1);
} else {
Label call_builtin;
if (argc == 1) { // Otherwise fall through to call the builtin.
@@ -1737,8 +1757,7 @@ Handle<Code> CallStubCompiler::CompileArrayPushCall(
__ sw(t0, MemOperand(end_elements));
// Check for a smi.
- __ Drop(argc + 1);
- __ Ret();
+ __ DropAndRet(argc + 1);
__ bind(&check_double);
@@ -1770,8 +1789,7 @@ Handle<Code> CallStubCompiler::CompileArrayPushCall(
__ sw(a0, FieldMemOperand(receiver, JSArray::kLengthOffset));
// Check for a smi.
- __ Drop(argc + 1);
- __ Ret();
+ __ DropAndRet(argc + 1);
__ bind(&with_write_barrier);
@@ -1837,8 +1855,7 @@ Handle<Code> CallStubCompiler::CompileArrayPushCall(
kDontSaveFPRegs,
EMIT_REMEMBERED_SET,
OMIT_SMI_CHECK);
- __ Drop(argc + 1);
- __ Ret();
+ __ DropAndRet(argc + 1);
__ bind(&attempt_to_grow_elements);
// v0: array's length + 1.
@@ -1893,8 +1910,7 @@ Handle<Code> CallStubCompiler::CompileArrayPushCall(
__ sw(t0, FieldMemOperand(elements, FixedArray::kLengthOffset));
// Elements are in new space, so write barrier is not required.
- __ Drop(argc + 1);
- __ Ret();
+ __ DropAndRet(argc + 1);
}
__ bind(&call_builtin);
__ TailCallExternalReference(
@@ -1973,13 +1989,11 @@ Handle<Code> CallStubCompiler::CompileArrayPopCall(
// Fill with the hole.
__ sw(t2, FieldMemOperand(elements, FixedArray::kHeaderSize));
- __ Drop(argc + 1);
- __ Ret();
+ __ DropAndRet(argc + 1);
__ bind(&return_undefined);
__ LoadRoot(v0, Heap::kUndefinedValueRootIndex);
- __ Drop(argc + 1);
- __ Ret();
+ __ DropAndRet(argc + 1);
__ bind(&call_builtin);
__ TailCallExternalReference(
@@ -2054,8 +2068,7 @@ Handle<Code> CallStubCompiler::CompileStringCharCodeAtCall(
index_out_of_range_label,
STRING_INDEX_IS_NUMBER);
generator.GenerateFast(masm());
- __ Drop(argc + 1);
- __ Ret();
+ __ DropAndRet(argc + 1);
StubRuntimeCallHelper call_helper;
generator.GenerateSlow(masm(), call_helper);
@@ -2063,8 +2076,7 @@ Handle<Code> CallStubCompiler::CompileStringCharCodeAtCall(
if (index_out_of_range.is_linked()) {
__ bind(&index_out_of_range);
__ LoadRoot(v0, Heap::kNanValueRootIndex);
- __ Drop(argc + 1);
- __ Ret();
+ __ DropAndRet(argc + 1);
}
__ bind(&miss);
@@ -2137,8 +2149,7 @@ Handle<Code> CallStubCompiler::CompileStringCharAtCall(
index_out_of_range_label,
STRING_INDEX_IS_NUMBER);
generator.GenerateFast(masm());
- __ Drop(argc + 1);
- __ Ret();
+ __ DropAndRet(argc + 1);
StubRuntimeCallHelper call_helper;
generator.GenerateSlow(masm(), call_helper);
@@ -2146,8 +2157,7 @@ Handle<Code> CallStubCompiler::CompileStringCharAtCall(
if (index_out_of_range.is_linked()) {
__ bind(&index_out_of_range);
__ LoadRoot(v0, Heap::kempty_stringRootIndex);
- __ Drop(argc + 1);
- __ Ret();
+ __ DropAndRet(argc + 1);
}
__ bind(&miss);
@@ -2213,8 +2223,7 @@ Handle<Code> CallStubCompiler::CompileStringFromCharCodeCall(
StringCharFromCodeGenerator generator(code, v0);
generator.GenerateFast(masm());
- __ Drop(argc + 1);
- __ Ret();
+ __ DropAndRet(argc + 1);
StubRuntimeCallHelper call_helper;
generator.GenerateSlow(masm(), call_helper);
@@ -2277,8 +2286,7 @@ Handle<Code> CallStubCompiler::CompileMathFloorCall(
// If the argument is a smi, just return.
STATIC_ASSERT(kSmiTag == 0);
__ And(t0, v0, Operand(kSmiTagMask));
- __ Drop(argc + 1, eq, t0, Operand(zero_reg));
- __ Ret(eq, t0, Operand(zero_reg));
+ __ DropAndRet(argc + 1, eq, t0, Operand(zero_reg));
__ CheckMap(v0, a1, Heap::kHeapNumberMapRootIndex, &slow, DONT_DO_SMI_CHECK);
@@ -2343,8 +2351,7 @@ Handle<Code> CallStubCompiler::CompileMathFloorCall(
// Restore FCSR and return.
__ ctc1(a3, FCSR);
- __ Drop(argc + 1);
- __ Ret();
+ __ DropAndRet(argc + 1);
__ bind(&wont_fit_smi);
// Restore FCSR and fall to slow case.
@@ -2423,8 +2430,7 @@ Handle<Code> CallStubCompiler::CompileMathAbsCall(
__ Branch(&slow, lt, v0, Operand(zero_reg));
// Smi case done.
- __ Drop(argc + 1);
- __ Ret();
+ __ DropAndRet(argc + 1);
// Check if the argument is a heap number and load its exponent and
// sign.
@@ -2437,8 +2443,7 @@ Handle<Code> CallStubCompiler::CompileMathAbsCall(
Label negative_sign;
__ And(t0, a1, Operand(HeapNumber::kSignMask));
__ Branch(&negative_sign, ne, t0, Operand(zero_reg));
- __ Drop(argc + 1);
- __ Ret();
+ __ DropAndRet(argc + 1);
// If the argument is negative, clear the sign, and return a new
// number.
@@ -2449,8 +2454,7 @@ Handle<Code> CallStubCompiler::CompileMathAbsCall(
__ AllocateHeapNumber(v0, t0, t1, t2, &slow);
__ sw(a1, FieldMemOperand(v0, HeapNumber::kExponentOffset));
__ sw(a3, FieldMemOperand(v0, HeapNumber::kMantissaOffset));
- __ Drop(argc + 1);
- __ Ret();
+ __ DropAndRet(argc + 1);
// Tail call the full function. We do not have to patch the receiver
// because the function makes no use of it.
@@ -2849,7 +2853,7 @@ Handle<Code> StoreStubCompiler::CompileStoreInterceptor(
// Check that the map of the object hasn't changed.
__ CheckMap(receiver(), scratch1(), Handle<Map>(object->map()), &miss,
- DO_SMI_CHECK, ALLOW_ELEMENT_TRANSITION_MAPS);
+ DO_SMI_CHECK);
// Perform global security token check if needed.
if (object->IsJSGlobalProxy()) {
@@ -3048,8 +3052,8 @@ Handle<Code> LoadStubCompiler::CompileLoadGlobal(
Counters* counters = isolate()->counters();
__ IncrementCounter(counters->named_load_global_stub(), 1, a1, a3);
+ __ Ret(USE_DELAY_SLOT);
__ mov(v0, t0);
- __ Ret();
// Return the generated code.
return GetICCode(kind(), Code::NORMAL, name);
@@ -3125,157 +3129,6 @@ Handle<Code> KeyedStoreStubCompiler::CompileStorePolymorphic(
}
-Handle<Code> ConstructStubCompiler::CompileConstructStub(
- Handle<JSFunction> function) {
- // a0 : argc
- // a1 : constructor
- // ra : return address
- // [sp] : last argument
- Label generic_stub_call;
-
- // Use t7 for holding undefined which is used in several places below.
- __ LoadRoot(t7, Heap::kUndefinedValueRootIndex);
-
-#ifdef ENABLE_DEBUGGER_SUPPORT
- // Check to see whether there are any break points in the function code. If
- // there are jump to the generic constructor stub which calls the actual
- // code for the function thereby hitting the break points.
- __ lw(t5, FieldMemOperand(a1, JSFunction::kSharedFunctionInfoOffset));
- __ lw(a2, FieldMemOperand(t5, SharedFunctionInfo::kDebugInfoOffset));
- __ Branch(&generic_stub_call, ne, a2, Operand(t7));
-#endif
-
- // Load the initial map and verify that it is in fact a map.
- // a1: constructor function
- // t7: undefined
- __ lw(a2, FieldMemOperand(a1, JSFunction::kPrototypeOrInitialMapOffset));
- __ JumpIfSmi(a2, &generic_stub_call);
- __ GetObjectType(a2, a3, t0);
- __ Branch(&generic_stub_call, ne, t0, Operand(MAP_TYPE));
-
-#ifdef DEBUG
- // Cannot construct functions this way.
- // a0: argc
- // a1: constructor function
- // a2: initial map
- // t7: undefined
- __ lbu(a3, FieldMemOperand(a2, Map::kInstanceTypeOffset));
- __ Check(ne, "Function constructed by construct stub.",
- a3, Operand(JS_FUNCTION_TYPE));
-#endif
-
- // Now allocate the JSObject in new space.
- // a0: argc
- // a1: constructor function
- // a2: initial map
- // t7: undefined
- ASSERT(function->has_initial_map());
- __ lbu(a3, FieldMemOperand(a2, Map::kInstanceSizeOffset));
-#ifdef DEBUG
- int instance_size = function->initial_map()->instance_size();
- __ Check(eq, "Instance size of initial map changed.",
- a3, Operand(instance_size >> kPointerSizeLog2));
-#endif
- __ Allocate(a3, t4, t5, t6, &generic_stub_call, SIZE_IN_WORDS);
-
- // Allocated the JSObject, now initialize the fields. Map is set to initial
- // map and properties and elements are set to empty fixed array.
- // a0: argc
- // a1: constructor function
- // a2: initial map
- // a3: object size (in words)
- // t4: JSObject (not tagged)
- // t7: undefined
- __ LoadRoot(t6, Heap::kEmptyFixedArrayRootIndex);
- __ mov(t5, t4);
- __ sw(a2, MemOperand(t5, JSObject::kMapOffset));
- __ sw(t6, MemOperand(t5, JSObject::kPropertiesOffset));
- __ sw(t6, MemOperand(t5, JSObject::kElementsOffset));
- __ Addu(t5, t5, Operand(3 * kPointerSize));
- ASSERT_EQ(0 * kPointerSize, JSObject::kMapOffset);
- ASSERT_EQ(1 * kPointerSize, JSObject::kPropertiesOffset);
- ASSERT_EQ(2 * kPointerSize, JSObject::kElementsOffset);
-
-
- // Calculate the location of the first argument. The stack contains only the
- // argc arguments.
- __ sll(a1, a0, kPointerSizeLog2);
- __ Addu(a1, a1, sp);
-
- // Fill all the in-object properties with undefined.
- // a0: argc
- // a1: first argument
- // a3: object size (in words)
- // t4: JSObject (not tagged)
- // t5: First in-object property of JSObject (not tagged)
- // t7: undefined
- // Fill the initialized properties with a constant value or a passed argument
- // depending on the this.x = ...; assignment in the function.
- Handle<SharedFunctionInfo> shared(function->shared());
- for (int i = 0; i < shared->this_property_assignments_count(); i++) {
- if (shared->IsThisPropertyAssignmentArgument(i)) {
- Label not_passed, next;
- // Check if the argument assigned to the property is actually passed.
- int arg_number = shared->GetThisPropertyAssignmentArgument(i);
- __ Branch(&not_passed, less_equal, a0, Operand(arg_number));
- // Argument passed - find it on the stack.
- __ lw(a2, MemOperand(a1, (arg_number + 1) * -kPointerSize));
- __ sw(a2, MemOperand(t5));
- __ Addu(t5, t5, kPointerSize);
- __ jmp(&next);
- __ bind(&not_passed);
- // Set the property to undefined.
- __ sw(t7, MemOperand(t5));
- __ Addu(t5, t5, Operand(kPointerSize));
- __ bind(&next);
- } else {
- // Set the property to the constant value.
- Handle<Object> constant(
- shared->GetThisPropertyAssignmentConstant(i), isolate());
- __ li(a2, Operand(constant));
- __ sw(a2, MemOperand(t5));
- __ Addu(t5, t5, kPointerSize);
- }
- }
-
- // Fill the unused in-object property fields with undefined.
- for (int i = shared->this_property_assignments_count();
- i < function->initial_map()->inobject_properties();
- i++) {
- __ sw(t7, MemOperand(t5));
- __ Addu(t5, t5, kPointerSize);
- }
-
- // a0: argc
- // t4: JSObject (not tagged)
- // Move argc to a1 and the JSObject to return to v0 and tag it.
- __ mov(a1, a0);
- __ mov(v0, t4);
- __ Or(v0, v0, Operand(kHeapObjectTag));
-
- // v0: JSObject
- // a1: argc
- // Remove caller arguments and receiver from the stack and return.
- __ sll(t0, a1, kPointerSizeLog2);
- __ Addu(sp, sp, t0);
- __ Addu(sp, sp, Operand(kPointerSize));
- Counters* counters = isolate()->counters();
- __ IncrementCounter(counters->constructed_objects(), 1, a1, a2);
- __ IncrementCounter(counters->constructed_objects_stub(), 1, a1, a2);
- __ Ret();
-
- // Jump to the generic stub in case the specialized code cannot handle the
- // construction.
- __ bind(&generic_stub_call);
- Handle<Code> generic_construct_stub =
- isolate()->builtins()->JSConstructStubGeneric();
- __ Jump(generic_construct_stub, RelocInfo::CODE_TARGET);
-
- // Return the generated code.
- return GetCode();
-}
-
-
#undef __
#define __ ACCESS_MASM(masm)
@@ -3471,8 +3324,8 @@ void KeyedStoreStubCompiler::GenerateStoreExternalArray(
}
// Entry registers are intact, a0 holds the value which is the return value.
+ __ Ret(USE_DELAY_SLOT);
__ mov(v0, a0);
- __ Ret();
if (elements_kind != EXTERNAL_PIXEL_ELEMENTS) {
// a3: external array.
@@ -3539,8 +3392,8 @@ void KeyedStoreStubCompiler::GenerateStoreExternalArray(
// Entry registers are intact, a0 holds the value
// which is the return value.
+ __ Ret(USE_DELAY_SLOT);
__ mov(v0, a0);
- __ Ret();
}
// Slow case, key and receiver still in a0 and a1.
@@ -3701,8 +3554,8 @@ void KeyedStoreStubCompiler::GenerateStoreFastElement(
// Increment the length of the array.
__ li(length_reg, Operand(Smi::FromInt(1)));
+ __ Ret(USE_DELAY_SLOT);
__ sw(length_reg, FieldMemOperand(receiver_reg, JSArray::kLengthOffset));
- __ Ret();
__ bind(&check_capacity);
// Check for cow elements, in general they are not handled by this stub
@@ -3866,9 +3719,9 @@ void KeyedStoreStubCompiler::GenerateStoreFastDoubleElement(
// Increment the length of the array.
__ li(length_reg, Operand(Smi::FromInt(1)));
__ sw(length_reg, FieldMemOperand(receiver_reg, JSArray::kLengthOffset));
+ __ Ret(USE_DELAY_SLOT);
__ lw(elements_reg,
FieldMemOperand(receiver_reg, JSObject::kElementsOffset));
- __ Ret();
__ bind(&check_capacity);
// Make sure that the backing store can hold additional elements.
diff --git a/deps/v8/src/mksnapshot.cc b/deps/v8/src/mksnapshot.cc
index 7592a89531..978ea217bd 100644
--- a/deps/v8/src/mksnapshot.cc
+++ b/deps/v8/src/mksnapshot.cc
@@ -32,10 +32,6 @@
#endif
#include <signal.h>
-// TODO(dcarney): remove
-#define V8_ALLOW_ACCESS_TO_PERSISTENT_ARROW
-#define V8_ALLOW_ACCESS_TO_PERSISTENT_IMPLICIT
-
#include "v8.h"
#include "bootstrapper.h"
@@ -341,10 +337,10 @@ int main(int argc, char** argv) {
exit(1);
}
if (i::FLAG_extra_code != NULL) {
- context->Enter();
// Capture 100 frames if anything happens.
V8::SetCaptureStackTraceForUncaughtExceptions(true, 100);
HandleScope scope(isolate);
+ v8::Context::Scope context_scope(v8::Local<v8::Context>::New(isolate, context));
const char* name = i::FLAG_extra_code;
FILE* file = i::OS::FOpen(name, "rb");
if (file == NULL) {
@@ -381,7 +377,6 @@ int main(int argc, char** argv) {
DumpException(try_catch.Message());
exit(1);
}
- context->Exit();
}
// Make sure all builtin scripts are cached.
{ HandleScope scope(isolate);
@@ -393,7 +388,7 @@ int main(int argc, char** argv) {
// context even after we have disposed of the context.
HEAP->CollectAllGarbage(i::Heap::kNoGCFlags, "mksnapshot");
i::Object* raw_context = *(v8::Utils::OpenHandle(*context));
- context.Dispose(context->GetIsolate());
+ context.Dispose(isolate);
CppByteSink sink(argv[1]);
// This results in a somewhat smaller snapshot, probably because it gets rid
// of some things that are cached between garbage collections.
diff --git a/deps/v8/src/object-observe.js b/deps/v8/src/object-observe.js
index b28f928a48..ada7919d6d 100644
--- a/deps/v8/src/object-observe.js
+++ b/deps/v8/src/object-observe.js
@@ -294,7 +294,7 @@ function EndPerformSplice(array) {
EndPerformChange(objectInfo, 'splice');
}
-function EnqueueSpliceRecord(array, index, removed, deleteCount, addedCount) {
+function EnqueueSpliceRecord(array, index, removed, addedCount) {
var objectInfo = objectInfoMap.get(array);
if (IS_UNDEFINED(objectInfo) || objectInfo.changeObservers.length === 0)
return;
@@ -307,11 +307,8 @@ function EnqueueSpliceRecord(array, index, removed, deleteCount, addedCount) {
addedCount: addedCount
};
- changeRecord.removed.length = deleteCount;
- // TODO(rafaelw): This breaks spec-compliance. Re-enable when freezing isn't
- // slow.
- // ObjectFreeze(changeRecord);
- // ObjectFreeze(changeRecord.removed);
+ ObjectFreeze(changeRecord);
+ ObjectFreeze(changeRecord.removed);
EnqueueChangeRecord(changeRecord, objectInfo.changeObservers);
}
@@ -323,9 +320,7 @@ function NotifyChange(type, object, name, oldValue) {
var changeRecord = (arguments.length < 4) ?
{ type: type, object: object, name: name } :
{ type: type, object: object, name: name, oldValue: oldValue };
- // TODO(rafaelw): This breaks spec-compliance. Re-enable when freezing isn't
- // slow.
- // ObjectFreeze(changeRecord);
+ ObjectFreeze(changeRecord);
EnqueueChangeRecord(changeRecord, objectInfo.changeObservers);
}
@@ -351,9 +346,7 @@ function ObjectNotifierNotify(changeRecord) {
%DefineOrRedefineDataProperty(newRecord, prop, changeRecord[prop],
READ_ONLY + DONT_DELETE);
}
- // TODO(rafaelw): This breaks spec-compliance. Re-enable when freezing isn't
- // slow.
- // ObjectFreeze(newRecord);
+ ObjectFreeze(newRecord);
EnqueueChangeRecord(newRecord, objectInfo.changeObservers);
}
diff --git a/deps/v8/src/objects-debug.cc b/deps/v8/src/objects-debug.cc
index 891f0d2302..4008181bad 100644
--- a/deps/v8/src/objects-debug.cc
+++ b/deps/v8/src/objects-debug.cc
@@ -312,8 +312,9 @@ void JSObject::JSObjectVerify() {
Representation r = descriptors->GetDetails(i).representation();
int field = descriptors->GetFieldIndex(i);
Object* value = RawFastPropertyAt(field);
- if (r.IsSmi()) ASSERT(value->IsSmi());
if (r.IsDouble()) ASSERT(value->IsHeapNumber());
+ if (value->IsUninitialized()) continue;
+ if (r.IsSmi()) ASSERT(value->IsSmi());
if (r.IsHeapObject()) ASSERT(value->IsHeapObject());
}
}
@@ -777,6 +778,12 @@ void Foreign::ForeignVerify() {
}
+void Box::BoxVerify() {
+ CHECK(IsBox());
+ value()->Verify();
+}
+
+
void AccessorInfo::AccessorInfoVerify() {
VerifyPointer(name());
VerifyPointer(flag());
diff --git a/deps/v8/src/objects-inl.h b/deps/v8/src/objects-inl.h
index 95a0eca6a8..e60f0f36f1 100644
--- a/deps/v8/src/objects-inl.h
+++ b/deps/v8/src/objects-inl.h
@@ -58,7 +58,10 @@ PropertyDetails::PropertyDetails(Smi* smi) {
Smi* PropertyDetails::AsSmi() {
- return Smi::FromInt(value_);
+ // Ensure the upper 2 bits have the same value by sign extending it. This is
+ // necessary to be able to use the 31st bit of the property details.
+ int value = value_ << 1;
+ return Smi::FromInt(value >> 1);
}
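The shift pair above copies bit 30 into bit 31 so the value survives smi tagging: a 31-bit payload must have its top two bits equal before Smi::FromInt drops one of them. A standalone sketch, assuming two's-complement behavior as V8 does:

    #include <cstdint>
    #include <cstdio>

    int32_t SignExtend31(uint32_t value) {
      // Shift bit 30 up into bit 31, then arithmetic-shift it back down.
      return static_cast<int32_t>(value << 1) >> 1;
    }

    int main() {
      std::printf("%d\n", SignExtend31(0x40000000u));  // -1073741824
      return 0;
    }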
@@ -289,6 +292,9 @@ MaybeObject* Object::AllocateNewStorageFor(Heap* heap,
PretenureFlag tenure) {
if (!FLAG_track_double_fields) return this;
if (!representation.IsDouble()) return this;
+ if (IsUninitialized()) {
+ return heap->AllocateHeapNumber(0, tenure);
+ }
return heap->AllocateHeapNumber(Number(), tenure);
}
@@ -527,6 +533,11 @@ bool MaybeObject::IsTheHole() {
}
+bool MaybeObject::IsUninitialized() {
+ return !IsFailure() && ToObjectUnchecked()->IsUninitialized();
+}
+
+
Failure* Failure::cast(MaybeObject* obj) {
ASSERT(HAS_FAILURE_TAG(obj));
return reinterpret_cast<Failure*>(obj);
@@ -842,6 +853,11 @@ bool Object::IsTheHole() {
}
+bool Object::IsUninitialized() {
+ return IsOddball() && Oddball::cast(this)->kind() == Oddball::kUninitialized;
+}
+
+
bool Object::IsTrue() {
return IsOddball() && Oddball::cast(this)->kind() == Oddball::kTrue;
}
@@ -892,7 +908,7 @@ MaybeObject* Object::GetElement(uint32_t index) {
// GetElement can trigger a getter which can cause allocation.
// This was not always the case. This ASSERT is here to catch
// leftover incorrect uses.
- ASSERT(HEAP->IsAllocationAllowed());
+ ASSERT(AllowHeapAllocation::IsAllowed());
return GetElementWithReceiver(this, index);
}
@@ -1538,12 +1554,19 @@ MaybeObject* JSObject::MigrateInstance() {
// Converting any field to the most specific type will cause the
// GeneralizeFieldRepresentation algorithm to create the most general existing
// transition that matches the object. This achieves what is needed.
- return GeneralizeFieldRepresentation(0, Representation::Smi());
+ return GeneralizeFieldRepresentation(0, Representation::None());
+}
+
+
+MaybeObject* JSObject::TryMigrateInstance() {
+ Map* new_map = map()->CurrentMapForDeprecated();
+ if (new_map == NULL) return Smi::FromInt(0);
+ return MigrateToMap(new_map);
}
Handle<String> JSObject::ExpectedTransitionKey(Handle<Map> map) {
- AssertNoAllocation no_gc;
+ DisallowHeapAllocation no_gc;
if (!map->HasTransitionArray()) return Handle<String>::null();
TransitionArray* transitions = map->transitions();
if (!transitions->IsSimpleTransition()) return Handle<String>::null();
@@ -1565,7 +1588,7 @@ Handle<Map> JSObject::ExpectedTransitionTarget(Handle<Map> map) {
Handle<Map> JSObject::FindTransitionToField(Handle<Map> map, Handle<Name> key) {
- AssertNoAllocation no_allocation;
+ DisallowHeapAllocation no_allocation;
if (!map->HasTransitionArray()) return Handle<Map>::null();
TransitionArray* transitions = map->transitions();
int transition = transitions->Search(*key);
@@ -1977,7 +2000,8 @@ bool FixedDoubleArray::is_the_hole(int index) {
}
-WriteBarrierMode HeapObject::GetWriteBarrierMode(const AssertNoAllocation&) {
+WriteBarrierMode HeapObject::GetWriteBarrierMode(
+ const DisallowHeapAllocation& promise) {
Heap* heap = GetHeap();
if (heap->incremental_marking()->IsMarking()) return UPDATE_WRITE_BARRIER;
if (heap->InNewSpace(this)) return SKIP_WRITE_BARRIER;
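Taking the no-allocation scope by const reference, as the new signature above does, is a witness-parameter pattern: the returned mode is only valid while the GC cannot move objects, so the type system forces every caller to hold a DisallowHeapAllocation scope at least that long. A minimal sketch with stand-in types:

    // Stand-ins, not the V8 classes.
    class NoGCScope { /* RAII guard; construction forbids allocation */ };

    enum WriteBarrierMode { SKIP_WRITE_BARRIER, UPDATE_WRITE_BARRIER };

    // The witness is unused at run time; it exists so callers cannot
    // obtain a mode without holding a live no-GC scope.
    WriteBarrierMode GetWriteBarrierMode(const NoGCScope& witness) {
      (void)witness;
      return UPDATE_WRITE_BARRIER;
    }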
@@ -2320,7 +2344,7 @@ PropertyType DescriptorArray::GetType(int descriptor_number) {
int DescriptorArray::GetFieldIndex(int descriptor_number) {
- return Descriptor::IndexFromValue(GetValue(descriptor_number));
+ return GetDetails(descriptor_number).field_index();
}
@@ -2355,7 +2379,6 @@ void DescriptorArray::Set(int descriptor_number,
// Range check.
ASSERT(descriptor_number < number_of_descriptors());
- ASSERT(!desc->GetDetails().representation().IsNone());
NoIncrementalWriteBarrierSet(this,
ToKeyIndex(descriptor_number),
desc->GetKey());
@@ -2371,7 +2394,6 @@ void DescriptorArray::Set(int descriptor_number,
void DescriptorArray::Set(int descriptor_number, Descriptor* desc) {
// Range check.
ASSERT(descriptor_number < number_of_descriptors());
- ASSERT(!desc->GetDetails().representation().IsNone());
set(ToKeyIndex(descriptor_number), desc->GetKey());
set(ToValueIndex(descriptor_number), desc->GetValue());
@@ -3592,10 +3614,23 @@ bool Map::is_deprecated() {
}
+void Map::freeze() {
+ set_bit_field3(IsFrozen::update(bit_field3(), true));
+}
+
+
+bool Map::is_frozen() {
+ return IsFrozen::decode(bit_field3());
+}
+
+
bool Map::CanBeDeprecated() {
int descriptor = LastAdded();
for (int i = 0; i <= descriptor; i++) {
PropertyDetails details = instance_descriptors()->GetDetails(i);
+ if (FLAG_track_fields && details.representation().IsNone()) {
+ return true;
+ }
if (FLAG_track_fields && details.representation().IsSmi()) {
return true;
}
@@ -3712,6 +3747,7 @@ Code::ExtraICState Code::extra_ic_state() {
Code::ExtraICState Code::extended_extra_ic_state() {
ASSERT(is_inline_cache_stub() || ic_state() == DEBUG_STUB);
+ ASSERT(needs_extended_extra_ic_state(kind()));
return ExtractExtendedExtraICStateFromFlags(flags());
}
@@ -3962,17 +3998,7 @@ void Code::set_unary_op_type(byte value) {
byte Code::to_boolean_state() {
- ASSERT(is_to_boolean_ic_stub());
- return ToBooleanStateField::decode(
- READ_UINT32_FIELD(this, kKindSpecificFlags1Offset));
-}
-
-
-void Code::set_to_boolean_state(byte value) {
- ASSERT(is_to_boolean_ic_stub());
- int previous = READ_UINT32_FIELD(this, kKindSpecificFlags1Offset);
- int updated = ToBooleanStateField::update(previous, value);
- WRITE_UINT32_FIELD(this, kKindSpecificFlags1Offset, updated);
+ return extended_extra_ic_state();
}
@@ -4035,10 +4061,7 @@ Code::Flags Code::ComputeFlags(Kind kind,
| TypeField::encode(type)
| ExtendedExtraICStateField::encode(extra_ic_state)
| CacheHolderField::encode(holder);
- // TODO(danno): This is a bit of a hack right now since there are still
- // clients of this API that pass "extra" values in for argc. These clients
- // should be retrofitted to used ExtendedExtraICState.
- if (kind != Code::COMPARE_NIL_IC) {
+ if (!Code::needs_extended_extra_ic_state(kind)) {
bits |= (argc << kArgumentsCountShift);
}
return static_cast<Flags>(bits);
@@ -4360,6 +4383,8 @@ ACCESSORS(ExecutableAccessorInfo, getter, Object, kGetterOffset)
ACCESSORS(ExecutableAccessorInfo, setter, Object, kSetterOffset)
ACCESSORS(ExecutableAccessorInfo, data, Object, kDataOffset)
+ACCESSORS(Box, value, Object, kValueOffset)
+
ACCESSORS(AccessorPair, getter, Object, kGetterOffset)
ACCESSORS(AccessorPair, setter, Object, kSetterOffset)
@@ -4451,8 +4476,6 @@ ACCESSORS(SharedFunctionInfo, function_data, Object, kFunctionDataOffset)
ACCESSORS(SharedFunctionInfo, script, Object, kScriptOffset)
ACCESSORS(SharedFunctionInfo, debug_info, Object, kDebugInfoOffset)
ACCESSORS(SharedFunctionInfo, inferred_name, String, kInferredNameOffset)
-ACCESSORS(SharedFunctionInfo, this_property_assignments, Object,
- kThisPropertyAssignmentsOffset)
SMI_ACCESSORS(SharedFunctionInfo, ast_node_count, kAstNodeCountOffset)
@@ -4469,10 +4492,6 @@ BOOL_ACCESSORS(SharedFunctionInfo, start_position_and_type, is_expression,
BOOL_ACCESSORS(SharedFunctionInfo, start_position_and_type, is_toplevel,
kIsTopLevelBit)
-BOOL_GETTER(SharedFunctionInfo,
- compiler_hints,
- has_only_simple_this_property_assignments,
- kHasOnlySimpleThisPropertyAssignments)
BOOL_ACCESSORS(SharedFunctionInfo,
compiler_hints,
allows_lazy_compilation,
@@ -4505,8 +4524,6 @@ SMI_ACCESSORS(SharedFunctionInfo, function_token_position,
kFunctionTokenPositionOffset)
SMI_ACCESSORS(SharedFunctionInfo, compiler_hints,
kCompilerHintsOffset)
-SMI_ACCESSORS(SharedFunctionInfo, this_property_assignments_count,
- kThisPropertyAssignmentsCountOffset)
SMI_ACCESSORS(SharedFunctionInfo, opt_count, kOptCountOffset)
SMI_ACCESSORS(SharedFunctionInfo, counters, kCountersOffset)
SMI_ACCESSORS(SharedFunctionInfo,
@@ -4558,13 +4575,10 @@ PSEUDO_SMI_ACCESSORS_HI(SharedFunctionInfo,
compiler_hints,
kCompilerHintsOffset)
-PSEUDO_SMI_ACCESSORS_LO(SharedFunctionInfo,
- this_property_assignments_count,
- kThisPropertyAssignmentsCountOffset)
-PSEUDO_SMI_ACCESSORS_HI(SharedFunctionInfo, opt_count, kOptCountOffset)
+PSEUDO_SMI_ACCESSORS_LO(SharedFunctionInfo, opt_count, kOptCountOffset)
-PSEUDO_SMI_ACCESSORS_LO(SharedFunctionInfo, counters, kCountersOffset)
-PSEUDO_SMI_ACCESSORS_HI(SharedFunctionInfo,
+PSEUDO_SMI_ACCESSORS_HI(SharedFunctionInfo, counters, kCountersOffset)
+PSEUDO_SMI_ACCESSORS_LO(SharedFunctionInfo,
stress_deopt_counter,
kStressDeoptCounterOffset)
#endif
@@ -5303,13 +5317,28 @@ void JSArrayBuffer::set_backing_store(void* value, WriteBarrierMode mode) {
ACCESSORS(JSArrayBuffer, byte_length, Object, kByteLengthOffset)
+ACCESSORS_TO_SMI(JSArrayBuffer, flag, kFlagOffset)
+
+
+bool JSArrayBuffer::is_external() {
+ return BooleanBit::get(flag(), kIsExternalBit);
+}
+
+
+void JSArrayBuffer::set_is_external(bool value) {
+ set_flag(BooleanBit::set(flag(), kIsExternalBit, value));
+}
+
+
+ACCESSORS(JSArrayBuffer, weak_next, Object, kWeakNextOffset)
+ACCESSORS(JSArrayBuffer, weak_first_array, Object, kWeakFirstArrayOffset)
ACCESSORS(JSTypedArray, buffer, Object, kBufferOffset)
ACCESSORS(JSTypedArray, byte_offset, Object, kByteOffsetOffset)
ACCESSORS(JSTypedArray, byte_length, Object, kByteLengthOffset)
ACCESSORS(JSTypedArray, length, Object, kLengthOffset)
-
+ACCESSORS(JSTypedArray, weak_next, Object, kWeakNextOffset)
ACCESSORS(JSRegExp, data, Object, kDataOffset)
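is_external above packs a boolean into one bit of a smi-stored flag word via the BooleanBit helpers, so further flags can share the field later without growing the object. The bit arithmetic, reduced to an isolated sketch:

    #include <cstdint>

    inline bool GetBit(int32_t flags, int position) {
      return (flags & (1 << position)) != 0;
    }

    inline int32_t SetBit(int32_t flags, int position, bool value) {
      return value ? (flags | (1 << position))
                   : (flags & ~(1 << position));
    }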
@@ -5811,7 +5840,7 @@ void Dictionary<Shape, Key>::SetEntry(int entry,
details.IsDeleted() ||
details.dictionary_index() > 0);
int index = HashTable<Shape, Key>::EntryToIndex(entry);
- AssertNoAllocation no_gc;
+ DisallowHeapAllocation no_gc;
WriteBarrierMode mode = FixedArray::GetWriteBarrierMode(no_gc);
FixedArray::set(index, key, mode);
FixedArray::set(index+1, value, mode);
diff --git a/deps/v8/src/objects-printer.cc b/deps/v8/src/objects-printer.cc
index ddecae3bf5..357d984a13 100644
--- a/deps/v8/src/objects-printer.cc
+++ b/deps/v8/src/objects-printer.cc
@@ -547,6 +547,8 @@ static const char* TypeToString(InstanceType type) {
case JS_GLOBAL_OBJECT_TYPE: return "JS_GLOBAL_OBJECT";
case JS_BUILTINS_OBJECT_TYPE: return "JS_BUILTINS_OBJECT";
case JS_GLOBAL_PROXY_TYPE: return "JS_GLOBAL_PROXY";
+ case JS_TYPED_ARRAY_TYPE: return "JS_TYPED_ARRAY";
+ case JS_ARRAY_BUFFER_TYPE: return "JS_ARRAY_BUFFER";
case FOREIGN_TYPE: return "FOREIGN";
case JS_MESSAGE_OBJECT_TYPE: return "JS_MESSAGE_OBJECT_TYPE";
#define MAKE_STRUCT_CASE(NAME, Name, name) case NAME##_TYPE: return #NAME;
@@ -822,7 +824,7 @@ void JSTypedArray::JSTypedArrayPrint(FILE* out) {
byte_offset()->ShortPrint(out);
PrintF(out, "\n - byte_length = ");
byte_length()->ShortPrint(out);
- PrintF(out, " - length = ");
+ PrintF(out, "\n - length = ");
length()->ShortPrint(out);
PrintF("\n");
PrintElements(out);
@@ -885,10 +887,8 @@ void SharedFunctionInfo::SharedFunctionInfoPrint(FILE* out) {
PrintF(out, "\n - debug info = ");
debug_info()->ShortPrint(out);
PrintF(out, "\n - length = %d", length());
- PrintF(out, "\n - has_only_simple_this_property_assignments = %d",
- has_only_simple_this_property_assignments());
- PrintF(out, "\n - this_property_assignments = ");
- this_property_assignments()->ShortPrint(out);
+ PrintF(out, "\n - optimized_code_map = ");
+ optimized_code_map()->ShortPrint(out);
PrintF(out, "\n");
}
@@ -970,6 +970,13 @@ void DeclaredAccessorDescriptor::DeclaredAccessorDescriptorPrint(FILE* out) {
}
+void Box::BoxPrint(FILE* out) {
+ HeapObject::PrintHeader(out, "Box");
+ PrintF(out, "\n - value: ");
+ value()->ShortPrint(out);
+}
+
+
void AccessorPair::AccessorPairPrint(FILE* out) {
HeapObject::PrintHeader(out, "AccessorPair");
PrintF(out, "\n - getter: ");
diff --git a/deps/v8/src/objects-visiting-inl.h b/deps/v8/src/objects-visiting-inl.h
index f83f00fd5d..829eab809f 100644
--- a/deps/v8/src/objects-visiting-inl.h
+++ b/deps/v8/src/objects-visiting-inl.h
@@ -79,6 +79,10 @@ void StaticNewSpaceVisitor<StaticVisitor>::Initialize() {
table_.Register(kVisitJSFunction, &VisitJSFunction);
+ table_.Register(kVisitJSArrayBuffer, &VisitJSArrayBuffer);
+
+ table_.Register(kVisitJSTypedArray, &VisitJSTypedArray);
+
table_.Register(kVisitFreeSpace, &VisitFreeSpace);
table_.Register(kVisitJSWeakMap, &JSObjectVisitor::Visit);
@@ -99,6 +103,43 @@ void StaticNewSpaceVisitor<StaticVisitor>::Initialize() {
template<typename StaticVisitor>
+int StaticNewSpaceVisitor<StaticVisitor>::VisitJSArrayBuffer(
+ Map* map, HeapObject* object) {
+ Heap* heap = map->GetHeap();
+
+ STATIC_ASSERT(
+ JSArrayBuffer::kWeakFirstArrayOffset ==
+ JSArrayBuffer::kWeakNextOffset + kPointerSize);
+ VisitPointers(
+ heap,
+ HeapObject::RawField(object, JSArrayBuffer::BodyDescriptor::kStartOffset),
+ HeapObject::RawField(object, JSArrayBuffer::kWeakNextOffset));
+ VisitPointers(
+ heap,
+ HeapObject::RawField(object,
+ JSArrayBuffer::kWeakNextOffset + 2 * kPointerSize),
+ HeapObject::RawField(object, JSArrayBuffer::kSizeWithInternalFields));
+ return JSArrayBuffer::kSizeWithInternalFields;
+}
+
+
+template<typename StaticVisitor>
+int StaticNewSpaceVisitor<StaticVisitor>::VisitJSTypedArray(
+ Map* map, HeapObject* object) {
+ VisitPointers(
+ map->GetHeap(),
+ HeapObject::RawField(object, JSTypedArray::BodyDescriptor::kStartOffset),
+ HeapObject::RawField(object, JSTypedArray::kWeakNextOffset));
+ VisitPointers(
+ map->GetHeap(),
+ HeapObject::RawField(object,
+ JSTypedArray::kWeakNextOffset + kPointerSize),
+ HeapObject::RawField(object, JSTypedArray::kSize));
+ return JSTypedArray::kSize;
+}
+
+
+template<typename StaticVisitor>
void StaticMarkingVisitor<StaticVisitor>::Initialize() {
table_.Register(kVisitShortcutCandidate,
&FixedBodyVisitor<StaticVisitor,
@@ -149,6 +190,10 @@ void StaticMarkingVisitor<StaticVisitor>::Initialize() {
table_.Register(kVisitJSFunction, &VisitJSFunction);
+ table_.Register(kVisitJSArrayBuffer, &VisitJSArrayBuffer);
+
+ table_.Register(kVisitJSTypedArray, &VisitJSTypedArray);
+
// Registration for kVisitJSRegExp is done by StaticVisitor.
table_.Register(kVisitPropertyCell,
@@ -401,6 +446,41 @@ void StaticMarkingVisitor<StaticVisitor>::VisitJSRegExp(
template<typename StaticVisitor>
+void StaticMarkingVisitor<StaticVisitor>::VisitJSArrayBuffer(
+ Map* map, HeapObject* object) {
+ Heap* heap = map->GetHeap();
+
+ STATIC_ASSERT(
+ JSArrayBuffer::kWeakFirstArrayOffset ==
+ JSArrayBuffer::kWeakNextOffset + kPointerSize);
+ StaticVisitor::VisitPointers(
+ heap,
+ HeapObject::RawField(object, JSArrayBuffer::BodyDescriptor::kStartOffset),
+ HeapObject::RawField(object, JSArrayBuffer::kWeakNextOffset));
+ StaticVisitor::VisitPointers(
+ heap,
+ HeapObject::RawField(object,
+ JSArrayBuffer::kWeakNextOffset + 2 * kPointerSize),
+ HeapObject::RawField(object, JSArrayBuffer::kSizeWithInternalFields));
+}
+
+
+template<typename StaticVisitor>
+void StaticMarkingVisitor<StaticVisitor>::VisitJSTypedArray(
+ Map* map, HeapObject* object) {
+ StaticVisitor::VisitPointers(
+ map->GetHeap(),
+ HeapObject::RawField(object, JSTypedArray::BodyDescriptor::kStartOffset),
+ HeapObject::RawField(object, JSTypedArray::kWeakNextOffset));
+ StaticVisitor::VisitPointers(
+ map->GetHeap(),
+ HeapObject::RawField(object,
+ JSTypedArray::kWeakNextOffset + kPointerSize),
+ HeapObject::RawField(object, JSTypedArray::kSize));
+}
+
+
+template<typename StaticVisitor>
void StaticMarkingVisitor<StaticVisitor>::MarkMapContents(
Heap* heap, Map* map) {
// Make sure that the back pointer stored either in the map itself or
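Both new visitors above split the pointer walk into two spans that bracket the weak-list links (weak_next, plus weak_first_array for array buffers), so the marker never treats those links as strong references; the weak lists are threaded and cleared separately. The span arithmetic, reduced to a sketch with made-up offsets:

    #include <cstddef>

    struct Span { int start; int end; };  // byte offsets, [start, end)

    // Made-up layout: pointer fields from 8, weak link at 40, size 48.
    const int kStart = 8, kWeakNext = 40, kSize = 48;

    const Span kStrongSpans[] = {
      { kStart, kWeakNext },                                  // before the link
      { kWeakNext + static_cast<int>(sizeof(void*)), kSize }, // after it
    };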
diff --git a/deps/v8/src/objects-visiting.cc b/deps/v8/src/objects-visiting.cc
index 7b5c8bef75..4bf2804584 100644
--- a/deps/v8/src/objects-visiting.cc
+++ b/deps/v8/src/objects-visiting.cc
@@ -134,6 +134,12 @@ StaticVisitorBase::VisitorId StaticVisitorBase::GetVisitorId(
case FILLER_TYPE:
return kVisitDataObjectGeneric;
+ case JS_ARRAY_BUFFER_TYPE:
+ return kVisitJSArrayBuffer;
+
+ case JS_TYPED_ARRAY_TYPE:
+ return kVisitJSTypedArray;
+
case JS_OBJECT_TYPE:
case JS_CONTEXT_EXTENSION_OBJECT_TYPE:
case JS_GENERATOR_OBJECT_TYPE:
@@ -145,8 +151,6 @@ StaticVisitorBase::VisitorId StaticVisitorBase::GetVisitorId(
case JS_GLOBAL_OBJECT_TYPE:
case JS_BUILTINS_OBJECT_TYPE:
case JS_MESSAGE_OBJECT_TYPE:
- case JS_ARRAY_BUFFER_TYPE:
- case JS_TYPED_ARRAY_TYPE:
return GetVisitorIdForSize(kVisitJSObject,
kVisitJSObjectGeneric,
instance_size);
diff --git a/deps/v8/src/objects-visiting.h b/deps/v8/src/objects-visiting.h
index d4a2ed2d16..c4d1cc3be1 100644
--- a/deps/v8/src/objects-visiting.h
+++ b/deps/v8/src/objects-visiting.h
@@ -92,6 +92,8 @@ class StaticVisitorBase : public AllStatic {
V(SharedFunctionInfo) \
V(JSFunction) \
V(JSWeakMap) \
+ V(JSArrayBuffer) \
+ V(JSTypedArray) \
V(JSRegExp)
// For data objects, JS objects and structs along with generic visitor which
@@ -333,6 +335,9 @@ class StaticNewSpaceVisitor : public StaticVisitorBase {
return FreeSpace::cast(object)->Size();
}
+ INLINE(static int VisitJSArrayBuffer(Map* map, HeapObject* object));
+ INLINE(static int VisitJSTypedArray(Map* map, HeapObject* object));
+
class DataObjectVisitor {
public:
template<int object_size>
@@ -407,6 +412,8 @@ class StaticMarkingVisitor : public StaticVisitorBase {
INLINE(static void VisitSharedFunctionInfo(Map* map, HeapObject* object));
INLINE(static void VisitJSFunction(Map* map, HeapObject* object));
INLINE(static void VisitJSRegExp(Map* map, HeapObject* object));
+ INLINE(static void VisitJSArrayBuffer(Map* map, HeapObject* object));
+ INLINE(static void VisitJSTypedArray(Map* map, HeapObject* object));
INLINE(static void VisitNativeContext(Map* map, HeapObject* object));
// Mark pointers in a Map and its TransitionArray together, possibly
diff --git a/deps/v8/src/objects.cc b/deps/v8/src/objects.cc
index 31bbbdbeee..6512c60779 100644
--- a/deps/v8/src/objects.cc
+++ b/deps/v8/src/objects.cc
@@ -155,6 +155,41 @@ MaybeObject* Object::GetPropertyWithReceiver(Object* receiver,
}
+bool Object::ToInt32(int32_t* value) {
+ if (IsSmi()) {
+ *value = Smi::cast(this)->value();
+ return true;
+ }
+ if (IsHeapNumber()) {
+ double num = HeapNumber::cast(this)->value();
+ if (FastI2D(FastD2I(num)) == num) {
+ *value = FastD2I(num);
+ return true;
+ }
+ }
+ return false;
+}
+
+
+bool Object::ToUint32(uint32_t* value) {
+ if (IsSmi()) {
+ int num = Smi::cast(this)->value();
+ if (num >= 0) {
+ *value = static_cast<uint32_t>(num);
+ return true;
+ }
+ }
+ if (IsHeapNumber()) {
+ double num = HeapNumber::cast(this)->value();
+ if (num >= 0 && FastUI2D(FastD2UI(num)) == num) {
+ *value = FastD2UI(num);
+ return true;
+ }
+ }
+ return false;
+}
+
+
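The FastI2D(FastD2I(num)) == num test above is a round-trip check: a double holds an exact int32 iff truncating it and widening back reproduces the same double, which rejects fractions, NaN, and out-of-range magnitudes in one comparison. A sketch of the same idea (the explicit range guard stands in for what V8's fast-path helpers assume):

    #include <cstdint>
    #include <cstdio>

    bool DoubleToInt32(double num, int32_t* out) {
      // Truncating an out-of-range double is undefined in C++; guard first.
      if (!(num >= -2147483648.0 && num <= 2147483647.0)) return false;
      int32_t truncated = static_cast<int32_t>(num);
      if (static_cast<double>(truncated) != num) return false;  // round trip
      *out = truncated;
      return true;
    }

    int main() {
      int32_t v;
      std::printf("%d %d\n", DoubleToInt32(3.0, &v), DoubleToInt32(3.5, &v));
      return 0;  // prints: 1 0
    }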
template<typename To>
static inline To* CheckedCast(void *from) {
uintptr_t temp = reinterpret_cast<uintptr_t>(from);
@@ -344,6 +379,8 @@ MaybeObject* JSObject::GetPropertyWithCallback(Object* receiver,
{
// Leaving JavaScript.
VMState<EXTERNAL> state(isolate);
+ ExternalCallbackScope call_scope(isolate,
+ v8::ToCData<Address>(fun_obj));
result = args.Call(call_fun, v8::Utils::ToLocal(key));
}
RETURN_IF_SCHEDULED_EXCEPTION(isolate);
@@ -1045,7 +1082,7 @@ MaybeObject* String::SlowTryFlatten(PretenureFlag pretenure) {
// allowed. This is to avoid an assertion failure when allocating.
// Flattening strings is the only case where we always allow
// allocation because no GC is performed if the allocation fails.
- if (!HEAP->IsAllocationAllowed()) return this;
+ if (!AllowHeapAllocation::IsAllowed()) return this;
#endif
Heap* heap = GetHeap();
@@ -1168,6 +1205,11 @@ bool String::MakeExternal(v8::String::ExternalAsciiStringResource* resource) {
if (FLAG_enable_slow_asserts) {
// Assert that the resource and the string are equivalent.
ASSERT(static_cast<size_t>(this->length()) == resource->length());
+ if (this->IsTwoByteRepresentation()) {
+ ScopedVector<uint16_t> smart_chars(this->length());
+ String::WriteToFlat(this, smart_chars.start(), 0, this->length());
+ ASSERT(String::IsOneByte(smart_chars.start(), this->length()));
+ }
ScopedVector<char> smart_chars(this->length());
String::WriteToFlat(this, smart_chars.start(), 0, this->length());
ASSERT(memcmp(smart_chars.start(),
@@ -1775,7 +1817,8 @@ static bool IsIdentifier(UnicodeCache* cache, Name* name) {
MaybeObject* JSObject::AddFastProperty(Name* name,
Object* value,
PropertyAttributes attributes,
- StoreFromKeyed store_mode) {
+ StoreFromKeyed store_mode,
+ ValueType value_type) {
ASSERT(!IsJSGlobalProxy());
ASSERT(DescriptorArray::kNotFound ==
map()->instance_descriptors()->Search(
@@ -1801,8 +1844,8 @@ MaybeObject* JSObject::AddFastProperty(Name* name,
int index = map()->NextFreePropertyIndex();
// Allocate new instance descriptors with (name, index) added
- Representation representation = IsJSContextExtensionObject()
- ? Representation::Tagged() : value->OptimalRepresentation();
+ if (IsJSContextExtensionObject()) value_type = FORCE_TAGGED;
+ Representation representation = value->OptimalRepresentation(value_type);
FieldDescriptor new_field(name, index, attributes, representation);
@@ -1824,15 +1867,18 @@ MaybeObject* JSObject::AddFastProperty(Name* name,
Heap* heap = isolate->heap();
- Map* new_map;
- MaybeObject* maybe_new_map = map()->CopyAddDescriptor(&new_field, flag);
- if (!maybe_new_map->To(&new_map)) return maybe_new_map;
-
Object* storage;
MaybeObject* maybe_storage =
value->AllocateNewStorageFor(heap, representation);
if (!maybe_storage->To(&storage)) return maybe_storage;
+ // Note that Map::CopyAddDescriptor has side-effects, the new map is already
+ // inserted in the transition tree. No more allocations that might fail are
+ // allowed after this point.
+ Map* new_map;
+ MaybeObject* maybe_new_map = map()->CopyAddDescriptor(&new_field, flag);
+ if (!maybe_new_map->To(&new_map)) return maybe_new_map;
+
if (map()->unused_property_fields() == 0) {
ASSERT(values != NULL);
set_properties(values);
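The reordering above enforces a commit-point discipline: because CopyAddDescriptor publishes the new map into the transition tree, every allocation that can fail (and be retried after GC) must run before it, so a failure leaves no half-published state behind. The shape of the rule, in a hedged standalone form:

    #include <optional>

    // Illustrative stand-ins: alloc() models a fallible allocation,
    // *committed models state visible to the rest of the system.
    std::optional<int> alloc(bool ok) {
      return ok ? std::optional<int>(42) : std::nullopt;
    }

    bool AddProperty(bool alloc_ok, int* committed) {
      std::optional<int> storage = alloc(alloc_ok);  // fallible: do it first
      if (!storage) return false;  // failure leaves *committed untouched
      *committed = *storage;       // the only side effect, done last
      return true;
    }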
@@ -1916,7 +1962,8 @@ MaybeObject* JSObject::AddProperty(Name* name,
PropertyAttributes attributes,
StrictModeFlag strict_mode,
JSReceiver::StoreFromKeyed store_mode,
- ExtensibilityCheck extensibility_check) {
+ ExtensibilityCheck extensibility_check,
+ ValueType value_type) {
ASSERT(!IsJSGlobalProxy());
Map* map_of_this = map();
Heap* heap = GetHeap();
@@ -1929,8 +1976,8 @@ MaybeObject* JSObject::AddProperty(Name* name,
} else {
Handle<Object> args[1] = {Handle<Name>(name)};
return isolate->Throw(
- *FACTORY->NewTypeError("object_not_extensible",
- HandleVector(args, 1)));
+ *isolate->factory()->NewTypeError("object_not_extensible",
+ HandleVector(args, 1)));
}
}
@@ -1943,7 +1990,8 @@ MaybeObject* JSObject::AddProperty(Name* name,
JSFunction::cast(value),
attributes);
} else {
- result = AddFastProperty(name, value, attributes, store_mode);
+ result = AddFastProperty(
+ name, value, attributes, store_mode, value_type);
}
} else {
// Normalize the object to prevent very large instance descriptors.
@@ -2227,7 +2275,7 @@ bool Map::InstancesNeedRewriting(Map* target,
int limit = NumberOfOwnDescriptors();
for (int i = 0; i < limit; i++) {
if (new_desc->GetDetails(i).representation().IsDouble() &&
- old_desc->GetDetails(i).representation().IsSmi()) {
+ !old_desc->GetDetails(i).representation().IsDouble()) {
return true;
}
}
@@ -2298,8 +2346,9 @@ MaybeObject* JSObject::MigrateToMap(Map* new_map) {
? old_descriptors->GetValue(i)
: RawFastPropertyAt(old_descriptors->GetFieldIndex(i));
if (FLAG_track_double_fields &&
- old_details.representation().IsSmi() &&
+ !old_details.representation().IsDouble() &&
details.representation().IsDouble()) {
+ if (old_details.representation().IsNone()) value = Smi::FromInt(0);
// Objects must be allocated in the old object space, since the
// overall number of HeapNumbers needed for the conversion might
// exceed the capacity of new space, and we would fail repeatedly
@@ -2352,7 +2401,7 @@ MaybeObject* JSObject::GeneralizeFieldRepresentation(
MaybeObject* maybe_new_map =
map()->GeneralizeRepresentation(modify_index, new_representation);
if (!maybe_new_map->To(&new_map)) return maybe_new_map;
- ASSERT(map() != new_map || new_map->FindRootMap()->is_deprecated());
+ if (map() == new_map) return this;
return MigrateToMap(new_map);
}
@@ -2497,8 +2546,6 @@ Map* Map::FindLastMatchMap(int verbatim,
if (details.type() != next_details.type()) break;
if (details.attributes() != next_details.attributes()) break;
if (!details.representation().Equals(next_details.representation())) break;
- ASSERT(!details.IsDeleted());
- ASSERT(!next_details.IsDeleted());
current = next;
}
@@ -2531,10 +2578,21 @@ MaybeObject* Map::GeneralizeRepresentation(int modify_index,
Representation old_representation =
old_descriptors->GetDetails(modify_index).representation();
- if (old_representation.IsNone()) {
- UNREACHABLE();
+ // It's fine to transition from None to anything but double without any
+ // modification to the object, because the default uninitialized value for
+ // representation None can be overwritten by both smi and tagged values.
+ // Doubles, however, would require a box allocation.
+ if (old_representation.IsNone() &&
+ !new_representation.IsNone() &&
+ !new_representation.IsDouble()) {
+ if (FLAG_trace_generalization) {
+ PrintF("initializing representation %i: %p -> %s\n",
+ modify_index,
+ static_cast<void*>(this),
+ new_representation.Mnemonic());
+ }
old_descriptors->SetRepresentation(modify_index, new_representation);
- return this;
+ return old_map;
}
int descriptors = old_map->NumberOfOwnDescriptors();
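The double case is excluded above because smis and tagged pointers can simply overwrite the uninitialized sentinel in place, whereas a double-representation field keeps its value in a separately allocated heap-number box. A hedged, illustrative sketch of how this can surface for object-literal boilerplate fields (names hypothetical; actual behavior depends on the track_fields/track_computed_fields flags):

  // Illustrative only.
  function make(v) { return { x: v }; }
  var a = make(1);    // None -> Smi: the descriptor is updated in place
  var b = make(1.5);  // -> Double: needs a heap-number box, so a real
                      //    generalization (and possibly a new map) is required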
@@ -2560,7 +2618,7 @@ MaybeObject* Map::GeneralizeRepresentation(int modify_index,
updated_descriptors->GetDetails(modify_index).representation();
if (new_representation.fits_into(updated_representation)) {
if (FLAG_trace_generalization &&
- !(modify_index == 0 && new_representation.IsSmi())) {
+ !(modify_index == 0 && new_representation.IsNone())) {
PropertyDetails old_details = old_descriptors->GetDetails(modify_index);
PrintF("migrating to existing map %p(%s) -> %p(%s)\n",
static_cast<void*>(this),
@@ -2598,7 +2656,7 @@ MaybeObject* Map::GeneralizeRepresentation(int modify_index,
old_descriptors->GetKey(descriptor), new_descriptors);
if (FLAG_trace_generalization &&
- !(modify_index == 0 && new_representation.IsSmi())) {
+ !(modify_index == 0 && new_representation.IsNone())) {
PrintF("migrating to new map %i: %p(%s) -> %p(%s) (%i steps)\n",
modify_index,
static_cast<void*>(this),
@@ -2628,7 +2686,7 @@ MaybeObject* Map::GeneralizeRepresentation(int modify_index,
Map* Map::CurrentMapForDeprecated() {
- AssertNoAllocation no_allocation;
+ DisallowHeapAllocation no_allocation;
if (!is_deprecated()) return this;
DescriptorArray* old_descriptors = instance_descriptors();
@@ -2788,6 +2846,8 @@ MaybeObject* JSObject::SetPropertyWithCallback(Object* structure,
{
// Leaving JavaScript.
VMState<EXTERNAL> state(isolate);
+ ExternalCallbackScope call_scope(isolate,
+ v8::ToCData<Address>(call_obj));
args.Call(call_fun,
v8::Utils::ToLocal(key),
v8::Utils::ToLocal(value_handle));
@@ -3644,6 +3704,19 @@ void JSObject::MigrateInstance(Handle<JSObject> object) {
}
+Handle<Object> JSObject::TryMigrateInstance(Handle<JSObject> object) {
+ if (FLAG_trace_migration) {
+ PrintF("migrating instance (no new maps) %p (%p)\n",
+ static_cast<void*>(*object),
+ static_cast<void*>(object->map()));
+ }
+ CALL_HEAP_FUNCTION(
+ object->GetIsolate(),
+ object->MigrateInstance(),
+ Object);
+}
+
+
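TryMigrateInstance exists because generalizing a field representation deprecates the old map while leaving existing instances behind; they are brought up to date lazily. A hedged JavaScript-level sketch (constructor name illustrative):

  function Point(x) { this.x = x; }
  var a = new Point(1);    // map M1: x tracked as Smi
  var b = new Point(1.5);  // x generalized to Double; M1 is now deprecated
  a.x;                     // touching a can trigger migration, rewriting a
                           // from the deprecated M1 to the current map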
Handle<Map> Map::GeneralizeRepresentation(Handle<Map> map,
int modify_index,
Representation representation) {
@@ -3875,10 +3948,12 @@ Handle<Object> JSObject::SetLocalPropertyIgnoreAttributes(
Handle<JSObject> object,
Handle<Name> key,
Handle<Object> value,
- PropertyAttributes attributes) {
+ PropertyAttributes attributes,
+ ValueType value_type) {
CALL_HEAP_FUNCTION(
object->GetIsolate(),
- object->SetLocalPropertyIgnoreAttributes(*key, *value, attributes),
+ object->SetLocalPropertyIgnoreAttributes(
+ *key, *value, attributes, value_type),
Object);
}
@@ -3886,7 +3961,8 @@ Handle<Object> JSObject::SetLocalPropertyIgnoreAttributes(
MaybeObject* JSObject::SetLocalPropertyIgnoreAttributes(
Name* name_raw,
Object* value_raw,
- PropertyAttributes attributes) {
+ PropertyAttributes attributes,
+ ValueType value_type) {
// Make sure that the top context does not change when doing callbacks or
// interceptor calls.
AssertNoContextChange ncc;
@@ -3912,13 +3988,16 @@ MaybeObject* JSObject::SetLocalPropertyIgnoreAttributes(
return JSObject::cast(proto)->SetLocalPropertyIgnoreAttributes(
name_raw,
value_raw,
- attributes);
+ attributes,
+ value_type);
}
// Check for accessor in prototype chain removed here in clone.
if (!lookup.IsFound()) {
// Neither properties nor transitions found.
- return AddProperty(name_raw, value_raw, attributes, kNonStrictMode);
+ return AddProperty(
+ name_raw, value_raw, attributes, kNonStrictMode,
+ MAY_BE_STORE_FROM_KEYED, PERFORM_EXTENSIBILITY_CHECK, value_type);
}
// From this point on everything needs to be handlified.
@@ -3945,9 +4024,12 @@ MaybeObject* JSObject::SetLocalPropertyIgnoreAttributes(
}
case FIELD: {
Representation representation = lookup.representation();
- if (!value->FitsRepresentation(representation)) {
+ Representation value_representation =
+ value->OptimalRepresentation(value_type);
+ if (value_representation.IsNone()) break;
+ if (!value_representation.fits_into(representation)) {
MaybeObject* maybe_failure = self->GeneralizeFieldRepresentation(
- lookup.GetDescriptorIndex(), value->OptimalRepresentation());
+ lookup.GetDescriptorIndex(), value_representation);
if (maybe_failure->IsFailure()) return maybe_failure;
DescriptorArray* desc = self->map()->instance_descriptors();
int descriptor = lookup.GetDescriptorIndex();
@@ -3988,9 +4070,11 @@ MaybeObject* JSObject::SetLocalPropertyIgnoreAttributes(
if (details.type() == FIELD) {
if (attributes == details.attributes()) {
Representation representation = details.representation();
- if (!value->FitsRepresentation(representation)) {
+ Representation value_representation =
+ value->OptimalRepresentation(value_type);
+ if (!value_representation.fits_into(representation)) {
MaybeObject* maybe_map = transition_map->GeneralizeRepresentation(
- descriptor, value->OptimalRepresentation());
+ descriptor, value_representation);
if (!maybe_map->To(&transition_map)) return maybe_map;
Object* back = transition_map->GetBackPointer();
if (back->IsMap()) {
@@ -4507,6 +4591,42 @@ MaybeObject* JSObject::TransformToFastProperties(int unused_property_fields) {
}
+static MUST_USE_RESULT MaybeObject* CopyFastElementsToDictionary(
+ Isolate* isolate,
+ FixedArrayBase* array,
+ int length,
+ SeededNumberDictionary* dictionary) {
+ Heap* heap = isolate->heap();
+ bool has_double_elements = array->IsFixedDoubleArray();
+ for (int i = 0; i < length; i++) {
+ Object* value = NULL;
+ if (has_double_elements) {
+ FixedDoubleArray* double_array = FixedDoubleArray::cast(array);
+ if (double_array->is_the_hole(i)) {
+ value = isolate->heap()->the_hole_value();
+ } else {
+ // Objects must be allocated in the old object space, since the
+ // overall number of HeapNumbers needed for the conversion might
+ // exceed the capacity of new space, and we would fail repeatedly
+ // trying to convert the FixedDoubleArray.
+ MaybeObject* maybe_value_object =
+ heap->AllocateHeapNumber(double_array->get_scalar(i), TENURED);
+ if (!maybe_value_object->ToObject(&value)) return maybe_value_object;
+ }
+ } else {
+ value = FixedArray::cast(array)->get(i);
+ }
+ if (!value->IsTheHole()) {
+ PropertyDetails details = PropertyDetails(NONE, NORMAL, 0);
+ MaybeObject* maybe_result =
+ dictionary->AddNumberEntry(i, value, details);
+ if (!maybe_result->To(&dictionary)) return maybe_result;
+ }
+ }
+ return dictionary;
+}
+
+
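CopyFastElementsToDictionary factors the copy loop out of NormalizeElements (below) so that Freeze can reuse it without taking a normalization-triggered map transition. At the JavaScript level, an array typically ends up with dictionary elements once it becomes sufficiently sparse; an illustrative, engine-dependent example:

  var a = [1, 2, 3];   // contiguous fast elements backing store
  a[1000000] = 4;      // large gap: the backing store is likely normalized
                       // into a number dictionary keyed by index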
Handle<SeededNumberDictionary> JSObject::NormalizeElements(
Handle<JSObject> object) {
CALL_HEAP_FUNCTION(object->GetIsolate(),
@@ -4538,44 +4658,14 @@ MaybeObject* JSObject::NormalizeElements() {
int old_capacity = 0;
int used_elements = 0;
GetElementsCapacityAndUsage(&old_capacity, &used_elements);
- SeededNumberDictionary* dictionary = NULL;
- { Object* object;
- MaybeObject* maybe =
- SeededNumberDictionary::Allocate(GetHeap(), used_elements);
- if (!maybe->ToObject(&object)) return maybe;
- dictionary = SeededNumberDictionary::cast(object);
- }
+ SeededNumberDictionary* dictionary;
+ MaybeObject* maybe_dictionary =
+ SeededNumberDictionary::Allocate(GetHeap(), used_elements);
+ if (!maybe_dictionary->To(&dictionary)) return maybe_dictionary;
- // Copy the elements to the new backing store.
- bool has_double_elements = array->IsFixedDoubleArray();
- for (int i = 0; i < length; i++) {
- Object* value = NULL;
- if (has_double_elements) {
- FixedDoubleArray* double_array = FixedDoubleArray::cast(array);
- if (double_array->is_the_hole(i)) {
- value = GetIsolate()->heap()->the_hole_value();
- } else {
- // Objects must be allocated in the old object space, since the
- // overall number of HeapNumbers needed for the conversion might
- // exceed the capacity of new space, and we would fail repeatedly
- // trying to convert the FixedDoubleArray.
- MaybeObject* maybe_value_object =
- GetHeap()->AllocateHeapNumber(double_array->get_scalar(i), TENURED);
- if (!maybe_value_object->ToObject(&value)) return maybe_value_object;
- }
- } else {
- ASSERT(old_map->has_fast_smi_or_object_elements());
- value = FixedArray::cast(array)->get(i);
- }
- PropertyDetails details = PropertyDetails(NONE, NORMAL, 0);
- if (!value->IsTheHole()) {
- Object* result;
- MaybeObject* maybe_result =
- dictionary->AddNumberEntry(i, value, details);
- if (!maybe_result->ToObject(&result)) return maybe_result;
- dictionary = SeededNumberDictionary::cast(result);
- }
- }
+ maybe_dictionary = CopyFastElementsToDictionary(
+ GetIsolate(), array, length, dictionary);
+ if (!maybe_dictionary->To(&dictionary)) return maybe_dictionary;
// Switch to using the dictionary as the backing storage for elements.
if (is_arguments) {
@@ -4583,11 +4673,11 @@ MaybeObject* JSObject::NormalizeElements() {
} else {
// Set the new map first to satisfy the elements type assert in
// set_elements().
- Object* new_map;
+ Map* new_map;
MaybeObject* maybe = GetElementsTransitionMap(GetIsolate(),
DICTIONARY_ELEMENTS);
- if (!maybe->ToObject(&new_map)) return maybe;
- set_map(Map::cast(new_map));
+ if (!maybe->To(&new_map)) return maybe;
+ set_map(new_map);
set_elements(dictionary);
}
@@ -5182,7 +5272,7 @@ bool JSObject::ReferencesObjectFromElements(FixedArray* elements,
bool JSObject::ReferencesObject(Object* obj) {
Map* map_of_this = map();
Heap* heap = GetHeap();
- AssertNoAllocation no_alloc;
+ DisallowHeapAllocation no_allocation;
// Is the object the constructor for this object?
if (map_of_this->constructor() == obj) {
@@ -5331,6 +5421,7 @@ MaybeObject* JSObject::PreventExtensions() {
// Do a map transition; other objects with this map may still
// be extensible.
+ // TODO(adamk): Extend the NormalizedMapCache to handle non-extensible maps.
Map* new_map;
MaybeObject* maybe = map()->Copy();
if (!maybe->To(&new_map)) return maybe;
@@ -5342,6 +5433,145 @@ MaybeObject* JSObject::PreventExtensions() {
}
+template<typename Dictionary>
+static void FreezeDictionary(Dictionary* dictionary) {
+ int capacity = dictionary->Capacity();
+ for (int i = 0; i < capacity; i++) {
+ Object* k = dictionary->KeyAt(i);
+ if (dictionary->IsKey(k)) {
+ PropertyDetails details = dictionary->DetailsAt(i);
+ int attrs = DONT_DELETE;
+ // READ_ONLY is an invalid attribute for JS setters/getters.
+ if (details.type() != CALLBACKS ||
+ !dictionary->ValueAt(i)->IsAccessorPair()) {
+ attrs |= READ_ONLY;
+ }
+ details = details.CopyAddAttributes(
+ static_cast<PropertyAttributes>(attrs));
+ dictionary->DetailsAtPut(i, details);
+ }
+ }
+}
+
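FreezeDictionary adds DONT_DELETE to every entry but applies READ_ONLY only to data properties, matching ES5: [[Writable]] is not a component of accessor property descriptors. This is observable from JavaScript:

  var o = {};
  Object.defineProperty(o, 'p', {
    get: function() { return 1; },
    set: function(v) {},
    configurable: true
  });
  Object.freeze(o);
  var d = Object.getOwnPropertyDescriptor(o, 'p');
  // d.configurable === false, but the getter/setter pair is untouched and
  // d has no 'writable' field at all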
+
+MUST_USE_RESULT MaybeObject* JSObject::Freeze(Isolate* isolate) {
+ // Freezing non-strict arguments should be handled elsewhere.
+ ASSERT(!HasNonStrictArgumentsElements());
+
+ Heap* heap = isolate->heap();
+
+ if (map()->is_frozen()) return this;
+
+ if (IsAccessCheckNeeded() &&
+ !isolate->MayNamedAccess(this,
+ heap->undefined_value(),
+ v8::ACCESS_KEYS)) {
+ isolate->ReportFailedAccessCheck(this, v8::ACCESS_KEYS);
+ return heap->false_value();
+ }
+
+ if (IsJSGlobalProxy()) {
+ Object* proto = GetPrototype();
+ if (proto->IsNull()) return this;
+ ASSERT(proto->IsJSGlobalObject());
+ return JSObject::cast(proto)->Freeze(isolate);
+ }
+
+ // It's not possible to freeze objects with external array elements
+ if (HasExternalArrayElements()) {
+ HandleScope scope(isolate);
+ Handle<Object> object(this, isolate);
+ Handle<Object> error =
+ isolate->factory()->NewTypeError(
+ "cant_prevent_ext_external_array_elements",
+ HandleVector(&object, 1));
+ return isolate->Throw(*error);
+ }
+
+ SeededNumberDictionary* new_element_dictionary = NULL;
+ if (!elements()->IsDictionary()) {
+ int length = IsJSArray()
+ ? Smi::cast(JSArray::cast(this)->length())->value()
+ : elements()->length();
+ if (length > 0) {
+ int capacity = 0;
+ int used = 0;
+ GetElementsCapacityAndUsage(&capacity, &used);
+ MaybeObject* maybe_dict = SeededNumberDictionary::Allocate(heap, used);
+ if (!maybe_dict->To(&new_element_dictionary)) return maybe_dict;
+
+ // Move the elements to a dictionary; skip NormalizeElements to avoid
+ // unnecessary map transitions.
+ maybe_dict = CopyFastElementsToDictionary(isolate, elements(), length,
+ new_element_dictionary);
+ if (!maybe_dict->To(&new_element_dictionary)) return maybe_dict;
+ } else {
+ // No existing elements; use a pre-allocated empty backing store.
+ new_element_dictionary = heap->empty_slow_element_dictionary();
+ }
+ }
+
+ LookupResult result(isolate);
+ map()->LookupTransition(this, heap->frozen_symbol(), &result);
+ if (result.IsTransition()) {
+ Map* transition_map = result.GetTransitionTarget();
+ ASSERT(transition_map->has_dictionary_elements());
+ ASSERT(transition_map->is_frozen());
+ ASSERT(!transition_map->is_extensible());
+ set_map(transition_map);
+ } else if (HasFastProperties() && map()->CanHaveMoreTransitions()) {
+ // Create a new descriptor array with fully-frozen properties
+ int num_descriptors = map()->NumberOfOwnDescriptors();
+ DescriptorArray* new_descriptors;
+ MaybeObject* maybe_descriptors =
+ map()->instance_descriptors()->CopyUpToAddAttributes(num_descriptors,
+ FROZEN);
+ if (!maybe_descriptors->To(&new_descriptors)) return maybe_descriptors;
+
+ Map* new_map;
+ MaybeObject* maybe_new_map = map()->CopyReplaceDescriptors(
+ new_descriptors, INSERT_TRANSITION, heap->frozen_symbol());
+ if (!maybe_new_map->To(&new_map)) return maybe_new_map;
+ new_map->freeze();
+ new_map->set_is_extensible(false);
+ new_map->set_elements_kind(DICTIONARY_ELEMENTS);
+ set_map(new_map);
+ } else {
+ // Slow path: need to normalize properties for safety
+ MaybeObject* maybe = NormalizeProperties(CLEAR_INOBJECT_PROPERTIES, 0);
+ if (maybe->IsFailure()) return maybe;
+
+ // Create a new map, since other objects with this map may be extensible.
+ // TODO(adamk): Extend the NormalizedMapCache to handle non-extensible maps.
+ Map* new_map;
+ MaybeObject* maybe_copy = map()->Copy();
+ if (!maybe_copy->To(&new_map)) return maybe_copy;
+ new_map->freeze();
+ new_map->set_is_extensible(false);
+ new_map->set_elements_kind(DICTIONARY_ELEMENTS);
+ set_map(new_map);
+
+ // Freeze dictionary-mode properties
+ FreezeDictionary(property_dictionary());
+ }
+
+ ASSERT(map()->has_dictionary_elements());
+ if (new_element_dictionary != NULL) {
+ set_elements(new_element_dictionary);
+ }
+
+ if (elements() != heap->empty_slow_element_dictionary()) {
+ SeededNumberDictionary* dictionary = element_dictionary();
+ // Make sure we never go back to the fast case
+ dictionary->set_requires_slow_elements();
+ // Freeze all elements in the dictionary
+ FreezeDictionary(dictionary);
+ }
+
+ return this;
+}
+
+
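The transition-based fast path above lets repeated freezes, and freezes of same-shaped objects, reuse one frozen map instead of normalizing properties each time. The observable ES5 semantics are unchanged either way:

  var o = { a: 1 };
  Object.freeze(o);
  o.a = 2;              // ignored (TypeError in strict mode)
  delete o.a;           // fails; returns false
  o.b = 3;              // not added: frozen implies non-extensible
  Object.isFrozen(o);   // true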
MUST_USE_RESULT MaybeObject* JSObject::DeepCopy(Isolate* isolate) {
StackLimitCheck check(isolate);
if (check.HasOverflowed()) return isolate->StackOverflow();
@@ -6366,9 +6596,9 @@ MaybeObject* Map::ShareDescriptor(DescriptorArray* descriptors,
MaybeObject* Map::CopyReplaceDescriptors(DescriptorArray* descriptors,
- Name* name,
TransitionFlag flag,
- int descriptor_index) {
+ Name* name,
+ SimpleTransitionFlag simple_flag) {
ASSERT(descriptors->IsSortedNoDuplicates());
Map* result;
@@ -6379,14 +6609,8 @@ MaybeObject* Map::CopyReplaceDescriptors(DescriptorArray* descriptors,
if (flag == INSERT_TRANSITION && CanHaveMoreTransitions()) {
TransitionArray* transitions;
- SimpleTransitionFlag simple_flag =
- (descriptor_index == descriptors->number_of_descriptors() - 1)
- ? SIMPLE_TRANSITION
- : FULL_TRANSITION;
- ASSERT(name == descriptors->GetKey(descriptor_index));
MaybeObject* maybe_transitions = AddTransition(name, result, simple_flag);
if (!maybe_transitions->To(&transitions)) return maybe_transitions;
-
set_transitions(transitions);
result->SetBackPointer(this);
} else if (flag != OMIT_TRANSITION_KEEP_REPRESENTATIONS) {
@@ -6502,7 +6726,7 @@ MaybeObject* Map::CopyWithPreallocatedFieldDescriptors() {
descriptors->CopyUpTo(number_of_own_descriptors);
if (!maybe_descriptors->To(&new_descriptors)) return maybe_descriptors;
- return CopyReplaceDescriptors(new_descriptors, NULL, OMIT_TRANSITION, 0);
+ return CopyReplaceDescriptors(new_descriptors, OMIT_TRANSITION);
}
@@ -6514,7 +6738,7 @@ MaybeObject* Map::Copy() {
descriptors->CopyUpTo(number_of_own_descriptors);
if (!maybe_descriptors->To(&new_descriptors)) return maybe_descriptors;
- return CopyReplaceDescriptors(new_descriptors, NULL, OMIT_TRANSITION, 0);
+ return CopyReplaceDescriptors(new_descriptors, OMIT_TRANSITION);
}
@@ -6555,9 +6779,7 @@ MaybeObject* Map::CopyAddDescriptor(Descriptor* descriptor,
}
Name* key = descriptor->GetKey();
- int insertion_index = new_descriptors->number_of_descriptors() - 1;
-
- return CopyReplaceDescriptors(new_descriptors, key, flag, insertion_index);
+ return CopyReplaceDescriptors(new_descriptors, flag, key, SIMPLE_TRANSITION);
}
@@ -6578,7 +6800,8 @@ MaybeObject* Map::CopyInsertDescriptor(Descriptor* descriptor,
}
-MaybeObject* DescriptorArray::CopyUpTo(int enumeration_index) {
+MaybeObject* DescriptorArray::CopyUpToAddAttributes(
+ int enumeration_index, PropertyAttributes attributes) {
if (enumeration_index == 0) return GetHeap()->empty_descriptor_array();
int size = enumeration_index;
@@ -6588,8 +6811,24 @@ MaybeObject* DescriptorArray::CopyUpTo(int enumeration_index) {
if (!maybe_descriptors->To(&descriptors)) return maybe_descriptors;
DescriptorArray::WhitenessWitness witness(descriptors);
- for (int i = 0; i < size; ++i) {
- descriptors->CopyFrom(i, this, i, witness);
+ if (attributes != NONE) {
+ for (int i = 0; i < size; ++i) {
+ Object* value = GetValue(i);
+ PropertyDetails details = GetDetails(i);
+ int mask = DONT_DELETE | DONT_ENUM;
+ // READ_ONLY is an invalid attribute for JS setters/getters.
+ if (details.type() != CALLBACKS || !value->IsAccessorPair()) {
+ mask |= READ_ONLY;
+ }
+ details = details.CopyAddAttributes(
+ static_cast<PropertyAttributes>(attributes & mask));
+ Descriptor desc(GetKey(i), value, details);
+ descriptors->Set(i, &desc, witness);
+ }
+ } else {
+ for (int i = 0; i < size; ++i) {
+ descriptors->CopyFrom(i, this, i, witness);
+ }
}
if (number_of_descriptors() != enumeration_index) descriptors->Sort();
@@ -6630,7 +6869,11 @@ MaybeObject* Map::CopyReplaceDescriptor(DescriptorArray* descriptors,
// Re-sort if descriptors were removed.
if (new_size != descriptors->length()) new_descriptors->Sort();
- return CopyReplaceDescriptors(new_descriptors, key, flag, insertion_index);
+ SimpleTransitionFlag simple_flag =
+ (insertion_index == descriptors->number_of_descriptors() - 1)
+ ? SIMPLE_TRANSITION
+ : FULL_TRANSITION;
+ return CopyReplaceDescriptors(new_descriptors, flag, key, simple_flag);
}
@@ -7373,7 +7616,7 @@ MaybeObject* FixedArray::CopySize(int new_length) {
}
FixedArray* result = FixedArray::cast(obj);
// Copy the content
- AssertNoAllocation no_gc;
+ DisallowHeapAllocation no_gc;
int len = length();
if (new_length < len) len = new_length;
// We are taking the map from the old fixed array so the map is sure to
@@ -7388,7 +7631,7 @@ MaybeObject* FixedArray::CopySize(int new_length) {
void FixedArray::CopyTo(int pos, FixedArray* dest, int dest_pos, int len) {
- AssertNoAllocation no_gc;
+ DisallowHeapAllocation no_gc;
WriteBarrierMode mode = dest->GetWriteBarrierMode(no_gc);
for (int index = 0; index < len; index++) {
dest->set(dest_pos+index, get(pos+index), mode);
@@ -7672,6 +7915,7 @@ bool String::LooksValid() {
String::FlatContent String::GetFlatContent() {
+ ASSERT(!AllowHeapAllocation::IsAllowed());
int length = this->length();
StringShape shape(this);
String* string = this;
@@ -7898,6 +8142,8 @@ void FlatStringReader::PostGarbageCollection() {
if (str_ == NULL) return;
Handle<String> str(str_);
ASSERT(str->IsFlat());
+ DisallowHeapAllocation no_gc;
+ // This does not actually prevent the vector from being relocated later.
String::FlatContent content = str->GetFlatContent();
ASSERT(content.IsFlat());
is_ascii_ = content.IsAscii();
@@ -8295,6 +8541,8 @@ class StringComparator {
const uint8_t* buffer8_;
const uint16_t* buffer16_;
};
+
+ private:
DISALLOW_IMPLICIT_CONSTRUCTORS(State);
};
@@ -8449,6 +8697,7 @@ bool String::IsUtf8EqualTo(Vector<const char> str, bool allow_prefix_match) {
bool String::IsOneByteEqualTo(Vector<const uint8_t> str) {
int slen = length();
if (str.length() != slen) return false;
+ DisallowHeapAllocation no_gc;
FlatContent content = GetFlatContent();
if (content.IsAscii()) {
return CompareChars(content.ToOneByteVector().start(),
@@ -8464,6 +8713,7 @@ bool String::IsOneByteEqualTo(Vector<const uint8_t> str) {
bool String::IsTwoByteEqualTo(Vector<const uc16> str) {
int slen = length();
if (str.length() != slen) return false;
+ DisallowHeapAllocation no_gc;
FlatContent content = GetFlatContent();
if (content.IsTwoByte()) {
return CompareChars(content.ToUC16Vector().start(), str.start(), slen) == 0;
@@ -9043,7 +9293,10 @@ MaybeObject* SharedFunctionInfo::AddToOptimizedCodeMap(Context* native_context,
new_code_map->set(old_length + 1, code);
new_code_map->set(old_length + 2, literals);
// Zap the old map for the sake of the heap verifier.
- if (Heap::ShouldZapGarbage()) ZapOptimizedCodeMap();
+ if (Heap::ShouldZapGarbage()) {
+ Object** data = old_code_map->data_start();
+ MemsetPointer(data, heap->the_hole_value(), old_length);
+ }
}
#ifdef DEBUG
for (int i = kEntriesStart; i < new_code_map->length(); i += kEntryLength) {
@@ -9137,14 +9390,6 @@ void SharedFunctionInfo::TrimOptimizedCodeMap(int shrink_by) {
}
-void SharedFunctionInfo::ZapOptimizedCodeMap() {
- FixedArray* code_map = FixedArray::cast(optimized_code_map());
- MemsetPointer(code_map->data_start(),
- GetHeap()->the_hole_value(),
- code_map->length());
-}
-
-
bool JSFunction::CompileLazy(Handle<JSFunction> function,
ClearExceptionFlag flag) {
bool result = true;
@@ -9435,114 +9680,6 @@ int SharedFunctionInfo::CalculateInObjectProperties() {
}
-bool SharedFunctionInfo::CanGenerateInlineConstructor(Object* prototype) {
- // Check the basic conditions for generating inline constructor code.
- if (!FLAG_inline_new
- || !has_only_simple_this_property_assignments()
- || is_generator()
- || this_property_assignments_count() == 0) {
- return false;
- }
-
- Isolate* isolate = GetIsolate();
- Heap* heap = isolate->heap();
-
- // Traverse the proposed prototype chain looking for properties of the
- // same names as are set by the inline constructor.
- for (Object* obj = prototype;
- obj != heap->null_value();
- obj = obj->GetPrototype(isolate)) {
- JSReceiver* receiver = JSReceiver::cast(obj);
- for (int i = 0; i < this_property_assignments_count(); i++) {
- LookupResult result(heap->isolate());
- String* name = GetThisPropertyAssignmentName(i);
- receiver->LocalLookup(name, &result);
- if (result.IsFound()) {
- switch (result.type()) {
- case NORMAL:
- case FIELD:
- case CONSTANT_FUNCTION:
- break;
- case INTERCEPTOR:
- case CALLBACKS:
- case HANDLER:
- return false;
- case TRANSITION:
- case NONEXISTENT:
- UNREACHABLE();
- break;
- }
- }
- }
- }
-
- return true;
-}
-
-
-void SharedFunctionInfo::ForbidInlineConstructor() {
- set_compiler_hints(BooleanBit::set(compiler_hints(),
- kHasOnlySimpleThisPropertyAssignments,
- false));
-}
-
-
-void SharedFunctionInfo::SetThisPropertyAssignmentsInfo(
- bool only_simple_this_property_assignments,
- FixedArray* assignments) {
- set_compiler_hints(BooleanBit::set(compiler_hints(),
- kHasOnlySimpleThisPropertyAssignments,
- only_simple_this_property_assignments));
- set_this_property_assignments(assignments);
- set_this_property_assignments_count(assignments->length() / 3);
-}
-
-
-void SharedFunctionInfo::ClearThisPropertyAssignmentsInfo() {
- Heap* heap = GetHeap();
- set_compiler_hints(BooleanBit::set(compiler_hints(),
- kHasOnlySimpleThisPropertyAssignments,
- false));
- set_this_property_assignments(heap->undefined_value());
- set_this_property_assignments_count(0);
-}
-
-
-String* SharedFunctionInfo::GetThisPropertyAssignmentName(int index) {
- Object* obj = this_property_assignments();
- ASSERT(obj->IsFixedArray());
- ASSERT(index < this_property_assignments_count());
- obj = FixedArray::cast(obj)->get(index * 3);
- ASSERT(obj->IsString());
- return String::cast(obj);
-}
-
-
-bool SharedFunctionInfo::IsThisPropertyAssignmentArgument(int index) {
- Object* obj = this_property_assignments();
- ASSERT(obj->IsFixedArray());
- ASSERT(index < this_property_assignments_count());
- obj = FixedArray::cast(obj)->get(index * 3 + 1);
- return Smi::cast(obj)->value() != -1;
-}
-
-
-int SharedFunctionInfo::GetThisPropertyAssignmentArgument(int index) {
- ASSERT(IsThisPropertyAssignmentArgument(index));
- Object* obj =
- FixedArray::cast(this_property_assignments())->get(index * 3 + 1);
- return Smi::cast(obj)->value();
-}
-
-
-Object* SharedFunctionInfo::GetThisPropertyAssignmentConstant(int index) {
- ASSERT(!IsThisPropertyAssignmentArgument(index));
- Object* obj =
- FixedArray::cast(this_property_assignments())->get(index * 3 + 2);
- return obj;
-}
-
-
// Support function for printing the source code to a StringStream
// without any allocation in the heap.
void SharedFunctionInfo::SourceCodePrint(StringStream* accumulator,
@@ -9599,7 +9736,7 @@ static bool IsCodeEquivalent(Code* code, Code* recompiled) {
void SharedFunctionInfo::EnableDeoptimizationSupport(Code* recompiled) {
ASSERT(!has_deoptimization_support());
- AssertNoAllocation no_allocation;
+ DisallowHeapAllocation no_allocation;
Code* code = this->code();
if (IsCodeEquivalent(code, recompiled)) {
// Copy the deoptimization data from the recompiled code.
@@ -9916,7 +10053,7 @@ void Code::CopyFrom(const CodeDesc& desc) {
RelocInfo::kApplyMask;
// Needed to find target_object and runtime_entry on X64
Assembler* origin = desc.origin;
- ALLOW_HANDLE_DEREF(GetIsolate(), "embedding raw addresses into code");
+ AllowDeferredHandleDereference embedding_raw_address;
for (RelocIterator it(this, mode_mask); !it.done(); it.next()) {
RelocInfo::Mode mode = it.rinfo()->rmode();
if (mode == RelocInfo::EMBEDDED_OBJECT) {
@@ -10007,7 +10144,7 @@ SafepointEntry Code::GetSafepointEntry(Address pc) {
Map* Code::FindFirstMap() {
ASSERT(is_inline_cache_stub());
- AssertNoAllocation no_allocation;
+ DisallowHeapAllocation no_allocation;
int mask = RelocInfo::ModeMask(RelocInfo::EMBEDDED_OBJECT);
for (RelocIterator it(this, mask); !it.done(); it.next()) {
RelocInfo* info = it.rinfo();
@@ -10020,7 +10157,7 @@ Map* Code::FindFirstMap() {
void Code::ReplaceFirstMap(Map* replace_with) {
ASSERT(is_inline_cache_stub());
- AssertNoAllocation no_allocation;
+ DisallowHeapAllocation no_allocation;
int mask = RelocInfo::ModeMask(RelocInfo::EMBEDDED_OBJECT);
for (RelocIterator it(this, mask); !it.done(); it.next()) {
RelocInfo* info = it.rinfo();
@@ -10036,7 +10173,7 @@ void Code::ReplaceFirstMap(Map* replace_with) {
void Code::FindAllMaps(MapHandleList* maps) {
ASSERT(is_inline_cache_stub());
- AssertNoAllocation no_allocation;
+ DisallowHeapAllocation no_allocation;
int mask = RelocInfo::ModeMask(RelocInfo::EMBEDDED_OBJECT);
for (RelocIterator it(this, mask); !it.done(); it.next()) {
RelocInfo* info = it.rinfo();
@@ -10048,7 +10185,7 @@ void Code::FindAllMaps(MapHandleList* maps) {
Code* Code::FindFirstCode() {
ASSERT(is_inline_cache_stub());
- AssertNoAllocation no_allocation;
+ DisallowHeapAllocation no_allocation;
int mask = RelocInfo::ModeMask(RelocInfo::CODE_TARGET);
for (RelocIterator it(this, mask); !it.done(); it.next()) {
RelocInfo* info = it.rinfo();
@@ -10060,7 +10197,7 @@ Code* Code::FindFirstCode() {
void Code::FindAllCode(CodeHandleList* code_list, int length) {
ASSERT(is_inline_cache_stub());
- AssertNoAllocation no_allocation;
+ DisallowHeapAllocation no_allocation;
int mask = RelocInfo::ModeMask(RelocInfo::CODE_TARGET);
int i = 0;
for (RelocIterator it(this, mask); !it.done(); it.next()) {
@@ -10076,7 +10213,7 @@ void Code::FindAllCode(CodeHandleList* code_list, int length) {
Name* Code::FindFirstName() {
ASSERT(is_inline_cache_stub());
- AssertNoAllocation no_allocation;
+ DisallowHeapAllocation no_allocation;
int mask = RelocInfo::ModeMask(RelocInfo::EMBEDDED_OBJECT);
for (RelocIterator it(this, mask); !it.done(); it.next()) {
RelocInfo* info = it.rinfo();
@@ -10730,17 +10867,63 @@ static bool GetOldValue(Isolate* isolate,
Handle<JSObject> object,
uint32_t index,
List<Handle<Object> >* old_values,
- List<Handle<String> >* indices) {
+ List<uint32_t>* indices) {
PropertyAttributes attributes = object->GetLocalElementAttribute(index);
ASSERT(attributes != ABSENT);
if (attributes == DONT_DELETE) return false;
old_values->Add(object->GetLocalElementAccessorPair(index) == NULL
? Object::GetElement(object, index)
: Handle<Object>::cast(isolate->factory()->the_hole_value()));
- indices->Add(isolate->factory()->Uint32ToString(index));
+ indices->Add(index);
return true;
}
+static void EnqueueSpliceRecord(Handle<JSArray> object,
+ uint32_t index,
+ Handle<JSArray> deleted,
+ uint32_t add_count) {
+ Isolate* isolate = object->GetIsolate();
+ HandleScope scope(isolate);
+ Handle<Object> index_object = isolate->factory()->NewNumberFromUint(index);
+ Handle<Object> add_count_object =
+ isolate->factory()->NewNumberFromUint(add_count);
+
+ Handle<Object> args[] =
+ { object, index_object, deleted, add_count_object };
+
+ bool threw;
+ Execution::Call(Handle<JSFunction>(isolate->observers_enqueue_splice()),
+ isolate->factory()->undefined_value(), ARRAY_SIZE(args), args,
+ &threw);
+ ASSERT(!threw);
+}
+
+
+static void BeginPerformSplice(Handle<JSArray> object) {
+ Isolate* isolate = object->GetIsolate();
+ HandleScope scope(isolate);
+ Handle<Object> args[] = { object };
+
+ bool threw;
+ Execution::Call(Handle<JSFunction>(isolate->observers_begin_perform_splice()),
+ isolate->factory()->undefined_value(), ARRAY_SIZE(args), args,
+ &threw);
+ ASSERT(!threw);
+}
+
+
+static void EndPerformSplice(Handle<JSArray> object) {
+ Isolate* isolate = object->GetIsolate();
+ HandleScope scope(isolate);
+ Handle<Object> args[] = { object };
+
+ bool threw;
+ Execution::Call(Handle<JSFunction>(isolate->observers_end_perform_splice()),
+ isolate->factory()->undefined_value(), ARRAY_SIZE(args), args,
+ &threw);
+ ASSERT(!threw);
+}
+
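These helpers feed the experimental object-observation machinery: BeginPerformSplice/EndPerformSplice bracket the individual "deleted"/"updated" records so observers can be handed one coherent splice instead. A hedged sketch against the era's experimental API (enabled by --harmony-observation; exact record delivery was still in flux at this point):

  var arr = ['a', 'b', 'c'];
  Object.observe(arr, function(records) {
    // truncation is summarized roughly as:
    // { type: 'splice', object: arr, index: 1,
    //   removed: ['b', 'c'], addedCount: 0 }
  });
  arr.length = 1;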
MaybeObject* JSArray::SetElementsLength(Object* len) {
// We should never end in here with a pixel or external array.
@@ -10751,7 +10934,7 @@ MaybeObject* JSArray::SetElementsLength(Object* len) {
Isolate* isolate = GetIsolate();
HandleScope scope(isolate);
Handle<JSArray> self(this);
- List<Handle<String> > indices;
+ List<uint32_t> indices;
List<Handle<Object> > old_values;
Handle<Object> old_length_handle(self->length(), isolate);
Handle<Object> new_length_handle(len, isolate);
@@ -10791,15 +10974,38 @@ MaybeObject* JSArray::SetElementsLength(Object* len) {
if (!result->ToHandle(&hresult, isolate)) return result;
CHECK(self->length()->ToArrayIndex(&new_length));
- if (old_length != new_length) {
- for (int i = 0; i < indices.length(); ++i) {
- JSObject::EnqueueChangeRecord(
- self, "deleted", indices[i], old_values[i]);
- }
+ if (old_length == new_length) return *hresult;
+
+ BeginPerformSplice(self);
+
+ for (int i = 0; i < indices.length(); ++i) {
JSObject::EnqueueChangeRecord(
- self, "updated", isolate->factory()->length_string(),
- old_length_handle);
+ self, "deleted", isolate->factory()->Uint32ToString(indices[i]),
+ old_values[i]);
}
+ JSObject::EnqueueChangeRecord(
+ self, "updated", isolate->factory()->length_string(),
+ old_length_handle);
+
+ EndPerformSplice(self);
+
+ uint32_t index = Min(old_length, new_length);
+ uint32_t add_count = new_length > old_length ? new_length - old_length : 0;
+ uint32_t delete_count = new_length < old_length ? old_length - new_length : 0;
+ Handle<JSArray> deleted = isolate->factory()->NewJSArray(0);
+ if (delete_count > 0) {
+ for (int i = indices.length() - 1; i >= 0; i--) {
+ JSObject::SetElement(deleted, indices[i] - index, old_values[i], NONE,
+ kNonStrictMode);
+ }
+
+ SetProperty(deleted, isolate->factory()->length_string(),
+ isolate->factory()->NewNumberFromUint(delete_count),
+ NONE, kNonStrictMode);
+ }
+
+ EnqueueSpliceRecord(self, index, deleted, add_count);
+
return *hresult;
}
@@ -10963,7 +11169,7 @@ class DeoptimizeDependentCodeFilter : public OptimizedFunctionFilter {
void DependentCode::DeoptimizeDependentCodeGroup(
Isolate* isolate,
DependentCode::DependencyGroup group) {
- AssertNoAllocation no_allocation_scope;
+ DisallowHeapAllocation no_allocation_scope;
DependentCode::GroupStartIndexes starts(this);
int start = starts.at(group);
int end = starts.at(group + 1);
@@ -11027,7 +11233,8 @@ MaybeObject* JSReceiver::SetPrototype(Object* value,
// Cycle detected.
HandleScope scope(isolate);
return isolate->Throw(
- *FACTORY->NewError("cyclic_proto", HandleVector<Object>(NULL, 0)));
+ *isolate->factory()->NewError("cyclic_proto",
+ HandleVector<Object>(NULL, 0)));
}
}
@@ -11824,14 +12031,15 @@ MaybeObject* JSObject::SetElement(uint32_t index,
Handle<Object> value(value_raw, isolate);
PropertyAttributes old_attributes = self->GetLocalElementAttribute(index);
Handle<Object> old_value = isolate->factory()->the_hole_value();
- Handle<Object> old_length;
+ Handle<Object> old_length_handle;
+ Handle<Object> new_length_handle;
if (old_attributes != ABSENT) {
if (self->GetLocalElementAccessorPair(index) == NULL)
old_value = Object::GetElement(self, index);
} else if (self->IsJSArray()) {
// Store old array length in case adding an element grows the array.
- old_length = handle(Handle<JSArray>::cast(self)->length(), isolate);
+ old_length_handle = handle(Handle<JSArray>::cast(self)->length(), isolate);
}
// Check for lookup interceptor
@@ -11847,11 +12055,25 @@ MaybeObject* JSObject::SetElement(uint32_t index,
Handle<String> name = isolate->factory()->Uint32ToString(index);
PropertyAttributes new_attributes = self->GetLocalElementAttribute(index);
if (old_attributes == ABSENT) {
- EnqueueChangeRecord(self, "new", name, old_value);
if (self->IsJSArray() &&
- !old_length->SameValue(Handle<JSArray>::cast(self)->length())) {
- EnqueueChangeRecord(
- self, "updated", isolate->factory()->length_string(), old_length);
+ !old_length_handle->SameValue(Handle<JSArray>::cast(self)->length())) {
+ new_length_handle = handle(Handle<JSArray>::cast(self)->length(),
+ isolate);
+ uint32_t old_length = 0;
+ uint32_t new_length = 0;
+ CHECK(old_length_handle->ToArrayIndex(&old_length));
+ CHECK(new_length_handle->ToArrayIndex(&new_length));
+
+ BeginPerformSplice(Handle<JSArray>::cast(self));
+ EnqueueChangeRecord(self, "new", name, old_value);
+ EnqueueChangeRecord(self, "updated", isolate->factory()->length_string(),
+ old_length_handle);
+ EndPerformSplice(Handle<JSArray>::cast(self));
+ Handle<JSArray> deleted = isolate->factory()->NewJSArray(0);
+ EnqueueSpliceRecord(Handle<JSArray>::cast(self), old_length, deleted,
+ new_length - old_length);
+ } else {
+ EnqueueChangeRecord(self, "new", name, old_value);
}
} else if (old_value->IsTheHole()) {
EnqueueChangeRecord(self, "reconfigured", name, old_value);
@@ -12382,7 +12604,7 @@ template<typename Shape, typename Key>
void Dictionary<Shape, Key>::CopyValuesTo(FixedArray* elements) {
int pos = 0;
int capacity = HashTable<Shape, Key>::Capacity();
- AssertNoAllocation no_gc;
+ DisallowHeapAllocation no_gc;
WriteBarrierMode mode = elements->GetWriteBarrierMode(no_gc);
for (int i = 0; i < capacity; i++) {
Object* k = Dictionary<Shape, Key>::KeyAt(i);
@@ -13263,7 +13485,7 @@ template<typename Shape, typename Key>
MaybeObject* HashTable<Shape, Key>::Rehash(HashTable* new_table, Key key) {
ASSERT(NumberOfElements() < new_table->Capacity());
- AssertNoAllocation no_gc;
+ DisallowHeapAllocation no_gc;
WriteBarrierMode mode = new_table->GetWriteBarrierMode(no_gc);
// Copy prefix to new array.
@@ -13388,13 +13610,13 @@ template class Dictionary<SeededNumberDictionaryShape, uint32_t>;
template class Dictionary<UnseededNumberDictionaryShape, uint32_t>;
template MaybeObject* Dictionary<SeededNumberDictionaryShape, uint32_t>::
- Allocate(Heap* heap, int at_least_space_for);
+ Allocate(Heap* heap, int at_least_space_for, PretenureFlag pretenure);
template MaybeObject* Dictionary<UnseededNumberDictionaryShape, uint32_t>::
- Allocate(Heap* heap, int at_least_space_for);
+ Allocate(Heap* heap, int at_least_space_for, PretenureFlag pretenure);
template MaybeObject* Dictionary<NameDictionaryShape, Name*>::
- Allocate(Heap* heap, int n);
+ Allocate(Heap* heap, int n, PretenureFlag pretenure);
template MaybeObject* Dictionary<SeededNumberDictionaryShape, uint32_t>::AtPut(
uint32_t, Object*);
@@ -13506,7 +13728,7 @@ MaybeObject* JSObject::PrepareSlowElementsForSort(uint32_t limit) {
}
SeededNumberDictionary* new_dict = SeededNumberDictionary::cast(obj);
- AssertNoAllocation no_alloc;
+ DisallowHeapAllocation no_alloc;
uint32_t pos = 0;
uint32_t undefs = 0;
@@ -13586,12 +13808,13 @@ MaybeObject* JSObject::PrepareSlowElementsForSort(uint32_t limit) {
MaybeObject* JSObject::PrepareElementsForSort(uint32_t limit) {
Heap* heap = GetHeap();
+ ASSERT(!map()->is_observed());
if (HasDictionaryElements()) {
// Convert to fast elements containing only the existing properties.
// Ordering is irrelevant, since we are going to sort anyway.
SeededNumberDictionary* dict = element_dictionary();
if (IsJSArray() || dict->requires_slow_elements() ||
- dict->max_number_key() >= limit || map()->is_observed()) {
+ dict->max_number_key() >= limit) {
return PrepareSlowElementsForSort(limit);
}
// Convert to fast elements.
@@ -13677,11 +13900,11 @@ MaybeObject* JSObject::PrepareElementsForSort(uint32_t limit) {
}
} else {
FixedArray* elements = FixedArray::cast(elements_base);
- AssertNoAllocation no_alloc;
+ DisallowHeapAllocation no_gc;
// Split elements into defined, undefined and the_hole, in that order. Only
// count locations for undefined and the hole, and fill them afterwards.
- WriteBarrierMode write_barrier = elements->GetWriteBarrierMode(no_alloc);
+ WriteBarrierMode write_barrier = elements->GetWriteBarrierMode(no_gc);
unsigned int undefs = limit;
unsigned int holes = limit;
// Assume most arrays contain no holes and undefined values, so minimize the
@@ -14338,10 +14561,15 @@ MaybeObject* MapCache::Put(FixedArray* array, Map* value) {
template<typename Shape, typename Key>
MaybeObject* Dictionary<Shape, Key>::Allocate(Heap* heap,
- int at_least_space_for) {
+ int at_least_space_for,
+ PretenureFlag pretenure) {
Object* obj;
{ MaybeObject* maybe_obj =
- HashTable<Shape, Key>::Allocate(heap, at_least_space_for);
+ HashTable<Shape, Key>::Allocate(
+ heap,
+ at_least_space_for,
+ HashTable<Shape, Key>::USE_DEFAULT_MINIMUM_CAPACITY,
+ pretenure);
if (!maybe_obj->ToObject(&obj)) return maybe_obj;
}
// Initialize the next enumeration index.
@@ -15064,7 +15292,7 @@ Handle<DeclaredAccessorDescriptor> DeclaredAccessorDescriptor::Create(
value->set_serialized_data(*serialized_descriptor);
// Copy in the data.
{
- AssertNoAllocation no_allocation;
+ DisallowHeapAllocation no_allocation;
uint8_t* array = serialized_descriptor->GetDataStartAddress();
if (previous_length != 0) {
uint8_t* previous_array =
@@ -15272,6 +15500,8 @@ void BreakPointInfo::ClearBreakPoint(Handle<BreakPointInfo> break_point_info,
// Add the specified break point object.
void BreakPointInfo::SetBreakPoint(Handle<BreakPointInfo> break_point_info,
Handle<Object> break_point_object) {
+ Isolate* isolate = break_point_info->GetIsolate();
+
// If there were no break point objects before, just set it.
if (break_point_info->break_point_objects()->IsUndefined()) {
break_point_info->set_break_point_objects(*break_point_object);
@@ -15281,7 +15511,7 @@ void BreakPointInfo::SetBreakPoint(Handle<BreakPointInfo> break_point_info,
if (break_point_info->break_point_objects() == *break_point_object) return;
// If there was one break point object before, replace it with an array.
if (!break_point_info->break_point_objects()->IsFixedArray()) {
- Handle<FixedArray> array = FACTORY->NewFixedArray(2);
+ Handle<FixedArray> array = isolate->factory()->NewFixedArray(2);
array->set(0, break_point_info->break_point_objects());
array->set(1, *break_point_object);
break_point_info->set_break_point_objects(*array);
@@ -15292,7 +15522,7 @@ void BreakPointInfo::SetBreakPoint(Handle<BreakPointInfo> break_point_info,
Handle<FixedArray>(
FixedArray::cast(break_point_info->break_point_objects()));
Handle<FixedArray> new_array =
- FACTORY->NewFixedArray(old_array->length() + 1);
+ isolate->factory()->NewFixedArray(old_array->length() + 1);
for (int i = 0; i < old_array->length(); i++) {
// If the break point was there before just ignore.
if (old_array->get(i) == *break_point_object) return;
@@ -15465,4 +15695,19 @@ void JSDate::SetLocalFields(int64_t local_time_ms, DateCache* date_cache) {
set_sec(Smi::FromInt(sec), SKIP_WRITE_BARRIER);
}
+
+void JSArrayBuffer::Neuter() {
+ ASSERT(is_external());
+ set_backing_store(NULL);
+ set_byte_length(Smi::FromInt(0));
+}
+
+
+void JSTypedArray::Neuter() {
+ set_byte_offset(Smi::FromInt(0));
+ set_byte_length(Smi::FromInt(0));
+ set_length(Smi::FromInt(0));
+ set_elements(GetHeap()->EmptyExternalArrayForMap(map()));
+}
+
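Neutering detaches a buffer from its backing store; per the setters above, the buffer and any typed-array views onto it then report zero lengths. A hedged sketch of where this becomes visible (transfer support varies by embedder):

  var buf = new ArrayBuffer(8);
  var view = new Uint8Array(buf);
  // worker.postMessage(buf, [buf]);  // transferring buf neuters it here
  // afterwards: buf.byteLength === 0 and view.length === 0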
} } // namespace v8::internal
diff --git a/deps/v8/src/objects.h b/deps/v8/src/objects.h
index ac74162962..1ee31b6dee 100644
--- a/deps/v8/src/objects.h
+++ b/deps/v8/src/objects.h
@@ -29,6 +29,7 @@
#define V8_OBJECTS_H_
#include "allocation.h"
+#include "assert-scope.h"
#include "builtins.h"
#include "elements-kind.h"
#include "list.h"
@@ -124,6 +125,7 @@
// - Foreign
// - SharedFunctionInfo
// - Struct
+// - Box
// - DeclaredAccessorDescriptor
// - AccessorInfo
// - DeclaredAccessorInfo
@@ -150,11 +152,6 @@
namespace v8 {
namespace internal {
-enum CompareMapMode {
- REQUIRE_EXACT_MAP,
- ALLOW_ELEMENT_TRANSITION_MAPS
-};
-
enum KeyedAccessStoreMode {
STANDARD_STORE,
STORE_TRANSITION_SMI_TO_OBJECT,
@@ -352,6 +349,7 @@ const int kStubMinorKeyBits = kBitsPerInt - kSmiTagSize - kStubMajorKeyBits;
V(CODE_TYPE) \
V(ODDBALL_TYPE) \
V(JS_GLOBAL_PROPERTY_CELL_TYPE) \
+ V(BOX_TYPE) \
\
V(HEAP_NUMBER_TYPE) \
V(FOREIGN_TYPE) \
@@ -530,6 +528,7 @@ const int kStubMinorKeyBits = kBitsPerInt - kSmiTagSize - kStubMajorKeyBits;
// type tags, elements in this list have to be added to the INSTANCE_TYPE_LIST
// manually.
#define STRUCT_LIST_ALL(V) \
+ V(BOX, Box, box) \
V(DECLARED_ACCESSOR_DESCRIPTOR, \
DeclaredAccessorDescriptor, \
declared_accessor_descriptor) \
@@ -671,6 +670,7 @@ enum InstanceType {
CODE_TYPE,
ODDBALL_TYPE,
JS_GLOBAL_PROPERTY_CELL_TYPE,
+ BOX_TYPE,
// "Data", objects that cannot contain non-map-word pointers to heap
// objects.
@@ -873,6 +873,7 @@ class MaybeObject BASE_EMBEDDED {
inline bool IsOutOfMemory();
inline bool IsException();
INLINE(bool IsTheHole());
+ INLINE(bool IsUninitialized());
inline bool ToObject(Object** obj) {
if (IsFailure()) return false;
*obj = reinterpret_cast<Object*>(this);
@@ -1050,6 +1051,7 @@ class Object : public MaybeObject {
INLINE(bool IsUndefined());
INLINE(bool IsNull());
INLINE(bool IsTheHole()); // Shadows MaybeObject's implementation.
+ INLINE(bool IsUninitialized());
INLINE(bool IsTrue());
INLINE(bool IsFalse());
inline bool IsArgumentsMarker();
@@ -1061,17 +1063,27 @@ class Object : public MaybeObject {
// Extract the number.
inline double Number();
inline bool IsNaN();
+ bool ToInt32(int32_t* value);
+ bool ToUint32(uint32_t* value);
+
+ // Indicates whether OptimalRepresentation can do its work, or whether it
+ // always has to return Representation::Tagged().
+ enum ValueType {
+ OPTIMAL_REPRESENTATION,
+ FORCE_TAGGED
+ };
- inline Representation OptimalRepresentation() {
- if (FLAG_track_fields && IsSmi()) {
+ inline Representation OptimalRepresentation(
+ ValueType type = OPTIMAL_REPRESENTATION) {
+ if (!FLAG_track_fields) return Representation::Tagged();
+ if (type == FORCE_TAGGED) return Representation::Tagged();
+ if (IsSmi()) {
return Representation::Smi();
} else if (FLAG_track_double_fields && IsHeapNumber()) {
return Representation::Double();
- } else if (FLAG_track_heap_object_fields && !IsUndefined()) {
- // Don't track undefined as heapobject because it's also used as temporary
- // value for computed fields that may turn out to be Smi. That combination
- // will go tagged, so go tagged immediately.
- // TODO(verwaest): Change once we track computed boilerplate fields.
+ } else if (FLAG_track_computed_fields && IsUninitialized()) {
+ return Representation::None();
+ } else if (FLAG_track_heap_object_fields) {
ASSERT(IsHeapObject());
return Representation::HeapObject();
} else {
@@ -1080,7 +1092,9 @@ class Object : public MaybeObject {
}
inline bool FitsRepresentation(Representation representation) {
- if (FLAG_track_fields && representation.IsSmi()) {
+ if (FLAG_track_fields && representation.IsNone()) {
+ return false;
+ } else if (FLAG_track_fields && representation.IsSmi()) {
return IsSmi();
} else if (FLAG_track_double_fields && representation.IsDouble()) {
return IsNumber();
@@ -1419,11 +1433,12 @@ class HeapObject: public Object {
static inline HeapObject* cast(Object* obj);
// Return the write barrier mode for this. Callers of this function
- // must be able to present a reference to an AssertNoAllocation
+ // must be able to present a reference to a DisallowHeapAllocation
// object as a sign that they are not going to use this function
// from code that allocates and thus invalidates the returned write
// barrier mode.
- inline WriteBarrierMode GetWriteBarrierMode(const AssertNoAllocation&);
+ inline WriteBarrierMode GetWriteBarrierMode(
+ const DisallowHeapAllocation& promise);
// Dispatched behavior.
void HeapObjectShortPrint(StringStream* accumulator);
@@ -1828,7 +1843,8 @@ class JSObject: public JSReceiver {
Handle<JSObject> object,
Handle<Name> key,
Handle<Object> value,
- PropertyAttributes attributes);
+ PropertyAttributes attributes,
+ ValueType value_type = OPTIMAL_REPRESENTATION);
static inline Handle<String> ExpectedTransitionKey(Handle<Map> map);
static inline Handle<Map> ExpectedTransitionTarget(Handle<Map> map);
@@ -1848,11 +1864,15 @@ class JSObject: public JSReceiver {
static void MigrateInstance(Handle<JSObject> instance);
inline MUST_USE_RESULT MaybeObject* MigrateInstance();
+ static Handle<Object> TryMigrateInstance(Handle<JSObject> instance);
+ inline MUST_USE_RESULT MaybeObject* TryMigrateInstance();
+
// Can cause GC.
MUST_USE_RESULT MaybeObject* SetLocalPropertyIgnoreAttributes(
Name* key,
Object* value,
- PropertyAttributes attributes);
+ PropertyAttributes attributes,
+ ValueType value_type = OPTIMAL_REPRESENTATION);
// Retrieve a value in a normalized object given a lookup result.
// Handles the special representation of JS global objects.
@@ -2214,7 +2234,8 @@ class JSObject: public JSReceiver {
Name* name,
Object* value,
PropertyAttributes attributes,
- StoreFromKeyed store_mode = MAY_BE_STORE_FROM_KEYED);
+ StoreFromKeyed store_mode = MAY_BE_STORE_FROM_KEYED,
+ ValueType value_type = OPTIMAL_REPRESENTATION);
// Add a property to a slow-case object.
MUST_USE_RESULT MaybeObject* AddSlowProperty(Name* name,
@@ -2228,7 +2249,8 @@ class JSObject: public JSReceiver {
PropertyAttributes attributes,
StrictModeFlag strict_mode,
StoreFromKeyed store_mode = MAY_BE_STORE_FROM_KEYED,
- ExtensibilityCheck extensibility_check = PERFORM_EXTENSIBILITY_CHECK);
+ ExtensibilityCheck extensibility_check = PERFORM_EXTENSIBILITY_CHECK,
+ ValueType value_type = OPTIMAL_REPRESENTATION);
// Convert the object to use the canonical dictionary
// representation. If the object is expected to have additional properties
@@ -2297,6 +2319,9 @@ class JSObject: public JSReceiver {
static Handle<Object> PreventExtensions(Handle<JSObject> object);
MUST_USE_RESULT MaybeObject* PreventExtensions();
+ // ES5 Object.freeze
+ MUST_USE_RESULT MaybeObject* Freeze(Isolate* isolate);
+
// Copy object
MUST_USE_RESULT MaybeObject* DeepCopy(Isolate* isolate);
@@ -2834,7 +2859,13 @@ class DescriptorArray: public FixedArray {
int new_size,
DescriptorArray* other);
- MUST_USE_RESULT MaybeObject* CopyUpTo(int enumeration_index);
+ MUST_USE_RESULT MaybeObject* CopyUpTo(int enumeration_index) {
+ return CopyUpToAddAttributes(enumeration_index, NONE);
+ }
+
+ MUST_USE_RESULT MaybeObject* CopyUpToAddAttributes(
+ int enumeration_index,
+ PropertyAttributes attributes);
// Sort the instance descriptors by the hash codes of their keys.
void Sort();
@@ -3369,8 +3400,10 @@ class Dictionary: public HashTable<Shape, Key> {
}
// Returns a new array for dictionary usage. Might return Failure.
- MUST_USE_RESULT static MaybeObject* Allocate(Heap* heap,
- int at_least_space_for);
+ MUST_USE_RESULT static MaybeObject* Allocate(
+ Heap* heap,
+ int at_least_space_for,
+ PretenureFlag pretenure = NOT_TENURED);
// Ensure enough space for n additional elements.
MUST_USE_RESULT MaybeObject* EnsureCapacity(int n, Key key);
@@ -3664,7 +3697,7 @@ class ScopeInfo : public FixedArray {
static inline ScopeInfo* cast(Object* object);
// Return the type of this scope.
- ScopeType Type();
+ ScopeType scope_type();
// Does this scope call eval?
bool CallsEval();
@@ -3847,7 +3880,7 @@ class ScopeInfo : public FixedArray {
};
// Properties of scopes.
- class TypeField: public BitField<ScopeType, 0, 3> {};
+ class ScopeTypeField: public BitField<ScopeType, 0, 3> {};
class CallsEvalField: public BitField<bool, 3, 1> {};
class LanguageModeField: public BitField<LanguageMode, 4, 2> {};
class FunctionVariableField: public BitField<FunctionVariableInfo, 6, 2> {};
@@ -4525,8 +4558,16 @@ class Code: public HeapObject {
inline Kind kind();
inline InlineCacheState ic_state(); // Only valid for IC stubs.
inline ExtraICState extra_ic_state(); // Only valid for IC stubs.
+
inline ExtraICState extended_extra_ic_state(); // Only valid for
// non-call IC stubs.
+ static bool needs_extended_extra_ic_state(Kind kind) {
+ // TODO(danno): This is a bit of a hack right now since there are still
+ // clients of this API that pass "extra" values in for argc. These clients
+ // should be retrofitted to use ExtendedExtraICState.
+ return kind == COMPARE_NIL_IC || kind == TO_BOOLEAN_IC;
+ }
+
inline StubType type(); // Only valid for monomorphic IC stubs.
inline int arguments_count(); // Only valid for call IC stubs.
@@ -4619,7 +4660,6 @@ class Code: public HeapObject {
// [to_boolean_foo]: For kind TO_BOOLEAN_IC tells what state the stub is in.
inline byte to_boolean_state();
- inline void set_to_boolean_state(byte value);
// [compare_nil]: For kind COMPARE_NIL_IC tells what state the stub is in.
byte compare_nil_types();
@@ -4851,9 +4891,6 @@ class Code: public HeapObject {
static const int kUnaryOpTypeFirstBit =
kStackSlotsFirstBit + kStackSlotsBitCount;
static const int kUnaryOpTypeBitCount = 3;
- static const int kToBooleanStateFirstBit =
- kStackSlotsFirstBit + kStackSlotsBitCount;
- static const int kToBooleanStateBitCount = 8;
static const int kHasFunctionCacheFirstBit =
kStackSlotsFirstBit + kStackSlotsBitCount;
static const int kHasFunctionCacheBitCount = 1;
@@ -4863,7 +4900,6 @@ class Code: public HeapObject {
STATIC_ASSERT(kStackSlotsFirstBit + kStackSlotsBitCount <= 32);
STATIC_ASSERT(kUnaryOpTypeFirstBit + kUnaryOpTypeBitCount <= 32);
- STATIC_ASSERT(kToBooleanStateFirstBit + kToBooleanStateBitCount <= 32);
STATIC_ASSERT(kHasFunctionCacheFirstBit + kHasFunctionCacheBitCount <= 32);
STATIC_ASSERT(kMarkedForDeoptimizationFirstBit +
kMarkedForDeoptimizationBitCount <= 32);
@@ -4872,8 +4908,6 @@ class Code: public HeapObject {
kStackSlotsFirstBit, kStackSlotsBitCount> {}; // NOLINT
class UnaryOpTypeField: public BitField<int,
kUnaryOpTypeFirstBit, kUnaryOpTypeBitCount> {}; // NOLINT
- class ToBooleanStateField: public BitField<int,
- kToBooleanStateFirstBit, kToBooleanStateBitCount> {}; // NOLINT
class HasFunctionCacheField: public BitField<bool,
kHasFunctionCacheFirstBit, kHasFunctionCacheBitCount> {}; // NOLINT
class MarkedForDeoptimizationField: public BitField<bool,
@@ -5071,6 +5105,7 @@ class Map: public HeapObject {
class OwnsDescriptors: public BitField<bool, 25, 1> {};
class IsObserved: public BitField<bool, 26, 1> {};
class Deprecated: public BitField<bool, 27, 1> {};
+ class IsFrozen: public BitField<bool, 28, 1> {};
// Tells whether the object in the prototype property will be used
// for instances created from this function. If the prototype
@@ -5373,6 +5408,8 @@ class Map: public HeapObject {
inline void set_owns_descriptors(bool is_shared);
inline bool is_observed();
inline void set_is_observed(bool is_observed);
+ inline void freeze();
+ inline bool is_frozen();
inline void deprecate();
inline bool is_deprecated();
inline bool CanBeDeprecated();
@@ -5387,9 +5424,9 @@ class Map: public HeapObject {
MUST_USE_RESULT MaybeObject* CopyDropDescriptors();
MUST_USE_RESULT MaybeObject* CopyReplaceDescriptors(
DescriptorArray* descriptors,
- Name* name,
TransitionFlag flag,
- int descriptor_index);
+ Name* name = NULL,
+ SimpleTransitionFlag simple_flag = FULL_TRANSITION);
MUST_USE_RESULT MaybeObject* CopyInstallDescriptors(
int new_descriptor,
DescriptorArray* descriptors);
@@ -5426,6 +5463,13 @@ class Map: public HeapObject {
int NumberOfDescribedProperties(DescriptorFlag which = OWN_DESCRIPTORS,
PropertyAttributes filter = NONE);
+ // Returns the number of slots allocated for the initial properties
+ // backing storage for instances of this map.
+ int InitialPropertiesLength() {
+ return pre_allocated_property_fields() + unused_property_fields() -
+ inobject_properties();
+ }
+
// Casting.
static inline Map* cast(Object* obj);
@@ -5645,6 +5689,26 @@ class Struct: public HeapObject {
};
+// A simple one-element struct, useful where smis need to be boxed.
+class Box : public Struct {
+ public:
+ // [value]: the boxed contents.
+ DECL_ACCESSORS(value, Object)
+
+ static inline Box* cast(Object* obj);
+
+ // Dispatched behavior.
+ DECLARE_PRINTER(Box)
+ DECLARE_VERIFIER(Box)
+
+ static const int kValueOffset = HeapObject::kHeaderSize;
+ static const int kSize = kValueOffset + kPointerSize;
+
+ private:
+ DISALLOW_IMPLICIT_CONSTRUCTORS(Box);
+};
+
+
// Script describes a script which has been added to the VM.
class Script: public Struct {
public:
@@ -5829,9 +5893,6 @@ class SharedFunctionInfo: public HeapObject {
// Trims the optimized code map after entries have been removed.
void TrimOptimizedCodeMap(int shrink_by);
- // Zaps the contents of backing optimized code map.
- void ZapOptimizedCodeMap();
-
// Add a new entry to the optimized code map.
MUST_USE_RESULT MaybeObject* AddToOptimizedCodeMap(Context* native_context,
Code* code,
@@ -6056,18 +6117,6 @@ class SharedFunctionInfo: public HeapObject {
inline int ic_age();
inline void set_ic_age(int age);
- // Add information on assignments of the form this.x = ...;
- void SetThisPropertyAssignmentsInfo(
- bool has_only_simple_this_property_assignments,
- FixedArray* this_property_assignments);
-
- // Clear information on assignments of the form this.x = ...;
- void ClearThisPropertyAssignmentsInfo();
-
- // Indicate that this function only consists of assignments of the form
- // this.x = y; where y is either a constant or refers to an argument.
- inline bool has_only_simple_this_property_assignments();
-
// Indicates if this function can be lazy compiled.
// This is used to determine if we can safely flush code from a function
// when doing GC if we expect that the function will no longer be used.
@@ -6168,24 +6217,6 @@ class SharedFunctionInfo: public HeapObject {
// disabled).
bool VerifyBailoutId(BailoutId id);
- // Check whether a inlined constructor can be generated with the given
- // prototype.
- bool CanGenerateInlineConstructor(Object* prototype);
-
- // Prevents further attempts to generate inline constructors.
- // To be called if generation failed for any reason.
- void ForbidInlineConstructor();
-
- // For functions which only contains this property assignments this provides
- // access to the names for the properties assigned.
- DECL_ACCESSORS(this_property_assignments, Object)
- inline int this_property_assignments_count();
- inline void set_this_property_assignments_count(int value);
- String* GetThisPropertyAssignmentName(int index);
- bool IsThisPropertyAssignmentArgument(int index);
- int GetThisPropertyAssignmentArgument(int index);
- Object* GetThisPropertyAssignmentConstant(int index);
-
// [source code]: Source code for the function.
bool HasSourceCode();
Handle<Object> GetSourceCode();
@@ -6255,12 +6286,10 @@ class SharedFunctionInfo: public HeapObject {
static const int kInferredNameOffset = kDebugInfoOffset + kPointerSize;
static const int kInitialMapOffset =
kInferredNameOffset + kPointerSize;
- static const int kThisPropertyAssignmentsOffset =
- kInitialMapOffset + kPointerSize;
// ast_node_count is a Smi field. It could be grouped with another Smi field
// into a PSEUDO_SMI_ACCESSORS pair (on x64), if one becomes available.
static const int kAstNodeCountOffset =
- kThisPropertyAssignmentsOffset + kPointerSize;
+ kInitialMapOffset + kPointerSize;
#if V8_HOST_ARCH_32_BIT
// Smi fields.
static const int kLengthOffset =
@@ -6278,10 +6307,7 @@ class SharedFunctionInfo: public HeapObject {
kEndPositionOffset + kPointerSize;
static const int kCompilerHintsOffset =
kFunctionTokenPositionOffset + kPointerSize;
- static const int kThisPropertyAssignmentsCountOffset =
- kCompilerHintsOffset + kPointerSize;
- static const int kOptCountOffset =
- kThisPropertyAssignmentsCountOffset + kPointerSize;
+ static const int kOptCountOffset = kCompilerHintsOffset + kPointerSize;
static const int kCountersOffset = kOptCountOffset + kPointerSize;
static const int kStressDeoptCounterOffset = kCountersOffset + kPointerSize;
@@ -6317,10 +6343,7 @@ class SharedFunctionInfo: public HeapObject {
static const int kCompilerHintsOffset =
kFunctionTokenPositionOffset + kIntSize;
- static const int kThisPropertyAssignmentsCountOffset =
- kCompilerHintsOffset + kIntSize;
- static const int kOptCountOffset =
- kThisPropertyAssignmentsCountOffset + kIntSize;
+ static const int kOptCountOffset = kCompilerHintsOffset + kIntSize;
static const int kCountersOffset = kOptCountOffset + kIntSize;
static const int kStressDeoptCounterOffset = kCountersOffset + kIntSize;
@@ -6344,7 +6367,7 @@ class SharedFunctionInfo: public HeapObject {
static const int kAlignedSize = POINTER_SIZE_ALIGN(kSize);
typedef FixedBodyDescriptor<kNameOffset,
- kThisPropertyAssignmentsOffset + kPointerSize,
+ kInitialMapOffset + kPointerSize,
kSize> BodyDescriptor;
// Bit positions in start_position_and_type.
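The offset edits above are all the same mechanical splice: each k*Offset is defined as the previous offset plus one field width, so deleting the this-property-assignments fields means re-threading the chain (kAstNodeCountOffset now follows kInitialMapOffset directly) and shrinking the BodyDescriptor's pointer-visited region to match. In miniature, with a stand-in base offset:

    const int kPointerSize        = 8;                                 // 64-bit assumed
    const int kInferredNameOffset = 0;                                 // stand-in base
    const int kInitialMapOffset   = kInferredNameOffset + kPointerSize;
    const int kAstNodeCountOffset = kInitialMapOffset + kPointerSize;  // removed field: chain re-threaded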
@@ -6360,7 +6383,6 @@ class SharedFunctionInfo: public HeapObject {
static const int kCodeAgeMask = (1 << kCodeAgeSize) - 1;
enum CompilerHints {
- kHasOnlySimpleThisPropertyAssignments,
kAllowLazyCompilation,
kAllowLazyCompilationWithoutContext,
kLiveObjectsMayExist,
@@ -6489,7 +6511,7 @@ class JSGeneratorObject: public JSObject {
static const int kSize = kStackHandlerIndexOffset + kPointerSize;
// Resume mode, for use by runtime functions.
- enum ResumeMode { SEND, THROW };
+ enum ResumeMode { NEXT, THROW };
// Yielding from a generator returns an object with the following inobject
// properties. See Context::generator_result_map() for the map.
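Renaming SEND to NEXT tracks the evolving generator protocol, where a paused generator is resumed with iterator.next(value) rather than the earlier send(value); THROW still re-raises its argument at the suspended yield. A sketch of how a resume helper might branch on the renamed enum (illustrative, not the actual runtime function):

    enum ResumeMode { NEXT, THROW };
    const char* Describe(ResumeMode mode) {
      return mode == NEXT ? "deliver the value as the result of the paused yield"
                          : "re-raise the value as an exception at the paused yield";
    }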
@@ -8501,7 +8523,8 @@ class Oddball: public HeapObject {
static const byte kNull = 3;
static const byte kArgumentMarker = 4;
static const byte kUndefined = 5;
- static const byte kOther = 6;
+ static const byte kUninitialized = 6;
+ static const byte kOther = 7;
typedef FixedBodyDescriptor<kToStringOffset,
kToNumberOffset + kPointerSize,
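kUninitialized introduces a sentinel that user code can never produce, which the parser hunks below rely on: a boilerplate slot whose value is genuinely unknown is now marked uninitialized instead of undefined, so a literal undefined written by the user is no longer mistaken for "no compile-time value". As a rough analogue:

    enum BoilerplateValue {
      kKnownConstant,      // a literal the compiler can bake in
      kUndefinedConstant,  // the user actually wrote 'undefined'
      kUninitialized       // the compiler has not determined the value
    };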
@@ -8757,18 +8780,42 @@ class JSArrayBuffer: public JSObject {
// [byte_length]: length in bytes
DECL_ACCESSORS(byte_length, Object)
+ // [flags]
+ DECL_ACCESSORS(flag, Smi)
+
+ inline bool is_external();
+ inline void set_is_external(bool value);
+
+ // [weak_next]: linked list of array buffers.
+ DECL_ACCESSORS(weak_next, Object)
+
+ // [weak_first_array]: weak linked list of typed arrays.
+ DECL_ACCESSORS(weak_first_array, Object)
+
// Casting.
static inline JSArrayBuffer* cast(Object* obj);
+ // Neutering. Only neuters the buffer, not associated typed arrays.
+ void Neuter();
+
// Dispatched behavior.
DECLARE_PRINTER(JSArrayBuffer)
DECLARE_VERIFIER(JSArrayBuffer)
static const int kBackingStoreOffset = JSObject::kHeaderSize;
static const int kByteLengthOffset = kBackingStoreOffset + kPointerSize;
- static const int kSize = kByteLengthOffset + kPointerSize;
+ static const int kFlagOffset = kByteLengthOffset + kPointerSize;
+ static const int kWeakNextOffset = kFlagOffset + kPointerSize;
+ static const int kWeakFirstArrayOffset = kWeakNextOffset + kPointerSize;
+ static const int kSize = kWeakFirstArrayOffset + kPointerSize;
+
+ static const int kSizeWithInternalFields =
+ kSize + v8::ArrayBuffer::kInternalFieldCount * kPointerSize;
private:
+ // Bit position in a flag
+ static const int kIsExternalBit = 0;
+
DISALLOW_IMPLICIT_CONSTRUCTORS(JSArrayBuffer);
};
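JSArrayBuffer grows three fields: a smi flag word (currently holding only the is_external bit), a weak_next link threading all live array buffers, and a weak_first_array head for the typed arrays viewing this buffer, which is what lets neutering eventually find every view. Decoding the flag is ordinary bit arithmetic; a sketch in the style of the BitField helpers, using kIsExternalBit == 0 as declared above:

    bool DecodeIsExternal(int flag) { return (flag & (1 << 0)) != 0; }  // read bit 0
    int  EncodeIsExternal(int flag) { return flag | (1 << 0); }         // set bit 0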
@@ -8787,6 +8834,12 @@ class JSTypedArray: public JSObject {
// [length]: length of typed array in elements.
DECL_ACCESSORS(length, Object)
+ // [weak_next]: linked list of typed arrays over the same array buffer.
+ DECL_ACCESSORS(weak_next, Object)
+
+ // Neutering. Only neuters this typed array.
+ void Neuter();
+
// Casting.
static inline JSTypedArray* cast(Object* obj);
@@ -8801,7 +8854,8 @@ class JSTypedArray: public JSObject {
static const int kByteOffsetOffset = kBufferOffset + kPointerSize;
static const int kByteLengthOffset = kByteOffsetOffset + kPointerSize;
static const int kLengthOffset = kByteLengthOffset + kPointerSize;
- static const int kSize = kLengthOffset + kPointerSize;
+ static const int kWeakNextOffset = kLengthOffset + kPointerSize;
+ static const int kSize = kWeakNextOffset + kPointerSize;
private:
DISALLOW_IMPLICIT_CONSTRUCTORS(JSTypedArray);
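Each typed array carries a weak_next link so that all views over one buffer form an intrusive singly linked list hanging off the buffer's weak_first_array; JSTypedArray::Neuter() detaches a single view, and a buffer-level neuter can walk the list. The traversal in miniature, with stand-in types:

    struct View {
      View* weak_next = nullptr;  // stand-in for the weak_next field above
      bool  neutered  = false;
    };
    void NeuterAllViews(View* weak_first_array) {
      for (View* v = weak_first_array; v != nullptr; v = v->weak_next)
        v->neutered = true;       // stand-in for JSTypedArray::Neuter()
    }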
diff --git a/deps/v8/src/optimizing-compiler-thread.cc b/deps/v8/src/optimizing-compiler-thread.cc
index 1e2e0a85df..b2abc813ab 100644
--- a/deps/v8/src/optimizing-compiler-thread.cc
+++ b/deps/v8/src/optimizing-compiler-thread.cc
@@ -42,6 +42,9 @@ void OptimizingCompilerThread::Run() {
thread_id_ = ThreadId::Current().ToInteger();
#endif
Isolate::SetIsolateThreadLocals(isolate_, NULL);
+ DisallowHeapAllocation no_allocation;
+ DisallowHandleAllocation no_handles;
+ DisallowHandleDereference no_deref;
int64_t epoch = 0;
if (FLAG_trace_parallel_recompilation) epoch = OS::Ticks();
@@ -89,6 +92,7 @@ void OptimizingCompilerThread::CompileNext() {
// Mark it for installing before queuing so that we can be sure of the write
// order: marking first and (after being queued) installing code second.
{ Heap::RelocationLock relocation_lock(isolate_->heap());
+ AllowHandleDereference ahd;
optimizing_compiler->info()->closure()->MarkForInstallingRecompiledCode();
}
output_queue_.Enqueue(optimizing_compiler);
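The three Disallow* declarations pin the recompilation thread to a no-heap-allocation, no-handle discipline for its whole run loop, and the AllowHandleDereference below re-opens exactly one narrow window, under the relocation lock, where marking the closure is permitted. The underlying RAII pattern in miniature (illustrative and single-threaded, not V8's assert-scope implementation):

    struct DisallowX {
      DisallowX()  { ++depth; }
      ~DisallowX() { --depth; }
      static int depth;  // depth > 0 means X is forbidden here
    };
    int DisallowX::depth = 0;
    struct AllowX {      // temporarily re-permits X inside a Disallow scope
      int saved;
      AllowX()  : saved(DisallowX::depth) { DisallowX::depth = 0; }
      ~AllowX() { DisallowX::depth = saved; }
    };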
diff --git a/deps/v8/src/parser.cc b/deps/v8/src/parser.cc
index 5eec342168..fa24bf703b 100644
--- a/deps/v8/src/parser.cc
+++ b/deps/v8/src/parser.cc
@@ -202,9 +202,8 @@ RegExpTree* RegExpBuilder::ToRegExp() {
}
-void RegExpBuilder::AddQuantifierToAtom(int min,
- int max,
- RegExpQuantifier::Type type) {
+void RegExpBuilder::AddQuantifierToAtom(
+ int min, int max, RegExpQuantifier::QuantifierType quantifier_type) {
if (pending_empty_) {
pending_empty_ = false;
return;
@@ -244,7 +243,8 @@ void RegExpBuilder::AddQuantifierToAtom(int min,
UNREACHABLE();
return;
}
- terms_.Add(new(zone()) RegExpQuantifier(min, max, type, atom), zone());
+ terms_.Add(
+ new(zone()) RegExpQuantifier(min, max, quantifier_type, atom), zone());
LAST(ADD_TERM);
}
@@ -410,8 +410,8 @@ unsigned* ScriptDataImpl::ReadAddress(int position) {
}
-Scope* Parser::NewScope(Scope* parent, ScopeType type) {
- Scope* result = new(zone()) Scope(parent, type, zone());
+Scope* Parser::NewScope(Scope* parent, ScopeType scope_type) {
+ Scope* result = new(zone()) Scope(parent, scope_type, zone());
result->Initialize();
return result;
}
@@ -490,8 +490,6 @@ Parser::FunctionState::FunctionState(Parser* parser,
: next_materialized_literal_index_(JSFunction::kLiteralsPrefixSize),
next_handler_index_(0),
expected_property_count_(0),
- only_simple_this_property_assignments_(false),
- this_property_assignments_(isolate->factory()->empty_fixed_array()),
generator_object_variable_(NULL),
parser_(parser),
outer_function_state_(parser->current_function_state_),
@@ -551,6 +549,7 @@ Parser::Parser(CompilationInfo* info)
allow_natives_syntax_(false),
allow_lazy_(false),
allow_generators_(false),
+ allow_for_of_(false),
stack_overflow_(false),
parenthesized_function_(false),
zone_(info->zone()),
@@ -562,6 +561,7 @@ Parser::Parser(CompilationInfo* info)
set_allow_natives_syntax(FLAG_allow_natives_syntax || info->is_native());
set_allow_lazy(false); // Must be explicitly enabled.
set_allow_generators(FLAG_harmony_generators);
+ set_allow_for_of(FLAG_harmony_iteration);
}
@@ -675,8 +675,6 @@ FunctionLiteral* Parser::DoParseProgram(CompilationInfo* info,
function_state.materialized_literal_count(),
function_state.expected_property_count(),
function_state.handler_count(),
- function_state.only_simple_this_property_assignments(),
- function_state.this_property_assignments(),
0,
FunctionLiteral::kNoDuplicateParameters,
FunctionLiteral::ANONYMOUS_EXPRESSION,
@@ -762,7 +760,7 @@ FunctionLiteral* Parser::ParseLazy(Utf16CharacterStream* source,
info()->is_extended_mode());
ASSERT(info()->language_mode() == shared_info->language_mode());
scope->SetLanguageMode(shared_info->language_mode());
- FunctionLiteral::Type type = shared_info->is_expression()
+ FunctionLiteral::FunctionType function_type = shared_info->is_expression()
? (shared_info->is_anonymous()
? FunctionLiteral::ANONYMOUS_EXPRESSION
: FunctionLiteral::NAMED_EXPRESSION)
@@ -772,7 +770,7 @@ FunctionLiteral* Parser::ParseLazy(Utf16CharacterStream* source,
false, // Strict mode name already checked.
shared_info->is_generator(),
RelocInfo::kNoPosition,
- type,
+ function_type,
&ok);
// Make sure the results agree.
ASSERT(ok == (result != NULL));
@@ -803,20 +801,20 @@ Handle<String> Parser::GetSymbol() {
}
-void Parser::ReportMessage(const char* type, Vector<const char*> args) {
+void Parser::ReportMessage(const char* message, Vector<const char*> args) {
Scanner::Location source_location = scanner().location();
- ReportMessageAt(source_location, type, args);
+ ReportMessageAt(source_location, message, args);
}
-void Parser::ReportMessage(const char* type, Vector<Handle<String> > args) {
+void Parser::ReportMessage(const char* message, Vector<Handle<String> > args) {
Scanner::Location source_location = scanner().location();
- ReportMessageAt(source_location, type, args);
+ ReportMessageAt(source_location, message, args);
}
void Parser::ReportMessageAt(Scanner::Location source_location,
- const char* type,
+ const char* message,
Vector<const char*> args) {
MessageLocation location(script_,
source_location.beg_pos,
@@ -828,13 +826,13 @@ void Parser::ReportMessageAt(Scanner::Location source_location,
elements->set(i, *arg_string);
}
Handle<JSArray> array = factory->NewJSArrayWithElements(elements);
- Handle<Object> result = factory->NewSyntaxError(type, array);
+ Handle<Object> result = factory->NewSyntaxError(message, array);
isolate()->Throw(*result, &location);
}
void Parser::ReportMessageAt(Scanner::Location source_location,
- const char* type,
+ const char* message,
Vector<Handle<String> > args) {
MessageLocation location(script_,
source_location.beg_pos,
@@ -845,183 +843,11 @@ void Parser::ReportMessageAt(Scanner::Location source_location,
elements->set(i, *args[i]);
}
Handle<JSArray> array = factory->NewJSArrayWithElements(elements);
- Handle<Object> result = factory->NewSyntaxError(type, array);
+ Handle<Object> result = factory->NewSyntaxError(message, array);
isolate()->Throw(*result, &location);
}
-// A ThisNamedPropertyAssignmentFinder finds and marks statements of the form
-// this.x = ...;, where x is a named property. It also determines whether a
-// function contains only assignments of this type.
-class ThisNamedPropertyAssignmentFinder {
- public:
- ThisNamedPropertyAssignmentFinder(Isolate* isolate, Zone* zone)
- : isolate_(isolate),
- only_simple_this_property_assignments_(true),
- names_(0, zone),
- assigned_arguments_(0, zone),
- assigned_constants_(0, zone),
- zone_(zone) {
- }
-
- static Assignment* AsAssignment(Statement* stat) {
- if (stat == NULL) return NULL;
- ExpressionStatement* exp_stat = stat->AsExpressionStatement();
- if (exp_stat == NULL) return NULL;
- return exp_stat->expression()->AsAssignment();
- }
-
- void Update(Scope* scope, Statement* stat) {
- // Bail out if function already has property assignment that are
- // not simple this property assignments.
- if (!only_simple_this_property_assignments_) {
- return;
- }
-
- // Check whether this statement is of the form this.x = ...;
- Assignment* assignment = AsAssignment(stat);
- if (IsThisPropertyAssignment(assignment)) {
- HandleThisPropertyAssignment(scope, assignment);
- } else {
- only_simple_this_property_assignments_ = false;
- }
- }
-
- // Returns whether only statements of the form this.x = y; where y is either a
- // constant or a function argument was encountered.
- bool only_simple_this_property_assignments() {
- return only_simple_this_property_assignments_;
- }
-
- // Returns a fixed array containing three elements for each assignment of the
- // form this.x = y;
- Handle<FixedArray> GetThisPropertyAssignments() {
- if (names_.is_empty()) {
- return isolate_->factory()->empty_fixed_array();
- }
- ASSERT_EQ(names_.length(), assigned_arguments_.length());
- ASSERT_EQ(names_.length(), assigned_constants_.length());
- Handle<FixedArray> assignments =
- isolate_->factory()->NewFixedArray(names_.length() * 3);
- for (int i = 0; i < names_.length(); ++i) {
- assignments->set(i * 3, *names_[i]);
- assignments->set(i * 3 + 1, Smi::FromInt(assigned_arguments_[i]));
- assignments->set(i * 3 + 2, *assigned_constants_[i]);
- }
- return assignments;
- }
-
- private:
- bool IsThisPropertyAssignment(Assignment* assignment) {
- if (assignment != NULL) {
- Property* property = assignment->target()->AsProperty();
- return assignment->op() == Token::ASSIGN
- && property != NULL
- && property->obj()->AsVariableProxy() != NULL
- && property->obj()->AsVariableProxy()->is_this();
- }
- return false;
- }
-
- void HandleThisPropertyAssignment(Scope* scope, Assignment* assignment) {
- // Check that the property assigned to is a named property, which is not
- // __proto__.
- Property* property = assignment->target()->AsProperty();
- ASSERT(property != NULL);
- Literal* literal = property->key()->AsLiteral();
- uint32_t dummy;
- if (literal != NULL &&
- literal->handle()->IsString() &&
- !String::cast(*(literal->handle()))->Equals(
- isolate_->heap()->proto_string()) &&
- !String::cast(*(literal->handle()))->AsArrayIndex(&dummy)) {
- Handle<String> key = Handle<String>::cast(literal->handle());
-
- // Check whether the value assigned is either a constant or matches the
- // name of one of the arguments to the function.
- if (assignment->value()->AsLiteral() != NULL) {
- // Constant assigned.
- Literal* literal = assignment->value()->AsLiteral();
- AssignmentFromConstant(key, literal->handle());
- return;
- } else if (assignment->value()->AsVariableProxy() != NULL) {
- // Variable assigned.
- Handle<String> name =
- assignment->value()->AsVariableProxy()->name();
- // Check whether the variable assigned matches an argument name.
- for (int i = 0; i < scope->num_parameters(); i++) {
- if (*scope->parameter(i)->name() == *name) {
- // Assigned from function argument.
- AssignmentFromParameter(key, i);
- return;
- }
- }
- }
- }
- // It is not a simple "this.x = value;" assignment with a constant
- // or parameter value.
- AssignmentFromSomethingElse();
- }
-
-
-
-
- // We will potentially reorder the property assignments, so they must be
- // simple enough that the ordering does not matter.
- void AssignmentFromParameter(Handle<String> name, int index) {
- EnsureInitialized();
- for (int i = 0; i < names_.length(); ++i) {
- if (name->Equals(*names_[i])) {
- assigned_arguments_[i] = index;
- assigned_constants_[i] = isolate_->factory()->undefined_value();
- return;
- }
- }
- names_.Add(name, zone());
- assigned_arguments_.Add(index, zone());
- assigned_constants_.Add(isolate_->factory()->undefined_value(), zone());
- }
-
- void AssignmentFromConstant(Handle<String> name, Handle<Object> value) {
- EnsureInitialized();
- for (int i = 0; i < names_.length(); ++i) {
- if (name->Equals(*names_[i])) {
- assigned_arguments_[i] = -1;
- assigned_constants_[i] = value;
- return;
- }
- }
- names_.Add(name, zone());
- assigned_arguments_.Add(-1, zone());
- assigned_constants_.Add(value, zone());
- }
-
- void AssignmentFromSomethingElse() {
- // The this assignment is not a simple one.
- only_simple_this_property_assignments_ = false;
- }
-
- void EnsureInitialized() {
- if (names_.capacity() == 0) {
- ASSERT(assigned_arguments_.capacity() == 0);
- ASSERT(assigned_constants_.capacity() == 0);
- names_.Initialize(4, zone());
- assigned_arguments_.Initialize(4, zone());
- assigned_constants_.Initialize(4, zone());
- }
- }
-
- Zone* zone() const { return zone_; }
-
- Isolate* isolate_;
- bool only_simple_this_property_assignments_;
- ZoneStringList names_;
- ZoneList<int> assigned_arguments_;
- ZoneObjectList assigned_constants_;
- Zone* zone_;
-};
-
-
void* Parser::ParseSourceElements(ZoneList<Statement*>* processor,
int end_token,
bool is_eval,
@@ -1037,8 +863,6 @@ void* Parser::ParseSourceElements(ZoneList<Statement*>* processor,
TargetScope scope(&this->target_stack_);
ASSERT(processor != NULL);
- ThisNamedPropertyAssignmentFinder this_property_assignment_finder(isolate(),
- zone());
bool directive_prologue = true; // Parsing directive prologue.
while (peek() != end_token) {
@@ -1098,25 +922,9 @@ void* Parser::ParseSourceElements(ZoneList<Statement*>* processor,
}
}
- // Find and mark all assignments to named properties in this (this.x =)
- if (top_scope_->is_function_scope()) {
- this_property_assignment_finder.Update(top_scope_, stat);
- }
processor->Add(stat, zone());
}
- // Propagate the collected information on this property assignments.
- if (top_scope_->is_function_scope()) {
- bool only_simple_this_property_assignments =
- this_property_assignment_finder.only_simple_this_property_assignments()
- && top_scope_->declarations()->length() == 0;
- if (only_simple_this_property_assignments) {
- current_function_state_->SetThisPropertyAssignmentInfo(
- only_simple_this_property_assignments,
- this_property_assignment_finder.GetThisPropertyAssignments());
- }
- }
-
return 0;
}
@@ -1222,7 +1030,7 @@ Module* Parser::ParseModule(bool* ok) {
}
default: {
- ExpectContextualKeyword("at", CHECK_OK);
+ ExpectContextualKeyword(CStrVector("at"), CHECK_OK);
Module* result = ParseModuleUrl(CHECK_OK);
ExpectSemicolon(CHECK_OK);
return result;
@@ -1394,7 +1202,7 @@ Block* Parser::ParseImportDeclaration(bool* ok) {
names.Add(name, zone());
}
- ExpectContextualKeyword("from", CHECK_OK);
+ ExpectContextualKeyword(CStrVector("from"), CHECK_OK);
Module* module = ParseModuleSpecifier(CHECK_OK);
ExpectSemicolon(CHECK_OK);
@@ -1732,12 +1540,12 @@ void Parser::Declare(Declaration* declaration, bool resolve, bool* ok) {
*ok = false;
return;
}
- Handle<String> type_string =
+ Handle<String> message_string =
isolate()->factory()->NewStringFromUtf8(CStrVector("Variable"),
TENURED);
Expression* expression =
NewThrowTypeError(isolate()->factory()->redeclaration_string(),
- type_string, name);
+ message_string, name);
declaration_scope->SetIllegalRedeclaration(expression);
}
}
@@ -2539,8 +2347,9 @@ Statement* Parser::ParseReturnStatement(bool* ok) {
Scope* declaration_scope = top_scope_->DeclarationScope();
if (declaration_scope->is_global_scope() ||
declaration_scope->is_eval_scope()) {
- Handle<String> type = isolate()->factory()->illegal_return_string();
- Expression* throw_error = NewThrowSyntaxError(type, Handle<Object>::null());
+ Handle<String> message = isolate()->factory()->illegal_return_string();
+ Expression* throw_error =
+ NewThrowSyntaxError(message, Handle<Object>::null());
return factory()->NewExpressionStatement(throw_error);
}
return result;
@@ -2815,6 +2624,90 @@ WhileStatement* Parser::ParseWhileStatement(ZoneStringList* labels, bool* ok) {
}
+bool Parser::CheckInOrOf(ForEachStatement::VisitMode* visit_mode) {
+ if (Check(Token::IN)) {
+ *visit_mode = ForEachStatement::ENUMERATE;
+ return true;
+ } else if (allow_for_of() && CheckContextualKeyword(CStrVector("of"))) {
+ *visit_mode = ForEachStatement::ITERATE;
+ return true;
+ }
+ return false;
+}
+
+
+void Parser::InitializeForEachStatement(ForEachStatement* stmt,
+ Expression* each,
+ Expression* subject,
+ Statement* body) {
+ ForOfStatement* for_of = stmt->AsForOfStatement();
+
+ if (for_of != NULL) {
+ Factory* heap_factory = isolate()->factory();
+ Handle<String> iterator_str = heap_factory->InternalizeOneByteString(
+ STATIC_ASCII_VECTOR(".iterator"));
+ Handle<String> result_str = heap_factory->InternalizeOneByteString(
+ STATIC_ASCII_VECTOR(".result"));
+ Variable* iterator =
+ top_scope_->DeclarationScope()->NewTemporary(iterator_str);
+ Variable* result = top_scope_->DeclarationScope()->NewTemporary(result_str);
+
+ Expression* assign_iterator;
+ Expression* next_result;
+ Expression* result_done;
+ Expression* assign_each;
+
+ // var iterator = iterable;
+ {
+ Expression* iterator_proxy = factory()->NewVariableProxy(iterator);
+ assign_iterator = factory()->NewAssignment(
+ Token::ASSIGN, iterator_proxy, subject, RelocInfo::kNoPosition);
+ }
+
+ // var result = iterator.next();
+ {
+ Expression* iterator_proxy = factory()->NewVariableProxy(iterator);
+ Expression* next_literal =
+ factory()->NewLiteral(heap_factory->next_string());
+ Expression* next_property = factory()->NewProperty(
+ iterator_proxy, next_literal, RelocInfo::kNoPosition);
+ ZoneList<Expression*>* next_arguments =
+ new(zone()) ZoneList<Expression*>(0, zone());
+ Expression* next_call = factory()->NewCall(
+ next_property, next_arguments, RelocInfo::kNoPosition);
+ Expression* result_proxy = factory()->NewVariableProxy(result);
+ next_result = factory()->NewAssignment(
+ Token::ASSIGN, result_proxy, next_call, RelocInfo::kNoPosition);
+ }
+
+ // result.done
+ {
+ Expression* done_literal =
+ factory()->NewLiteral(heap_factory->done_string());
+ Expression* result_proxy = factory()->NewVariableProxy(result);
+ result_done = factory()->NewProperty(
+ result_proxy, done_literal, RelocInfo::kNoPosition);
+ }
+
+ // each = result.value
+ {
+ Expression* value_literal =
+ factory()->NewLiteral(heap_factory->value_string());
+ Expression* result_proxy = factory()->NewVariableProxy(result);
+ Expression* result_value = factory()->NewProperty(
+ result_proxy, value_literal, RelocInfo::kNoPosition);
+ assign_each = factory()->NewAssignment(
+ Token::ASSIGN, each, result_value, RelocInfo::kNoPosition);
+ }
+
+ for_of->Initialize(each, subject, body,
+ assign_iterator, next_result, result_done, assign_each);
+ } else {
+ stmt->Initialize(each, subject, body);
+ }
+}
+
+
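The four synthesized expressions are the whole desugaring: for (each of subject) body becomes an assignment of the iterable to a temporary, a result = iterator.next() call per trip, a result.done test, and an each = result.value assignment before the body runs. A self-contained C++ analogue of the protocol those expressions target (the engine's names appear only in the comments):

    #include <cstdio>
    struct Result   { bool done; int value; };
    struct Iterator {
      int i = 0, n = 3;
      Result next() { return i < n ? Result{false, i++} : Result{true, 0}; }
    };
    int main() {
      Iterator iterator;                    // assign_iterator: iterator = iterable
      for (;;) {
        Result result = iterator.next();    // next_result: result = iterator.next()
        if (result.done) break;             // result_done: result.done
        int each = result.value;            // assign_each: each = result.value
        std::printf("%d\n", each);          // body executes with 'each' bound
      }
      return 0;
    }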
Statement* Parser::ParseForStatement(ZoneStringList* labels, bool* ok) {
// ForStatement ::
// 'for' '(' Expression? ';' Expression? ';' Expression? ')' Statement
@@ -2835,21 +2728,21 @@ Statement* Parser::ParseForStatement(ZoneStringList* labels, bool* ok) {
Handle<String> name;
Block* variable_statement =
ParseVariableDeclarations(kForStatement, NULL, NULL, &name, CHECK_OK);
+ ForEachStatement::VisitMode mode;
- if (peek() == Token::IN && !name.is_null()) {
+ if (!name.is_null() && CheckInOrOf(&mode)) {
Interface* interface =
is_const ? Interface::NewConst() : Interface::NewValue();
- ForInStatement* loop = factory()->NewForInStatement(labels);
+ ForEachStatement* loop = factory()->NewForEachStatement(mode, labels);
Target target(&this->target_stack_, loop);
- Expect(Token::IN, CHECK_OK);
Expression* enumerable = ParseExpression(true, CHECK_OK);
Expect(Token::RPAREN, CHECK_OK);
VariableProxy* each =
top_scope_->NewUnresolved(factory(), name, interface);
Statement* body = ParseStatement(NULL, CHECK_OK);
- loop->Initialize(each, enumerable, body);
+ InitializeForEachStatement(loop, each, enumerable, body);
Block* result = factory()->NewBlock(NULL, 2, false);
result->AddStatement(variable_statement, zone());
result->AddStatement(loop, zone());
@@ -2869,7 +2762,9 @@ Statement* Parser::ParseForStatement(ZoneStringList* labels, bool* ok) {
ParseVariableDeclarations(kForStatement, &decl_props, NULL, &name,
CHECK_OK);
bool accept_IN = !name.is_null() && decl_props != kHasInitializers;
- if (peek() == Token::IN && accept_IN) {
+ ForEachStatement::VisitMode mode;
+
+ if (accept_IN && CheckInOrOf(&mode)) {
// Rewrite a for-in statement of the form
//
// for (let x in e) b
@@ -2891,11 +2786,10 @@ Statement* Parser::ParseForStatement(ZoneStringList* labels, bool* ok) {
Handle<String> tempname = heap_factory->InternalizeString(tempstr);
Variable* temp = top_scope_->DeclarationScope()->NewTemporary(tempname);
VariableProxy* temp_proxy = factory()->NewVariableProxy(temp);
- ForInStatement* loop = factory()->NewForInStatement(labels);
+ ForEachStatement* loop = factory()->NewForEachStatement(mode, labels);
Target target(&this->target_stack_, loop);
// The expression does not see the loop variable.
- Expect(Token::IN, CHECK_OK);
top_scope_ = saved_scope;
Expression* enumerable = ParseExpression(true, CHECK_OK);
top_scope_ = for_scope;
@@ -2912,7 +2806,7 @@ Statement* Parser::ParseForStatement(ZoneStringList* labels, bool* ok) {
body_block->AddStatement(variable_statement, zone());
body_block->AddStatement(assignment_statement, zone());
body_block->AddStatement(body, zone());
- loop->Initialize(temp_proxy, enumerable, body_block);
+ InitializeForEachStatement(loop, temp_proxy, enumerable, body_block);
top_scope_ = saved_scope;
for_scope->set_end_position(scanner().location().end_pos);
for_scope = for_scope->FinalizeBlockScope();
@@ -2925,25 +2819,26 @@ Statement* Parser::ParseForStatement(ZoneStringList* labels, bool* ok) {
}
} else {
Expression* expression = ParseExpression(false, CHECK_OK);
- if (peek() == Token::IN) {
+ ForEachStatement::VisitMode mode;
+
+ if (CheckInOrOf(&mode)) {
// Signal a reference error if the expression is an invalid
// left-hand side expression. We could report this as a syntax
// error here but for compatibility with JSC we choose to report
// the error at runtime.
if (expression == NULL || !expression->IsValidLeftHandSide()) {
- Handle<String> type =
+ Handle<String> message =
isolate()->factory()->invalid_lhs_in_for_in_string();
- expression = NewThrowReferenceError(type);
+ expression = NewThrowReferenceError(message);
}
- ForInStatement* loop = factory()->NewForInStatement(labels);
+ ForEachStatement* loop = factory()->NewForEachStatement(mode, labels);
Target target(&this->target_stack_, loop);
- Expect(Token::IN, CHECK_OK);
Expression* enumerable = ParseExpression(true, CHECK_OK);
Expect(Token::RPAREN, CHECK_OK);
Statement* body = ParseStatement(NULL, CHECK_OK);
- if (loop) loop->Initialize(expression, enumerable, body);
+ InitializeForEachStatement(loop, expression, enumerable, body);
top_scope_ = saved_scope;
for_scope->set_end_position(scanner().location().end_pos);
for_scope = for_scope->FinalizeBlockScope();
@@ -2997,10 +2892,10 @@ Statement* Parser::ParseForStatement(ZoneStringList* labels, bool* ok) {
result->AddStatement(init, zone());
result->AddStatement(loop, zone());
result->set_scope(for_scope);
- if (loop) loop->Initialize(NULL, cond, next, body);
+ loop->Initialize(NULL, cond, next, body);
return result;
} else {
- if (loop) loop->Initialize(init, cond, next, body);
+ loop->Initialize(init, cond, next, body);
return loop;
}
}
@@ -3050,9 +2945,9 @@ Expression* Parser::ParseAssignmentExpression(bool accept_IN, bool* ok) {
// runtime.
// TODO(ES5): Should change parsing for spec conformance.
if (expression == NULL || !expression->IsValidLeftHandSide()) {
- Handle<String> type =
+ Handle<String> message =
isolate()->factory()->invalid_lhs_in_assignment_string();
- expression = NewThrowReferenceError(type);
+ expression = NewThrowReferenceError(message);
}
if (!top_scope_->is_classic_mode()) {
@@ -3320,9 +3215,9 @@ Expression* Parser::ParseUnaryExpression(bool* ok) {
// error here but for compatibility with JSC we choose to report the
// error at runtime.
if (expression == NULL || !expression->IsValidLeftHandSide()) {
- Handle<String> type =
+ Handle<String> message =
isolate()->factory()->invalid_lhs_in_prefix_op_string();
- expression = NewThrowReferenceError(type);
+ expression = NewThrowReferenceError(message);
}
if (!top_scope_->is_classic_mode()) {
@@ -3355,9 +3250,9 @@ Expression* Parser::ParsePostfixExpression(bool* ok) {
// error here but for compatibility with JSC we choose to report the
// error at runtime.
if (expression == NULL || !expression->IsValidLeftHandSide()) {
- Handle<String> type =
+ Handle<String> message =
isolate()->factory()->invalid_lhs_in_postfix_op_string();
- expression = NewThrowReferenceError(type);
+ expression = NewThrowReferenceError(message);
}
if (!top_scope_->is_classic_mode()) {
@@ -3435,6 +3330,7 @@ Expression* Parser::ParseLeftHandSideExpression(bool* ok) {
top_scope_->DeclarationScope()->RecordEvalCall();
}
result = factory()->NewCall(result, args, pos);
+ if (fni_ != NULL) fni_->RemoveLastFunction();
break;
}
@@ -3515,14 +3411,14 @@ Expression* Parser::ParseMemberWithNewPrefixesExpression(PositionStack* stack,
name = ParseIdentifierOrStrictReservedWord(&is_strict_reserved_name,
CHECK_OK);
}
- FunctionLiteral::Type type = name.is_null()
+ FunctionLiteral::FunctionType function_type = name.is_null()
? FunctionLiteral::ANONYMOUS_EXPRESSION
: FunctionLiteral::NAMED_EXPRESSION;
result = ParseFunctionLiteral(name,
is_strict_reserved_name,
is_generator,
function_token_position,
- type,
+ function_type,
CHECK_OK);
} else {
result = ParsePrimaryExpression(CHECK_OK);
@@ -3785,7 +3681,7 @@ Expression* Parser::ParseArrayLiteral(bool* ok) {
Handle<Object> boilerplate_value = GetBoilerplateValue(values->at(i));
if (boilerplate_value->IsTheHole()) {
is_holey = true;
- } else if (boilerplate_value->IsUndefined()) {
+ } else if (boilerplate_value->IsUninitialized()) {
is_simple = false;
JSObject::SetOwnElement(
array, i, handle(Smi::FromInt(0), isolate()), kNonStrictMode);
@@ -3844,30 +3740,32 @@ bool CompileTimeValue::ArrayLiteralElementNeedsInitialization(
Handle<FixedArray> CompileTimeValue::GetValue(Expression* expression) {
+ Factory* factory = Isolate::Current()->factory();
ASSERT(IsCompileTimeValue(expression));
- Handle<FixedArray> result = FACTORY->NewFixedArray(2, TENURED);
+ Handle<FixedArray> result = factory->NewFixedArray(2, TENURED);
ObjectLiteral* object_literal = expression->AsObjectLiteral();
if (object_literal != NULL) {
ASSERT(object_literal->is_simple());
if (object_literal->fast_elements()) {
- result->set(kTypeSlot, Smi::FromInt(OBJECT_LITERAL_FAST_ELEMENTS));
+ result->set(kLiteralTypeSlot, Smi::FromInt(OBJECT_LITERAL_FAST_ELEMENTS));
} else {
- result->set(kTypeSlot, Smi::FromInt(OBJECT_LITERAL_SLOW_ELEMENTS));
+ result->set(kLiteralTypeSlot, Smi::FromInt(OBJECT_LITERAL_SLOW_ELEMENTS));
}
result->set(kElementsSlot, *object_literal->constant_properties());
} else {
ArrayLiteral* array_literal = expression->AsArrayLiteral();
ASSERT(array_literal != NULL && array_literal->is_simple());
- result->set(kTypeSlot, Smi::FromInt(ARRAY_LITERAL));
+ result->set(kLiteralTypeSlot, Smi::FromInt(ARRAY_LITERAL));
result->set(kElementsSlot, *array_literal->constant_elements());
}
return result;
}
-CompileTimeValue::Type CompileTimeValue::GetType(Handle<FixedArray> value) {
- Smi* type_value = Smi::cast(value->get(kTypeSlot));
- return static_cast<Type>(type_value->value());
+CompileTimeValue::LiteralType CompileTimeValue::GetLiteralType(
+ Handle<FixedArray> value) {
+ Smi* literal_type = Smi::cast(value->get(kLiteralTypeSlot));
+ return static_cast<LiteralType>(literal_type->value());
}
@@ -3883,7 +3781,7 @@ Handle<Object> Parser::GetBoilerplateValue(Expression* expression) {
if (CompileTimeValue::IsCompileTimeValue(expression)) {
return CompileTimeValue::GetValue(expression);
}
- return isolate()->factory()->undefined_value();
+ return isolate()->factory()->uninitialized_value();
}
// Validation per 11.1.5 Object Initialiser
@@ -3994,13 +3892,17 @@ void Parser::BuildObjectLiteralConstantProperties(
Handle<Object> key = property->key()->handle();
Handle<Object> value = GetBoilerplateValue(property->value());
- // Ensure objects with doubles are always treated as nested objects.
+ // Ensure objects that may, at any point in time, contain fields with double
+ // representation are always treated as nested objects. This is true for
+ // computed fields (value is undefined), and smi and double literals
+ // (value->IsNumber()).
// TODO(verwaest): Remove once we can store them inline.
- if (FLAG_track_double_fields && value->IsNumber()) {
+ if (FLAG_track_double_fields &&
+ (value->IsNumber() || value->IsUninitialized())) {
*may_store_doubles = true;
}
- is_simple_acc = is_simple_acc && !value->IsUndefined();
+ is_simple_acc = is_simple_acc && !value->IsUninitialized();
// Keep track of the number of elements in the object literal and
// the largest element index. If the largest element index is
@@ -4355,12 +4257,13 @@ class SingletonLogger : public ParserRecorder {
};
-FunctionLiteral* Parser::ParseFunctionLiteral(Handle<String> function_name,
- bool name_is_strict_reserved,
- bool is_generator,
- int function_token_position,
- FunctionLiteral::Type type,
- bool* ok) {
+FunctionLiteral* Parser::ParseFunctionLiteral(
+ Handle<String> function_name,
+ bool name_is_strict_reserved,
+ bool is_generator,
+ int function_token_position,
+ FunctionLiteral::FunctionType function_type,
+ bool* ok) {
// Function ::
// '(' FormalParameterList? ')' '{' FunctionBody '}'
@@ -4378,15 +4281,14 @@ FunctionLiteral* Parser::ParseFunctionLiteral(Handle<String> function_name,
// Function declarations are function scoped in normal mode, so they are
// hoisted. In harmony block scoping mode they are block scoped, so they
// are not hoisted.
- Scope* scope = (type == FunctionLiteral::DECLARATION && !is_extended_mode())
+ Scope* scope =
+ (function_type == FunctionLiteral::DECLARATION && !is_extended_mode())
? NewScope(top_scope_->DeclarationScope(), FUNCTION_SCOPE)
: NewScope(top_scope_, FUNCTION_SCOPE);
ZoneList<Statement*>* body = NULL;
int materialized_literal_count = -1;
int expected_property_count = -1;
int handler_count = 0;
- bool only_simple_this_property_assignments;
- Handle<FixedArray> this_property_assignments;
FunctionLiteral::ParameterFlag duplicate_parameters =
FunctionLiteral::kNoDuplicateParameters;
FunctionLiteral::IsParenthesizedFlag parenthesized = parenthesized_function_
@@ -4466,7 +4368,7 @@ FunctionLiteral* Parser::ParseFunctionLiteral(Handle<String> function_name,
// instead of Variables and Proxis as is the case now.
Variable* fvar = NULL;
Token::Value fvar_init_op = Token::INIT_CONST;
- if (type == FunctionLiteral::NAMED_EXPRESSION) {
+ if (function_type == FunctionLiteral::NAMED_EXPRESSION) {
if (is_extended_mode()) fvar_init_op = Token::INIT_CONST_HARMONY;
VariableMode fvar_mode = is_extended_mode() ? CONST_HARMONY : CONST;
fvar = new(zone()) Variable(top_scope_,
@@ -4517,8 +4419,6 @@ FunctionLiteral* Parser::ParseFunctionLiteral(Handle<String> function_name,
materialized_literal_count = entry.literal_count();
expected_property_count = entry.property_count();
top_scope_->SetLanguageMode(entry.language_mode());
- only_simple_this_property_assignments = false;
- this_property_assignments = isolate()->factory()->empty_fixed_array();
} else {
is_lazily_compiled = false;
}
@@ -4553,8 +4453,6 @@ FunctionLiteral* Parser::ParseFunctionLiteral(Handle<String> function_name,
materialized_literal_count = logger.literals();
expected_property_count = logger.properties();
top_scope_->SetLanguageMode(logger.language_mode());
- only_simple_this_property_assignments = false;
- this_property_assignments = isolate()->factory()->empty_fixed_array();
}
}
@@ -4607,9 +4505,6 @@ FunctionLiteral* Parser::ParseFunctionLiteral(Handle<String> function_name,
materialized_literal_count = function_state.materialized_literal_count();
expected_property_count = function_state.expected_property_count();
handler_count = function_state.handler_count();
- only_simple_this_property_assignments =
- function_state.only_simple_this_property_assignments();
- this_property_assignments = function_state.this_property_assignments();
Expect(Token::RBRACE, CHECK_OK);
scope->set_end_position(scanner().location().end_pos);
@@ -4675,11 +4570,9 @@ FunctionLiteral* Parser::ParseFunctionLiteral(Handle<String> function_name,
materialized_literal_count,
expected_property_count,
handler_count,
- only_simple_this_property_assignments,
- this_property_assignments,
num_parameters,
duplicate_parameters,
- type,
+ function_type,
FunctionLiteral::kIsFunction,
parenthesized,
generator);
@@ -4706,6 +4599,7 @@ preparser::PreParser::PreParseResult Parser::LazyParseFunctionLiteral(
reusable_preparser_->set_allow_natives_syntax(allow_natives_syntax());
reusable_preparser_->set_allow_lazy(true);
reusable_preparser_->set_allow_generators(allow_generators());
+ reusable_preparser_->set_allow_for_of(allow_for_of());
}
preparser::PreParser::PreParseResult result =
reusable_preparser_->PreParseLazyFunction(top_scope_->language_mode(),
@@ -4803,6 +4697,16 @@ bool Parser::Check(Token::Value token) {
}
+bool Parser::CheckContextualKeyword(Vector<const char> keyword) {
+ if (peek() == Token::IDENTIFIER &&
+ scanner().is_next_contextual_keyword(keyword)) {
+ Consume(Token::IDENTIFIER);
+ return true;
+ }
+ return false;
+}
+
+
void Parser::ExpectSemicolon(bool* ok) {
// Check for automatic semicolon insertion according to
// the rules given in ECMA-262, section 7.9, page 21.
@@ -4820,12 +4724,10 @@ void Parser::ExpectSemicolon(bool* ok) {
}
-void Parser::ExpectContextualKeyword(const char* keyword, bool* ok) {
+void Parser::ExpectContextualKeyword(Vector<const char> keyword, bool* ok) {
Expect(Token::IDENTIFIER, ok);
if (!*ok) return;
- Handle<String> symbol = GetSymbol();
- if (!*ok) return;
- if (!symbol->IsUtf8EqualTo(CStrVector(keyword))) {
+ if (!scanner().is_literal_contextual_keyword(keyword)) {
*ok = false;
ReportUnexpectedToken(scanner().current_token());
}
@@ -5025,22 +4927,22 @@ void Parser::RegisterTargetUse(Label* target, Target* stop) {
}
-Expression* Parser::NewThrowReferenceError(Handle<String> type) {
+Expression* Parser::NewThrowReferenceError(Handle<String> message) {
return NewThrowError(isolate()->factory()->MakeReferenceError_string(),
- type, HandleVector<Object>(NULL, 0));
+ message, HandleVector<Object>(NULL, 0));
}
-Expression* Parser::NewThrowSyntaxError(Handle<String> type,
+Expression* Parser::NewThrowSyntaxError(Handle<String> message,
Handle<Object> first) {
int argc = first.is_null() ? 0 : 1;
Vector< Handle<Object> > arguments = HandleVector<Object>(&first, argc);
return NewThrowError(
- isolate()->factory()->MakeSyntaxError_string(), type, arguments);
+ isolate()->factory()->MakeSyntaxError_string(), message, arguments);
}
-Expression* Parser::NewThrowTypeError(Handle<String> type,
+Expression* Parser::NewThrowTypeError(Handle<String> message,
Handle<Object> first,
Handle<Object> second) {
ASSERT(!first.is_null() && !second.is_null());
@@ -5048,12 +4950,12 @@ Expression* Parser::NewThrowTypeError(Handle<String> type,
Vector< Handle<Object> > arguments =
HandleVector<Object>(elements, ARRAY_SIZE(elements));
return NewThrowError(
- isolate()->factory()->MakeTypeError_string(), type, arguments);
+ isolate()->factory()->MakeTypeError_string(), message, arguments);
}
Expression* Parser::NewThrowError(Handle<String> constructor,
- Handle<String> type,
+ Handle<String> message,
Vector< Handle<Object> > arguments) {
int argc = arguments.length();
Handle<FixedArray> elements = isolate()->factory()->NewFixedArray(argc,
@@ -5068,7 +4970,7 @@ Expression* Parser::NewThrowError(Handle<String> constructor,
elements, FAST_ELEMENTS, TENURED);
ZoneList<Expression*>* args = new(zone()) ZoneList<Expression*>(2, zone());
- args->Add(factory()->NewLiteral(type), zone());
+ args->Add(factory()->NewLiteral(message), zone());
args->Add(factory()->NewLiteral(array), zone());
CallRuntime* call_constructor =
factory()->NewCallRuntime(constructor, NULL, args);
@@ -5130,6 +5032,7 @@ void RegExpParser::Advance() {
void RegExpParser::Reset(int pos) {
next_pos_ = pos;
+ has_more_ = (pos < in()->length());
Advance();
}
@@ -5208,20 +5111,21 @@ RegExpTree* RegExpParser::ParseDisjunction() {
int end_capture_index = captures_started();
int capture_index = stored_state->capture_index();
- SubexpressionType type = stored_state->group_type();
+ SubexpressionType group_type = stored_state->group_type();
// Restore previous state.
stored_state = stored_state->previous_state();
builder = stored_state->builder();
// Build result of subexpression.
- if (type == CAPTURE) {
+ if (group_type == CAPTURE) {
RegExpCapture* capture = new(zone()) RegExpCapture(body, capture_index);
captures_->at(capture_index - 1) = capture;
body = capture;
- } else if (type != GROUPING) {
- ASSERT(type == POSITIVE_LOOKAHEAD || type == NEGATIVE_LOOKAHEAD);
- bool is_positive = (type == POSITIVE_LOOKAHEAD);
+ } else if (group_type != GROUPING) {
+ ASSERT(group_type == POSITIVE_LOOKAHEAD ||
+ group_type == NEGATIVE_LOOKAHEAD);
+ bool is_positive = (group_type == POSITIVE_LOOKAHEAD);
body = new(zone()) RegExpLookahead(body,
is_positive,
end_capture_index - capture_index,
@@ -5255,10 +5159,10 @@ RegExpTree* RegExpParser::ParseDisjunction() {
}
case '$': {
Advance();
- RegExpAssertion::Type type =
+ RegExpAssertion::AssertionType assertion_type =
multiline_ ? RegExpAssertion::END_OF_LINE :
RegExpAssertion::END_OF_INPUT;
- builder->AddAssertion(new(zone()) RegExpAssertion(type));
+ builder->AddAssertion(new(zone()) RegExpAssertion(assertion_type));
continue;
}
case '.': {
@@ -5272,18 +5176,18 @@ RegExpTree* RegExpParser::ParseDisjunction() {
break;
}
case '(': {
- SubexpressionType type = CAPTURE;
+ SubexpressionType subexpr_type = CAPTURE;
Advance();
if (current() == '?') {
switch (Next()) {
case ':':
- type = GROUPING;
+ subexpr_type = GROUPING;
break;
case '=':
- type = POSITIVE_LOOKAHEAD;
+ subexpr_type = POSITIVE_LOOKAHEAD;
break;
case '!':
- type = NEGATIVE_LOOKAHEAD;
+ subexpr_type = NEGATIVE_LOOKAHEAD;
break;
default:
ReportError(CStrVector("Invalid group") CHECK_FAILED);
@@ -5300,7 +5204,7 @@ RegExpTree* RegExpParser::ParseDisjunction() {
captures_->Add(NULL, zone());
}
// Store current state and begin new disjunction parsing.
- stored_state = new(zone()) RegExpParserState(stored_state, type,
+ stored_state = new(zone()) RegExpParserState(stored_state, subexpr_type,
captures_started(), zone());
builder = stored_state->builder();
continue;
@@ -5488,16 +5392,16 @@ RegExpTree* RegExpParser::ParseDisjunction() {
default:
continue;
}
- RegExpQuantifier::Type type = RegExpQuantifier::GREEDY;
+ RegExpQuantifier::QuantifierType quantifier_type = RegExpQuantifier::GREEDY;
if (current() == '?') {
- type = RegExpQuantifier::NON_GREEDY;
+ quantifier_type = RegExpQuantifier::NON_GREEDY;
Advance();
} else if (FLAG_regexp_possessive_quantifier && current() == '+') {
// FLAG_regexp_possessive_quantifier is a debug-only flag.
- type = RegExpQuantifier::POSSESSIVE;
+ quantifier_type = RegExpQuantifier::POSSESSIVE;
Advance();
}
- builder->AddQuantifierToAtom(min, max, type);
+ builder->AddQuantifierToAtom(min, max, quantifier_type);
}
}
@@ -5961,6 +5865,7 @@ ScriptDataImpl* PreParserApi::PreParse(Utf16CharacterStream* source) {
preparser::PreParser preparser(&scanner, &recorder, stack_limit);
preparser.set_allow_lazy(true);
preparser.set_allow_generators(FLAG_harmony_generators);
+ preparser.set_allow_for_of(FLAG_harmony_iteration);
preparser.set_allow_harmony_scoping(FLAG_harmony_scoping);
scanner.Initialize(source);
preparser::PreParser::PreParseResult result = preparser.PreParseProgram();
diff --git a/deps/v8/src/parser.h b/deps/v8/src/parser.h
index 8a3ae92906..b7e0700009 100644
--- a/deps/v8/src/parser.h
+++ b/deps/v8/src/parser.h
@@ -269,7 +269,8 @@ class RegExpBuilder: public ZoneObject {
void AddAtom(RegExpTree* tree);
void AddAssertion(RegExpTree* tree);
void NewAlternative(); // '|'
- void AddQuantifierToAtom(int min, int max, RegExpQuantifier::Type type);
+ void AddQuantifierToAtom(
+ int min, int max, RegExpQuantifier::QuantifierType type);
RegExpTree* ToRegExp();
private:
@@ -436,6 +437,7 @@ class Parser BASE_EMBEDDED {
bool allow_modules() { return scanner().HarmonyModules(); }
bool allow_harmony_scoping() { return scanner().HarmonyScoping(); }
bool allow_generators() const { return allow_generators_; }
+ bool allow_for_of() const { return allow_for_of_; }
void set_allow_natives_syntax(bool allow) { allow_natives_syntax_ = allow; }
void set_allow_lazy(bool allow) { allow_lazy_ = allow; }
@@ -444,6 +446,7 @@ class Parser BASE_EMBEDDED {
scanner().SetHarmonyScoping(allow);
}
void set_allow_generators(bool allow) { allow_generators_ = allow; }
+ void set_allow_for_of(bool allow) { allow_for_of_ = allow; }
// Parses the source code represented by the compilation info and sets its
// function literal. Returns false (and deallocates any allocated AST
@@ -501,20 +504,6 @@ class Parser BASE_EMBEDDED {
int NextHandlerIndex() { return next_handler_index_++; }
int handler_count() { return next_handler_index_; }
- void SetThisPropertyAssignmentInfo(
- bool only_simple_this_property_assignments,
- Handle<FixedArray> this_property_assignments) {
- only_simple_this_property_assignments_ =
- only_simple_this_property_assignments;
- this_property_assignments_ = this_property_assignments;
- }
- bool only_simple_this_property_assignments() {
- return only_simple_this_property_assignments_;
- }
- Handle<FixedArray> this_property_assignments() {
- return this_property_assignments_;
- }
-
void AddProperty() { expected_property_count_++; }
int expected_property_count() { return expected_property_count_; }
@@ -544,11 +533,6 @@ class Parser BASE_EMBEDDED {
// Properties count estimation.
int expected_property_count_;
- // Keeps track of assignments to properties of this. Used for
- // optimizing constructors.
- bool only_simple_this_property_assignments_;
- Handle<FixedArray> this_property_assignments_;
-
// For generators, the variable that holds the generator object. This
// variable is used by yield expressions and return statements. NULL
// indicates that this function is not a generator.
@@ -704,12 +688,18 @@ class Parser BASE_EMBEDDED {
// in the object literal boilerplate.
Handle<Object> GetBoilerplateValue(Expression* expression);
+ // Initialize the components of a for-in / for-of statement.
+ void InitializeForEachStatement(ForEachStatement* stmt,
+ Expression* each,
+ Expression* subject,
+ Statement* body);
+
ZoneList<Expression*>* ParseArguments(bool* ok);
FunctionLiteral* ParseFunctionLiteral(Handle<String> var_name,
bool name_is_reserved,
bool is_generator,
int function_token_position,
- FunctionLiteral::Type type,
+ FunctionLiteral::FunctionType type,
bool* ok);
@@ -739,13 +729,16 @@ class Parser BASE_EMBEDDED {
bool is_generator() const { return current_function_state_->is_generator(); }
+ bool CheckInOrOf(ForEachStatement::VisitMode* visit_mode);
+
bool peek_any_identifier();
INLINE(void Consume(Token::Value token));
void Expect(Token::Value token, bool* ok);
bool Check(Token::Value token);
void ExpectSemicolon(bool* ok);
- void ExpectContextualKeyword(const char* keyword, bool* ok);
+ bool CheckContextualKeyword(Vector<const char> keyword);
+ void ExpectContextualKeyword(Vector<const char> keyword, bool* ok);
Handle<String> LiteralString(PretenureFlag tenured) {
if (scanner().is_literal_ascii()) {
@@ -868,6 +861,7 @@ class Parser BASE_EMBEDDED {
bool allow_natives_syntax_;
bool allow_lazy_;
bool allow_generators_;
+ bool allow_for_of_;
bool stack_overflow_;
// If true, the next (and immediately following) function literal is
// preceded by a parenthesis.
@@ -886,7 +880,7 @@ class Parser BASE_EMBEDDED {
// can be fully handled at compile time.
class CompileTimeValue: public AllStatic {
public:
- enum Type {
+ enum LiteralType {
OBJECT_LITERAL_FAST_ELEMENTS,
OBJECT_LITERAL_SLOW_ELEMENTS,
ARRAY_LITERAL
@@ -900,13 +894,13 @@ class CompileTimeValue: public AllStatic {
static Handle<FixedArray> GetValue(Expression* expression);
// Get the type of a compile time value returned by GetValue().
- static Type GetType(Handle<FixedArray> value);
+ static LiteralType GetLiteralType(Handle<FixedArray> value);
// Get the elements array of a compile time value returned by GetValue().
static Handle<FixedArray> GetElements(Handle<FixedArray> value);
private:
- static const int kTypeSlot = 0;
+ static const int kLiteralTypeSlot = 0;
static const int kElementsSlot = 1;
DISALLOW_IMPLICIT_CONSTRUCTORS(CompileTimeValue);
diff --git a/deps/v8/src/platform-linux.cc b/deps/v8/src/platform-linux.cc
index a4d03b0aca..22f2245f48 100644
--- a/deps/v8/src/platform-linux.cc
+++ b/deps/v8/src/platform-linux.cc
@@ -591,6 +591,10 @@ void OS::SignalCodeMovingGC() {
// kernel log.
int size = sysconf(_SC_PAGESIZE);
FILE* f = fopen(FLAG_gc_fake_mmap, "w+");
+ if (f == NULL) {
+ OS::PrintError("Failed to open %s\n", FLAG_gc_fake_mmap);
+ OS::Abort();
+ }
void* addr = mmap(OS::GetRandomMmapAddr(),
size,
PROT_READ | PROT_EXEC,
diff --git a/deps/v8/src/platform-openbsd.cc b/deps/v8/src/platform-openbsd.cc
index 380c15f21a..0a7cc80f3d 100644
--- a/deps/v8/src/platform-openbsd.cc
+++ b/deps/v8/src/platform-openbsd.cc
@@ -344,6 +344,10 @@ void OS::SignalCodeMovingGC() {
// kernel log.
int size = sysconf(_SC_PAGESIZE);
FILE* f = fopen(FLAG_gc_fake_mmap, "w+");
+ if (f == NULL) {
+ OS::PrintError("Failed to open %s\n", FLAG_gc_fake_mmap);
+ OS::Abort();
+ }
void* addr = mmap(NULL, size, PROT_READ | PROT_EXEC, MAP_PRIVATE,
fileno(f), 0);
ASSERT(addr != MAP_FAILED);
diff --git a/deps/v8/src/platform-posix.cc b/deps/v8/src/platform-posix.cc
index 054d5b5a50..f76ec44332 100644
--- a/deps/v8/src/platform-posix.cc
+++ b/deps/v8/src/platform-posix.cc
@@ -115,26 +115,11 @@ void* OS::GetRandomMmapAddr() {
raw_addr &= V8_UINT64_C(0x3ffffffff000);
#else
uint32_t raw_addr = V8::RandomPrivate(isolate);
-
- raw_addr &= 0x3ffff000;
-
-# ifdef __sun
- // For our Solaris/illumos mmap hint, we pick a random address in the bottom
- // half of the top half of the address space (that is, the third quarter).
- // Because we do not MAP_FIXED, this will be treated only as a hint -- the
- // system will not fail to mmap() because something else happens to already
- // be mapped at our random address. We deliberately set the hint high enough
- // to get well above the system's break (that is, the heap); Solaris and
- // illumos will try the hint and if that fails allocate as if there were
- // no hint at all. The high hint prevents the break from getting hemmed in
- // at low values, ceding half of the address space to the system heap.
- raw_addr += 0x80000000;
-# else
// The range 0x20000000 - 0x60000000 is relatively unpopulated across a
// variety of ASLR modes (PAE kernel, NX compat mode, etc) and on macos
// 10.6 and 10.7.
+ raw_addr &= 0x3ffff000;
raw_addr += 0x20000000;
-# endif
#endif
return reinterpret_cast<void*>(raw_addr);
}
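With the Solaris special case gone, every 32-bit POSIX target draws its mmap hint from the same window. The arithmetic, worked through: the mask keeps a page-aligned offset below 1 GiB, and the addition slides it into 0x20000000..0x5ffff000, the range the comment above calls relatively unpopulated. Checked against an arbitrary input value:

    #include <cstdint>
    #include <cassert>
    int main() {
      uint32_t raw_addr = 0xDEADBEEF;  // arbitrary entropy, illustrative only
      raw_addr &= 0x3ffff000;          // page-aligned, below 0x40000000
      raw_addr += 0x20000000;          // slide into the hint window
      assert(raw_addr >= 0x20000000 && raw_addr <= 0x5ffff000);
      assert((raw_addr & 0xfff) == 0); // low 12 bits stay clear
      return 0;
    }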
diff --git a/deps/v8/src/preparser.cc b/deps/v8/src/preparser.cc
index 3bf88cad35..828177aee0 100644
--- a/deps/v8/src/preparser.cc
+++ b/deps/v8/src/preparser.cc
@@ -659,6 +659,18 @@ PreParser::Statement PreParser::ParseWhileStatement(bool* ok) {
}
+bool PreParser::CheckInOrOf() {
+ if (peek() == i::Token::IN ||
+ (allow_for_of() &&
+ peek() == i::Token::IDENTIFIER &&
+ scanner_->is_next_contextual_keyword(v8::internal::CStrVector("of")))) {
+ Next();
+ return true;
+ }
+ return false;
+}
+
+
PreParser::Statement PreParser::ParseForStatement(bool* ok) {
// ForStatement ::
// 'for' '(' Expression? ';' Expression? ';' Expression? ')' Statement
@@ -675,8 +687,7 @@ PreParser::Statement PreParser::ParseForStatement(bool* ok) {
kForStatement, &decl_props, &decl_count, CHECK_OK);
bool accept_IN = decl_count == 1 &&
!(is_let && decl_props == kHasInitializers);
- if (peek() == i::Token::IN && accept_IN) {
- Expect(i::Token::IN, CHECK_OK);
+ if (accept_IN && CheckInOrOf()) {
ParseExpression(true, CHECK_OK);
Expect(i::Token::RPAREN, CHECK_OK);
@@ -685,8 +696,7 @@ PreParser::Statement PreParser::ParseForStatement(bool* ok) {
}
} else {
ParseExpression(false, CHECK_OK);
- if (peek() == i::Token::IN) {
- Expect(i::Token::IN, CHECK_OK);
+ if (CheckInOrOf()) {
ParseExpression(true, CHECK_OK);
Expect(i::Token::RPAREN, CHECK_OK);
diff --git a/deps/v8/src/preparser.h b/deps/v8/src/preparser.h
index e3a036f15f..786316ed50 100644
--- a/deps/v8/src/preparser.h
+++ b/deps/v8/src/preparser.h
@@ -130,6 +130,7 @@ class PreParser {
allow_lazy_(false),
allow_natives_syntax_(false),
allow_generators_(false),
+ allow_for_of_(false),
parenthesized_function_(false) { }
~PreParser() {}
@@ -139,6 +140,7 @@ class PreParser {
bool allow_modules() const { return scanner_->HarmonyModules(); }
bool allow_harmony_scoping() const { return scanner_->HarmonyScoping(); }
bool allow_generators() const { return allow_generators_; }
+ bool allow_for_of() const { return allow_for_of_; }
void set_allow_natives_syntax(bool allow) { allow_natives_syntax_ = allow; }
void set_allow_lazy(bool allow) { allow_lazy_ = allow; }
@@ -147,6 +149,7 @@ class PreParser {
scanner_->SetHarmonyScoping(allow);
}
void set_allow_generators(bool allow) { allow_generators_ = allow; }
+ void set_allow_for_of(bool allow) { allow_for_of_ = allow; }
// Pre-parse the program from the character stream; returns true on
// success (even if parsing failed, the pre-parse data successfully
@@ -655,6 +658,8 @@ class PreParser {
}
void ExpectSemicolon(bool* ok);
+ bool CheckInOrOf();
+
static int Precedence(i::Token::Value tok, bool accept_IN);
void SetStrictModeViolation(i::Scanner::Location,
@@ -678,6 +683,7 @@ class PreParser {
bool allow_lazy_;
bool allow_natives_syntax_;
bool allow_generators_;
+ bool allow_for_of_;
bool parenthesized_function_;
};
} } // v8::preparser
diff --git a/deps/v8/src/prettyprinter.cc b/deps/v8/src/prettyprinter.cc
index 3a1eca7c6b..23cad95692 100644
--- a/deps/v8/src/prettyprinter.cc
+++ b/deps/v8/src/prettyprinter.cc
@@ -255,6 +255,17 @@ void PrettyPrinter::VisitForInStatement(ForInStatement* node) {
}
+void PrettyPrinter::VisitForOfStatement(ForOfStatement* node) {
+ PrintLabels(node->labels());
+ Print("for (");
+ Visit(node->each());
+ Print(" of ");
+ Visit(node->iterable());
+ Print(") ");
+ Visit(node->body());
+}
+
+
void PrettyPrinter::VisitTryCatchStatement(TryCatchStatement* node) {
Print("try ");
Visit(node->try_block());
@@ -929,6 +940,14 @@ void AstPrinter::VisitForInStatement(ForInStatement* node) {
}
+void AstPrinter::VisitForOfStatement(ForOfStatement* node) {
+ IndentedScope indent(this, "FOR OF");
+ PrintIndentedVisit("FOR", node->each());
+ PrintIndentedVisit("OF", node->iterable());
+ PrintIndentedVisit("BODY", node->body());
+}
+
+
void AstPrinter::VisitTryCatchStatement(TryCatchStatement* node) {
IndentedScope indent(this, "TRY CATCH");
PrintIndentedVisit("TRY", node->try_block());
diff --git a/deps/v8/src/profile-generator.cc b/deps/v8/src/profile-generator.cc
index d923bc04e8..5418979cc5 100644
--- a/deps/v8/src/profile-generator.cc
+++ b/deps/v8/src/profile-generator.cc
@@ -68,7 +68,6 @@ int TokenEnumerator::GetTokenId(Object* token) {
// to a token object in the V8's heap.
isolate->global_handles()->MakeWeak(handle.location(),
this,
- NULL,
TokenRemovedCallback);
token_locations_.Add(handle.location());
token_removed_.Add(false);
@@ -77,11 +76,11 @@ int TokenEnumerator::GetTokenId(Object* token) {
void TokenEnumerator::TokenRemovedCallback(v8::Isolate* isolate,
- v8::Persistent<v8::Value> handle,
+ v8::Persistent<v8::Value>* handle,
void* parameter) {
reinterpret_cast<TokenEnumerator*>(parameter)->TokenRemoved(
- Utils::OpenHandle(*handle).location());
- handle.Dispose(isolate);
+ Utils::OpenHandle(**handle).location());
+ handle->Dispose(isolate);
}
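The weak callback now receives the Persistent by pointer and disposes it through that pointer, and MakeWeak loses its extra NULL argument; passing by pointer means the callback operates on the one registered handle itself rather than on a copy, which appears to track the wider handle rework in this upgrade. The shape of the change with stand-in types:

    struct PersistentValue { void Dispose() {} };  // stand-in, not v8::Persistent
    // Old shape: void Callback(PersistentValue handle, void* data);  // took a copy
    void Callback(PersistentValue* handle, void* data) {              // new: by pointer
      handle->Dispose();  // dispose the canonical handle in place
      (void)data;
    }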
diff --git a/deps/v8/src/profile-generator.h b/deps/v8/src/profile-generator.h
index 37cc57d2d2..7a5e1f2fc5 100644
--- a/deps/v8/src/profile-generator.h
+++ b/deps/v8/src/profile-generator.h
@@ -48,7 +48,7 @@ class TokenEnumerator {
private:
static void TokenRemovedCallback(v8::Isolate* isolate,
- v8::Persistent<v8::Value> handle,
+ v8::Persistent<v8::Value>* handle,
void* parameter);
void TokenRemoved(Object** token_location);
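
The signature change here tracks the v8 3.19 handle API: weak callbacks now receive a pointer to the Persistent handle rather than a by-value copy, since Persistent is no longer freely copyable, and MakeWeak drops its extra NULL argument. A minimal callback in the new shape, assuming only what this patch itself shows:

    #include <v8.h>

    static void OnWeak(v8::Isolate* isolate,
                       v8::Persistent<v8::Value>* handle,
                       void* parameter) {
      // Release whatever native state 'parameter' refers to, then dispose
      // of the handle through the pointer instead of a copy.
      handle->Dispose(isolate);
    }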
diff --git a/deps/v8/src/property-details.h b/deps/v8/src/property-details.h
index 674fc8869a..669b05dca0 100644
--- a/deps/v8/src/property-details.h
+++ b/deps/v8/src/property-details.h
@@ -39,7 +39,7 @@ enum PropertyAttributes {
DONT_ENUM = v8::DontEnum,
DONT_DELETE = v8::DontDelete,
- SEALED = DONT_ENUM | DONT_DELETE,
+ SEALED = DONT_DELETE,
FROZEN = SEALED | READ_ONLY,
SYMBOLIC = 8, // Used to filter symbol names
@@ -113,7 +113,7 @@ class Representation {
bool is_more_general_than(const Representation& other) const {
ASSERT(kind_ != kExternal);
ASSERT(other.kind_ != kExternal);
- if (IsHeapObject()) return other.IsDouble();
+ if (IsHeapObject()) return other.IsDouble() || other.IsNone();
return kind_ > other.kind_;
}
@@ -131,7 +131,9 @@ class Representation {
bool IsNone() const { return kind_ == kNone; }
bool IsTagged() const { return kind_ == kTagged; }
bool IsSmi() const { return kind_ == kSmi; }
+ bool IsSmiOrTagged() const { return IsSmi() || IsTagged(); }
bool IsInteger32() const { return kind_ == kInteger32; }
+ bool IsSmiOrInteger32() const { return IsSmi() || IsInteger32(); }
bool IsDouble() const { return kind_ == kDouble; }
bool IsHeapObject() const { return kind_ == kHeapObject; }
bool IsExternal() const { return kind_ == kExternal; }
@@ -167,10 +169,12 @@ class PropertyDetails BASE_EMBEDDED {
PropertyDetails(PropertyAttributes attributes,
PropertyType type,
- Representation representation) {
+ Representation representation,
+ int field_index = 0) {
value_ = TypeField::encode(type)
| AttributesField::encode(attributes)
- | RepresentationField::encode(EncodeRepresentation(representation));
+ | RepresentationField::encode(EncodeRepresentation(representation))
+ | FieldIndexField::encode(field_index);
}
int pointer() { return DescriptorPointer::decode(value_); }
@@ -180,6 +184,11 @@ class PropertyDetails BASE_EMBEDDED {
PropertyDetails CopyWithRepresentation(Representation representation) {
return PropertyDetails(value_, representation);
}
+ PropertyDetails CopyAddAttributes(PropertyAttributes new_attributes) {
+ new_attributes =
+ static_cast<PropertyAttributes>(attributes() | new_attributes);
+ return PropertyDetails(value_, new_attributes);
+ }
// Conversion for storing details as Object*.
explicit inline PropertyDetails(Smi* smi);
@@ -204,9 +213,14 @@ class PropertyDetails BASE_EMBEDDED {
}
Representation representation() {
+ ASSERT(type() != NORMAL);
return DecodeRepresentation(RepresentationField::decode(value_));
}
+ int field_index() {
+ return FieldIndexField::decode(value_);
+ }
+
inline PropertyDetails AsDeleted();
static bool IsValidIndex(int index) {
@@ -222,10 +236,15 @@ class PropertyDetails BASE_EMBEDDED {
// constants can be embedded in generated code.
class TypeField: public BitField<PropertyType, 0, 3> {};
class AttributesField: public BitField<PropertyAttributes, 3, 3> {};
+
+ // Bit fields for normalized objects.
class DeletedField: public BitField<uint32_t, 6, 1> {};
class DictionaryStorageField: public BitField<uint32_t, 7, 24> {};
- class DescriptorPointer: public BitField<uint32_t, 7, 11> {};
- class RepresentationField: public BitField<uint32_t, 18, 3> {};
+
+ // Bit fields for fast objects.
+ class DescriptorPointer: public BitField<uint32_t, 6, 11> {};
+ class RepresentationField: public BitField<uint32_t, 17, 3> {};
+ class FieldIndexField: public BitField<uint32_t, 20, 11> {};
static const int kInitialIndex = 1;
@@ -237,6 +256,9 @@ class PropertyDetails BASE_EMBEDDED {
value_ = RepresentationField::update(
value, EncodeRepresentation(representation));
}
+ PropertyDetails(int value, PropertyAttributes attributes) {
+ value_ = AttributesField::update(value, attributes);
+ }
uint32_t value_;
};
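
The repacking above is the heart of this change: bits 6 and up are now interpreted differently for normalized (dictionary) objects and fast objects, and fast objects gain an 11-bit FieldIndexField at bits 20..30 so a field's index lives in its PropertyDetails instead of being encoded as a Smi in the descriptor array. A self-contained sketch of the BitField pattern these declarations rely on (semantics inferred from use: Position is the shift, Size the width):

    #include <cstdint>

    template <class T, int Position, int Size>
    struct BitField {
      static const uint32_t kMask = ((1u << Size) - 1) << Position;
      static uint32_t encode(T value) {
        return static_cast<uint32_t>(value) << Position;
      }
      static T decode(uint32_t bits) {
        return static_cast<T>((bits & kMask) >> Position);
      }
      static uint32_t update(uint32_t previous, T value) {
        return (previous & ~kMask) | encode(value);
      }
    };

    // e.g. the new field, as declared above:
    typedef BitField<uint32_t, 20, 11> FieldIndexField;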
diff --git a/deps/v8/src/property.h b/deps/v8/src/property.h
index 606f111525..f853fc8ba0 100644
--- a/deps/v8/src/property.h
+++ b/deps/v8/src/property.h
@@ -44,10 +44,6 @@ namespace internal {
class Descriptor BASE_EMBEDDED {
public:
- static int IndexFromValue(Object* value) {
- return Smi::cast(value)->value();
- }
-
MUST_USE_RESULT MaybeObject* KeyToUniqueName() {
if (!key_->IsUniqueName()) {
MaybeObject* maybe_result = HEAP->InternalizeString(String::cast(key_));
@@ -89,10 +85,11 @@ class Descriptor BASE_EMBEDDED {
Object* value,
PropertyAttributes attributes,
PropertyType type,
- Representation representation)
+ Representation representation,
+ int field_index = 0)
: key_(key),
value_(value),
- details_(attributes, type, representation) { }
+ details_(attributes, type, representation, field_index) { }
friend class DescriptorArray;
};
@@ -104,8 +101,8 @@ class FieldDescriptor: public Descriptor {
int field_index,
PropertyAttributes attributes,
Representation representation)
- : Descriptor(key, Smi::FromInt(field_index), attributes,
- FIELD, representation) {}
+ : Descriptor(key, Smi::FromInt(0), attributes,
+ FIELD, representation, field_index) {}
};
@@ -206,6 +203,8 @@ class LookupResult BASE_EMBEDDED {
}
bool CanHoldValue(Handle<Object> value) {
+ if (IsNormal()) return true;
+ ASSERT(!IsTransition());
return value->FitsRepresentation(details_.representation());
}
@@ -311,7 +310,6 @@ class LookupResult BASE_EMBEDDED {
bool IsDontDelete() { return details_.IsDontDelete(); }
bool IsDontEnum() { return details_.IsDontEnum(); }
- bool IsDeleted() { return details_.IsDeleted(); }
bool IsFound() { return lookup_type_ != NOT_FOUND; }
bool IsTransition() { return lookup_type_ == TRANSITION_TYPE; }
bool IsHandler() { return lookup_type_ == HANDLER_TYPE; }
@@ -417,14 +415,12 @@ class LookupResult BASE_EMBEDDED {
PropertyIndex GetFieldIndex() {
ASSERT(lookup_type_ == DESCRIPTOR_TYPE);
ASSERT(IsField());
- return PropertyIndex::NewFieldIndex(
- Descriptor::IndexFromValue(GetValue()));
+ return PropertyIndex::NewFieldIndex(GetFieldIndexFromMap(holder()->map()));
}
int GetLocalFieldIndexFromMap(Map* map) {
ASSERT(IsField());
- return Descriptor::IndexFromValue(GetValueFromMap(map)) -
- map->inobject_properties();
+ return GetFieldIndexFromMap(map) - map->inobject_properties();
}
int GetDictionaryEntry() {
@@ -466,6 +462,12 @@ class LookupResult BASE_EMBEDDED {
return map->instance_descriptors()->GetValue(number_);
}
+ int GetFieldIndexFromMap(Map* map) const {
+ ASSERT(lookup_type_ == DESCRIPTOR_TYPE);
+ ASSERT(number_ < map->NumberOfOwnDescriptors());
+ return map->instance_descriptors()->GetFieldIndex(number_);
+ }
+
void Iterate(ObjectVisitor* visitor);
private:
diff --git a/deps/v8/src/regexp-macro-assembler-irregexp.cc b/deps/v8/src/regexp-macro-assembler-irregexp.cc
index e678d607ad..3b9a2f6603 100644
--- a/deps/v8/src/regexp-macro-assembler-irregexp.cc
+++ b/deps/v8/src/regexp-macro-assembler-irregexp.cc
@@ -44,8 +44,8 @@ RegExpMacroAssemblerIrregexp::RegExpMacroAssemblerIrregexp(Vector<byte> buffer,
buffer_(buffer),
pc_(0),
own_buffer_(false),
- advance_current_end_(kInvalidPC) {
-}
+ advance_current_end_(kInvalidPC),
+ isolate_(zone->isolate()) { }
RegExpMacroAssemblerIrregexp::~RegExpMacroAssemblerIrregexp() {
@@ -410,28 +410,6 @@ void RegExpMacroAssemblerIrregexp::CheckNotBackReferenceIgnoreCase(
}
-void RegExpMacroAssemblerIrregexp::CheckCharacters(
- Vector<const uc16> str,
- int cp_offset,
- Label* on_failure,
- bool check_end_of_string) {
- ASSERT(cp_offset >= kMinCPOffset);
- ASSERT(cp_offset + str.length() - 1 <= kMaxCPOffset);
- // It is vital that this loop is backwards due to the unchecked character
- // load below.
- for (int i = str.length() - 1; i >= 0; i--) {
- if (check_end_of_string && i == str.length() - 1) {
- Emit(BC_LOAD_CURRENT_CHAR, cp_offset + i);
- EmitOrLink(on_failure);
- } else {
- Emit(BC_LOAD_CURRENT_CHAR_UNCHECKED, cp_offset + i);
- }
- Emit(BC_CHECK_NOT_CHAR, str[i]);
- EmitOrLink(on_failure);
- }
-}
-
-
void RegExpMacroAssemblerIrregexp::IfRegisterLT(int register_index,
int comparand,
Label* on_less_than) {
@@ -467,7 +445,7 @@ Handle<HeapObject> RegExpMacroAssemblerIrregexp::GetCode(
Handle<String> source) {
Bind(&backtrack_);
Emit(BC_POP_BT, 0);
- Handle<ByteArray> array = FACTORY->NewByteArray(length());
+ Handle<ByteArray> array = isolate_->factory()->NewByteArray(length());
Copy(array->GetDataStartAddress());
return array;
}
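
Caching zone->isolate() in the constructor replaces the FACTORY macro, which reaches the current isolate through thread-local state; holding the isolate as a member makes the dependency explicit. A sketch of the shape of the change (the TLS accessor name is assumed for illustration):

    struct Isolate;                           // opaque here
    extern Isolate* CurrentIsolateFromTLS();  // what a FACTORY-style macro hides

    class AssemblerSketch {
     public:
      explicit AssemblerSketch(Isolate* isolate) : isolate_(isolate) {}
      Isolate* isolate() const { return isolate_; }  // explicit, no TLS lookup
     private:
      Isolate* isolate_;
    };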
diff --git a/deps/v8/src/regexp-macro-assembler-irregexp.h b/deps/v8/src/regexp-macro-assembler-irregexp.h
index 4bc29809bd..f8a412d4f8 100644
--- a/deps/v8/src/regexp-macro-assembler-irregexp.h
+++ b/deps/v8/src/regexp-macro-assembler-irregexp.h
@@ -103,10 +103,6 @@ class RegExpMacroAssemblerIrregexp: public RegExpMacroAssembler {
virtual void CheckNotBackReference(int start_reg, Label* on_no_match);
virtual void CheckNotBackReferenceIgnoreCase(int start_reg,
Label* on_no_match);
- virtual void CheckCharacters(Vector<const uc16> str,
- int cp_offset,
- Label* on_failure,
- bool check_end_of_string);
virtual void IfRegisterLT(int register_index, int comparand, Label* if_lt);
virtual void IfRegisterGE(int register_index, int comparand, Label* if_ge);
virtual void IfRegisterEqPos(int register_index, Label* if_eq);
@@ -138,6 +134,8 @@ class RegExpMacroAssemblerIrregexp: public RegExpMacroAssembler {
int advance_current_offset_;
int advance_current_end_;
+ Isolate* isolate_;
+
static const int kInvalidPC = -1;
DISALLOW_IMPLICIT_CONSTRUCTORS(RegExpMacroAssemblerIrregexp);
diff --git a/deps/v8/src/regexp-macro-assembler-tracer.cc b/deps/v8/src/regexp-macro-assembler-tracer.cc
index f878e8c460..1ce1fa4b24 100644
--- a/deps/v8/src/regexp-macro-assembler-tracer.cc
+++ b/deps/v8/src/regexp-macro-assembler-tracer.cc
@@ -383,21 +383,6 @@ void RegExpMacroAssemblerTracer::CheckNotBackReferenceIgnoreCase(
}
-void RegExpMacroAssemblerTracer::CheckCharacters(Vector<const uc16> str,
- int cp_offset,
- Label* on_failure,
- bool check_end_of_string) {
- PrintF(" %s(str=\"",
- check_end_of_string ? "CheckCharacters" : "CheckCharactersUnchecked");
- for (int i = 0; i < str.length(); i++) {
- PrintF("0x%04x", str[i]);
- }
- PrintF("\", cp_offset=%d, label[%08x])\n",
- cp_offset, LabelToInt(on_failure));
- assembler_->CheckCharacters(str, cp_offset, on_failure, check_end_of_string);
-}
-
-
bool RegExpMacroAssemblerTracer::CheckSpecialCharacterClass(
uc16 type,
Label* on_no_match) {
diff --git a/deps/v8/src/regexp-macro-assembler-tracer.h b/deps/v8/src/regexp-macro-assembler-tracer.h
index ac262df76f..852fb80417 100644
--- a/deps/v8/src/regexp-macro-assembler-tracer.h
+++ b/deps/v8/src/regexp-macro-assembler-tracer.h
@@ -49,11 +49,6 @@ class RegExpMacroAssemblerTracer: public RegExpMacroAssembler {
Label* on_equal);
virtual void CheckCharacterGT(uc16 limit, Label* on_greater);
virtual void CheckCharacterLT(uc16 limit, Label* on_less);
- virtual void CheckCharacters(
- Vector<const uc16> str,
- int cp_offset,
- Label* on_failure,
- bool check_end_of_string);
virtual void CheckGreedyLoop(Label* on_tos_equals_current_position);
virtual void CheckNotAtStart(Label* on_not_at_start);
virtual void CheckNotBackReference(int start_reg, Label* on_no_match);
diff --git a/deps/v8/src/regexp-macro-assembler.cc b/deps/v8/src/regexp-macro-assembler.cc
index 3ebf5a8e00..fa792768bc 100644
--- a/deps/v8/src/regexp-macro-assembler.cc
+++ b/deps/v8/src/regexp-macro-assembler.cc
@@ -113,8 +113,8 @@ NativeRegExpMacroAssembler::Result NativeRegExpMacroAssembler::Match(
ASSERT(previous_index <= subject->length());
// No allocations before calling the regexp, but we can't use
- // AssertNoAllocation, since regexps might be preempted, and another thread
- // might do allocation anyway.
+ // DisallowHeapAllocation, since regexps might be preempted, and another
+ // thread might do allocation anyway.
String* subject_ptr = *subject;
// Character offsets into string.
diff --git a/deps/v8/src/regexp-macro-assembler.h b/deps/v8/src/regexp-macro-assembler.h
index 211ab6ba39..1ff8bd9797 100644
--- a/deps/v8/src/regexp-macro-assembler.h
+++ b/deps/v8/src/regexp-macro-assembler.h
@@ -87,17 +87,6 @@ class RegExpMacroAssembler {
Label* on_equal) = 0;
virtual void CheckCharacterGT(uc16 limit, Label* on_greater) = 0;
virtual void CheckCharacterLT(uc16 limit, Label* on_less) = 0;
- // Check the current character for a match with a literal string. If we
- // fail to match then goto the on_failure label. If check_eos is set then
- // the end of input always fails. If check_eos is clear then it is the
- // caller's responsibility to ensure that the end of string is not hit.
- // If the label is NULL then we should pop a backtrack address off
- // the stack and go to that.
- virtual void CheckCharacters(
- Vector<const uc16> str,
- int cp_offset,
- Label* on_failure,
- bool check_eos) = 0;
virtual void CheckGreedyLoop(Label* on_tos_equals_current_position) = 0;
virtual void CheckNotAtStart(Label* on_not_at_start) = 0;
virtual void CheckNotBackReference(int start_reg, Label* on_no_match) = 0;
diff --git a/deps/v8/src/rewriter.cc b/deps/v8/src/rewriter.cc
index 44fe0504e5..df5c353415 100644
--- a/deps/v8/src/rewriter.cc
+++ b/deps/v8/src/rewriter.cc
@@ -168,6 +168,11 @@ void Processor::VisitForInStatement(ForInStatement* node) {
}
+void Processor::VisitForOfStatement(ForOfStatement* node) {
+ VisitIterationStatement(node);
+}
+
+
void Processor::VisitTryCatchStatement(TryCatchStatement* node) {
// Rewrite both try and catch blocks (reversed order).
bool set_after_catch = is_set_;
diff --git a/deps/v8/src/runtime.cc b/deps/v8/src/runtime.cc
index 61b3549dde..eccf6ea4c8 100644
--- a/deps/v8/src/runtime.cc
+++ b/deps/v8/src/runtime.cc
@@ -28,8 +28,6 @@
#include <stdlib.h>
#include <limits>
-#define V8_ALLOW_ACCESS_TO_PERSISTENT_IMPLICIT
-
#include "v8.h"
#include "accessors.h"
@@ -427,7 +425,7 @@ static Handle<Object> CreateLiteralBoilerplate(
Handle<FixedArray> array) {
Handle<FixedArray> elements = CompileTimeValue::GetElements(array);
const bool kHasNoFunctionLiteral = false;
- switch (CompileTimeValue::GetType(array)) {
+ switch (CompileTimeValue::GetLiteralType(array)) {
case CompileTimeValue::OBJECT_LITERAL_FAST_ELEMENTS:
return CreateObjectLiteralBoilerplate(isolate,
literals,
@@ -570,7 +568,7 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_CreateSymbol) {
RUNTIME_FUNCTION(MaybeObject*, Runtime_SymbolName) {
- NoHandleAllocation ha(isolate);
+ SealHandleScope shs(isolate);
ASSERT(args.length() == 1);
CONVERT_ARG_CHECKED(Symbol, symbol, 0);
return symbol->name();
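
The NoHandleAllocation → SealHandleScope rename repeats through the rest of this file. Both are debug-only assertion scopes (see the new assert-scope.h in this upgrade): while one is live, allocating a Handle trips an assert, keeping these raw-pointer fast paths honest. The mechanism is essentially a counter-based RAII guard; a standalone sketch of the idea, with names and details assumed rather than V8's actual implementation:

    #include <cassert>

    struct SealScopeDemo {
      static int depth;              // per-thread in the real thing
      SealScopeDemo() { ++depth; }
      ~SealScopeDemo() { --depth; }
    };
    int SealScopeDemo::depth = 0;

    void AllocateHandleSlot() {
      // Every handle allocation checks that no seal scope is active.
      assert(SealScopeDemo::depth == 0 && "handles are sealed here");
    }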
@@ -578,7 +576,7 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_SymbolName) {
RUNTIME_FUNCTION(MaybeObject*, Runtime_CreateJSProxy) {
- NoHandleAllocation ha(isolate);
+ SealHandleScope shs(isolate);
ASSERT(args.length() == 2);
CONVERT_ARG_CHECKED(JSReceiver, handler, 0);
Object* prototype = args[1];
@@ -589,7 +587,7 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_CreateJSProxy) {
RUNTIME_FUNCTION(MaybeObject*, Runtime_CreateJSFunctionProxy) {
- NoHandleAllocation ha(isolate);
+ SealHandleScope shs(isolate);
ASSERT(args.length() == 4);
CONVERT_ARG_CHECKED(JSReceiver, handler, 0);
Object* call_trap = args[1];
@@ -604,7 +602,7 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_CreateJSFunctionProxy) {
RUNTIME_FUNCTION(MaybeObject*, Runtime_IsJSProxy) {
- NoHandleAllocation ha(isolate);
+ SealHandleScope shs(isolate);
ASSERT(args.length() == 1);
Object* obj = args[0];
return isolate->heap()->ToBoolean(obj->IsJSProxy());
@@ -612,7 +610,7 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_IsJSProxy) {
RUNTIME_FUNCTION(MaybeObject*, Runtime_IsJSFunctionProxy) {
- NoHandleAllocation ha(isolate);
+ SealHandleScope shs(isolate);
ASSERT(args.length() == 1);
Object* obj = args[0];
return isolate->heap()->ToBoolean(obj->IsJSFunctionProxy());
@@ -620,7 +618,7 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_IsJSFunctionProxy) {
RUNTIME_FUNCTION(MaybeObject*, Runtime_GetHandler) {
- NoHandleAllocation ha(isolate);
+ SealHandleScope shs(isolate);
ASSERT(args.length() == 1);
CONVERT_ARG_CHECKED(JSProxy, proxy, 0);
return proxy->handler();
@@ -628,7 +626,7 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_GetHandler) {
RUNTIME_FUNCTION(MaybeObject*, Runtime_GetCallTrap) {
- NoHandleAllocation ha(isolate);
+ SealHandleScope shs(isolate);
ASSERT(args.length() == 1);
CONVERT_ARG_CHECKED(JSFunctionProxy, proxy, 0);
return proxy->call_trap();
@@ -636,7 +634,7 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_GetCallTrap) {
RUNTIME_FUNCTION(MaybeObject*, Runtime_GetConstructTrap) {
- NoHandleAllocation ha(isolate);
+ SealHandleScope shs(isolate);
ASSERT(args.length() == 1);
CONVERT_ARG_CHECKED(JSFunctionProxy, proxy, 0);
return proxy->construct_trap();
@@ -644,7 +642,7 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_GetConstructTrap) {
RUNTIME_FUNCTION(MaybeObject*, Runtime_Fix) {
- NoHandleAllocation ha(isolate);
+ SealHandleScope shs(isolate);
ASSERT(args.length() == 1);
CONVERT_ARG_CHECKED(JSProxy, proxy, 0);
proxy->Fix();
@@ -658,28 +656,42 @@ static void ArrayBufferWeakCallback(v8::Isolate* external_isolate,
Isolate* isolate = reinterpret_cast<Isolate*>(external_isolate);
HandleScope scope(isolate);
Handle<Object> internal_object = Utils::OpenHandle(**object);
+ Handle<JSArrayBuffer> array_buffer(JSArrayBuffer::cast(*internal_object));
- size_t allocated_length = NumberToSize(
- isolate, JSArrayBuffer::cast(*internal_object)->byte_length());
- isolate->heap()->AdjustAmountOfExternalAllocatedMemory(
- -static_cast<intptr_t>(allocated_length));
- if (data != NULL)
- free(data);
+ if (!array_buffer->is_external()) {
+ size_t allocated_length = NumberToSize(
+ isolate, array_buffer->byte_length());
+ isolate->heap()->AdjustAmountOfExternalAllocatedMemory(
+ -static_cast<intptr_t>(allocated_length));
+ CHECK(V8::ArrayBufferAllocator() != NULL);
+ V8::ArrayBufferAllocator()->Free(data);
+ }
object->Dispose(external_isolate);
}
-bool Runtime::SetupArrayBuffer(Isolate* isolate,
+void Runtime::SetupArrayBuffer(Isolate* isolate,
Handle<JSArrayBuffer> array_buffer,
+ bool is_external,
void* data,
size_t allocated_length) {
+ ASSERT(array_buffer->GetInternalFieldCount() ==
+ v8::ArrayBuffer::kInternalFieldCount);
+ for (int i = 0; i < v8::ArrayBuffer::kInternalFieldCount; i++) {
+ array_buffer->SetInternalField(i, Smi::FromInt(0));
+ }
array_buffer->set_backing_store(data);
+ array_buffer->set_flag(Smi::FromInt(0));
+ array_buffer->set_is_external(is_external);
Handle<Object> byte_length =
isolate->factory()->NewNumberFromSize(allocated_length);
CHECK(byte_length->IsSmi() || byte_length->IsHeapNumber());
array_buffer->set_byte_length(*byte_length);
- return true;
+
+ array_buffer->set_weak_next(isolate->heap()->array_buffers_list());
+ isolate->heap()->set_array_buffers_list(*array_buffer);
+ array_buffer->set_weak_first_array(Smi::FromInt(0));
}
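
The two weak_* fields thread every ArrayBuffer onto a heap-global intrusive list, and (further down) every typed array onto its buffer's weak_first_array list, so the GC can walk live buffers and their views without allocating. A simplified sketch of the intrusive push-front, with the GC-specific handling omitted:

    struct BufferDemo {
      BufferDemo* weak_next = nullptr;        // mirrors JSArrayBuffer::weak_next
    };

    struct HeapDemo {
      BufferDemo* array_buffers_list = nullptr;
      void Add(BufferDemo* buf) {
        buf->weak_next = array_buffers_list;  // O(1), no allocation
        array_buffers_list = buf;
      }
    };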
@@ -688,19 +700,19 @@ bool Runtime::SetupArrayBufferAllocatingData(
Handle<JSArrayBuffer> array_buffer,
size_t allocated_length) {
void* data;
+ CHECK(V8::ArrayBufferAllocator() != NULL);
if (allocated_length != 0) {
- data = malloc(allocated_length);
+ data = V8::ArrayBufferAllocator()->Allocate(allocated_length);
if (data == NULL) return false;
memset(data, 0, allocated_length);
} else {
data = NULL;
}
- if (!SetupArrayBuffer(isolate, array_buffer, data, allocated_length))
- return false;
+ SetupArrayBuffer(isolate, array_buffer, false, data, allocated_length);
v8::Isolate* external_isolate = reinterpret_cast<v8::Isolate*>(isolate);
- v8::Persistent<v8::Value> weak_handle = v8::Persistent<v8::Value>::New(
+ v8::Persistent<v8::Value> weak_handle(
external_isolate, v8::Utils::ToLocal(Handle<Object>::cast(array_buffer)));
weak_handle.MakeWeak(external_isolate, data, ArrayBufferWeakCallback);
weak_handle.MarkIndependent(external_isolate);
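
Backing stores are now obtained from the embedder-supplied V8::ArrayBufferAllocator() instead of malloc/free, and the runtime CHECKs that one is installed. Judging from the calls in this patch, Allocate takes the length and Free takes only the pointer (the runtime still zero-fills after allocating); a minimal embedder-side allocator under those assumptions:

    #include <cstdlib>
    #include <v8.h>

    class MallocAllocator : public v8::ArrayBuffer::Allocator {
     public:
      virtual void* Allocate(size_t length) { return malloc(length); }
      virtual void Free(void* data) { free(data); }
    };

    // Installed once at startup, e.g. (setter name per the 3.19 API):
    //   static MallocAllocator allocator;
    //   v8::V8::SetArrayBufferAllocator(&allocator);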
@@ -745,7 +757,7 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_ArrayBufferInitialize) {
RUNTIME_FUNCTION(MaybeObject*, Runtime_ArrayBufferGetByteLength) {
- NoHandleAllocation ha(isolate);
+ SealHandleScope shs(isolate);
ASSERT(args.length() == 1);
CONVERT_ARG_CHECKED(JSArrayBuffer, holder, 0);
return holder->byte_length();
@@ -849,6 +861,8 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_TypedArrayInitialize) {
Handle<Object> length_obj = isolate->factory()->NewNumberFromSize(length);
holder->set_length(*length_obj);
+ holder->set_weak_next(buffer->weak_first_array());
+ buffer->set_weak_first_array(*holder);
Handle<ExternalArray> elements =
isolate->factory()->NewExternalArray(
@@ -1189,7 +1203,7 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_WeakMapSet) {
RUNTIME_FUNCTION(MaybeObject*, Runtime_ClassOf) {
- NoHandleAllocation ha(isolate);
+ SealHandleScope shs(isolate);
ASSERT(args.length() == 1);
Object* obj = args[0];
if (!obj->IsJSObject()) return isolate->heap()->null_value();
@@ -1198,7 +1212,7 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_ClassOf) {
RUNTIME_FUNCTION(MaybeObject*, Runtime_GetPrototype) {
- NoHandleAllocation ha(isolate);
+ SealHandleScope shs(isolate);
ASSERT(args.length() == 1);
CONVERT_ARG_CHECKED(Object, obj, 0);
// We don't expect access checks to be needed on JSProxy objects.
@@ -1230,7 +1244,7 @@ static inline Object* GetPrototypeSkipHiddenPrototypes(Isolate* isolate,
RUNTIME_FUNCTION(MaybeObject*, Runtime_SetPrototype) {
- NoHandleAllocation ha(isolate);
+ SealHandleScope shs(isolate);
ASSERT(args.length() == 2);
CONVERT_ARG_CHECKED(JSObject, obj, 0);
CONVERT_ARG_CHECKED(Object, prototype, 1);
@@ -1259,7 +1273,7 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_SetPrototype) {
RUNTIME_FUNCTION(MaybeObject*, Runtime_IsInPrototypeChain) {
- NoHandleAllocation ha(isolate);
+ SealHandleScope shs(isolate);
ASSERT(args.length() == 2);
// See ECMA-262, section 15.3.5.3, page 88 (steps 5 - 8).
Object* O = args[0];
@@ -1452,7 +1466,7 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_GetOwnProperty) {
RUNTIME_FUNCTION(MaybeObject*, Runtime_PreventExtensions) {
- NoHandleAllocation ha(isolate);
+ SealHandleScope shs(isolate);
ASSERT(args.length() == 1);
CONVERT_ARG_CHECKED(JSObject, obj, 0);
return obj->PreventExtensions();
@@ -1460,7 +1474,7 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_PreventExtensions) {
RUNTIME_FUNCTION(MaybeObject*, Runtime_IsExtensible) {
- NoHandleAllocation ha(isolate);
+ SealHandleScope shs(isolate);
ASSERT(args.length() == 1);
CONVERT_ARG_CHECKED(JSObject, obj, 0);
if (obj->IsJSGlobalProxy()) {
@@ -1495,7 +1509,7 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_CreateApiFunction) {
RUNTIME_FUNCTION(MaybeObject*, Runtime_IsTemplate) {
- NoHandleAllocation ha(isolate);
+ SealHandleScope shs(isolate);
ASSERT(args.length() == 1);
Object* arg = args[0];
bool result = arg->IsObjectTemplateInfo() || arg->IsFunctionTemplateInfo();
@@ -1504,7 +1518,7 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_IsTemplate) {
RUNTIME_FUNCTION(MaybeObject*, Runtime_GetTemplateField) {
- NoHandleAllocation ha(isolate);
+ SealHandleScope shs(isolate);
ASSERT(args.length() == 2);
CONVERT_ARG_CHECKED(HeapObject, templ, 0);
CONVERT_SMI_ARG_CHECKED(index, 1)
@@ -1523,7 +1537,7 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_GetTemplateField) {
RUNTIME_FUNCTION(MaybeObject*, Runtime_DisableAccessChecks) {
- NoHandleAllocation ha(isolate);
+ SealHandleScope shs(isolate);
ASSERT(args.length() == 1);
CONVERT_ARG_CHECKED(HeapObject, object, 0);
Map* old_map = object->map();
@@ -1542,7 +1556,7 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_DisableAccessChecks) {
RUNTIME_FUNCTION(MaybeObject*, Runtime_EnableAccessChecks) {
- NoHandleAllocation ha(isolate);
+ SealHandleScope shs(isolate);
ASSERT(args.length() == 1);
CONVERT_ARG_CHECKED(HeapObject, object, 0);
Map* old_map = object->map();
@@ -1775,7 +1789,7 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_DeclareContextSlot) {
RUNTIME_FUNCTION(MaybeObject*, Runtime_InitializeVarGlobal) {
- NoHandleAllocation nha(isolate);
+ SealHandleScope shs(isolate);
// args[0] == name
// args[1] == language_mode
// args[2] == value (optional)
@@ -1831,7 +1845,7 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_InitializeVarGlobal) {
RUNTIME_FUNCTION(MaybeObject*, Runtime_InitializeConstGlobal) {
- NoHandleAllocation ha(isolate);
+ SealHandleScope shs(isolate);
// All constants are declared with an initial value. The name
// of the constant is the first argument and the initial value
// is the second.
@@ -2037,7 +2051,7 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_RegExpExec) {
RUNTIME_FUNCTION(MaybeObject*, Runtime_RegExpConstructResult) {
- NoHandleAllocation ha(isolate);
+ SealHandleScope shs(isolate);
ASSERT(args.length() == 3);
CONVERT_SMI_ARG_CHECKED(elements_count, 0);
if (elements_count < 0 ||
@@ -2056,7 +2070,7 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_RegExpConstructResult) {
if (!maybe_new_object->ToObject(&new_object)) return maybe_new_object;
}
{
- AssertNoAllocation no_gc;
+ DisallowHeapAllocation no_gc;
HandleScope scope(isolate);
reinterpret_cast<HeapObject*>(new_object)->
set_map(isolate->native_context()->regexp_result_map());
@@ -2073,8 +2087,8 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_RegExpConstructResult) {
RUNTIME_FUNCTION(MaybeObject*, Runtime_RegExpInitializeObject) {
- NoHandleAllocation ha(isolate);
- AssertNoAllocation no_alloc;
+ SealHandleScope shs(isolate);
+ DisallowHeapAllocation no_allocation;
ASSERT(args.length() == 5);
CONVERT_ARG_CHECKED(JSRegExp, regexp, 0);
CONVERT_ARG_CHECKED(String, source, 1);
@@ -2193,7 +2207,7 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_SpecialArrayFunctions) {
RUNTIME_FUNCTION(MaybeObject*, Runtime_IsClassicModeFunction) {
- NoHandleAllocation ha(isolate);
+ SealHandleScope shs(isolate);
ASSERT(args.length() == 1);
CONVERT_ARG_CHECKED(JSReceiver, callable, 0);
if (!callable->IsJSFunction()) {
@@ -2211,7 +2225,7 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_IsClassicModeFunction) {
RUNTIME_FUNCTION(MaybeObject*, Runtime_GetDefaultReceiver) {
- NoHandleAllocation ha(isolate);
+ SealHandleScope shs(isolate);
ASSERT(args.length() == 1);
CONVERT_ARG_CHECKED(JSReceiver, callable, 0);
@@ -2269,7 +2283,7 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_MaterializeRegExpLiteral) {
RUNTIME_FUNCTION(MaybeObject*, Runtime_FunctionGetName) {
- NoHandleAllocation ha(isolate);
+ SealHandleScope shs(isolate);
ASSERT(args.length() == 1);
CONVERT_ARG_CHECKED(JSFunction, f, 0);
@@ -2278,7 +2292,7 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_FunctionGetName) {
RUNTIME_FUNCTION(MaybeObject*, Runtime_FunctionSetName) {
- NoHandleAllocation ha(isolate);
+ SealHandleScope shs(isolate);
ASSERT(args.length() == 2);
CONVERT_ARG_CHECKED(JSFunction, f, 0);
@@ -2289,7 +2303,7 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_FunctionSetName) {
RUNTIME_FUNCTION(MaybeObject*, Runtime_FunctionNameShouldPrintAsAnonymous) {
- NoHandleAllocation ha(isolate);
+ SealHandleScope shs(isolate);
ASSERT(args.length() == 1);
CONVERT_ARG_CHECKED(JSFunction, f, 0);
return isolate->heap()->ToBoolean(
@@ -2298,7 +2312,7 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_FunctionNameShouldPrintAsAnonymous) {
RUNTIME_FUNCTION(MaybeObject*, Runtime_FunctionMarkNameShouldPrintAsAnonymous) {
- NoHandleAllocation ha(isolate);
+ SealHandleScope shs(isolate);
ASSERT(args.length() == 1);
CONVERT_ARG_CHECKED(JSFunction, f, 0);
f->shared()->set_name_should_print_as_anonymous(true);
@@ -2307,7 +2321,7 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_FunctionMarkNameShouldPrintAsAnonymous) {
RUNTIME_FUNCTION(MaybeObject*, Runtime_FunctionIsGenerator) {
- NoHandleAllocation ha(isolate);
+ SealHandleScope shs(isolate);
ASSERT(args.length() == 1);
CONVERT_ARG_CHECKED(JSFunction, f, 0);
return isolate->heap()->ToBoolean(f->shared()->is_generator());
@@ -2315,7 +2329,7 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_FunctionIsGenerator) {
RUNTIME_FUNCTION(MaybeObject*, Runtime_FunctionRemovePrototype) {
- NoHandleAllocation ha(isolate);
+ SealHandleScope shs(isolate);
ASSERT(args.length() == 1);
CONVERT_ARG_CHECKED(JSFunction, f, 0);
@@ -2348,7 +2362,7 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_FunctionGetSourceCode) {
RUNTIME_FUNCTION(MaybeObject*, Runtime_FunctionGetScriptSourcePosition) {
- NoHandleAllocation ha(isolate);
+ SealHandleScope shs(isolate);
ASSERT(args.length() == 1);
CONVERT_ARG_CHECKED(JSFunction, fun, 0);
@@ -2358,7 +2372,7 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_FunctionGetScriptSourcePosition) {
RUNTIME_FUNCTION(MaybeObject*, Runtime_FunctionGetPositionForOffset) {
- NoHandleAllocation ha(isolate);
+ SealHandleScope shs(isolate);
ASSERT(args.length() == 2);
CONVERT_ARG_CHECKED(Code, code, 0);
@@ -2372,7 +2386,7 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_FunctionGetPositionForOffset) {
RUNTIME_FUNCTION(MaybeObject*, Runtime_FunctionSetInstanceClassName) {
- NoHandleAllocation ha(isolate);
+ SealHandleScope shs(isolate);
ASSERT(args.length() == 2);
CONVERT_ARG_CHECKED(JSFunction, fun, 0);
@@ -2383,7 +2397,7 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_FunctionSetInstanceClassName) {
RUNTIME_FUNCTION(MaybeObject*, Runtime_FunctionSetLength) {
- NoHandleAllocation ha(isolate);
+ SealHandleScope shs(isolate);
ASSERT(args.length() == 2);
CONVERT_ARG_CHECKED(JSFunction, fun, 0);
@@ -2394,7 +2408,7 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_FunctionSetLength) {
RUNTIME_FUNCTION(MaybeObject*, Runtime_FunctionSetPrototype) {
- NoHandleAllocation ha(isolate);
+ SealHandleScope shs(isolate);
ASSERT(args.length() == 2);
CONVERT_ARG_CHECKED(JSFunction, fun, 0);
@@ -2409,7 +2423,7 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_FunctionSetPrototype) {
RUNTIME_FUNCTION(MaybeObject*, Runtime_FunctionSetReadOnlyPrototype) {
- NoHandleAllocation ha(isolate);
+ SealHandleScope shs(isolate);
RUNTIME_ASSERT(args.length() == 1);
CONVERT_ARG_CHECKED(JSFunction, function, 0);
@@ -2451,7 +2465,7 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_FunctionSetReadOnlyPrototype) {
RUNTIME_FUNCTION(MaybeObject*, Runtime_FunctionIsAPIFunction) {
- NoHandleAllocation ha(isolate);
+ SealHandleScope shs(isolate);
ASSERT(args.length() == 1);
CONVERT_ARG_CHECKED(JSFunction, f, 0);
@@ -2460,7 +2474,7 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_FunctionIsAPIFunction) {
RUNTIME_FUNCTION(MaybeObject*, Runtime_FunctionIsBuiltin) {
- NoHandleAllocation ha(isolate);
+ SealHandleScope shs(isolate);
ASSERT(args.length() == 1);
CONVERT_ARG_CHECKED(JSFunction, f, 0);
@@ -2507,10 +2521,6 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_SetCode) {
// Since we don't store the source, we should never optimize this.
target_shared->code()->set_optimizable(false);
- // Clear the optimization hints related to the compiled code as these
- // are no longer valid when the code is overwritten.
- target_shared->ClearThisPropertyAssignmentsInfo();
-
// Set the code of the target function.
target->ReplaceCode(source_shared->code());
ASSERT(target->next_function_link()->IsUndefined());
@@ -2550,7 +2560,7 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_SetExpectedNumberOfProperties) {
RUNTIME_FUNCTION(MaybeObject*, Runtime_CreateJSGeneratorObject) {
- NoHandleAllocation ha(isolate);
+ SealHandleScope shs(isolate);
ASSERT(args.length() == 0);
JavaScriptFrameIterator it(isolate);
@@ -2578,7 +2588,7 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_CreateJSGeneratorObject) {
RUNTIME_FUNCTION(MaybeObject*, Runtime_SuspendJSGeneratorObject) {
- NoHandleAllocation ha(isolate);
+ SealHandleScope shs(isolate);
ASSERT(args.length() == 1);
CONVERT_ARG_CHECKED(JSGeneratorObject, generator_object, 0);
@@ -2633,11 +2643,11 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_SuspendJSGeneratorObject) {
// called if the suspended activation had operands on the stack, stack handlers
// needing rewinding, or if the resume should throw an exception. The fast path
// is handled directly in FullCodeGenerator::EmitGeneratorResume(), which is
-// inlined into GeneratorNext, GeneratorSend, and GeneratorThrow.
-// EmitGeneratorResumeResume is called in any case, as it needs to reconstruct
-// the stack frame and make space for arguments and operands.
+// inlined into GeneratorNext and GeneratorThrow. EmitGeneratorResume is
+// called in any case, as it needs to reconstruct the stack frame and make space
+// for arguments and operands.
RUNTIME_FUNCTION(MaybeObject*, Runtime_ResumeJSGeneratorObject) {
- NoHandleAllocation ha(isolate);
+ SealHandleScope shs(isolate);
ASSERT(args.length() == 3);
CONVERT_ARG_CHECKED(JSGeneratorObject, generator_object, 0);
CONVERT_ARG_CHECKED(Object, value, 1);
@@ -2668,7 +2678,7 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_ResumeJSGeneratorObject) {
JSGeneratorObject::ResumeMode resume_mode =
static_cast<JSGeneratorObject::ResumeMode>(resume_mode_int);
switch (resume_mode) {
- case JSGeneratorObject::SEND:
+ case JSGeneratorObject::NEXT:
return value;
case JSGeneratorObject::THROW:
return isolate->Throw(value);
@@ -2692,6 +2702,14 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_ThrowGeneratorStateError) {
}
+RUNTIME_FUNCTION(MaybeObject*, Runtime_ObjectFreeze) {
+ SealHandleScope shs(isolate);
+ ASSERT(args.length() == 1);
+ CONVERT_ARG_CHECKED(JSObject, object, 0);
+ return object->Freeze(isolate);
+}
+
+
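
%ObjectFreeze moves Object.freeze into a single runtime call that forwards to JSObject::Freeze. In attribute terms, with the revised constants from property-details.h above (SEALED no longer implies DONT_ENUM), freezing adds READ_ONLY | DONT_DELETE to each own property; a tiny self-contained illustration using the v8::PropertyAttribute bit values:

    #include <cstdint>

    enum PropertyAttributes : uint32_t {
      NONE        = 0,
      READ_ONLY   = 1 << 0,
      DONT_ENUM   = 1 << 1,
      DONT_DELETE = 1 << 2,
      SEALED      = DONT_DELETE,        // no longer DONT_ENUM | DONT_DELETE
      FROZEN      = SEALED | READ_ONLY,
    };

    inline PropertyAttributes Freeze(PropertyAttributes a) {
      return static_cast<PropertyAttributes>(a | FROZEN);
    }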
MUST_USE_RESULT static MaybeObject* CharFromCode(Isolate* isolate,
Object* char_code) {
if (char_code->IsNumber()) {
@@ -2703,7 +2721,7 @@ MUST_USE_RESULT static MaybeObject* CharFromCode(Isolate* isolate,
RUNTIME_FUNCTION(MaybeObject*, Runtime_StringCharCodeAt) {
- NoHandleAllocation ha(isolate);
+ SealHandleScope shs(isolate);
ASSERT(args.length() == 2);
CONVERT_ARG_CHECKED(String, subject, 0);
@@ -2727,7 +2745,7 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_StringCharCodeAt) {
RUNTIME_FUNCTION(MaybeObject*, Runtime_CharFromCode) {
- NoHandleAllocation ha(isolate);
+ SealHandleScope shs(isolate);
ASSERT(args.length() == 1);
return CharFromCode(isolate, args[0]);
}
@@ -2802,7 +2820,8 @@ class FixedArrayBuilder {
}
Handle<JSArray> ToJSArray(Handle<JSArray> target_array) {
- FACTORY->SetContent(target_array, array_);
+ Factory* factory = target_array->GetIsolate()->factory();
+ factory->SetContent(target_array, array_);
target_array->set_length(Smi::FromInt(length_));
return target_array;
}
@@ -2897,7 +2916,7 @@ class ReplacementStringBuilder {
Handle<String> joined_string;
if (is_ascii_) {
Handle<SeqOneByteString> seq = NewRawOneByteString(character_count_);
- AssertNoAllocation no_alloc;
+ DisallowHeapAllocation no_gc;
uint8_t* char_buffer = seq->GetChars();
StringBuilderConcatHelper(*subject_,
char_buffer,
@@ -2907,7 +2926,7 @@ class ReplacementStringBuilder {
} else {
// Non-ASCII.
Handle<SeqTwoByteString> seq = NewRawTwoByteString(character_count_);
- AssertNoAllocation no_alloc;
+ DisallowHeapAllocation no_gc;
uc16* char_buffer = seq->GetChars();
StringBuilderConcatHelper(*subject_,
char_buffer,
@@ -3149,7 +3168,7 @@ bool CompiledReplacement::Compile(Handle<String> replacement,
int capture_count,
int subject_length) {
{
- AssertNoAllocation no_alloc;
+ DisallowHeapAllocation no_gc;
String::FlatContent content = replacement->GetFlatContent();
ASSERT(content.IsFlat());
bool simple = false;
@@ -3300,7 +3319,7 @@ void FindStringIndicesDispatch(Isolate* isolate,
unsigned int limit,
Zone* zone) {
{
- AssertNoAllocation no_gc;
+ DisallowHeapAllocation no_gc;
String::FlatContent subject_content = subject->GetFlatContent();
String::FlatContent pattern_content = pattern->GetFlatContent();
ASSERT(subject_content.IsFlat());
@@ -3769,7 +3788,7 @@ int Runtime::StringMatch(Isolate* isolate,
if (!sub->IsFlat()) FlattenString(sub);
if (!pat->IsFlat()) FlattenString(pat);
- AssertNoAllocation no_heap_allocation; // ensure vectors stay valid
+ DisallowHeapAllocation no_gc; // ensure vectors stay valid
// Extract flattened substrings of cons strings before determining asciiness.
String::FlatContent seq_sub = sub->GetFlatContent();
String::FlatContent seq_pat = pat->GetFlatContent();
@@ -3880,7 +3899,7 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_StringLastIndexOf) {
if (!pat->IsFlat()) FlattenString(pat);
int position = -1;
- AssertNoAllocation no_heap_allocation; // ensure vectors stay valid
+ DisallowHeapAllocation no_gc; // ensure vectors stay valid
String::FlatContent sub_content = sub->GetFlatContent();
String::FlatContent pat_content = pat->GetFlatContent();
@@ -3914,7 +3933,7 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_StringLastIndexOf) {
RUNTIME_FUNCTION(MaybeObject*, Runtime_StringLocaleCompare) {
- NoHandleAllocation ha(isolate);
+ SealHandleScope shs(isolate);
ASSERT(args.length() == 2);
CONVERT_ARG_CHECKED(String, str1, 0);
@@ -3962,7 +3981,7 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_StringLocaleCompare) {
RUNTIME_FUNCTION(MaybeObject*, Runtime_SubString) {
- NoHandleAllocation ha(isolate);
+ SealHandleScope shs(isolate);
ASSERT(args.length() == 3);
CONVERT_ARG_CHECKED(String, value, 0);
@@ -4224,7 +4243,7 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_RegExpExecMultiple) {
RUNTIME_FUNCTION(MaybeObject*, Runtime_NumberToRadixString) {
- NoHandleAllocation ha(isolate);
+ SealHandleScope shs(isolate);
ASSERT(args.length() == 2);
CONVERT_SMI_ARG_CHECKED(radix, 1);
RUNTIME_ASSERT(2 <= radix && radix <= 36);
@@ -4260,7 +4279,7 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_NumberToRadixString) {
RUNTIME_FUNCTION(MaybeObject*, Runtime_NumberToFixed) {
- NoHandleAllocation ha(isolate);
+ SealHandleScope shs(isolate);
ASSERT(args.length() == 2);
CONVERT_DOUBLE_ARG_CHECKED(value, 0);
@@ -4276,7 +4295,7 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_NumberToFixed) {
RUNTIME_FUNCTION(MaybeObject*, Runtime_NumberToExponential) {
- NoHandleAllocation ha(isolate);
+ SealHandleScope shs(isolate);
ASSERT(args.length() == 2);
CONVERT_DOUBLE_ARG_CHECKED(value, 0);
@@ -4292,7 +4311,7 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_NumberToExponential) {
RUNTIME_FUNCTION(MaybeObject*, Runtime_NumberToPrecision) {
- NoHandleAllocation ha(isolate);
+ SealHandleScope shs(isolate);
ASSERT(args.length() == 2);
CONVERT_DOUBLE_ARG_CHECKED(value, 0);
@@ -4429,7 +4448,7 @@ MaybeObject* Runtime::GetObjectProperty(Isolate* isolate,
RUNTIME_FUNCTION(MaybeObject*, Runtime_GetProperty) {
- NoHandleAllocation ha(isolate);
+ SealHandleScope shs(isolate);
ASSERT(args.length() == 2);
Handle<Object> object = args.at<Object>(0);
@@ -4441,7 +4460,7 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_GetProperty) {
// KeyedGetProperty is called from KeyedLoadIC::GenerateGeneric.
RUNTIME_FUNCTION(MaybeObject*, Runtime_KeyedGetProperty) {
- NoHandleAllocation ha(isolate);
+ SealHandleScope shs(isolate);
ASSERT(args.length() == 2);
// Fast cases for getting named properties of the receiver JSObject
@@ -4649,7 +4668,7 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_DefineOrRedefineDataProperty) {
// Return the property value without the access being observable by accessors
// or interceptors.
RUNTIME_FUNCTION(MaybeObject*, Runtime_GetDataProperty) {
- NoHandleAllocation ha(isolate);
+ SealHandleScope shs(isolate);
ASSERT(args.length() == 2);
CONVERT_ARG_HANDLE_CHECKED(JSObject, object, 0);
CONVERT_ARG_HANDLE_CHECKED(Name, key, 1);
@@ -4862,7 +4881,7 @@ MaybeObject* Runtime::DeleteObjectProperty(Isolate* isolate,
RUNTIME_FUNCTION(MaybeObject*, Runtime_SetProperty) {
- NoHandleAllocation ha(isolate);
+ SealHandleScope shs(isolate);
RUNTIME_ASSERT(args.length() == 4 || args.length() == 5);
Handle<Object> object = args.at<Object>(0);
@@ -4901,7 +4920,7 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_TransitionElementsKind) {
RUNTIME_FUNCTION(MaybeObject*, Runtime_TransitionElementsSmiToDouble) {
- NoHandleAllocation ha(isolate);
+ SealHandleScope shs(isolate);
RUNTIME_ASSERT(args.length() == 1);
Handle<Object> object = args.at<Object>(0);
if (object->IsJSObject()) {
@@ -4918,7 +4937,7 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_TransitionElementsSmiToDouble) {
RUNTIME_FUNCTION(MaybeObject*, Runtime_TransitionElementsDoubleToObject) {
- NoHandleAllocation ha(isolate);
+ SealHandleScope shs(isolate);
RUNTIME_ASSERT(args.length() == 1);
Handle<Object> object = args.at<Object>(0);
if (object->IsJSObject()) {
@@ -4938,7 +4957,7 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_TransitionElementsDoubleToObject) {
// This is used to decide if we should transform null and undefined
// into the global object when doing call and apply.
RUNTIME_FUNCTION(MaybeObject*, Runtime_SetNativeFlag) {
- NoHandleAllocation ha(isolate);
+ SealHandleScope shs(isolate);
RUNTIME_ASSERT(args.length() == 1);
Handle<Object> object = args.at<Object>(0);
@@ -5004,7 +5023,7 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_StoreArrayLiteralElement) {
// Check whether debugger and is about to step into the callback that is passed
// to a built-in function such as Array.forEach.
RUNTIME_FUNCTION(MaybeObject*, Runtime_DebugCallbackSupportsStepping) {
- NoHandleAllocation ha(isolate);
+ SealHandleScope shs(isolate);
#ifdef ENABLE_DEBUGGER_SUPPORT
if (!isolate->IsDebuggerActive() || !isolate->debug()->StepInActive()) {
return isolate->heap()->false_value();
@@ -5024,7 +5043,7 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_DebugCallbackSupportsStepping) {
// Set one-shot breakpoints for the callback function that is passed to a
// built-in function such as Array.forEach to enable stepping into the callback.
RUNTIME_FUNCTION(MaybeObject*, Runtime_DebugPrepareStepInIfStepping) {
- NoHandleAllocation ha(isolate);
+ SealHandleScope shs(isolate);
#ifdef ENABLE_DEBUGGER_SUPPORT
Debug* debug = isolate->debug();
if (!debug->IsStepping()) return isolate->heap()->undefined_value();
@@ -5043,7 +5062,7 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_DebugPrepareStepInIfStepping) {
// Set a local property, even if it is READ_ONLY. If the property does not
// exist, it will be added with attributes NONE.
RUNTIME_FUNCTION(MaybeObject*, Runtime_IgnoreAttributesAndSetProperty) {
- NoHandleAllocation ha(isolate);
+ SealHandleScope shs(isolate);
RUNTIME_ASSERT(args.length() == 3 || args.length() == 4);
CONVERT_ARG_CHECKED(JSObject, object, 0);
CONVERT_ARG_CHECKED(Name, name, 1);
@@ -5063,7 +5082,7 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_IgnoreAttributesAndSetProperty) {
RUNTIME_FUNCTION(MaybeObject*, Runtime_DeleteProperty) {
- NoHandleAllocation ha(isolate);
+ SealHandleScope shs(isolate);
ASSERT(args.length() == 3);
CONVERT_ARG_CHECKED(JSReceiver, object, 0);
@@ -5094,7 +5113,7 @@ static Object* HasLocalPropertyImplementation(Isolate* isolate,
RUNTIME_FUNCTION(MaybeObject*, Runtime_HasLocalProperty) {
- NoHandleAllocation ha(isolate);
+ SealHandleScope shs(isolate);
ASSERT(args.length() == 2);
CONVERT_ARG_CHECKED(Name, key, 1);
@@ -5133,7 +5152,7 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_HasLocalProperty) {
RUNTIME_FUNCTION(MaybeObject*, Runtime_HasProperty) {
- NoHandleAllocation na(isolate);
+ SealHandleScope shs(isolate);
ASSERT(args.length() == 2);
CONVERT_ARG_CHECKED(JSReceiver, receiver, 0);
CONVERT_ARG_CHECKED(Name, key, 1);
@@ -5145,7 +5164,7 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_HasProperty) {
RUNTIME_FUNCTION(MaybeObject*, Runtime_HasElement) {
- NoHandleAllocation na(isolate);
+ SealHandleScope shs(isolate);
ASSERT(args.length() == 2);
CONVERT_ARG_CHECKED(JSReceiver, receiver, 0);
CONVERT_SMI_ARG_CHECKED(index, 1);
@@ -5157,7 +5176,7 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_HasElement) {
RUNTIME_FUNCTION(MaybeObject*, Runtime_IsPropertyEnumerable) {
- NoHandleAllocation ha(isolate);
+ SealHandleScope shs(isolate);
ASSERT(args.length() == 2);
CONVERT_ARG_CHECKED(JSObject, object, 0);
@@ -5185,7 +5204,7 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_GetPropertyNames) {
// have none, the map of the object. This is used to speed up
// the check for deletions during a for-in.
RUNTIME_FUNCTION(MaybeObject*, Runtime_GetPropertyNamesFast) {
- NoHandleAllocation ha(isolate);
+ SealHandleScope shs(isolate);
ASSERT(args.length() == 1);
CONVERT_ARG_CHECKED(JSReceiver, raw_object, 0);
@@ -5424,7 +5443,7 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_LocalKeys) {
RUNTIME_FUNCTION(MaybeObject*, Runtime_GetArgumentsProperty) {
- NoHandleAllocation ha(isolate);
+ SealHandleScope shs(isolate);
ASSERT(args.length() == 1);
// Compute the frame holding the arguments.
@@ -5483,7 +5502,7 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_GetArgumentsProperty) {
RUNTIME_FUNCTION(MaybeObject*, Runtime_ToFastProperties) {
- NoHandleAllocation ha(isolate);
+ SealHandleScope shs(isolate);
ASSERT(args.length() == 1);
Object* object = args[0];
return (object->IsJSObject() && !object->IsGlobalObject())
@@ -5493,7 +5512,7 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_ToFastProperties) {
RUNTIME_FUNCTION(MaybeObject*, Runtime_ToBool) {
- NoHandleAllocation ha(isolate);
+ SealHandleScope shs(isolate);
ASSERT(args.length() == 1);
return isolate->heap()->ToBoolean(args[0]->BooleanValue());
@@ -5503,7 +5522,7 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_ToBool) {
// Returns the type string of a value; see ECMA-262, 11.4.3 (p 47).
// Possible optimizations: put the type string into the oddballs.
RUNTIME_FUNCTION(MaybeObject*, Runtime_Typeof) {
- NoHandleAllocation ha(isolate);
+ SealHandleScope shs(isolate);
Object* obj = args[0];
if (obj->IsNumber()) return isolate->heap()->number_string();
@@ -5567,7 +5586,7 @@ static int ParseDecimalInteger(const uint8_t*s, int from, int to) {
RUNTIME_FUNCTION(MaybeObject*, Runtime_StringToNumber) {
- NoHandleAllocation ha(isolate);
+ SealHandleScope shs(isolate);
ASSERT(args.length() == 1);
CONVERT_ARG_CHECKED(String, subject, 0);
subject->TryFlatten();
@@ -5622,7 +5641,7 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_StringToNumber) {
RUNTIME_FUNCTION(MaybeObject*, Runtime_NewString) {
- NoHandleAllocation ha(isolate);
+ SealHandleScope shs(isolate);
CONVERT_SMI_ARG_CHECKED(length, 0);
CONVERT_BOOLEAN_ARG_CHECKED(is_one_byte, 1);
if (length == 0) return isolate->heap()->empty_string();
@@ -5647,11 +5666,10 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_URIEscape) {
ASSERT(args.length() == 1);
CONVERT_ARG_HANDLE_CHECKED(String, source, 0);
Handle<String> string = FlattenGetString(source);
- String::FlatContent content = string->GetFlatContent();
- ASSERT(content.IsFlat());
- Handle<String> result =
- content.IsAscii() ? URIEscape::Escape<uint8_t>(isolate, source)
- : URIEscape::Escape<uc16>(isolate, source);
+ ASSERT(string->IsFlat());
+ Handle<String> result = string->IsOneByteRepresentationUnderneath()
+ ? URIEscape::Escape<uint8_t>(isolate, source)
+ : URIEscape::Escape<uc16>(isolate, source);
if (result.is_null()) return Failure::OutOfMemoryException(0x12);
return *result;
}
@@ -5662,10 +5680,10 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_URIUnescape) {
ASSERT(args.length() == 1);
CONVERT_ARG_HANDLE_CHECKED(String, source, 0);
Handle<String> string = FlattenGetString(source);
- String::FlatContent content = string->GetFlatContent();
- ASSERT(content.IsFlat());
- return content.IsAscii() ? *URIUnescape::Unescape<uint8_t>(isolate, source)
- : *URIUnescape::Unescape<uc16>(isolate, source);
+ ASSERT(string->IsFlat());
+ return string->IsOneByteRepresentationUnderneath()
+ ? *URIUnescape::Unescape<uint8_t>(isolate, source)
+ : *URIUnescape::Unescape<uc16>(isolate, source);
}
@@ -5686,7 +5704,7 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_BasicJSONStringify) {
RUNTIME_FUNCTION(MaybeObject*, Runtime_StringParseInt) {
- NoHandleAllocation ha(isolate);
+ SealHandleScope shs(isolate);
CONVERT_ARG_CHECKED(String, s, 0);
CONVERT_SMI_ARG_CHECKED(radix, 1);
@@ -5700,7 +5718,7 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_StringParseInt) {
RUNTIME_FUNCTION(MaybeObject*, Runtime_StringParseFloat) {
- NoHandleAllocation ha(isolate);
+ SealHandleScope shs(isolate);
CONVERT_ARG_CHECKED(String, str, 0);
// ECMA-262 section 15.1.2.3, empty string is NaN
@@ -5955,7 +5973,7 @@ MUST_USE_RESULT static MaybeObject* ConvertCase(
Arguments args,
Isolate* isolate,
unibrow::Mapping<typename ConvertTraits::UnibrowConverter, 128>* mapping) {
- NoHandleAllocation ha(isolate);
+ SealHandleScope shs(isolate);
CONVERT_ARG_CHECKED(String, s, 0);
s = s->TryFlattenGetString();
@@ -6022,7 +6040,7 @@ static inline bool IsTrimWhiteSpace(unibrow::uchar c) {
RUNTIME_FUNCTION(MaybeObject*, Runtime_StringTrim) {
- NoHandleAllocation ha(isolate);
+ SealHandleScope shs(isolate);
ASSERT(args.length() == 3);
CONVERT_ARG_CHECKED(String, s, 0);
@@ -6148,7 +6166,7 @@ static int CopyCachedAsciiCharsToArray(Heap* heap,
const uint8_t* chars,
FixedArray* elements,
int length) {
- AssertNoAllocation no_gc;
+ DisallowHeapAllocation no_gc;
FixedArray* ascii_cache = heap->single_character_string_cache();
Object* undefined = heap->undefined_value();
int i;
@@ -6194,6 +6212,7 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_StringToArray) {
if (!maybe_obj->ToObject(&obj)) return maybe_obj;
}
elements = Handle<FixedArray>(FixedArray::cast(obj), isolate);
+ DisallowHeapAllocation no_gc;
String::FlatContent content = s->GetFlatContent();
if (content.IsAscii()) {
Vector<const uint8_t> chars = content.ToOneByteVector();
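
The DisallowHeapAllocation added here is the moving-GC counterpart of SealHandleScope: String::FlatContent hands out raw pointers into the heap, so any allocation that can trigger a collection may move the characters out from under them. The hazard is the same as with any growable buffer; a standalone analogy, deliberately not V8 code:

    #include <vector>

    // The class of bug DisallowHeapAllocation exists to catch:
    int UnsafeFirst(std::vector<int>& v) {
      const int* data = v.data();  // raw pointer into the current buffer
      v.push_back(0);              // may reallocate; 'data' now dangles
      return data[0];              // undefined behavior
    }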
@@ -6228,7 +6247,7 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_StringToArray) {
RUNTIME_FUNCTION(MaybeObject*, Runtime_NewStringWrapper) {
- NoHandleAllocation ha(isolate);
+ SealHandleScope shs(isolate);
ASSERT(args.length() == 1);
CONVERT_ARG_CHECKED(String, value, 0);
return value->ToObject();
@@ -6243,7 +6262,7 @@ bool Runtime::IsUpperCaseChar(RuntimeState* runtime_state, uint16_t ch) {
RUNTIME_FUNCTION(MaybeObject*, Runtime_NumberToString) {
- NoHandleAllocation ha(isolate);
+ SealHandleScope shs(isolate);
ASSERT(args.length() == 1);
Object* number = args[0];
@@ -6254,7 +6273,7 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_NumberToString) {
RUNTIME_FUNCTION(MaybeObject*, Runtime_NumberToStringSkipCache) {
- NoHandleAllocation ha(isolate);
+ SealHandleScope shs(isolate);
ASSERT(args.length() == 1);
Object* number = args[0];
@@ -6266,7 +6285,7 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_NumberToStringSkipCache) {
RUNTIME_FUNCTION(MaybeObject*, Runtime_NumberToInteger) {
- NoHandleAllocation ha(isolate);
+ SealHandleScope shs(isolate);
ASSERT(args.length() == 1);
CONVERT_DOUBLE_ARG_CHECKED(number, 0);
@@ -6281,7 +6300,7 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_NumberToInteger) {
// ES6 draft 9.1.11
RUNTIME_FUNCTION(MaybeObject*, Runtime_NumberToPositiveInteger) {
- NoHandleAllocation ha(isolate);
+ SealHandleScope shs(isolate);
ASSERT(args.length() == 1);
CONVERT_DOUBLE_ARG_CHECKED(number, 0);
@@ -6298,7 +6317,7 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_NumberToPositiveInteger) {
RUNTIME_FUNCTION(MaybeObject*, Runtime_NumberToIntegerMapMinusZero) {
- NoHandleAllocation ha(isolate);
+ SealHandleScope shs(isolate);
ASSERT(args.length() == 1);
CONVERT_DOUBLE_ARG_CHECKED(number, 0);
@@ -6317,7 +6336,7 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_NumberToIntegerMapMinusZero) {
RUNTIME_FUNCTION(MaybeObject*, Runtime_NumberToJSUint32) {
- NoHandleAllocation ha(isolate);
+ SealHandleScope shs(isolate);
ASSERT(args.length() == 1);
CONVERT_NUMBER_CHECKED(int32_t, number, Uint32, args[0]);
@@ -6326,7 +6345,7 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_NumberToJSUint32) {
RUNTIME_FUNCTION(MaybeObject*, Runtime_NumberToJSInt32) {
- NoHandleAllocation ha(isolate);
+ SealHandleScope shs(isolate);
ASSERT(args.length() == 1);
CONVERT_DOUBLE_ARG_CHECKED(number, 0);
@@ -6342,7 +6361,7 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_NumberToJSInt32) {
// Converts a Number to a Smi, if possible. Returns NaN if the number is not
// a small integer.
RUNTIME_FUNCTION(MaybeObject*, Runtime_NumberToSmi) {
- NoHandleAllocation ha(isolate);
+ SealHandleScope shs(isolate);
ASSERT(args.length() == 1);
Object* obj = args[0];
@@ -6361,14 +6380,14 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_NumberToSmi) {
RUNTIME_FUNCTION(MaybeObject*, Runtime_AllocateHeapNumber) {
- NoHandleAllocation ha(isolate);
+ SealHandleScope shs(isolate);
ASSERT(args.length() == 0);
return isolate->heap()->AllocateHeapNumber(0);
}
RUNTIME_FUNCTION(MaybeObject*, Runtime_NumberAdd) {
- NoHandleAllocation ha(isolate);
+ SealHandleScope shs(isolate);
ASSERT(args.length() == 2);
CONVERT_DOUBLE_ARG_CHECKED(x, 0);
@@ -6378,7 +6397,7 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_NumberAdd) {
RUNTIME_FUNCTION(MaybeObject*, Runtime_NumberSub) {
- NoHandleAllocation ha(isolate);
+ SealHandleScope shs(isolate);
ASSERT(args.length() == 2);
CONVERT_DOUBLE_ARG_CHECKED(x, 0);
@@ -6388,7 +6407,7 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_NumberSub) {
RUNTIME_FUNCTION(MaybeObject*, Runtime_NumberMul) {
- NoHandleAllocation ha(isolate);
+ SealHandleScope shs(isolate);
ASSERT(args.length() == 2);
CONVERT_DOUBLE_ARG_CHECKED(x, 0);
@@ -6398,7 +6417,7 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_NumberMul) {
RUNTIME_FUNCTION(MaybeObject*, Runtime_NumberUnaryMinus) {
- NoHandleAllocation ha(isolate);
+ SealHandleScope shs(isolate);
ASSERT(args.length() == 1);
CONVERT_DOUBLE_ARG_CHECKED(x, 0);
@@ -6407,7 +6426,7 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_NumberUnaryMinus) {
RUNTIME_FUNCTION(MaybeObject*, Runtime_NumberAlloc) {
- NoHandleAllocation ha(isolate);
+ SealHandleScope shs(isolate);
ASSERT(args.length() == 0);
return isolate->heap()->NumberFromDouble(9876543210.0);
@@ -6415,7 +6434,7 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_NumberAlloc) {
RUNTIME_FUNCTION(MaybeObject*, Runtime_NumberDiv) {
- NoHandleAllocation ha(isolate);
+ SealHandleScope shs(isolate);
ASSERT(args.length() == 2);
CONVERT_DOUBLE_ARG_CHECKED(x, 0);
@@ -6425,7 +6444,7 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_NumberDiv) {
RUNTIME_FUNCTION(MaybeObject*, Runtime_NumberMod) {
- NoHandleAllocation ha(isolate);
+ SealHandleScope shs(isolate);
ASSERT(args.length() == 2);
CONVERT_DOUBLE_ARG_CHECKED(x, 0);
@@ -6438,7 +6457,7 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_NumberMod) {
RUNTIME_FUNCTION(MaybeObject*, Runtime_NumberImul) {
- NoHandleAllocation ha(isolate);
+ SealHandleScope shs(isolate);
ASSERT(args.length() == 2);
CONVERT_NUMBER_CHECKED(int32_t, x, Int32, args[0]);
@@ -6448,7 +6467,7 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_NumberImul) {
RUNTIME_FUNCTION(MaybeObject*, Runtime_StringAdd) {
- NoHandleAllocation ha(isolate);
+ SealHandleScope shs(isolate);
ASSERT(args.length() == 2);
CONVERT_ARG_CHECKED(String, str1, 0);
CONVERT_ARG_CHECKED(String, str2, 1);
@@ -6497,7 +6516,7 @@ static inline void StringBuilderConcatHelper(String* special,
RUNTIME_FUNCTION(MaybeObject*, Runtime_StringBuilderConcat) {
- NoHandleAllocation ha(isolate);
+ SealHandleScope shs(isolate);
ASSERT(args.length() == 3);
CONVERT_ARG_CHECKED(JSArray, array, 0);
if (!args[1]->IsSmi()) {
@@ -6614,7 +6633,7 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_StringBuilderConcat) {
RUNTIME_FUNCTION(MaybeObject*, Runtime_StringBuilderJoin) {
- NoHandleAllocation ha(isolate);
+ SealHandleScope shs(isolate);
ASSERT(args.length() == 3);
CONVERT_ARG_CHECKED(JSArray, array, 0);
if (!args[1]->IsSmi()) {
@@ -6739,7 +6758,7 @@ static void JoinSparseArrayWithSeparator(FixedArray* elements,
RUNTIME_FUNCTION(MaybeObject*, Runtime_SparseJoinWithSeparator) {
- NoHandleAllocation ha(isolate);
+ SealHandleScope shs(isolate);
ASSERT(args.length() == 3);
CONVERT_ARG_CHECKED(JSArray, elements_array, 0);
RUNTIME_ASSERT(elements_array->HasFastSmiOrObjectElements());
@@ -6835,7 +6854,7 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_SparseJoinWithSeparator) {
RUNTIME_FUNCTION(MaybeObject*, Runtime_NumberOr) {
- NoHandleAllocation ha(isolate);
+ SealHandleScope shs(isolate);
ASSERT(args.length() == 2);
CONVERT_NUMBER_CHECKED(int32_t, x, Int32, args[0]);
@@ -6845,7 +6864,7 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_NumberOr) {
RUNTIME_FUNCTION(MaybeObject*, Runtime_NumberAnd) {
- NoHandleAllocation ha(isolate);
+ SealHandleScope shs(isolate);
ASSERT(args.length() == 2);
CONVERT_NUMBER_CHECKED(int32_t, x, Int32, args[0]);
@@ -6855,7 +6874,7 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_NumberAnd) {
RUNTIME_FUNCTION(MaybeObject*, Runtime_NumberXor) {
- NoHandleAllocation ha(isolate);
+ SealHandleScope shs(isolate);
ASSERT(args.length() == 2);
CONVERT_NUMBER_CHECKED(int32_t, x, Int32, args[0]);
@@ -6865,7 +6884,7 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_NumberXor) {
RUNTIME_FUNCTION(MaybeObject*, Runtime_NumberNot) {
- NoHandleAllocation ha(isolate);
+ SealHandleScope shs(isolate);
ASSERT(args.length() == 1);
CONVERT_NUMBER_CHECKED(int32_t, x, Int32, args[0]);
@@ -6874,7 +6893,7 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_NumberNot) {
RUNTIME_FUNCTION(MaybeObject*, Runtime_NumberShl) {
- NoHandleAllocation ha(isolate);
+ SealHandleScope shs(isolate);
ASSERT(args.length() == 2);
CONVERT_NUMBER_CHECKED(int32_t, x, Int32, args[0]);
@@ -6884,7 +6903,7 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_NumberShl) {
RUNTIME_FUNCTION(MaybeObject*, Runtime_NumberShr) {
- NoHandleAllocation ha(isolate);
+ SealHandleScope shs(isolate);
ASSERT(args.length() == 2);
CONVERT_NUMBER_CHECKED(uint32_t, x, Uint32, args[0]);
@@ -6894,7 +6913,7 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_NumberShr) {
RUNTIME_FUNCTION(MaybeObject*, Runtime_NumberSar) {
- NoHandleAllocation ha(isolate);
+ SealHandleScope shs(isolate);
ASSERT(args.length() == 2);
CONVERT_NUMBER_CHECKED(int32_t, x, Int32, args[0]);
@@ -6904,7 +6923,7 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_NumberSar) {
RUNTIME_FUNCTION(MaybeObject*, Runtime_NumberEquals) {
- NoHandleAllocation ha(isolate);
+ SealHandleScope shs(isolate);
ASSERT(args.length() == 2);
CONVERT_DOUBLE_ARG_CHECKED(x, 0);
@@ -6923,7 +6942,7 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_NumberEquals) {
RUNTIME_FUNCTION(MaybeObject*, Runtime_StringEquals) {
- NoHandleAllocation ha(isolate);
+ SealHandleScope shs(isolate);
ASSERT(args.length() == 2);
CONVERT_ARG_CHECKED(String, x, 0);
@@ -6941,7 +6960,7 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_StringEquals) {
RUNTIME_FUNCTION(MaybeObject*, Runtime_NumberCompare) {
- NoHandleAllocation ha(isolate);
+ SealHandleScope shs(isolate);
ASSERT(args.length() == 3);
CONVERT_DOUBLE_ARG_CHECKED(x, 0);
@@ -6956,7 +6975,7 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_NumberCompare) {
// Compare two Smis as if they were converted to strings and then
// compared lexicographically.
RUNTIME_FUNCTION(MaybeObject*, Runtime_SmiLexicographicCompare) {
- NoHandleAllocation ha(isolate);
+ SealHandleScope shs(isolate);
ASSERT(args.length() == 2);
CONVERT_SMI_ARG_CHECKED(x_value, 0);
CONVERT_SMI_ARG_CHECKED(y_value, 1);
@@ -7060,6 +7079,7 @@ static Object* FlatStringCompare(String* x, String* y) {
equal_prefix_result = Smi::FromInt(LESS);
}
int r;
+ DisallowHeapAllocation no_gc;
String::FlatContent x_content = x->GetFlatContent();
String::FlatContent y_content = y->GetFlatContent();
if (x_content.IsAscii()) {
@@ -7094,7 +7114,7 @@ static Object* FlatStringCompare(String* x, String* y) {
RUNTIME_FUNCTION(MaybeObject*, Runtime_StringCompare) {
- NoHandleAllocation ha(isolate);
+ SealHandleScope shs(isolate);
ASSERT(args.length() == 2);
CONVERT_ARG_CHECKED(String, x, 0);
@@ -7129,7 +7149,7 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_StringCompare) {
RUNTIME_FUNCTION(MaybeObject*, Runtime_Math_acos) {
- NoHandleAllocation ha(isolate);
+ SealHandleScope shs(isolate);
ASSERT(args.length() == 1);
isolate->counters()->math_acos()->Increment();
@@ -7139,7 +7159,7 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_Math_acos) {
RUNTIME_FUNCTION(MaybeObject*, Runtime_Math_asin) {
- NoHandleAllocation ha(isolate);
+ SealHandleScope shs(isolate);
ASSERT(args.length() == 1);
isolate->counters()->math_asin()->Increment();
@@ -7149,7 +7169,7 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_Math_asin) {
RUNTIME_FUNCTION(MaybeObject*, Runtime_Math_atan) {
- NoHandleAllocation ha(isolate);
+ SealHandleScope shs(isolate);
ASSERT(args.length() == 1);
isolate->counters()->math_atan()->Increment();
@@ -7162,7 +7182,7 @@ static const double kPiDividedBy4 = 0.78539816339744830962;
RUNTIME_FUNCTION(MaybeObject*, Runtime_Math_atan2) {
- NoHandleAllocation ha(isolate);
+ SealHandleScope shs(isolate);
ASSERT(args.length() == 2);
isolate->counters()->math_atan2()->Increment();
@@ -7185,7 +7205,7 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_Math_atan2) {
RUNTIME_FUNCTION(MaybeObject*, Runtime_Math_ceil) {
- NoHandleAllocation ha(isolate);
+ SealHandleScope shs(isolate);
ASSERT(args.length() == 1);
isolate->counters()->math_ceil()->Increment();
@@ -7195,7 +7215,7 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_Math_ceil) {
RUNTIME_FUNCTION(MaybeObject*, Runtime_Math_cos) {
- NoHandleAllocation ha(isolate);
+ SealHandleScope shs(isolate);
ASSERT(args.length() == 1);
isolate->counters()->math_cos()->Increment();
@@ -7205,7 +7225,7 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_Math_cos) {
RUNTIME_FUNCTION(MaybeObject*, Runtime_Math_exp) {
- NoHandleAllocation ha(isolate);
+ SealHandleScope shs(isolate);
ASSERT(args.length() == 1);
isolate->counters()->math_exp()->Increment();
@@ -7216,7 +7236,7 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_Math_exp) {
RUNTIME_FUNCTION(MaybeObject*, Runtime_Math_floor) {
- NoHandleAllocation ha(isolate);
+ SealHandleScope shs(isolate);
ASSERT(args.length() == 1);
isolate->counters()->math_floor()->Increment();
@@ -7226,7 +7246,7 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_Math_floor) {
RUNTIME_FUNCTION(MaybeObject*, Runtime_Math_log) {
- NoHandleAllocation ha(isolate);
+ SealHandleScope shs(isolate);
ASSERT(args.length() == 1);
isolate->counters()->math_log()->Increment();
@@ -7237,7 +7257,7 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_Math_log) {
// Slow version of Math.pow. We check for fast paths for special cases.
// Used if SSE2/VFP3 is not available.
RUNTIME_FUNCTION(MaybeObject*, Runtime_Math_pow) {
- NoHandleAllocation ha(isolate);
+ SealHandleScope shs(isolate);
ASSERT(args.length() == 2);
isolate->counters()->math_pow()->Increment();
@@ -7259,7 +7279,7 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_Math_pow) {
// Fast version of Math.pow if we know that y is not an integer and y is not
// -0.5 or 0.5. Used as slow case from full codegen.
RUNTIME_FUNCTION(MaybeObject*, Runtime_Math_pow_cfunction) {
- NoHandleAllocation ha(isolate);
+ SealHandleScope shs(isolate);
ASSERT(args.length() == 2);
isolate->counters()->math_pow()->Increment();
@@ -7276,7 +7296,7 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_Math_pow_cfunction) {
RUNTIME_FUNCTION(MaybeObject*, Runtime_RoundNumber) {
- NoHandleAllocation ha(isolate);
+ SealHandleScope shs(isolate);
ASSERT(args.length() == 1);
isolate->counters()->math_round()->Increment();
@@ -7319,7 +7339,7 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_RoundNumber) {
RUNTIME_FUNCTION(MaybeObject*, Runtime_Math_sin) {
- NoHandleAllocation ha(isolate);
+ SealHandleScope shs(isolate);
ASSERT(args.length() == 1);
isolate->counters()->math_sin()->Increment();
@@ -7329,7 +7349,7 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_Math_sin) {
RUNTIME_FUNCTION(MaybeObject*, Runtime_Math_sqrt) {
- NoHandleAllocation ha(isolate);
+ SealHandleScope shs(isolate);
ASSERT(args.length() == 1);
isolate->counters()->math_sqrt()->Increment();
@@ -7339,7 +7359,7 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_Math_sqrt) {
RUNTIME_FUNCTION(MaybeObject*, Runtime_Math_tan) {
- NoHandleAllocation ha(isolate);
+ SealHandleScope shs(isolate);
ASSERT(args.length() == 1);
isolate->counters()->math_tan()->Increment();
@@ -7349,7 +7369,7 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_Math_tan) {
RUNTIME_FUNCTION(MaybeObject*, Runtime_DateMakeDay) {
- NoHandleAllocation ha(isolate);
+ SealHandleScope shs(isolate);
ASSERT(args.length() == 2);
CONVERT_SMI_ARG_CHECKED(year, 0);
@@ -7492,7 +7512,7 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_NewArgumentsFast) {
RUNTIME_FUNCTION(MaybeObject*, Runtime_NewStrictArgumentsFast) {
- NoHandleAllocation ha(isolate);
+ SealHandleScope shs(isolate);
ASSERT(args.length() == 3);
JSFunction* callee = JSFunction::cast(args[0]);
@@ -7512,7 +7532,7 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_NewStrictArgumentsFast) {
if (!maybe_obj->ToObject(&obj)) return maybe_obj;
}
- AssertNoAllocation no_gc;
+ DisallowHeapAllocation no_gc;
FixedArray* array = reinterpret_cast<FixedArray*>(obj);
array->set_map_no_write_barrier(isolate->heap()->fixed_array_map());
array->set_length(length);
@@ -7757,7 +7777,7 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_NewObject) {
// called using 'new' and creates a new JSFunction object that
// is returned. The receiver object is only used for error
// reporting if an error occurs when constructing the new
- // JSFunction. FACTORY->NewJSObject() should not be used to
+ // JSFunction. Factory::NewJSObject() should not be used to
// allocate JSFunctions since it does not properly initialize
// the shared part of the function. Since the receiver is
// ignored anyway, we use the global object as the receiver
@@ -7935,7 +7955,7 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_NotifyStubFailure) {
HandleScope scope(isolate);
ASSERT(args.length() == 0);
Deoptimizer* deoptimizer = Deoptimizer::Grab(isolate);
- ASSERT(isolate->heap()->IsAllocationAllowed());
+ ASSERT(AllowHeapAllocation::IsAllowed());
delete deoptimizer;
return isolate->heap()->undefined_value();
}
@@ -7948,7 +7968,7 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_NotifyDeoptimized) {
Deoptimizer::BailoutType type =
static_cast<Deoptimizer::BailoutType>(args.smi_at(0));
Deoptimizer* deoptimizer = Deoptimizer::Grab(isolate);
- ASSERT(isolate->heap()->IsAllocationAllowed());
+ ASSERT(AllowHeapAllocation::IsAllowed());
ASSERT(deoptimizer->compiled_code_kind() == Code::OPTIMIZED_FUNCTION);
@@ -8009,7 +8029,7 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_NotifyDeoptimized) {
RUNTIME_FUNCTION(MaybeObject*, Runtime_NotifyOSR) {
- NoHandleAllocation ha(isolate);
+ SealHandleScope shs(isolate);
Deoptimizer* deoptimizer = Deoptimizer::Grab(isolate);
delete deoptimizer;
return isolate->heap()->undefined_value();
@@ -8042,7 +8062,7 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_ClearFunctionTypeFeedback) {
RUNTIME_FUNCTION(MaybeObject*, Runtime_RunningInSimulator) {
- NoHandleAllocation ha(isolate);
+ SealHandleScope shs(isolate);
#if defined(USE_SIMULATOR)
return isolate->heap()->true_value();
#else
@@ -8241,14 +8261,14 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_CompileForOnStackReplacement) {
RUNTIME_FUNCTION(MaybeObject*, Runtime_CheckIsBootstrapping) {
- NoHandleAllocation ha(isolate);
+ SealHandleScope shs(isolate);
RUNTIME_ASSERT(isolate->bootstrapper()->IsActive());
return isolate->heap()->undefined_value();
}
RUNTIME_FUNCTION(MaybeObject*, Runtime_GetRootNaN) {
- NoHandleAllocation ha(isolate);
+ SealHandleScope shs(isolate);
RUNTIME_ASSERT(isolate->bootstrapper()->IsActive());
return isolate->heap()->nan_value();
}
@@ -8342,7 +8362,7 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_GetConstructorDelegate) {
RUNTIME_FUNCTION(MaybeObject*, Runtime_NewGlobalContext) {
- NoHandleAllocation ha(isolate);
+ SealHandleScope shs(isolate);
ASSERT(args.length() == 2);
CONVERT_ARG_CHECKED(JSFunction, function, 0);
@@ -8362,7 +8382,7 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_NewGlobalContext) {
RUNTIME_FUNCTION(MaybeObject*, Runtime_NewFunctionContext) {
- NoHandleAllocation ha(isolate);
+ SealHandleScope shs(isolate);
ASSERT(args.length() == 1);
CONVERT_ARG_CHECKED(JSFunction, function, 0);
@@ -8379,7 +8399,7 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_NewFunctionContext) {
RUNTIME_FUNCTION(MaybeObject*, Runtime_PushWithContext) {
- NoHandleAllocation ha(isolate);
+ SealHandleScope shs(isolate);
ASSERT(args.length() == 2);
JSObject* extension_object;
if (args[0]->IsJSObject()) {
@@ -8423,7 +8443,7 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_PushWithContext) {
RUNTIME_FUNCTION(MaybeObject*, Runtime_PushCatchContext) {
- NoHandleAllocation ha(isolate);
+ SealHandleScope shs(isolate);
ASSERT(args.length() == 3);
String* name = String::cast(args[0]);
Object* thrown_object = args[1];
@@ -8449,7 +8469,7 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_PushCatchContext) {
RUNTIME_FUNCTION(MaybeObject*, Runtime_PushBlockContext) {
- NoHandleAllocation ha(isolate);
+ SealHandleScope shs(isolate);
ASSERT(args.length() == 2);
ScopeInfo* scope_info = ScopeInfo::cast(args[0]);
JSFunction* function;
@@ -8473,7 +8493,7 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_PushBlockContext) {
RUNTIME_FUNCTION(MaybeObject*, Runtime_IsJSModule) {
- NoHandleAllocation ha(isolate);
+ SealHandleScope shs(isolate);
ASSERT(args.length() == 1);
Object* obj = args[0];
return isolate->heap()->ToBoolean(obj->IsJSModule());
@@ -8481,7 +8501,7 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_IsJSModule) {
RUNTIME_FUNCTION(MaybeObject*, Runtime_PushModuleContext) {
- NoHandleAllocation ha(isolate);
+ SealHandleScope shs(isolate);
ASSERT(args.length() == 2);
CONVERT_SMI_ARG_CHECKED(index, 0);
@@ -8865,7 +8885,7 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_ReThrow) {
RUNTIME_FUNCTION(MaybeObject*, Runtime_PromoteScheduledException) {
- NoHandleAllocation ha(isolate);
+ SealHandleScope shs(isolate);
ASSERT_EQ(0, args.length());
return isolate->PromoteScheduledException();
}
@@ -8893,12 +8913,12 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_ThrowNotDateError) {
RUNTIME_FUNCTION(MaybeObject*, Runtime_StackGuard) {
- NoHandleAllocation ha(isolate);
+ SealHandleScope shs(isolate);
ASSERT(args.length() == 0);
// First check if this is a real stack overflow.
if (isolate->stack_guard()->IsStackOverflow()) {
- NoHandleAllocation na(isolate);
+ SealHandleScope shs(isolate);
return isolate->StackOverflow();
}
@@ -8907,7 +8927,7 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_StackGuard) {
RUNTIME_FUNCTION(MaybeObject*, Runtime_Interrupt) {
- NoHandleAllocation ha(isolate);
+ SealHandleScope shs(isolate);
ASSERT(args.length() == 0);
return Execution::HandleStackGuardInterrupt(isolate);
}
@@ -8943,7 +8963,7 @@ static void PrintTransition(Isolate* isolate, Object* result) {
RUNTIME_FUNCTION(MaybeObject*, Runtime_TraceEnter) {
- NoHandleAllocation ha(isolate);
+ SealHandleScope shs(isolate);
ASSERT(args.length() == 0);
PrintTransition(isolate, NULL);
return isolate->heap()->undefined_value();
@@ -8951,14 +8971,14 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_TraceEnter) {
RUNTIME_FUNCTION(MaybeObject*, Runtime_TraceExit) {
- NoHandleAllocation ha(isolate);
+ SealHandleScope shs(isolate);
PrintTransition(isolate, args[0]);
return args[0]; // return TOS
}
RUNTIME_FUNCTION(MaybeObject*, Runtime_DebugPrint) {
- NoHandleAllocation ha(isolate);
+ SealHandleScope shs(isolate);
ASSERT(args.length() == 1);
#ifdef DEBUG
@@ -8989,7 +9009,7 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_DebugPrint) {
RUNTIME_FUNCTION(MaybeObject*, Runtime_DebugTrace) {
- NoHandleAllocation ha(isolate);
+ SealHandleScope shs(isolate);
ASSERT(args.length() == 0);
isolate->PrintStack(stdout);
return isolate->heap()->undefined_value();
@@ -8997,7 +9017,7 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_DebugTrace) {
RUNTIME_FUNCTION(MaybeObject*, Runtime_DateCurrentTime) {
- NoHandleAllocation ha(isolate);
+ SealHandleScope shs(isolate);
ASSERT(args.length() == 0);
// According to ECMA-262, section 15.9.1, page 117, the precision of
@@ -9023,7 +9043,7 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_DateParseString) {
if (maybe_result_array->IsFailure()) return maybe_result_array;
RUNTIME_ASSERT(output->HasFastObjectElements());
- AssertNoAllocation no_allocation;
+ DisallowHeapAllocation no_gc;
FixedArray* output_array = FixedArray::cast(output->elements());
RUNTIME_ASSERT(output_array->length() >= DateParser::OUTPUT_SIZE);
@@ -9049,7 +9069,7 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_DateParseString) {
RUNTIME_FUNCTION(MaybeObject*, Runtime_DateLocalTimezone) {
- NoHandleAllocation ha(isolate);
+ SealHandleScope shs(isolate);
ASSERT(args.length() == 1);
CONVERT_DOUBLE_ARG_CHECKED(x, 0);
@@ -9060,7 +9080,7 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_DateLocalTimezone) {
RUNTIME_FUNCTION(MaybeObject*, Runtime_DateToUTC) {
- NoHandleAllocation ha(isolate);
+ SealHandleScope shs(isolate);
ASSERT(args.length() == 1);
CONVERT_DOUBLE_ARG_CHECKED(x, 0);
@@ -9071,7 +9091,7 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_DateToUTC) {
RUNTIME_FUNCTION(MaybeObject*, Runtime_GlobalReceiver) {
- NoHandleAllocation ha(isolate);
+ SealHandleScope shs(isolate);
ASSERT(args.length() == 1);
Object* global = args[0];
if (!global->IsJSGlobalObject()) return isolate->heap()->null_value();
@@ -9214,20 +9234,25 @@ RUNTIME_FUNCTION(ObjectPair, Runtime_ResolvePossiblyDirectEval) {
}
-RUNTIME_FUNCTION(MaybeObject*, Runtime_AllocateInNewSpace) {
- // Allocate a block of memory in NewSpace (filled with a filler).
- // Use as fallback for allocation in generated code when NewSpace
+static MaybeObject* Allocate(Isolate* isolate,
+ int size,
+ AllocationSpace space) {
+ // Allocate a block of memory in the given space (filled with a filler).
+ // Use as fallback for allocation in generated code when the space
// is full.
- NoHandleAllocation ha(isolate);
- ASSERT(args.length() == 1);
- CONVERT_ARG_HANDLE_CHECKED(Smi, size_smi, 0);
- int size = size_smi->value();
+ SealHandleScope shs(isolate);
RUNTIME_ASSERT(IsAligned(size, kPointerSize));
RUNTIME_ASSERT(size > 0);
Heap* heap = isolate->heap();
- RUNTIME_ASSERT(size <= heap->MaxNewSpaceAllocationSize());
+ RUNTIME_ASSERT(size <= heap->MaxRegularSpaceAllocationSize());
Object* allocation;
- { MaybeObject* maybe_allocation = heap->new_space()->AllocateRaw(size);
+ { MaybeObject* maybe_allocation;
+ if (space == NEW_SPACE) {
+ maybe_allocation = heap->new_space()->AllocateRaw(size);
+ } else {
+ ASSERT(space == OLD_POINTER_SPACE || space == OLD_DATA_SPACE);
+ maybe_allocation = heap->paged_space(space)->AllocateRaw(size);
+ }
if (maybe_allocation->ToObject(&allocation)) {
heap->CreateFillerObjectAt(HeapObject::cast(allocation)->address(), size);
}
@@ -9236,24 +9261,27 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_AllocateInNewSpace) {
}
+RUNTIME_FUNCTION(MaybeObject*, Runtime_AllocateInNewSpace) {
+ SealHandleScope shs(isolate);
+ ASSERT(args.length() == 1);
+ CONVERT_ARG_HANDLE_CHECKED(Smi, size_smi, 0);
+ return Allocate(isolate, size_smi->value(), NEW_SPACE);
+}
+
+
RUNTIME_FUNCTION(MaybeObject*, Runtime_AllocateInOldPointerSpace) {
- // Allocate a block of memory in old pointer space (filled with a filler).
- // Use as fallback for allocation in generated code when old pointer space
- // is full.
+ SealHandleScope shs(isolate);
ASSERT(args.length() == 1);
CONVERT_ARG_HANDLE_CHECKED(Smi, size_smi, 0);
- int size = size_smi->value();
- RUNTIME_ASSERT(IsAligned(size, kPointerSize));
- RUNTIME_ASSERT(size > 0);
- Heap* heap = isolate->heap();
- Object* allocation;
- { MaybeObject* maybe_allocation =
- heap->old_pointer_space()->AllocateRaw(size);
- if (maybe_allocation->ToObject(&allocation)) {
- heap->CreateFillerObjectAt(HeapObject::cast(allocation)->address(), size);
- }
- return maybe_allocation;
- }
+ return Allocate(isolate, size_smi->value(), OLD_POINTER_SPACE);
+}
+
+
+RUNTIME_FUNCTION(MaybeObject*, Runtime_AllocateInOldDataSpace) {
+ SealHandleScope shs(isolate);
+ ASSERT(args.length() == 1);
+ CONVERT_ARG_HANDLE_CHECKED(Smi, size_smi, 0);
+ return Allocate(isolate, size_smi->value(), OLD_DATA_SPACE);
}
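
The three hunks above collapse near-identical space-specific allocation fallbacks into one Allocate() helper parameterized by AllocationSpace. A hypothetical distillation of that refactor — names, bound, and checks are illustrative, not the V8 API:

    #include <cstdio>

    enum AllocationSpace { NEW_SPACE, OLD_POINTER_SPACE, OLD_DATA_SPACE };

    // Shared argument checks and dispatch now live in one place.
    bool AllocateIn(AllocationSpace space, int size) {
      const int kPointerSize = 8;  // illustrative
      if (size <= 0 || size % kPointerSize != 0) return false;
      std::printf("raw-allocating %d bytes in space %d\n", size, static_cast<int>(space));
      return true;
    }

    // The per-space runtime entries shrink to one-line wrappers.
    bool AllocateInNewSpace(int size)        { return AllocateIn(NEW_SPACE, size); }
    bool AllocateInOldPointerSpace(int size) { return AllocateIn(OLD_POINTER_SPACE, size); }
    bool AllocateInOldDataSpace(int size)    { return AllocateIn(OLD_DATA_SPACE, size); }

    int main() { return AllocateInNewSpace(64) ? 0 : 1; }
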
@@ -9261,7 +9289,7 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_AllocateInOldPointerSpace) {
// array. Returns true if the element was pushed on the stack and
// false otherwise.
RUNTIME_FUNCTION(MaybeObject*, Runtime_PushIfAbsent) {
- NoHandleAllocation ha(isolate);
+ SealHandleScope shs(isolate);
ASSERT(args.length() == 2);
CONVERT_ARG_CHECKED(JSArray, array, 0);
CONVERT_ARG_CHECKED(JSReceiver, element, 1);
@@ -9978,7 +10006,7 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_ArrayConcat) {
// This will not allocate (flatten the string), but it may run
// very slowly for very deeply nested ConsStrings. For debugging use only.
RUNTIME_FUNCTION(MaybeObject*, Runtime_GlobalPrint) {
- NoHandleAllocation ha(isolate);
+ SealHandleScope shs(isolate);
ASSERT(args.length() == 1);
CONVERT_ARG_CHECKED(String, string, 0);
@@ -9997,7 +10025,7 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_GlobalPrint) {
// property.
// Returns the number of non-undefined elements collected.
RUNTIME_FUNCTION(MaybeObject*, Runtime_RemoveArrayHoles) {
- NoHandleAllocation ha(isolate);
+ SealHandleScope shs(isolate);
ASSERT(args.length() == 2);
CONVERT_ARG_CHECKED(JSObject, object, 0);
CONVERT_NUMBER_CHECKED(uint32_t, limit, Uint32, args[1]);
@@ -10007,7 +10035,7 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_RemoveArrayHoles) {
// Move contents of argument 0 (an array) to argument 1 (an array)
RUNTIME_FUNCTION(MaybeObject*, Runtime_MoveArrayContents) {
- NoHandleAllocation ha(isolate);
+ SealHandleScope shs(isolate);
ASSERT(args.length() == 2);
CONVERT_ARG_CHECKED(JSArray, from, 0);
CONVERT_ARG_CHECKED(JSArray, to, 1);
@@ -10033,7 +10061,7 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_MoveArrayContents) {
// How many elements does this object/array have?
RUNTIME_FUNCTION(MaybeObject*, Runtime_EstimateNumberOfElements) {
- NoHandleAllocation ha(isolate);
+ SealHandleScope shs(isolate);
ASSERT(args.length() == 1);
CONVERT_ARG_CHECKED(JSObject, object, 0);
HeapObject* elements = object->elements();
@@ -10092,7 +10120,7 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_GetArrayKeys) {
RUNTIME_FUNCTION(MaybeObject*, Runtime_LookupAccessor) {
- NoHandleAllocation ha(isolate);
+ SealHandleScope shs(isolate);
ASSERT(args.length() == 3);
CONVERT_ARG_CHECKED(JSReceiver, receiver, 0);
CONVERT_ARG_CHECKED(Name, name, 1);
@@ -10105,7 +10133,7 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_LookupAccessor) {
#ifdef ENABLE_DEBUGGER_SUPPORT
RUNTIME_FUNCTION(MaybeObject*, Runtime_DebugBreak) {
- NoHandleAllocation ha(isolate);
+ SealHandleScope shs(isolate);
ASSERT(args.length() == 0);
return Execution::DebugBreakHelper();
}
@@ -10128,7 +10156,7 @@ static StackFrame::Id UnwrapFrameId(int wrapped) {
// clearing the event listener function
// args[1]: object supplied during callback
RUNTIME_FUNCTION(MaybeObject*, Runtime_SetDebugEventListener) {
- NoHandleAllocation ha(isolate);
+ SealHandleScope shs(isolate);
ASSERT(args.length() == 2);
RUNTIME_ASSERT(args[0]->IsJSFunction() ||
args[0]->IsUndefined() ||
@@ -10142,7 +10170,7 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_SetDebugEventListener) {
RUNTIME_FUNCTION(MaybeObject*, Runtime_Break) {
- NoHandleAllocation ha(isolate);
+ SealHandleScope shs(isolate);
ASSERT(args.length() == 0);
isolate->stack_guard()->DebugBreak();
return isolate->heap()->undefined_value();
@@ -10340,7 +10368,7 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_DebugGetProperty) {
// Return the property type calculated from the property details.
// args[0]: smi with property details.
RUNTIME_FUNCTION(MaybeObject*, Runtime_DebugPropertyTypeFromDetails) {
- NoHandleAllocation ha(isolate);
+ SealHandleScope shs(isolate);
ASSERT(args.length() == 1);
CONVERT_PROPERTY_DETAILS_CHECKED(details, 0);
return Smi::FromInt(static_cast<int>(details.type()));
@@ -10350,7 +10378,7 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_DebugPropertyTypeFromDetails) {
// Return the property attribute calculated from the property details.
// args[0]: smi with property details.
RUNTIME_FUNCTION(MaybeObject*, Runtime_DebugPropertyAttributesFromDetails) {
- NoHandleAllocation ha(isolate);
+ SealHandleScope shs(isolate);
ASSERT(args.length() == 1);
CONVERT_PROPERTY_DETAILS_CHECKED(details, 0);
return Smi::FromInt(static_cast<int>(details.attributes()));
@@ -10360,7 +10388,7 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_DebugPropertyAttributesFromDetails) {
// Return the property insertion index calculated from the property details.
// args[0]: smi with property details.
RUNTIME_FUNCTION(MaybeObject*, Runtime_DebugPropertyIndexFromDetails) {
- NoHandleAllocation ha(isolate);
+ SealHandleScope shs(isolate);
ASSERT(args.length() == 1);
CONVERT_PROPERTY_DETAILS_CHECKED(details, 0);
// TODO(verwaest): Depends on the type of details.
@@ -10398,7 +10426,7 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_DebugIndexedInterceptorElementValue) {
RUNTIME_FUNCTION(MaybeObject*, Runtime_CheckExecutionState) {
- NoHandleAllocation ha(isolate);
+ SealHandleScope shs(isolate);
ASSERT(args.length() >= 1);
CONVERT_NUMBER_CHECKED(int, break_id, Int32, args[0]);
// Check that the break id is valid.
@@ -11201,7 +11229,9 @@ class ScopeIterator {
// Find the break point where execution has stopped.
BreakLocationIterator break_location_iterator(debug_info,
ALL_BREAK_LOCATIONS);
- break_location_iterator.FindBreakLocationFromAddress(frame->pc());
+ // pc points to the instruction after the current one, possibly a break
+ // location as well; hence the "- 1", which excludes it from the search.
+ break_location_iterator.FindBreakLocationFromAddress(frame->pc() - 1);
if (break_location_iterator.IsExit()) {
// We are within the return sequence. At the moment it is not possible to
// get a source position which is consistent with the current scope chain.
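
A toy illustration of why the new lookup uses frame->pc() - 1: the saved pc is a return address, i.e. the first byte after the current call, so searching with it unadjusted can land on the following break location. Addresses and the search routine below are made up for the example:

    #include <cstdio>

    // Index of the last break location whose start address is <= addr.
    int FindBreakLocation(const int* starts, int n, int addr) {
      int found = -1;
      for (int i = 0; i < n; ++i)
        if (starts[i] <= addr) found = i;
      return found;
    }

    int main() {
      int starts[] = {0, 4, 8};  // three break locations
      int return_pc = 8;         // points just past the current call site
      std::printf("unadjusted: %d, with -1: %d\n",
                  FindBreakLocation(starts, 3, return_pc),       // 2 (too far)
                  FindBreakLocation(starts, 3, return_pc - 1));  // 1 (intended)
    }
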
@@ -11214,7 +11244,9 @@ class ScopeIterator {
context_ = Handle<Context>(context_->previous(), isolate_);
}
}
- if (scope_info->Type() != EVAL_SCOPE) nested_scope_chain_.Add(scope_info);
+ if (scope_info->scope_type() != EVAL_SCOPE) {
+ nested_scope_chain_.Add(scope_info);
+ }
} else {
// Reparse the code and analyze the scopes.
Handle<Script> script(Script::cast(shared_info->script()));
@@ -11222,13 +11254,13 @@ class ScopeIterator {
// Check whether we are in global, eval or function code.
Handle<ScopeInfo> scope_info(shared_info->scope_info());
- if (scope_info->Type() != FUNCTION_SCOPE) {
+ if (scope_info->scope_type() != FUNCTION_SCOPE) {
// Global or eval code.
CompilationInfoWithZone info(script);
- if (scope_info->Type() == GLOBAL_SCOPE) {
+ if (scope_info->scope_type() == GLOBAL_SCOPE) {
info.MarkAsGlobal();
} else {
- ASSERT(scope_info->Type() == EVAL_SCOPE);
+ ASSERT(scope_info->scope_type() == EVAL_SCOPE);
info.MarkAsEval();
info.SetContext(Handle<Context>(function_->context()));
}
@@ -11294,7 +11326,7 @@ class ScopeIterator {
ASSERT(!failed_);
if (!nested_scope_chain_.is_empty()) {
Handle<ScopeInfo> scope_info = nested_scope_chain_.last();
- switch (scope_info->Type()) {
+ switch (scope_info->scope_type()) {
case FUNCTION_SCOPE:
ASSERT(context_->IsFunctionContext() ||
!scope_info->HasContext());
@@ -12002,7 +12034,7 @@ static Handle<Context> CopyNestedScopeContextChain(Isolate* isolate,
Handle<Context> current = context_chain.RemoveLast();
ASSERT(!(scope_info->HasContext() & current.is_null()));
- if (scope_info->Type() == CATCH_SCOPE) {
+ if (scope_info->scope_type() == CATCH_SCOPE) {
ASSERT(current->IsCatchContext());
Handle<String> name(String::cast(current->extension()));
Handle<Object> thrown_object(current->get(Context::THROWN_OBJECT_INDEX),
@@ -12012,7 +12044,7 @@ static Handle<Context> CopyNestedScopeContextChain(Isolate* isolate,
context,
name,
thrown_object);
- } else if (scope_info->Type() == BLOCK_SCOPE) {
+ } else if (scope_info->scope_type() == BLOCK_SCOPE) {
// Materialize the contents of the block scope into a JSObject.
ASSERT(current->IsBlockContext());
Handle<JSObject> block_scope_object =
@@ -12027,7 +12059,7 @@ static Handle<Context> CopyNestedScopeContextChain(Isolate* isolate,
new_context->set_previous(*context);
context = new_context;
} else {
- ASSERT(scope_info->Type() == WITH_SCOPE);
+ ASSERT(scope_info->scope_type() == WITH_SCOPE);
ASSERT(current->IsWithContext());
Handle<JSObject> extension(JSObject::cast(current->extension()));
context =
@@ -12299,8 +12331,8 @@ static int DebugReferencedBy(HeapIterator* iterator,
FixedArray* instances, int instances_size,
JSFunction* arguments_function) {
Isolate* isolate = target->GetIsolate();
- NoHandleAllocation ha(isolate);
- AssertNoAllocation no_alloc;
+ SealHandleScope shs(isolate);
+ DisallowHeapAllocation no_allocation;
// Iterate the heap.
int count = 0;
@@ -12368,7 +12400,7 @@ static int DebugReferencedBy(HeapIterator* iterator,
// args[1]: constructor function for instances to exclude (Mirror)
// args[2]: the maximum number of objects to return
RUNTIME_FUNCTION(MaybeObject*, Runtime_DebugReferencedBy) {
- NoHandleAllocation ha(isolate);
+ SealHandleScope shs(isolate);
ASSERT(args.length() == 3);
// First perform a full GC in order to avoid references from dead objects.
@@ -12432,7 +12464,7 @@ static int DebugConstructedBy(HeapIterator* iterator,
int max_references,
FixedArray* instances,
int instances_size) {
- AssertNoAllocation no_alloc;
+ DisallowHeapAllocation no_allocation;
// Iterate the heap.
int count = 0;
@@ -12462,7 +12494,7 @@ static int DebugConstructedBy(HeapIterator* iterator,
// args[0]: the constructor to find instances of
// args[1]: the maximum number of objects to return
RUNTIME_FUNCTION(MaybeObject*, Runtime_DebugConstructedBy) {
- NoHandleAllocation ha(isolate);
+ SealHandleScope shs(isolate);
ASSERT(args.length() == 2);
// First perform a full GC in order to avoid dead objects.
@@ -12512,7 +12544,7 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_DebugConstructedBy) {
// Find the effective prototype object as returned by __proto__.
// args[0]: the object to find the prototype for.
RUNTIME_FUNCTION(MaybeObject*, Runtime_DebugGetPrototype) {
- NoHandleAllocation ha(isolate);
+ SealHandleScope shs(isolate);
ASSERT(args.length() == 1);
CONVERT_ARG_CHECKED(JSObject, obj, 0);
return GetPrototypeSkipHiddenPrototypes(isolate, obj);
@@ -12539,7 +12571,7 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_DebugSetScriptSource) {
RUNTIME_FUNCTION(MaybeObject*, Runtime_SystemBreak) {
- NoHandleAllocation ha(isolate);
+ SealHandleScope shs(isolate);
ASSERT(args.length() == 0);
CPU::DebugBreak();
return isolate->heap()->undefined_value();
@@ -12577,7 +12609,7 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_DebugDisassembleConstructor) {
RUNTIME_FUNCTION(MaybeObject*, Runtime_FunctionGetInferredName) {
- NoHandleAllocation ha(isolate);
+ SealHandleScope shs(isolate);
ASSERT(args.length() == 1);
CONVERT_ARG_CHECKED(JSFunction, f, 0);
@@ -12588,7 +12620,7 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_FunctionGetInferredName) {
static int FindSharedFunctionInfosForScript(HeapIterator* iterator,
Script* script,
FixedArray* buffer) {
- AssertNoAllocation no_allocations;
+ DisallowHeapAllocation no_allocation;
int counter = 0;
int buffer_size = buffer->length();
for (HeapObject* obj = iterator->next();
@@ -12631,7 +12663,7 @@ RUNTIME_FUNCTION(MaybeObject*,
Heap* heap = isolate->heap();
{
heap->EnsureHeapIsIterable();
- AssertNoAllocation no_allocations;
+ DisallowHeapAllocation no_allocation;
HeapIterator heap_iterator(heap);
Script* scr = *script;
FixedArray* arr = *array;
@@ -12640,7 +12672,7 @@ RUNTIME_FUNCTION(MaybeObject*,
if (number > kBufferSize) {
array = isolate->factory()->NewFixedArray(number);
heap->EnsureHeapIsIterable();
- AssertNoAllocation no_allocations;
+ DisallowHeapAllocation no_allocation;
HeapIterator heap_iterator(heap);
Script* scr = *script;
FixedArray* arr = *array;
@@ -12925,7 +12957,7 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_ExecuteInDebugContext) {
// Sets a v8 flag.
RUNTIME_FUNCTION(MaybeObject*, Runtime_SetFlags) {
- NoHandleAllocation ha(isolate);
+ SealHandleScope shs(isolate);
CONVERT_ARG_CHECKED(String, arg, 0);
SmartArrayPointer<char> flags =
arg->ToCString(DISALLOW_NULLS, ROBUST_STRING_TRAVERSAL);
@@ -12937,7 +12969,7 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_SetFlags) {
// Performs a GC.
// Presently, it only does a full GC.
RUNTIME_FUNCTION(MaybeObject*, Runtime_CollectGarbage) {
- NoHandleAllocation ha(isolate);
+ SealHandleScope shs(isolate);
isolate->heap()->CollectAllGarbage(Heap::kNoGCFlags, "%CollectGarbage");
return isolate->heap()->undefined_value();
}
@@ -12945,7 +12977,7 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_CollectGarbage) {
// Gets the current heap usage.
RUNTIME_FUNCTION(MaybeObject*, Runtime_GetHeapUsage) {
- NoHandleAllocation ha(isolate);
+ SealHandleScope shs(isolate);
int usage = static_cast<int>(isolate->heap()->SizeOfObjects());
if (!Smi::IsValid(usage)) {
return *isolate->factory()->NewNumberFromInt(usage);
@@ -12957,14 +12989,14 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_GetHeapUsage) {
RUNTIME_FUNCTION(MaybeObject*, Runtime_ProfilerResume) {
- NoHandleAllocation ha(isolate);
+ SealHandleScope shs(isolate);
v8::V8::ResumeProfiler();
return isolate->heap()->undefined_value();
}
RUNTIME_FUNCTION(MaybeObject*, Runtime_ProfilerPause) {
- NoHandleAllocation ha(isolate);
+ SealHandleScope shs(isolate);
v8::V8::PauseProfiler();
return isolate->heap()->undefined_value();
}
@@ -12981,9 +13013,10 @@ static Handle<Object> Runtime_GetScriptFromScriptName(
// Scan the heap for Script objects to find the script with the requested
// script data.
Handle<Script> script;
+ Factory* factory = script_name->GetIsolate()->factory();
Heap* heap = script_name->GetHeap();
heap->EnsureHeapIsIterable();
- AssertNoAllocation no_allocation_during_heap_iteration;
+ DisallowHeapAllocation no_allocation_during_heap_iteration;
HeapIterator iterator(heap);
HeapObject* obj = NULL;
while (script.is_null() && ((obj = iterator.next()) != NULL)) {
@@ -12998,7 +13031,7 @@ static Handle<Object> Runtime_GetScriptFromScriptName(
}
// If no script with the requested script data is found return undefined.
- if (script.is_null()) return FACTORY->undefined_value();
+ if (script.is_null()) return factory->undefined_value();
// Return the script found.
return GetScriptWrapper(script);
@@ -13084,7 +13117,7 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_SetOverflowedStackTrace) {
// Returns V8 version as a string.
RUNTIME_FUNCTION(MaybeObject*, Runtime_GetV8Version) {
- NoHandleAllocation ha(isolate);
+ SealHandleScope shs(isolate);
ASSERT_EQ(args.length(), 0);
const char* version_string = v8::V8::GetVersion();
@@ -13095,7 +13128,7 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_GetV8Version) {
RUNTIME_FUNCTION(MaybeObject*, Runtime_Abort) {
- NoHandleAllocation ha(isolate);
+ SealHandleScope shs(isolate);
ASSERT(args.length() == 2);
OS::PrintError("abort: %s\n",
reinterpret_cast<char*>(args[0]) + args.smi_at(1));
@@ -13116,7 +13149,7 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_FlattenString) {
RUNTIME_FUNCTION(MaybeObject*, Runtime_GetFromCache) {
- NoHandleAllocation ha(isolate);
+ SealHandleScope shs(isolate);
// This is only called from codegen, so checks might be more lax.
CONVERT_ARG_CHECKED(JSFunctionResultCache, cache, 0);
Object* key = args[1];
@@ -13214,14 +13247,14 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_GetFromCache) {
RUNTIME_FUNCTION(MaybeObject*, Runtime_MessageGetStartPosition) {
- NoHandleAllocation ha(isolate);
+ SealHandleScope shs(isolate);
CONVERT_ARG_CHECKED(JSMessageObject, message, 0);
return Smi::FromInt(message->start_position());
}
RUNTIME_FUNCTION(MaybeObject*, Runtime_MessageGetScript) {
- NoHandleAllocation ha(isolate);
+ SealHandleScope shs(isolate);
CONVERT_ARG_CHECKED(JSMessageObject, message, 0);
return message->script();
}
@@ -13275,10 +13308,11 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_ListNatives) {
RUNTIME_FUNCTION(MaybeObject*, Runtime_Log) {
- NoHandleAllocation ha(isolate);
+ SealHandleScope shs(isolate);
ASSERT(args.length() == 2);
CONVERT_ARG_CHECKED(String, format, 0);
CONVERT_ARG_CHECKED(JSArray, elms, 1);
+ DisallowHeapAllocation no_gc;
String::FlatContent format_content = format->GetFlatContent();
RUNTIME_ASSERT(format_content.IsAscii());
Vector<const uint8_t> chars = format_content.ToOneByteVector();
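
Both FlatContent hunks add a DisallowHeapAllocation guard right before GetFlatContent(): the returned view holds raw pointers into heap storage, which a GC could move. A toy model of guarding a raw view with an RAII flag — the guard and storage here are stand-ins, not V8's machinery:

    #include <cassert>
    #include <vector>

    struct NoAllocGuard {
      explicit NoAllocGuard(bool* flag) : flag_(flag) { *flag_ = true; }
      ~NoAllocGuard() { *flag_ = false; }
      bool* flag_;
    };

    int main() {
      bool no_alloc = false;
      std::vector<int> heap = {1, 2, 3};
      {
        NoAllocGuard guard(&no_alloc);
        const int* view = heap.data();  // raw view, valid only under the guard
        assert(no_alloc && view[0] == 1);
        // heap.push_back(4);           // reallocation could dangle `view`
      }
    }
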
@@ -13305,6 +13339,7 @@ ELEMENTS_KIND_CHECK_RUNTIME_FUNCTION(FastSmiOrObjectElements)
ELEMENTS_KIND_CHECK_RUNTIME_FUNCTION(FastDoubleElements)
ELEMENTS_KIND_CHECK_RUNTIME_FUNCTION(FastHoleyElements)
ELEMENTS_KIND_CHECK_RUNTIME_FUNCTION(DictionaryElements)
+ELEMENTS_KIND_CHECK_RUNTIME_FUNCTION(NonStrictArgumentsElements)
ELEMENTS_KIND_CHECK_RUNTIME_FUNCTION(ExternalPixelElements)
ELEMENTS_KIND_CHECK_RUNTIME_FUNCTION(ExternalArrayElements)
ELEMENTS_KIND_CHECK_RUNTIME_FUNCTION(ExternalByteElements)
@@ -13322,7 +13357,7 @@ ELEMENTS_KIND_CHECK_RUNTIME_FUNCTION(FastProperties)
RUNTIME_FUNCTION(MaybeObject*, Runtime_HaveSameMap) {
- NoHandleAllocation ha(isolate);
+ SealHandleScope shs(isolate);
ASSERT(args.length() == 2);
CONVERT_ARG_CHECKED(JSObject, obj1, 0);
CONVERT_ARG_CHECKED(JSObject, obj2, 1);
@@ -13331,9 +13366,11 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_HaveSameMap) {
RUNTIME_FUNCTION(MaybeObject*, Runtime_IsObserved) {
- NoHandleAllocation ha(isolate);
+ SealHandleScope shs(isolate);
ASSERT(args.length() == 1);
- CONVERT_ARG_CHECKED(JSReceiver, obj, 0);
+
+ if (!args[0]->IsJSReceiver()) return isolate->heap()->false_value();
+ JSReceiver* obj = JSReceiver::cast(args[0]);
if (obj->IsJSGlobalProxy()) {
Object* proto = obj->GetPrototype();
if (proto->IsNull()) return isolate->heap()->false_value();
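
The hunk above swaps a hard CONVERT_ARG_CHECKED type guard for a soft check: %IsObserved on a non-receiver now answers false instead of failing. A small model of that behavioral change, with stand-in types:

    #include <cstdio>

    struct Object { bool is_js_receiver; };

    bool IsObserved(const Object& arg) {
      if (!arg.is_js_receiver) return false;  // soft reject replaces the hard check
      // ... the real code additionally unwraps JSGlobalProxy prototypes ...
      return true;  // stand-in for the map's is_observed bit
    }

    int main() {
      std::printf("%d\n", IsObserved(Object{false}));  // 0, no runtime failure
    }
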
@@ -13345,7 +13382,7 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_IsObserved) {
RUNTIME_FUNCTION(MaybeObject*, Runtime_SetIsObserved) {
- NoHandleAllocation ha(isolate);
+ SealHandleScope shs(isolate);
ASSERT(args.length() == 2);
CONVERT_ARG_CHECKED(JSReceiver, obj, 0);
CONVERT_BOOLEAN_ARG_CHECKED(is_observed, 1);
@@ -13376,7 +13413,7 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_SetIsObserved) {
RUNTIME_FUNCTION(MaybeObject*, Runtime_SetObserverDeliveryPending) {
- NoHandleAllocation ha(isolate);
+ SealHandleScope shs(isolate);
ASSERT(args.length() == 0);
isolate->set_observer_delivery_pending(true);
return isolate->heap()->undefined_value();
@@ -13384,7 +13421,7 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_SetObserverDeliveryPending) {
RUNTIME_FUNCTION(MaybeObject*, Runtime_GetObservationState) {
- NoHandleAllocation ha(isolate);
+ SealHandleScope shs(isolate);
ASSERT(args.length() == 0);
return isolate->heap()->observation_state();
}
@@ -13405,7 +13442,7 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_ObservationWeakMapCreate) {
RUNTIME_FUNCTION(MaybeObject*, Runtime_UnwrapGlobalProxy) {
- NoHandleAllocation ha(isolate);
+ SealHandleScope shs(isolate);
ASSERT(args.length() == 1);
Object* object = args[0];
if (object->IsJSGlobalProxy()) {
@@ -13416,6 +13453,107 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_UnwrapGlobalProxy) {
}
+static MaybeObject* ArrayConstructorCommon(Isolate* isolate,
+ Handle<JSFunction> constructor,
+ Handle<Object> type_info,
+ Arguments* caller_args) {
+ bool holey = false;
+ bool can_use_type_feedback = true;
+ if (caller_args->length() == 1) {
+ Object* argument_one = (*caller_args)[0];
+ if (argument_one->IsSmi()) {
+ int value = Smi::cast(argument_one)->value();
+ if (value < 0 || value >= JSObject::kInitialMaxFastElementArray) {
+ // The array is a dictionary in this case.
+ can_use_type_feedback = false;
+ } else if (value != 0) {
+ holey = true;
+ }
+ } else {
+ // A non-Smi length argument produces a dictionary.
+ can_use_type_feedback = false;
+ }
+ }
+
+ JSArray* array;
+ MaybeObject* maybe_array;
+ if (!type_info.is_null() &&
+ *type_info != isolate->heap()->undefined_value() &&
+ JSGlobalPropertyCell::cast(*type_info)->value()->IsSmi() &&
+ can_use_type_feedback) {
+ JSGlobalPropertyCell* cell = JSGlobalPropertyCell::cast(*type_info);
+ Smi* smi = Smi::cast(cell->value());
+ ElementsKind to_kind = static_cast<ElementsKind>(smi->value());
+ if (holey && !IsFastHoleyElementsKind(to_kind)) {
+ to_kind = GetHoleyElementsKind(to_kind);
+ // Update the allocation site info to reflect the advice alteration.
+ cell->set_value(Smi::FromInt(to_kind));
+ }
+
+ maybe_array = isolate->heap()->AllocateJSObjectWithAllocationSite(
+ *constructor, type_info);
+ if (!maybe_array->To(&array)) return maybe_array;
+ } else {
+ maybe_array = isolate->heap()->AllocateJSObject(*constructor);
+ if (!maybe_array->To(&array)) return maybe_array;
+ // We might need to transition to holey elements.
+ ElementsKind kind = constructor->initial_map()->elements_kind();
+ if (holey && !IsFastHoleyElementsKind(kind)) {
+ kind = GetHoleyElementsKind(kind);
+ maybe_array = array->TransitionElementsKind(kind);
+ if (maybe_array->IsFailure()) return maybe_array;
+ }
+ }
+
+ maybe_array = isolate->heap()->AllocateJSArrayStorage(array, 0, 0,
+ DONT_INITIALIZE_ARRAY_ELEMENTS);
+ if (maybe_array->IsFailure()) return maybe_array;
+ maybe_array = ArrayConstructInitializeElements(array, caller_args);
+ if (maybe_array->IsFailure()) return maybe_array;
+ return array;
+}
+
+
+RUNTIME_FUNCTION(MaybeObject*, Runtime_ArrayConstructor) {
+ HandleScope scope(isolate);
+ // If we get 2 arguments then they are the stub parameters (constructor, type
+ // info). If we get 3, then the first one is a pointer to the arguments
+ // passed by the caller.
+ Arguments empty_args(0, NULL);
+ bool no_caller_args = args.length() == 2;
+ ASSERT(no_caller_args || args.length() == 3);
+ int parameters_start = no_caller_args ? 0 : 1;
+ Arguments* caller_args = no_caller_args
+ ? &empty_args
+ : reinterpret_cast<Arguments*>(args[0]);
+ CONVERT_ARG_HANDLE_CHECKED(JSFunction, constructor, parameters_start);
+ CONVERT_ARG_HANDLE_CHECKED(Object, type_info, parameters_start + 1);
+
+ return ArrayConstructorCommon(isolate,
+ constructor,
+ type_info,
+ caller_args);
+}
+
+
+RUNTIME_FUNCTION(MaybeObject*, Runtime_InternalArrayConstructor) {
+ HandleScope scope(isolate);
+ Arguments empty_args(0, NULL);
+ bool no_caller_args = args.length() == 1;
+ ASSERT(no_caller_args || args.length() == 2);
+ int parameters_start = no_caller_args ? 0 : 1;
+ Arguments* caller_args = no_caller_args
+ ? &empty_args
+ : reinterpret_cast<Arguments*>(args[0]);
+ CONVERT_ARG_HANDLE_CHECKED(JSFunction, constructor, parameters_start);
+
+ return ArrayConstructorCommon(isolate,
+ constructor,
+ Handle<Object>::null(),
+ caller_args);
+}
+
+
// ----------------------------------------------------------------------------
// Implementation of Runtime
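
The new ArrayConstructorCommon above centers on one policy: a single in-range Smi length keeps fast (possibly holey) elements and type feedback; anything else forfeits feedback and ends up in dictionary mode. A hypothetical distillation — the bound below is illustrative, not V8's constant:

    #include <cstdio>

    struct Advice { bool can_use_type_feedback; bool holey; };

    Advice AdviseFromLengthArg(bool is_smi, int value) {
      const int kInitialMaxFastElementArray = 100000;  // illustrative bound
      if (!is_smi) return {false, false};              // non-Smi length => dictionary
      if (value < 0 || value >= kInitialMaxFastElementArray)
        return {false, false};                         // out of range => dictionary
      return {true, value != 0};                       // nonzero length => holey
    }

    int main() {
      Advice a = AdviseFromLengthArg(true, 16);
      std::printf("feedback=%d holey=%d\n", a.can_use_type_feedback, a.holey);
    }
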
diff --git a/deps/v8/src/runtime.h b/deps/v8/src/runtime.h
index a37c851e81..ef5401610f 100644
--- a/deps/v8/src/runtime.h
+++ b/deps/v8/src/runtime.h
@@ -102,6 +102,7 @@ namespace internal {
F(CompileForOnStackReplacement, 1, 1) \
F(AllocateInNewSpace, 1, 1) \
F(AllocateInOldPointerSpace, 1, 1) \
+ F(AllocateInOldDataSpace, 1, 1) \
F(SetNativeFlag, 1, 1) \
F(StoreArrayLiteralElement, 5, 1) \
F(DebugCallbackSupportsStepping, 1, 1) \
@@ -286,6 +287,8 @@ namespace internal {
F(GetArrayKeys, 2, 1) \
F(MoveArrayContents, 2, 1) \
F(EstimateNumberOfElements, 1, 1) \
+ F(ArrayConstructor, -1, 1) \
+ F(InternalArrayConstructor, -1, 1) \
\
/* Getters and Setters */ \
F(LookupAccessor, 3, 1) \
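
The new table entries declare -1 as the argument count, matching Runtime_ArrayConstructor's two accepted shapes (2 stub parameters, or 3 with a caller-arguments pointer first). Assuming the usual F(name, nargs, result_size) reading of this macro, a sketch of what a variable-arity entry permits:

    #include <cassert>

    // Assumed reading of a table row F(name, nargs, result_size):
    // nargs == -1 marks variable arity, checked in the function body instead.
    bool ArityOk(int declared_nargs, int actual) {
      return declared_nargs == -1 || declared_nargs == actual;
    }

    int main() {
      assert(ArityOk(-1, 2) && ArityOk(-1, 3));  // ArrayConstructor shapes
      assert(ArityOk(1, 1) && !ArityOk(1, 2));   // fixed-arity entries
    }
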
@@ -303,6 +306,9 @@ namespace internal {
F(ResumeJSGeneratorObject, 3, 1) \
F(ThrowGeneratorStateError, 1, 1) \
\
+ /* ES5 */ \
+ F(ObjectFreeze, 1, 1) \
+ \
/* Harmony modules */ \
F(IsJSModule, 1, 1) \
\
@@ -423,6 +429,7 @@ namespace internal {
F(HasFastDoubleElements, 1, 1) \
F(HasFastHoleyElements, 1, 1) \
F(HasDictionaryElements, 1, 1) \
+ F(HasNonStrictArgumentsElements, 1, 1) \
F(HasExternalPixelElements, 1, 1) \
F(HasExternalArrayElements, 1, 1) \
F(HasExternalByteElements, 1, 1) \
@@ -565,7 +572,7 @@ namespace internal {
F(HasCachedArrayIndex, 1, 1) \
F(GetCachedArrayIndex, 1, 1) \
F(FastAsciiArrayJoin, 2, 1) \
- F(GeneratorSend, 2, 1) \
+ F(GeneratorNext, 2, 1) \
F(GeneratorThrow, 2, 1)
@@ -750,8 +757,9 @@ class Runtime : public AllStatic {
Handle<Object> object,
Handle<Object> key);
- static bool SetupArrayBuffer(Isolate* isolate,
+ static void SetupArrayBuffer(Isolate* isolate,
Handle<JSArrayBuffer> array_buffer,
+ bool is_external,
void* data,
size_t allocated_length);
diff --git a/deps/v8/src/safepoint-table.h b/deps/v8/src/safepoint-table.h
index 307d948bfc..fc8bf7a411 100644
--- a/deps/v8/src/safepoint-table.h
+++ b/deps/v8/src/safepoint-table.h
@@ -151,7 +151,7 @@ class SafepointTable BASE_EMBEDDED {
static void PrintBits(uint8_t byte, int digits);
- AssertNoAllocation no_allocation_;
+ DisallowHeapAllocation no_allocation_;
Code* code_;
unsigned length_;
unsigned entry_size_;
diff --git a/deps/v8/src/sampler.cc b/deps/v8/src/sampler.cc
index da186b6ce4..efac288ee7 100644
--- a/deps/v8/src/sampler.cc
+++ b/deps/v8/src/sampler.cc
@@ -145,23 +145,130 @@ enum { REG_EBP = 6, REG_ESP = 7, REG_EIP = 14 };
namespace v8 {
namespace internal {
-#if defined(USE_SIGNALS)
+namespace {
-class Sampler::PlatformData : public Malloced {
+class PlatformDataCommon : public Malloced {
public:
- PlatformData()
- : vm_tid_(pthread_self()),
- profiled_thread_id_(ThreadId::Current()) {}
+ PlatformDataCommon() : profiled_thread_id_(ThreadId::Current()) {}
+ ThreadId profiled_thread_id() { return profiled_thread_id_; }
+
+ protected:
+ ~PlatformDataCommon() {}
+
+ private:
+ ThreadId profiled_thread_id_;
+};
+
+} // namespace
+#if defined(USE_SIGNALS)
+
+class Sampler::PlatformData : public PlatformDataCommon {
+ public:
+ PlatformData() : vm_tid_(pthread_self()) {}
pthread_t vm_tid() const { return vm_tid_; }
- ThreadId profiled_thread_id() { return profiled_thread_id_; }
private:
pthread_t vm_tid_;
- ThreadId profiled_thread_id_;
+};
+
+#elif defined(__MACH__)
+
+class Sampler::PlatformData : public PlatformDataCommon {
+ public:
+ PlatformData() : profiled_thread_(mach_thread_self()) {}
+
+ ~PlatformData() {
+ // Deallocate Mach port for thread.
+ mach_port_deallocate(mach_task_self(), profiled_thread_);
+ }
+
+ thread_act_t profiled_thread() { return profiled_thread_; }
+
+ private:
+  // Note: for profiled_thread_, Mach primitives are used instead of pthreads
+  // because the latter don't provide the thread manipulation primitives required.
+ // For details, consult "Mac OS X Internals" book, Section 7.3.
+ thread_act_t profiled_thread_;
+};
+
+#elif defined(_WIN32) || defined(_WIN64) || defined(__CYGWIN__)
+
+// ----------------------------------------------------------------------------
+// Win32 profiler support. On Cygwin we use the same sampler implementation as
+// on Win32.
+
+class Sampler::PlatformData : public PlatformDataCommon {
+ public:
+ // Get a handle to the calling thread. This is the thread that we are
+ // going to profile. We need to make a copy of the handle because we are
+ // going to use it in the sampler thread. Using GetThreadHandle() will
+ // not work in this case. We're using OpenThread because DuplicateHandle
+ // for some reason doesn't work in Chrome's sandbox.
+ PlatformData()
+ : profiled_thread_(OpenThread(THREAD_GET_CONTEXT |
+ THREAD_SUSPEND_RESUME |
+ THREAD_QUERY_INFORMATION,
+ false,
+ GetCurrentThreadId())) {}
+
+ ~PlatformData() {
+ if (profiled_thread_ != NULL) {
+ CloseHandle(profiled_thread_);
+ profiled_thread_ = NULL;
+ }
+ }
+
+ HANDLE profiled_thread() { return profiled_thread_; }
+
+ private:
+ HANDLE profiled_thread_;
+};
+#endif
+
+
+class SampleHelper {
+ public:
+ inline TickSample* Init(Sampler* sampler, Isolate* isolate) {
+#if defined(USE_SIMULATOR)
+ ThreadId thread_id = sampler->platform_data()->profiled_thread_id();
+ Isolate::PerIsolateThreadData* per_thread_data = isolate->
+ FindPerThreadDataForThread(thread_id);
+ if (!per_thread_data) return NULL;
+ simulator_ = per_thread_data->simulator();
+    // Check if there is an active simulator before allocating a TickSample.
+ if (!simulator_) return NULL;
+#endif // USE_SIMULATOR
+ TickSample* sample = isolate->cpu_profiler()->TickSampleEvent();
+ if (sample == NULL) sample = &sample_obj;
+ return sample;
+ }
+
+#if defined(USE_SIMULATOR)
+ inline void FillRegisters(TickSample* sample) {
+ sample->pc = reinterpret_cast<Address>(simulator_->get_pc());
+ sample->sp = reinterpret_cast<Address>(simulator_->get_register(
+ Simulator::sp));
+#if V8_TARGET_ARCH_ARM
+ sample->fp = reinterpret_cast<Address>(simulator_->get_register(
+ Simulator::r11));
+#elif V8_TARGET_ARCH_MIPS
+ sample->fp = reinterpret_cast<Address>(simulator_->get_register(
+ Simulator::fp));
+#endif
+ }
+#endif // USE_SIMULATOR
+
+ private:
+#if defined(USE_SIMULATOR)
+ Simulator* simulator_;
+#endif
+ TickSample sample_obj;
};
+#if defined(USE_SIGNALS)
+
class SignalHandler : public AllStatic {
public:
static inline void EnsureInstalled() {
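
The SampleHelper introduced above hoists the thrice-duplicated simulator lookup and stack-allocated TickSample fallback into one object shared by all three platform samplers. A stripped-down model of the call pattern — the fields and the Init/FillRegisters contract are simplified stand-ins:

    #include <cstdio>

    struct TickSample { long pc = 0, sp = 0, fp = 0; };

    class SampleHelper {
     public:
      // Returns NULL when sampling is impossible (e.g. no active simulator).
      TickSample* Init() { return &sample_obj_; }
      void FillRegisters(TickSample* s) { s->pc = 1; s->sp = 2; s->fp = 3; }

     private:
      TickSample sample_obj_;  // fallback storage, as in the real helper
    };

    int main() {
      SampleHelper helper;                 // one pattern, three platforms:
      TickSample* sample = helper.Init();  // init...
      if (sample == NULL) return 0;        // ...bail out early...
      helper.FillRegisters(sample);        // ...then fill from the simulator
      std::printf("pc=%ld sp=%ld fp=%ld\n", sample->pc, sample->sp, sample->fp);
    }
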
@@ -217,32 +324,12 @@ void SignalHandler::HandleProfilerSignal(int signal, siginfo_t* info,
Sampler* sampler = isolate->logger()->sampler();
if (sampler == NULL || !sampler->IsActive()) return;
-#if defined(USE_SIMULATOR)
-#if V8_TARGET_ARCH_ARM || V8_TARGET_ARCH_MIPS
- ThreadId thread_id = sampler->platform_data()->profiled_thread_id();
- Isolate::PerIsolateThreadData* per_thread_data = isolate->
- FindPerThreadDataForThread(thread_id);
- if (!per_thread_data) return;
- Simulator* sim = per_thread_data->simulator();
- // Check if there is active simulator before allocating TickSample.
- if (!sim) return;
-#endif
-#endif // USE_SIMULATOR
-
- TickSample sample_obj;
- TickSample* sample = isolate->cpu_profiler()->TickSampleEvent();
- if (sample == NULL) sample = &sample_obj;
+ SampleHelper helper;
+ TickSample* sample = helper.Init(sampler, isolate);
+ if (sample == NULL) return;
#if defined(USE_SIMULATOR)
-#if V8_TARGET_ARCH_ARM
- sample->pc = reinterpret_cast<Address>(sim->get_register(Simulator::pc));
- sample->sp = reinterpret_cast<Address>(sim->get_register(Simulator::sp));
- sample->fp = reinterpret_cast<Address>(sim->get_register(Simulator::r11));
-#elif V8_TARGET_ARCH_MIPS
- sample->pc = reinterpret_cast<Address>(sim->get_register(Simulator::pc));
- sample->sp = reinterpret_cast<Address>(sim->get_register(Simulator::sp));
- sample->fp = reinterpret_cast<Address>(sim->get_register(Simulator::fp));
-#endif // V8_TARGET_ARCH_*
+ helper.FillRegisters(sample);
#else
// Extracting the sample from the context is extremely machine dependent.
ucontext_t* ucontext = reinterpret_cast<ucontext_t*>(context);
@@ -323,65 +410,6 @@ void SignalHandler::HandleProfilerSignal(int signal, siginfo_t* info,
#endif // __native_client__
}
-#elif defined(__MACH__)
-class Sampler::PlatformData : public Malloced {
- public:
- PlatformData()
- : profiled_thread_(mach_thread_self()),
- profiled_thread_id_(ThreadId::Current()) {}
-
- ~PlatformData() {
- // Deallocate Mach port for thread.
- mach_port_deallocate(mach_task_self(), profiled_thread_);
- }
-
- thread_act_t profiled_thread() { return profiled_thread_; }
- ThreadId profiled_thread_id() { return profiled_thread_id_; }
-
- private:
- // Note: for profiled_thread_ Mach primitives are used instead of PThread's
- // because the latter doesn't provide thread manipulation primitives required.
- // For details, consult "Mac OS X Internals" book, Section 7.3.
- thread_act_t profiled_thread_;
- ThreadId profiled_thread_id_;
-};
-
-#elif defined(_WIN32) || defined(_WIN64) || defined(__CYGWIN__)
-
-// ----------------------------------------------------------------------------
-// Win32 profiler support. On Cygwin we use the same sampler implementation as
-// on Win32.
-
-class Sampler::PlatformData : public Malloced {
- public:
- // Get a handle to the calling thread. This is the thread that we are
- // going to profile. We need to make a copy of the handle because we are
- // going to use it in the sampler thread. Using GetThreadHandle() will
- // not work in this case. We're using OpenThread because DuplicateHandle
- // for some reason doesn't work in Chrome's sandbox.
- PlatformData()
- : profiled_thread_(OpenThread(THREAD_GET_CONTEXT |
- THREAD_SUSPEND_RESUME |
- THREAD_QUERY_INFORMATION,
- false,
- GetCurrentThreadId())),
- profiled_thread_id_(ThreadId::Current()) {}
-
- ~PlatformData() {
- if (profiled_thread_ != NULL) {
- CloseHandle(profiled_thread_);
- profiled_thread_ = NULL;
- }
- }
-
- HANDLE profiled_thread() { return profiled_thread_; }
- ThreadId profiled_thread_id() { return profiled_thread_id_; }
-
- private:
- HANDLE profiled_thread_;
- ThreadId profiled_thread_id_;
-};
-
#endif
@@ -478,20 +506,10 @@ class SamplerThread : public Thread {
void SampleContext(Sampler* sampler) {
thread_act_t profiled_thread = sampler->platform_data()->profiled_thread();
Isolate* isolate = sampler->isolate();
-#if defined(USE_SIMULATOR)
-#if V8_TARGET_ARCH_ARM || V8_TARGET_ARCH_MIPS
- ThreadId thread_id = sampler->platform_data()->profiled_thread_id();
- Isolate::PerIsolateThreadData* per_thread_data = isolate->
- FindPerThreadDataForThread(thread_id);
- if (!per_thread_data) return;
- Simulator* sim = per_thread_data->simulator();
- // Check if there is active simulator before allocating TickSample.
- if (!sim) return;
-#endif
-#endif // USE_SIMULATOR
- TickSample sample_obj;
- TickSample* sample = isolate->cpu_profiler()->TickSampleEvent();
- if (sample == NULL) sample = &sample_obj;
+
+ SampleHelper helper;
+ TickSample* sample = helper.Init(sampler, isolate);
+ if (sample == NULL) return;
if (KERN_SUCCESS != thread_suspend(profiled_thread)) return;
@@ -523,15 +541,7 @@ class SamplerThread : public Thread {
&count) == KERN_SUCCESS) {
sample->state = isolate->current_vm_state();
#if defined(USE_SIMULATOR)
-#if V8_TARGET_ARCH_ARM
- sample->pc = reinterpret_cast<Address>(sim->get_register(Simulator::pc));
- sample->sp = reinterpret_cast<Address>(sim->get_register(Simulator::sp));
- sample->fp = reinterpret_cast<Address>(sim->get_register(Simulator::r11));
-#elif V8_TARGET_ARCH_MIPS
- sample->pc = reinterpret_cast<Address>(sim->get_register(Simulator::pc));
- sample->sp = reinterpret_cast<Address>(sim->get_register(Simulator::sp));
- sample->fp = reinterpret_cast<Address>(sim->get_register(Simulator::fp));
-#endif
+ helper.FillRegisters(sample);
#else
sample->pc = reinterpret_cast<Address>(state.REGISTER_FIELD(ip));
sample->sp = reinterpret_cast<Address>(state.REGISTER_FIELD(sp));
@@ -550,42 +560,22 @@ class SamplerThread : public Thread {
HANDLE profiled_thread = sampler->platform_data()->profiled_thread();
if (profiled_thread == NULL) return;
- // Context used for sampling the register state of the profiled thread.
- CONTEXT context;
- memset(&context, 0, sizeof(context));
-
Isolate* isolate = sampler->isolate();
-#if defined(USE_SIMULATOR)
-#if V8_TARGET_ARCH_ARM || V8_TARGET_ARCH_MIPS
- ThreadId thread_id = sampler->platform_data()->profiled_thread_id();
- Isolate::PerIsolateThreadData* per_thread_data = isolate->
- FindPerThreadDataForThread(thread_id);
- if (!per_thread_data) return;
- Simulator* sim = per_thread_data->simulator();
- // Check if there is active simulator before allocating TickSample.
- if (!sim) return;
-#endif
-#endif // USE_SIMULATOR
- TickSample sample_obj;
- TickSample* sample = isolate->cpu_profiler()->TickSampleEvent();
- if (sample == NULL) sample = &sample_obj;
+ SampleHelper helper;
+ TickSample* sample = helper.Init(sampler, isolate);
+ if (sample == NULL) return;
- static const DWORD kSuspendFailed = static_cast<DWORD>(-1);
+ const DWORD kSuspendFailed = static_cast<DWORD>(-1);
if (SuspendThread(profiled_thread) == kSuspendFailed) return;
sample->state = isolate->current_vm_state();
+ // Context used for sampling the register state of the profiled thread.
+ CONTEXT context;
+ memset(&context, 0, sizeof(context));
context.ContextFlags = CONTEXT_FULL;
if (GetThreadContext(profiled_thread, &context) != 0) {
#if defined(USE_SIMULATOR)
-#if V8_TARGET_ARCH_ARM
- sample->pc = reinterpret_cast<Address>(sim->get_register(Simulator::pc));
- sample->sp = reinterpret_cast<Address>(sim->get_register(Simulator::sp));
- sample->fp = reinterpret_cast<Address>(sim->get_register(Simulator::r11));
-#elif V8_TARGET_ARCH_MIPS
- sample->pc = reinterpret_cast<Address>(sim->get_register(Simulator::pc));
- sample->sp = reinterpret_cast<Address>(sim->get_register(Simulator::sp));
- sample->fp = reinterpret_cast<Address>(sim->get_register(Simulator::fp));
-#endif
+ helper.FillRegisters(sample);
#else
#if V8_HOST_ARCH_X64
sample->pc = reinterpret_cast<Address>(context.Rip);
diff --git a/deps/v8/src/scanner.h b/deps/v8/src/scanner.h
index 92418f72b1..eb6764e80f 100644
--- a/deps/v8/src/scanner.h
+++ b/deps/v8/src/scanner.h
@@ -178,6 +178,11 @@ class LiteralBuffer {
bool is_ascii() { return is_ascii_; }
+ bool is_contextual_keyword(Vector<const char> keyword) {
+ return is_ascii() && keyword.length() == position_ &&
+ (memcmp(keyword.start(), backing_store_.start(), position_) == 0);
+ }
+
Vector<const uc16> utf16_literal() {
ASSERT(!is_ascii_);
ASSERT((position_ & 0x1) == 0);
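
LiteralBuffer::is_contextual_keyword() above is a length-then-bytes ASCII comparison. A self-contained equivalent, with plain pointers standing in for V8's Vector type:

    #include <cstring>
    #include <cstdio>

    // Exact match of a scanned literal against a keyword: length first, bytes second.
    bool IsContextualKeyword(const char* literal, int length, const char* keyword) {
      return static_cast<int>(std::strlen(keyword)) == length &&
             std::memcmp(keyword, literal, length) == 0;
    }

    int main() {
      const char literal[] = {'o', 'f'};  // not NUL-terminated, as in the buffer
      std::printf("%d\n", IsContextualKeyword(literal, 2, "of"));  // 1
      std::printf("%d\n", IsContextualKeyword(literal, 2, "in"));  // 0
    }
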
@@ -325,6 +330,10 @@ class Scanner {
ASSERT_NOT_NULL(current_.literal_chars);
return current_.literal_chars->is_ascii();
}
+ bool is_literal_contextual_keyword(Vector<const char> keyword) {
+ ASSERT_NOT_NULL(current_.literal_chars);
+ return current_.literal_chars->is_contextual_keyword(keyword);
+ }
int literal_length() const {
ASSERT_NOT_NULL(current_.literal_chars);
return current_.literal_chars->length();
@@ -361,6 +370,10 @@ class Scanner {
ASSERT_NOT_NULL(next_.literal_chars);
return next_.literal_chars->is_ascii();
}
+ bool is_next_contextual_keyword(Vector<const char> keyword) {
+ ASSERT_NOT_NULL(next_.literal_chars);
+ return next_.literal_chars->is_contextual_keyword(keyword);
+ }
int next_literal_length() const {
ASSERT_NOT_NULL(next_.literal_chars);
return next_.literal_chars->length();
diff --git a/deps/v8/src/scopeinfo.cc b/deps/v8/src/scopeinfo.cc
index 15ee29f9fc..c9df1fb580 100644
--- a/deps/v8/src/scopeinfo.cc
+++ b/deps/v8/src/scopeinfo.cc
@@ -74,10 +74,11 @@ Handle<ScopeInfo> ScopeInfo::Create(Scope* scope, Zone* zone) {
+ parameter_count + stack_local_count + 2 * context_local_count
+ (has_function_name ? 2 : 0);
- Handle<ScopeInfo> scope_info = FACTORY->NewScopeInfo(length);
+ Factory* factory = Isolate::Current()->factory();
+ Handle<ScopeInfo> scope_info = factory->NewScopeInfo(length);
// Encode the flags.
- int flags = TypeField::encode(scope->type()) |
+ int flags = ScopeTypeField::encode(scope->scope_type()) |
CallsEvalField::encode(scope->calls_eval()) |
LanguageModeField::encode(scope->language_mode()) |
FunctionVariableField::encode(function_name_info) |
@@ -154,9 +155,9 @@ ScopeInfo* ScopeInfo::Empty(Isolate* isolate) {
}
-ScopeType ScopeInfo::Type() {
+ScopeType ScopeInfo::scope_type() {
ASSERT(length() > 0);
- return TypeField::decode(Flags());
+ return ScopeTypeField::decode(Flags());
}
@@ -192,9 +193,9 @@ int ScopeInfo::ContextLength() {
FunctionVariableField::decode(Flags()) == CONTEXT;
bool has_context = context_locals > 0 ||
function_name_context_slot ||
- Type() == WITH_SCOPE ||
- (Type() == FUNCTION_SCOPE && CallsEval()) ||
- Type() == MODULE_SCOPE;
+ scope_type() == WITH_SCOPE ||
+ (scope_type() == FUNCTION_SCOPE && CallsEval()) ||
+ scope_type() == MODULE_SCOPE;
if (has_context) {
return Context::MIN_CONTEXT_SLOTS + context_locals +
(function_name_context_slot ? 1 : 0);
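
The ContextLength() hunk restates the same predicate through the renamed scope_type() accessor. Spelled out as a free function — the enum values are the ones this diff uses, but the signature is not V8's:

    #include <cstdio>

    enum ScopeType { EVAL_SCOPE, FUNCTION_SCOPE, MODULE_SCOPE, GLOBAL_SCOPE,
                     CATCH_SCOPE, BLOCK_SCOPE, WITH_SCOPE };

    // A scope needs a context when it has context-allocated locals, keeps the
    // function name in a context slot, is a with or module scope, or is a
    // function scope that calls eval.
    bool HasContext(int context_locals, bool function_name_context_slot,
                    ScopeType scope_type, bool calls_eval) {
      return context_locals > 0 ||
             function_name_context_slot ||
             scope_type == WITH_SCOPE ||
             (scope_type == FUNCTION_SCOPE && calls_eval) ||
             scope_type == MODULE_SCOPE;
    }

    int main() {
      std::printf("%d\n", HasContext(0, false, FUNCTION_SCOPE, true));  // 1
    }
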
diff --git a/deps/v8/src/scopes.cc b/deps/v8/src/scopes.cc
index 208dc76ac7..6ae7cc0691 100644
--- a/deps/v8/src/scopes.cc
+++ b/deps/v8/src/scopes.cc
@@ -104,7 +104,7 @@ Variable* VariableMap::Lookup(Handle<String> name) {
// ----------------------------------------------------------------------------
// Implementation of Scope
-Scope::Scope(Scope* outer_scope, ScopeType type, Zone* zone)
+Scope::Scope(Scope* outer_scope, ScopeType scope_type, Zone* zone)
: isolate_(zone->isolate()),
inner_scopes_(4, zone),
variables_(zone),
@@ -114,19 +114,19 @@ Scope::Scope(Scope* outer_scope, ScopeType type, Zone* zone)
unresolved_(16, zone),
decls_(4, zone),
interface_(FLAG_harmony_modules &&
- (type == MODULE_SCOPE || type == GLOBAL_SCOPE)
+ (scope_type == MODULE_SCOPE || scope_type == GLOBAL_SCOPE)
? Interface::NewModule(zone) : NULL),
already_resolved_(false),
zone_(zone) {
- SetDefaults(type, outer_scope, Handle<ScopeInfo>::null());
+ SetDefaults(scope_type, outer_scope, Handle<ScopeInfo>::null());
// The outermost scope must be a global scope.
- ASSERT(type == GLOBAL_SCOPE || outer_scope != NULL);
+ ASSERT(scope_type == GLOBAL_SCOPE || outer_scope != NULL);
ASSERT(!HasIllegalRedeclaration());
}
Scope::Scope(Scope* inner_scope,
- ScopeType type,
+ ScopeType scope_type,
Handle<ScopeInfo> scope_info,
Zone* zone)
: isolate_(Isolate::Current()),
@@ -140,7 +140,7 @@ Scope::Scope(Scope* inner_scope,
interface_(NULL),
already_resolved_(true),
zone_(zone) {
- SetDefaults(type, NULL, scope_info);
+ SetDefaults(scope_type, NULL, scope_info);
if (!scope_info.is_null()) {
num_heap_slots_ = scope_info_->ContextLength();
}
@@ -177,11 +177,11 @@ Scope::Scope(Scope* inner_scope, Handle<String> catch_variable_name, Zone* zone)
}
-void Scope::SetDefaults(ScopeType type,
+void Scope::SetDefaults(ScopeType scope_type,
Scope* outer_scope,
Handle<ScopeInfo> scope_info) {
outer_scope_ = outer_scope;
- type_ = type;
+ scope_type_ = scope_type;
scope_name_ = isolate_->factory()->empty_string();
dynamics_ = NULL;
receiver_ = NULL;
@@ -780,8 +780,8 @@ void Scope::GetNestedScopeChain(
#ifdef DEBUG
-static const char* Header(ScopeType type) {
- switch (type) {
+static const char* Header(ScopeType scope_type) {
+ switch (scope_type) {
case EVAL_SCOPE: return "eval";
case FUNCTION_SCOPE: return "function";
case MODULE_SCOPE: return "module";
@@ -855,7 +855,7 @@ void Scope::Print(int n) {
int n1 = n0 + 2; // indentation
// Print header.
- Indent(n0, Header(type_));
+ Indent(n0, Header(scope_type_));
if (scope_name_->length() > 0) {
PrintF(" ");
PrintName(scope_name_);
diff --git a/deps/v8/src/scopes.h b/deps/v8/src/scopes.h
index 66384a1c09..06aaa902cf 100644
--- a/deps/v8/src/scopes.h
+++ b/deps/v8/src/scopes.h
@@ -97,7 +97,7 @@ class Scope: public ZoneObject {
// ---------------------------------------------------------------------------
// Construction
- Scope(Scope* outer_scope, ScopeType type, Zone* zone);
+ Scope(Scope* outer_scope, ScopeType scope_type, Zone* zone);
// Compute top scope and allocate variables. For lazy compilation the top
// scope only contains the single lazily compiled function, so this
@@ -282,13 +282,13 @@ class Scope: public ZoneObject {
// Predicates.
// Specific scope types.
- bool is_eval_scope() const { return type_ == EVAL_SCOPE; }
- bool is_function_scope() const { return type_ == FUNCTION_SCOPE; }
- bool is_module_scope() const { return type_ == MODULE_SCOPE; }
- bool is_global_scope() const { return type_ == GLOBAL_SCOPE; }
- bool is_catch_scope() const { return type_ == CATCH_SCOPE; }
- bool is_block_scope() const { return type_ == BLOCK_SCOPE; }
- bool is_with_scope() const { return type_ == WITH_SCOPE; }
+ bool is_eval_scope() const { return scope_type_ == EVAL_SCOPE; }
+ bool is_function_scope() const { return scope_type_ == FUNCTION_SCOPE; }
+ bool is_module_scope() const { return scope_type_ == MODULE_SCOPE; }
+ bool is_global_scope() const { return scope_type_ == GLOBAL_SCOPE; }
+ bool is_catch_scope() const { return scope_type_ == CATCH_SCOPE; }
+ bool is_block_scope() const { return scope_type_ == BLOCK_SCOPE; }
+ bool is_with_scope() const { return scope_type_ == WITH_SCOPE; }
bool is_declaration_scope() const {
return is_eval_scope() || is_function_scope() ||
is_module_scope() || is_global_scope();
@@ -321,7 +321,7 @@ class Scope: public ZoneObject {
// Accessors.
// The type of this scope.
- ScopeType type() const { return type_; }
+ ScopeType scope_type() const { return scope_type_; }
// The language mode of this scope.
LanguageMode language_mode() const { return language_mode_; }
@@ -449,7 +449,7 @@ class Scope: public ZoneObject {
ZoneList<Scope*> inner_scopes_; // the immediately enclosed inner scopes
// The scope type.
- ScopeType type_;
+ ScopeType scope_type_;
// Debugging support.
Handle<String> scope_name_;
diff --git a/deps/v8/src/serialize.cc b/deps/v8/src/serialize.cc
index 3e70edc59e..6c5ccea817 100644
--- a/deps/v8/src/serialize.cc
+++ b/deps/v8/src/serialize.cc
@@ -558,10 +558,24 @@ void ExternalReferenceTable::PopulateTable(Isolate* isolate) {
UNCLASSIFIED,
58,
"Runtime::AllocateInOldPointerSpace");
+ Add(ExternalReference::old_data_space_allocation_top_address(
+ isolate).address(),
+ UNCLASSIFIED,
+ 59,
+ "Heap::OldDataSpaceAllocationTopAddress");
+ Add(ExternalReference::old_data_space_allocation_limit_address(
+ isolate).address(),
+ UNCLASSIFIED,
+ 60,
+ "Heap::OldDataSpaceAllocationLimitAddress");
+ Add(ExternalReference(Runtime::kAllocateInOldDataSpace, isolate).address(),
+ UNCLASSIFIED,
+ 61,
+ "Runtime::AllocateInOldDataSpace");
Add(ExternalReference::new_space_high_promotion_mode_active_address(isolate).
address(),
UNCLASSIFIED,
- 59,
+ 62,
"Heap::NewSpaceAllocationLimitAddress");
// Add a small set of deopt entry addresses to encoder without generating the
@@ -573,7 +587,7 @@ void ExternalReferenceTable::PopulateTable(Isolate* isolate) {
entry,
Deoptimizer::LAZY,
Deoptimizer::CALCULATE_ENTRY_ADDRESS);
- Add(address, LAZY_DEOPTIMIZATION, 60 + entry, "lazy_deopt");
+ Add(address, LAZY_DEOPTIMIZATION, 63 + entry, "lazy_deopt");
}
}
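Editor's note: the three new entries claim IDs 59-61, which pushes the high-promotion-mode entry from 59 to 62 and the lazy-deopt block's base from 60 to 63. These IDs are shared between the snapshot encoder and decoder, so they must stay unique; a collision would make the deserializer resolve an external reference to the wrong address. An illustrative guard (the constant name is an assumption, not in the patch):

    static const int kFirstLazyDeoptId = 63;  // must clear every fixed id above
    ASSERT(62 < kFirstLazyDeoptId);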
diff --git a/deps/v8/src/serialize.h b/deps/v8/src/serialize.h
index dc9ffd62b1..1b56a882f6 100644
--- a/deps/v8/src/serialize.h
+++ b/deps/v8/src/serialize.h
@@ -408,12 +408,11 @@ class SnapshotByteSink {
class SerializationAddressMapper {
public:
SerializationAddressMapper()
- : serialization_map_(new HashMap(&SerializationMatchFun)),
- no_allocation_(new AssertNoAllocation()) { }
+ : no_allocation_(),
+ serialization_map_(new HashMap(&SerializationMatchFun)) { }
~SerializationAddressMapper() {
delete serialization_map_;
- delete no_allocation_;
}
bool IsMapped(HeapObject* obj) {
@@ -450,8 +449,8 @@ class SerializationAddressMapper {
return reinterpret_cast<void*>(v);
}
+ DisallowHeapAllocation no_allocation_;
HashMap* serialization_map_;
- AssertNoAllocation* no_allocation_;
DISALLOW_COPY_AND_ASSIGN(SerializationAddressMapper);
};
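Editor's note: the reorder matters because C++ constructs members in declaration order, not initializer-list order. With DisallowHeapAllocation declared first, the no-GC scope is in force before (and for as long as) the map exists, and the initializer list now matches declaration order, which also keeps -Wreorder quiet. A minimal illustration of the rule, reusing the names from this hunk:

    struct Example {
      Example() : guard_(), map_(new HashMap(&SerializationMatchFun)) {}
      DisallowHeapAllocation guard_;  // constructed first: declared first
      HashMap* map_;                  // constructed second, under the guard
    };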
diff --git a/deps/v8/src/store-buffer.cc b/deps/v8/src/store-buffer.cc
index 7d73dd5ed1..c650f57ccc 100644
--- a/deps/v8/src/store-buffer.cc
+++ b/deps/v8/src/store-buffer.cc
@@ -188,10 +188,10 @@ void StoreBuffer::EnsureSpace(intptr_t space_needed) {
{ 3, ((Page::kPageSize / kPointerSize) / 3) / 256 },
{ 1, 0}
};
- for (int i = kSampleFinenesses - 1; i >= 0; i--) {
+ for (int i = 0; i < kSampleFinenesses; i++) {
ExemptPopularPages(samples[i].prime_sample_step, samples[i].threshold);
// As a last resort we mark all pages as being exempt from the store buffer.
- ASSERT(i != 0 || old_top_ == old_start_);
+ ASSERT(i != (kSampleFinenesses - 1) || old_top_ == old_start_);
if (old_limit_ - old_top_ > old_top_ - old_start_) return;
}
UNREACHABLE();
diff --git a/deps/v8/src/string-stream.cc b/deps/v8/src/string-stream.cc
index ebe1b5b43b..109622567a 100644
--- a/deps/v8/src/string-stream.cc
+++ b/deps/v8/src/string-stream.cc
@@ -252,6 +252,14 @@ void StringStream::Add(const char* format, FmtElm arg0, FmtElm arg1,
}
+void StringStream::Add(const char* format, FmtElm arg0, FmtElm arg1,
+ FmtElm arg2, FmtElm arg3, FmtElm arg4) {
+ const char argc = 5;
+ FmtElm argv[argc] = { arg0, arg1, arg2, arg3, arg4 };
+ Add(CStrVector(format), Vector<FmtElm>(argv, argc));
+}
+
+
SmartArrayPointer<const char> StringStream::ToCString() const {
char* str = NewArray<char>(length_ + 1);
OS::MemCopy(str, buffer_, length_);
@@ -282,7 +290,8 @@ void StringStream::OutputToFile(FILE* out) {
Handle<String> StringStream::ToString() {
- return FACTORY->NewStringFromUtf8(Vector<const char>(buffer_, length_));
+ Factory* factory = Isolate::Current()->factory();
+ return factory->NewStringFromUtf8(Vector<const char>(buffer_, length_));
}
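Editor's note: the new five-argument overload follows the existing pattern, marshalling the arguments into a stack array and delegating to the vector-based Add. FmtElm converts implicitly from ints and C strings, so a hypothetical call site could read:

    HeapStringAllocator allocator;
    StringStream stream(&allocator);
    // Five formatted arguments, one more than previously possible:
    stream.Add("%s %i/%i: %s -> %s", "phase", 1, 3, "compile", "ok");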
diff --git a/deps/v8/src/string-stream.h b/deps/v8/src/string-stream.h
index 88b4afe115..2367994116 100644
--- a/deps/v8/src/string-stream.h
+++ b/deps/v8/src/string-stream.h
@@ -137,6 +137,12 @@ class StringStream {
FmtElm arg1,
FmtElm arg2,
FmtElm arg3);
+ void Add(const char* format,
+ FmtElm arg0,
+ FmtElm arg1,
+ FmtElm arg2,
+ FmtElm arg3,
+ FmtElm arg4);
// Getting the message out.
void OutputToFile(FILE* out);
diff --git a/deps/v8/src/stub-cache.cc b/deps/v8/src/stub-cache.cc
index 4e3a906be9..0f81669960 100644
--- a/deps/v8/src/stub-cache.cc
+++ b/deps/v8/src/stub-cache.cc
@@ -341,6 +341,7 @@ Handle<Code> StubCache::ComputeKeyedLoadField(Handle<Name> name,
PropertyIndex field,
Representation representation) {
if (receiver.is_identical_to(holder)) {
+ // TODO(titzer): this should use an HObjectAccess
KeyedLoadFieldStub stub(field.is_inobject(holder),
field.translate(holder),
representation);
@@ -908,8 +909,6 @@ Handle<Code> StubCache::ComputeCallMiss(int argc,
Handle<Code> StubCache::ComputeCompareNil(Handle<Map> receiver_map,
CompareNilICStub& stub) {
- stub.SetKind(kNonStrictEquality);
-
Handle<String> name(isolate_->heap()->empty_string());
if (!receiver_map->is_shared()) {
Handle<Code> cached_ic = FindIC(name, receiver_map, Code::COMPARE_NIL_IC,
@@ -918,6 +917,7 @@ Handle<Code> StubCache::ComputeCompareNil(Handle<Map> receiver_map,
}
Handle<Code> ic = stub.GetCode(isolate_);
+
// For monomorphic maps, use the code as a template, copying and replacing
// the monomorphic map that checks the object's type.
ic = isolate_->factory()->CopyCode(ic);
@@ -1136,7 +1136,7 @@ RUNTIME_FUNCTION(MaybeObject*, LoadPropertyWithInterceptorOnly) {
Handle<InterceptorInfo> interceptor_info = args.at<InterceptorInfo>(1);
ASSERT(kArgsOffset == 2);
// No ReturnValue in interceptors.
- ASSERT(args.length() == kArgsOffset + PCA::kArgsLength - 1);
+ ASSERT_EQ(kArgsOffset + PCA::kArgsLength - 2, args.length());
// TODO(rossberg): Support symbols in the API.
if (name_handle->IsSymbol())
@@ -1189,8 +1189,8 @@ static MaybeObject* ThrowReferenceError(Isolate* isolate, Name* name) {
HandleScope scope(isolate);
Handle<Name> name_handle(name);
Handle<Object> error =
- FACTORY->NewReferenceError("not_defined",
- HandleVector(&name_handle, 1));
+ isolate->factory()->NewReferenceError("not_defined",
+ HandleVector(&name_handle, 1));
return isolate->Throw(*error);
}
@@ -1203,7 +1203,7 @@ static MaybeObject* LoadWithInterceptor(Arguments* args,
Handle<InterceptorInfo> interceptor_info = args->at<InterceptorInfo>(1);
ASSERT(kArgsOffset == 2);
// No ReturnValue in interceptors.
- ASSERT(args->length() == kArgsOffset + PCA::kArgsLength - 1);
+ ASSERT_EQ(kArgsOffset + PCA::kArgsLength - 2, args->length());
Handle<JSObject> receiver_handle =
args->at<JSObject>(kArgsOffset - PCA::kThisIndex);
Handle<JSObject> holder_handle =
@@ -2027,15 +2027,6 @@ Handle<Code> CallStubCompiler::GetCode(Handle<JSFunction> function) {
}
-Handle<Code> ConstructStubCompiler::GetCode() {
- Code::Flags flags = Code::ComputeFlags(Code::STUB);
- Handle<Code> code = GetCodeWithFlags(flags, "ConstructStub");
- PROFILE(isolate(), CodeCreateEvent(Logger::STUB_TAG, *code, "ConstructStub"));
- GDBJIT(AddCode(GDBJITInterface::STUB, "ConstructStub", *code));
- return code;
-}
-
-
CallOptimization::CallOptimization(LookupResult* lookup) {
if (lookup->IsFound() &&
lookup->IsCacheable() &&
diff --git a/deps/v8/src/stub-cache.h b/deps/v8/src/stub-cache.h
index 9365d96de0..a1b55d8d11 100644
--- a/deps/v8/src/stub-cache.h
+++ b/deps/v8/src/stub-cache.h
@@ -1067,17 +1067,6 @@ class CallStubCompiler: public StubCompiler {
};
-class ConstructStubCompiler: public StubCompiler {
- public:
- explicit ConstructStubCompiler(Isolate* isolate) : StubCompiler(isolate) { }
-
- Handle<Code> CompileConstructStub(Handle<JSFunction> function);
-
- private:
- Handle<Code> GetCode();
-};
-
-
// Holds information about possible function call optimizations.
class CallOptimization BASE_EMBEDDED {
public:
diff --git a/deps/v8/src/sweeper-thread.cc b/deps/v8/src/sweeper-thread.cc
index f08fcfbc6f..099f5d1879 100644
--- a/deps/v8/src/sweeper-thread.cc
+++ b/deps/v8/src/sweeper-thread.cc
@@ -56,6 +56,10 @@ SweeperThread::SweeperThread(Isolate* isolate)
void SweeperThread::Run() {
Isolate::SetIsolateThreadLocals(isolate_, NULL);
+ DisallowHeapAllocation no_allocation;
+ DisallowHandleAllocation no_handles;
+ DisallowHandleDereference no_deref;
+
while (true) {
start_sweeping_semaphore_->Wait();
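Editor's note: the three guards are the scope-object counterparts of the old AssertNoAllocation. In debug builds each forbids one class of operation on this thread for its lifetime; since the sweeper thread must never allocate on the V8 heap, create handles, or dereference them, pinning all three for the whole Run() loop turns any violation into an assertion failure. The pattern in isolation:

    {
      DisallowHeapAllocation no_allocation;  // no V8 heap allocation here
      DisallowHandleAllocation no_handles;   // no new handles either
      DisallowHandleDereference no_deref;    // and no reads through handles
      // ... background work that must stay off the V8 heap ...
    }   // all three restrictions end with the scope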
diff --git a/deps/v8/src/type-info.cc b/deps/v8/src/type-info.cc
index 53866c16cb..5113c550ec 100644
--- a/deps/v8/src/type-info.cc
+++ b/deps/v8/src/type-info.cc
@@ -78,9 +78,28 @@ static uint32_t IdToKey(TypeFeedbackId ast_id) {
Handle<Object> TypeFeedbackOracle::GetInfo(TypeFeedbackId ast_id) {
int entry = dictionary_->FindEntry(IdToKey(ast_id));
- return entry != UnseededNumberDictionary::kNotFound
- ? Handle<Object>(dictionary_->ValueAt(entry), isolate_)
- : Handle<Object>::cast(isolate_->factory()->undefined_value());
+ if (entry != UnseededNumberDictionary::kNotFound) {
+ Object* value = dictionary_->ValueAt(entry);
+ if (value->IsJSGlobalPropertyCell()) {
+ JSGlobalPropertyCell* cell = JSGlobalPropertyCell::cast(value);
+ return Handle<Object>(cell->value(), isolate_);
+ } else {
+ return Handle<Object>(value, isolate_);
+ }
+ }
+ return Handle<Object>::cast(isolate_->factory()->undefined_value());
+}
+
+
+Handle<JSGlobalPropertyCell> TypeFeedbackOracle::GetInfoCell(
+ TypeFeedbackId ast_id) {
+ int entry = dictionary_->FindEntry(IdToKey(ast_id));
+ if (entry != UnseededNumberDictionary::kNotFound) {
+ JSGlobalPropertyCell* cell = JSGlobalPropertyCell::cast(
+ dictionary_->ValueAt(entry));
+ return Handle<JSGlobalPropertyCell>(cell, isolate_);
+ }
+ return Handle<JSGlobalPropertyCell>::null();
}
@@ -168,12 +187,7 @@ bool TypeFeedbackOracle::CallIsMonomorphic(Call* expr) {
bool TypeFeedbackOracle::CallNewIsMonomorphic(CallNew* expr) {
Handle<Object> info = GetInfo(expr->CallNewFeedbackId());
- if (info->IsSmi()) {
- ASSERT(static_cast<ElementsKind>(Smi::cast(*info)->value()) <=
- LAST_FAST_ELEMENTS_KIND);
- return isolate_->global_context()->array_function();
- }
- return info->IsJSFunction();
+ return info->IsSmi() || info->IsJSFunction();
}
@@ -184,10 +198,11 @@ bool TypeFeedbackOracle::ObjectLiteralStoreIsMonomorphic(
}
-bool TypeFeedbackOracle::IsForInFastCase(ForInStatement* stmt) {
+byte TypeFeedbackOracle::ForInType(ForInStatement* stmt) {
Handle<Object> value = GetInfo(stmt->ForInFeedbackId());
return value->IsSmi() &&
- Smi::cast(*value)->value() == TypeFeedbackCells::kForInFastCaseMarker;
+ Smi::cast(*value)->value() == TypeFeedbackCells::kForInFastCaseMarker
+ ? ForInStatement::FAST_FOR_IN : ForInStatement::SLOW_FOR_IN;
}
@@ -221,8 +236,8 @@ Handle<Map> TypeFeedbackOracle::StoreMonomorphicReceiverType(
Handle<Map> TypeFeedbackOracle::CompareNilMonomorphicReceiverType(
- TypeFeedbackId id) {
- Handle<Object> maybe_code = GetInfo(id);
+ CompareOperation* expr) {
+ Handle<Object> maybe_code = GetInfo(expr->CompareOperationFeedbackId());
if (maybe_code->IsCode()) {
Map* map = Handle<Code>::cast(maybe_code)->FindFirstMap();
if (map == NULL) return Handle<Map>();
@@ -296,33 +311,15 @@ CheckType TypeFeedbackOracle::GetCallCheckType(Call* expr) {
}
-Handle<JSObject> TypeFeedbackOracle::GetPrototypeForPrimitiveCheck(
- CheckType check) {
- JSFunction* function = NULL;
- switch (check) {
- case RECEIVER_MAP_CHECK:
- UNREACHABLE();
- break;
- case STRING_CHECK:
- function = native_context_->string_function();
- break;
- case SYMBOL_CHECK:
- function = native_context_->symbol_function();
- break;
- case NUMBER_CHECK:
- function = native_context_->number_function();
- break;
- case BOOLEAN_CHECK:
- function = native_context_->boolean_function();
- break;
- }
- ASSERT(function != NULL);
- return Handle<JSObject>(JSObject::cast(function->instance_prototype()));
-}
-
-
Handle<JSFunction> TypeFeedbackOracle::GetCallTarget(Call* expr) {
- return Handle<JSFunction>::cast(GetInfo(expr->CallFeedbackId()));
+ Handle<Object> info = GetInfo(expr->CallFeedbackId());
+ if (info->IsSmi()) {
+ ASSERT(static_cast<ElementsKind>(Smi::cast(*info)->value()) <=
+ LAST_FAST_ELEMENTS_KIND);
+ return Handle<JSFunction>(isolate_->global_context()->array_function());
+ } else {
+ return Handle<JSFunction>::cast(info);
+ }
}
@@ -338,21 +335,12 @@ Handle<JSFunction> TypeFeedbackOracle::GetCallNewTarget(CallNew* expr) {
}
-ElementsKind TypeFeedbackOracle::GetCallNewElementsKind(CallNew* expr) {
- Handle<Object> info = GetInfo(expr->CallNewFeedbackId());
- if (info->IsSmi()) {
- return static_cast<ElementsKind>(Smi::cast(*info)->value());
- } else {
- // TODO(mvstanton): avoided calling GetInitialFastElementsKind() for perf
- // reasons. Is there a better fix?
- if (FLAG_packed_arrays) {
- return FAST_SMI_ELEMENTS;
- } else {
- return FAST_HOLEY_SMI_ELEMENTS;
- }
- }
+Handle<JSGlobalPropertyCell> TypeFeedbackOracle::GetCallNewAllocationInfoCell(
+ CallNew* expr) {
+ return GetInfoCell(expr->CallNewFeedbackId());
}
+
Handle<Map> TypeFeedbackOracle::GetObjectLiteralStoreMap(
ObjectLiteral::Property* prop) {
ASSERT(ObjectLiteralStoreIsMonomorphic(prop));
@@ -480,7 +468,9 @@ static TypeInfo TypeFromBinaryOpType(BinaryOpIC::TypeInfo binary_type) {
void TypeFeedbackOracle::BinaryType(BinaryOperation* expr,
TypeInfo* left,
TypeInfo* right,
- TypeInfo* result) {
+ TypeInfo* result,
+ bool* has_fixed_right_arg,
+ int* fixed_right_arg_value) {
Handle<Object> object = GetInfo(expr->BinaryOperationFeedbackId());
TypeInfo unknown = TypeInfo::Unknown();
if (!object->IsCode()) {
@@ -489,12 +479,17 @@ void TypeFeedbackOracle::BinaryType(BinaryOperation* expr,
}
Handle<Code> code = Handle<Code>::cast(object);
if (code->is_binary_op_stub()) {
+ int minor_key = code->stub_info();
BinaryOpIC::TypeInfo left_type, right_type, result_type;
- BinaryOpStub::decode_types_from_minor_key(code->stub_info(), &left_type,
- &right_type, &result_type);
+ BinaryOpStub::decode_types_from_minor_key(
+ minor_key, &left_type, &right_type, &result_type);
*left = TypeFromBinaryOpType(left_type);
*right = TypeFromBinaryOpType(right_type);
*result = TypeFromBinaryOpType(result_type);
+ *has_fixed_right_arg =
+ BinaryOpStub::decode_has_fixed_right_arg_from_minor_key(minor_key);
+ *fixed_right_arg_value =
+ BinaryOpStub::decode_fixed_right_arg_value_from_minor_key(minor_key);
return;
}
// Not a binary op stub.
@@ -641,8 +636,8 @@ byte TypeFeedbackOracle::ToBooleanTypes(TypeFeedbackId id) {
}
-byte TypeFeedbackOracle::CompareNilTypes(TypeFeedbackId id) {
- Handle<Object> object = GetInfo(id);
+byte TypeFeedbackOracle::CompareNilTypes(CompareOperation* expr) {
+ Handle<Object> object = GetInfo(expr->CompareOperationFeedbackId());
if (object->IsCode() &&
Handle<Code>::cast(object)->is_compare_nil_ic_stub()) {
return Handle<Code>::cast(object)->compare_nil_types();
@@ -657,7 +652,7 @@ byte TypeFeedbackOracle::CompareNilTypes(TypeFeedbackId id) {
// dictionary (possibly triggering GC), and finally we relocate the collected
// infos before we process them.
void TypeFeedbackOracle::BuildDictionary(Handle<Code> code) {
- AssertNoAllocation no_allocation;
+ DisallowHeapAllocation no_allocation;
ZoneList<RelocInfo> infos(16, zone());
HandleScope scope(isolate_);
GetRelocInfos(code, &infos);
@@ -680,14 +675,14 @@ void TypeFeedbackOracle::GetRelocInfos(Handle<Code> code,
void TypeFeedbackOracle::CreateDictionary(Handle<Code> code,
ZoneList<RelocInfo>* infos) {
- DisableAssertNoAllocation allocation_allowed;
+ AllowHeapAllocation allocation_allowed;
int cell_count = code->type_feedback_info()->IsTypeFeedbackInfo()
? TypeFeedbackInfo::cast(code->type_feedback_info())->
type_feedback_cells()->CellCount()
: 0;
int length = infos->length() + cell_count;
byte* old_start = code->instruction_start();
- dictionary_ = FACTORY->NewUnseededNumberDictionary(length);
+ dictionary_ = isolate()->factory()->NewUnseededNumberDictionary(length);
byte* new_start = code->instruction_start();
RelocateRelocInfos(infos, old_start, new_start);
}
@@ -764,12 +759,13 @@ void TypeFeedbackOracle::ProcessTypeFeedbackCells(Handle<Code> code) {
TypeFeedbackInfo::cast(raw_info)->type_feedback_cells());
for (int i = 0; i < cache->CellCount(); i++) {
TypeFeedbackId ast_id = cache->AstId(i);
- Object* value = cache->Cell(i)->value();
+ JSGlobalPropertyCell* cell = cache->Cell(i);
+ Object* value = cell->value();
if (value->IsSmi() ||
(value->IsJSFunction() &&
!CanRetainOtherContext(JSFunction::cast(value),
*native_context_))) {
- SetInfo(ast_id, value);
+ SetInfo(ast_id, cell);
}
}
}
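Editor's note: recording the cell rather than its current value means a later update to the cell (say, fresh allocation-site feedback) stays visible to anyone holding the handle. A hypothetical consumer on the compiler side, using the public accessor added below in type-info.h:

    Handle<JSGlobalPropertyCell> cell =
        oracle->GetCallNewAllocationInfoCell(call_new_expr);
    if (!cell.is_null()) {
      Object* feedback = cell->value();  // re-reads observe later updates
    }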
diff --git a/deps/v8/src/type-info.h b/deps/v8/src/type-info.h
index d6d958d56d..53a83be659 100644
--- a/deps/v8/src/type-info.h
+++ b/deps/v8/src/type-info.h
@@ -29,7 +29,6 @@
#define V8_TYPE_INFO_H_
#include "allocation.h"
-#include "ast.h"
#include "globals.h"
#include "zone-inl.h"
@@ -232,6 +231,8 @@ class ICStub;
class Property;
class SmallMapList;
class UnaryOperation;
+class ObjectLiteral;
+class ObjectLiteralProperty;
class TypeFeedbackOracle: public ZoneObject {
@@ -248,13 +249,15 @@ class TypeFeedbackOracle: public ZoneObject {
bool StoreIsPolymorphic(TypeFeedbackId ast_id);
bool CallIsMonomorphic(Call* expr);
bool CallNewIsMonomorphic(CallNew* expr);
- bool ObjectLiteralStoreIsMonomorphic(ObjectLiteral::Property* prop);
+ bool ObjectLiteralStoreIsMonomorphic(ObjectLiteralProperty* prop);
- bool IsForInFastCase(ForInStatement* expr);
+ // TODO(1571) We can't use ForInStatement::ForInType as the return value due
+ // to various cycles in our headers.
+ byte ForInType(ForInStatement* expr);
Handle<Map> LoadMonomorphicReceiverType(Property* expr);
Handle<Map> StoreMonomorphicReceiverType(TypeFeedbackId id);
- Handle<Map> CompareNilMonomorphicReceiverType(TypeFeedbackId id);
+ Handle<Map> CompareNilMonomorphicReceiverType(CompareOperation* expr);
KeyedAccessStoreMode GetStoreMode(TypeFeedbackId ast_id);
@@ -278,33 +281,33 @@ class TypeFeedbackOracle: public ZoneObject {
void CollectPolymorphicMaps(Handle<Code> code, SmallMapList* types);
CheckType GetCallCheckType(Call* expr);
- Handle<JSObject> GetPrototypeForPrimitiveCheck(CheckType check);
-
Handle<JSFunction> GetCallTarget(Call* expr);
Handle<JSFunction> GetCallNewTarget(CallNew* expr);
- ElementsKind GetCallNewElementsKind(CallNew* expr);
+ Handle<JSGlobalPropertyCell> GetCallNewAllocationInfoCell(CallNew* expr);
- Handle<Map> GetObjectLiteralStoreMap(ObjectLiteral::Property* prop);
+ Handle<Map> GetObjectLiteralStoreMap(ObjectLiteralProperty* prop);
bool LoadIsBuiltin(Property* expr, Builtins::Name id);
bool LoadIsStub(Property* expr, ICStub* stub);
// TODO(1571) We can't use ToBooleanStub::Types as the return value because
- // of various cylces in our headers. Death to tons of implementations in
+ // of various cycles in our headers. Death to tons of implementations in
// headers!! :-P
byte ToBooleanTypes(TypeFeedbackId ast_id);
// TODO(1571) We can't use CompareNilICStub::Types as the return value because
   // of various cycles in our headers. Death to tons of implementations in
// headers!! :-P
- byte CompareNilTypes(TypeFeedbackId ast_id);
+ byte CompareNilTypes(CompareOperation* expr);
// Get type information for arithmetic operations and compares.
TypeInfo UnaryType(UnaryOperation* expr);
void BinaryType(BinaryOperation* expr,
TypeInfo* left,
TypeInfo* right,
- TypeInfo* result);
+ TypeInfo* result,
+ bool* has_fixed_right_arg,
+ int* fixed_right_arg_value);
void CompareType(CompareOperation* expr,
TypeInfo* left_type,
TypeInfo* right_type,
@@ -314,6 +317,7 @@ class TypeFeedbackOracle: public ZoneObject {
TypeInfo IncrementType(CountOperation* expr);
Zone* zone() const { return zone_; }
+ Isolate* isolate() const { return isolate_; }
private:
void CollectReceiverTypes(TypeFeedbackId ast_id,
@@ -334,11 +338,11 @@ class TypeFeedbackOracle: public ZoneObject {
// Returns an element from the backing store. Returns undefined if
// there is no information.
- public:
- // TODO(mvstanton): how to get this information without making the method
- // public?
Handle<Object> GetInfo(TypeFeedbackId ast_id);
+ // Return the cell that contains type feedback.
+ Handle<JSGlobalPropertyCell> GetInfoCell(TypeFeedbackId ast_id);
+
private:
Handle<Context> native_context_;
Isolate* isolate_;
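Editor's note: BinaryType's two new out-parameters surface the "fixed right argument" feedback recorded by the binary-op stub, letting the optimizer specialize an operation whose right operand is always the same constant (modulus being the motivating case). A hedged sketch of a caller; the surrounding variables are assumptions:

    TypeInfo left, right, result;
    bool has_fixed_right_arg;
    int fixed_right_arg_value;
    oracle->BinaryType(expr, &left, &right, &result,
                       &has_fixed_right_arg, &fixed_right_arg_value);
    if (has_fixed_right_arg && expr->op() == Token::MOD) {
      // e.g. emit code specialized for (x % fixed_right_arg_value)
    }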
diff --git a/deps/v8/src/typedarray.js b/deps/v8/src/typedarray.js
index 4fade00e10..04c487f43c 100644
--- a/deps/v8/src/typedarray.js
+++ b/deps/v8/src/typedarray.js
@@ -94,7 +94,7 @@ function CreateTypedArrayConstructor(name, elementSize, arrayId, constructor) {
} else if (!IS_UNDEFINED(arg1)){
ConstructByArrayLike(this, arg1);
} else {
- throw MakeTypeError("parameterless_typed_array_constr", name);
+ throw MakeTypeError("parameterless_typed_array_constr", [name]);
}
} else {
return new constructor(arg1, arg2, arg3);
diff --git a/deps/v8/src/types.cc b/deps/v8/src/types.cc
new file mode 100644
index 0000000000..f7fbd2d69b
--- /dev/null
+++ b/deps/v8/src/types.cc
@@ -0,0 +1,289 @@
+// Copyright 2013 the V8 project authors. All rights reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following
+// disclaimer in the documentation and/or other materials provided
+// with the distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived
+// from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+#include "types.h"
+
+namespace v8 {
+namespace internal {
+
+// Get the smallest bitset subsuming this type.
+int Type::LubBitset() {
+ if (this->is_bitset()) {
+ return this->as_bitset();
+ } else if (this->is_union()) {
+ Handle<Unioned> unioned = this->as_union();
+ int bitset = kNone;
+ for (int i = 0; i < unioned->length(); ++i) {
+ bitset |= union_get(unioned, i)->LubBitset();
+ }
+ return bitset;
+ } else {
+ Map* map = NULL;
+ if (this->is_class()) {
+ map = *this->as_class();
+ } else {
+ v8::internal::Object* value = this->as_constant()->value();
+ if (value->IsSmi()) return kSmi;
+ map = HeapObject::cast(value)->map();
+ }
+ switch (map->instance_type()) {
+ case STRING_TYPE:
+ case ASCII_STRING_TYPE:
+ case CONS_STRING_TYPE:
+ case CONS_ASCII_STRING_TYPE:
+ case SLICED_STRING_TYPE:
+ case EXTERNAL_STRING_TYPE:
+ case EXTERNAL_ASCII_STRING_TYPE:
+ case EXTERNAL_STRING_WITH_ONE_BYTE_DATA_TYPE:
+ case SHORT_EXTERNAL_STRING_TYPE:
+ case SHORT_EXTERNAL_ASCII_STRING_TYPE:
+ case SHORT_EXTERNAL_STRING_WITH_ONE_BYTE_DATA_TYPE:
+ case INTERNALIZED_STRING_TYPE:
+ case ASCII_INTERNALIZED_STRING_TYPE:
+ case CONS_INTERNALIZED_STRING_TYPE:
+ case CONS_ASCII_INTERNALIZED_STRING_TYPE:
+ case EXTERNAL_INTERNALIZED_STRING_TYPE:
+ case EXTERNAL_ASCII_INTERNALIZED_STRING_TYPE:
+ case EXTERNAL_INTERNALIZED_STRING_WITH_ONE_BYTE_DATA_TYPE:
+ case SHORT_EXTERNAL_INTERNALIZED_STRING_TYPE:
+ case SHORT_EXTERNAL_ASCII_INTERNALIZED_STRING_TYPE:
+ case SHORT_EXTERNAL_INTERNALIZED_STRING_WITH_ONE_BYTE_DATA_TYPE:
+ return kString;
+ case SYMBOL_TYPE:
+ return kSymbol;
+ case ODDBALL_TYPE:
+ return kOddball;
+ case HEAP_NUMBER_TYPE:
+ return kDouble;
+ case JS_VALUE_TYPE:
+ case JS_DATE_TYPE:
+ case JS_OBJECT_TYPE:
+ case JS_CONTEXT_EXTENSION_OBJECT_TYPE:
+ case JS_GENERATOR_OBJECT_TYPE:
+ case JS_MODULE_TYPE:
+ case JS_GLOBAL_OBJECT_TYPE:
+ case JS_BUILTINS_OBJECT_TYPE:
+ case JS_GLOBAL_PROXY_TYPE:
+ case JS_ARRAY_BUFFER_TYPE:
+ case JS_TYPED_ARRAY_TYPE:
+ case JS_WEAK_MAP_TYPE:
+ case JS_REGEXP_TYPE:
+ return kOtherObject;
+ case JS_ARRAY_TYPE:
+ return kArray;
+ case JS_FUNCTION_TYPE:
+ return kFunction;
+ case JS_PROXY_TYPE:
+ case JS_FUNCTION_PROXY_TYPE:
+ return kProxy;
+ default:
+ UNREACHABLE();
+ return kNone;
+ }
+ }
+}
+
+
+// Get the largest bitset subsumed by this type.
+int Type::GlbBitset() {
+ if (this->is_bitset()) {
+ return this->as_bitset();
+ } else if (this->is_union()) {
+ // All but the first are non-bitsets and thus would yield kNone anyway.
+ return union_get(this->as_union(), 0)->GlbBitset();
+ } else {
+ return kNone;
+ }
+}
+
+
+// Check this <= that.
+bool Type::Is(Handle<Type> that) {
+ // Fast path for bitsets.
+ if (that->is_bitset()) {
+ return (this->LubBitset() | that->as_bitset()) == that->as_bitset();
+ }
+
+ if (that->is_class()) {
+ return this->is_class() && *this->as_class() == *that->as_class();
+ }
+ if (that->is_constant()) {
+ return this->is_constant() &&
+ this->as_constant()->value() == that->as_constant()->value();
+ }
+
+ // (T1 \/ ... \/ Tn) <= T <=> (T1 <= T) /\ ... /\ (Tn <= T)
+ if (this->is_union()) {
+ Handle<Unioned> unioned = this->as_union();
+ for (int i = 0; i < unioned->length(); ++i) {
+ Handle<Type> this_i = union_get(unioned, i);
+ if (!this_i->Is(that)) return false;
+ }
+ return true;
+ }
+
+ // T <= (T1 \/ ... \/ Tn) <=> (T <= T1) \/ ... \/ (T <= Tn)
+ // (iff T is not a union)
+ if (that->is_union()) {
+ Handle<Unioned> unioned = that->as_union();
+ for (int i = 0; i < unioned->length(); ++i) {
+ Handle<Type> that_i = union_get(unioned, i);
+ if (this->Is(that_i)) return true;
+ if (this->is_bitset()) break; // Fast fail, no other field is a bitset.
+ }
+ return false;
+ }
+
+ return false;
+}
+
+
+// Check this overlaps that.
+bool Type::Maybe(Handle<Type> that) {
+ // Fast path for bitsets.
+ if (this->is_bitset()) {
+ return (this->as_bitset() & that->LubBitset()) != 0;
+ }
+ if (that->is_bitset()) {
+ return (this->LubBitset() & that->as_bitset()) != 0;
+ }
+
+ if (this->is_class()) {
+ return that->is_class() && *this->as_class() == *that->as_class();
+ }
+ if (this->is_constant()) {
+ return that->is_constant() &&
+ this->as_constant()->value() == that->as_constant()->value();
+ }
+
+ // (T1 \/ ... \/ Tn) overlaps T <=> (T1 overlaps T) \/ ... \/ (Tn overlaps T)
+ if (this->is_union()) {
+ Handle<Unioned> unioned = this->as_union();
+ for (int i = 0; i < unioned->length(); ++i) {
+ Handle<Type> this_i = union_get(unioned, i);
+ if (this_i->Maybe(that)) return true;
+ }
+ return false;
+ }
+
+ // T overlaps (T1 \/ ... \/ Tn) <=> (T overlaps T1) \/ ... \/ (T overlaps Tn)
+ if (that->is_union()) {
+ Handle<Unioned> unioned = that->as_union();
+ for (int i = 0; i < unioned->length(); ++i) {
+ Handle<Type> that_i = union_get(unioned, i);
+ if (this->Maybe(that_i)) return true;
+ }
+ return false;
+ }
+
+ return false;
+}
+
+
+bool Type::InUnion(Handle<Unioned> unioned, int current_size) {
+ ASSERT(!this->is_union());
+ for (int i = 0; i < current_size; ++i) {
+ Handle<Type> type = union_get(unioned, i);
+ if (type->is_bitset() ? this->Is(type) : this == *type) return true;
+ }
+ return false;
+}
+
+// Copies the non-bitset members of this type that are not already present in
+// |result| into |result|, starting at index |current_size|. Returns the
+// updated size.

+int Type::ExtendUnion(Handle<Unioned> result, int current_size) {
+ int old_size = current_size;
+ if (this->is_class() || this->is_constant()) {
+ if (!this->InUnion(result, old_size)) result->set(current_size++, this);
+ } else if (this->is_union()) {
+ Handle<Unioned> unioned = this->as_union();
+ for (int i = 0; i < unioned->length(); ++i) {
+ Handle<Type> type = union_get(unioned, i);
+ ASSERT(i == 0 || !(type->is_bitset() || type->Is(union_get(unioned, 0))));
+ if (type->is_bitset()) continue;
+ if (!type->InUnion(result, old_size)) result->set(current_size++, *type);
+ }
+ }
+ return current_size;
+}
+
+
+// Union is O(1) on simple bit unions, but O(n*m) on structured unions.
+// TODO(rossberg): Should we use object sets somehow? Is it worth it?
+Type* Type::Union(Handle<Type> type1, Handle<Type> type2) {
+ // Fast case: bit sets.
+ if (type1->is_bitset() && type2->is_bitset()) {
+ return from_bitset(type1->as_bitset() | type2->as_bitset());
+ }
+
+ // Semi-fast case: Unioned objects are neither involved nor produced.
+ if (!(type1->is_union() || type2->is_union())) {
+ if (type1->Is(type2)) return *type2;
+ if (type2->Is(type1)) return *type1;
+ }
+
+ // Slow case: may need to produce a Unioned object.
+ Isolate* isolate = NULL;
+ int size = type1->is_bitset() || type2->is_bitset() ? 1 : 0;
+ if (!type1->is_bitset()) {
+ isolate = HeapObject::cast(*type1)->GetIsolate();
+ size += (type1->is_union() ? type1->as_union()->length() : 1);
+ }
+ if (!type2->is_bitset()) {
+ isolate = HeapObject::cast(*type2)->GetIsolate();
+ size += (type2->is_union() ? type2->as_union()->length() : 1);
+ }
+ ASSERT(isolate != NULL);
+ ASSERT(size >= 2);
+ Handle<Unioned> unioned = isolate->factory()->NewFixedArray(size);
+ size = 0;
+
+ int bitset = type1->GlbBitset() | type2->GlbBitset();
+ if (bitset != kNone) unioned->set(size++, from_bitset(bitset));
+ size = type1->ExtendUnion(unioned, size);
+ size = type2->ExtendUnion(unioned, size);
+
+ if (size == 1) {
+ return *union_get(unioned, 0);
+ } else if (size == unioned->length()) {
+ return from_handle(unioned);
+ }
+
+ // There was an overlap. Copy to smaller union.
+ Handle<Unioned> result = isolate->factory()->NewFixedArray(size);
+ for (int i = 0; i < size; ++i) result->set(i, unioned->get(i));
+ return from_handle(result);
+}
+
+
+Type* Type::Optional(Handle<Type> type) {
+ return type->is_bitset()
+ ? from_bitset(type->as_bitset() | kUndefined)
+ : Union(type, Undefined()->handle_via_isolate_of(*type));
+}
+
+} } // namespace v8::internal
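Editor's note: a worked example of the two bounds, assuming a union T = String \/ Class(m) where m is a map with instance type JS_ARRAY_TYPE. LubBitset folds every member up into bits, while GlbBitset only sees a bitset stored directly in the union, which by the invariant sits at index 0 if present:

    // T = String \/ Class(m),  instance_type(m) == JS_ARRAY_TYPE
    //   T->LubBitset() == kString | kArray   // smallest bitset containing T
    //   T->GlbBitset() == kString            // largest bitset inside T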
diff --git a/deps/v8/src/types.h b/deps/v8/src/types.h
new file mode 100644
index 0000000000..6db9bfbb6a
--- /dev/null
+++ b/deps/v8/src/types.h
@@ -0,0 +1,199 @@
+// Copyright 2013 the V8 project authors. All rights reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following
+// disclaimer in the documentation and/or other materials provided
+// with the distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived
+// from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+#ifndef V8_TYPES_H_
+#define V8_TYPES_H_
+
+#include "v8.h"
+
+#include "objects.h"
+
+namespace v8 {
+namespace internal {
+
+
+// A simple type system for compiler-internal use. It is based entirely on
+// union types, and all subtyping hence amounts to set inclusion. Besides the
+// obvious primitive types and some predefined unions, the type language also
+// can express class types (a.k.a. specific maps) and singleton types (i.e.,
+// concrete constants).
+//
+// The following equations and inequations hold:
+//
+// None <= T
+// T <= Any
+//
+// Oddball = Boolean \/ Null \/ Undefined
+// Number = Smi \/ Double
+// Name = String \/ Symbol
+// UniqueName = InternalizedString \/ Symbol
+// InternalizedString < String
+//
+// Receiver = Object \/ Proxy
+// Array < Object
+// Function < Object
+//
+// Class(map) < T iff instance_type(map) < T
+// Constant(x) < T iff instance_type(map(x)) < T
+//
+// Note that Constant(x) < Class(map(x)) does _not_ hold, since x's map can
+// change! (Its instance type cannot, however.)
+// TODO(rossberg): the latter is not currently true for proxies, because of fix,
+// but will hold once we implement direct proxies.
+//
+// There are two main functions for testing types:
+//
+// T1->Is(T2) -- tests whether T1 is included in T2 (i.e., T1 <= T2)
+// T1->Maybe(T2) -- tests whether T1 and T2 overlap (i.e., T1 /\ T2 =/= 0)
+//
+// Typically, the latter should be used to check whether a specific case needs
+// handling (e.g., via T->Maybe(Number)).
+//
+// There is no functionality to discover whether a type is a leaf in the
+// lattice. That is intentional. It should always be possible to refine the
+// lattice (e.g., splitting up number types further) without invalidating any
+// existing assumptions or tests.
+//
+// Internally, all 'primitive' types, and their unions, are represented as
+// bitsets via smis. Class is a heap pointer to the respective map. Only
+// Constant's, or unions containing Class'es or Constant's, require allocation.
+//
+// The type representation is heap-allocated, so cannot (currently) be used in
+// a parallel compilation context.
+
+class Type : public Object {
+ public:
+ static Type* None() { return from_bitset(kNone); }
+ static Type* Any() { return from_bitset(kAny); }
+
+ static Type* Oddball() { return from_bitset(kOddball); }
+ static Type* Boolean() { return from_bitset(kBoolean); }
+ static Type* Null() { return from_bitset(kNull); }
+ static Type* Undefined() { return from_bitset(kUndefined); }
+
+ static Type* Number() { return from_bitset(kNumber); }
+ static Type* Smi() { return from_bitset(kSmi); }
+ static Type* Double() { return from_bitset(kDouble); }
+
+ static Type* Name() { return from_bitset(kName); }
+ static Type* UniqueName() { return from_bitset(kUniqueName); }
+ static Type* String() { return from_bitset(kString); }
+ static Type* InternalizedString() { return from_bitset(kInternalizedString); }
+ static Type* Symbol() { return from_bitset(kSymbol); }
+
+ static Type* Receiver() { return from_bitset(kReceiver); }
+ static Type* Object() { return from_bitset(kObject); }
+ static Type* Array() { return from_bitset(kArray); }
+ static Type* Function() { return from_bitset(kFunction); }
+ static Type* Proxy() { return from_bitset(kProxy); }
+
+ static Type* Class(Handle<Map> map) { return from_handle(map); }
+ static Type* Constant(Handle<HeapObject> value) {
+ return Constant(value, value->GetIsolate());
+ }
+ static Type* Constant(Handle<v8::internal::Object> value, Isolate* isolate) {
+ return from_handle(isolate->factory()->NewBox(value));
+ }
+
+ static Type* Union(Handle<Type> type1, Handle<Type> type2);
+ static Type* Optional(Handle<Type> type); // type \/ Undefined
+
+ bool Is(Handle<Type> that);
+ bool Maybe(Handle<Type> that);
+
+ // TODO(rossberg): method to iterate unions?
+
+ private:
+ // A union is a fixed array containing types. Invariants:
+ // - its length is at least 2
+ // - at most one field is a bitset, and it must go into index 0
+ // - no field is a union
+ typedef FixedArray Unioned;
+
+ enum {
+ kNull = 1 << 0,
+ kUndefined = 1 << 1,
+ kBoolean = 1 << 2,
+ kSmi = 1 << 3,
+ kDouble = 1 << 4,
+ kSymbol = 1 << 5,
+ kInternalizedString = 1 << 6,
+ kOtherString = 1 << 7,
+ kArray = 1 << 8,
+ kFunction = 1 << 9,
+ kOtherObject = 1 << 10,
+ kProxy = 1 << 11,
+
+ kOddball = kBoolean | kNull | kUndefined,
+ kNumber = kSmi | kDouble,
+ kString = kInternalizedString | kOtherString,
+ kUniqueName = kSymbol | kInternalizedString,
+ kName = kSymbol | kString,
+ kObject = kArray | kFunction | kOtherObject,
+ kReceiver = kObject | kProxy,
+ kAny = kOddball | kNumber | kName | kReceiver,
+ kNone = 0
+ };
+
+ bool is_bitset() { return this->IsSmi(); }
+ bool is_class() { return this->IsMap(); }
+ bool is_constant() { return this->IsBox(); }
+ bool is_union() { return this->IsFixedArray(); }
+
+ int as_bitset() { return Smi::cast(this)->value(); }
+ Handle<Map> as_class() { return Handle<Map>::cast(handle()); }
+ Handle<Box> as_constant() { return Handle<Box>::cast(handle()); }
+ Handle<Unioned> as_union() { return Handle<Unioned>::cast(handle()); }
+
+ Handle<Type> handle() { return handle_via_isolate_of(this); }
+ Handle<Type> handle_via_isolate_of(Type* type) {
+ ASSERT(type->IsHeapObject());
+ return v8::internal::handle(this, HeapObject::cast(type)->GetIsolate());
+ }
+
+ static Type* from_bitset(int bitset) {
+ return static_cast<Type*>(Object::cast(Smi::FromInt(bitset)));
+ }
+ static Type* from_handle(Handle<HeapObject> handle) {
+ return static_cast<Type*>(Object::cast(*handle));
+ }
+
+ static Handle<Type> union_get(Handle<Unioned> unioned, int i) {
+ Type* type = static_cast<Type*>(unioned->get(i));
+ ASSERT(!type->is_union());
+ return type->handle_via_isolate_of(from_handle(unioned));
+ }
+
+ int LubBitset(); // least upper bound that's a bitset
+ int GlbBitset(); // greatest lower bound that's a bitset
+ bool InUnion(Handle<Unioned> unioned, int current_size);
+ int ExtendUnion(Handle<Unioned> unioned, int current_size);
+};
+
+} } // namespace v8::internal
+
+#endif // V8_TYPES_H_
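Editor's note: a short usage sketch of the public surface, assuming an Isolate* isolate in scope (illustrative, not from the patch); the assertions restate the lattice equations from the header comment:

    Handle<Type> smi(Type::Smi(), isolate);
    Handle<Type> number(Type::Number(), isolate);

    ASSERT(smi->Is(number));                               // Smi <= Smi \/ Double
    ASSERT(number->Is(handle(Type::Any(), isolate)));      // T <= Any
    ASSERT(number->Maybe(smi));                            // overlap is non-empty
    ASSERT(!smi->Maybe(handle(Type::String(), isolate)));  // disjoint bitsets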
diff --git a/deps/v8/src/typing.cc b/deps/v8/src/typing.cc
new file mode 100644
index 0000000000..4ba67213a1
--- /dev/null
+++ b/deps/v8/src/typing.cc
@@ -0,0 +1,518 @@
+// Copyright 2013 the V8 project authors. All rights reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following
+// disclaimer in the documentation and/or other materials provided
+// with the distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived
+// from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+#include "typing.h"
+
+#include "parser.h" // for CompileTimeValue; TODO(rossberg): should move
+#include "scopes.h"
+
+namespace v8 {
+namespace internal {
+
+
+AstTyper::AstTyper(CompilationInfo* info)
+ : info_(info),
+ oracle_(
+ Handle<Code>(info->closure()->shared()->code()),
+ Handle<Context>(info->closure()->context()->native_context()),
+ info->isolate(),
+ info->zone()) {
+ InitializeAstVisitor();
+}
+
+
+#define CHECK_ALIVE(call) \
+ do { \
+ call; \
+ if (visitor->HasStackOverflow()) return; \
+ } while (false)
+
+
+void AstTyper::Type(CompilationInfo* info) {
+ AstTyper* visitor = new(info->zone()) AstTyper(info);
+ Scope* scope = info->scope();
+
+ // Handle implicit declaration of the function name in named function
+ // expressions before other declarations.
+ if (scope->is_function_scope() && scope->function() != NULL) {
+ CHECK_ALIVE(visitor->VisitVariableDeclaration(scope->function()));
+ }
+ CHECK_ALIVE(visitor->VisitDeclarations(scope->declarations()));
+ CHECK_ALIVE(visitor->VisitStatements(info->function()->body()));
+}
+
+
+#undef CHECK_ALIVE
+#define CHECK_ALIVE(call) \
+ do { \
+ call; \
+ if (HasStackOverflow()) return; \
+ } while (false)
+
+
+void AstTyper::VisitStatements(ZoneList<Statement*>* stmts) {
+ ASSERT(!HasStackOverflow());
+ for (int i = 0; i < stmts->length(); ++i) {
+ Statement* stmt = stmts->at(i);
+ CHECK_ALIVE(Visit(stmt));
+ }
+}
+
+
+void AstTyper::VisitBlock(Block* stmt) {
+ ASSERT(!HasStackOverflow());
+ CHECK_ALIVE(VisitStatements(stmt->statements()));
+}
+
+
+void AstTyper::VisitExpressionStatement(ExpressionStatement* stmt) {
+ ASSERT(!HasStackOverflow());
+ CHECK_ALIVE(Visit(stmt->expression()));
+}
+
+
+void AstTyper::VisitEmptyStatement(EmptyStatement* stmt) {
+ ASSERT(!HasStackOverflow());
+}
+
+
+void AstTyper::VisitIfStatement(IfStatement* stmt) {
+ ASSERT(!HasStackOverflow());
+ CHECK_ALIVE(Visit(stmt->condition()));
+ CHECK_ALIVE(Visit(stmt->then_statement()));
+ CHECK_ALIVE(Visit(stmt->else_statement()));
+
+ if (!stmt->condition()->ToBooleanIsTrue() &&
+ !stmt->condition()->ToBooleanIsFalse()) {
+ stmt->condition()->RecordToBooleanTypeFeedback(oracle());
+ }
+}
+
+
+void AstTyper::VisitContinueStatement(ContinueStatement* stmt) {
+ ASSERT(!HasStackOverflow());
+}
+
+
+void AstTyper::VisitBreakStatement(BreakStatement* stmt) {
+ ASSERT(!HasStackOverflow());
+}
+
+
+void AstTyper::VisitReturnStatement(ReturnStatement* stmt) {
+ ASSERT(!HasStackOverflow());
+ CHECK_ALIVE(Visit(stmt->expression()));
+
+ // TODO(rossberg): we only need this for inlining into test contexts...
+ stmt->expression()->RecordToBooleanTypeFeedback(oracle());
+}
+
+
+void AstTyper::VisitWithStatement(WithStatement* stmt) {
+ ASSERT(!HasStackOverflow());
+  CHECK_ALIVE(Visit(stmt->expression()));
+  CHECK_ALIVE(Visit(stmt->statement()));
+}
+
+
+void AstTyper::VisitSwitchStatement(SwitchStatement* stmt) {
+ ASSERT(!HasStackOverflow());
+ CHECK_ALIVE(Visit(stmt->tag()));
+ ZoneList<CaseClause*>* clauses = stmt->cases();
+ SwitchStatement::SwitchType switch_type = stmt->switch_type();
+ for (int i = 0; i < clauses->length(); ++i) {
+ CaseClause* clause = clauses->at(i);
+ if (!clause->is_default()) {
+ Expression* label = clause->label();
+ CHECK_ALIVE(Visit(label));
+
+ SwitchStatement::SwitchType label_switch_type =
+ label->IsSmiLiteral() ? SwitchStatement::SMI_SWITCH :
+ label->IsStringLiteral() ? SwitchStatement::STRING_SWITCH :
+ SwitchStatement::GENERIC_SWITCH;
+ if (switch_type == SwitchStatement::UNKNOWN_SWITCH)
+ switch_type = label_switch_type;
+ else if (switch_type != label_switch_type)
+ switch_type = SwitchStatement::GENERIC_SWITCH;
+ }
+ CHECK_ALIVE(VisitStatements(clause->statements()));
+ }
+ if (switch_type == SwitchStatement::UNKNOWN_SWITCH)
+ switch_type = SwitchStatement::GENERIC_SWITCH;
+ stmt->set_switch_type(switch_type);
+
+ // TODO(rossberg): can we eliminate this special case and extra loop?
+ if (switch_type == SwitchStatement::SMI_SWITCH) {
+ for (int i = 0; i < clauses->length(); ++i) {
+ CaseClause* clause = clauses->at(i);
+ if (!clause->is_default())
+ clause->RecordTypeFeedback(oracle());
+ }
+ }
+}
+
+
+void AstTyper::VisitDoWhileStatement(DoWhileStatement* stmt) {
+ ASSERT(!HasStackOverflow());
+ CHECK_ALIVE(Visit(stmt->body()));
+ CHECK_ALIVE(Visit(stmt->cond()));
+
+ if (!stmt->cond()->ToBooleanIsTrue()) {
+ stmt->cond()->RecordToBooleanTypeFeedback(oracle());
+ }
+}
+
+
+void AstTyper::VisitWhileStatement(WhileStatement* stmt) {
+ ASSERT(!HasStackOverflow());
+ CHECK_ALIVE(Visit(stmt->cond()));
+ CHECK_ALIVE(Visit(stmt->body()));
+
+ if (!stmt->cond()->ToBooleanIsTrue()) {
+ stmt->cond()->RecordToBooleanTypeFeedback(oracle());
+ }
+}
+
+
+void AstTyper::VisitForStatement(ForStatement* stmt) {
+ ASSERT(!HasStackOverflow());
+ if (stmt->init() != NULL) {
+ CHECK_ALIVE(Visit(stmt->init()));
+ }
+ if (stmt->cond() != NULL) {
+ CHECK_ALIVE(Visit(stmt->cond()));
+
+ stmt->cond()->RecordToBooleanTypeFeedback(oracle());
+ }
+ CHECK_ALIVE(Visit(stmt->body()));
+ if (stmt->next() != NULL) {
+ CHECK_ALIVE(Visit(stmt->next()));
+ }
+}
+
+
+void AstTyper::VisitForInStatement(ForInStatement* stmt) {
+ ASSERT(!HasStackOverflow());
+ CHECK_ALIVE(Visit(stmt->enumerable()));
+ CHECK_ALIVE(Visit(stmt->body()));
+
+ stmt->RecordTypeFeedback(oracle());
+}
+
+
+void AstTyper::VisitForOfStatement(ForOfStatement* stmt) {
+ ASSERT(!HasStackOverflow());
+ CHECK_ALIVE(Visit(stmt->iterable()));
+ CHECK_ALIVE(Visit(stmt->body()));
+}
+
+
+void AstTyper::VisitTryCatchStatement(TryCatchStatement* stmt) {
+ ASSERT(!HasStackOverflow());
+ CHECK_ALIVE(Visit(stmt->try_block()));
+ CHECK_ALIVE(Visit(stmt->catch_block()));
+}
+
+
+void AstTyper::VisitTryFinallyStatement(TryFinallyStatement* stmt) {
+ ASSERT(!HasStackOverflow());
+ CHECK_ALIVE(Visit(stmt->try_block()));
+ CHECK_ALIVE(Visit(stmt->finally_block()));
+}
+
+
+void AstTyper::VisitDebuggerStatement(DebuggerStatement* stmt) {
+ ASSERT(!HasStackOverflow());
+}
+
+
+void AstTyper::VisitFunctionLiteral(FunctionLiteral* expr) {
+ ASSERT(!HasStackOverflow());
+}
+
+
+void AstTyper::VisitSharedFunctionInfoLiteral(SharedFunctionInfoLiteral* expr) {
+ ASSERT(!HasStackOverflow());
+}
+
+
+void AstTyper::VisitConditional(Conditional* expr) {
+ ASSERT(!HasStackOverflow());
+ CHECK_ALIVE(Visit(expr->condition()));
+ CHECK_ALIVE(Visit(expr->then_expression()));
+ CHECK_ALIVE(Visit(expr->else_expression()));
+
+ expr->condition()->RecordToBooleanTypeFeedback(oracle());
+}
+
+
+void AstTyper::VisitVariableProxy(VariableProxy* expr) {
+ ASSERT(!HasStackOverflow());
+}
+
+
+void AstTyper::VisitLiteral(Literal* expr) {
+ ASSERT(!HasStackOverflow());
+}
+
+
+void AstTyper::VisitRegExpLiteral(RegExpLiteral* expr) {
+ ASSERT(!HasStackOverflow());
+}
+
+
+void AstTyper::VisitObjectLiteral(ObjectLiteral* expr) {
+ ASSERT(!HasStackOverflow());
+ ZoneList<ObjectLiteral::Property*>* properties = expr->properties();
+ for (int i = 0; i < properties->length(); ++i) {
+ ObjectLiteral::Property* prop = properties->at(i);
+ CHECK_ALIVE(Visit(prop->value()));
+
+ if ((prop->kind() == ObjectLiteral::Property::MATERIALIZED_LITERAL &&
+ !CompileTimeValue::IsCompileTimeValue(prop->value())) ||
+ prop->kind() == ObjectLiteral::Property::COMPUTED) {
+ if (prop->key()->handle()->IsInternalizedString() && prop->emit_store())
+ prop->RecordTypeFeedback(oracle());
+ }
+ }
+}
+
+
+void AstTyper::VisitArrayLiteral(ArrayLiteral* expr) {
+ ASSERT(!HasStackOverflow());
+ ZoneList<Expression*>* values = expr->values();
+ for (int i = 0; i < values->length(); ++i) {
+ Expression* value = values->at(i);
+ CHECK_ALIVE(Visit(value));
+ }
+}
+
+
+void AstTyper::VisitAssignment(Assignment* expr) {
+ ASSERT(!HasStackOverflow());
+ CHECK_ALIVE(Visit(expr->target()));
+ CHECK_ALIVE(Visit(expr->value()));
+
+ // TODO(rossberg): Can we clean this up?
+ if (expr->is_compound()) {
+ CHECK_ALIVE(Visit(expr->binary_operation()));
+
+ Expression* target = expr->target();
+ Property* prop = target->AsProperty();
+ if (prop != NULL) {
+ prop->RecordTypeFeedback(oracle(), zone());
+ if (!prop->key()->IsPropertyName()) // i.e., keyed
+ expr->RecordTypeFeedback(oracle(), zone());
+ }
+ return;
+ }
+ if (expr->target()->AsProperty())
+ expr->RecordTypeFeedback(oracle(), zone());
+}
+
+
+void AstTyper::VisitYield(Yield* expr) {
+ ASSERT(!HasStackOverflow());
+ CHECK_ALIVE(Visit(expr->generator_object()));
+ CHECK_ALIVE(Visit(expr->expression()));
+}
+
+
+void AstTyper::VisitThrow(Throw* expr) {
+ ASSERT(!HasStackOverflow());
+ CHECK_ALIVE(Visit(expr->exception()));
+}
+
+
+void AstTyper::VisitProperty(Property* expr) {
+ ASSERT(!HasStackOverflow());
+ CHECK_ALIVE(Visit(expr->obj()));
+ CHECK_ALIVE(Visit(expr->key()));
+
+ expr->RecordTypeFeedback(oracle(), zone());
+}
+
+
+void AstTyper::VisitCall(Call* expr) {
+ ASSERT(!HasStackOverflow());
+ CHECK_ALIVE(Visit(expr->expression()));
+ ZoneList<Expression*>* args = expr->arguments();
+ for (int i = 0; i < args->length(); ++i) {
+ Expression* arg = args->at(i);
+ CHECK_ALIVE(Visit(arg));
+ }
+
+ Expression* callee = expr->expression();
+ Property* prop = callee->AsProperty();
+ if (prop != NULL) {
+ if (prop->key()->IsPropertyName())
+ expr->RecordTypeFeedback(oracle(), CALL_AS_METHOD);
+ } else {
+ expr->RecordTypeFeedback(oracle(), CALL_AS_FUNCTION);
+ }
+}
+
+
+void AstTyper::VisitCallNew(CallNew* expr) {
+ ASSERT(!HasStackOverflow());
+ CHECK_ALIVE(Visit(expr->expression()));
+ ZoneList<Expression*>* args = expr->arguments();
+ for (int i = 0; i < args->length(); ++i) {
+ Expression* arg = args->at(i);
+ CHECK_ALIVE(Visit(arg));
+ }
+
+ expr->RecordTypeFeedback(oracle());
+}
+
+
+void AstTyper::VisitCallRuntime(CallRuntime* expr) {
+ ASSERT(!HasStackOverflow());
+ ZoneList<Expression*>* args = expr->arguments();
+ for (int i = 0; i < args->length(); ++i) {
+ Expression* arg = args->at(i);
+ CHECK_ALIVE(Visit(arg));
+ }
+}
+
+
+void AstTyper::VisitUnaryOperation(UnaryOperation* expr) {
+ ASSERT(!HasStackOverflow());
+ CHECK_ALIVE(Visit(expr->expression()));
+
+ expr->RecordTypeFeedback(oracle());
+ if (expr->op() == Token::NOT) {
+ // TODO(rossberg): only do in test or value context.
+ expr->expression()->RecordToBooleanTypeFeedback(oracle());
+ }
+}
+
+
+void AstTyper::VisitCountOperation(CountOperation* expr) {
+ ASSERT(!HasStackOverflow());
+ CHECK_ALIVE(Visit(expr->expression()));
+
+ expr->RecordTypeFeedback(oracle(), zone());
+ Property* prop = expr->expression()->AsProperty();
+ if (prop != NULL) {
+ prop->RecordTypeFeedback(oracle(), zone());
+ }
+}
+
+
+void AstTyper::VisitBinaryOperation(BinaryOperation* expr) {
+ ASSERT(!HasStackOverflow());
+ CHECK_ALIVE(Visit(expr->left()));
+ CHECK_ALIVE(Visit(expr->right()));
+
+ expr->RecordTypeFeedback(oracle());
+ if (expr->op() == Token::OR || expr->op() == Token::AND) {
+ expr->left()->RecordToBooleanTypeFeedback(oracle());
+ }
+}
+
+
+void AstTyper::VisitCompareOperation(CompareOperation* expr) {
+ ASSERT(!HasStackOverflow());
+ CHECK_ALIVE(Visit(expr->left()));
+ CHECK_ALIVE(Visit(expr->right()));
+
+ expr->RecordTypeFeedback(oracle());
+}
+
+
+void AstTyper::VisitThisFunction(ThisFunction* expr) {
+ ASSERT(!HasStackOverflow());
+}
+
+
+void AstTyper::VisitDeclarations(ZoneList<Declaration*>* decls) {
+ ASSERT(!HasStackOverflow());
+ for (int i = 0; i < decls->length(); ++i) {
+ Declaration* decl = decls->at(i);
+ CHECK_ALIVE(Visit(decl));
+ }
+}
+
+
+void AstTyper::VisitVariableDeclaration(VariableDeclaration* declaration) {
+ ASSERT(!HasStackOverflow());
+}
+
+
+void AstTyper::VisitFunctionDeclaration(FunctionDeclaration* declaration) {
+ ASSERT(!HasStackOverflow());
+ CHECK_ALIVE(Visit(declaration->fun()));
+}
+
+
+void AstTyper::VisitModuleDeclaration(ModuleDeclaration* declaration) {
+ ASSERT(!HasStackOverflow());
+ CHECK_ALIVE(Visit(declaration->module()));
+}
+
+
+void AstTyper::VisitImportDeclaration(ImportDeclaration* declaration) {
+ ASSERT(!HasStackOverflow());
+ CHECK_ALIVE(Visit(declaration->module()));
+}
+
+
+void AstTyper::VisitExportDeclaration(ExportDeclaration* declaration) {
+ ASSERT(!HasStackOverflow());
+}
+
+
+void AstTyper::VisitModuleLiteral(ModuleLiteral* module) {
+ ASSERT(!HasStackOverflow());
+ CHECK_ALIVE(Visit(module->body()));
+}
+
+
+void AstTyper::VisitModuleVariable(ModuleVariable* module) {
+ ASSERT(!HasStackOverflow());
+}
+
+
+void AstTyper::VisitModulePath(ModulePath* module) {
+ ASSERT(!HasStackOverflow());
+ CHECK_ALIVE(Visit(module->module()));
+}
+
+
+void AstTyper::VisitModuleUrl(ModuleUrl* module) {
+ ASSERT(!HasStackOverflow());
+}
+
+
+void AstTyper::VisitModuleStatement(ModuleStatement* stmt) {
+ ASSERT(!HasStackOverflow());
+ CHECK_ALIVE(Visit(stmt->body()));
+}
+
+
+} } // namespace v8::internal
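Editor's note: one remark on the macro. The do { ... } while (false) wrapper makes CHECK_ALIVE behave as a single statement, so it composes with unbraced control flow, and the early return is what stops the recursive AST walk as soon as the visitor records a stack overflow. A hypothetical expansion at one call site:

    if (stmt->init() != NULL) CHECK_ALIVE(Visit(stmt->init()));
    // expands to roughly:
    //   do { Visit(stmt->init()); if (HasStackOverflow()) return; } while (false);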
diff --git a/deps/v8/src/typing.h b/deps/v8/src/typing.h
new file mode 100644
index 0000000000..d8708c2ccb
--- /dev/null
+++ b/deps/v8/src/typing.h
@@ -0,0 +1,77 @@
+// Copyright 2013 the V8 project authors. All rights reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following
+// disclaimer in the documentation and/or other materials provided
+// with the distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived
+// from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+#ifndef V8_TYPING_H_
+#define V8_TYPING_H_
+
+#include "v8.h"
+
+#include "allocation.h"
+#include "ast.h"
+#include "compiler.h"
+#include "type-info.h"
+#include "zone.h"
+#include "scopes.h"
+
+namespace v8 {
+namespace internal {
+
+
+class AstTyper: public AstVisitor {
+ public:
+ static void Type(CompilationInfo* info);
+
+ void* operator new(size_t size, Zone* zone) {
+ return zone->New(static_cast<int>(size));
+ }
+ void operator delete(void* pointer, Zone* zone) { }
+ void operator delete(void* pointer) { }
+
+ DEFINE_AST_VISITOR_SUBCLASS_MEMBERS();
+
+ private:
+ explicit AstTyper(CompilationInfo* info);
+
+ CompilationInfo* info_;
+ TypeFeedbackOracle oracle_;
+
+ TypeFeedbackOracle* oracle() { return &oracle_; }
+ Zone* zone() const { return info_->zone(); }
+
+ void VisitDeclarations(ZoneList<Declaration*>* declarations);
+ void VisitStatements(ZoneList<Statement*>* statements);
+
+#define DECLARE_VISIT(type) virtual void Visit##type(type* node);
+ AST_NODE_LIST(DECLARE_VISIT)
+#undef DECLARE_VISIT
+
+ DISALLOW_COPY_AND_ASSIGN(AstTyper);
+};
+
+} } // namespace v8::internal
+
+#endif // V8_TYPING_H_
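
AstTyper's operator new/delete overloads are V8's standard zone-allocation pattern: instances are bump-allocated into an arena and never freed one by one. A condensed model of the pattern, assuming Zone::New(int) returns arena-owned storage:

    class ZoneAllocatedSketch {
     public:
      // Placement-allocate into the zone; the arena owns the memory.
      void* operator new(size_t size, Zone* zone) {
        return zone->New(static_cast<int>(size));
      }
      // The placement delete is only invoked if the constructor throws;
      // plain delete is a no-op because zone memory is released in bulk.
      void operator delete(void* pointer, Zone* zone) {}
      void operator delete(void* pointer) {}
    };

    // Hypothetical call site: AstTyper* typer = new(zone) AstTyper(info);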
diff --git a/deps/v8/src/uri.h b/deps/v8/src/uri.h
index c7a6301f12..ee1baeb512 100644
--- a/deps/v8/src/uri.h
+++ b/deps/v8/src/uri.h
@@ -93,7 +93,7 @@ const signed char URIUnescape::kHexValue[] = {
template<typename Char>
Handle<String> URIUnescape::Unescape(Isolate* isolate, Handle<String> source) {
int index;
- { AssertNoAllocation no_allocation;
+ { DisallowHeapAllocation no_allocation;
StringSearch<uint8_t, Char> search(isolate, STATIC_ASCII_VECTOR("%"));
index = search.Search(GetCharVector<Char>(source), 0);
if (index < 0) return source;
@@ -109,7 +109,7 @@ Handle<String> URIUnescape::UnescapeSlow(
int length = string->length();
int unescaped_length = 0;
- { AssertNoAllocation no_allocation;
+ { DisallowHeapAllocation no_allocation;
Vector<const Char> vector = GetCharVector<Char>(string);
for (int i = start_index; i < length; unescaped_length++) {
int step;
@@ -130,7 +130,7 @@ Handle<String> URIUnescape::UnescapeSlow(
if (one_byte) {
Handle<SeqOneByteString> dest =
isolate->factory()->NewRawOneByteString(unescaped_length);
- AssertNoAllocation no_allocation;
+ DisallowHeapAllocation no_allocation;
Vector<const Char> vector = GetCharVector<Char>(string);
for (int i = start_index; i < length; dest_position++) {
int step;
@@ -142,7 +142,7 @@ Handle<String> URIUnescape::UnescapeSlow(
} else {
Handle<SeqTwoByteString> dest =
isolate->factory()->NewRawTwoByteString(unescaped_length);
- AssertNoAllocation no_allocation;
+ DisallowHeapAllocation no_allocation;
Vector<const Char> vector = GetCharVector<Char>(string);
for (int i = start_index; i < length; dest_position++) {
int step;
@@ -249,7 +249,7 @@ Handle<String> URIEscape::Escape(Isolate* isolate, Handle<String> string) {
int escaped_length = 0;
int length = string->length();
- { AssertNoAllocation no_allocation;
+ { DisallowHeapAllocation no_allocation;
Vector<const Char> vector = GetCharVector<Char>(string);
for (int i = 0; i < length; i++) {
uint16_t c = vector[i];
@@ -277,7 +277,7 @@ Handle<String> URIEscape::Escape(Isolate* isolate, Handle<String> string) {
isolate->factory()->NewRawOneByteString(escaped_length);
int dest_position = 0;
- { AssertNoAllocation no_allocation;
+ { DisallowHeapAllocation no_allocation;
Vector<const Char> vector = GetCharVector<Char>(string);
for (int i = 0; i < length; i++) {
uint16_t c = vector[i];
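
Each AssertNoAllocation above becomes a DisallowHeapAllocation from the assert-scope.h header this upgrade introduces (see the new include in v8.h below). Both are RAII guards; a simplified model of the behavior (the shipped class is presumably a typedef over a templated per-thread assert scope):

    class DisallowHeapAllocationModel {
     public:
      // While an instance is live, any heap allocation hits an ASSERT, so
      // the raw Vector<const Char> taken in these scopes cannot be
      // invalidated by a moving GC.
      DisallowHeapAllocationModel()  { /* save flag, set "no allocation" */ }
      ~DisallowHeapAllocationModel() { /* restore the saved flag */ }
    };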
diff --git a/deps/v8/src/utils.h b/deps/v8/src/utils.h
index 93cded18bc..4a08319044 100644
--- a/deps/v8/src/utils.h
+++ b/deps/v8/src/utils.h
@@ -86,6 +86,25 @@ inline int WhichPowerOf2(uint32_t x) {
}
+inline int MostSignificantBit(uint32_t x) {
+ static const int msb4[] = {0, 1, 2, 2, 3, 3, 3, 3, 4, 4, 4, 4, 4, 4, 4, 4};
+ int nibble = 0;
+ if (x & 0xffff0000) {
+ nibble += 16;
+ x >>= 16;
+ }
+ if (x & 0xff00) {
+ nibble += 8;
+ x >>= 8;
+ }
+ if (x & 0xf0) {
+ nibble += 4;
+ x >>= 4;
+ }
+ return nibble + msb4[x];
+}
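+
+// MostSignificantBit returns the 1-based index of the highest set bit, or 0
+// for a zero input: the three conditionals narrow the search by 16, 8 and 4
+// bits, and msb4 resolves the final nibble. Spot checks:
+//   MostSignificantBit(0)           == 0   (msb4[0] is 0)
+//   MostSignificantBit(1)           == 1
+//   MostSignificantBit(0x00000100u) == 9   (8 + msb4[1])
+//   MostSignificantBit(0x80000000u) == 32  (16 + 8 + 4 + msb4[8])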
+
+
// Magic numbers for integer division.
// These are kind of 2's complement reciprocal of the divisors.
// Details and proofs can be found in:
@@ -232,6 +251,20 @@ T Min(T a, T b) {
}
+// Returns the absolute value of its argument.
+template <typename T>
+T Abs(T a) {
+ return a < 0 ? -a : a;
+}
+
+
+// Returns the negative absolute value of its argument.
+template <typename T>
+T NegAbs(T a) {
+ return a < 0 ? a : -a;
+}
+
+
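+// NegAbs is not redundant with -Abs(a): for two's-complement integers,
+// Abs(kMinInt) overflows (undefined behavior), while the negative absolute
+// value is representable for every input, so kMinInt-safe code can work in
+// the negative range throughout. Spot checks:
+//   Abs(-3)         == 3
+//   NegAbs(3)       == -3
+//   NegAbs(kMinInt) == kMinInt   (well-defined; Abs(kMinInt) is not)
+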
inline int StrLength(const char* string) {
size_t length = strlen(string);
ASSERT(length == static_cast<size_t>(static_cast<int>(length)));
diff --git a/deps/v8/src/v8.cc b/deps/v8/src/v8.cc
index e21c815ff5..80b12deea6 100644
--- a/deps/v8/src/v8.cc
+++ b/deps/v8/src/v8.cc
@@ -56,6 +56,7 @@ bool V8::has_been_disposed_ = false;
bool V8::has_fatal_error_ = false;
bool V8::use_crankshaft_ = true;
List<CallCompletedCallback>* V8::call_completed_callbacks_ = NULL;
+v8::ArrayBuffer::Allocator* V8::array_buffer_allocator_ = NULL;
static LazyMutex entropy_mutex = LAZY_MUTEX_INITIALIZER;
diff --git a/deps/v8/src/v8.h b/deps/v8/src/v8.h
index 5ead877e55..b8a5ae4380 100644
--- a/deps/v8/src/v8.h
+++ b/deps/v8/src/v8.h
@@ -56,6 +56,7 @@
#include "v8globals.h"
#include "v8checks.h"
#include "allocation.h"
+#include "assert-scope.h"
#include "v8utils.h"
#include "flags.h"
@@ -120,6 +121,15 @@ class V8 : public AllStatic {
static void RemoveCallCompletedCallback(CallCompletedCallback callback);
static void FireCallCompletedCallback(Isolate* isolate);
+ static v8::ArrayBuffer::Allocator* ArrayBufferAllocator() {
+ return array_buffer_allocator_;
+ }
+
+ static void SetArrayBufferAllocator(v8::ArrayBuffer::Allocator *allocator) {
+ CHECK_EQ(NULL, array_buffer_allocator_);
+ array_buffer_allocator_ = allocator;
+ }
+
private:
static void InitializeOncePerProcessImpl();
static void InitializeOncePerProcess();
@@ -138,6 +148,8 @@ class V8 : public AllStatic {
static bool use_crankshaft_;
// List of callbacks when a Call completes.
static List<CallCompletedCallback>* call_completed_callbacks_;
+ // Allocator for external array buffers.
+ static v8::ArrayBuffer::Allocator* array_buffer_allocator_;
};
@@ -145,10 +157,6 @@ class V8 : public AllStatic {
enum NilValue { kNullValue, kUndefinedValue };
-// JavaScript defines two kinds of equality.
-enum EqualityKind { kStrictEquality, kNonStrictEquality };
-
-
} } // namespace v8::internal
namespace i = v8::internal;
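
The new array_buffer_allocator_ hook lets the embedder own the backing-store memory for ArrayBuffers. A sketch of an embedder-side allocator, assuming the v8::ArrayBuffer::Allocator interface of this release exposes just Allocate and Free (an assumption about the public API, not confirmed by this diff):

    #include <cstdlib>

    class MallocAllocator : public v8::ArrayBuffer::Allocator {
     public:
      // Typed arrays observe zero-initialized memory, so use calloc.
      virtual void* Allocate(size_t length) { return calloc(length, 1); }
      virtual void Free(void* data) { free(data); }
    };

    // Installed once at startup; SetArrayBufferAllocator() CHECKs that the
    // slot was still NULL, so a second installation aborts.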
diff --git a/deps/v8/src/v8conversions.h b/deps/v8/src/v8conversions.h
index 9d618af980..3a7b5242ab 100644
--- a/deps/v8/src/v8conversions.h
+++ b/deps/v8/src/v8conversions.h
@@ -58,7 +58,7 @@ double StringToInt(UnicodeCache* unicode_cache, String* str, int radix);
// Converts a number into size_t.
inline size_t NumberToSize(Isolate* isolate,
Object* number) {
- NoHandleAllocation hc(isolate);
+ SealHandleScope shs(isolate);
if (number->IsSmi()) {
return Smi::cast(number)->value();
} else {
diff --git a/deps/v8/src/v8globals.h b/deps/v8/src/v8globals.h
index 17068937eb..98940c58e3 100644
--- a/deps/v8/src/v8globals.h
+++ b/deps/v8/src/v8globals.h
@@ -111,7 +111,6 @@ class AccessorInfo;
class Allocation;
class Arguments;
class Assembler;
-class AssertNoAllocation;
class Code;
class CodeGenerator;
class CodeStub;
diff --git a/deps/v8/src/v8natives.js b/deps/v8/src/v8natives.js
index b2ea749c73..e168b71abc 100644
--- a/deps/v8/src/v8natives.js
+++ b/deps/v8/src/v8natives.js
@@ -873,6 +873,7 @@ function DefineArrayProperty(obj, p, desc, should_throw) {
// Step 3 - Special handling for length property.
if (p === "length") {
var length = obj.length;
+ var old_length = length;
if (!desc.hasValue()) {
return DefineObjectProperty(obj, "length", desc, should_throw);
}
@@ -889,8 +890,24 @@ function DefineArrayProperty(obj, p, desc, should_throw) {
}
}
var threw = false;
+
+ var emit_splice = %IsObserved(obj) && new_length !== old_length;
+ var removed;
+ if (emit_splice) {
+ BeginPerformSplice(obj);
+ removed = [];
+ if (new_length < old_length)
+ removed.length = old_length - new_length;
+ }
+
while (new_length < length--) {
- if (!Delete(obj, ToString(length), false)) {
+ var index = ToString(length);
+ if (emit_splice) {
+ var deletedDesc = GetOwnProperty(obj, index);
+ if (deletedDesc && deletedDesc.hasValue())
+ removed[length - new_length] = deletedDesc.getValue();
+ }
+ if (!Delete(obj, index, false)) {
new_length = length + 1;
threw = true;
break;
@@ -902,13 +919,17 @@ function DefineArrayProperty(obj, p, desc, should_throw) {
// respective TODO in Runtime_DefineOrRedefineDataProperty.
// For the time being, we need a hack to prevent Object.observe from
// generating two change records.
- var isObserved = %IsObserved(obj);
- if (isObserved) %SetIsObserved(obj, false);
obj.length = new_length;
desc.value_ = void 0;
desc.hasValue_ = false;
threw = !DefineObjectProperty(obj, "length", desc, should_throw) || threw;
- if (isObserved) %SetIsObserved(obj, true);
+ if (emit_splice) {
+ EndPerformSplice(obj);
+ EnqueueSpliceRecord(obj,
+ new_length < old_length ? new_length : old_length,
+ removed,
+ new_length > old_length ? new_length - old_length : 0);
+ }
if (threw) {
if (should_throw) {
throw MakeTypeError("redefine_disallowed", [p]);
@@ -916,27 +937,24 @@ function DefineArrayProperty(obj, p, desc, should_throw) {
return false;
}
}
- if (isObserved) {
- var new_desc = GetOwnProperty(obj, "length");
- var updated = length_desc.value_ !== new_desc.value_;
- var reconfigured = length_desc.writable_ !== new_desc.writable_ ||
- length_desc.configurable_ !== new_desc.configurable_ ||
- length_desc.enumerable_ !== new_desc.configurable_;
- if (updated || reconfigured) {
- NotifyChange(reconfigured ? "reconfigured" : "updated",
- obj, "length", length_desc.value_);
- }
- }
return true;
}
// Step 4 - Special handling for array index.
var index = ToUint32(p);
+ var emit_splice = false;
if (ToString(index) == p && index != 4294967295) {
var length = obj.length;
+ if (index >= length && %IsObserved(obj)) {
+ emit_splice = true;
+ BeginPerformSplice(obj);
+ }
+
var length_desc = GetOwnProperty(obj, "length");
if ((index >= length && !length_desc.isWritable()) ||
!DefineObjectProperty(obj, p, desc, true)) {
+ if (emit_splice)
+ EndPerformSplice(obj);
if (should_throw) {
throw MakeTypeError("define_disallowed", [p]);
} else {
@@ -946,6 +964,10 @@ function DefineArrayProperty(obj, p, desc, should_throw) {
if (index >= length) {
obj.length = index + 1;
}
+ if (emit_splice) {
+ EndPerformSplice(obj);
+ EnqueueSpliceRecord(obj, length, [], index + 1 - length);
+ }
return true;
}
@@ -1225,20 +1247,27 @@ function ObjectFreeze(obj) {
if (!IS_SPEC_OBJECT(obj)) {
throw MakeTypeError("called_on_non_object", ["Object.freeze"]);
}
- if (%IsJSProxy(obj)) {
- ProxyFix(obj);
- }
- var names = ObjectGetOwnPropertyNames(obj);
- for (var i = 0; i < names.length; i++) {
- var name = names[i];
- var desc = GetOwnProperty(obj, name);
- if (desc.isWritable() || desc.isConfigurable()) {
- if (IsDataDescriptor(desc)) desc.setWritable(false);
- desc.setConfigurable(false);
- DefineOwnProperty(obj, name, desc, true);
+ var isProxy = %IsJSProxy(obj);
+ if (isProxy || %HasNonStrictArgumentsElements(obj)) {
+ if (isProxy) {
+ ProxyFix(obj);
+ }
+ var names = ObjectGetOwnPropertyNames(obj);
+ for (var i = 0; i < names.length; i++) {
+ var name = names[i];
+ var desc = GetOwnProperty(obj, name);
+ if (desc.isWritable() || desc.isConfigurable()) {
+ if (IsDataDescriptor(desc)) desc.setWritable(false);
+ desc.setConfigurable(false);
+ DefineOwnProperty(obj, name, desc, true);
+ }
}
+ %PreventExtensions(obj);
+ } else {
+ // TODO(adamk): Is it worth going to this fast path if the
+ // object's properties are already in dictionary mode?
+ %ObjectFreeze(obj);
}
- %PreventExtensions(obj);
return obj;
}
diff --git a/deps/v8/src/version.cc b/deps/v8/src/version.cc
index cec040ae46..bad15cf2e0 100644
--- a/deps/v8/src/version.cc
+++ b/deps/v8/src/version.cc
@@ -34,7 +34,7 @@
// system so their names cannot be changed without changing the scripts.
#define MAJOR_VERSION 3
#define MINOR_VERSION 19
-#define BUILD_NUMBER 3
+#define BUILD_NUMBER 13
#define PATCH_LEVEL 0
// Use 1 for candidates and 0 otherwise.
// (Boolean macro values are not supported by all preprocessors.)
diff --git a/deps/v8/src/x64/assembler-x64.cc b/deps/v8/src/x64/assembler-x64.cc
index 802696b54f..f547e7947f 100644
--- a/deps/v8/src/x64/assembler-x64.cc
+++ b/deps/v8/src/x64/assembler-x64.cc
@@ -1590,7 +1590,7 @@ void Assembler::movl(const Operand& dst, Label* src) {
void Assembler::movq(Register dst, Handle<Object> value, RelocInfo::Mode mode) {
- ALLOW_HANDLE_DEREF(isolate(), "using and embedding raw address");
+ AllowDeferredHandleDereference using_raw_address;
// If there is no relocation info, emit the value of the handle efficiently
  // (possibly using less than 8 bytes for the value).
if (RelocInfo::IsNone(mode)) {
diff --git a/deps/v8/src/x64/builtins-x64.cc b/deps/v8/src/x64/builtins-x64.cc
index b1b99b473b..2a01b0b24c 100644
--- a/deps/v8/src/x64/builtins-x64.cc
+++ b/deps/v8/src/x64/builtins-x64.cc
@@ -1477,14 +1477,20 @@ void Builtins::Generate_InternalArrayCode(MacroAssembler* masm) {
// Run the native code for the InternalArray function called as a normal
// function.
- ArrayNativeCode(masm, &generic_array_code);
-
- // Jump to the generic array code in case the specialized code cannot handle
- // the construction.
- __ bind(&generic_array_code);
- Handle<Code> array_code =
- masm->isolate()->builtins()->InternalArrayCodeGeneric();
- __ Jump(array_code, RelocInfo::CODE_TARGET);
+ if (FLAG_optimize_constructed_arrays) {
+    // Tail call a stub.
+ InternalArrayConstructorStub stub(masm->isolate());
+ __ TailCallStub(&stub);
+ } else {
+ ArrayNativeCode(masm, &generic_array_code);
+
+ // Jump to the generic array code in case the specialized code cannot handle
+ // the construction.
+ __ bind(&generic_array_code);
+ Handle<Code> array_code =
+ masm->isolate()->builtins()->InternalArrayCodeGeneric();
+ __ Jump(array_code, RelocInfo::CODE_TARGET);
+ }
}
@@ -1511,14 +1517,24 @@ void Builtins::Generate_ArrayCode(MacroAssembler* masm) {
}
// Run the native code for the Array function called as a normal function.
- ArrayNativeCode(masm, &generic_array_code);
-
- // Jump to the generic array code in case the specialized code cannot handle
- // the construction.
- __ bind(&generic_array_code);
- Handle<Code> array_code =
- masm->isolate()->builtins()->ArrayCodeGeneric();
- __ Jump(array_code, RelocInfo::CODE_TARGET);
+ if (FLAG_optimize_constructed_arrays) {
+    // Tail call a stub.
+ Handle<Object> undefined_sentinel(
+ masm->isolate()->heap()->undefined_value(),
+ masm->isolate());
+ __ Move(rbx, undefined_sentinel);
+ ArrayConstructorStub stub(masm->isolate());
+ __ TailCallStub(&stub);
+ } else {
+ ArrayNativeCode(masm, &generic_array_code);
+
+ // Jump to the generic array code in case the specialized code cannot handle
+ // the construction.
+ __ bind(&generic_array_code);
+ Handle<Code> array_code =
+ masm->isolate()->builtins()->ArrayCodeGeneric();
+ __ Jump(array_code, RelocInfo::CODE_TARGET);
+ }
}
diff --git a/deps/v8/src/x64/code-stubs-x64.cc b/deps/v8/src/x64/code-stubs-x64.cc
index f87d952568..bc2e59a41b 100644
--- a/deps/v8/src/x64/code-stubs-x64.cc
+++ b/deps/v8/src/x64/code-stubs-x64.cc
@@ -30,7 +30,6 @@
#if defined(V8_TARGET_ARCH_X64)
#include "bootstrapper.h"
-#include "builtins-decls.h"
#include "code-stubs.h"
#include "regexp-macro-assembler.h"
#include "stub-cache.h"
@@ -46,7 +45,6 @@ void FastCloneShallowArrayStub::InitializeInterfaceDescriptor(
static Register registers[] = { rax, rbx, rcx };
descriptor->register_param_count_ = 3;
descriptor->register_params_ = registers;
- descriptor->stack_parameter_count_ = NULL;
descriptor->deoptimization_handler_ =
Runtime::FunctionForId(Runtime::kCreateArrayLiteralShallow)->entry;
}
@@ -58,7 +56,6 @@ void FastCloneShallowObjectStub::InitializeInterfaceDescriptor(
static Register registers[] = { rax, rbx, rcx, rdx };
descriptor->register_param_count_ = 4;
descriptor->register_params_ = registers;
- descriptor->stack_parameter_count_ = NULL;
descriptor->deoptimization_handler_ =
Runtime::FunctionForId(Runtime::kCreateObjectLiteralShallow)->entry;
}
@@ -81,7 +78,6 @@ void LoadFieldStub::InitializeInterfaceDescriptor(
static Register registers[] = { rax };
descriptor->register_param_count_ = 1;
descriptor->register_params_ = registers;
- descriptor->stack_parameter_count_ = NULL;
descriptor->deoptimization_handler_ = NULL;
}
@@ -92,7 +88,6 @@ void KeyedLoadFieldStub::InitializeInterfaceDescriptor(
static Register registers[] = { rdx };
descriptor->register_param_count_ = 1;
descriptor->register_params_ = registers;
- descriptor->stack_parameter_count_ = NULL;
descriptor->deoptimization_handler_ = NULL;
}
@@ -137,7 +132,29 @@ static void InitializeArrayConstructorDescriptor(
descriptor->register_params_ = registers;
descriptor->function_mode_ = JS_FUNCTION_STUB_MODE;
descriptor->deoptimization_handler_ =
- FUNCTION_ADDR(ArrayConstructor_StubFailure);
+ Runtime::FunctionForId(Runtime::kArrayConstructor)->entry;
+}
+
+
+static void InitializeInternalArrayConstructorDescriptor(
+ Isolate* isolate,
+ CodeStubInterfaceDescriptor* descriptor,
+ int constant_stack_parameter_count) {
+ // register state
+ // rax -- number of arguments
+ // rdi -- constructor function
+ static Register registers[] = { rdi };
+ descriptor->register_param_count_ = 1;
+
+ if (constant_stack_parameter_count != 0) {
+    // The stack parameter count needs to cover the constructor pointer and
+    // the single argument.
+ descriptor->stack_parameter_count_ = &rax;
+ }
+ descriptor->hint_stack_parameter_count_ = constant_stack_parameter_count;
+ descriptor->register_params_ = registers;
+ descriptor->function_mode_ = JS_FUNCTION_STUB_MODE;
+ descriptor->deoptimization_handler_ =
+ Runtime::FunctionForId(Runtime::kInternalArrayConstructor)->entry;
}
@@ -162,6 +179,27 @@ void ArrayNArgumentsConstructorStub::InitializeInterfaceDescriptor(
}
+void InternalArrayNoArgumentConstructorStub::InitializeInterfaceDescriptor(
+ Isolate* isolate,
+ CodeStubInterfaceDescriptor* descriptor) {
+ InitializeInternalArrayConstructorDescriptor(isolate, descriptor, 0);
+}
+
+
+void InternalArraySingleArgumentConstructorStub::InitializeInterfaceDescriptor(
+ Isolate* isolate,
+ CodeStubInterfaceDescriptor* descriptor) {
+ InitializeInternalArrayConstructorDescriptor(isolate, descriptor, 1);
+}
+
+
+void InternalArrayNArgumentsConstructorStub::InitializeInterfaceDescriptor(
+ Isolate* isolate,
+ CodeStubInterfaceDescriptor* descriptor) {
+ InitializeInternalArrayConstructorDescriptor(isolate, descriptor, -1);
+}
+
+
void CompareNilICStub::InitializeInterfaceDescriptor(
Isolate* isolate,
CodeStubInterfaceDescriptor* descriptor) {
@@ -170,8 +208,21 @@ void CompareNilICStub::InitializeInterfaceDescriptor(
descriptor->register_params_ = registers;
descriptor->deoptimization_handler_ =
FUNCTION_ADDR(CompareNilIC_Miss);
- descriptor->miss_handler_ =
- ExternalReference(IC_Utility(IC::kCompareNilIC_Miss), isolate);
+ descriptor->SetMissHandler(
+ ExternalReference(IC_Utility(IC::kCompareNilIC_Miss), isolate));
+}
+
+
+void ToBooleanStub::InitializeInterfaceDescriptor(
+ Isolate* isolate,
+ CodeStubInterfaceDescriptor* descriptor) {
+ static Register registers[] = { rax };
+ descriptor->register_param_count_ = 1;
+ descriptor->register_params_ = registers;
+ descriptor->deoptimization_handler_ =
+ FUNCTION_ADDR(ToBooleanIC_Miss);
+ descriptor->SetMissHandler(
+ ExternalReference(IC_Utility(IC::kToBooleanIC_Miss), isolate));
}
@@ -194,7 +245,7 @@ void HydrogenCodeStub::GenerateLightweightMiss(MacroAssembler* masm) {
for (int i = 0; i < param_count; ++i) {
__ push(descriptor->register_params_[i]);
}
- ExternalReference miss = descriptor->miss_handler_;
+ ExternalReference miss = descriptor->miss_handler();
__ CallExternalReference(miss, descriptor->register_param_count_);
}
@@ -462,106 +513,6 @@ void FastNewBlockContextStub::Generate(MacroAssembler* masm) {
}
-// The stub expects its argument on the stack and returns its result in tos_:
-// zero for false, and a non-zero value for true.
-void ToBooleanStub::Generate(MacroAssembler* masm) {
- // This stub overrides SometimesSetsUpAFrame() to return false. That means
- // we cannot call anything that could cause a GC from this stub.
- Label patch;
- const Register argument = rax;
- const Register map = rdx;
-
- if (!types_.IsEmpty()) {
- __ movq(argument, Operand(rsp, 1 * kPointerSize));
- }
-
- // undefined -> false
- CheckOddball(masm, UNDEFINED, Heap::kUndefinedValueRootIndex, false);
-
- // Boolean -> its value
- CheckOddball(masm, BOOLEAN, Heap::kFalseValueRootIndex, false);
- CheckOddball(masm, BOOLEAN, Heap::kTrueValueRootIndex, true);
-
- // 'null' -> false.
- CheckOddball(masm, NULL_TYPE, Heap::kNullValueRootIndex, false);
-
- if (types_.Contains(SMI)) {
- // Smis: 0 -> false, all other -> true
- Label not_smi;
- __ JumpIfNotSmi(argument, &not_smi, Label::kNear);
- // argument contains the correct return value already
- if (!tos_.is(argument)) {
- __ movq(tos_, argument);
- }
- __ ret(1 * kPointerSize);
- __ bind(&not_smi);
- } else if (types_.NeedsMap()) {
- // If we need a map later and have a Smi -> patch.
- __ JumpIfSmi(argument, &patch, Label::kNear);
- }
-
- if (types_.NeedsMap()) {
- __ movq(map, FieldOperand(argument, HeapObject::kMapOffset));
-
- if (types_.CanBeUndetectable()) {
- __ testb(FieldOperand(map, Map::kBitFieldOffset),
- Immediate(1 << Map::kIsUndetectable));
- // Undetectable -> false.
- Label not_undetectable;
- __ j(zero, &not_undetectable, Label::kNear);
- __ Set(tos_, 0);
- __ ret(1 * kPointerSize);
- __ bind(&not_undetectable);
- }
- }
-
- if (types_.Contains(SPEC_OBJECT)) {
- // spec object -> true.
- Label not_js_object;
- __ CmpInstanceType(map, FIRST_SPEC_OBJECT_TYPE);
- __ j(below, &not_js_object, Label::kNear);
- // argument contains the correct return value already.
- if (!tos_.is(argument)) {
- __ Set(tos_, 1);
- }
- __ ret(1 * kPointerSize);
- __ bind(&not_js_object);
- }
-
- if (types_.Contains(STRING)) {
- // String value -> false iff empty.
- Label not_string;
- __ CmpInstanceType(map, FIRST_NONSTRING_TYPE);
- __ j(above_equal, &not_string, Label::kNear);
- __ movq(tos_, FieldOperand(argument, String::kLengthOffset));
- __ ret(1 * kPointerSize); // the string length is OK as the return value
- __ bind(&not_string);
- }
-
- if (types_.Contains(HEAP_NUMBER)) {
- // heap number -> false iff +0, -0, or NaN.
- Label not_heap_number, false_result;
- __ CompareRoot(map, Heap::kHeapNumberMapRootIndex);
- __ j(not_equal, &not_heap_number, Label::kNear);
- __ xorps(xmm0, xmm0);
- __ ucomisd(xmm0, FieldOperand(argument, HeapNumber::kValueOffset));
- __ j(zero, &false_result, Label::kNear);
- // argument contains the correct return value already.
- if (!tos_.is(argument)) {
- __ Set(tos_, 1);
- }
- __ ret(1 * kPointerSize);
- __ bind(&false_result);
- __ Set(tos_, 0);
- __ ret(1 * kPointerSize);
- __ bind(&not_heap_number);
- }
-
- __ bind(&patch);
- GenerateTypeTransition(masm);
-}
-
-
void StoreBufferOverflowStub::Generate(MacroAssembler* masm) {
__ PushCallerSaved(save_doubles_);
const int argument_count = 1;
@@ -578,44 +529,6 @@ void StoreBufferOverflowStub::Generate(MacroAssembler* masm) {
}
-void ToBooleanStub::CheckOddball(MacroAssembler* masm,
- Type type,
- Heap::RootListIndex value,
- bool result) {
- const Register argument = rax;
- if (types_.Contains(type)) {
- // If we see an expected oddball, return its ToBoolean value tos_.
- Label different_value;
- __ CompareRoot(argument, value);
- __ j(not_equal, &different_value, Label::kNear);
- if (!result) {
- // If we have to return zero, there is no way around clearing tos_.
- __ Set(tos_, 0);
- } else if (!tos_.is(argument)) {
- // If we have to return non-zero, we can re-use the argument if it is the
- // same register as the result, because we never see Smi-zero here.
- __ Set(tos_, 1);
- }
- __ ret(1 * kPointerSize);
- __ bind(&different_value);
- }
-}
-
-
-void ToBooleanStub::GenerateTypeTransition(MacroAssembler* masm) {
- __ pop(rcx); // Get return address, operand is now on top of stack.
- __ Push(Smi::FromInt(tos_.code()));
- __ Push(Smi::FromInt(types_.ToByte()));
- __ push(rcx); // Push return address.
- // Patch the caller to an appropriate specialized stub and return the
- // operation result to the caller of the stub.
- __ TailCallExternalReference(
- ExternalReference(IC_Utility(IC::kToBoolean_Patch), masm->isolate()),
- 3,
- 1);
-}
-
-
class FloatingPointHelper : public AllStatic {
public:
enum ConvertUndefined {
@@ -1317,7 +1230,15 @@ void BinaryOpStub::GenerateAddStrings(MacroAssembler* masm) {
void BinaryOpStub::GenerateSmiStub(MacroAssembler* masm) {
- Label call_runtime;
+ Label right_arg_changed, call_runtime;
+
+ if (op_ == Token::MOD && has_fixed_right_arg_) {
+ // It is guaranteed that the value will fit into a Smi, because if it
+ // didn't, we wouldn't be here, see BinaryOp_Patch.
+ __ Cmp(rax, Smi::FromInt(fixed_right_arg_value()));
+ __ j(not_equal, &right_arg_changed);
+ }
+
if (result_type_ == BinaryOpIC::UNINITIALIZED ||
result_type_ == BinaryOpIC::SMI) {
// Only allow smi results.
@@ -1331,6 +1252,7 @@ void BinaryOpStub::GenerateSmiStub(MacroAssembler* masm) {
// Code falls through if the result is not returned as either a smi or heap
// number.
+ __ bind(&right_arg_changed);
GenerateTypeTransition(masm);
if (call_runtime.is_linked()) {
@@ -3564,7 +3486,7 @@ void ICCompareStub::GenerateGeneric(MacroAssembler* masm) {
__ bind(&check_for_nan);
}
- // Test for NaN. Sadly, we can't just compare to FACTORY->nan_value(),
+ // Test for NaN. Sadly, we can't just compare to Factory::nan_value(),
// so we do the second best thing - test it ourselves.
Label heap_number;
// If it's not a heap number, then return equal for (in)equality operator.
@@ -3801,7 +3723,6 @@ static void GenerateRecordCallTargetNoArray(MacroAssembler* masm) {
// megamorphic.
// rbx : cache cell for call target
// rdi : the function to call
- ASSERT(!FLAG_optimize_constructed_arrays);
Isolate* isolate = masm->isolate();
Label initialize, done;
@@ -4520,7 +4441,7 @@ void InstanceofStub::Generate(MacroAssembler* masm) {
static const int kOffsetToResultValue = 18;
// The last 4 bytes of the instruction sequence
// movq(rdi, FieldOperand(rax, HeapObject::kMapOffset))
- // Move(kScratchRegister, FACTORY->the_hole_value())
+ // Move(kScratchRegister, Factory::the_hole_value())
// in front of the hole value address.
static const unsigned int kWordBeforeMapCheckValue = 0xBA49FF78;
// The last 4 bytes of the instruction sequence
@@ -6834,17 +6755,17 @@ void ProfileEntryHookStub::Generate(MacroAssembler* masm) {
// Calculate the original stack pointer and store it in the second arg.
#ifdef _WIN64
- __ lea(rdx, Operand(rsp, kNumSavedRegisters * kPointerSize));
+ __ lea(rdx, Operand(rsp, (kNumSavedRegisters + 1) * kPointerSize));
#else
- __ lea(rsi, Operand(rsp, kNumSavedRegisters * kPointerSize));
+ __ lea(rsi, Operand(rsp, (kNumSavedRegisters + 1) * kPointerSize));
#endif
// Calculate the function address to the first arg.
#ifdef _WIN64
- __ movq(rcx, Operand(rdx, 0));
+ __ movq(rcx, Operand(rsp, kNumSavedRegisters * kPointerSize));
__ subq(rcx, Immediate(Assembler::kShortCallInstructionLength));
#else
- __ movq(rdi, Operand(rsi, 0));
+ __ movq(rdi, Operand(rsp, kNumSavedRegisters * kPointerSize));
__ subq(rdi, Immediate(Assembler::kShortCallInstructionLength));
#endif
@@ -6955,6 +6876,10 @@ static void ArrayConstructorStubAheadOfTimeHelper(Isolate* isolate) {
ElementsKind kind = GetFastElementsKindFromSequenceIndex(i);
T stub(kind);
stub.GetCode(isolate)->set_is_pregenerated(true);
+ if (AllocationSiteInfo::GetMode(kind) != DONT_TRACK_ALLOCATION_SITE) {
+ T stub1(kind, true);
+ stub1.GetCode(isolate)->set_is_pregenerated(true);
+ }
}
}
@@ -6969,6 +6894,20 @@ void ArrayConstructorStubBase::GenerateStubsAheadOfTime(Isolate* isolate) {
}
+void InternalArrayConstructorStubBase::GenerateStubsAheadOfTime(
+ Isolate* isolate) {
+ ElementsKind kinds[2] = { FAST_ELEMENTS, FAST_HOLEY_ELEMENTS };
+ for (int i = 0; i < 2; i++) {
+    // For internal arrays we only need the no-, single- and N-argument
+    // stubs for each of the two elements kinds.
+ InternalArrayNoArgumentConstructorStub stubh1(kinds[i]);
+ stubh1.GetCode(isolate)->set_is_pregenerated(true);
+ InternalArraySingleArgumentConstructorStub stubh2(kinds[i]);
+ stubh2.GetCode(isolate)->set_is_pregenerated(true);
+ InternalArrayNArgumentsConstructorStub stubh3(kinds[i]);
+ stubh3.GetCode(isolate)->set_is_pregenerated(true);
+ }
+}
+
void ArrayConstructorStub::Generate(MacroAssembler* masm) {
// ----------- S t a t e -------------
@@ -7056,6 +6995,108 @@ void ArrayConstructorStub::Generate(MacroAssembler* masm) {
}
+void InternalArrayConstructorStub::GenerateCase(
+ MacroAssembler* masm, ElementsKind kind) {
+ Label not_zero_case, not_one_case;
+ Label normal_sequence;
+
+ __ testq(rax, rax);
+ __ j(not_zero, &not_zero_case);
+ InternalArrayNoArgumentConstructorStub stub0(kind);
+ __ TailCallStub(&stub0);
+
+ __ bind(&not_zero_case);
+ __ cmpl(rax, Immediate(1));
+ __ j(greater, &not_one_case);
+
+ if (IsFastPackedElementsKind(kind)) {
+    // We might need to create a holey array: look at the first argument;
+    // a nonzero length means the elements start out as holes.
+ __ movq(rcx, Operand(rsp, kPointerSize));
+ __ testq(rcx, rcx);
+ __ j(zero, &normal_sequence);
+
+ InternalArraySingleArgumentConstructorStub
+ stub1_holey(GetHoleyElementsKind(kind));
+ __ TailCallStub(&stub1_holey);
+ }
+
+ __ bind(&normal_sequence);
+ InternalArraySingleArgumentConstructorStub stub1(kind);
+ __ TailCallStub(&stub1);
+
+ __ bind(&not_one_case);
+ InternalArrayNArgumentsConstructorStub stubN(kind);
+ __ TailCallStub(&stubN);
+}
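+
+// GenerateCase dispatches on the argument count and, for a packed elements
+// kind, upgrades to the holey variant when new InternalArray(n) is called
+// with a nonzero n, since those n elements start out as holes. The kind
+// selection in scalar form (hypothetical helper; the real code tail-calls
+// stubs instead of returning):
+//
+//   ElementsKind SelectKind(int argc, int first_arg, ElementsKind kind) {
+//     if (argc == 1 && IsFastPackedElementsKind(kind) && first_arg != 0) {
+//       return GetHoleyElementsKind(kind);  // nonzero length => holes
+//     }
+//     return kind;  // zero args, N args, or an empty single-arg array
+//   }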
+
+
+void InternalArrayConstructorStub::Generate(MacroAssembler* masm) {
+ // ----------- S t a t e -------------
+  //  -- rax    : argc
+  //  -- rbx    : type info cell
+  //  -- rdi    : constructor
+  //  -- rsp[0] : return address
+  //  -- rsp[8] : last argument
+ // -----------------------------------
+
+ if (FLAG_debug_code) {
+ // The array construct code is only set for the global and natives
+    // builtin Array functions, which always have maps.
+
+ // Initial map for the builtin Array function should be a map.
+ __ movq(rcx, FieldOperand(rdi, JSFunction::kPrototypeOrInitialMapOffset));
+ // Will both indicate a NULL and a Smi.
+ STATIC_ASSERT(kSmiTag == 0);
+ Condition not_smi = NegateCondition(masm->CheckSmi(rcx));
+ __ Check(not_smi, "Unexpected initial map for Array function");
+ __ CmpObjectType(rcx, MAP_TYPE, rcx);
+ __ Check(equal, "Unexpected initial map for Array function");
+ }
+
+ if (FLAG_optimize_constructed_arrays) {
+ // Figure out the right elements kind
+ __ movq(rcx, FieldOperand(rdi, JSFunction::kPrototypeOrInitialMapOffset));
+
+    // Load the map's "bit field 2" into rcx. We only need the first byte,
+ // but the following masking takes care of that anyway.
+ __ movzxbq(rcx, FieldOperand(rcx, Map::kBitField2Offset));
+ // Retrieve elements_kind from bit field 2.
+ __ and_(rcx, Immediate(Map::kElementsKindMask));
+ __ shr(rcx, Immediate(Map::kElementsKindShift));
+
+ if (FLAG_debug_code) {
+ Label done;
+ __ cmpl(rcx, Immediate(FAST_ELEMENTS));
+ __ j(equal, &done);
+ __ cmpl(rcx, Immediate(FAST_HOLEY_ELEMENTS));
+ __ Assert(equal,
+ "Invalid ElementsKind for InternalArray or InternalPackedArray");
+ __ bind(&done);
+ }
+
+ Label fast_elements_case;
+ __ cmpl(rcx, Immediate(FAST_ELEMENTS));
+ __ j(equal, &fast_elements_case);
+ GenerateCase(masm, FAST_HOLEY_ELEMENTS);
+
+ __ bind(&fast_elements_case);
+ GenerateCase(masm, FAST_ELEMENTS);
+ } else {
+ Label generic_constructor;
+ // Run the native code for the Array function called as constructor.
+ ArrayNativeCode(masm, &generic_constructor);
+
+ // Jump to the generic construct code in case the specialized code cannot
+ // handle the construction.
+ __ bind(&generic_constructor);
+ Handle<Code> generic_construct_stub =
+ masm->isolate()->builtins()->JSConstructStubGeneric();
+ __ jmp(generic_construct_stub, RelocInfo::CODE_TARGET);
+ }
+}
+
+
#undef __
} } // namespace v8::internal
diff --git a/deps/v8/src/x64/codegen-x64.cc b/deps/v8/src/x64/codegen-x64.cc
index 6f32f7cf04..2ac56a144c 100644
--- a/deps/v8/src/x64/codegen-x64.cc
+++ b/deps/v8/src/x64/codegen-x64.cc
@@ -624,46 +624,6 @@ void StringCharLoadGenerator::Generate(MacroAssembler* masm,
}
-void SeqStringSetCharGenerator::Generate(MacroAssembler* masm,
- String::Encoding encoding,
- Register string,
- Register index,
- Register value) {
- if (FLAG_debug_code) {
- __ Check(masm->CheckSmi(index), "Non-smi index");
- __ Check(masm->CheckSmi(value), "Non-smi value");
-
- __ SmiCompare(index, FieldOperand(string, String::kLengthOffset));
- __ Check(less, "Index is too large");
-
- __ SmiCompare(index, Smi::FromInt(0));
- __ Check(greater_equal, "Index is negative");
-
- __ push(value);
- __ movq(value, FieldOperand(string, HeapObject::kMapOffset));
- __ movzxbq(value, FieldOperand(value, Map::kInstanceTypeOffset));
-
- __ andb(value, Immediate(kStringRepresentationMask | kStringEncodingMask));
- static const uint32_t one_byte_seq_type = kSeqStringTag | kOneByteStringTag;
- static const uint32_t two_byte_seq_type = kSeqStringTag | kTwoByteStringTag;
- __ cmpq(value, Immediate(encoding == String::ONE_BYTE_ENCODING
- ? one_byte_seq_type : two_byte_seq_type));
- __ Check(equal, "Unexpected string type");
- __ pop(value);
- }
-
- __ SmiToInteger32(value, value);
- __ SmiToInteger32(index, index);
- if (encoding == String::ONE_BYTE_ENCODING) {
- __ movb(FieldOperand(string, index, times_1, SeqString::kHeaderSize),
- value);
- } else {
- __ movw(FieldOperand(string, index, times_2, SeqString::kHeaderSize),
- value);
- }
-}
-
-
void MathExpGenerator::EmitMathExp(MacroAssembler* masm,
XMMRegister input,
XMMRegister result,
diff --git a/deps/v8/src/x64/codegen-x64.h b/deps/v8/src/x64/codegen-x64.h
index 3a7646bd1b..5747e0bc6f 100644
--- a/deps/v8/src/x64/codegen-x64.h
+++ b/deps/v8/src/x64/codegen-x64.h
@@ -51,7 +51,7 @@ class CodeGenerator: public AstVisitor {
static bool MakeCode(CompilationInfo* info);
// Printing of AST, etc. as requested by flags.
- static void MakeCodePrologue(CompilationInfo* info);
+ static void MakeCodePrologue(CompilationInfo* info, const char* kind);
// Allocate and install the code.
static Handle<Code> MakeCodeEpilogue(MacroAssembler* masm,
diff --git a/deps/v8/src/x64/deoptimizer-x64.cc b/deps/v8/src/x64/deoptimizer-x64.cc
index a579f52e7b..21682c2708 100644
--- a/deps/v8/src/x64/deoptimizer-x64.cc
+++ b/deps/v8/src/x64/deoptimizer-x64.cc
@@ -50,7 +50,7 @@ void Deoptimizer::DeoptimizeFunctionWithPreparedFunctionList(
JSFunction* function) {
Isolate* isolate = function->GetIsolate();
HandleScope scope(isolate);
- AssertNoAllocation no_allocation;
+ DisallowHeapAllocation nha;
ASSERT(function->IsOptimized());
ASSERT(function->FunctionsInFunctionListShareSameCode());
diff --git a/deps/v8/src/x64/full-codegen-x64.cc b/deps/v8/src/x64/full-codegen-x64.cc
index 22c6fae187..e9fe2a8cd2 100644
--- a/deps/v8/src/x64/full-codegen-x64.cc
+++ b/deps/v8/src/x64/full-codegen-x64.cc
@@ -652,9 +652,8 @@ void FullCodeGenerator::DoTest(Expression* condition,
Label* if_true,
Label* if_false,
Label* fall_through) {
- ToBooleanStub stub(result_register());
- __ push(result_register());
- __ CallStub(&stub, condition->test_id());
+ Handle<Code> ic = ToBooleanStub::GetUninitialized(isolate());
+ CallIC(ic, RelocInfo::CODE_TARGET, condition->test_id());
__ testq(result_register(), result_register());
// The stub returns nonzero for true.
Split(not_zero, if_true, if_false, fall_through);
@@ -1047,9 +1046,8 @@ void FullCodeGenerator::VisitForInStatement(ForInStatement* stmt) {
ForIn loop_statement(this, stmt);
increment_loop_depth();
- // Get the object to enumerate over. Both SpiderMonkey and JSC
- // ignore null and undefined in contrast to the specification; see
- // ECMA-262 section 12.6.4.
+ // Get the object to enumerate over. If the object is null or undefined, skip
+ // over the loop. See ECMA-262 version 5, section 12.6.4.
VisitForAccumulatorValue(stmt->enumerable());
__ CompareRoot(rax, Heap::kUndefinedValueRootIndex);
__ j(equal, &exit);
@@ -1225,6 +1223,64 @@ void FullCodeGenerator::VisitForInStatement(ForInStatement* stmt) {
}
+void FullCodeGenerator::VisitForOfStatement(ForOfStatement* stmt) {
+ Comment cmnt(masm_, "[ ForOfStatement");
+ SetStatementPosition(stmt);
+
+ Iteration loop_statement(this, stmt);
+ increment_loop_depth();
+
+ // var iterator = iterable[@@iterator]()
+ VisitForAccumulatorValue(stmt->assign_iterator());
+
+ // As with for-in, skip the loop if the iterator is null or undefined.
+ __ CompareRoot(rax, Heap::kUndefinedValueRootIndex);
+ __ j(equal, loop_statement.break_label());
+ __ CompareRoot(rax, Heap::kNullValueRootIndex);
+ __ j(equal, loop_statement.break_label());
+
+ // Convert the iterator to a JS object.
+ Label convert, done_convert;
+ __ JumpIfSmi(rax, &convert);
+ __ CmpObjectType(rax, FIRST_SPEC_OBJECT_TYPE, rcx);
+ __ j(above_equal, &done_convert);
+ __ bind(&convert);
+ __ push(rax);
+ __ InvokeBuiltin(Builtins::TO_OBJECT, CALL_FUNCTION);
+ __ bind(&done_convert);
+
+ // Loop entry.
+ __ bind(loop_statement.continue_label());
+
+ // result = iterator.next()
+ VisitForEffect(stmt->next_result());
+
+ // if (result.done) break;
+ Label result_not_done;
+ VisitForControl(stmt->result_done(),
+ loop_statement.break_label(),
+ &result_not_done,
+ &result_not_done);
+ __ bind(&result_not_done);
+
+ // each = result.value
+ VisitForEffect(stmt->assign_each());
+
+ // Generate code for the body of the loop.
+ Visit(stmt->body());
+
+ // Check stack before looping.
+ PrepareForBailoutForId(stmt->BackEdgeId(), NO_REGISTERS);
+ EmitBackEdgeBookkeeping(stmt, loop_statement.continue_label());
+ __ jmp(loop_statement.continue_label());
+
+ // Exit and decrement the loop depth.
+ PrepareForBailoutForId(stmt->ExitId(), NO_REGISTERS);
+ __ bind(loop_statement.break_label());
+ decrement_loop_depth();
+}
+
+
void FullCodeGenerator::EmitNewClosure(Handle<SharedFunctionInfo> info,
bool pretenure) {
// Use the fast case closure allocation code that allocates in new
@@ -1956,10 +2012,10 @@ void FullCodeGenerator::VisitYield(Yield* expr) {
// [sp + 1 * kPointerSize] iter
// [sp + 0 * kPointerSize] g
- Label l_catch, l_try, l_resume, l_send, l_call, l_loop;
+ Label l_catch, l_try, l_resume, l_next, l_call, l_loop;
// Initial send value is undefined.
__ LoadRoot(rax, Heap::kUndefinedValueRootIndex);
- __ jmp(&l_send);
+ __ jmp(&l_next);
// catch (e) { receiver = iter; f = iter.throw; arg = e; goto l_call; }
__ bind(&l_catch);
@@ -1989,15 +2045,15 @@ void FullCodeGenerator::VisitYield(Yield* expr) {
__ bind(&l_resume); // received in rax
__ PopTryHandler();
- // receiver = iter; f = iter.send; arg = received;
- __ bind(&l_send);
+ // receiver = iter; f = iter.next; arg = received;
+ __ bind(&l_next);
__ movq(rcx, Operand(rsp, 1 * kPointerSize)); // iter
__ push(rcx); // iter
__ push(rax); // received
__ movq(rax, rcx); // iter
- __ LoadRoot(rcx, Heap::ksend_stringRootIndex); // "send"
- Handle<Code> send_ic = isolate()->builtins()->LoadIC_Initialize();
- CallIC(send_ic); // iter.send in rax
+ __ LoadRoot(rcx, Heap::knext_stringRootIndex); // "next"
+ Handle<Code> next_ic = isolate()->builtins()->LoadIC_Initialize();
+ CallIC(next_ic); // iter.next in rax
// result = f.call(receiver, arg);
__ bind(&l_call);
@@ -2028,10 +2084,9 @@ void FullCodeGenerator::VisitYield(Yield* expr) {
__ LoadRoot(rcx, Heap::kdone_stringRootIndex); // "done"
Handle<Code> done_ic = isolate()->builtins()->LoadIC_Initialize();
CallIC(done_ic); // result.done in rax
- ToBooleanStub stub(rax);
- __ push(rax);
- __ CallStub(&stub);
- __ testq(rax, rax);
+ Handle<Code> bool_ic = ToBooleanStub::GetUninitialized(isolate());
+ CallIC(bool_ic);
+ __ testq(result_register(), result_register());
__ j(zero, &l_try);
// result.value
@@ -2100,7 +2155,7 @@ void FullCodeGenerator::EmitGeneratorResume(Expression *generator,
// If we are sending a value and there is no operand stack, we can jump back
// in directly.
- if (resume_mode == JSGeneratorObject::SEND) {
+ if (resume_mode == JSGeneratorObject::NEXT) {
Label slow_resume;
__ cmpq(rdx, Immediate(0));
__ j(not_zero, &slow_resume);
@@ -2936,7 +2991,7 @@ void FullCodeGenerator::EmitIsStringWrapperSafeForDefaultValueOf(
__ jmp(&entry);
__ bind(&loop);
__ movq(rdx, FieldOperand(rbx, 0));
- __ Cmp(rdx, FACTORY->value_of_string());
+ __ Cmp(rdx, isolate()->factory()->value_of_string());
__ j(equal, if_false);
__ addq(rbx, Immediate(DescriptorArray::kDescriptorSize * kPointerSize));
__ bind(&entry);
@@ -3345,19 +3400,54 @@ void FullCodeGenerator::EmitDateField(CallRuntime* expr) {
}
+void FullCodeGenerator::EmitSeqStringSetCharCheck(Register string,
+ Register index,
+ Register value,
+ uint32_t encoding_mask) {
+ __ Check(masm()->CheckSmi(index), "Non-smi index");
+ __ Check(masm()->CheckSmi(value), "Non-smi value");
+
+ __ SmiCompare(index, FieldOperand(string, String::kLengthOffset));
+ __ Check(less, "Index is too large");
+
+ __ SmiCompare(index, Smi::FromInt(0));
+ __ Check(greater_equal, "Index is negative");
+
+ __ push(value);
+ __ movq(value, FieldOperand(string, HeapObject::kMapOffset));
+ __ movzxbq(value, FieldOperand(value, Map::kInstanceTypeOffset));
+
+ __ andb(value, Immediate(kStringRepresentationMask | kStringEncodingMask));
+ __ cmpq(value, Immediate(encoding_mask));
+ __ Check(equal, "Unexpected string type");
+ __ pop(value);
+}
+
+
void FullCodeGenerator::EmitOneByteSeqStringSetChar(CallRuntime* expr) {
ZoneList<Expression*>* args = expr->arguments();
ASSERT_EQ(3, args->length());
+ Register string = rax;
+ Register index = rbx;
+ Register value = rcx;
+
VisitForStackValue(args->at(1)); // index
VisitForStackValue(args->at(2)); // value
- __ pop(rcx);
- __ pop(rbx);
+ __ pop(value);
+ __ pop(index);
VisitForAccumulatorValue(args->at(0)); // string
- static const String::Encoding encoding = String::ONE_BYTE_ENCODING;
- SeqStringSetCharGenerator::Generate(masm_, encoding, rax, rbx, rcx);
- context()->Plug(rax);
+ if (FLAG_debug_code) {
+ static const uint32_t one_byte_seq_type = kSeqStringTag | kOneByteStringTag;
+ EmitSeqStringSetCharCheck(string, index, value, one_byte_seq_type);
+ }
+
+ __ SmiToInteger32(value, value);
+ __ SmiToInteger32(index, index);
+ __ movb(FieldOperand(string, index, times_1, SeqOneByteString::kHeaderSize),
+ value);
+ context()->Plug(string);
}
@@ -3365,14 +3455,25 @@ void FullCodeGenerator::EmitTwoByteSeqStringSetChar(CallRuntime* expr) {
ZoneList<Expression*>* args = expr->arguments();
ASSERT_EQ(3, args->length());
+ Register string = rax;
+ Register index = rbx;
+ Register value = rcx;
+
VisitForStackValue(args->at(1)); // index
VisitForStackValue(args->at(2)); // value
- __ pop(rcx);
- __ pop(rbx);
+ __ pop(value);
+ __ pop(index);
VisitForAccumulatorValue(args->at(0)); // string
- static const String::Encoding encoding = String::TWO_BYTE_ENCODING;
- SeqStringSetCharGenerator::Generate(masm_, encoding, rax, rbx, rcx);
+ if (FLAG_debug_code) {
+ static const uint32_t two_byte_seq_type = kSeqStringTag | kTwoByteStringTag;
+ EmitSeqStringSetCharCheck(string, index, value, two_byte_seq_type);
+ }
+
+ __ SmiToInteger32(value, value);
+ __ SmiToInteger32(index, index);
+ __ movw(FieldOperand(string, index, times_2, SeqTwoByteString::kHeaderSize),
+ value);
context()->Plug(rax);
}
@@ -4653,18 +4754,14 @@ void FullCodeGenerator::EmitLiteralCompareNil(CompareOperation* expr,
VisitForAccumulatorValue(sub_expr);
PrepareForBailoutBeforeSplit(expr, true, if_true, if_false);
- EqualityKind kind = expr->op() == Token::EQ_STRICT
- ? kStrictEquality : kNonStrictEquality;
- if (kind == kStrictEquality) {
+ if (expr->op() == Token::EQ_STRICT) {
Heap::RootListIndex nil_value = nil == kNullValue ?
Heap::kNullValueRootIndex :
Heap::kUndefinedValueRootIndex;
__ CompareRoot(rax, nil_value);
Split(equal, if_true, if_false, fall_through);
} else {
- Handle<Code> ic = CompareNilICStub::GetUninitialized(isolate(),
- kNonStrictEquality,
- nil);
+ Handle<Code> ic = CompareNilICStub::GetUninitialized(isolate(), nil);
CallIC(ic, RelocInfo::CODE_TARGET, expr->CompareOperationFeedbackId());
__ testq(rax, rax);
Split(not_zero, if_true, if_false, fall_through);
diff --git a/deps/v8/src/x64/ic-x64.cc b/deps/v8/src/x64/ic-x64.cc
index 6425f89416..efb41c85ec 100644
--- a/deps/v8/src/x64/ic-x64.cc
+++ b/deps/v8/src/x64/ic-x64.cc
@@ -1380,6 +1380,23 @@ void LoadIC::GenerateMiss(MacroAssembler* masm) {
}
+void LoadIC::GenerateRuntimeGetProperty(MacroAssembler* masm) {
+ // ----------- S t a t e -------------
+ // -- rax : receiver
+ // -- rcx : name
+ // -- rsp[0] : return address
+ // -----------------------------------
+
+ __ pop(rbx);
+ __ push(rax); // receiver
+ __ push(rcx); // name
+ __ push(rbx); // return address
+
+ // Perform tail call to the entry.
+ __ TailCallRuntime(Runtime::kGetProperty, 2, 1);
+}
+
+
void KeyedLoadIC::GenerateMiss(MacroAssembler* masm, ICMissMode miss_mode) {
// ----------- S t a t e -------------
// -- rax : key
diff --git a/deps/v8/src/x64/lithium-codegen-x64.cc b/deps/v8/src/x64/lithium-codegen-x64.cc
index 9a1ce98009..f423133cf1 100644
--- a/deps/v8/src/x64/lithium-codegen-x64.cc
+++ b/deps/v8/src/x64/lithium-codegen-x64.cc
@@ -431,7 +431,13 @@ XMMRegister LCodeGen::ToDoubleRegister(LOperand* op) const {
bool LCodeGen::IsInteger32Constant(LConstantOperand* op) const {
return op->IsConstantOperand() &&
- chunk_->LookupLiteralRepresentation(op).IsInteger32();
+ chunk_->LookupLiteralRepresentation(op).IsSmiOrInteger32();
+}
+
+
+bool LCodeGen::IsSmiConstant(LConstantOperand* op) const {
+ return op->IsConstantOperand() &&
+ chunk_->LookupLiteralRepresentation(op).IsSmi();
}
@@ -447,6 +453,12 @@ int LCodeGen::ToInteger32(LConstantOperand* op) const {
}
+Smi* LCodeGen::ToSmi(LConstantOperand* op) const {
+ HConstant* constant = chunk_->LookupConstant(op);
+ return Smi::FromInt(constant->Integer32Value());
+}
+
+
double LCodeGen::ToDouble(LConstantOperand* op) const {
HConstant* constant = chunk_->LookupConstant(op);
ASSERT(constant->HasDoubleValue());
@@ -456,7 +468,7 @@ double LCodeGen::ToDouble(LConstantOperand* op) const {
Handle<Object> LCodeGen::ToHandle(LConstantOperand* op) const {
HConstant* constant = chunk_->LookupConstant(op);
- ASSERT(chunk_->LookupLiteralRepresentation(op).IsTagged());
+ ASSERT(chunk_->LookupLiteralRepresentation(op).IsSmiOrTagged());
return constant->handle();
}
@@ -825,8 +837,7 @@ void LCodeGen::PopulateDeoptimizationData(Handle<Code> code) {
Handle<FixedArray> literals =
factory()->NewFixedArray(deoptimization_literals_.length(), TENURED);
- { ALLOW_HANDLE_DEREF(isolate(),
- "copying a ZoneList of handles into a FixedArray");
+ { AllowDeferredHandleDereference copy_handles;
for (int i = 0; i < deoptimization_literals_.length(); i++) {
literals->set(i, *deoptimization_literals_[i]);
}
@@ -1034,111 +1045,116 @@ void LCodeGen::DoUnknownOSRValue(LUnknownOSRValue* instr) {
void LCodeGen::DoModI(LModI* instr) {
- if (instr->hydrogen()->HasPowerOf2Divisor()) {
- Register dividend = ToRegister(instr->left());
-
- int32_t divisor =
- HConstant::cast(instr->hydrogen()->right())->Integer32Value();
+ HMod* hmod = instr->hydrogen();
+ HValue* left = hmod->left();
+ HValue* right = hmod->right();
+ if (hmod->HasPowerOf2Divisor()) {
+    // TODO(svenpanne) We should really do the strength reduction at the
+    // Hydrogen level.
+ Register left_reg = ToRegister(instr->left());
+ ASSERT(left_reg.is(ToRegister(instr->result())));
- if (divisor < 0) divisor = -divisor;
+ // Note: The code below even works when right contains kMinInt.
+ int32_t divisor = Abs(right->GetInteger32Constant());
- Label positive_dividend, done;
- __ testl(dividend, dividend);
- __ j(not_sign, &positive_dividend, Label::kNear);
- __ negl(dividend);
- __ andl(dividend, Immediate(divisor - 1));
- __ negl(dividend);
- if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) {
- __ j(not_zero, &done, Label::kNear);
- DeoptimizeIf(no_condition, instr->environment());
- } else {
+ Label left_is_not_negative, done;
+ if (left->CanBeNegative()) {
+ __ testl(left_reg, left_reg);
+ __ j(not_sign, &left_is_not_negative, Label::kNear);
+ __ negl(left_reg);
+ __ andl(left_reg, Immediate(divisor - 1));
+ __ negl(left_reg);
+ if (hmod->CheckFlag(HValue::kBailoutOnMinusZero)) {
+ DeoptimizeIf(zero, instr->environment());
+ }
__ jmp(&done, Label::kNear);
}
- __ bind(&positive_dividend);
- __ andl(dividend, Immediate(divisor - 1));
+
+ __ bind(&left_is_not_negative);
+ __ andl(left_reg, Immediate(divisor - 1));
__ bind(&done);
- } else {
- Label done, remainder_eq_dividend, slow, both_positive;
+
+ } else if (hmod->has_fixed_right_arg()) {
Register left_reg = ToRegister(instr->left());
+ ASSERT(left_reg.is(ToRegister(instr->result())));
Register right_reg = ToRegister(instr->right());
- Register result_reg = ToRegister(instr->result());
+ int32_t divisor = hmod->fixed_right_arg_value();
+ ASSERT(IsPowerOf2(divisor));
+
+ // Check if our assumption of a fixed right operand still holds.
+ __ cmpl(right_reg, Immediate(divisor));
+ DeoptimizeIf(not_equal, instr->environment());
+
+ Label left_is_not_negative, done;
+ if (left->CanBeNegative()) {
+ __ testl(left_reg, left_reg);
+ __ j(not_sign, &left_is_not_negative, Label::kNear);
+ __ negl(left_reg);
+ __ andl(left_reg, Immediate(divisor - 1));
+ __ negl(left_reg);
+ if (hmod->CheckFlag(HValue::kBailoutOnMinusZero)) {
+ DeoptimizeIf(zero, instr->environment());
+ }
+ __ jmp(&done, Label::kNear);
+ }
+
+ __ bind(&left_is_not_negative);
+ __ andl(left_reg, Immediate(divisor - 1));
+ __ bind(&done);
+
+ } else {
+ Register left_reg = ToRegister(instr->left());
ASSERT(left_reg.is(rax));
- ASSERT(result_reg.is(rdx));
+ Register right_reg = ToRegister(instr->right());
ASSERT(!right_reg.is(rax));
ASSERT(!right_reg.is(rdx));
+ Register result_reg = ToRegister(instr->result());
+ ASSERT(result_reg.is(rdx));
- // Check for x % 0.
- if (instr->hydrogen()->CheckFlag(HValue::kCanBeDivByZero)) {
+ Label done;
+    // Check for x % 0; idiv would signal a divide error. We have to
+ // deopt in this case because we can't return a NaN.
+ if (right->CanBeZero()) {
__ testl(right_reg, right_reg);
DeoptimizeIf(zero, instr->environment());
}
- __ testl(left_reg, left_reg);
- __ j(zero, &remainder_eq_dividend, Label::kNear);
- __ j(sign, &slow, Label::kNear);
-
- __ testl(right_reg, right_reg);
- __ j(not_sign, &both_positive, Label::kNear);
- // The sign of the divisor doesn't matter.
- __ neg(right_reg);
-
- __ bind(&both_positive);
- // If the dividend is smaller than the nonnegative
- // divisor, the dividend is the result.
- __ cmpl(left_reg, right_reg);
- __ j(less, &remainder_eq_dividend, Label::kNear);
-
- // Check if the divisor is a PowerOfTwo integer.
- Register scratch = ToRegister(instr->temp());
- __ movl(scratch, right_reg);
- __ subl(scratch, Immediate(1));
- __ testl(scratch, right_reg);
- __ j(not_zero, &slow, Label::kNear);
- __ andl(left_reg, scratch);
- __ jmp(&remainder_eq_dividend, Label::kNear);
-
- // Slow case, using idiv instruction.
- __ bind(&slow);
-
- // Check for (kMinInt % -1).
- if (instr->hydrogen()->CheckFlag(HValue::kCanOverflow)) {
- Label left_not_min_int;
+    // Check for kMinInt % -1; idiv would signal a divide error. We
+ // have to deopt if we care about -0, because we can't return that.
+ if (left->RangeCanInclude(kMinInt) && right->RangeCanInclude(-1)) {
+ Label no_overflow_possible;
__ cmpl(left_reg, Immediate(kMinInt));
- __ j(not_zero, &left_not_min_int, Label::kNear);
+ __ j(not_zero, &no_overflow_possible, Label::kNear);
__ cmpl(right_reg, Immediate(-1));
- DeoptimizeIf(zero, instr->environment());
- __ bind(&left_not_min_int);
+ if (hmod->CheckFlag(HValue::kBailoutOnMinusZero)) {
+ DeoptimizeIf(equal, instr->environment());
+ } else {
+ __ j(not_equal, &no_overflow_possible, Label::kNear);
+ __ Set(result_reg, 0);
+ __ jmp(&done, Label::kNear);
+ }
+ __ bind(&no_overflow_possible);
}
- // Sign extend eax to edx.
- // (We are using only the low 32 bits of the values.)
+    // Sign-extend the dividend in eax into edx:eax, since we use only the
+    // low 32 bits of the values.
__ cdq();
- // Check for (0 % -x) that will produce negative zero.
- if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) {
+ // If we care about -0, test if the dividend is <0 and the result is 0.
+ if (left->CanBeNegative() &&
+ hmod->CanBeZero() &&
+ hmod->CheckFlag(HValue::kBailoutOnMinusZero)) {
Label positive_left;
- Label done;
__ testl(left_reg, left_reg);
__ j(not_sign, &positive_left, Label::kNear);
__ idivl(right_reg);
-
- // Test the remainder for 0, because then the result would be -0.
__ testl(result_reg, result_reg);
- __ j(not_zero, &done, Label::kNear);
-
- DeoptimizeIf(no_condition, instr->environment());
+ DeoptimizeIf(zero, instr->environment());
+ __ jmp(&done, Label::kNear);
__ bind(&positive_left);
- __ idivl(right_reg);
- __ bind(&done);
- } else {
- __ idivl(right_reg);
}
- __ jmp(&done, Label::kNear);
-
- __ bind(&remainder_eq_dividend);
- __ movl(result_reg, left_reg);
-
+ __ idivl(right_reg);
__ bind(&done);
}
}
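
The power-of-2 path above reduces x % d to a mask, negating around the AND for a negative dividend so the result keeps the dividend's sign (truncated-division semantics). A scalar sketch; note that negl wraps modulo 2^32, so the emitted code also covers left == kMinInt, whereas the C++ negation below would overflow there:

    int32_t ModPowerOfTwo(int32_t x, int32_t divisor) {  // divisor == 2^k > 0
      int32_t mask = divisor - 1;
      if (x < 0) return -(-x & mask);  // e.g. -5 % 4 == -1
      return x & mask;                 // e.g.  5 % 4 ==  1
    }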
@@ -1254,10 +1270,26 @@ void LCodeGen::DoDivI(LDivI* instr) {
}
if (test_value != 0) {
- // Deoptimize if remainder is not 0.
- __ testl(dividend, Immediate(test_value));
- DeoptimizeIf(not_zero, instr->environment());
- __ sarl(dividend, Immediate(power));
+ if (instr->hydrogen()->CheckFlag(
+ HInstruction::kAllUsesTruncatingToInt32)) {
+ Label done, negative;
+ __ cmpl(dividend, Immediate(0));
+ __ j(less, &negative, Label::kNear);
+ __ sarl(dividend, Immediate(power));
+ __ jmp(&done, Label::kNear);
+
+ __ bind(&negative);
+ __ negl(dividend);
+ __ sarl(dividend, Immediate(power));
+ if (divisor > 0) __ negl(dividend);
+ __ bind(&done);
+ return; // Don't fall through to "__ neg" below.
+ } else {
+ // Deoptimize if remainder is not 0.
+ __ testl(dividend, Immediate(test_value));
+ DeoptimizeIf(not_zero, instr->environment());
+ __ sarl(dividend, Immediate(power));
+ }
}
if (divisor < 0) __ negl(dividend);
@@ -1304,11 +1336,7 @@ void LCodeGen::DoDivI(LDivI* instr) {
__ cdq();
__ idivl(right_reg);
- if (!instr->is_flooring()) {
- // Deoptimize if remainder is not 0.
- __ testl(rdx, rdx);
- DeoptimizeIf(not_zero, instr->environment());
- } else {
+ if (instr->is_flooring()) {
Label done;
__ testl(rdx, rdx);
__ j(zero, &done, Label::kNear);
@@ -1316,6 +1344,11 @@ void LCodeGen::DoDivI(LDivI* instr) {
__ sarl(rdx, Immediate(31));
__ addl(rax, rdx);
__ bind(&done);
+ } else if (!instr->hydrogen()->CheckFlag(
+ HInstruction::kAllUsesTruncatingToInt32)) {
+ // Deoptimize if remainder is not 0.
+ __ testl(rdx, rdx);
+ DeoptimizeIf(not_zero, instr->environment());
}
}
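
The flooring branch above corrects idiv's truncated quotient: when the remainder in rdx is nonzero and its sign differs from the divisor's, (rdx ^ divisor) >> 31 is exactly -1 and is added to the quotient. The same fix-up in scalar form:

    int32_t FlooringDiv(int32_t a, int32_t b) {  // b != 0, not kMinInt / -1
      int32_t q = a / b;                         // truncates toward zero
      int32_t r = a % b;
      if (r != 0 && ((r ^ b) < 0)) q -= 1;       // round toward -infinity
      return q;
    }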
@@ -1548,11 +1581,15 @@ void LCodeGen::DoSubI(LSubI* instr) {
void LCodeGen::DoConstantI(LConstantI* instr) {
- ASSERT(instr->result()->IsRegister());
__ Set(ToRegister(instr->result()), instr->value());
}
+void LCodeGen::DoConstantS(LConstantS* instr) {
+ __ Move(ToRegister(instr->result()), instr->value());
+}
+
+
void LCodeGen::DoConstantD(LConstantD* instr) {
ASSERT(instr->result()->IsDoubleRegister());
XMMRegister res = ToDoubleRegister(instr->result());
@@ -1572,7 +1609,7 @@ void LCodeGen::DoConstantD(LConstantD* instr) {
void LCodeGen::DoConstantT(LConstantT* instr) {
Handle<Object> value = instr->value();
- ALLOW_HANDLE_DEREF(isolate(), "smi check");
+ AllowDeferredHandleDereference smi_check;
if (value->IsSmi()) {
__ Move(ToRegister(instr->result()), value);
} else {
@@ -1666,11 +1703,32 @@ void LCodeGen::DoDateField(LDateField* instr) {
void LCodeGen::DoSeqStringSetChar(LSeqStringSetChar* instr) {
- SeqStringSetCharGenerator::Generate(masm(),
- instr->encoding(),
- ToRegister(instr->string()),
- ToRegister(instr->index()),
- ToRegister(instr->value()));
+ Register string = ToRegister(instr->string());
+ Register index = ToRegister(instr->index());
+ Register value = ToRegister(instr->value());
+ String::Encoding encoding = instr->encoding();
+
+ if (FLAG_debug_code) {
+ __ push(value);
+ __ movq(value, FieldOperand(string, HeapObject::kMapOffset));
+ __ movzxbq(value, FieldOperand(value, Map::kInstanceTypeOffset));
+
+ __ andb(value, Immediate(kStringRepresentationMask | kStringEncodingMask));
+ static const uint32_t one_byte_seq_type = kSeqStringTag | kOneByteStringTag;
+ static const uint32_t two_byte_seq_type = kSeqStringTag | kTwoByteStringTag;
+ __ cmpq(value, Immediate(encoding == String::ONE_BYTE_ENCODING
+ ? one_byte_seq_type : two_byte_seq_type));
+ __ Check(equal, "Unexpected string type");
+ __ pop(value);
+ }
+
+ if (encoding == String::ONE_BYTE_ENCODING) {
+ __ movb(FieldOperand(string, index, times_1, SeqString::kHeaderSize),
+ value);
+ } else {
+ __ movw(FieldOperand(string, index, times_2, SeqString::kHeaderSize),
+ value);
+ }
}
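
The rewritten DoSeqStringSetChar inlines what the removed
SeqStringSetCharGenerator call used to emit: a debug-only check that the
receiver really is a sequential string of the expected encoding, then a
plain scaled store. Roughly, in C++, with chars standing for the payload
that starts at SeqString::kHeaderSize:

    #include <cstdint>
    void SeqStringSetChar(void* chars, int index, uint16_t value, bool one_byte) {
      if (one_byte) {
        static_cast<uint8_t*>(chars)[index] = static_cast<uint8_t>(value);  // movb, times_1
      } else {
        static_cast<uint16_t*>(chars)[index] = value;                       // movw, times_2
      }
    }
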
@@ -1874,10 +1932,17 @@ void LCodeGen::DoBranch(LBranch* instr) {
Representation r = instr->hydrogen()->value()->representation();
if (r.IsInteger32()) {
+ ASSERT(!info()->IsStub());
Register reg = ToRegister(instr->value());
__ testl(reg, reg);
EmitBranch(true_block, false_block, not_zero);
+ } else if (r.IsSmi()) {
+ ASSERT(!info()->IsStub());
+ Register reg = ToRegister(instr->value());
+ __ testq(reg, reg);
+ EmitBranch(true_block, false_block, not_zero);
} else if (r.IsDouble()) {
+ ASSERT(!info()->IsStub());
XMMRegister reg = ToDoubleRegister(instr->value());
__ xorps(xmm0, xmm0);
__ ucomisd(reg, xmm0);
@@ -1887,9 +1952,11 @@ void LCodeGen::DoBranch(LBranch* instr) {
Register reg = ToRegister(instr->value());
HType type = instr->hydrogen()->value()->type();
if (type.IsBoolean()) {
+ ASSERT(!info()->IsStub());
__ CompareRoot(reg, Heap::kTrueValueRootIndex);
EmitBranch(true_block, false_block, equal);
} else if (type.IsSmi()) {
+ ASSERT(!info()->IsStub());
__ SmiCompare(reg, Smi::FromInt(0));
EmitBranch(true_block, false_block, not_equal);
} else {
@@ -2043,16 +2110,32 @@ void LCodeGen::DoCmpIDAndBranch(LCmpIDAndBranch* instr) {
int32_t value;
if (right->IsConstantOperand()) {
value = ToInteger32(LConstantOperand::cast(right));
- __ cmpl(ToRegister(left), Immediate(value));
+ if (instr->hydrogen_value()->representation().IsSmi()) {
+ __ Cmp(ToRegister(left), Smi::FromInt(value));
+ } else {
+ __ cmpl(ToRegister(left), Immediate(value));
+ }
} else if (left->IsConstantOperand()) {
value = ToInteger32(LConstantOperand::cast(left));
- if (right->IsRegister()) {
+ if (instr->hydrogen_value()->representation().IsSmi()) {
+ if (right->IsRegister()) {
+ __ Cmp(ToRegister(right), Smi::FromInt(value));
+ } else {
+ __ Cmp(ToOperand(right), Smi::FromInt(value));
+ }
+ } else if (right->IsRegister()) {
__ cmpl(ToRegister(right), Immediate(value));
} else {
__ cmpl(ToOperand(right), Immediate(value));
}
// We transposed the operands. Reverse the condition.
cc = ReverseCondition(cc);
+ } else if (instr->hydrogen_value()->representation().IsSmi()) {
+ if (right->IsRegister()) {
+ __ cmpq(ToRegister(left), ToRegister(right));
+ } else {
+ __ cmpq(ToRegister(left), ToOperand(right));
+ }
} else {
if (right->IsRegister()) {
__ cmpl(ToRegister(left), ToRegister(right));
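
A note on the widths in DoCmpIDAndBranch: on this x64 port a smi keeps its
32-bit payload in the upper half of the word, so smi comparisons need all
64 bits (cmpq, or Cmp against the tagged constant), while untagged int32
values keep using cmpl. Note also that when the constant was on the left
the operands are transposed and the condition reversed (less becomes
greater), not negated. The tagged bit pattern being compared, as a sketch:

    #include <cstdint>
    // Assumed layout: payload shifted into the high word, low half zero.
    uint64_t SmiBits(int32_t payload) {
      return static_cast<uint64_t>(static_cast<uint32_t>(payload)) << 32;
    }
    // SmiBits(-1) == 0xFFFFFFFF00000000: only a 64-bit compare sees it all.
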
@@ -2072,9 +2155,11 @@ void LCodeGen::DoCmpObjectEqAndBranch(LCmpObjectEqAndBranch* instr) {
int true_block = chunk_->LookupDestination(instr->true_block_id());
if (instr->right()->IsConstantOperand()) {
- __ Cmp(left, ToHandle(LConstantOperand::cast(instr->right())));
+ Handle<Object> right = ToHandle(LConstantOperand::cast(instr->right()));
+ __ CmpObject(left, right);
} else {
- __ cmpq(left, ToRegister(instr->right()));
+ Register right = ToRegister(instr->right());
+ __ cmpq(left, right);
}
EmitBranch(true_block, false_block, equal);
}
@@ -2667,7 +2752,8 @@ void LCodeGen::DoStoreContextSlot(LStoreContextSlot* instr) {
void LCodeGen::DoLoadNamedField(LLoadNamedField* instr) {
- int offset = instr->hydrogen()->offset();
+ HObjectAccess access = instr->hydrogen()->access();
+ int offset = access.offset();
Register object = ToRegister(instr->object());
if (FLAG_track_double_fields &&
instr->hydrogen()->representation().IsDouble()) {
@@ -2677,7 +2763,7 @@ void LCodeGen::DoLoadNamedField(LLoadNamedField* instr) {
}
Register result = ToRegister(instr->result());
- if (instr->hydrogen()->is_in_object()) {
+ if (access.IsInobject()) {
__ movq(result, FieldOperand(object, offset));
} else {
__ movq(result, FieldOperand(object, JSObject::kPropertiesOffset));
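
For the HObjectAccess-based load above, the two shapes it chooses between,
sketched with word-sized loads (kPropertiesWord is a hypothetical index
standing in for JSObject::kPropertiesOffset):

    #include <cstdint>
    intptr_t LoadNamedField(intptr_t* object, int field_index,
                            bool in_object, int kPropertiesWord) {
      if (in_object) return object[field_index];  // single load
      intptr_t* props = reinterpret_cast<intptr_t*>(object[kPropertiesWord]);
      return props[field_index];                  // one extra dependent load
    }
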
@@ -2767,7 +2853,7 @@ void LCodeGen::DoLoadNamedFieldPolymorphic(LLoadNamedFieldPolymorphic* instr) {
bool last = (i == map_count - 1);
Handle<Map> map = instr->hydrogen()->types()->at(i);
Label check_passed;
- __ CompareMap(object, map, &check_passed, ALLOW_ELEMENT_TRANSITION_MAPS);
+ __ CompareMap(object, map, &check_passed);
if (last && !need_generic) {
DeoptimizeIf(not_equal, instr->environment());
__ bind(&check_passed);
@@ -2889,7 +2975,7 @@ void LCodeGen::DoLoadKeyedExternalArray(LLoadKeyed* instr) {
// gets replaced during bound check elimination with the index argument
// to the bounds check, which can be tagged, so that case must be
// handled here, too.
- if (instr->hydrogen()->key()->representation().IsTagged()) {
+ if (instr->hydrogen()->key()->representation().IsSmi()) {
__ SmiToInteger64(key_reg, key_reg);
} else if (instr->hydrogen()->IsDehoisted()) {
// Sign extend key because it could be a 32 bit negative value
@@ -2962,7 +3048,7 @@ void LCodeGen::DoLoadKeyedFixedDoubleArray(LLoadKeyed* instr) {
// representation for the key to be an integer, the input gets replaced
// during bound check elimination with the index argument to the bounds
// check, which can be tagged, so that case must be handled here, too.
- if (instr->hydrogen()->key()->representation().IsTagged()) {
+ if (instr->hydrogen()->key()->representation().IsSmi()) {
__ SmiToInteger64(key_reg, key_reg);
} else if (instr->hydrogen()->IsDehoisted()) {
// Sign extend key because it could be a 32 bit negative value
@@ -3004,7 +3090,7 @@ void LCodeGen::DoLoadKeyedFixedArray(LLoadKeyed* instr) {
// gets replaced during bound check elimination with the index
// argument to the bounds check, which can be tagged, so that
// case must be handled here, too.
- if (instr->hydrogen()->key()->representation().IsTagged()) {
+ if (instr->hydrogen()->key()->representation().IsSmi()) {
__ SmiToInteger64(key_reg, key_reg);
} else if (instr->hydrogen()->IsDehoisted()) {
// Sign extend key because it could be a 32 bit negative value
@@ -3614,7 +3700,10 @@ void LCodeGen::DoPower(LPower* instr) {
ASSERT(ToDoubleRegister(instr->left()).is(xmm2));
ASSERT(ToDoubleRegister(instr->result()).is(xmm3));
- if (exponent_type.IsTagged()) {
+ if (exponent_type.IsSmi()) {
+ MathPowStub stub(MathPowStub::TAGGED);
+ __ CallStub(&stub);
+ } else if (exponent_type.IsTagged()) {
Label no_deopt;
__ JumpIfSmi(exponent, &no_deopt);
__ CmpObjectType(exponent, HEAP_NUMBER_TYPE, rcx);
@@ -3868,14 +3957,17 @@ void LCodeGen::DoCallNewArray(LCallNewArray* instr) {
__ Set(rax, instr->arity());
__ Move(rbx, instr->hydrogen()->property_cell());
ElementsKind kind = instr->hydrogen()->elements_kind();
+ bool disable_allocation_sites =
+ (AllocationSiteInfo::GetMode(kind) == TRACK_ALLOCATION_SITE);
+
if (instr->arity() == 0) {
- ArrayNoArgumentConstructorStub stub(kind);
+ ArrayNoArgumentConstructorStub stub(kind, disable_allocation_sites);
CallCode(stub.GetCode(isolate()), RelocInfo::CONSTRUCT_CALL, instr);
} else if (instr->arity() == 1) {
- ArraySingleArgumentConstructorStub stub(kind);
+ ArraySingleArgumentConstructorStub stub(kind, disable_allocation_sites);
CallCode(stub.GetCode(isolate()), RelocInfo::CONSTRUCT_CALL, instr);
} else {
- ArrayNArgumentsConstructorStub stub(kind);
+ ArrayNArgumentsConstructorStub stub(kind, disable_allocation_sites);
CallCode(stub.GetCode(isolate()), RelocInfo::CONSTRUCT_CALL, instr);
}
}
@@ -3898,19 +3990,17 @@ void LCodeGen::DoStoreNamedField(LStoreNamedField* instr) {
Register object = ToRegister(instr->object());
- int offset = instr->offset();
+ HObjectAccess access = instr->hydrogen()->access();
+ int offset = access.offset();
Handle<Map> transition = instr->transition();
if (FLAG_track_fields && representation.IsSmi()) {
if (instr->value()->IsConstantOperand()) {
LConstantOperand* operand_value = LConstantOperand::cast(instr->value());
- if (!IsInteger32Constant(operand_value)) {
+ if (!IsSmiConstant(operand_value)) {
DeoptimizeIf(no_condition, instr->environment());
}
- } else {
- Register value = ToRegister(instr->value());
- __ Integer32ToSmi(value, value);
}
} else if (FLAG_track_heap_object_fields && representation.IsHeapObject()) {
if (instr->value()->IsConstantOperand()) {
@@ -3927,7 +4017,7 @@ void LCodeGen::DoStoreNamedField(LStoreNamedField* instr) {
}
} else if (FLAG_track_double_fields && representation.IsDouble()) {
ASSERT(transition.is_null());
- ASSERT(instr->is_in_object());
+ ASSERT(access.IsInobject());
ASSERT(!instr->hydrogen()->NeedsWriteBarrier());
XMMRegister value = ToDoubleRegister(instr->value());
__ movsd(FieldOperand(object, offset), value);
@@ -3961,19 +4051,14 @@ void LCodeGen::DoStoreNamedField(LStoreNamedField* instr) {
type.IsHeapObject() ? OMIT_SMI_CHECK : INLINE_SMI_CHECK;
Register write_register = object;
- if (!instr->is_in_object()) {
+ if (!access.IsInobject()) {
write_register = ToRegister(instr->temp());
__ movq(write_register, FieldOperand(object, JSObject::kPropertiesOffset));
}
if (instr->value()->IsConstantOperand()) {
LConstantOperand* operand_value = LConstantOperand::cast(instr->value());
- if (IsInteger32Constant(operand_value)) {
- // In lithium register preparation, we made sure that the constant integer
- // operand fits into smi range.
- Smi* smi_value = Smi::FromInt(ToInteger32(operand_value));
- __ Move(FieldOperand(write_register, offset), smi_value);
- } else if (operand_value->IsRegister()) {
+ if (operand_value->IsRegister()) {
__ movq(FieldOperand(write_register, offset),
ToRegister(operand_value));
} else {
@@ -3987,7 +4072,7 @@ void LCodeGen::DoStoreNamedField(LStoreNamedField* instr) {
if (instr->hydrogen()->NeedsWriteBarrier()) {
Register value = ToRegister(instr->value());
- Register temp = instr->is_in_object() ? ToRegister(instr->temp()) : object;
+ Register temp = access.IsInobject() ? ToRegister(instr->temp()) : object;
// Update the write barrier for the object for in-object properties.
__ RecordWriteField(write_register,
offset,
@@ -4017,20 +4102,20 @@ void LCodeGen::DoBoundsCheck(LBoundsCheck* instr) {
if (instr->length()->IsRegister()) {
Register reg = ToRegister(instr->length());
- if (!instr->hydrogen()->length()->representation().IsTagged()) {
+ if (!instr->hydrogen()->length()->representation().IsSmi()) {
__ AssertZeroExtended(reg);
}
if (instr->index()->IsConstantOperand()) {
int constant_index =
ToInteger32(LConstantOperand::cast(instr->index()));
- if (instr->hydrogen()->length()->representation().IsTagged()) {
+ if (instr->hydrogen()->length()->representation().IsSmi()) {
__ Cmp(reg, Smi::FromInt(constant_index));
} else {
__ cmpq(reg, Immediate(constant_index));
}
} else {
Register reg2 = ToRegister(instr->index());
- if (!instr->hydrogen()->index()->representation().IsTagged()) {
+ if (!instr->hydrogen()->index()->representation().IsSmi()) {
__ AssertZeroExtended(reg2);
}
__ cmpq(reg, reg2);
@@ -4040,7 +4125,7 @@ void LCodeGen::DoBoundsCheck(LBoundsCheck* instr) {
if (instr->index()->IsConstantOperand()) {
int constant_index =
ToInteger32(LConstantOperand::cast(instr->index()));
- if (instr->hydrogen()->length()->representation().IsTagged()) {
+ if (instr->hydrogen()->length()->representation().IsSmi()) {
__ Cmp(length, Smi::FromInt(constant_index));
} else {
__ cmpq(length, Immediate(constant_index));
@@ -4063,7 +4148,7 @@ void LCodeGen::DoStoreKeyedExternalArray(LStoreKeyed* instr) {
// gets replaced during bound check elimination with the index
// argument to the bounds check, which can be tagged, so that case
// must be handled here, too.
- if (instr->hydrogen()->key()->representation().IsTagged()) {
+ if (instr->hydrogen()->key()->representation().IsSmi()) {
__ SmiToInteger64(key_reg, key_reg);
} else if (instr->hydrogen()->IsDehoisted()) {
// Sign extend key because it could be a 32 bit negative value
@@ -4127,7 +4212,7 @@ void LCodeGen::DoStoreKeyedFixedDoubleArray(LStoreKeyed* instr) {
// input gets replaced during bound check elimination with the index
// argument to the bounds check, which can be tagged, so that case
// must be handled here, too.
- if (instr->hydrogen()->key()->representation().IsTagged()) {
+ if (instr->hydrogen()->key()->representation().IsSmi()) {
__ SmiToInteger64(key_reg, key_reg);
} else if (instr->hydrogen()->IsDehoisted()) {
// Sign extend key because it could be a 32 bit negative value
@@ -4170,7 +4255,7 @@ void LCodeGen::DoStoreKeyedFixedArray(LStoreKeyed* instr) {
// input gets replaced during bound check elimination with the index
// argument to the bounds check, which can be tagged, so that case
// must be handled here, too.
- if (instr->hydrogen()->key()->representation().IsTagged()) {
+ if (instr->hydrogen()->key()->representation().IsSmi()) {
__ SmiToInteger64(key_reg, key_reg);
} else if (instr->hydrogen()->IsDehoisted()) {
// Sign extend key because it could be a 32 bit negative value
@@ -4434,6 +4519,18 @@ void LCodeGen::DoInteger32ToDouble(LInteger32ToDouble* instr) {
}
+void LCodeGen::DoInteger32ToSmi(LInteger32ToSmi* instr) {
+ LOperand* input = instr->value();
+ ASSERT(input->IsRegister());
+ LOperand* output = instr->result();
+ __ Integer32ToSmi(ToRegister(output), ToRegister(input));
+ if (!instr->hydrogen()->value()->HasRange() ||
+ !instr->hydrogen()->value()->range()->IsInSmiRange()) {
+ DeoptimizeIf(overflow, instr->environment());
+ }
+}
+
+
void LCodeGen::DoUint32ToDouble(LUint32ToDouble* instr) {
LOperand* input = instr->value();
LOperand* output = instr->result();
@@ -4615,29 +4712,36 @@ void LCodeGen::DoSmiUntag(LSmiUntag* instr) {
void LCodeGen::EmitNumberUntagD(Register input_reg,
XMMRegister result_reg,
- bool deoptimize_on_undefined,
+ bool allow_undefined_as_nan,
bool deoptimize_on_minus_zero,
LEnvironment* env,
NumberUntagDMode mode) {
Label load_smi, done;
- if (mode == NUMBER_CANDIDATE_IS_ANY_TAGGED) {
+ STATIC_ASSERT(NUMBER_CANDIDATE_IS_ANY_TAGGED_CONVERT_HOLE >
+ NUMBER_CANDIDATE_IS_ANY_TAGGED);
+ if (mode >= NUMBER_CANDIDATE_IS_ANY_TAGGED) {
// Smi check.
__ JumpIfSmi(input_reg, &load_smi, Label::kNear);
// Heap number map check.
__ CompareRoot(FieldOperand(input_reg, HeapObject::kMapOffset),
Heap::kHeapNumberMapRootIndex);
- if (deoptimize_on_undefined) {
+ if (!allow_undefined_as_nan) {
DeoptimizeIf(not_equal, env);
} else {
- Label heap_number;
+ Label heap_number, convert;
__ j(equal, &heap_number, Label::kNear);
+ // Convert undefined (and hole) to NaN. Compute NaN as 0/0.
__ CompareRoot(input_reg, Heap::kUndefinedValueRootIndex);
+ if (mode == NUMBER_CANDIDATE_IS_ANY_TAGGED_CONVERT_HOLE) {
+ __ j(equal, &convert, Label::kNear);
+ __ CompareRoot(input_reg, Heap::kTheHoleValueRootIndex);
+ }
DeoptimizeIf(not_equal, env);
- // Convert undefined to NaN. Compute NaN as 0/0.
+ __ bind(&convert);
__ xorps(result_reg, result_reg);
__ divsd(result_reg, result_reg);
__ jmp(&done, Label::kNear);
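
The convert label above now serves both undefined and, in the new
NUMBER_CANDIDATE_IS_ANY_TAGGED_CONVERT_HOLE mode, the hole; both become
NaN, produced by zeroing a register and dividing it by itself. The same
trick in C++ (volatile keeps the compiler from folding it; IEEE-754
semantics assumed):

    double NaNFromZeroDivZero() {
      volatile double zero = 0.0;  // xorps result_reg, result_reg
      return zero / zero;          // divsd result_reg, result_reg -> quiet NaN
    }
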
@@ -4656,16 +4760,6 @@ void LCodeGen::EmitNumberUntagD(Register input_reg,
DeoptimizeIf(not_zero, env);
}
__ jmp(&done, Label::kNear);
- } else if (mode == NUMBER_CANDIDATE_IS_SMI_OR_HOLE) {
- __ testq(input_reg, Immediate(kSmiTagMask));
- DeoptimizeIf(not_equal, env);
- } else if (mode == NUMBER_CANDIDATE_IS_SMI_CONVERT_HOLE) {
- __ testq(input_reg, Immediate(kSmiTagMask));
- __ j(zero, &load_smi);
- __ Set(kScratchRegister, BitCast<uint64_t>(
- FixedDoubleArray::hole_nan_as_double()));
- __ movq(result_reg, kScratchRegister);
- __ jmp(&done, Label::kNear);
} else {
ASSERT(mode == NUMBER_CANDIDATE_IS_SMI);
}
@@ -4760,24 +4854,16 @@ void LCodeGen::DoNumberUntagD(LNumberUntagD* instr) {
NumberUntagDMode mode = NUMBER_CANDIDATE_IS_ANY_TAGGED;
HValue* value = instr->hydrogen()->value();
if (value->type().IsSmi()) {
- if (value->IsLoadKeyed()) {
- HLoadKeyed* load = HLoadKeyed::cast(value);
- if (load->UsesMustHandleHole()) {
- if (load->hole_mode() == ALLOW_RETURN_HOLE) {
- mode = NUMBER_CANDIDATE_IS_SMI_CONVERT_HOLE;
- } else {
- mode = NUMBER_CANDIDATE_IS_SMI_OR_HOLE;
- }
- } else {
- mode = NUMBER_CANDIDATE_IS_SMI;
- }
- } else {
- mode = NUMBER_CANDIDATE_IS_SMI;
+ mode = NUMBER_CANDIDATE_IS_SMI;
+ } else if (value->IsLoadKeyed()) {
+ HLoadKeyed* load = HLoadKeyed::cast(value);
+ if (load->UsesMustHandleHole()) {
+ mode = NUMBER_CANDIDATE_IS_ANY_TAGGED_CONVERT_HOLE;
}
}
EmitNumberUntagD(input_reg, result_reg,
- instr->hydrogen()->deoptimize_on_undefined(),
+ instr->hydrogen()->allow_undefined_as_nan(),
instr->hydrogen()->deoptimize_on_minus_zero(),
instr->environment(),
mode);
@@ -4826,6 +4912,41 @@ void LCodeGen::DoDoubleToI(LDoubleToI* instr) {
}
+void LCodeGen::DoDoubleToSmi(LDoubleToSmi* instr) {
+ LOperand* input = instr->value();
+ ASSERT(input->IsDoubleRegister());
+ LOperand* result = instr->result();
+ ASSERT(result->IsRegister());
+ CpuFeatureScope scope(masm(), SSE2);
+
+ XMMRegister input_reg = ToDoubleRegister(input);
+ Register result_reg = ToRegister(result);
+
+ Label done;
+ __ cvttsd2si(result_reg, input_reg);
+ __ cvtlsi2sd(xmm0, result_reg);
+ __ ucomisd(xmm0, input_reg);
+ DeoptimizeIf(not_equal, instr->environment());
+ DeoptimizeIf(parity_even, instr->environment()); // NaN.
+
+ if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) {
+ // The integer converted back is equal to the original. We
+ // only have to test if we got -0 as an input.
+ __ testl(result_reg, result_reg);
+ __ j(not_zero, &done, Label::kNear);
+ __ movmskpd(result_reg, input_reg);
+ // Bit 0 contains the sign of the double in input_reg.
+ // If input was positive, we are ok and return 0, otherwise
+ // deoptimize.
+ __ andl(result_reg, Immediate(1));
+ DeoptimizeIf(not_zero, instr->environment());
+ __ bind(&done);
+ }
+ __ Integer32ToSmi(result_reg, result_reg);
+ DeoptimizeIf(overflow, instr->environment());
+}
+
+
void LCodeGen::DoCheckSmi(LCheckSmi* instr) {
LOperand* input = instr->value();
Condition cc = masm()->CheckSmi(ToRegister(input));
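
The new DoDoubleToSmi uses the standard exact-conversion idiom: truncate,
convert back, and compare, with ucomisd's parity flag catching NaN and
movmskpd exposing the sign bit so that -0.0, which compares equal to 0, can
still deoptimize. A sketch, assuming cvttsd2si-like behavior for in-range
inputs (an out-of-range static_cast is UB in portable C++):

    #include <cmath>
    #include <cstdint>
    bool DoubleToInt32Exact(double input, bool bailout_on_minus_zero,
                            int32_t* out) {
      int32_t i = static_cast<int32_t>(input);            // cvttsd2si
      if (static_cast<double>(i) != input) return false;  // inexact or NaN
      if (bailout_on_minus_zero && i == 0 && std::signbit(input))
        return false;                                     // movmskpd bit 0
      *out = i;                                           // then Integer32ToSmi
      return true;
    }
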
@@ -4889,25 +5010,16 @@ void LCodeGen::DoCheckInstanceType(LCheckInstanceType* instr) {
void LCodeGen::DoCheckFunction(LCheckFunction* instr) {
Register reg = ToRegister(instr->value());
Handle<JSFunction> target = instr->hydrogen()->target();
- ALLOW_HANDLE_DEREF(isolate(), "using raw address");
- if (isolate()->heap()->InNewSpace(*target)) {
- Handle<JSGlobalPropertyCell> cell =
- isolate()->factory()->NewJSGlobalPropertyCell(target);
- __ movq(kScratchRegister, cell, RelocInfo::GLOBAL_PROPERTY_CELL);
- __ cmpq(reg, Operand(kScratchRegister, 0));
- } else {
- __ Cmp(reg, target);
- }
+ __ CmpHeapObject(reg, target);
DeoptimizeIf(not_equal, instr->environment());
}
void LCodeGen::DoCheckMapCommon(Register reg,
Handle<Map> map,
- CompareMapMode mode,
LInstruction* instr) {
Label success;
- __ CompareMap(reg, map, &success, mode);
+ __ CompareMap(reg, map, &success);
DeoptimizeIf(not_equal, instr->environment());
__ bind(&success);
}
@@ -4922,11 +5034,11 @@ void LCodeGen::DoCheckMaps(LCheckMaps* instr) {
SmallMapList* map_set = instr->hydrogen()->map_set();
for (int i = 0; i < map_set->length() - 1; i++) {
Handle<Map> map = map_set->at(i);
- __ CompareMap(reg, map, &success, REQUIRE_EXACT_MAP);
+ __ CompareMap(reg, map, &success);
__ j(equal, &success);
}
Handle<Map> map = map_set->last();
- DoCheckMapCommon(reg, map, REQUIRE_EXACT_MAP, instr);
+ DoCheckMapCommon(reg, map, instr);
__ bind(&success);
}
@@ -4995,100 +5107,12 @@ void LCodeGen::DoCheckPrototypeMaps(LCheckPrototypeMaps* instr) {
} else {
for (int i = 0; i < prototypes->length(); i++) {
__ LoadHeapObject(reg, prototypes->at(i));
- DoCheckMapCommon(reg, maps->at(i), ALLOW_ELEMENT_TRANSITION_MAPS, instr);
+ DoCheckMapCommon(reg, maps->at(i), instr);
}
}
}
-void LCodeGen::DoAllocateObject(LAllocateObject* instr) {
- class DeferredAllocateObject: public LDeferredCode {
- public:
- DeferredAllocateObject(LCodeGen* codegen, LAllocateObject* instr)
- : LDeferredCode(codegen), instr_(instr) { }
- virtual void Generate() { codegen()->DoDeferredAllocateObject(instr_); }
- virtual LInstruction* instr() { return instr_; }
- private:
- LAllocateObject* instr_;
- };
-
- DeferredAllocateObject* deferred =
- new(zone()) DeferredAllocateObject(this, instr);
-
- Register result = ToRegister(instr->result());
- Register scratch = ToRegister(instr->temp());
- Handle<JSFunction> constructor = instr->hydrogen()->constructor();
- Handle<Map> initial_map = instr->hydrogen()->constructor_initial_map();
- int instance_size = initial_map->instance_size();
- ASSERT(initial_map->pre_allocated_property_fields() +
- initial_map->unused_property_fields() -
- initial_map->inobject_properties() == 0);
-
- __ Allocate(instance_size, result, no_reg, scratch, deferred->entry(),
- TAG_OBJECT);
-
- __ bind(deferred->exit());
- if (FLAG_debug_code) {
- Label is_in_new_space;
- __ JumpIfInNewSpace(result, scratch, &is_in_new_space);
- __ Abort("Allocated object is not in new-space");
- __ bind(&is_in_new_space);
- }
-
- // Load the initial map.
- Register map = scratch;
- __ LoadHeapObject(scratch, constructor);
- __ movq(map, FieldOperand(scratch, JSFunction::kPrototypeOrInitialMapOffset));
-
- if (FLAG_debug_code) {
- __ AssertNotSmi(map);
- __ cmpb(FieldOperand(map, Map::kInstanceSizeOffset),
- Immediate(instance_size >> kPointerSizeLog2));
- __ Assert(equal, "Unexpected instance size");
- __ cmpb(FieldOperand(map, Map::kPreAllocatedPropertyFieldsOffset),
- Immediate(initial_map->pre_allocated_property_fields()));
- __ Assert(equal, "Unexpected pre-allocated property fields count");
- __ cmpb(FieldOperand(map, Map::kUnusedPropertyFieldsOffset),
- Immediate(initial_map->unused_property_fields()));
- __ Assert(equal, "Unexpected unused property fields count");
- __ cmpb(FieldOperand(map, Map::kInObjectPropertiesOffset),
- Immediate(initial_map->inobject_properties()));
- __ Assert(equal, "Unexpected in-object property fields count");
- }
-
- // Initialize map and fields of the newly allocated object.
- ASSERT(initial_map->instance_type() == JS_OBJECT_TYPE);
- __ movq(FieldOperand(result, JSObject::kMapOffset), map);
- __ LoadRoot(scratch, Heap::kEmptyFixedArrayRootIndex);
- __ movq(FieldOperand(result, JSObject::kElementsOffset), scratch);
- __ movq(FieldOperand(result, JSObject::kPropertiesOffset), scratch);
- if (initial_map->inobject_properties() != 0) {
- __ LoadRoot(scratch, Heap::kUndefinedValueRootIndex);
- for (int i = 0; i < initial_map->inobject_properties(); i++) {
- int property_offset = JSObject::kHeaderSize + i * kPointerSize;
- __ movq(FieldOperand(result, property_offset), scratch);
- }
- }
-}
-
-
-void LCodeGen::DoDeferredAllocateObject(LAllocateObject* instr) {
- Register result = ToRegister(instr->result());
- Handle<Map> initial_map = instr->hydrogen()->constructor_initial_map();
- int instance_size = initial_map->instance_size();
-
- // TODO(3095996): Get rid of this. For now, we need to make the
- // result register contain a valid pointer because it is already
- // contained in the register pointer map.
- __ Set(result, 0);
-
- PushSafepointRegistersScope scope(this);
- __ Push(Smi::FromInt(instance_size));
- CallRuntimeFromDeferred(Runtime::kAllocateInNewSpace, 1, instr);
- __ StoreToSafepointRegisterSlot(result, rax);
-}
-
-
void LCodeGen::DoAllocate(LAllocate* instr) {
class DeferredAllocate: public LDeferredCode {
public:
@@ -5112,8 +5136,12 @@ void LCodeGen::DoAllocate(LAllocate* instr) {
flags = static_cast<AllocationFlags>(flags | DOUBLE_ALIGNMENT);
}
if (instr->hydrogen()->CanAllocateInOldPointerSpace()) {
+ ASSERT(!instr->hydrogen()->CanAllocateInOldDataSpace());
flags = static_cast<AllocationFlags>(flags | PRETENURE_OLD_POINTER_SPACE);
+ } else if (instr->hydrogen()->CanAllocateInOldDataSpace()) {
+ flags = static_cast<AllocationFlags>(flags | PRETENURE_OLD_DATA_SPACE);
}
+
if (instr->size()->IsConstantOperand()) {
int32_t size = ToInteger32(LConstantOperand::cast(instr->size()));
__ Allocate(size, result, temp, no_reg, deferred->entry(), flags);
@@ -5146,11 +5174,12 @@ void LCodeGen::DoDeferredAllocate(LAllocate* instr) {
}
if (instr->hydrogen()->CanAllocateInOldPointerSpace()) {
- CallRuntimeFromDeferred(
- Runtime::kAllocateInOldPointerSpace, 1, instr);
+ ASSERT(!instr->hydrogen()->CanAllocateInOldDataSpace());
+ CallRuntimeFromDeferred(Runtime::kAllocateInOldPointerSpace, 1, instr);
+ } else if (instr->hydrogen()->CanAllocateInOldDataSpace()) {
+ CallRuntimeFromDeferred(Runtime::kAllocateInOldDataSpace, 1, instr);
} else {
- CallRuntimeFromDeferred(
- Runtime::kAllocateInNewSpace, 1, instr);
+ CallRuntimeFromDeferred(Runtime::kAllocateInNewSpace, 1, instr);
}
__ StoreToSafepointRegisterSlot(result, rax);
}
@@ -5243,7 +5272,7 @@ void LCodeGen::EmitPushTaggedOperand(LOperand* operand) {
ASSERT(!operand->IsDoubleRegister());
if (operand->IsConstantOperand()) {
Handle<Object> object = ToHandle(LConstantOperand::cast(operand));
- ALLOW_HANDLE_DEREF(isolate(), "smi check");
+ AllowDeferredHandleDereference smi_check;
if (object->IsSmi()) {
__ Push(Handle<Smi>::cast(object));
} else {
diff --git a/deps/v8/src/x64/lithium-codegen-x64.h b/deps/v8/src/x64/lithium-codegen-x64.h
index d0dd90eeb6..07a948c113 100644
--- a/deps/v8/src/x64/lithium-codegen-x64.h
+++ b/deps/v8/src/x64/lithium-codegen-x64.h
@@ -104,7 +104,9 @@ class LCodeGen BASE_EMBEDDED {
Register ToRegister(LOperand* op) const;
XMMRegister ToDoubleRegister(LOperand* op) const;
bool IsInteger32Constant(LConstantOperand* op) const;
+ bool IsSmiConstant(LConstantOperand* op) const;
int ToInteger32(LConstantOperand* op) const;
+ Smi* ToSmi(LConstantOperand* op) const;
double ToDouble(LConstantOperand* op) const;
bool IsTaggedConstant(LConstantOperand* op) const;
Handle<Object> ToHandle(LConstantOperand* op) const;
@@ -128,13 +130,11 @@ class LCodeGen BASE_EMBEDDED {
void DoDeferredRandom(LRandom* instr);
void DoDeferredStringCharCodeAt(LStringCharCodeAt* instr);
void DoDeferredStringCharFromCode(LStringCharFromCode* instr);
- void DoDeferredAllocateObject(LAllocateObject* instr);
void DoDeferredAllocate(LAllocate* instr);
void DoDeferredInstanceOfKnownGlobal(LInstanceOfKnownGlobal* instr,
Label* map_check);
- void DoCheckMapCommon(Register reg, Handle<Map> map,
- CompareMapMode mode, LInstruction* instr);
+ void DoCheckMapCommon(Register reg, Handle<Map> map, LInstruction* instr);
// Parallel move support.
void DoParallelMove(LParallelMove* move);
@@ -294,7 +294,7 @@ class LCodeGen BASE_EMBEDDED {
void EmitNumberUntagD(
Register input,
XMMRegister result,
- bool deoptimize_on_undefined,
+ bool allow_undefined_as_nan,
bool deoptimize_on_minus_zero,
LEnvironment* env,
NumberUntagDMode mode = NUMBER_CANDIDATE_IS_ANY_TAGGED);
diff --git a/deps/v8/src/x64/lithium-gap-resolver-x64.cc b/deps/v8/src/x64/lithium-gap-resolver-x64.cc
index 22183a2f8d..fd74e0aacd 100644
--- a/deps/v8/src/x64/lithium-gap-resolver-x64.cc
+++ b/deps/v8/src/x64/lithium-gap-resolver-x64.cc
@@ -195,7 +195,9 @@ void LGapResolver::EmitMove(int index) {
LConstantOperand* constant_source = LConstantOperand::cast(source);
if (destination->IsRegister()) {
Register dst = cgen_->ToRegister(destination);
- if (cgen_->IsInteger32Constant(constant_source)) {
+ if (cgen_->IsSmiConstant(constant_source)) {
+ __ Move(dst, cgen_->ToSmi(constant_source));
+ } else if (cgen_->IsInteger32Constant(constant_source)) {
__ movl(dst, Immediate(cgen_->ToInteger32(constant_source)));
} else {
__ LoadObject(dst, cgen_->ToHandle(constant_source));
@@ -203,7 +205,9 @@ void LGapResolver::EmitMove(int index) {
} else {
ASSERT(destination->IsStackSlot());
Operand dst = cgen_->ToOperand(destination);
- if (cgen_->IsInteger32Constant(constant_source)) {
+ if (cgen_->IsSmiConstant(constant_source)) {
+ __ Move(dst, cgen_->ToSmi(constant_source));
+ } else if (cgen_->IsInteger32Constant(constant_source)) {
// Zero top 32 bits of a 64 bit spill slot that holds a 32 bit untagged
// value.
__ movq(dst, Immediate(cgen_->ToInteger32(constant_source)));
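
The gap-resolver split falls out of the smi layout: an untagged int32 fits
a sign-extended 32-bit immediate, but a tagged smi lives in the upper word
and has to be materialized with a full 64-bit Move first. A quick check of
why no smi with a nonzero payload is encodable as an immediate:

    #include <cstdint>
    // movq's immediate form only holds a sign-extended 32-bit value.
    bool FitsInt32Immediate(uint64_t bits) {
      return static_cast<int64_t>(bits) ==
             static_cast<int64_t>(static_cast<int32_t>(bits));
    }
    // With payload << 32 tagging, Smi(1) == 0x0000000100000000 fails this.
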
diff --git a/deps/v8/src/x64/lithium-x64.cc b/deps/v8/src/x64/lithium-x64.cc
index 1217a4000d..cb0659d24d 100644
--- a/deps/v8/src/x64/lithium-x64.cc
+++ b/deps/v8/src/x64/lithium-x64.cc
@@ -391,8 +391,7 @@ LOperand* LPlatformChunk::GetNextSpillSlot(bool is_double) {
void LStoreNamedField::PrintDataTo(StringStream* stream) {
object()->PrintTo(stream);
- stream->Add(".");
- stream->Add(*String::cast(*name())->ToCString());
+ hydrogen()->access().PrintTo(stream);
stream->Add(" <- ");
value()->PrintTo(stream);
}
@@ -428,7 +427,14 @@ void LStoreKeyed::PrintDataTo(StringStream* stream) {
} else {
stream->Add("] <- ");
}
- value()->PrintTo(stream);
+
+ if (value() == NULL) {
+ ASSERT(hydrogen()->IsConstantHoleStore() &&
+ hydrogen()->value()->representation().IsDouble());
+ stream->Add("<the hole(nan)>");
+ } else {
+ value()->PrintTo(stream);
+ }
}
@@ -707,6 +713,12 @@ LInstruction* LChunkBuilder::DoDummyUse(HDummyUse* instr) {
}
+LInstruction* LChunkBuilder::DoEnvironmentMarker(HEnvironmentMarker* instr) {
+ UNREACHABLE();
+ return NULL;
+}
+
+
LInstruction* LChunkBuilder::DoSoftDeoptimize(HSoftDeoptimize* instr) {
return AssignEnvironment(new(zone()) LDeoptimize);
}
@@ -719,9 +731,9 @@ LInstruction* LChunkBuilder::DoDeoptimize(HDeoptimize* instr) {
LInstruction* LChunkBuilder::DoShift(Token::Value op,
HBitwiseBinaryOperation* instr) {
- if (instr->representation().IsTagged()) {
- ASSERT(instr->left()->representation().IsTagged());
- ASSERT(instr->right()->representation().IsTagged());
+ if (instr->representation().IsSmiOrTagged()) {
+ ASSERT(instr->left()->representation().IsSmiOrTagged());
+ ASSERT(instr->right()->representation().IsSmiOrTagged());
LOperand* left = UseFixed(instr->left(), rdx);
LOperand* right = UseFixed(instr->right(), rax);
@@ -789,8 +801,8 @@ LInstruction* LChunkBuilder::DoArithmeticT(Token::Value op,
op == Token::SUB);
HValue* left = instr->left();
HValue* right = instr->right();
- ASSERT(left->representation().IsTagged());
- ASSERT(right->representation().IsTagged());
+ ASSERT(left->representation().IsSmiOrTagged());
+ ASSERT(right->representation().IsSmiOrTagged());
LOperand* left_operand = UseFixed(left, rdx);
LOperand* right_operand = UseFixed(right, rax);
LArithmeticT* result =
@@ -1309,9 +1321,9 @@ LInstruction* LChunkBuilder::DoBitwise(HBitwise* instr) {
LOperand* right = UseOrConstantAtStart(instr->BetterRightOperand());
return DefineSameAsFirst(new(zone()) LBitI(left, right));
} else {
- ASSERT(instr->representation().IsTagged());
- ASSERT(instr->left()->representation().IsTagged());
- ASSERT(instr->right()->representation().IsTagged());
+ ASSERT(instr->representation().IsSmiOrTagged());
+ ASSERT(instr->left()->representation().IsSmiOrTagged());
+ ASSERT(instr->right()->representation().IsSmiOrTagged());
LOperand* left = UseFixed(instr->left(), rdx);
LOperand* right = UseFixed(instr->right(), rax);
@@ -1350,7 +1362,7 @@ LInstruction* LChunkBuilder::DoDiv(HDiv* instr) {
LDivI* result = new(zone()) LDivI(dividend, divisor, temp);
return AssignEnvironment(DefineFixed(result, rax));
} else {
- ASSERT(instr->representation().IsTagged());
+ ASSERT(instr->representation().IsSmiOrTagged());
return DoArithmeticT(Token::DIV, instr);
}
}
@@ -1425,43 +1437,54 @@ LInstruction* LChunkBuilder::DoMathFloorOfDiv(HMathFloorOfDiv* instr) {
LInstruction* LChunkBuilder::DoMod(HMod* instr) {
+ HValue* left = instr->left();
+ HValue* right = instr->right();
if (instr->representation().IsInteger32()) {
- ASSERT(instr->left()->representation().IsInteger32());
- ASSERT(instr->right()->representation().IsInteger32());
-
- LInstruction* result;
+ ASSERT(left->representation().IsInteger32());
+ ASSERT(right->representation().IsInteger32());
if (instr->HasPowerOf2Divisor()) {
- ASSERT(!instr->CheckFlag(HValue::kCanBeDivByZero));
- LOperand* value = UseRegisterAtStart(instr->left());
- LModI* mod =
- new(zone()) LModI(value, UseOrConstant(instr->right()), NULL);
- result = DefineSameAsFirst(mod);
+ ASSERT(!right->CanBeZero());
+ LModI* mod = new(zone()) LModI(UseRegisterAtStart(left),
+ UseOrConstant(right),
+ NULL);
+ LInstruction* result = DefineSameAsFirst(mod);
+ return (left->CanBeNegative() &&
+ instr->CheckFlag(HValue::kBailoutOnMinusZero))
+ ? AssignEnvironment(result)
+ : result;
+ } else if (instr->has_fixed_right_arg()) {
+ LModI* mod = new(zone()) LModI(UseRegister(left),
+ UseRegisterAtStart(right),
+ NULL);
+ return AssignEnvironment(DefineSameAsFirst(mod));
} else {
      // The temporary operand is necessary to ensure that right is not
      // allocated into rdx.
- LOperand* temp = FixedTemp(rdx);
- LOperand* value = UseFixed(instr->left(), rax);
- LOperand* divisor = UseRegister(instr->right());
- LModI* mod = new(zone()) LModI(value, divisor, temp);
- result = DefineFixed(mod, rdx);
+ LModI* mod = new(zone()) LModI(UseFixed(left, rax),
+ UseRegister(right),
+ FixedTemp(rdx));
+ LInstruction* result = DefineFixed(mod, rdx);
+ return (right->CanBeZero() ||
+ (left->RangeCanInclude(kMinInt) &&
+ right->RangeCanInclude(-1) &&
+ instr->CheckFlag(HValue::kBailoutOnMinusZero)) ||
+ (left->CanBeNegative() &&
+ instr->CanBeZero() &&
+ instr->CheckFlag(HValue::kBailoutOnMinusZero)))
+ ? AssignEnvironment(result)
+ : result;
}
-
- return (instr->CheckFlag(HValue::kBailoutOnMinusZero) ||
- instr->CheckFlag(HValue::kCanBeDivByZero) ||
- instr->CheckFlag(HValue::kCanOverflow))
- ? AssignEnvironment(result)
- : result;
- } else if (instr->representation().IsTagged()) {
+ } else if (instr->representation().IsSmiOrTagged()) {
return DoArithmeticT(Token::MOD, instr);
} else {
ASSERT(instr->representation().IsDouble());
- // We call a C function for double modulo. It can't trigger a GC.
- // We need to use fixed result register for the call.
+    // We call a C function for double modulo. It can't trigger a GC. We need
+    // to use a fixed result register for the call.
// TODO(fschneider): Allow any register as input registers.
- LOperand* left = UseFixedDouble(instr->left(), xmm2);
- LOperand* right = UseFixedDouble(instr->right(), xmm1);
- LArithmeticD* result = new(zone()) LArithmeticD(Token::MOD, left, right);
- return MarkAsCall(DefineFixedDouble(result, xmm1), instr);
+ LArithmeticD* mod = new(zone()) LArithmeticD(Token::MOD,
+ UseFixedDouble(left, xmm2),
+ UseFixedDouble(right, xmm1));
+ return MarkAsCall(DefineFixedDouble(mod, xmm1), instr);
}
}
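
The rewritten generic DoMod path spells out its deopt conditions per
operand instead of checking blanket instruction flags, and FixedTemp(rdx)
keeps the divisor out of rdx because idivl writes the remainder there. The
predicate it encodes, transcribed into a standalone function:

    bool GenericModNeedsEnvironment(bool right_can_be_zero,
                                    bool left_can_be_min_int,
                                    bool right_can_be_minus_one,
                                    bool left_can_be_negative,
                                    bool result_can_be_zero,
                                    bool bailout_on_minus_zero) {
      return right_can_be_zero ||                  // x % 0 must deopt
             (left_can_be_min_int && right_can_be_minus_one &&
              bailout_on_minus_zero) ||            // kMinInt % -1 -> -0
             (left_can_be_negative && result_can_be_zero &&
              bailout_on_minus_zero);              // -x % y == 0 -> -0
    }
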
@@ -1481,7 +1504,7 @@ LInstruction* LChunkBuilder::DoMul(HMul* instr) {
} else if (instr->representation().IsDouble()) {
return DoArithmeticD(Token::MUL, instr);
} else {
- ASSERT(instr->representation().IsTagged());
+ ASSERT(instr->representation().IsSmiOrTagged());
return DoArithmeticT(Token::MUL, instr);
}
}
@@ -1502,7 +1525,7 @@ LInstruction* LChunkBuilder::DoSub(HSub* instr) {
} else if (instr->representation().IsDouble()) {
return DoArithmeticD(Token::SUB, instr);
} else {
- ASSERT(instr->representation().IsTagged());
+ ASSERT(instr->representation().IsSmiOrTagged());
return DoArithmeticT(Token::SUB, instr);
}
}
@@ -1534,7 +1557,7 @@ LInstruction* LChunkBuilder::DoAdd(HAdd* instr) {
} else if (instr->representation().IsDouble()) {
return DoArithmeticD(Token::ADD, instr);
} else {
- ASSERT(instr->representation().IsTagged());
+ ASSERT(instr->representation().IsSmiOrTagged());
return DoArithmeticT(Token::ADD, instr);
}
return NULL;
@@ -1598,9 +1621,10 @@ LInstruction* LChunkBuilder::DoCompareGeneric(HCompareGeneric* instr) {
LInstruction* LChunkBuilder::DoCompareIDAndBranch(
HCompareIDAndBranch* instr) {
Representation r = instr->representation();
- if (r.IsInteger32()) {
- ASSERT(instr->left()->representation().IsInteger32());
- ASSERT(instr->right()->representation().IsInteger32());
+ if (r.IsSmiOrInteger32()) {
+ ASSERT(instr->left()->representation().IsSmiOrInteger32());
+ ASSERT(instr->left()->representation().Equals(
+ instr->right()->representation()));
LOperand* left = UseRegisterOrConstantAtStart(instr->left());
LOperand* right = UseOrConstantAtStart(instr->right());
return new(zone()) LCmpIDAndBranch(left, right);
@@ -1812,6 +1836,13 @@ LInstruction* LChunkBuilder::DoForceRepresentation(HForceRepresentation* bad) {
LInstruction* LChunkBuilder::DoChange(HChange* instr) {
Representation from = instr->from();
Representation to = instr->to();
+ if (from.IsSmi()) {
+ if (to.IsTagged()) {
+ LOperand* value = UseRegister(instr->value());
+ return DefineSameAsFirst(new(zone()) LDummyUse(value));
+ }
+ from = Representation::Tagged();
+ }
// Only mark conversions that might need to allocate as calling rather than
// all changes. This makes simple, non-allocating conversion not have to force
// building a stack frame.
@@ -1821,6 +1852,13 @@ LInstruction* LChunkBuilder::DoChange(HChange* instr) {
LOperand* value = UseRegister(instr->value());
LNumberUntagD* res = new(zone()) LNumberUntagD(value);
return AssignEnvironment(DefineAsRegister(res));
+ } else if (to.IsSmi()) {
+ HValue* val = instr->value();
+ LOperand* value = UseRegister(val);
+ if (val->type().IsSmi()) {
+ return DefineSameAsFirst(new(zone()) LDummyUse(value));
+ }
+ return AssignEnvironment(DefineSameAsFirst(new(zone()) LCheckSmi(value)));
} else {
ASSERT(to.IsInteger32());
LOperand* value = UseRegister(instr->value());
@@ -1843,10 +1881,15 @@ LInstruction* LChunkBuilder::DoChange(HChange* instr) {
LUnallocated* result_temp = TempRegister();
LNumberTagD* result = new(zone()) LNumberTagD(value, temp);
return AssignPointerMap(Define(result, result_temp));
+ } else if (to.IsSmi()) {
+ LOperand* value = UseRegister(instr->value());
+ return AssignEnvironment(
+ DefineAsRegister(new(zone()) LDoubleToSmi(value)));
} else {
ASSERT(to.IsInteger32());
LOperand* value = UseRegister(instr->value());
- return AssignEnvironment(DefineAsRegister(new(zone()) LDoubleToI(value)));
+ return AssignEnvironment(
+ DefineAsRegister(new(zone()) LDoubleToI(value)));
}
} else if (from.IsInteger32()) {
info()->MarkAsDeferredCalling();
@@ -1863,6 +1906,15 @@ LInstruction* LChunkBuilder::DoChange(HChange* instr) {
LNumberTagI* result = new(zone()) LNumberTagI(value);
return AssignEnvironment(AssignPointerMap(DefineSameAsFirst(result)));
}
+ } else if (to.IsSmi()) {
+ HValue* val = instr->value();
+ LOperand* value = UseRegister(val);
+ LInstruction* result =
+ DefineAsRegister(new(zone()) LInteger32ToSmi(value));
+ if (val->HasRange() && val->range()->IsInSmiRange()) {
+ return result;
+ }
+ return AssignEnvironment(result);
} else {
if (instr->value()->CheckFlag(HInstruction::kUint32)) {
LOperand* temp = FixedTemp(xmm1);
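
The int32-to-smi case a few lines up only gets a deopt environment when
range analysis cannot prove the value fits a smi. What "fits" means,
parameterized by payload width (this x64 port uses 32 payload bits, so
every int32 trivially fits; 31 bits is the ia32 layout the guard exists
for):

    #include <cstdint>
    bool InSmiRange(int64_t lower, int64_t upper, int payload_bits) {
      const int64_t min = -(INT64_C(1) << (payload_bits - 1));
      const int64_t max = (INT64_C(1) << (payload_bits - 1)) - 1;
      return lower >= min && upper <= max;
    }
    // InSmiRange(0, 255, 31) -> true: no overflow check is emitted.
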
@@ -1900,18 +1952,6 @@ LInstruction* LChunkBuilder::DoCheckPrototypeMaps(HCheckPrototypeMaps* instr) {
}
-LInstruction* LChunkBuilder::DoCheckSmi(HCheckSmi* instr) {
- LOperand* value = UseRegisterAtStart(instr->value());
- return AssignEnvironment(new(zone()) LCheckSmi(value));
-}
-
-
-LInstruction* LChunkBuilder::DoCheckSmiOrInt32(HCheckSmiOrInt32* instr) {
- LOperand* value = UseRegisterAtStart(instr->value());
- return AssignEnvironment(new(zone()) LCheckSmi(value));
-}
-
-
LInstruction* LChunkBuilder::DoCheckFunction(HCheckFunction* instr) {
LOperand* value = UseRegisterAtStart(instr->value());
return AssignEnvironment(new(zone()) LCheckFunction(value));
@@ -1934,7 +1974,7 @@ LInstruction* LChunkBuilder::DoClampToUint8(HClampToUint8* instr) {
} else if (input_rep.IsInteger32()) {
return DefineSameAsFirst(new(zone()) LClampIToUint8(reg));
} else {
- ASSERT(input_rep.IsTagged());
+ ASSERT(input_rep.IsSmiOrTagged());
// Register allocator doesn't (yet) support allocation of double
// temps. Reserve xmm1 explicitly.
LClampTToUint8* result = new(zone()) LClampTToUint8(reg,
@@ -1953,7 +1993,9 @@ LInstruction* LChunkBuilder::DoReturn(HReturn* instr) {
LInstruction* LChunkBuilder::DoConstant(HConstant* instr) {
Representation r = instr->representation();
- if (r.IsInteger32()) {
+ if (r.IsSmi()) {
+ return DefineAsRegister(new(zone()) LConstantS);
+ } else if (r.IsInteger32()) {
return DefineAsRegister(new(zone()) LConstantI);
} else if (r.IsDouble()) {
LOperand* temp = TempRegister();
@@ -2073,9 +2115,9 @@ LInstruction* LChunkBuilder::DoLoadExternalArrayPointer(
LInstruction* LChunkBuilder::DoLoadKeyed(HLoadKeyed* instr) {
ASSERT(instr->key()->representation().IsInteger32() ||
- instr->key()->representation().IsTagged());
+ instr->key()->representation().IsSmi());
ElementsKind elements_kind = instr->elements_kind();
- bool clobbers_key = instr->key()->representation().IsTagged();
+ bool clobbers_key = instr->key()->representation().IsSmi();
LOperand* key = clobbers_key
? UseTempRegister(instr->key())
: UseRegisterOrConstantAtStart(instr->key());
@@ -2116,7 +2158,7 @@ LInstruction* LChunkBuilder::DoLoadKeyedGeneric(HLoadKeyedGeneric* instr) {
LInstruction* LChunkBuilder::DoStoreKeyed(HStoreKeyed* instr) {
ElementsKind elements_kind = instr->elements_kind();
- bool clobbers_key = instr->key()->representation().IsTagged();
+ bool clobbers_key = instr->key()->representation().IsSmi();
if (!instr->is_external()) {
ASSERT(instr->elements()->representation().IsTagged());
@@ -2131,7 +2173,7 @@ LInstruction* LChunkBuilder::DoStoreKeyed(HStoreKeyed* instr) {
key = clobbers_key ? UseTempRegister(instr->key())
: UseRegisterOrConstantAtStart(instr->key());
} else {
- ASSERT(instr->value()->representation().IsTagged());
+ ASSERT(instr->value()->representation().IsSmiOrTagged());
object = UseTempRegister(instr->elements());
if (needs_write_barrier) {
val = UseTempRegister(instr->value());
@@ -2223,13 +2265,14 @@ LInstruction* LChunkBuilder::DoTrapAllocationMemento(
LInstruction* LChunkBuilder::DoStoreNamedField(HStoreNamedField* instr) {
+ bool is_in_object = instr->access().IsInobject();
bool needs_write_barrier = instr->NeedsWriteBarrier();
bool needs_write_barrier_for_map = !instr->transition().is_null() &&
instr->NeedsWriteBarrierForMap();
LOperand* obj;
if (needs_write_barrier) {
- obj = instr->is_in_object()
+ obj = is_in_object
? UseRegister(instr->object())
: UseTempRegister(instr->object());
} else {
@@ -2258,14 +2301,15 @@ LInstruction* LChunkBuilder::DoStoreNamedField(HStoreNamedField* instr) {
// We only need a scratch register if we have a write barrier or we
// have a store into the properties array (not in-object-property).
- LOperand* temp = (!instr->is_in_object() || needs_write_barrier ||
+ LOperand* temp = (!is_in_object || needs_write_barrier ||
needs_write_barrier_for_map) ? TempRegister() : NULL;
LStoreNamedField* result = new(zone()) LStoreNamedField(obj, val, temp);
- if ((FLAG_track_fields && instr->field_representation().IsSmi()) ||
- (FLAG_track_heap_object_fields &&
- instr->field_representation().IsHeapObject())) {
- return AssignEnvironment(result);
+ if (FLAG_track_heap_object_fields &&
+ instr->field_representation().IsHeapObject()) {
+ if (!instr->value()->type().IsHeapObject()) {
+ return AssignEnvironment(result);
+ }
}
return result;
}
@@ -2309,13 +2353,6 @@ LInstruction* LChunkBuilder::DoStringLength(HStringLength* instr) {
}
-LInstruction* LChunkBuilder::DoAllocateObject(HAllocateObject* instr) {
- info()->MarkAsDeferredCalling();
- LAllocateObject* result = new(zone()) LAllocateObject(TempRegister());
- return AssignPointerMap(DefineAsRegister(result));
-}
-
-
LInstruction* LChunkBuilder::DoAllocate(HAllocate* instr) {
info()->MarkAsDeferredCalling();
LOperand* size = instr->size()->IsConstant()
diff --git a/deps/v8/src/x64/lithium-x64.h b/deps/v8/src/x64/lithium-x64.h
index 747d8e73d4..1121af50e1 100644
--- a/deps/v8/src/x64/lithium-x64.h
+++ b/deps/v8/src/x64/lithium-x64.h
@@ -50,7 +50,6 @@ class LCodeGen;
V(AccessArgumentsAt) \
V(AddI) \
V(Allocate) \
- V(AllocateObject) \
V(ApplyArguments) \
V(ArgumentsElements) \
V(ArgumentsLength) \
@@ -87,6 +86,7 @@ class LCodeGen;
V(CmpT) \
V(ConstantD) \
V(ConstantI) \
+ V(ConstantS) \
V(ConstantT) \
V(Context) \
V(DebugBreak) \
@@ -95,6 +95,7 @@ class LCodeGen;
V(Deoptimize) \
V(DivI) \
V(DoubleToI) \
+ V(DoubleToSmi) \
V(DummyUse) \
V(ElementsKind) \
V(FixedArrayBaseLength) \
@@ -112,6 +113,7 @@ class LCodeGen;
V(InstanceSize) \
V(InstructionGap) \
V(Integer32ToDouble) \
+ V(Integer32ToSmi) \
V(Uint32ToDouble) \
V(InvokeFunction) \
V(IsConstructCallAndBranch) \
@@ -1134,6 +1136,15 @@ class LConstantI: public LTemplateInstruction<1, 0, 0> {
};
+class LConstantS: public LTemplateInstruction<1, 0, 0> {
+ public:
+ DECLARE_CONCRETE_INSTRUCTION(ConstantS, "constant-s")
+ DECLARE_HYDROGEN_ACCESSOR(Constant)
+
+ Smi* value() const { return Smi::FromInt(hydrogen()->Integer32Value()); }
+};
+
+
class LConstantD: public LTemplateInstruction<1, 0, 1> {
public:
explicit LConstantD(LOperand* temp) {
@@ -1887,6 +1898,19 @@ class LInteger32ToDouble: public LTemplateInstruction<1, 1, 0> {
};
+class LInteger32ToSmi: public LTemplateInstruction<1, 1, 0> {
+ public:
+ explicit LInteger32ToSmi(LOperand* value) {
+ inputs_[0] = value;
+ }
+
+ LOperand* value() { return inputs_[0]; }
+
+ DECLARE_CONCRETE_INSTRUCTION(Integer32ToSmi, "int32-to-smi")
+ DECLARE_HYDROGEN_ACCESSOR(Change)
+};
+
+
class LUint32ToDouble: public LTemplateInstruction<1, 1, 1> {
public:
explicit LUint32ToDouble(LOperand* value, LOperand* temp) {
@@ -1958,6 +1982,19 @@ class LDoubleToI: public LTemplateInstruction<1, 1, 0> {
};
+class LDoubleToSmi: public LTemplateInstruction<1, 1, 0> {
+ public:
+ explicit LDoubleToSmi(LOperand* value) {
+ inputs_[0] = value;
+ }
+
+ LOperand* value() { return inputs_[0]; }
+
+ DECLARE_CONCRETE_INSTRUCTION(DoubleToSmi, "double-to-smi")
+ DECLARE_HYDROGEN_ACCESSOR(UnaryOperation)
+};
+
+
// Truncating conversion from a tagged value to an int32.
class LTaggedToI: public LTemplateInstruction<1, 1, 1> {
public:
@@ -2035,9 +2072,6 @@ class LStoreNamedField: public LTemplateInstruction<0, 2, 1> {
virtual void PrintDataTo(StringStream* stream);
- Handle<Object> name() const { return hydrogen()->name(); }
- bool is_in_object() { return hydrogen()->is_in_object(); }
- int offset() { return hydrogen()->offset(); }
Handle<Map> transition() const { return hydrogen()->transition(); }
Representation representation() const {
return hydrogen()->field_representation();
@@ -2265,7 +2299,7 @@ class LCheckPrototypeMaps: public LTemplateInstruction<0, 0, 1> {
};
-class LCheckSmi: public LTemplateInstruction<0, 1, 0> {
+class LCheckSmi: public LTemplateInstruction<1, 1, 0> {
public:
explicit LCheckSmi(LOperand* value) {
inputs_[0] = value;
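
The LCheckSmi change is easy to miss: LTemplateInstruction's parameters are
<results, inputs, temps>, so going from <0, 1, 0> to <1, 1, 0> turns the
pure check into an instruction that defines a value, which is what lets the
new tagged-to-smi path in DoChange write
DefineSameAsFirst(new(zone()) LCheckSmi(value)). As a shape sketch:

    template <int R, int I, int T>
    struct LTemplateShape {  // mirrors LTemplateInstruction<R, I, T>
      static const int kResults = R, kInputs = I, kTemps = T;
    };
    typedef LTemplateShape<1, 1, 0> CheckSmiShape;  // now produces a result
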
@@ -2328,19 +2362,6 @@ class LCheckNonSmi: public LTemplateInstruction<0, 1, 0> {
};
-class LAllocateObject: public LTemplateInstruction<1, 0, 1> {
- public:
- explicit LAllocateObject(LOperand* temp) {
- temps_[0] = temp;
- }
-
- LOperand* temp() { return temps_[0]; }
-
- DECLARE_CONCRETE_INSTRUCTION(AllocateObject, "allocate-object")
- DECLARE_HYDROGEN_ACCESSOR(AllocateObject)
-};
-
-
class LAllocate: public LTemplateInstruction<1, 1, 1> {
public:
LAllocate(LOperand* size, LOperand* temp) {
diff --git a/deps/v8/src/x64/macro-assembler-x64.cc b/deps/v8/src/x64/macro-assembler-x64.cc
index 31796b1283..46e2c694e8 100644
--- a/deps/v8/src/x64/macro-assembler-x64.cc
+++ b/deps/v8/src/x64/macro-assembler-x64.cc
@@ -2295,20 +2295,22 @@ void MacroAssembler::Move(Register dst, Register src) {
void MacroAssembler::Move(Register dst, Handle<Object> source) {
- ALLOW_HANDLE_DEREF(isolate(), "smi check");
+ AllowDeferredHandleDereference smi_check;
if (source->IsSmi()) {
Move(dst, Smi::cast(*source));
} else {
+ ASSERT(source->IsHeapObject());
movq(dst, source, RelocInfo::EMBEDDED_OBJECT);
}
}
void MacroAssembler::Move(const Operand& dst, Handle<Object> source) {
- ALLOW_HANDLE_DEREF(isolate(), "smi check");
+ AllowDeferredHandleDereference smi_check;
if (source->IsSmi()) {
Move(dst, Smi::cast(*source));
} else {
+ ASSERT(source->IsHeapObject());
movq(kScratchRegister, source, RelocInfo::EMBEDDED_OBJECT);
movq(dst, kScratchRegister);
}
@@ -2316,18 +2318,19 @@ void MacroAssembler::Move(const Operand& dst, Handle<Object> source) {
void MacroAssembler::Cmp(Register dst, Handle<Object> source) {
- ALLOW_HANDLE_DEREF(isolate(), "smi check");
+ AllowDeferredHandleDereference smi_check;
if (source->IsSmi()) {
Cmp(dst, Smi::cast(*source));
} else {
- Move(kScratchRegister, source);
+ ASSERT(source->IsHeapObject());
+ movq(kScratchRegister, source, RelocInfo::EMBEDDED_OBJECT);
cmpq(dst, kScratchRegister);
}
}
void MacroAssembler::Cmp(const Operand& dst, Handle<Object> source) {
- ALLOW_HANDLE_DEREF(isolate(), "smi check");
+ AllowDeferredHandleDereference smi_check;
if (source->IsSmi()) {
Cmp(dst, Smi::cast(*source));
} else {
@@ -2339,7 +2342,7 @@ void MacroAssembler::Cmp(const Operand& dst, Handle<Object> source) {
void MacroAssembler::Push(Handle<Object> source) {
- ALLOW_HANDLE_DEREF(isolate(), "smi check");
+ AllowDeferredHandleDereference smi_check;
if (source->IsSmi()) {
Push(Smi::cast(*source));
} else {
@@ -2352,7 +2355,7 @@ void MacroAssembler::Push(Handle<Object> source) {
void MacroAssembler::LoadHeapObject(Register result,
Handle<HeapObject> object) {
- ALLOW_HANDLE_DEREF(isolate(), "using raw address");
+ AllowDeferredHandleDereference using_raw_address;
if (isolate()->heap()->InNewSpace(*object)) {
Handle<JSGlobalPropertyCell> cell =
isolate()->factory()->NewJSGlobalPropertyCell(object);
@@ -2364,8 +2367,21 @@ void MacroAssembler::LoadHeapObject(Register result,
}
+void MacroAssembler::CmpHeapObject(Register reg, Handle<HeapObject> object) {
+ AllowDeferredHandleDereference using_raw_address;
+ if (isolate()->heap()->InNewSpace(*object)) {
+ Handle<JSGlobalPropertyCell> cell =
+ isolate()->factory()->NewJSGlobalPropertyCell(object);
+ movq(kScratchRegister, cell, RelocInfo::GLOBAL_PROPERTY_CELL);
+ cmpq(reg, Operand(kScratchRegister, 0));
+ } else {
+ Cmp(reg, object);
+ }
+}
+
+
void MacroAssembler::PushHeapObject(Handle<HeapObject> object) {
- ALLOW_HANDLE_DEREF(isolate(), "using raw address");
+ AllowDeferredHandleDereference using_raw_address;
if (isolate()->heap()->InNewSpace(*object)) {
Handle<JSGlobalPropertyCell> cell =
isolate()->factory()->NewJSGlobalPropertyCell(object);
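
CmpHeapObject follows the LoadHeapObject/PushHeapObject pattern around it:
a new-space object can be moved by the scavenger, so rather than embedding
its raw address the code embeds the address of a global property cell,
which stays put and is updated by the GC. The comparison then goes through
one indirection, roughly:

    // Sketch only; Cell stands in for JSGlobalPropertyCell.
    struct Cell { const void* value; };  // GC rewrites value when objects move
    bool CmpThroughCell(const void* reg, const Cell* cell) {
      return reg == cell->value;  // cmpq reg, Operand(kScratchRegister, 0)
    }
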
@@ -2381,7 +2397,7 @@ void MacroAssembler::PushHeapObject(Handle<HeapObject> object) {
void MacroAssembler::LoadGlobalCell(Register dst,
Handle<JSGlobalPropertyCell> cell) {
if (dst.is(rax)) {
- ALLOW_HANDLE_DEREF(isolate(), "embedding raw address");
+ AllowDeferredHandleDereference embedding_raw_address;
load_rax(cell.location(), RelocInfo::GLOBAL_PROPERTY_CELL);
} else {
movq(dst, cell, RelocInfo::GLOBAL_PROPERTY_CELL);
@@ -2853,38 +2869,21 @@ void MacroAssembler::StoreNumberToDoubleElements(
void MacroAssembler::CompareMap(Register obj,
Handle<Map> map,
- Label* early_success,
- CompareMapMode mode) {
+ Label* early_success) {
Cmp(FieldOperand(obj, HeapObject::kMapOffset), map);
- if (mode == ALLOW_ELEMENT_TRANSITION_MAPS) {
- ElementsKind kind = map->elements_kind();
- if (IsFastElementsKind(kind)) {
- bool packed = IsFastPackedElementsKind(kind);
- Map* current_map = *map;
- while (CanTransitionToMoreGeneralFastElementsKind(kind, packed)) {
- kind = GetNextMoreGeneralFastElementsKind(kind, packed);
- current_map = current_map->LookupElementsTransitionMap(kind);
- if (!current_map) break;
- j(equal, early_success, Label::kNear);
- Cmp(FieldOperand(obj, HeapObject::kMapOffset),
- Handle<Map>(current_map));
- }
- }
- }
}
void MacroAssembler::CheckMap(Register obj,
Handle<Map> map,
Label* fail,
- SmiCheckType smi_check_type,
- CompareMapMode mode) {
+ SmiCheckType smi_check_type) {
if (smi_check_type == DO_SMI_CHECK) {
JumpIfSmi(obj, fail);
}
Label success;
- CompareMap(obj, map, &success, mode);
+ CompareMap(obj, map, &success);
j(not_equal, fail);
bind(&success);
}
diff --git a/deps/v8/src/x64/macro-assembler-x64.h b/deps/v8/src/x64/macro-assembler-x64.h
index 6c8d5ff730..c10cbc65fe 100644
--- a/deps/v8/src/x64/macro-assembler-x64.h
+++ b/deps/v8/src/x64/macro-assembler-x64.h
@@ -788,10 +788,11 @@ class MacroAssembler: public Assembler {
// Load a heap object and handle the case of new-space objects by
// indirecting via a global cell.
void LoadHeapObject(Register result, Handle<HeapObject> object);
+ void CmpHeapObject(Register reg, Handle<HeapObject> object);
void PushHeapObject(Handle<HeapObject> object);
void LoadObject(Register result, Handle<Object> object) {
- ALLOW_HANDLE_DEREF(isolate(), "heap object check");
+ AllowDeferredHandleDereference heap_object_check;
if (object->IsHeapObject()) {
LoadHeapObject(result, Handle<HeapObject>::cast(object));
} else {
@@ -799,6 +800,15 @@ class MacroAssembler: public Assembler {
}
}
+ void CmpObject(Register reg, Handle<Object> object) {
+ AllowDeferredHandleDereference heap_object_check;
+ if (object->IsHeapObject()) {
+ CmpHeapObject(reg, Handle<HeapObject>::cast(object));
+ } else {
+ Cmp(reg, object);
+ }
+ }
+
// Load a global cell into a register.
void LoadGlobalCell(Register dst, Handle<JSGlobalPropertyCell> cell);
@@ -898,8 +908,7 @@ class MacroAssembler: public Assembler {
// sequences branches to early_success.
void CompareMap(Register obj,
Handle<Map> map,
- Label* early_success,
- CompareMapMode mode = REQUIRE_EXACT_MAP);
+ Label* early_success);
// Check if the map of an object is equal to a specified map and branch to
// label if not. Skip the smi check if not required (object is known to be a
@@ -908,8 +917,7 @@ class MacroAssembler: public Assembler {
void CheckMap(Register obj,
Handle<Map> map,
Label* fail,
- SmiCheckType smi_check_type,
- CompareMapMode mode = REQUIRE_EXACT_MAP);
+ SmiCheckType smi_check_type);
// Check if the map of an object is equal to a specified map and branch to a
// specified target if equal. Skip the smi check if not required (object is
diff --git a/deps/v8/src/x64/regexp-macro-assembler-x64.cc b/deps/v8/src/x64/regexp-macro-assembler-x64.cc
index 012dcc8b62..efb2a65a5f 100644
--- a/deps/v8/src/x64/regexp-macro-assembler-x64.cc
+++ b/deps/v8/src/x64/regexp-macro-assembler-x64.cc
@@ -120,7 +120,7 @@ RegExpMacroAssemblerX64::RegExpMacroAssemblerX64(
int registers_to_save,
Zone* zone)
: NativeRegExpMacroAssembler(zone),
- masm_(Isolate::Current(), NULL, kRegExpCodeSize),
+ masm_(zone->isolate(), NULL, kRegExpCodeSize),
no_root_array_scope_(&masm_),
code_relative_fixup_positions_(4, zone),
mode_(mode),
@@ -226,101 +226,6 @@ void RegExpMacroAssemblerX64::CheckCharacterLT(uc16 limit, Label* on_less) {
}
-void RegExpMacroAssemblerX64::CheckCharacters(Vector<const uc16> str,
- int cp_offset,
- Label* on_failure,
- bool check_end_of_string) {
-#ifdef DEBUG
- // If input is ASCII, don't even bother calling here if the string to
- // match contains a non-ASCII character.
- if (mode_ == ASCII) {
- ASSERT(String::IsOneByte(str.start(), str.length()));
- }
-#endif
- int byte_length = str.length() * char_size();
- int byte_offset = cp_offset * char_size();
- if (check_end_of_string) {
- // Check that there are at least str.length() characters left in the input.
- __ cmpl(rdi, Immediate(-(byte_offset + byte_length)));
- BranchOrBacktrack(greater, on_failure);
- }
-
- if (on_failure == NULL) {
- // Instead of inlining a backtrack, (re)use the global backtrack target.
- on_failure = &backtrack_label_;
- }
-
- // Do one character test first to minimize loading for the case that
- // we don't match at all (loading more than one character introduces that
- // chance of reading unaligned and reading across cache boundaries).
- // If the first character matches, expect a larger chance of matching the
- // string, and start loading more characters at a time.
- if (mode_ == ASCII) {
- __ cmpb(Operand(rsi, rdi, times_1, byte_offset),
- Immediate(static_cast<int8_t>(str[0])));
- } else {
- // Don't use 16-bit immediate. The size changing prefix throws off
- // pre-decoding.
- __ movzxwl(rax,
- Operand(rsi, rdi, times_1, byte_offset));
- __ cmpl(rax, Immediate(static_cast<int32_t>(str[0])));
- }
- BranchOrBacktrack(not_equal, on_failure);
-
- __ lea(rbx, Operand(rsi, rdi, times_1, 0));
- for (int i = 1, n = str.length(); i < n; ) {
- if (mode_ == ASCII) {
- if (i + 8 <= n) {
- uint64_t combined_chars =
- (static_cast<uint64_t>(str[i + 0]) << 0) ||
- (static_cast<uint64_t>(str[i + 1]) << 8) ||
- (static_cast<uint64_t>(str[i + 2]) << 16) ||
- (static_cast<uint64_t>(str[i + 3]) << 24) ||
- (static_cast<uint64_t>(str[i + 4]) << 32) ||
- (static_cast<uint64_t>(str[i + 5]) << 40) ||
- (static_cast<uint64_t>(str[i + 6]) << 48) ||
- (static_cast<uint64_t>(str[i + 7]) << 56);
- __ movq(rax, combined_chars, RelocInfo::NONE64);
- __ cmpq(rax, Operand(rbx, byte_offset + i));
- i += 8;
- } else if (i + 4 <= n) {
- uint32_t combined_chars =
- (static_cast<uint32_t>(str[i + 0]) << 0) ||
- (static_cast<uint32_t>(str[i + 1]) << 8) ||
- (static_cast<uint32_t>(str[i + 2]) << 16) ||
- (static_cast<uint32_t>(str[i + 3]) << 24);
- __ cmpl(Operand(rbx, byte_offset + i), Immediate(combined_chars));
- i += 4;
- } else {
- __ cmpb(Operand(rbx, byte_offset + i),
- Immediate(static_cast<int8_t>(str[i])));
- i++;
- }
- } else {
- ASSERT(mode_ == UC16);
- if (i + 4 <= n) {
- uint64_t combined_chars = *reinterpret_cast<const uint64_t*>(&str[i]);
- __ movq(rax, combined_chars, RelocInfo::NONE64);
- __ cmpq(rax,
- Operand(rsi, rdi, times_1, byte_offset + i * sizeof(uc16)));
- i += 4;
- } else if (i + 2 <= n) {
- uint32_t combined_chars = *reinterpret_cast<const uint32_t*>(&str[i]);
- __ cmpl(Operand(rsi, rdi, times_1, byte_offset + i * sizeof(uc16)),
- Immediate(combined_chars));
- i += 2;
- } else {
- __ movzxwl(rax,
- Operand(rsi, rdi, times_1, byte_offset + i * sizeof(uc16)));
- __ cmpl(rax, Immediate(str[i]));
- i++;
- }
- }
- BranchOrBacktrack(not_equal, on_failure);
- }
-}
-
-
void RegExpMacroAssemblerX64::CheckGreedyLoop(Label* on_equal) {
Label fallthrough;
__ cmpl(rdi, Operand(backtrack_stackpointer(), 0));
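
The deleted CheckCharacters fast path above packed consecutive pattern
characters into 32- and 64-bit immediates so a single compare instruction
could test several characters at once. Worth noting: the removed ASCII
packing branches combined the shifted characters with || (logical OR), which
collapses each operand to 0 or 1, instead of the bitwise | the memory
comparison requires, so those multi-character compares could not have matched
real input. A minimal corrected sketch of the intended packing (hypothetical
helper, not part of V8):

    // Packs eight one-byte pattern characters, lowest address in the low
    // byte, into one 64-bit immediate for a single cmpq against the subject.
    static uint64_t PackEightAsciiChars(const uc16* str) {
      uint64_t combined = 0;
      for (int k = 0; k < 8; k++) {
        combined |= static_cast<uint64_t>(str[k]) << (8 * k);
      }
      return combined;
    }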
diff --git a/deps/v8/src/x64/regexp-macro-assembler-x64.h b/deps/v8/src/x64/regexp-macro-assembler-x64.h
index 296c866019..b230ea47fc 100644
--- a/deps/v8/src/x64/regexp-macro-assembler-x64.h
+++ b/deps/v8/src/x64/regexp-macro-assembler-x64.h
@@ -55,10 +55,6 @@ class RegExpMacroAssemblerX64: public NativeRegExpMacroAssembler {
Label* on_equal);
virtual void CheckCharacterGT(uc16 limit, Label* on_greater);
virtual void CheckCharacterLT(uc16 limit, Label* on_less);
- virtual void CheckCharacters(Vector<const uc16> str,
- int cp_offset,
- Label* on_failure,
- bool check_end_of_string);
// A "greedy loop" is a loop that is both greedy and with a simple
// body. It has a particularly simple implementation.
virtual void CheckGreedyLoop(Label* on_tos_equals_current_position);
diff --git a/deps/v8/src/x64/stub-cache-x64.cc b/deps/v8/src/x64/stub-cache-x64.cc
index 148f65ee0d..06d8f7108b 100644
--- a/deps/v8/src/x64/stub-cache-x64.cc
+++ b/deps/v8/src/x64/stub-cache-x64.cc
@@ -449,12 +449,13 @@ static void GenerateFastApiCall(MacroAssembler* masm,
// (first fast api call extra argument)
// -- rsp[24] : api call data
// -- rsp[32] : isolate
- // -- rsp[40] : ReturnValue
+ // -- rsp[40] : ReturnValue default value
+ // -- rsp[48] : ReturnValue
//
- // -- rsp[48] : last argument
+ // -- rsp[56] : last argument
// -- ...
- // -- rsp[(argc + 5) * 8] : first argument
- // -- rsp[(argc + 6) * 8] : receiver
+ // -- rsp[(argc + 6) * 8] : first argument
+ // -- rsp[(argc + 7) * 8] : receiver
// -----------------------------------
// Get the function and setup the context.
Handle<JSFunction> function = optimization.constant_function();
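
The stub-cache hunks here all track one ABI change: the fast API call frame
gains a separate "ReturnValue default value" slot next to the ReturnValue
slot, so every fixed offset below it, and kFastApiCallArguments itself,
shifts by one slot. A hedged summary of the shift, assuming 8-byte stack
slots as in the layout comments above:

    // Implicit-argument block of the fast API frame (offsets from rsp):
    //   before: data(24) isolate(32) ReturnValue(40) | JS args from 48
    //   after:  data(24) isolate(32) RV default(40) ReturnValue(48) | JS args from 56
    static const int kFastApiCallArguments = 6;  // was 5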
@@ -477,9 +478,10 @@ static void GenerateFastApiCall(MacroAssembler* masm,
__ movq(Operand(rsp, 4 * kPointerSize), kScratchRegister);
__ LoadRoot(kScratchRegister, Heap::kUndefinedValueRootIndex);
__ movq(Operand(rsp, 5 * kPointerSize), kScratchRegister);
+ __ movq(Operand(rsp, 6 * kPointerSize), kScratchRegister);
// Prepare arguments.
- STATIC_ASSERT(kFastApiCallArguments == 5);
+ STATIC_ASSERT(kFastApiCallArguments == 6);
__ lea(rbx, Operand(rsp, kFastApiCallArguments * kPointerSize));
// Function address is a foreign pointer outside V8's heap.
@@ -763,7 +765,7 @@ void StubCompiler::GenerateStoreTransition(MacroAssembler* masm,
Label* slow) {
// Check that the map of the object hasn't changed.
__ CheckMap(receiver_reg, Handle<Map>(object->map()),
- miss_label, DO_SMI_CHECK, REQUIRE_EXACT_MAP);
+ miss_label, DO_SMI_CHECK);
// Perform global security token check if needed.
if (object->IsJSGlobalProxy()) {
@@ -830,7 +832,7 @@ void StubCompiler::GenerateStoreTransition(MacroAssembler* masm,
__ bind(&heap_number);
__ CheckMap(value_reg, masm->isolate()->factory()->heap_number_map(),
- miss_restore_name, DONT_DO_SMI_CHECK, REQUIRE_EXACT_MAP);
+ miss_restore_name, DONT_DO_SMI_CHECK);
__ movsd(xmm0, FieldOperand(value_reg, HeapNumber::kValueOffset));
__ bind(&do_store);
@@ -880,6 +882,8 @@ void StubCompiler::GenerateStoreTransition(MacroAssembler* masm,
index -= object->map()->inobject_properties();
// TODO(verwaest): Share this code as a code stub.
+ SmiCheck smi_check = representation.IsTagged()
+ ? INLINE_SMI_CHECK : OMIT_SMI_CHECK;
if (index < 0) {
// Set the property straight into the object.
int offset = object->map()->instance_size() + (index * kPointerSize);
@@ -898,7 +902,8 @@ void StubCompiler::GenerateStoreTransition(MacroAssembler* masm,
ASSERT(storage_reg.is(name_reg));
}
__ RecordWriteField(
- receiver_reg, offset, name_reg, scratch1, kDontSaveFPRegs);
+ receiver_reg, offset, name_reg, scratch1, kDontSaveFPRegs,
+ EMIT_REMEMBERED_SET, smi_check);
}
} else {
// Write to the properties array.
@@ -920,7 +925,8 @@ void StubCompiler::GenerateStoreTransition(MacroAssembler* masm,
ASSERT(storage_reg.is(name_reg));
}
__ RecordWriteField(
- scratch1, offset, name_reg, receiver_reg, kDontSaveFPRegs);
+ scratch1, offset, name_reg, receiver_reg, kDontSaveFPRegs,
+ EMIT_REMEMBERED_SET, smi_check);
}
}
@@ -943,7 +949,7 @@ void StubCompiler::GenerateStoreField(MacroAssembler* masm,
Label* miss_label) {
// Check that the map of the object hasn't changed.
__ CheckMap(receiver_reg, Handle<Map>(object->map()),
- miss_label, DO_SMI_CHECK, ALLOW_ELEMENT_TRANSITION_MAPS);
+ miss_label, DO_SMI_CHECK);
// Perform global security token check if needed.
if (object->IsJSGlobalProxy()) {
@@ -988,9 +994,8 @@ void StubCompiler::GenerateStoreField(MacroAssembler* masm,
__ bind(&heap_number);
__ CheckMap(value_reg, masm->isolate()->factory()->heap_number_map(),
- miss_label, DONT_DO_SMI_CHECK, REQUIRE_EXACT_MAP);
+ miss_label, DONT_DO_SMI_CHECK);
__ movsd(xmm0, FieldOperand(value_reg, HeapNumber::kValueOffset));
-
__ bind(&do_store);
__ movsd(FieldOperand(scratch1, HeapNumber::kValueOffset), xmm0);
// Return the value (register rax).
@@ -1000,6 +1005,8 @@ void StubCompiler::GenerateStoreField(MacroAssembler* masm,
}
// TODO(verwaest): Share this code as a code stub.
+ SmiCheck smi_check = representation.IsTagged()
+ ? INLINE_SMI_CHECK : OMIT_SMI_CHECK;
if (index < 0) {
// Set the property straight into the object.
int offset = object->map()->instance_size() + (index * kPointerSize);
@@ -1010,7 +1017,8 @@ void StubCompiler::GenerateStoreField(MacroAssembler* masm,
// Pass the value being stored in the now unused name_reg.
__ movq(name_reg, value_reg);
__ RecordWriteField(
- receiver_reg, offset, name_reg, scratch1, kDontSaveFPRegs);
+ receiver_reg, offset, name_reg, scratch1, kDontSaveFPRegs,
+ EMIT_REMEMBERED_SET, smi_check);
}
} else {
// Write to the properties array.
@@ -1024,7 +1032,8 @@ void StubCompiler::GenerateStoreField(MacroAssembler* masm,
// Pass the value being stored in the now unused name_reg.
__ movq(name_reg, value_reg);
__ RecordWriteField(
- scratch1, offset, name_reg, receiver_reg, kDontSaveFPRegs);
+ scratch1, offset, name_reg, receiver_reg, kDontSaveFPRegs,
+ EMIT_REMEMBERED_SET, smi_check);
}
}
@@ -1126,8 +1135,7 @@ Register StubCompiler::CheckPrototypes(Handle<JSObject> object,
__ movq(scratch1, FieldOperand(reg, HeapObject::kMapOffset));
}
if (!current.is_identical_to(first) || check == CHECK_ALL_MAPS) {
- __ CheckMap(reg, current_map, miss, DONT_DO_SMI_CHECK,
- ALLOW_ELEMENT_TRANSITION_MAPS);
+ __ CheckMap(reg, current_map, miss, DONT_DO_SMI_CHECK);
}
// Check access rights to the global object. This has to happen after
@@ -1162,8 +1170,7 @@ Register StubCompiler::CheckPrototypes(Handle<JSObject> object,
if (!holder.is_identical_to(first) || check == CHECK_ALL_MAPS) {
// Check the holder map.
- __ CheckMap(reg, Handle<Map>(holder->map()),
- miss, DONT_DO_SMI_CHECK, ALLOW_ELEMENT_TRANSITION_MAPS);
+ __ CheckMap(reg, Handle<Map>(holder->map()), miss, DONT_DO_SMI_CHECK);
}
// Perform security check for access to the global object.
@@ -1298,9 +1305,10 @@ void BaseLoadStubCompiler::GenerateLoadCallback(
} else {
__ Push(Handle<Object>(callback->data(), isolate()));
}
- __ PushAddress(ExternalReference::isolate_address(isolate()));
__ LoadRoot(kScratchRegister, Heap::kUndefinedValueRootIndex);
__ push(kScratchRegister); // return value
+ __ push(kScratchRegister); // return value default
+ __ PushAddress(ExternalReference::isolate_address(isolate()));
__ push(name()); // name
// Save a pointer to where we pushed the arguments pointer. This will be
// passed as the const ExecutableAccessorInfo& to the C++ callback.
@@ -1332,8 +1340,8 @@ void BaseLoadStubCompiler::GenerateLoadCallback(
const int kArgStackSpace = 1;
__ PrepareCallApiFunction(kArgStackSpace, returns_handle);
- STATIC_ASSERT(PropertyCallbackArguments::kArgsLength == 5);
- __ lea(rax, Operand(name_arg, 5 * kPointerSize));
+ STATIC_ASSERT(PropertyCallbackArguments::kArgsLength == 6);
+ __ lea(rax, Operand(name_arg, 6 * kPointerSize));
// v8::AccessorInfo::args_.
__ movq(StackSpaceOperand(0), rax);
@@ -1345,7 +1353,7 @@ void BaseLoadStubCompiler::GenerateLoadCallback(
__ CallApiFunctionAndReturn(getter_address,
kStackSpace,
returns_handle,
- 3);
+ 5);
}
@@ -2666,8 +2674,7 @@ Handle<Code> StoreStubCompiler::CompileStoreInterceptor(
Label miss;
// Check that the map of the object hasn't changed.
- __ CheckMap(receiver(), Handle<Map>(object->map()), &miss,
- DO_SMI_CHECK, ALLOW_ELEMENT_TRANSITION_MAPS);
+ __ CheckMap(receiver(), Handle<Map>(object->map()), &miss, DO_SMI_CHECK);
// Perform global security token check if needed.
if (object->IsJSGlobalProxy()) {
@@ -2957,139 +2964,6 @@ Handle<Code> BaseLoadStubCompiler::CompilePolymorphicIC(
}
-// Specialized stub for constructing objects from functions which have only
-// simple assignments of the form this.x = ...; in their body.
-Handle<Code> ConstructStubCompiler::CompileConstructStub(
- Handle<JSFunction> function) {
- // ----------- S t a t e -------------
- // -- rax : argc
- // -- rdi : constructor
- // -- rsp[0] : return address
-  //  -- rsp[8]             : last argument
- // -----------------------------------
- Label generic_stub_call;
-
- // Use r8 for holding undefined which is used in several places below.
- __ Move(r8, factory()->undefined_value());
-
-#ifdef ENABLE_DEBUGGER_SUPPORT
- // Check to see whether there are any break points in the function code. If
- // there are jump to the generic constructor stub which calls the actual
- // code for the function thereby hitting the break points.
- __ movq(rbx, FieldOperand(rdi, JSFunction::kSharedFunctionInfoOffset));
- __ movq(rbx, FieldOperand(rbx, SharedFunctionInfo::kDebugInfoOffset));
- __ cmpq(rbx, r8);
- __ j(not_equal, &generic_stub_call);
-#endif
-
- // Load the initial map and verify that it is in fact a map.
- // rdi: constructor
- __ movq(rbx, FieldOperand(rdi, JSFunction::kPrototypeOrInitialMapOffset));
- // Will both indicate a NULL and a Smi.
- STATIC_ASSERT(kSmiTag == 0);
- __ JumpIfSmi(rbx, &generic_stub_call);
- __ CmpObjectType(rbx, MAP_TYPE, rcx);
- __ j(not_equal, &generic_stub_call);
-
-#ifdef DEBUG
- // Cannot construct functions this way.
- // rbx: initial map
- __ CmpInstanceType(rbx, JS_FUNCTION_TYPE);
- __ Check(not_equal, "Function constructed by construct stub.");
-#endif
-
- // Now allocate the JSObject in new space.
- // rbx: initial map
- ASSERT(function->has_initial_map());
- int instance_size = function->initial_map()->instance_size();
-#ifdef DEBUG
- __ movzxbq(rcx, FieldOperand(rbx, Map::kInstanceSizeOffset));
- __ shl(rcx, Immediate(kPointerSizeLog2));
- __ cmpq(rcx, Immediate(instance_size));
- __ Check(equal, "Instance size of initial map changed.");
-#endif
- __ Allocate(instance_size, rdx, rcx, no_reg, &generic_stub_call,
- NO_ALLOCATION_FLAGS);
-
- // Allocated the JSObject, now initialize the fields and add the heap tag.
- // rbx: initial map
- // rdx: JSObject (untagged)
- __ movq(Operand(rdx, JSObject::kMapOffset), rbx);
- __ Move(rbx, factory()->empty_fixed_array());
- __ movq(Operand(rdx, JSObject::kPropertiesOffset), rbx);
- __ movq(Operand(rdx, JSObject::kElementsOffset), rbx);
-
- // rax: argc
- // rdx: JSObject (untagged)
- // Load the address of the first in-object property into r9.
- __ lea(r9, Operand(rdx, JSObject::kHeaderSize));
- // Calculate the location of the first argument. The stack contains only the
- // return address on top of the argc arguments.
- __ lea(rcx, Operand(rsp, rax, times_pointer_size, 0));
-
- // rax: argc
- // rcx: first argument
- // rdx: JSObject (untagged)
- // r8: undefined
- // r9: first in-object property of the JSObject
- // Fill the initialized properties with a constant value or a passed argument
- // depending on the this.x = ...; assignment in the function.
- Handle<SharedFunctionInfo> shared(function->shared());
- for (int i = 0; i < shared->this_property_assignments_count(); i++) {
- if (shared->IsThisPropertyAssignmentArgument(i)) {
- // Check if the argument assigned to the property is actually passed.
- // If argument is not passed the property is set to undefined,
- // otherwise find it on the stack.
- int arg_number = shared->GetThisPropertyAssignmentArgument(i);
- __ movq(rbx, r8);
- __ cmpq(rax, Immediate(arg_number));
- __ cmovq(above, rbx, Operand(rcx, arg_number * -kPointerSize));
- // Store value in the property.
- __ movq(Operand(r9, i * kPointerSize), rbx);
- } else {
- // Set the property to the constant value.
- Handle<Object> constant(shared->GetThisPropertyAssignmentConstant(i),
- isolate());
- __ Move(Operand(r9, i * kPointerSize), constant);
- }
- }
-
- // Fill the unused in-object property fields with undefined.
- for (int i = shared->this_property_assignments_count();
- i < function->initial_map()->inobject_properties();
- i++) {
- __ movq(Operand(r9, i * kPointerSize), r8);
- }
-
- // rax: argc
- // rdx: JSObject (untagged)
- // Move argc to rbx and the JSObject to return to rax and tag it.
- __ movq(rbx, rax);
- __ movq(rax, rdx);
- __ or_(rax, Immediate(kHeapObjectTag));
-
- // rax: JSObject
- // rbx: argc
- // Remove caller arguments and receiver from the stack and return.
- __ pop(rcx);
- __ lea(rsp, Operand(rsp, rbx, times_pointer_size, 1 * kPointerSize));
- __ push(rcx);
- Counters* counters = isolate()->counters();
- __ IncrementCounter(counters->constructed_objects(), 1);
- __ IncrementCounter(counters->constructed_objects_stub(), 1);
- __ ret(0);
-
- // Jump to the generic stub in case the specialized code cannot handle the
- // construction.
- __ bind(&generic_stub_call);
- Handle<Code> code = isolate()->builtins()->JSConstructStubGeneric();
- __ Jump(code, RelocInfo::CODE_TARGET);
-
- // Return the generated code.
- return GetCode();
-}
-
-
#undef __
#define __ ACCESS_MASM(masm)
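
A pattern repeated through the store hunks above: RecordWriteField now takes
an explicit SmiCheck hint derived from the field's representation. Only a
tagged representation can legitimately hold a smi, so only then must the
write barrier test for one inline; any more specific representation
guarantees a heap value and the check is omitted. The selection logic, as
used by both GenerateStoreTransition and GenerateStoreField:

    SmiCheck smi_check = representation.IsTagged()
        ? INLINE_SMI_CHECK : OMIT_SMI_CHECK;
    __ RecordWriteField(receiver_reg, offset, name_reg, scratch1,
                        kDontSaveFPRegs, EMIT_REMEMBERED_SET, smi_check);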
diff --git a/deps/v8/test/cctest/cctest.cc b/deps/v8/test/cctest/cctest.cc
index 5507ac6f31..94dcce1305 100644
--- a/deps/v8/test/cctest/cctest.cc
+++ b/deps/v8/test/cctest/cctest.cc
@@ -25,9 +25,6 @@
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-#define V8_ALLOW_ACCESS_TO_PERSISTENT_IMPLICIT
-#define V8_ALLOW_ACCESS_TO_PERSISTENT_ARROW
-
#include <v8.h>
#include "cctest.h"
#include "debug.h"
@@ -70,14 +67,19 @@ void CcTest::InitializeVM(CcTestExtensionFlags extensions) {
if (extensions.Contains(Name##_ID)) extension_names[extension_count++] = Id;
EXTENSION_LIST(CHECK_EXTENSION_FLAG)
#undef CHECK_EXTENSION_FLAG
+ v8::Isolate* isolate = default_isolate();
if (context_.IsEmpty()) {
- v8::Isolate* isolate = default_isolate();
v8::HandleScope scope(isolate);
v8::ExtensionConfiguration config(extension_count, extension_names);
v8::Local<v8::Context> context = v8::Context::New(isolate, &config);
- context_ = v8::Persistent<v8::Context>::New(isolate, context);
+ context_.Reset(isolate, context);
+ }
+ {
+ v8::HandleScope scope(isolate);
+ v8::Local<v8::Context> context =
+ v8::Local<v8::Context>::New(isolate, context_);
+ context->Enter();
}
- context_->Enter();
}
@@ -96,10 +98,21 @@ static void PrintTestList(CcTest* current) {
v8::Isolate* CcTest::default_isolate_;
+class CcTestArrayBufferAllocator : public v8::ArrayBuffer::Allocator {
+ public:
+ virtual void* Allocate(size_t length) { return malloc(length); }
+ virtual void Free(void* data) { free(data); }
+};
+
+
int main(int argc, char* argv[]) {
v8::internal::FlagList::SetFlagsFromCommandLine(&argc, argv, true);
v8::internal::FLAG_harmony_array_buffer = true;
v8::internal::FLAG_harmony_typed_arrays = true;
+
+ CcTestArrayBufferAllocator array_buffer_allocator;
+ v8::V8::SetArrayBufferAllocator(&array_buffer_allocator);
+
CcTest::set_default_isolate(v8::Isolate::GetCurrent());
CHECK(CcTest::default_isolate() != NULL);
int tests_run = 0;
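
The harness change above reflects a new embedder obligation in this V8
revision: an ArrayBuffer::Allocator must be registered before any script
allocates an ArrayBuffer. A minimal embedder-side sketch, mirroring the
malloc-backed test allocator (error handling omitted):

    class MallocArrayBufferAllocator : public v8::ArrayBuffer::Allocator {
     public:
      virtual void* Allocate(size_t length) { return malloc(length); }
      virtual void Free(void* data) { free(data); }
    };

    // Install once, before creating contexts that may touch ArrayBuffers.
    static MallocArrayBufferAllocator allocator;
    v8::V8::SetArrayBufferAllocator(&allocator);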
diff --git a/deps/v8/test/cctest/cctest.gyp b/deps/v8/test/cctest/cctest.gyp
index 0a91ed5e70..0811093011 100644
--- a/deps/v8/test/cctest/cctest.gyp
+++ b/deps/v8/test/cctest/cctest.gyp
@@ -99,10 +99,12 @@
'test-strtod.cc',
'test-thread-termination.cc',
'test-threads.cc',
+ 'test-types.cc',
'test-unbound-queue.cc',
'test-utils.cc',
'test-version.cc',
- 'test-weakmaps.cc'
+ 'test-weakmaps.cc',
+ 'test-weaktypedarrays.cc'
],
'conditions': [
['v8_target_arch=="ia32"', {
diff --git a/deps/v8/test/cctest/cctest.status b/deps/v8/test/cctest/cctest.status
index d1925dc257..59b3dc3511 100644
--- a/deps/v8/test/cctest/cctest.status
+++ b/deps/v8/test/cctest/cctest.status
@@ -34,14 +34,6 @@ test-api/Bug*: FAIL
# BUG(382): Weird test. Can't guarantee that it never times out.
test-api/ApplyInterruption: PASS || TIMEOUT
-# BUG(484): This test which we thought was originally corrected in r5236
-# is re-appearing. Disabled until bug in test is fixed. This only fails
-# when snapshot is on, so I am marking it PASS || FAIL
-test-heap-profiler/HeapSnapshotsDiff: PASS || FAIL
-
-# BUG(2628): These tests are flaky and sometimes fail, but should not crash.
-test-cpu-profiler/SampleWhenFrameIsNotSetup: PASS || FAIL
-
# These tests always fail. They are here to test test.py. If
# they don't fail then test.py has failed.
test-serialize/TestThatAlwaysFails: FAIL
@@ -53,15 +45,15 @@ test-debug/LiveEditDisabled: FAIL
# TODO(gc): Temporarily disabled in the GC branch.
test-log/EquivalenceOfLoggingAndTraversal: PASS || FAIL
-# BUG(1261): Flakey test.
-test-profile-generator/RecordStackTraceAtStartProfiling: PASS || FAIL
-
# We do not yet shrink weak maps after they have been emptied by the GC
test-weakmaps/Shrinking: FAIL
# Deferred stack trace formatting is temporarily disabled.
test-heap/ReleaseStackTraceData: PASS || FAIL
+# Boot up memory use is bloated in debug mode.
+test-mark-compact/BootUpMemoryUse: PASS, PASS || FAIL if $mode == debug
+
##############################################################################
[ $arch == arm ]
@@ -81,14 +73,14 @@ test-serialize/DeserializeAndRunScript2: SKIP
test-serialize/DeserializeFromSecondSerialization: SKIP
##############################################################################
-[ $arch == arm || $arch == mipsel ]
+[ $arch == mipsel ]
-# BUG(2628): Signal may come when pc is close to frame enter/exit code and on
-# simulator the stack frame is not set up when it is expected to be for the pc
-# value.
-test-cpu-profiler/CollectCpuProfile: PASS || FAIL
+# BUG(2628): The test sometimes fails on MIPS simulator.
test-cpu-profiler/SampleWhenFrameIsNotSetup: PASS || FAIL
+# BUG(2657): Test sometimes times out on MIPS simulator.
+test-thread-termination/TerminateMultipleV8ThreadsDefaultIsolate: PASS || TIMEOUT
+
##############################################################################
[ $arch == android_arm || $arch == android_ia32 ]
diff --git a/deps/v8/test/cctest/test-alloc.cc b/deps/v8/test/cctest/test-alloc.cc
index bc469aa520..d316c8e49f 100644
--- a/deps/v8/test/cctest/test-alloc.cc
+++ b/deps/v8/test/cctest/test-alloc.cc
@@ -117,25 +117,27 @@ const AccessorDescriptor kDescriptor = {
TEST(StressJS) {
+ Isolate* isolate = Isolate::Current();
+ Factory* factory = isolate->factory();
v8::HandleScope scope(v8::Isolate::GetCurrent());
v8::Handle<v8::Context> env = v8::Context::New(v8::Isolate::GetCurrent());
env->Enter();
Handle<JSFunction> function =
- FACTORY->NewFunction(FACTORY->function_string(), FACTORY->null_value());
+ factory->NewFunction(factory->function_string(), factory->null_value());
// Force the creation of an initial map and set the code to
// something empty.
- FACTORY->NewJSObject(function);
+ factory->NewJSObject(function);
function->ReplaceCode(Isolate::Current()->builtins()->builtin(
Builtins::kEmptyFunction));
// Patch the map to have an accessor for "get".
Handle<Map> map(function->initial_map());
Handle<DescriptorArray> instance_descriptors(map->instance_descriptors());
- Handle<Foreign> foreign = FACTORY->NewForeign(&kDescriptor);
+ Handle<Foreign> foreign = factory->NewForeign(&kDescriptor);
Handle<String> name =
- FACTORY->NewStringFromAscii(Vector<const char>("get", 3));
+ factory->NewStringFromAscii(Vector<const char>("get", 3));
ASSERT(instance_descriptors->IsEmpty());
- Handle<DescriptorArray> new_descriptors = FACTORY->NewDescriptorArray(0, 1);
+ Handle<DescriptorArray> new_descriptors = factory->NewDescriptorArray(0, 1);
v8::internal::DescriptorArray::WhitenessWitness witness(*new_descriptors);
map->set_instance_descriptors(*new_descriptors);
diff --git a/deps/v8/test/cctest/test-api.cc b/deps/v8/test/cctest/test-api.cc
index c8f67de0ab..5d3a79d6bb 100644..100755
--- a/deps/v8/test/cctest/test-api.cc
+++ b/deps/v8/test/cctest/test-api.cc
@@ -32,14 +32,10 @@
#include <unistd.h> // getpid
#endif // WIN32
-// TODO(dcarney): remove
-#define V8_ALLOW_ACCESS_TO_RAW_HANDLE_CONSTRUCTOR
-#define V8_ALLOW_ACCESS_TO_PERSISTENT_IMPLICIT
-#define V8_ALLOW_ACCESS_TO_PERSISTENT_ARROW
-
#include "v8.h"
#include "api.h"
+#include "arguments.h"
#include "isolate.h"
#include "compilation-cache.h"
#include "execution.h"
@@ -600,6 +596,7 @@ TEST(MakingExternalAsciiStringConditions) {
THREADED_TEST(UsingExternalString) {
+ i::Factory* factory = i::Isolate::Current()->factory();
{
v8::HandleScope scope(v8::Isolate::GetCurrent());
uint16_t* two_byte_string = AsciiToTwoByteString("test string");
@@ -610,7 +607,7 @@ THREADED_TEST(UsingExternalString) {
HEAP->CollectGarbage(i::NEW_SPACE); // in survivor space now
HEAP->CollectGarbage(i::NEW_SPACE); // in old gen now
i::Handle<i::String> isymbol =
- FACTORY->InternalizedStringFromString(istring);
+ factory->InternalizedStringFromString(istring);
CHECK(isymbol->IsInternalizedString());
}
HEAP->CollectAllGarbage(i::Heap::kNoGCFlags);
@@ -619,6 +616,7 @@ THREADED_TEST(UsingExternalString) {
THREADED_TEST(UsingExternalAsciiString) {
+ i::Factory* factory = i::Isolate::Current()->factory();
{
v8::HandleScope scope(v8::Isolate::GetCurrent());
const char* one_byte_string = "test string";
@@ -629,7 +627,7 @@ THREADED_TEST(UsingExternalAsciiString) {
HEAP->CollectGarbage(i::NEW_SPACE); // in survivor space now
HEAP->CollectGarbage(i::NEW_SPACE); // in old gen now
i::Handle<i::String> isymbol =
- FACTORY->InternalizedStringFromString(istring);
+ factory->InternalizedStringFromString(istring);
CHECK(isymbol->IsInternalizedString());
}
HEAP->CollectAllGarbage(i::Heap::kNoGCFlags);
@@ -809,8 +807,16 @@ template<typename T>
static void CheckReturnValue(const T& t) {
v8::ReturnValue<v8::Value> rv = t.GetReturnValue();
i::Object** o = *reinterpret_cast<i::Object***>(&rv);
- CHECK_EQ(t.GetIsolate(), v8::Isolate::GetCurrent());
+ CHECK_EQ(v8::Isolate::GetCurrent(), t.GetIsolate());
+ CHECK_EQ(t.GetIsolate(), rv.GetIsolate());
+ CHECK((*o)->IsTheHole() || (*o)->IsUndefined());
+ // Verify reset
+ bool is_runtime = (*o)->IsTheHole();
+ rv.Set(true);
+ CHECK(!(*o)->IsTheHole() && !(*o)->IsUndefined());
+ rv.Set(v8::Handle<v8::Object>());
CHECK((*o)->IsTheHole() || (*o)->IsUndefined());
+ CHECK_EQ(is_runtime, (*o)->IsTheHole());
}
static v8::Handle<Value> handle_call(const v8::Arguments& args) {
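
The strengthened CheckReturnValue above encodes two invariants of the new
ReturnValue plumbing: the slot defaults to the hole when the callback is
reached through the runtime and to undefined when called directly through the
API, and setting an empty handle restores that default. Observable behaviour
from inside a callback, sketched under those assumptions:

    void Callback(const v8::FunctionCallbackInfo<v8::Value>& info) {
      v8::ReturnValue<v8::Value> rv = info.GetReturnValue();
      rv.Set(true);                      // slot now holds a concrete value
      rv.Set(v8::Handle<v8::Object>());  // empty handle: reset to the default
      // The test's is_runtime round-trip asserts exactly this reset.
    }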
@@ -820,6 +826,10 @@ static v8::Handle<Value> handle_call(const v8::Arguments& args) {
return v8_num(102);
}
+static v8::Handle<Value> handle_call_2(const v8::Arguments& args) {
+ return handle_call(args);
+}
+
static v8::Handle<Value> handle_call_indirect(const v8::Arguments& args) {
ApiTestFuzzer::Fuzz();
CheckReturnValue(args);
@@ -828,6 +838,10 @@ static v8::Handle<Value> handle_call_indirect(const v8::Arguments& args) {
return v8::Handle<Value>();
}
+static v8::Handle<Value> handle_call_indirect_2(const v8::Arguments& args) {
+ return handle_call_indirect(args);
+}
+
static void handle_callback(const v8::FunctionCallbackInfo<Value>& info) {
ApiTestFuzzer::Fuzz();
CheckReturnValue(info);
@@ -835,6 +849,9 @@ static void handle_callback(const v8::FunctionCallbackInfo<Value>& info) {
info.GetReturnValue().Set(v8_num(102));
}
+static void handle_callback_2(const v8::FunctionCallbackInfo<Value>& info) {
+ return handle_callback(info);
+}
static v8::Handle<Value> construct_call(const v8::Arguments& args) {
ApiTestFuzzer::Fuzz();
@@ -894,7 +911,8 @@ static void Return239Callback(
template<typename Handler>
-static void TestFunctionTemplateInitializer(Handler handler) {
+static void TestFunctionTemplateInitializer(Handler handler,
+ Handler handler_2) {
// Test constructor calls.
{
LocalContext env;
@@ -914,7 +932,7 @@ static void TestFunctionTemplateInitializer(Handler handler) {
LocalContext env;
v8::HandleScope scope(env->GetIsolate());
Local<v8::FunctionTemplate> fun_templ = v8::FunctionTemplate::New();
- fun_templ->SetCallHandler(handler);
+ fun_templ->SetCallHandler(handler_2);
Local<Function> fun = fun_templ->GetFunction();
env->Global()->Set(v8_str("obj"), fun);
Local<Script> script = v8_compile("obj()");
@@ -952,9 +970,9 @@ static void TestFunctionTemplateAccessor(Constructor constructor,
THREADED_TEST(FunctionTemplate) {
- TestFunctionTemplateInitializer(handle_call);
- TestFunctionTemplateInitializer(handle_call_indirect);
- TestFunctionTemplateInitializer(handle_callback);
+ TestFunctionTemplateInitializer(handle_call, handle_call_2);
+ TestFunctionTemplateInitializer(handle_call_indirect, handle_call_indirect_2);
+ TestFunctionTemplateInitializer(handle_callback, handle_callback_2);
TestFunctionTemplateAccessor(construct_call, Return239);
TestFunctionTemplateAccessor(construct_call_indirect, Return239Indirect);
@@ -1014,47 +1032,72 @@ template<typename T>
void FastReturnValueCallback(const v8::FunctionCallbackInfo<v8::Value>& info);
// constant return values
-static const int32_t kFastReturnValueInt32 = 471;
-static const uint32_t kFastReturnValueUint32 = 571;
+static int32_t fast_return_value_int32 = 471;
+static uint32_t fast_return_value_uint32 = 571;
static const double kFastReturnValueDouble = 2.7;
// variable return values
static bool fast_return_value_bool = false;
-static bool fast_return_value_void_is_null = false;
+enum ReturnValueOddball {
+ kNullReturnValue,
+ kUndefinedReturnValue,
+ kEmptyStringReturnValue
+};
+static ReturnValueOddball fast_return_value_void;
+static bool fast_return_value_object_is_empty = false;
template<>
void FastReturnValueCallback<int32_t>(
const v8::FunctionCallbackInfo<v8::Value>& info) {
- info.GetReturnValue().Set(info.GetIsolate(), kFastReturnValueInt32);
+ CheckReturnValue(info);
+ info.GetReturnValue().Set(fast_return_value_int32);
}
template<>
void FastReturnValueCallback<uint32_t>(
const v8::FunctionCallbackInfo<v8::Value>& info) {
- info.GetReturnValue().Set(info.GetIsolate(), kFastReturnValueUint32);
+ CheckReturnValue(info);
+ info.GetReturnValue().Set(fast_return_value_uint32);
}
template<>
void FastReturnValueCallback<double>(
const v8::FunctionCallbackInfo<v8::Value>& info) {
- info.GetReturnValue().Set(info.GetIsolate(), kFastReturnValueDouble);
+ CheckReturnValue(info);
+ info.GetReturnValue().Set(kFastReturnValueDouble);
}
template<>
void FastReturnValueCallback<bool>(
const v8::FunctionCallbackInfo<v8::Value>& info) {
- info.GetReturnValue().Set(info.GetIsolate(), fast_return_value_bool);
+ CheckReturnValue(info);
+ info.GetReturnValue().Set(fast_return_value_bool);
}
template<>
void FastReturnValueCallback<void>(
const v8::FunctionCallbackInfo<v8::Value>& info) {
- if (fast_return_value_void_is_null) {
- info.GetReturnValue().SetNull(info.GetIsolate());
- } else {
- info.GetReturnValue().SetUndefined(info.GetIsolate());
+ CheckReturnValue(info);
+ switch (fast_return_value_void) {
+ case kNullReturnValue:
+ info.GetReturnValue().SetNull();
+ break;
+ case kUndefinedReturnValue:
+ info.GetReturnValue().SetUndefined();
+ break;
+ case kEmptyStringReturnValue:
+ info.GetReturnValue().SetEmptyString();
+ break;
}
}
+template<>
+void FastReturnValueCallback<Object>(
+ const v8::FunctionCallbackInfo<v8::Value>& info) {
+ v8::Handle<v8::Object> object;
+ if (!fast_return_value_object_is_empty) object = Object::New();
+ info.GetReturnValue().Set(object);
+}
+
template<typename T>
Handle<Value> TestFastReturnValues() {
LocalContext env;
@@ -1068,16 +1111,29 @@ Handle<Value> TestFastReturnValues() {
}
THREADED_TEST(FastReturnValues) {
+ LocalContext env;
v8::HandleScope scope(v8::Isolate::GetCurrent());
v8::Handle<v8::Value> value;
- // check int_32
- value = TestFastReturnValues<int32_t>();
- CHECK(value->IsInt32());
- CHECK_EQ(kFastReturnValueInt32, value->Int32Value());
- // check uint32_t
- value = TestFastReturnValues<uint32_t>();
- CHECK(value->IsInt32());
- CHECK_EQ(kFastReturnValueUint32, value->Int32Value());
+ // check int32_t and uint32_t
+ int32_t int_values[] = {
+ 0, 234, -723,
+ i::Smi::kMinValue, i::Smi::kMaxValue
+ };
+ for (size_t i = 0; i < ARRAY_SIZE(int_values); i++) {
+ for (int modifier = -1; modifier <= 1; modifier++) {
+ int int_value = int_values[i] + modifier;
+ // check int32_t
+ fast_return_value_int32 = int_value;
+ value = TestFastReturnValues<int32_t>();
+ CHECK(value->IsInt32());
+ CHECK(fast_return_value_int32 == value->Int32Value());
+ // check uint32_t
+ fast_return_value_uint32 = static_cast<uint32_t>(int_value);
+ value = TestFastReturnValues<uint32_t>();
+ CHECK(value->IsUint32());
+ CHECK(fast_return_value_uint32 == value->Uint32Value());
+ }
+ }
// check double
value = TestFastReturnValues<double>();
CHECK(value->IsNumber());
@@ -1090,15 +1146,34 @@ THREADED_TEST(FastReturnValues) {
CHECK_EQ(fast_return_value_bool, value->ToBoolean()->Value());
}
// check oddballs
- for (int i = 0; i < 2; i++) {
- fast_return_value_void_is_null = i == 0;
+ ReturnValueOddball oddballs[] = {
+ kNullReturnValue,
+ kUndefinedReturnValue,
+ kEmptyStringReturnValue
+ };
+ for (size_t i = 0; i < ARRAY_SIZE(oddballs); i++) {
+ fast_return_value_void = oddballs[i];
value = TestFastReturnValues<void>();
- if (fast_return_value_void_is_null) {
- CHECK(value->IsNull());
- } else {
- CHECK(value->IsUndefined());
+ switch (fast_return_value_void) {
+ case kNullReturnValue:
+ CHECK(value->IsNull());
+ break;
+ case kUndefinedReturnValue:
+ CHECK(value->IsUndefined());
+ break;
+ case kEmptyStringReturnValue:
+ CHECK(value->IsString());
+ CHECK_EQ(0, v8::String::Cast(*value)->Length());
+ break;
}
}
+ // check handles
+ fast_return_value_object_is_empty = false;
+ value = TestFastReturnValues<Object>();
+ CHECK(value->IsObject());
+ fast_return_value_object_is_empty = true;
+ value = TestFastReturnValues<Object>();
+ CHECK(value->IsUndefined());
}
@@ -1975,88 +2050,90 @@ THREADED_TEST(IndexedPropertyHandlerGetter) {
v8::Handle<v8::Object> bottom;
-static v8::Handle<Value> CheckThisIndexedPropertyHandler(
+static void CheckThisIndexedPropertyHandler(
uint32_t index,
- const AccessorInfo& info) {
+ const v8::PropertyCallbackInfo<v8::Value>& info) {
+ CheckReturnValue(info);
ApiTestFuzzer::Fuzz();
CHECK(info.This()->Equals(bottom));
- return v8::Handle<Value>();
}
-static v8::Handle<Value> CheckThisNamedPropertyHandler(
+static void CheckThisNamedPropertyHandler(
Local<String> name,
- const AccessorInfo& info) {
+ const v8::PropertyCallbackInfo<v8::Value>& info) {
+ CheckReturnValue(info);
ApiTestFuzzer::Fuzz();
CHECK(info.This()->Equals(bottom));
- return v8::Handle<Value>();
}
-
-v8::Handle<Value> CheckThisIndexedPropertySetter(uint32_t index,
- Local<Value> value,
- const AccessorInfo& info) {
+void CheckThisIndexedPropertySetter(
+ uint32_t index,
+ Local<Value> value,
+ const v8::PropertyCallbackInfo<v8::Value>& info) {
+ CheckReturnValue(info);
ApiTestFuzzer::Fuzz();
CHECK(info.This()->Equals(bottom));
- return v8::Handle<Value>();
}
-v8::Handle<Value> CheckThisNamedPropertySetter(Local<String> property,
- Local<Value> value,
- const AccessorInfo& info) {
+void CheckThisNamedPropertySetter(
+ Local<String> property,
+ Local<Value> value,
+ const v8::PropertyCallbackInfo<v8::Value>& info) {
+ CheckReturnValue(info);
ApiTestFuzzer::Fuzz();
CHECK(info.This()->Equals(bottom));
- return v8::Handle<Value>();
}
-v8::Handle<v8::Integer> CheckThisIndexedPropertyQuery(
+void CheckThisIndexedPropertyQuery(
uint32_t index,
- const AccessorInfo& info) {
+ const v8::PropertyCallbackInfo<v8::Integer>& info) {
+ CheckReturnValue(info);
ApiTestFuzzer::Fuzz();
CHECK(info.This()->Equals(bottom));
- return v8::Handle<v8::Integer>();
}
-v8::Handle<v8::Integer> CheckThisNamedPropertyQuery(Local<String> property,
- const AccessorInfo& info) {
+void CheckThisNamedPropertyQuery(
+ Local<String> property,
+ const v8::PropertyCallbackInfo<v8::Integer>& info) {
+ CheckReturnValue(info);
ApiTestFuzzer::Fuzz();
CHECK(info.This()->Equals(bottom));
- return v8::Handle<v8::Integer>();
}
-v8::Handle<v8::Boolean> CheckThisIndexedPropertyDeleter(
+void CheckThisIndexedPropertyDeleter(
uint32_t index,
- const AccessorInfo& info) {
+ const v8::PropertyCallbackInfo<v8::Boolean>& info) {
+ CheckReturnValue(info);
ApiTestFuzzer::Fuzz();
CHECK(info.This()->Equals(bottom));
- return v8::Handle<v8::Boolean>();
}
-v8::Handle<v8::Boolean> CheckThisNamedPropertyDeleter(
+void CheckThisNamedPropertyDeleter(
Local<String> property,
- const AccessorInfo& info) {
+ const v8::PropertyCallbackInfo<v8::Boolean>& info) {
+ CheckReturnValue(info);
ApiTestFuzzer::Fuzz();
CHECK(info.This()->Equals(bottom));
- return v8::Handle<v8::Boolean>();
}
-v8::Handle<v8::Array> CheckThisIndexedPropertyEnumerator(
- const AccessorInfo& info) {
+void CheckThisIndexedPropertyEnumerator(
+ const v8::PropertyCallbackInfo<v8::Array>& info) {
+ CheckReturnValue(info);
ApiTestFuzzer::Fuzz();
CHECK(info.This()->Equals(bottom));
- return v8::Handle<v8::Array>();
}
-v8::Handle<v8::Array> CheckThisNamedPropertyEnumerator(
- const AccessorInfo& info) {
+void CheckThisNamedPropertyEnumerator(
+ const v8::PropertyCallbackInfo<v8::Array>& info) {
+ CheckReturnValue(info);
ApiTestFuzzer::Fuzz();
CHECK(info.This()->Equals(bottom));
- return v8::Handle<v8::Array>();
}
@@ -2493,7 +2570,20 @@ THREADED_TEST(SymbolProperties) {
}
-THREADED_TEST(ArrayBuffer) {
+class ScopedArrayBufferContents {
+ public:
+ explicit ScopedArrayBufferContents(
+ const v8::ArrayBuffer::Contents& contents)
+ : contents_(contents) {}
+ ~ScopedArrayBufferContents() { free(contents_.Data()); }
+ void* Data() const { return contents_.Data(); }
+ size_t ByteLength() const { return contents_.ByteLength(); }
+ private:
+ const v8::ArrayBuffer::Contents contents_;
+};
+
+
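+// ScopedArrayBufferContents exists because Externalize() hands ownership of
+// the backing store to the embedder: after the call V8 will no longer free
+// the memory, so the wrapper releases it on scope exit. The tests below all
+// follow the same lifecycle, sketched here:
+//
+//   Local<v8::ArrayBuffer> ab = v8::ArrayBuffer::New(1024);
+//   CHECK(!ab->IsExternal());
+//   ScopedArrayBufferContents contents(ab->Externalize());
+//   CHECK(ab->IsExternal());
+//   // contents.Data() remains valid until the wrapper is destroyed;
+//   // its destructor, not V8, calls free().
+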
+THREADED_TEST(ArrayBuffer_ApiInternalToExternal) {
i::FLAG_harmony_array_buffer = true;
i::FLAG_harmony_typed_arrays = true;
@@ -2502,10 +2592,15 @@ THREADED_TEST(ArrayBuffer) {
v8::HandleScope handle_scope(isolate);
Local<v8::ArrayBuffer> ab = v8::ArrayBuffer::New(1024);
+ CHECK_EQ(1024, static_cast<int>(ab->ByteLength()));
+ CHECK(!ab->IsExternal());
HEAP->CollectAllGarbage(i::Heap::kNoGCFlags);
- CHECK_EQ(1024, static_cast<int>(ab->ByteLength()));
- uint8_t* data = static_cast<uint8_t*>(ab->Data());
+ ScopedArrayBufferContents ab_contents(ab->Externalize());
+ CHECK(ab->IsExternal());
+
+ CHECK_EQ(1024, static_cast<int>(ab_contents.ByteLength()));
+ uint8_t* data = static_cast<uint8_t*>(ab_contents.Data());
ASSERT(data != NULL);
env->Global()->Set(v8_str("ab"), ab);
@@ -2523,27 +2618,72 @@ THREADED_TEST(ArrayBuffer) {
data[1] = 0x11;
result = CompileRun("u8[0] + u8[1]");
CHECK_EQ(0xDD, result->Int32Value());
+}
+
- result = CompileRun("var ab1 = new ArrayBuffer(2);"
- "var u8_a = new Uint8Array(ab1);"
- "u8_a[0] = 0xAA;"
- "u8_a[1] = 0xFF; u8_a.buffer");
+THREADED_TEST(ArrayBuffer_JSInternalToExternal) {
+ i::FLAG_harmony_array_buffer = true;
+ i::FLAG_harmony_typed_arrays = true;
+
+ LocalContext env;
+ v8::Isolate* isolate = env->GetIsolate();
+ v8::HandleScope handle_scope(isolate);
+
+
+ v8::Handle<v8::Value> result =
+ CompileRun("var ab1 = new ArrayBuffer(2);"
+ "var u8_a = new Uint8Array(ab1);"
+ "u8_a[0] = 0xAA;"
+ "u8_a[1] = 0xFF; u8_a.buffer");
Local<v8::ArrayBuffer> ab1 = v8::ArrayBuffer::Cast(*result);
CHECK_EQ(2, static_cast<int>(ab1->ByteLength()));
- uint8_t* ab1_data = static_cast<uint8_t*>(ab1->Data());
- CHECK_EQ(0xAA, ab1_data[0]);
+ CHECK(!ab1->IsExternal());
+ ScopedArrayBufferContents ab1_contents(ab1->Externalize());
+ CHECK(ab1->IsExternal());
+
+ result = CompileRun("ab1.byteLength");
+ CHECK_EQ(2, result->Int32Value());
+ result = CompileRun("u8_a[0]");
+ CHECK_EQ(0xAA, result->Int32Value());
+ result = CompileRun("u8_a[1]");
+ CHECK_EQ(0xFF, result->Int32Value());
+ result = CompileRun("var u8_b = new Uint8Array(ab1);"
+ "u8_b[0] = 0xBB;"
+ "u8_a[0]");
+ CHECK_EQ(0xBB, result->Int32Value());
+ result = CompileRun("u8_b[1]");
+ CHECK_EQ(0xFF, result->Int32Value());
+
+ CHECK_EQ(2, static_cast<int>(ab1_contents.ByteLength()));
+ uint8_t* ab1_data = static_cast<uint8_t*>(ab1_contents.Data());
+ CHECK_EQ(0xBB, ab1_data[0]);
CHECK_EQ(0xFF, ab1_data[1]);
ab1_data[0] = 0xCC;
ab1_data[1] = 0x11;
result = CompileRun("u8_a[0] + u8_a[1]");
CHECK_EQ(0xDD, result->Int32Value());
+}
+
+
+THREADED_TEST(ArrayBuffer_External) {
+ i::FLAG_harmony_array_buffer = true;
+ i::FLAG_harmony_typed_arrays = true;
- uint8_t* my_data = new uint8_t[100];
- memset(my_data, 0, 100);
- Local<v8::ArrayBuffer> ab3 = v8::ArrayBuffer::New(my_data, 100);
+ LocalContext env;
+ v8::Isolate* isolate = env->GetIsolate();
+ v8::HandleScope handle_scope(isolate);
+
+ i::ScopedVector<uint8_t> my_data(100);
+ memset(my_data.start(), 0, 100);
+ Local<v8::ArrayBuffer> ab3 = v8::ArrayBuffer::New(my_data.start(), 100);
CHECK_EQ(100, static_cast<int>(ab3->ByteLength()));
- CHECK_EQ(my_data, ab3->Data());
+ CHECK(ab3->IsExternal());
+
env->Global()->Set(v8_str("ab3"), ab3);
+
+ v8::Handle<v8::Value> result = CompileRun("ab3.byteLength");
+ CHECK_EQ(100, result->Int32Value());
+
result = CompileRun("var u8_b = new Uint8Array(ab3);"
"u8_b[0] = 0xBB;"
"u8_b[1] = 0xCC;"
@@ -2555,12 +2695,121 @@ THREADED_TEST(ArrayBuffer) {
my_data[1] = 0x11;
result = CompileRun("u8_b[0] + u8_b[1]");
CHECK_EQ(0xDD, result->Int32Value());
+}
- delete[] my_data;
+
+static void CheckIsNeutered(v8::Handle<v8::TypedArray> ta) {
+ CHECK_EQ(0, static_cast<int>(ta->ByteLength()));
+ CHECK_EQ(0, static_cast<int>(ta->Length()));
+ CHECK_EQ(0, static_cast<int>(ta->ByteOffset()));
}
+template <typename TypedArray, int kElementSize>
+static Handle<TypedArray> CreateAndCheck(Handle<v8::ArrayBuffer> ab,
+ int byteOffset,
+ int length) {
+ v8::Handle<TypedArray> ta = TypedArray::New(ab, byteOffset, length);
+ CHECK_EQ(byteOffset, static_cast<int>(ta->ByteOffset()));
+ CHECK_EQ(length, static_cast<int>(ta->Length()));
+ CHECK_EQ(length * kElementSize, static_cast<int>(ta->ByteLength()));
+ return ta;
+}
+THREADED_TEST(ArrayBuffer_NeuteringApi) {
+ LocalContext env;
+ v8::Isolate* isolate = env->GetIsolate();
+ v8::HandleScope handle_scope(isolate);
+
+ v8::Handle<v8::ArrayBuffer> buffer = v8::ArrayBuffer::New(1024);
+
+ v8::Handle<v8::Uint8Array> u8a =
+ CreateAndCheck<v8::Uint8Array, 1>(buffer, 1, 1023);
+ v8::Handle<v8::Uint8ClampedArray> u8c =
+ CreateAndCheck<v8::Uint8ClampedArray, 1>(buffer, 1, 1023);
+ v8::Handle<v8::Int8Array> i8a =
+ CreateAndCheck<v8::Int8Array, 1>(buffer, 1, 1023);
+
+ v8::Handle<v8::Uint16Array> u16a =
+ CreateAndCheck<v8::Uint16Array, 2>(buffer, 2, 511);
+ v8::Handle<v8::Int16Array> i16a =
+ CreateAndCheck<v8::Int16Array, 2>(buffer, 2, 511);
+
+ v8::Handle<v8::Uint32Array> u32a =
+ CreateAndCheck<v8::Uint32Array, 4>(buffer, 4, 255);
+ v8::Handle<v8::Int32Array> i32a =
+ CreateAndCheck<v8::Int32Array, 4>(buffer, 4, 255);
+
+ v8::Handle<v8::Float32Array> f32a =
+ CreateAndCheck<v8::Float32Array, 4>(buffer, 4, 255);
+ v8::Handle<v8::Float64Array> f64a =
+ CreateAndCheck<v8::Float64Array, 8>(buffer, 8, 127);
+
+ ScopedArrayBufferContents contents(buffer->Externalize());
+ buffer->Neuter();
+ CHECK_EQ(0, static_cast<int>(buffer->ByteLength()));
+ CheckIsNeutered(u8a);
+ CheckIsNeutered(u8c);
+ CheckIsNeutered(i8a);
+ CheckIsNeutered(u16a);
+ CheckIsNeutered(i16a);
+ CheckIsNeutered(u32a);
+ CheckIsNeutered(i32a);
+ CheckIsNeutered(f32a);
+ CheckIsNeutered(f64a);
+}
+
+THREADED_TEST(ArrayBuffer_NeuteringScript) {
+ LocalContext env;
+ v8::Isolate* isolate = env->GetIsolate();
+ v8::HandleScope handle_scope(isolate);
+
+ CompileRun(
+ "var ab = new ArrayBuffer(1024);"
+ "var u8a = new Uint8Array(ab, 1, 1023);"
+ "var u8c = new Uint8ClampedArray(ab, 1, 1023);"
+ "var i8a = new Int8Array(ab, 1, 1023);"
+ "var u16a = new Uint16Array(ab, 2, 511);"
+ "var i16a = new Int16Array(ab, 2, 511);"
+ "var u32a = new Uint32Array(ab, 4, 255);"
+ "var i32a = new Int32Array(ab, 4, 255);"
+ "var f32a = new Float32Array(ab, 4, 255);"
+ "var f64a = new Float64Array(ab, 8, 127);");
+
+ v8::Handle<v8::ArrayBuffer> ab(v8::ArrayBuffer::Cast(*CompileRun("ab")));
+
+ v8::Handle<v8::Uint8Array> u8a(v8::Uint8Array::Cast(*CompileRun("u8a")));
+ v8::Handle<v8::Uint8ClampedArray> u8c(
+ v8::Uint8ClampedArray::Cast(*CompileRun("u8c")));
+ v8::Handle<v8::Int8Array> i8a(v8::Int8Array::Cast(*CompileRun("i8a")));
+
+ v8::Handle<v8::Uint16Array> u16a(
+ v8::Uint16Array::Cast(*CompileRun("u16a")));
+ v8::Handle<v8::Int16Array> i16a(
+ v8::Int16Array::Cast(*CompileRun("i16a")));
+ v8::Handle<v8::Uint32Array> u32a(
+ v8::Uint32Array::Cast(*CompileRun("u32a")));
+ v8::Handle<v8::Int32Array> i32a(
+ v8::Int32Array::Cast(*CompileRun("i32a")));
+ v8::Handle<v8::Float32Array> f32a(
+ v8::Float32Array::Cast(*CompileRun("f32a")));
+ v8::Handle<v8::Float64Array> f64a(
+ v8::Float64Array::Cast(*CompileRun("f64a")));
+
+ ScopedArrayBufferContents contents(ab->Externalize());
+ ab->Neuter();
+ CHECK_EQ(0, static_cast<int>(ab->ByteLength()));
+ CheckIsNeutered(u8a);
+ CheckIsNeutered(u8c);
+ CheckIsNeutered(i8a);
+ CheckIsNeutered(u16a);
+ CheckIsNeutered(i16a);
+ CheckIsNeutered(u32a);
+ CheckIsNeutered(i32a);
+ CheckIsNeutered(f32a);
+ CheckIsNeutered(f64a);
+}
+
THREADED_TEST(HiddenProperties) {
@@ -2708,71 +2957,75 @@ THREADED_TEST(GlobalHandle) {
v8::Persistent<String> global;
{
v8::HandleScope scope(isolate);
- Local<String> str = v8_str("str");
- global = v8::Persistent<String>::New(isolate, str);
+ global.Reset(isolate, v8_str("str"));
}
- CHECK_EQ(global->Length(), 3);
- global.Dispose(isolate);
-
{
v8::HandleScope scope(isolate);
- Local<String> str = v8_str("str");
- global = v8::Persistent<String>::New(isolate, str);
+ CHECK_EQ(v8::Local<String>::New(isolate, global)->Length(), 3);
}
- CHECK_EQ(global->Length(), 3);
- global.Dispose(isolate);
+ global.Dispose();
+ global.Clear();
+ {
+ v8::HandleScope scope(isolate);
+ global.Reset(isolate, v8_str("str"));
+ }
+ {
+ v8::HandleScope scope(isolate);
+ CHECK_EQ(v8::Local<String>::New(isolate, global)->Length(), 3);
+ }
+ global.Dispose();
}
THREADED_TEST(ResettingGlobalHandle) {
v8::Isolate* isolate = v8::Isolate::GetCurrent();
- v8::internal::GlobalHandles* global_handles = NULL;
- int initial_handle_count = 0;
v8::Persistent<String> global;
{
v8::HandleScope scope(isolate);
- Local<String> str = v8_str("str");
- global_handles =
- reinterpret_cast<v8::internal::Isolate*>(isolate)->global_handles();
- initial_handle_count = global_handles->NumberOfGlobalHandles();
- global = v8::Persistent<String>::New(isolate, str);
+ global.Reset(isolate, v8_str("str"));
}
- CHECK_EQ(global->Length(), 3);
- CHECK_EQ(global_handles->NumberOfGlobalHandles(), initial_handle_count + 1);
+ v8::internal::GlobalHandles* global_handles =
+ reinterpret_cast<v8::internal::Isolate*>(isolate)->global_handles();
+ int initial_handle_count = global_handles->NumberOfGlobalHandles();
{
v8::HandleScope scope(isolate);
- Local<String> str = v8_str("longer");
- global.Reset(isolate, str);
+ CHECK_EQ(v8::Local<String>::New(isolate, global)->Length(), 3);
+ }
+ {
+ v8::HandleScope scope(isolate);
+ global.Reset(isolate, v8_str("longer"));
}
- CHECK_EQ(global->Length(), 6);
- CHECK_EQ(global_handles->NumberOfGlobalHandles(), initial_handle_count + 1);
- global.Dispose(isolate);
CHECK_EQ(global_handles->NumberOfGlobalHandles(), initial_handle_count);
+ {
+ v8::HandleScope scope(isolate);
+ CHECK_EQ(v8::Local<String>::New(isolate, global)->Length(), 6);
+ }
+ global.Dispose(isolate);
+ CHECK_EQ(global_handles->NumberOfGlobalHandles(), initial_handle_count - 1);
}
THREADED_TEST(ResettingGlobalHandleToEmpty) {
v8::Isolate* isolate = v8::Isolate::GetCurrent();
- v8::internal::GlobalHandles* global_handles = NULL;
- int initial_handle_count = 0;
v8::Persistent<String> global;
{
v8::HandleScope scope(isolate);
- Local<String> str = v8_str("str");
- global_handles =
- reinterpret_cast<v8::internal::Isolate*>(isolate)->global_handles();
- initial_handle_count = global_handles->NumberOfGlobalHandles();
- global = v8::Persistent<String>::New(isolate, str);
+ global.Reset(isolate, v8_str("str"));
+ }
+ v8::internal::GlobalHandles* global_handles =
+ reinterpret_cast<v8::internal::Isolate*>(isolate)->global_handles();
+ int initial_handle_count = global_handles->NumberOfGlobalHandles();
+ {
+ v8::HandleScope scope(isolate);
+ CHECK_EQ(v8::Local<String>::New(isolate, global)->Length(), 3);
}
- CHECK_EQ(global->Length(), 3);
- CHECK_EQ(global_handles->NumberOfGlobalHandles(), initial_handle_count + 1);
{
v8::HandleScope scope(isolate);
Local<String> empty;
global.Reset(isolate, empty);
}
CHECK(global.IsEmpty());
- CHECK_EQ(global_handles->NumberOfGlobalHandles(), initial_handle_count);
+ CHECK_EQ(global_handles->NumberOfGlobalHandles(), initial_handle_count - 1);
}
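
The handle tests above all migrate to the same idiom: Persistent::New and
direct dereference of a Persistent are gone; a persistent is populated with
Reset(isolate, value) and must be materialized as a Local before inspection.
The before/after shape, hedged to this API revision (v8_str is the test
helper used throughout):

    v8::Persistent<v8::String> global;
    {
      v8::HandleScope scope(isolate);
      global.Reset(isolate, v8_str("str"));  // was Persistent<String>::New
    }
    {
      v8::HandleScope scope(isolate);
      v8::Local<v8::String> local =
          v8::Local<v8::String>::New(isolate, global);  // was global->...
      CHECK_EQ(local->Length(), 3);
    }
    global.Dispose();  // lifetime management is still explicit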
@@ -2787,7 +3040,7 @@ THREADED_TEST(ClearAndLeakGlobal) {
global_handles =
reinterpret_cast<v8::internal::Isolate*>(isolate)->global_handles();
initial_handle_count = global_handles->NumberOfGlobalHandles();
- global = v8::Persistent<String>::New(isolate, str);
+ global.Reset(isolate, str);
}
CHECK_EQ(global_handles->NumberOfGlobalHandles(), initial_handle_count + 1);
String* str = global.ClearAndLeak();
@@ -2800,6 +3053,24 @@ THREADED_TEST(ClearAndLeakGlobal) {
}
+THREADED_TEST(GlobalHandleUpcast) {
+ v8::Isolate* isolate = v8::Isolate::GetCurrent();
+ v8::HandleScope scope(isolate);
+ v8::Local<String> local = v8::Local<String>::New(v8_str("str"));
+ v8::Persistent<String> global_string(isolate, local);
+#ifdef V8_USE_UNSAFE_HANDLES
+ v8::Persistent<Value> global_value =
+ v8::Persistent<Value>::Cast(global_string);
+#else
+ v8::Persistent<Value>& global_value =
+ v8::Persistent<Value>::Cast(global_string);
+#endif
+ CHECK(v8::Local<v8::Value>::New(isolate, global_value)->IsString());
+ CHECK(global_string == v8::Persistent<String>::Cast(global_value));
+ global_string.Dispose();
+}
+
+
THREADED_TEST(LocalHandle) {
v8::HandleScope scope(v8::Isolate::GetCurrent());
v8::Local<String> local = v8::Local<String>::New(v8_str("str"));
@@ -2823,7 +3094,7 @@ class WeakCallCounter {
static void WeakPointerCallback(v8::Isolate* isolate,
- Persistent<Object>* handle,
+ Persistent<Value>* handle,
WeakCallCounter* counter) {
CHECK_EQ(1234, counter->id());
counter->increment();
@@ -2831,129 +3102,48 @@ static void WeakPointerCallback(v8::Isolate* isolate,
}
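
Widening the callback parameter to Persistent<Value>* lets one weak callback
serve persistents of any type, matching the tests' switch from
Persistent<Object> to Persistent<Value> below. Registration, sketched in the
same style as the rewritten groups tests:

    WeakCallCounter counter(1234);
    v8::Persistent<v8::Value> handle;
    {
      v8::HandleScope scope(iso);
      handle.Reset(iso, v8::Object::New());
    }
    handle.MakeWeak(iso, &counter, &WeakPointerCallback);
    // Once the object is otherwise unreachable, a full GC fires the
    // callback, which bumps the counter and disposes the handle.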
-THREADED_TEST(OldApiObjectGroups) {
+THREADED_TEST(ApiObjectGroups) {
LocalContext env;
v8::Isolate* iso = env->GetIsolate();
HandleScope scope(iso);
- Persistent<Object> g1s1;
- Persistent<Object> g1s2;
- Persistent<Object> g1c1;
- Persistent<Object> g2s1;
- Persistent<Object> g2s2;
- Persistent<Object> g2c1;
+ Persistent<Value> g1s1;
+ Persistent<Value> g1s2;
+ Persistent<Value> g1c1;
+ Persistent<Value> g2s1;
+ Persistent<Value> g2s2;
+ Persistent<Value> g2c1;
WeakCallCounter counter(1234);
{
HandleScope scope(iso);
- g1s1 = Persistent<Object>::New(iso, Object::New());
- g1s2 = Persistent<Object>::New(iso, Object::New());
- g1c1 = Persistent<Object>::New(iso, Object::New());
+ g1s1.Reset(iso, Object::New());
+ g1s2.Reset(iso, Object::New());
+ g1c1.Reset(iso, Object::New());
g1s1.MakeWeak(iso, &counter, &WeakPointerCallback);
g1s2.MakeWeak(iso, &counter, &WeakPointerCallback);
g1c1.MakeWeak(iso, &counter, &WeakPointerCallback);
- g2s1 = Persistent<Object>::New(iso, Object::New());
- g2s2 = Persistent<Object>::New(iso, Object::New());
- g2c1 = Persistent<Object>::New(iso, Object::New());
+ g2s1.Reset(iso, Object::New());
+ g2s2.Reset(iso, Object::New());
+ g2c1.Reset(iso, Object::New());
g2s1.MakeWeak(iso, &counter, &WeakPointerCallback);
g2s2.MakeWeak(iso, &counter, &WeakPointerCallback);
g2c1.MakeWeak(iso, &counter, &WeakPointerCallback);
}
- Persistent<Object> root = Persistent<Object>::New(iso, g1s1); // make a root.
+ Persistent<Value> root(iso, g1s1); // make a root.
// Connect group 1 and 2, make a cycle.
- CHECK(g1s2->Set(0, Handle<Object>(*g2s2)));
- CHECK(g2s1->Set(0, Handle<Object>(*g1s1)));
-
- {
- Persistent<Value> g1_objects[] = { g1s1, g1s2 };
- Persistent<Value> g1_children[] = { g1c1 };
- Persistent<Value> g2_objects[] = { g2s1, g2s2 };
- Persistent<Value> g2_children[] = { g2c1 };
- V8::AddObjectGroup(g1_objects, 2);
- V8::AddImplicitReferences(g1s1, g1_children, 1);
- V8::AddObjectGroup(g2_objects, 2);
- V8::AddImplicitReferences(g2s1, g2_children, 1);
- }
- // Do a single full GC, ensure incremental marking is stopped.
- HEAP->CollectAllGarbage(i::Heap::kAbortIncrementalMarkingMask);
-
-  // All objects should be alive.
- CHECK_EQ(0, counter.NumberOfWeakCalls());
-
- // Weaken the root.
- root.MakeWeak(iso, &counter, &WeakPointerCallback);
- // But make children strong roots---all the objects (except for children)
- // should be collectable now.
- g1c1.ClearWeak(iso);
- g2c1.ClearWeak(iso);
-
- // Groups are deleted, rebuild groups.
- {
- Persistent<Value> g1_objects[] = { g1s1, g1s2 };
- Persistent<Value> g1_children[] = { g1c1 };
- Persistent<Value> g2_objects[] = { g2s1, g2s2 };
- Persistent<Value> g2_children[] = { g2c1 };
- V8::AddObjectGroup(g1_objects, 2);
- V8::AddImplicitReferences(g1s1, g1_children, 1);
- V8::AddObjectGroup(g2_objects, 2);
- V8::AddImplicitReferences(g2s1, g2_children, 1);
- }
-
- HEAP->CollectAllGarbage(i::Heap::kAbortIncrementalMarkingMask);
-
- // All objects should be gone. 5 global handles in total.
- CHECK_EQ(5, counter.NumberOfWeakCalls());
-
- // And now make children weak again and collect them.
- g1c1.MakeWeak(iso, &counter, &WeakPointerCallback);
- g2c1.MakeWeak(iso, &counter, &WeakPointerCallback);
-
- HEAP->CollectAllGarbage(i::Heap::kAbortIncrementalMarkingMask);
- CHECK_EQ(7, counter.NumberOfWeakCalls());
-}
-
-
-THREADED_TEST(ApiObjectGroups) {
- LocalContext env;
- v8::Isolate* iso = env->GetIsolate();
- HandleScope scope(iso);
-
- Persistent<Object> g1s1;
- Persistent<Object> g1s2;
- Persistent<Object> g1c1;
- Persistent<Object> g2s1;
- Persistent<Object> g2s2;
- Persistent<Object> g2c1;
-
- WeakCallCounter counter(1234);
-
{
HandleScope scope(iso);
- g1s1 = Persistent<Object>::New(iso, Object::New());
- g1s2 = Persistent<Object>::New(iso, Object::New());
- g1c1 = Persistent<Object>::New(iso, Object::New());
- g1s1.MakeWeak(iso, &counter, &WeakPointerCallback);
- g1s2.MakeWeak(iso, &counter, &WeakPointerCallback);
- g1c1.MakeWeak(iso, &counter, &WeakPointerCallback);
-
- g2s1 = Persistent<Object>::New(iso, Object::New());
- g2s2 = Persistent<Object>::New(iso, Object::New());
- g2c1 = Persistent<Object>::New(iso, Object::New());
- g2s1.MakeWeak(iso, &counter, &WeakPointerCallback);
- g2s2.MakeWeak(iso, &counter, &WeakPointerCallback);
- g2c1.MakeWeak(iso, &counter, &WeakPointerCallback);
+ CHECK(Local<Object>::New(iso, g1s2.As<Object>())->
+ Set(0, Local<Value>(*g2s2)));
+ CHECK(Local<Object>::New(iso, g2s1.As<Object>())->
+ Set(0, Local<Value>(*g1s1)));
}
- Persistent<Object> root = Persistent<Object>::New(iso, g1s1); // make a root.
-
- // Connect group 1 and 2, make a cycle.
- CHECK(g1s2->Set(0, Local<Value>(*g2s2)));
- CHECK(g2s1->Set(0, Local<Value>(*g1s1)));
-
{
UniqueId id1(reinterpret_cast<intptr_t>(*g1s1));
UniqueId id2(reinterpret_cast<intptr_t>(*g2s2));
@@ -3005,112 +3195,6 @@ THREADED_TEST(ApiObjectGroups) {
}
-THREADED_TEST(OldApiObjectGroupsCycle) {
- LocalContext env;
- v8::Isolate* iso = env->GetIsolate();
- HandleScope scope(iso);
-
- WeakCallCounter counter(1234);
-
- Persistent<Object> g1s1;
- Persistent<Object> g1s2;
- Persistent<Object> g2s1;
- Persistent<Object> g2s2;
- Persistent<Object> g3s1;
- Persistent<Object> g3s2;
- Persistent<Object> g4s1;
- Persistent<Object> g4s2;
-
- {
- HandleScope scope(iso);
- g1s1 = Persistent<Object>::New(iso, Object::New());
- g1s2 = Persistent<Object>::New(iso, Object::New());
- g1s1.MakeWeak(iso, &counter, &WeakPointerCallback);
- g1s2.MakeWeak(iso, &counter, &WeakPointerCallback);
- CHECK(g1s1.IsWeak(iso));
- CHECK(g1s2.IsWeak(iso));
-
- g2s1 = Persistent<Object>::New(iso, Object::New());
- g2s2 = Persistent<Object>::New(iso, Object::New());
- g2s1.MakeWeak(iso, &counter, &WeakPointerCallback);
- g2s2.MakeWeak(iso, &counter, &WeakPointerCallback);
- CHECK(g2s1.IsWeak(iso));
- CHECK(g2s2.IsWeak(iso));
-
- g3s1 = Persistent<Object>::New(iso, Object::New());
- g3s2 = Persistent<Object>::New(iso, Object::New());
- g3s1.MakeWeak(iso, &counter, &WeakPointerCallback);
- g3s2.MakeWeak(iso, &counter, &WeakPointerCallback);
- CHECK(g3s1.IsWeak(iso));
- CHECK(g3s2.IsWeak(iso));
-
- g4s1 = Persistent<Object>::New(iso, Object::New());
- g4s2 = Persistent<Object>::New(iso, Object::New());
- g4s1.MakeWeak(iso, &counter, &WeakPointerCallback);
- g4s2.MakeWeak(iso, &counter, &WeakPointerCallback);
- CHECK(g4s1.IsWeak(iso));
- CHECK(g4s2.IsWeak(iso));
- }
-
- Persistent<Object> root = Persistent<Object>::New(iso, g1s1); // make a root.
-
- // Connect groups. We're building the following cycle:
- // G1: { g1s1, g2s1 }, g1s1 implicitly references g2s1, ditto for other
- // groups.
- {
- Persistent<Value> g1_objects[] = { g1s1, g1s2 };
- Persistent<Value> g1_children[] = { g2s1 };
- Persistent<Value> g2_objects[] = { g2s1, g2s2 };
- Persistent<Value> g2_children[] = { g3s1 };
- Persistent<Value> g3_objects[] = { g3s1, g3s2 };
- Persistent<Value> g3_children[] = { g4s1 };
- Persistent<Value> g4_objects[] = { g4s1, g4s2 };
- Persistent<Value> g4_children[] = { g1s1 };
- V8::AddObjectGroup(g1_objects, 2);
- V8::AddImplicitReferences(g1s1, g1_children, 1);
- V8::AddObjectGroup(g2_objects, 2);
- V8::AddImplicitReferences(g2s1, g2_children, 1);
- V8::AddObjectGroup(g3_objects, 2);
- V8::AddImplicitReferences(g3s1, g3_children, 1);
- V8::AddObjectGroup(iso, g4_objects, 2);
- V8::AddImplicitReferences(g4s1, g4_children, 1);
- }
- // Do a single full GC
- HEAP->CollectAllGarbage(i::Heap::kAbortIncrementalMarkingMask);
-
-  // All objects should be alive.
- CHECK_EQ(0, counter.NumberOfWeakCalls());
-
- // Weaken the root.
- root.MakeWeak(iso, &counter, &WeakPointerCallback);
-
- // Groups are deleted, rebuild groups.
- {
- Persistent<Value> g1_objects[] = { g1s1, g1s2 };
- Persistent<Value> g1_children[] = { g2s1 };
- Persistent<Value> g2_objects[] = { g2s1, g2s2 };
- Persistent<Value> g2_children[] = { g3s1 };
- Persistent<Value> g3_objects[] = { g3s1, g3s2 };
- Persistent<Value> g3_children[] = { g4s1 };
- Persistent<Value> g4_objects[] = { g4s1, g4s2 };
- Persistent<Value> g4_children[] = { g1s1 };
- V8::AddObjectGroup(g1_objects, 2);
- V8::AddImplicitReferences(g1s1, g1_children, 1);
- V8::AddObjectGroup(g2_objects, 2);
- V8::AddImplicitReferences(g2s1, g2_children, 1);
- V8::AddObjectGroup(g3_objects, 2);
- V8::AddImplicitReferences(g3s1, g3_children, 1);
- V8::AddObjectGroup(g4_objects, 2);
- V8::AddImplicitReferences(g4s1, g4_children, 1);
- }
-
- HEAP->CollectAllGarbage(i::Heap::kAbortIncrementalMarkingMask);
-
- // All objects should be gone. 9 global handles in total.
- CHECK_EQ(9, counter.NumberOfWeakCalls());
-}
-
-
THREADED_TEST(ApiObjectGroupsCycle) {
LocalContext env;
v8::Isolate* iso = env->GetIsolate();
@@ -3118,47 +3202,47 @@ THREADED_TEST(ApiObjectGroupsCycle) {
WeakCallCounter counter(1234);
- Persistent<Object> g1s1;
- Persistent<Object> g1s2;
- Persistent<Object> g2s1;
- Persistent<Object> g2s2;
- Persistent<Object> g3s1;
- Persistent<Object> g3s2;
- Persistent<Object> g4s1;
- Persistent<Object> g4s2;
+ Persistent<Value> g1s1;
+ Persistent<Value> g1s2;
+ Persistent<Value> g2s1;
+ Persistent<Value> g2s2;
+ Persistent<Value> g3s1;
+ Persistent<Value> g3s2;
+ Persistent<Value> g4s1;
+ Persistent<Value> g4s2;
{
HandleScope scope(iso);
- g1s1 = Persistent<Object>::New(iso, Object::New());
- g1s2 = Persistent<Object>::New(iso, Object::New());
+ g1s1.Reset(iso, Object::New());
+ g1s2.Reset(iso, Object::New());
g1s1.MakeWeak(iso, &counter, &WeakPointerCallback);
g1s2.MakeWeak(iso, &counter, &WeakPointerCallback);
CHECK(g1s1.IsWeak(iso));
CHECK(g1s2.IsWeak(iso));
- g2s1 = Persistent<Object>::New(iso, Object::New());
- g2s2 = Persistent<Object>::New(iso, Object::New());
+ g2s1.Reset(iso, Object::New());
+ g2s2.Reset(iso, Object::New());
g2s1.MakeWeak(iso, &counter, &WeakPointerCallback);
g2s2.MakeWeak(iso, &counter, &WeakPointerCallback);
CHECK(g2s1.IsWeak(iso));
CHECK(g2s2.IsWeak(iso));
- g3s1 = Persistent<Object>::New(iso, Object::New());
- g3s2 = Persistent<Object>::New(iso, Object::New());
+ g3s1.Reset(iso, Object::New());
+ g3s2.Reset(iso, Object::New());
g3s1.MakeWeak(iso, &counter, &WeakPointerCallback);
g3s2.MakeWeak(iso, &counter, &WeakPointerCallback);
CHECK(g3s1.IsWeak(iso));
CHECK(g3s2.IsWeak(iso));
- g4s1 = Persistent<Object>::New(iso, Object::New());
- g4s2 = Persistent<Object>::New(iso, Object::New());
+ g4s1.Reset(iso, Object::New());
+ g4s2.Reset(iso, Object::New());
g4s1.MakeWeak(iso, &counter, &WeakPointerCallback);
g4s2.MakeWeak(iso, &counter, &WeakPointerCallback);
CHECK(g4s1.IsWeak(iso));
CHECK(g4s2.IsWeak(iso));
}
- Persistent<Object> root = Persistent<Object>::New(iso, g1s1); // make a root.
+ Persistent<Value> root(iso, g1s1); // make a root.
// Connect groups. We're building the following cycle:
// G1: { g1s1, g2s1 }, g1s1 implicitly references g2s1, ditto for other
@@ -3221,103 +3305,6 @@ THREADED_TEST(ApiObjectGroupsCycle) {
// TODO(mstarzinger): This should be a THREADED_TEST but causes failures
// on the buildbots, so was made non-threaded for the time being.
-TEST(OldApiObjectGroupsCycleForScavenger) {
- i::FLAG_stress_compaction = false;
- i::FLAG_gc_global = false;
- LocalContext env;
- v8::Isolate* iso = env->GetIsolate();
- HandleScope scope(iso);
-
- WeakCallCounter counter(1234);
-
- Persistent<Object> g1s1;
- Persistent<Object> g1s2;
- Persistent<Object> g2s1;
- Persistent<Object> g2s2;
- Persistent<Object> g3s1;
- Persistent<Object> g3s2;
-
- {
- HandleScope scope(iso);
- g1s1 = Persistent<Object>::New(iso, Object::New());
- g1s2 = Persistent<Object>::New(iso, Object::New());
- g1s1.MakeWeak(iso, &counter, &WeakPointerCallback);
- g1s2.MakeWeak(iso, &counter, &WeakPointerCallback);
-
- g2s1 = Persistent<Object>::New(iso, Object::New());
- g2s2 = Persistent<Object>::New(iso, Object::New());
- g2s1.MakeWeak(iso, &counter, &WeakPointerCallback);
- g2s2.MakeWeak(iso, &counter, &WeakPointerCallback);
-
- g3s1 = Persistent<Object>::New(iso, Object::New());
- g3s2 = Persistent<Object>::New(iso, Object::New());
- g3s1.MakeWeak(iso, &counter, &WeakPointerCallback);
- g3s2.MakeWeak(iso, &counter, &WeakPointerCallback);
- }
-
- // Make a root.
- Persistent<Object> root = Persistent<Object>::New(iso, g1s1);
- root.MarkPartiallyDependent(iso);
-
- // Connect groups. We're building the following cycle:
- // G1: { g1s1, g2s1 }, g1s1 implicitly references g2s1, ditto for other
- // groups.
- {
- g1s1.MarkPartiallyDependent(iso);
- g1s2.MarkPartiallyDependent(iso);
- g2s1.MarkPartiallyDependent(iso);
- g2s2.MarkPartiallyDependent(iso);
- g3s1.MarkPartiallyDependent(iso);
- g3s2.MarkPartiallyDependent(iso);
- Persistent<Value> g1_objects[] = { g1s1, g1s2 };
- Persistent<Value> g2_objects[] = { g2s1, g2s2 };
- Persistent<Value> g3_objects[] = { g3s1, g3s2 };
- V8::AddObjectGroup(g1_objects, 2);
- g1s1->Set(v8_str("x"), Handle<Object>(*g2s1));
- V8::AddObjectGroup(g2_objects, 2);
- g2s1->Set(v8_str("x"), Handle<Object>(*g3s1));
- V8::AddObjectGroup(g3_objects, 2);
- g3s1->Set(v8_str("x"), Handle<Object>(*g1s1));
- }
-
- HEAP->CollectGarbage(i::NEW_SPACE);
-
- // All objects should be alive.
- CHECK_EQ(0, counter.NumberOfWeakCalls());
-
- // Weaken the root.
- root.MakeWeak(iso, &counter, &WeakPointerCallback);
- root.MarkPartiallyDependent(iso);
-
- v8::Isolate* isolate = v8::Isolate::GetCurrent();
- // Groups are deleted, rebuild groups.
- {
- g1s1.MarkPartiallyDependent(isolate);
- g1s2.MarkPartiallyDependent(isolate);
- g2s1.MarkPartiallyDependent(isolate);
- g2s2.MarkPartiallyDependent(isolate);
- g3s1.MarkPartiallyDependent(isolate);
- g3s2.MarkPartiallyDependent(isolate);
- Persistent<Value> g1_objects[] = { g1s1, g1s2 };
- Persistent<Value> g2_objects[] = { g2s1, g2s2 };
- Persistent<Value> g3_objects[] = { g3s1, g3s2 };
- V8::AddObjectGroup(g1_objects, 2);
- g1s1->Set(v8_str("x"), Handle<Object>(*g2s1));
- V8::AddObjectGroup(g2_objects, 2);
- g2s1->Set(v8_str("x"), Handle<Object>(*g3s1));
- V8::AddObjectGroup(g3_objects, 2);
- g3s1->Set(v8_str("x"), Handle<Object>(*g1s1));
- }
-
- HEAP->CollectGarbage(i::NEW_SPACE);
-
- // All objects should be gone. 7 global handles in total.
- CHECK_EQ(7, counter.NumberOfWeakCalls());
-}
-
-
-// TODO(mstarzinger): This should be a THREADED_TEST but causes failures
-// on the buildbots, so was made non-threaded for the time being.
TEST(ApiObjectGroupsCycleForScavenger) {
i::FLAG_stress_compaction = false;
i::FLAG_gc_global = false;
@@ -3327,39 +3314,40 @@ TEST(ApiObjectGroupsCycleForScavenger) {
WeakCallCounter counter(1234);
- Persistent<Object> g1s1;
- Persistent<Object> g1s2;
- Persistent<Object> g2s1;
- Persistent<Object> g2s2;
- Persistent<Object> g3s1;
- Persistent<Object> g3s2;
+ Persistent<Value> g1s1;
+ Persistent<Value> g1s2;
+ Persistent<Value> g2s1;
+ Persistent<Value> g2s2;
+ Persistent<Value> g3s1;
+ Persistent<Value> g3s2;
{
HandleScope scope(iso);
- g1s1 = Persistent<Object>::New(iso, Object::New());
- g1s2 = Persistent<Object>::New(iso, Object::New());
+ g1s1.Reset(iso, Object::New());
+ g1s2.Reset(iso, Object::New());
g1s1.MakeWeak(iso, &counter, &WeakPointerCallback);
g1s2.MakeWeak(iso, &counter, &WeakPointerCallback);
- g2s1 = Persistent<Object>::New(iso, Object::New());
- g2s2 = Persistent<Object>::New(iso, Object::New());
+ g2s1.Reset(iso, Object::New());
+ g2s2.Reset(iso, Object::New());
g2s1.MakeWeak(iso, &counter, &WeakPointerCallback);
g2s2.MakeWeak(iso, &counter, &WeakPointerCallback);
- g3s1 = Persistent<Object>::New(iso, Object::New());
- g3s2 = Persistent<Object>::New(iso, Object::New());
+ g3s1.Reset(iso, Object::New());
+ g3s2.Reset(iso, Object::New());
g3s1.MakeWeak(iso, &counter, &WeakPointerCallback);
g3s2.MakeWeak(iso, &counter, &WeakPointerCallback);
}
// Make a root.
- Persistent<Object> root = Persistent<Object>::New(iso, g1s1);
+ Persistent<Value> root(iso, g1s1);
root.MarkPartiallyDependent(iso);
// Connect groups. We're building the following cycle:
// G1: { g1s1, g2s1 }, g1s1 implicitly references g2s1, ditto for other
// groups.
{
+ HandleScope handle_scope(iso);
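+ // A scope is needed for the Locals created below when wiring up the cycle.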
g1s1.MarkPartiallyDependent(iso);
g1s2.MarkPartiallyDependent(iso);
g2s1.MarkPartiallyDependent(iso);
@@ -3368,13 +3356,16 @@ TEST(ApiObjectGroupsCycleForScavenger) {
g3s2.MarkPartiallyDependent(iso);
iso->SetObjectGroupId(g1s1, UniqueId(1));
iso->SetObjectGroupId(g1s2, UniqueId(1));
- g1s1->Set(v8_str("x"), Local<Value>(*g2s1));
+ Local<Object>::New(iso, g1s1.As<Object>())->Set(v8_str("x"),
+ Local<Value>(*g2s1));
iso->SetObjectGroupId(g2s1, UniqueId(2));
iso->SetObjectGroupId(g2s2, UniqueId(2));
- g2s1->Set(v8_str("x"), Local<Value>(*g3s1));
+ Local<Object>::New(iso, g2s1.As<Object>())->Set(v8_str("x"),
+ Local<Value>(*g3s1));
iso->SetObjectGroupId(g3s1, UniqueId(3));
iso->SetObjectGroupId(g3s2, UniqueId(3));
- g3s1->Set(v8_str("x"), Local<Value>(*g1s1));
+ Local<Object>::New(iso, g3s1.As<Object>())->Set(v8_str("x"),
+ Local<Value>(*g1s1));
}
v8::internal::Heap* heap = reinterpret_cast<v8::internal::Isolate*>(
@@ -3391,6 +3382,7 @@ TEST(ApiObjectGroupsCycleForScavenger) {
v8::Isolate* isolate = v8::Isolate::GetCurrent();
// Groups are deleted, rebuild groups.
{
+ HandleScope handle_scope(iso);
g1s1.MarkPartiallyDependent(isolate);
g1s2.MarkPartiallyDependent(isolate);
g2s1.MarkPartiallyDependent(isolate);
@@ -3399,13 +3391,16 @@ TEST(ApiObjectGroupsCycleForScavenger) {
g3s2.MarkPartiallyDependent(isolate);
iso->SetObjectGroupId(g1s1, UniqueId(1));
iso->SetObjectGroupId(g1s2, UniqueId(1));
- g1s1->Set(v8_str("x"), Local<Value>(*g2s1));
+ Local<Object>::New(iso, g1s1.As<Object>())->Set(v8_str("x"),
+ Local<Value>(*g2s1));
iso->SetObjectGroupId(g2s1, UniqueId(2));
iso->SetObjectGroupId(g2s2, UniqueId(2));
- g2s1->Set(v8_str("x"), Local<Value>(*g3s1));
+ Local<Object>::New(iso, g2s1.As<Object>())->Set(v8_str("x"),
+ Local<Value>(*g3s1));
iso->SetObjectGroupId(g3s1, UniqueId(3));
iso->SetObjectGroupId(g3s2, UniqueId(3));
- g3s1->Set(v8_str("x"), Local<Value>(*g1s1));
+ Local<Object>::New(iso, g3s1.As<Object>())->Set(v8_str("x"),
+ Local<Value>(*g1s1));
}
heap->CollectGarbage(i::NEW_SPACE);
@@ -4544,9 +4539,8 @@ THREADED_TEST(Equality) {
CHECK(!v8::False()->StrictEquals(v8::Undefined()));
v8::Handle<v8::Object> obj = v8::Object::New();
- v8::Persistent<v8::Object> alias =
- v8::Persistent<v8::Object>::New(isolate, obj);
- CHECK(alias->StrictEquals(obj));
+ v8::Persistent<v8::Object> alias(isolate, obj);
+ CHECK(v8::Local<v8::Object>::New(isolate, alias)->StrictEquals(obj));
alias.Dispose(isolate);
}
@@ -4845,7 +4839,7 @@ static void SetXValue(Local<String> name,
CHECK_EQ(info.Data(), v8_str("donut"));
CHECK_EQ(name, v8_str("x"));
CHECK(xValue.IsEmpty());
- xValue = v8::Persistent<Value>::New(info.GetIsolate(), value);
+ xValue.Reset(info.GetIsolate(), value);
}
@@ -4861,7 +4855,7 @@ THREADED_TEST(SimplePropertyWrite) {
script->Run();
CHECK_EQ(v8_num(4), Handle<Value>(*xValue));
xValue.Dispose(context->GetIsolate());
- xValue = v8::Persistent<Value>();
+ xValue.Clear();
}
}
@@ -4878,7 +4872,7 @@ THREADED_TEST(SetterOnly) {
script->Run();
CHECK_EQ(v8_num(4), Handle<Value>(*xValue));
xValue.Dispose(context->GetIsolate());
- xValue = v8::Persistent<Value>();
+ xValue.Clear();
}
}
@@ -5741,15 +5735,14 @@ template <typename T> static void USE(T) { }
static inline void PersistentHandles(v8::Isolate* isolate) {
USE(PersistentHandles);
Local<String> str = v8_str("foo");
- v8::Persistent<String> p_str = v8::Persistent<String>::New(isolate, str);
- USE(p_str);
+ v8::Persistent<String> p_str(isolate, str);
+ p_str.Dispose();
Local<Script> scr = Script::Compile(v8_str(""));
- v8::Persistent<Script> p_scr = v8::Persistent<Script>::New(isolate, scr);
- USE(p_scr);
+ v8::Persistent<Script> p_scr(isolate, scr);
+ p_scr.Dispose();
Local<ObjectTemplate> templ = ObjectTemplate::New();
- v8::Persistent<ObjectTemplate> p_templ =
- v8::Persistent<ObjectTemplate>::New(isolate, templ);
- USE(p_templ);
+ v8::Persistent<ObjectTemplate> p_templ(isolate, templ);
+ p_templ.Dispose();
}
@@ -6253,10 +6246,7 @@ class Whammy {
explicit Whammy(v8::Isolate* isolate) : cursor_(0), isolate_(isolate) { }
~Whammy() { script_.Dispose(isolate_); }
v8::Handle<Script> getScript() {
- if (script_.IsEmpty()) {
- script_ = v8::Persistent<Script>::New(isolate_,
- v8_compile("({}).blammo"));
- }
+ if (script_.IsEmpty()) script_.Reset(isolate_, v8_compile("({}).blammo"));
return Local<Script>(*script_);
}
@@ -6280,19 +6270,18 @@ v8::Handle<Value> WhammyPropertyGetter(Local<String> name,
Whammy* whammy =
static_cast<Whammy*>(v8::Handle<v8::External>::Cast(info.Data())->Value());
- v8::Persistent<v8::Object> prev = whammy->objects_[whammy->cursor_];
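+ // Use a reference to avoid copying the Persistent handle.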
+ v8::Persistent<v8::Object>& prev = whammy->objects_[whammy->cursor_];
v8::Handle<v8::Object> obj = v8::Object::New();
- v8::Persistent<v8::Object> global =
- v8::Persistent<v8::Object>::New(info.GetIsolate(), obj);
if (!prev.IsEmpty()) {
- prev->Set(v8_str("next"), obj);
+ v8::Local<v8::Object>::New(info.GetIsolate(), prev)
+ ->Set(v8_str("next"), obj);
prev.MakeWeak<Value, Snorkel>(info.GetIsolate(),
new Snorkel(),
&HandleWeakReference);
whammy->objects_[whammy->cursor_].Clear();
}
- whammy->objects_[whammy->cursor_] = global;
+ whammy->objects_[whammy->cursor_].Reset(info.GetIsolate(), obj);
whammy->cursor_ = (whammy->cursor_ + 1) % Whammy::kObjectCount;
return whammy->getScript()->Run();
}
@@ -6345,8 +6334,8 @@ THREADED_TEST(IndependentWeakHandle) {
{
v8::HandleScope handle_scope(iso);
- object_a = v8::Persistent<v8::Object>::New(iso, v8::Object::New());
- object_b = v8::Persistent<v8::Object>::New(iso, v8::Object::New());
+ object_a.Reset(iso, v8::Object::New());
+ object_b.Reset(iso, v8::Object::New());
}
bool object_a_disposed = false;
@@ -6410,7 +6399,7 @@ THREADED_TEST(GCFromWeakCallbacks) {
v8::Persistent<v8::Object> object;
{
v8::HandleScope handle_scope(isolate);
- object = v8::Persistent<v8::Object>::New(isolate, v8::Object::New());
+ object.Reset(isolate, v8::Object::New());
}
bool disposed = false;
object.MakeWeak(isolate, &disposed, gc_forcing_callback[inner_gc]);
@@ -6439,10 +6428,11 @@ THREADED_TEST(IndependentHandleRevival) {
v8::Persistent<v8::Object> object;
{
v8::HandleScope handle_scope(isolate);
- object = v8::Persistent<v8::Object>::New(isolate, v8::Object::New());
- object->Set(v8_str("x"), v8::Integer::New(1));
+ v8::Local<v8::Object> o = v8::Object::New();
+ object.Reset(isolate, o);
+ o->Set(v8_str("x"), v8::Integer::New(1));
v8::Local<String> y_str = v8_str("y");
- object->Set(y_str, y_str);
+ o->Set(y_str, y_str);
}
bool revived = false;
object.MakeWeak(isolate, &revived, &RevivingCallback);
@@ -6452,9 +6442,10 @@ THREADED_TEST(IndependentHandleRevival) {
HEAP->CollectAllGarbage(i::Heap::kAbortIncrementalMarkingMask);
{
v8::HandleScope handle_scope(isolate);
+ v8::Local<v8::Object> o = v8::Local<v8::Object>::New(isolate, object);
v8::Local<String> y_str = v8_str("y");
- CHECK_EQ(v8::Integer::New(1), object->Get(v8_str("x")));
- CHECK(object->Get(y_str)->Equals(y_str));
+ CHECK_EQ(v8::Integer::New(1), o->Get(v8_str("x")));
+ CHECK(o->Get(y_str)->Equals(y_str));
}
}
@@ -11982,7 +11973,7 @@ static v8::Handle<Value> ThrowInJSNoCatch(const v8::Arguments& args) {
v8::HandleScope scope(args.GetIsolate());
v8::Handle<Value> value = CompileRun(code);
CHECK(value.IsEmpty());
- return v8_str("foo");
+ return scope.Close(v8_str("foo"));
}
}
@@ -12150,7 +12141,7 @@ void NewPersistentHandleCallback(v8::Isolate* isolate,
v8::Persistent<v8::Value>* handle,
void*) {
v8::HandleScope scope(isolate);
- bad_handle = v8::Persistent<v8::Object>::New(isolate, some_object);
+ bad_handle.Reset(isolate, some_object);
handle->Dispose(isolate);
}
@@ -12162,9 +12153,9 @@ THREADED_TEST(NewPersistentHandleFromWeakCallback) {
v8::Persistent<v8::Object> handle1, handle2;
{
v8::HandleScope scope(isolate);
- some_object = v8::Persistent<v8::Object>::New(isolate, v8::Object::New());
- handle1 = v8::Persistent<v8::Object>::New(isolate, v8::Object::New());
- handle2 = v8::Persistent<v8::Object>::New(isolate, v8::Object::New());
+ some_object.Reset(isolate, v8::Object::New());
+ handle1.Reset(isolate, v8::Object::New());
+ handle2.Reset(isolate, v8::Object::New());
}
// Note: order is implementation dependent alas: currently
// global handle nodes are processed by PostGarbageCollectionProcessing
@@ -12196,11 +12187,11 @@ THREADED_TEST(DoNotUseDeletedNodesInSecondLevelGc) {
v8::Persistent<v8::Object> handle1, handle2;
{
v8::HandleScope scope(isolate);
- handle1 = v8::Persistent<v8::Object>::New(isolate, v8::Object::New());
- handle2 = v8::Persistent<v8::Object>::New(isolate, v8::Object::New());
+ handle1.Reset(isolate, v8::Object::New());
+ handle2.Reset(isolate, v8::Object::New());
}
handle1.MakeWeak<v8::Value, void>(isolate, NULL, DisposeAndForceGcCallback);
- to_be_disposed = handle2;
+ to_be_disposed.Reset(isolate, handle2);
HEAP->CollectAllGarbage(i::Heap::kNoGCFlags);
}
@@ -12226,9 +12217,9 @@ THREADED_TEST(NoGlobalHandlesOrphaningDueToWeakCallback) {
v8::Persistent<v8::Object> handle1, handle2, handle3;
{
v8::HandleScope scope(isolate);
- handle3 = v8::Persistent<v8::Object>::New(isolate, v8::Object::New());
- handle2 = v8::Persistent<v8::Object>::New(isolate, v8::Object::New());
- handle1 = v8::Persistent<v8::Object>::New(isolate, v8::Object::New());
+ handle3.Reset(isolate, v8::Object::New());
+ handle2.Reset(isolate, v8::Object::New());
+ handle1.Reset(isolate, v8::Object::New());
}
handle2.MakeWeak<v8::Value, void>(isolate, NULL, DisposingCallback);
handle3.MakeWeak<v8::Value, void>(isolate, NULL, HandleCreatingCallback);
@@ -12282,9 +12273,10 @@ THREADED_TEST(NestedHandleScopeAndContexts) {
static i::Handle<i::JSFunction>* foo_ptr = NULL;
-static int foo_count = 0;
+static int foo_entry_count = 0;
static i::Handle<i::JSFunction>* bar_ptr = NULL;
-static int bar_count = 0;
+static int bar_entry_count = 0;
+static int bar_caller_count = 0;
static void entry_hook(uintptr_t function,
@@ -12294,14 +12286,21 @@ static void entry_hook(uintptr_t function,
CHECK(code != NULL);
if (bar_ptr != NULL && code == (*bar_ptr)->code())
- ++bar_count;
+ ++bar_entry_count;
if (foo_ptr != NULL && code == (*foo_ptr)->code())
- ++foo_count;
+ ++foo_entry_count;
- // TODO(siggi): Verify return_addr_location.
- // This can be done by capturing JitCodeEvents, but requires an ordered
- // collection.
+ // Check whether bar is the caller by inspecting the return address.

+ if (bar_ptr != NULL) {
+ const v8::internal::byte* caller =
+ *reinterpret_cast<v8::internal::byte**>(return_addr_location);
+
+ if ((*bar_ptr)->code()->instruction_start() <= caller &&
+ (*bar_ptr)->code()->instruction_end() > caller) {
+ ++bar_caller_count;
+ }
+ }
}
@@ -12372,17 +12371,20 @@ TEST(SetFunctionEntryHook) {
CHECK(v8::V8::SetFunctionEntryHook(NULL));
// Reset the entry count to zero and set the entry hook.
- bar_count = 0;
- foo_count = 0;
+ bar_entry_count = 0;
+ bar_caller_count = 0;
+ foo_entry_count = 0;
CHECK(v8::V8::SetFunctionEntryHook(entry_hook));
RunLoopInNewEnv();
- CHECK_EQ(2, bar_count);
- CHECK_EQ(200, foo_count);
+ CHECK_EQ(2, bar_entry_count);
+ CHECK_EQ(200, bar_caller_count);
+ CHECK_EQ(200, foo_entry_count);
// Clear the entry hook and count.
- bar_count = 0;
- foo_count = 0;
+ bar_entry_count = 0;
+ bar_caller_count = 0;
+ foo_entry_count = 0;
v8::V8::SetFunctionEntryHook(NULL);
// Clear the compilation cache to make sure we don't reuse the
@@ -12391,8 +12393,9 @@ TEST(SetFunctionEntryHook) {
// Verify that entry hooking is now disabled.
RunLoopInNewEnv();
- CHECK_EQ(0u, bar_count);
- CHECK_EQ(0u, foo_count);
+ CHECK_EQ(0u, bar_entry_count);
+ CHECK_EQ(0u, bar_caller_count);
+ CHECK_EQ(0u, foo_entry_count);
}
@@ -12667,10 +12670,13 @@ THREADED_TEST(DisposeEnteredContext) {
}
v8::HandleScope scope(isolate);
{
- inner->Enter();
- inner.Dispose(inner->GetIsolate());
+ // We don't want a handle here, so we do this unsafely.
+ v8::Handle<v8::Context> inner_local =
+ *reinterpret_cast<v8::Handle<v8::Context>*>(&inner);
+ inner_local->Enter();
+ inner.Dispose();
inner.Clear();
- inner->Exit();
+ inner_local->Exit();
}
}
@@ -12687,10 +12693,10 @@ THREADED_TEST(Regress54) {
v8::HandleScope inner(isolate);
v8::Handle<v8::ObjectTemplate> local = v8::ObjectTemplate::New();
local->SetInternalFieldCount(1);
- templ =
- v8::Persistent<v8::ObjectTemplate>::New(isolate, inner.Close(local));
+ templ.Reset(isolate, inner.Close(local));
}
- v8::Handle<v8::Object> result = templ->NewInstance();
+ v8::Handle<v8::Object> result =
+ v8::Local<v8::ObjectTemplate>::New(isolate, templ)->NewInstance();
CHECK_EQ(1, result->InternalFieldCount());
}
@@ -13705,6 +13711,7 @@ THREADED_TEST(MorphCompositeStringTest) {
uint16_t* two_byte_string = AsciiToTwoByteString(c_string);
{
LocalContext env;
+ i::Factory* factory = i::Isolate::Current()->factory();
v8::HandleScope scope(env->GetIsolate());
AsciiVectorResource ascii_resource(
i::Vector<const char>(c_string, i::StrLength(c_string)));
@@ -13713,9 +13720,9 @@ THREADED_TEST(MorphCompositeStringTest) {
i::StrLength(c_string)));
Local<String> lhs(v8::Utils::ToLocal(
- FACTORY->NewExternalStringFromAscii(&ascii_resource)));
+ factory->NewExternalStringFromAscii(&ascii_resource)));
Local<String> rhs(v8::Utils::ToLocal(
- FACTORY->NewExternalStringFromAscii(&ascii_resource)));
+ factory->NewExternalStringFromAscii(&ascii_resource)));
env->Global()->Set(v8_str("lhs"), lhs);
env->Global()->Set(v8_str("rhs"), rhs);
@@ -13802,6 +13809,8 @@ class RegExpStringModificationTest {
uc16_resource_(i::Vector<const uint16_t>(two_byte_content_, 15)) {}
~RegExpStringModificationTest() { delete block_; }
void RunTest() {
+ i::Factory* factory = i::Isolate::Current()->factory();
+
regexp_success_ = false;
morph_success_ = false;
@@ -13814,11 +13823,11 @@ class RegExpStringModificationTest {
// Create the input string for the regexp - the one we are going to change
// properties of.
- input_ = FACTORY->NewExternalStringFromAscii(&ascii_resource_);
+ input_ = factory->NewExternalStringFromAscii(&ascii_resource_);
// Inject the input as a global variable.
i::Handle<i::String> input_name =
- FACTORY->NewStringFromAscii(i::Vector<const char>("input", 5));
+ factory->NewStringFromAscii(i::Vector<const char>("input", 5));
i::Isolate::Current()->native_context()->global_object()->SetProperty(
*input_name,
*input_,
@@ -14312,12 +14321,13 @@ THREADED_TEST(Regress16276) {
THREADED_TEST(PixelArray) {
LocalContext context;
+ i::Factory* factory = i::Isolate::Current()->factory();
v8::HandleScope scope(context->GetIsolate());
const int kElementCount = 260;
uint8_t* pixel_data = reinterpret_cast<uint8_t*>(malloc(kElementCount));
i::Handle<i::ExternalPixelArray> pixels =
i::Handle<i::ExternalPixelArray>::cast(
- FACTORY->NewExternalArray(kElementCount,
+ factory->NewExternalArray(kElementCount,
v8::kExternalPixelArray,
pixel_data));
// Force GC to trigger verification.
@@ -14734,12 +14744,13 @@ static v8::Handle<Value> NotHandledIndexedPropertySetter(
THREADED_TEST(PixelArrayWithInterceptor) {
LocalContext context;
+ i::Factory* factory = i::Isolate::Current()->factory();
v8::HandleScope scope(context->GetIsolate());
const int kElementCount = 260;
uint8_t* pixel_data = reinterpret_cast<uint8_t*>(malloc(kElementCount));
i::Handle<i::ExternalPixelArray> pixels =
i::Handle<i::ExternalPixelArray>::cast(
- FACTORY->NewExternalArray(kElementCount,
+ factory->NewExternalArray(kElementCount,
v8::kExternalPixelArray,
pixel_data));
for (int i = 0; i < kElementCount; i++) {
@@ -15101,6 +15112,7 @@ static void ExternalArrayTestHelper(v8::ExternalArrayType array_type,
int64_t low,
int64_t high) {
LocalContext context;
+ i::Factory* factory = i::Isolate::Current()->factory();
v8::HandleScope scope(context->GetIsolate());
const int kElementCount = 40;
int element_size = ExternalArrayElementSize(array_type);
@@ -15108,7 +15120,7 @@ static void ExternalArrayTestHelper(v8::ExternalArrayType array_type,
static_cast<ElementType*>(malloc(kElementCount * element_size));
i::Handle<ExternalArrayClass> array =
i::Handle<ExternalArrayClass>::cast(
- FACTORY->NewExternalArray(kElementCount, array_type, array_data));
+ factory->NewExternalArray(kElementCount, array_type, array_data));
// Force GC to trigger verification.
HEAP->CollectAllGarbage(i::Heap::kNoGCFlags);
for (int i = 0; i < kElementCount; i++) {
@@ -15460,12 +15472,14 @@ void TypedArrayTestHelper(v8::ExternalArrayType array_type,
i::FLAG_harmony_array_buffer = true;
i::FLAG_harmony_typed_arrays = true;
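+ // Allocate the backing store in the embedder; the ArrayBuffer below is
+ // created over this externally owned memory.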
+ i::ScopedVector<ElementType> backing_store(kElementCount+2);
+
LocalContext env;
v8::Isolate* isolate = env->GetIsolate();
v8::HandleScope handle_scope(isolate);
- Local<v8::ArrayBuffer> ab =
- v8::ArrayBuffer::New((kElementCount+2)*sizeof(ElementType));
+ Local<v8::ArrayBuffer> ab = v8::ArrayBuffer::New(
+ backing_store.start(), (kElementCount+2)*sizeof(ElementType));
Local<TypedArray> ta =
TypedArray::New(ab, 2*sizeof(ElementType), kElementCount);
CHECK_EQ(kElementCount, static_cast<int>(ta->Length()));
@@ -15474,7 +15488,7 @@ void TypedArrayTestHelper(v8::ExternalArrayType array_type,
static_cast<int>(ta->ByteLength()));
CHECK_EQ(ab, ta->Buffer());
- ElementType* data = static_cast<ElementType*>(ab->Data()) + 2;
+ ElementType* data = backing_store.start() + 2;
for (int i = 0; i < kElementCount; i++) {
data[i] = static_cast<ElementType>(i);
}
@@ -15941,8 +15955,13 @@ TEST(SourceURLInStackTrace) {
"}\n"
"foo();\n"
"}\n"
- "eval('(' + outer +')()//@ sourceURL=eval_url');";
- CHECK(CompileRun(source)->IsUndefined());
+ "eval('(' + outer +')()%s');";
+
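+ // Check both the new '//#' and the legacy '//@' sourceURL annotations.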
+ i::ScopedVector<char> code(1024);
+ i::OS::SNPrintF(code, source, "//# sourceURL=eval_url");
+ CHECK(CompileRun(code.start())->IsUndefined());
+ i::OS::SNPrintF(code, source, "//@ sourceURL=eval_url");
+ CHECK(CompileRun(code.start())->IsUndefined());
}
@@ -15982,9 +16001,13 @@ TEST(InlineScriptWithSourceURLInStackTrace) {
"}\n"
"foo();\n"
"}\n"
- "outer()\n"
- "//@ sourceURL=source_url";
- CHECK(CompileRunWithOrigin(source, "url", 0, 1)->IsUndefined());
+ "outer()\n%s";
+
+ i::ScopedVector<char> code(1024);
+ i::OS::SNPrintF(code, source, "//# sourceURL=source_url");
+ CHECK(CompileRunWithOrigin(code.start(), "url", 0, 1)->IsUndefined());
+ i::OS::SNPrintF(code, source, "//@ sourceURL=source_url");
+ CHECK(CompileRunWithOrigin(code.start(), "url", 0, 1)->IsUndefined());
}
@@ -16024,16 +16047,21 @@ TEST(DynamicWithSourceURLInStackTrace) {
"}\n"
"foo();\n"
"}\n"
- "outer()\n"
- "//@ sourceURL=source_url";
- CHECK(CompileRunWithOrigin(source, "url", 0, 0)->IsUndefined());
+ "outer()\n%s";
+
+ i::ScopedVector<char> code(1024);
+ i::OS::SNPrintF(code, source, "//# sourceURL=source_url");
+ CHECK(CompileRunWithOrigin(code.start(), "url", 0, 0)->IsUndefined());
+ i::OS::SNPrintF(code, source, "//@ sourceURL=source_url");
+ CHECK(CompileRunWithOrigin(code.start(), "url", 0, 0)->IsUndefined());
}
static void CreateGarbageInOldSpace() {
+ i::Factory* factory = i::Isolate::Current()->factory();
v8::HandleScope scope(v8::Isolate::GetCurrent());
i::AlwaysAllocateScope always_allocate;
for (int i = 0; i < 1000; i++) {
- FACTORY->NewFixedArray(1000, i::TENURED);
+ factory->NewFixedArray(1000, i::TENURED);
}
}
@@ -17039,6 +17067,73 @@ THREADED_TEST(TwoByteStringInAsciiCons) {
}
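+
+// Exercises String::ContainsOnlyOneByte on external two-byte strings, cons
+// strings, and strings with a stray high bit at varying offsets and
+// alignments.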
+TEST(ContainsOnlyOneByte) {
+ v8::V8::Initialize();
+ v8::Isolate* isolate = v8::Isolate::GetCurrent();
+ v8::HandleScope scope(isolate);
+ // Make a buffer long enough that it won't automatically be converted.
+ const int length = 512;
+ // Ensure word aligned assignment.
+ const int aligned_length = length*sizeof(uintptr_t)/sizeof(uint16_t);
+ i::SmartArrayPointer<uintptr_t>
+ aligned_contents(new uintptr_t[aligned_length]);
+ uint16_t* string_contents = reinterpret_cast<uint16_t*>(*aligned_contents);
+ // Fill with characters that fit in one byte.
+ for (int i = 0; i < length-1; i++) {
+ string_contents[i] = 0x41;
+ }
+ string_contents[length-1] = 0;
+ // Simple case.
+ Handle<String> string;
+ string = String::NewExternal(new TestResource(string_contents));
+ CHECK(!string->IsOneByte() && string->ContainsOnlyOneByte());
+ // Counter example.
+ string = String::NewFromTwoByte(isolate, string_contents);
+ CHECK(string->IsOneByte() && string->ContainsOnlyOneByte());
+ // Test left, right, and balanced cons strings.
+ Handle<String> base = String::NewFromUtf8(isolate, "a");
+ Handle<String> left = base;
+ Handle<String> right = base;
+ for (int i = 0; i < 1000; i++) {
+ left = String::Concat(base, left);
+ right = String::Concat(right, base);
+ }
+ Handle<String> balanced = String::Concat(left, base);
+ balanced = String::Concat(balanced, right);
+ Handle<String> cons_strings[] = {left, balanced, right};
+ Handle<String> two_byte =
+ String::NewExternal(new TestResource(string_contents));
+ for (size_t i = 0; i < ARRAY_SIZE(cons_strings); i++) {
+ // Base assumptions.
+ string = cons_strings[i];
+ CHECK(string->IsOneByte() && string->ContainsOnlyOneByte());
+ // Test left and right concatenation.
+ string = String::Concat(two_byte, cons_strings[i]);
+ CHECK(!string->IsOneByte() && string->ContainsOnlyOneByte());
+ string = String::Concat(cons_strings[i], two_byte);
+ CHECK(!string->IsOneByte() && string->ContainsOnlyOneByte());
+ }
+ // Set bits in different positions
+ // for strings of different lengths and alignments.
+ for (int alignment = 0; alignment < 7; alignment++) {
+ for (int size = 2; alignment + size < length; size *= 2) {
+ int zero_offset = size + alignment;
+ string_contents[zero_offset] = 0;
+ for (int i = 0; i < size; i++) {
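+ // Set a bit in the high byte so this code unit no longer fits in one byte.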
+ int shift = 8 + (i % 7);
+ string_contents[alignment + i] = 1 << shift;
+ string =
+ String::NewExternal(new TestResource(string_contents + alignment));
+ CHECK_EQ(size, string->Length());
+ CHECK(!string->ContainsOnlyOneByte());
+ string_contents[alignment + i] = 0x41;
+ }
+ string_contents[zero_offset] = 0x41;
+ }
+ }
+}
+
+
// Failed access check callback that performs a GC on each invocation.
void FailedAccessCheckCallbackGC(Local<v8::Object> target,
v8::AccessType type,
@@ -17251,7 +17346,9 @@ TEST(RunTwoIsolatesOnSingleThread) {
{
v8::HandleScope scope(isolate1);
- v8::Context::Scope cscope(isolate1, context1);
+ v8::Local<v8::Context> context =
+ v8::Local<v8::Context>::New(isolate1, context1);
+ v8::Context::Scope context_scope(context);
// Run something in new isolate.
CompileRun("var foo = 'isolate 1';");
ExpectString("function f() { return foo; }; f()", "isolate 1");
@@ -17265,7 +17362,9 @@ TEST(RunTwoIsolatesOnSingleThread) {
v8::Isolate::Scope iscope(isolate2);
v8::HandleScope scope(isolate2);
context2.Reset(isolate2, Context::New(isolate2));
- v8::Context::Scope cscope(isolate2, context2);
+ v8::Local<v8::Context> context =
+ v8::Local<v8::Context>::New(isolate2, context2);
+ v8::Context::Scope context_scope(context);
// Run something in new isolate.
CompileRun("var foo = 'isolate 2';");
@@ -17274,7 +17373,9 @@ TEST(RunTwoIsolatesOnSingleThread) {
{
v8::HandleScope scope(isolate1);
- v8::Context::Scope cscope(isolate1, context1);
+ v8::Local<v8::Context> context =
+ v8::Local<v8::Context>::New(isolate1, context1);
+ v8::Context::Scope context_scope(context);
// Now again in isolate 1
ExpectString("function f() { return foo; }; f()", "isolate 1");
}
@@ -17292,7 +17393,9 @@ TEST(RunTwoIsolatesOnSingleThread) {
{
v8::HandleScope scope(v8::Isolate::GetCurrent());
- v8::Context::Scope cscope(v8::Isolate::GetCurrent(), context_default);
+ v8::Local<v8::Context> context =
+ v8::Local<v8::Context>::New(v8::Isolate::GetCurrent(), context_default);
+ v8::Context::Scope context_scope(context);
// Variables in other isolates should be not available, verify there
// is an exception.
ExpectTrue("function f() {"
@@ -17312,22 +17415,26 @@ TEST(RunTwoIsolatesOnSingleThread) {
{
v8::Isolate::Scope iscope(isolate2);
v8::HandleScope scope(v8::Isolate::GetCurrent());
- v8::Context::Scope cscope(isolate2, context2);
+ v8::Local<v8::Context> context =
+ v8::Local<v8::Context>::New(isolate2, context2);
+ v8::Context::Scope context_scope(context);
ExpectString("function f() { return foo; }; f()", "isolate 2");
}
{
v8::HandleScope scope(v8::Isolate::GetCurrent());
- v8::Context::Scope cscope(v8::Isolate::GetCurrent(), context1);
+ v8::Local<v8::Context> context =
+ v8::Local<v8::Context>::New(v8::Isolate::GetCurrent(), context1);
+ v8::Context::Scope context_scope(context);
ExpectString("function f() { return foo; }; f()", "isolate 1");
}
{
v8::Isolate::Scope iscope(isolate2);
- context2.Dispose(context2->GetIsolate());
+ context2.Dispose();
}
- context1.Dispose(context1->GetIsolate());
+ context1.Dispose();
isolate1->Exit();
v8::V8::SetFatalErrorHandler(StoringErrorCallback);
@@ -17344,7 +17451,9 @@ TEST(RunTwoIsolatesOnSingleThread) {
// Check that default isolate still runs.
{
v8::HandleScope scope(v8::Isolate::GetCurrent());
- v8::Context::Scope cscope(v8::Isolate::GetCurrent(), context_default);
+ v8::Local<v8::Context> context =
+ v8::Local<v8::Context>::New(v8::Isolate::GetCurrent(), context_default);
+ v8::Context::Scope context_scope(context);
ExpectTrue("function f() { return isDefaultIsolate; }; f()");
}
}
@@ -17692,23 +17801,25 @@ TEST(DontDeleteCellLoadICAPI) {
class Visitor42 : public v8::PersistentHandleVisitor {
public:
- explicit Visitor42(v8::Persistent<v8::Object> object)
+ explicit Visitor42(v8::Persistent<v8::Object>* object)
: counter_(0), object_(object) { }
- virtual void VisitPersistentHandle(Persistent<Value> value,
+ virtual void VisitPersistentHandle(Persistent<Value>* value,
uint16_t class_id) {
- if (class_id == 42) {
- CHECK(value->IsObject());
- v8::Persistent<v8::Object> visited =
- v8::Persistent<v8::Object>::Cast(value);
- CHECK_EQ(42, visited.WrapperClassId(v8::Isolate::GetCurrent()));
- CHECK_EQ(Handle<Value>(*object_), Handle<Value>(*visited));
- ++counter_;
- }
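+ // Persistents can no longer be dereferenced directly; create Locals in a
+ // scope and compare the underlying values instead.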
+ if (class_id != 42) return;
+ CHECK_EQ(42, value->WrapperClassId());
+ v8::Isolate* isolate = v8::Isolate::GetCurrent();
+ v8::HandleScope handle_scope(isolate);
+ v8::Handle<v8::Value> handle = v8::Local<v8::Value>::New(isolate, *value);
+ v8::Handle<v8::Value> object =
+ v8::Local<v8::Object>::New(isolate, *object_);
+ CHECK(handle->IsObject());
+ CHECK_EQ(Handle<Object>::Cast(handle), object);
+ ++counter_;
}
int counter_;
- v8::Persistent<v8::Object> object_;
+ v8::Persistent<v8::Object>* object_;
};
@@ -17716,13 +17827,12 @@ TEST(PersistentHandleVisitor) {
LocalContext context;
v8::Isolate* isolate = context->GetIsolate();
v8::HandleScope scope(isolate);
- v8::Persistent<v8::Object> object =
- v8::Persistent<v8::Object>::New(isolate, v8::Object::New());
+ v8::Persistent<v8::Object> object(isolate, v8::Object::New());
CHECK_EQ(0, object.WrapperClassId(isolate));
object.SetWrapperClassId(isolate, 42);
CHECK_EQ(42, object.WrapperClassId(isolate));
- Visitor42 visitor(object);
+ Visitor42 visitor(&object);
v8::V8::VisitHandlesWithClassIds(&visitor);
CHECK_EQ(1, visitor.counter_);
@@ -17734,8 +17844,7 @@ TEST(WrapperClassId) {
LocalContext context;
v8::Isolate* isolate = context->GetIsolate();
v8::HandleScope scope(isolate);
- v8::Persistent<v8::Object> object =
- v8::Persistent<v8::Object>::New(isolate, v8::Object::New());
+ v8::Persistent<v8::Object> object(isolate, v8::Object::New());
CHECK_EQ(0, object.WrapperClassId(isolate));
object.SetWrapperClassId(isolate, 65535);
CHECK_EQ(65535, object.WrapperClassId(isolate));
@@ -17747,21 +17856,19 @@ TEST(PersistentHandleInNewSpaceVisitor) {
LocalContext context;
v8::Isolate* isolate = context->GetIsolate();
v8::HandleScope scope(isolate);
- v8::Persistent<v8::Object> object1 =
- v8::Persistent<v8::Object>::New(isolate, v8::Object::New());
+ v8::Persistent<v8::Object> object1(isolate, v8::Object::New());
CHECK_EQ(0, object1.WrapperClassId(isolate));
object1.SetWrapperClassId(isolate, 42);
CHECK_EQ(42, object1.WrapperClassId(isolate));
HEAP->CollectAllGarbage(i::Heap::kNoGCFlags);
- v8::Persistent<v8::Object> object2 =
- v8::Persistent<v8::Object>::New(isolate, v8::Object::New());
+ v8::Persistent<v8::Object> object2(isolate, v8::Object::New());
CHECK_EQ(0, object2.WrapperClassId(isolate));
object2.SetWrapperClassId(isolate, 42);
CHECK_EQ(42, object2.WrapperClassId(isolate));
- Visitor42 visitor(object2);
+ Visitor42 visitor(&object2);
v8::V8::VisitHandlesForPartialDependence(isolate, &visitor);
CHECK_EQ(1, visitor.counter_);
@@ -18709,18 +18816,19 @@ static void CountingErrorCallback(const char* location, const char* message) {
TEST(StaticGetters) {
LocalContext context;
+ i::Factory* factory = i::Isolate::Current()->factory();
v8::Isolate* isolate = v8::Isolate::GetCurrent();
v8::HandleScope scope(isolate);
- i::Handle<i::Object> undefined_value = FACTORY->undefined_value();
+ i::Handle<i::Object> undefined_value = factory->undefined_value();
CHECK(*v8::Utils::OpenHandle(*v8::Undefined()) == *undefined_value);
CHECK(*v8::Utils::OpenHandle(*v8::Undefined(isolate)) == *undefined_value);
- i::Handle<i::Object> null_value = FACTORY->null_value();
+ i::Handle<i::Object> null_value = factory->null_value();
CHECK(*v8::Utils::OpenHandle(*v8::Null()) == *null_value);
CHECK(*v8::Utils::OpenHandle(*v8::Null(isolate)) == *null_value);
- i::Handle<i::Object> true_value = FACTORY->true_value();
+ i::Handle<i::Object> true_value = factory->true_value();
CHECK(*v8::Utils::OpenHandle(*v8::True()) == *true_value);
CHECK(*v8::Utils::OpenHandle(*v8::True(isolate)) == *true_value);
- i::Handle<i::Object> false_value = FACTORY->false_value();
+ i::Handle<i::Object> false_value = factory->false_value();
CHECK(*v8::Utils::OpenHandle(*v8::False()) == *false_value);
CHECK(*v8::Utils::OpenHandle(*v8::False(isolate)) == *false_value);
@@ -18771,9 +18879,10 @@ TEST(IsolateEmbedderData) {
TEST(StringEmpty) {
LocalContext context;
+ i::Factory* factory = i::Isolate::Current()->factory();
v8::Isolate* isolate = v8::Isolate::GetCurrent();
v8::HandleScope scope(isolate);
- i::Handle<i::Object> empty_string = FACTORY->empty_string();
+ i::Handle<i::Object> empty_string = factory->empty_string();
CHECK(*v8::Utils::OpenHandle(*v8::String::Empty()) == *empty_string);
CHECK(*v8::Utils::OpenHandle(*v8::String::Empty(isolate)) == *empty_string);
diff --git a/deps/v8/test/cctest/test-assembler-arm.cc b/deps/v8/test/cctest/test-assembler-arm.cc
index 9acb90ab22..232f846be0 100644
--- a/deps/v8/test/cctest/test-assembler-arm.cc
+++ b/deps/v8/test/cctest/test-assembler-arm.cc
@@ -25,10 +25,6 @@
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-// TODO(dcarney): remove
-#define V8_ALLOW_ACCESS_TO_PERSISTENT_IMPLICIT
-#define V8_ALLOW_ACCESS_TO_PERSISTENT_ARROW
-
#include "v8.h"
#include "disassembler.h"
@@ -137,7 +133,7 @@ TEST(2) {
// some relocated stuff here, not executed
__ RecordComment("dead code, just testing relocations");
- __ mov(r0, Operand(FACTORY->true_value()));
+ __ mov(r0, Operand(isolate->factory()->true_value()));
__ RecordComment("dead code, just testing immediate operands");
__ mov(r0, Operand(-1));
__ mov(r0, Operand(0xFF000000));
diff --git a/deps/v8/test/cctest/test-assembler-ia32.cc b/deps/v8/test/cctest/test-assembler-ia32.cc
index 7c8e70cdbb..880370f0f8 100644
--- a/deps/v8/test/cctest/test-assembler-ia32.cc
+++ b/deps/v8/test/cctest/test-assembler-ia32.cc
@@ -27,10 +27,6 @@
#include <stdlib.h>
-// TODO(dcarney): remove
-#define V8_ALLOW_ACCESS_TO_PERSISTENT_IMPLICIT
-#define V8_ALLOW_ACCESS_TO_PERSISTENT_ARROW
-
#include "v8.h"
#include "disassembler.h"
@@ -141,7 +137,7 @@ TEST(AssemblerIa322) {
__ ret(0);
// some relocated stuff here, not executed
- __ mov(eax, FACTORY->true_value());
+ __ mov(eax, isolate->factory()->true_value());
__ jmp(NULL, RelocInfo::RUNTIME_ENTRY);
CodeDesc desc;
diff --git a/deps/v8/test/cctest/test-assembler-x64.cc b/deps/v8/test/cctest/test-assembler-x64.cc
index a989fbbd44..669475ad8a 100644
--- a/deps/v8/test/cctest/test-assembler-x64.cc
+++ b/deps/v8/test/cctest/test-assembler-x64.cc
@@ -27,10 +27,6 @@
#include <stdlib.h>
-// TODO(dcarney): remove
-#define V8_ALLOW_ACCESS_TO_PERSISTENT_IMPLICIT
-#define V8_ALLOW_ACCESS_TO_PERSISTENT_ARROW
-
#include "v8.h"
#include "macro-assembler.h"
diff --git a/deps/v8/test/cctest/test-compare-nil-ic-stub.cc b/deps/v8/test/cctest/test-compare-nil-ic-stub.cc
index 6177fde166..affb8bd637 100644
--- a/deps/v8/test/cctest/test-compare-nil-ic-stub.cc
+++ b/deps/v8/test/cctest/test-compare-nil-ic-stub.cc
@@ -46,9 +46,8 @@ TEST(TypeConstructors) {
TEST(ExternalICStateParsing) {
Types types;
types.Add(CompareNilICStub::UNDEFINED);
- CompareNilICStub stub(kNonStrictEquality, kUndefinedValue, types);
+ CompareNilICStub stub(kUndefinedValue, types);
CompareNilICStub stub2(stub.GetExtraICState());
- CHECK_EQ(stub.GetKind(), stub2.GetKind());
CHECK_EQ(stub.GetNilValue(), stub2.GetNilValue());
CHECK_EQ(stub.GetTypes().ToIntegral(), stub2.GetTypes().ToIntegral());
}
diff --git a/deps/v8/test/cctest/test-compiler.cc b/deps/v8/test/cctest/test-compiler.cc
index cff0f82414..b74ccb2b8d 100644
--- a/deps/v8/test/cctest/test-compiler.cc
+++ b/deps/v8/test/cctest/test-compiler.cc
@@ -28,10 +28,6 @@
#include <stdlib.h>
#include <wchar.h>
-// TODO(dcarney): remove
-#define V8_ALLOW_ACCESS_TO_PERSISTENT_ARROW
-#define V8_ALLOW_ACCESS_TO_PERSISTENT_IMPLICIT
-
#include "v8.h"
#include "compiler.h"
@@ -84,9 +80,10 @@ v8::DeclareExtension kPrintExtensionDeclaration(&kPrintExtension);
static MaybeObject* GetGlobalProperty(const char* name) {
- Handle<String> internalized_name = FACTORY->InternalizeUtf8String(name);
- return Isolate::Current()->context()->global_object()->GetProperty(
- *internalized_name);
+ Isolate* isolate = Isolate::Current();
+ Handle<String> internalized_name =
+ isolate->factory()->InternalizeUtf8String(name);
+ return isolate->context()->global_object()->GetProperty(*internalized_name);
}
@@ -101,19 +98,21 @@ static void SetGlobalProperty(const char* name, Object* value) {
static Handle<JSFunction> Compile(const char* source) {
- Handle<String> source_code(FACTORY->NewStringFromUtf8(CStrVector(source)));
+ Isolate* isolate = Isolate::Current();
+ Handle<String> source_code(
+ isolate->factory()->NewStringFromUtf8(CStrVector(source)));
Handle<SharedFunctionInfo> shared_function =
Compiler::Compile(source_code,
Handle<String>(),
0,
0,
- Handle<Context>(Isolate::Current()->native_context()),
+ Handle<Context>(isolate->native_context()),
NULL,
NULL,
Handle<String>::null(),
NOT_NATIVES_CODE);
- return FACTORY->NewFunctionFromSharedFunctionInfo(shared_function,
- Isolate::Current()->native_context());
+ return isolate->factory()->NewFunctionFromSharedFunctionInfo(
+ shared_function, isolate->native_context());
}
@@ -287,16 +286,15 @@ TEST(C2JSFrames) {
Execution::Call(fun0, global, 0, NULL, &has_pending_exception);
CHECK(!has_pending_exception);
- Object* foo_string =
- FACTORY->InternalizeOneByteString(STATIC_ASCII_VECTOR("foo"))->
- ToObjectChecked();
+ Object* foo_string = isolate->factory()->InternalizeOneByteString(
+ STATIC_ASCII_VECTOR("foo"))->ToObjectChecked();
MaybeObject* fun1_object = isolate->context()->global_object()->
GetProperty(String::cast(foo_string));
Handle<Object> fun1(fun1_object->ToObjectChecked(), isolate);
CHECK(fun1->IsJSFunction());
- Handle<Object> argv[] =
- { FACTORY->InternalizeOneByteString(STATIC_ASCII_VECTOR("hello")) };
+ Handle<Object> argv[] = { isolate->factory()->InternalizeOneByteString(
+ STATIC_ASCII_VECTOR("hello")) };
Execution::Call(Handle<JSFunction>::cast(fun1),
global,
ARRAY_SIZE(argv),
@@ -310,9 +308,11 @@ TEST(C2JSFrames) {
// source resulted in crash.
TEST(Regression236) {
CcTest::InitializeVM();
+ Isolate* isolate = Isolate::Current();
+ Factory* factory = isolate->factory();
v8::HandleScope scope(CcTest::isolate());
- Handle<Script> script = FACTORY->NewScript(FACTORY->empty_string());
+ Handle<Script> script = factory->NewScript(factory->empty_string());
script->set_source(HEAP->undefined_value());
CHECK_EQ(-1, GetScriptLineNumber(script, 0));
CHECK_EQ(-1, GetScriptLineNumber(script, 100));
diff --git a/deps/v8/test/cctest/test-cpu-profiler.cc b/deps/v8/test/cctest/test-cpu-profiler.cc
index 22af9e75b3..a615fe954e 100644
--- a/deps/v8/test/cctest/test-cpu-profiler.cc
+++ b/deps/v8/test/cctest/test-cpu-profiler.cc
@@ -27,15 +27,13 @@
//
// Tests of profiles generator and utilities.
-// TODO(dcarney): remove
-#define V8_ALLOW_ACCESS_TO_PERSISTENT_ARROW
-#define V8_ALLOW_ACCESS_TO_PERSISTENT_IMPLICIT
-
+#define V8_DISABLE_DEPRECATIONS 1
#include "v8.h"
#include "cpu-profiler-inl.h"
#include "cctest.h"
#include "utils.h"
#include "../include/v8-profiler.h"
+#undef V8_DISABLE_DEPRECATIONS
using i::CodeEntry;
using i::CpuProfile;
@@ -297,6 +295,19 @@ TEST(DeleteAllCpuProfiles) {
}
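+
+// Used below in place of CpuProfiler::FindCpuProfile: scans the collected
+// profiles for one with the given uid and returns NULL if there is none.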
+static const v8::CpuProfile* FindCpuProfile(v8::CpuProfiler* profiler,
+ unsigned uid) {
+ int length = profiler->GetProfileCount();
+ for (int i = 0; i < length; i++) {
+ const v8::CpuProfile* profile = profiler->GetCpuProfile(i);
+ if (profile->GetUid() == uid) {
+ return profile;
+ }
+ }
+ return NULL;
+}
+
+
TEST(DeleteCpuProfile) {
LocalContext env;
v8::HandleScope scope(env->GetIsolate());
@@ -309,10 +320,10 @@ TEST(DeleteCpuProfile) {
CHECK_NE(NULL, p1);
CHECK_EQ(1, cpu_profiler->GetProfileCount());
unsigned uid1 = p1->GetUid();
- CHECK_EQ(p1, cpu_profiler->FindCpuProfile(uid1));
+ CHECK_EQ(p1, FindCpuProfile(cpu_profiler, uid1));
const_cast<v8::CpuProfile*>(p1)->Delete();
CHECK_EQ(0, cpu_profiler->GetProfileCount());
- CHECK_EQ(NULL, cpu_profiler->FindCpuProfile(uid1));
+ CHECK_EQ(NULL, FindCpuProfile(cpu_profiler, uid1));
v8::Local<v8::String> name2 = v8::String::New("2");
cpu_profiler->StartCpuProfiling(name2);
@@ -321,8 +332,8 @@ TEST(DeleteCpuProfile) {
CHECK_EQ(1, cpu_profiler->GetProfileCount());
unsigned uid2 = p2->GetUid();
CHECK_NE(static_cast<int>(uid1), static_cast<int>(uid2));
- CHECK_EQ(p2, cpu_profiler->FindCpuProfile(uid2));
- CHECK_EQ(NULL, cpu_profiler->FindCpuProfile(uid1));
+ CHECK_EQ(p2, FindCpuProfile(cpu_profiler, uid2));
+ CHECK_EQ(NULL, FindCpuProfile(cpu_profiler, uid1));
v8::Local<v8::String> name3 = v8::String::New("3");
cpu_profiler->StartCpuProfiling(name3);
const v8::CpuProfile* p3 = cpu_profiler->StopCpuProfiling(name3);
@@ -330,17 +341,17 @@ TEST(DeleteCpuProfile) {
CHECK_EQ(2, cpu_profiler->GetProfileCount());
unsigned uid3 = p3->GetUid();
CHECK_NE(static_cast<int>(uid1), static_cast<int>(uid3));
- CHECK_EQ(p3, cpu_profiler->FindCpuProfile(uid3));
- CHECK_EQ(NULL, cpu_profiler->FindCpuProfile(uid1));
+ CHECK_EQ(p3, FindCpuProfile(cpu_profiler, uid3));
+ CHECK_EQ(NULL, FindCpuProfile(cpu_profiler, uid1));
const_cast<v8::CpuProfile*>(p2)->Delete();
CHECK_EQ(1, cpu_profiler->GetProfileCount());
- CHECK_EQ(NULL, cpu_profiler->FindCpuProfile(uid2));
- CHECK_EQ(p3, cpu_profiler->FindCpuProfile(uid3));
+ CHECK_EQ(NULL, FindCpuProfile(cpu_profiler, uid2));
+ CHECK_EQ(p3, FindCpuProfile(cpu_profiler, uid3));
const_cast<v8::CpuProfile*>(p3)->Delete();
CHECK_EQ(0, cpu_profiler->GetProfileCount());
- CHECK_EQ(NULL, cpu_profiler->FindCpuProfile(uid3));
- CHECK_EQ(NULL, cpu_profiler->FindCpuProfile(uid2));
- CHECK_EQ(NULL, cpu_profiler->FindCpuProfile(uid1));
+ CHECK_EQ(NULL, FindCpuProfile(cpu_profiler, uid3));
+ CHECK_EQ(NULL, FindCpuProfile(cpu_profiler, uid2));
+ CHECK_EQ(NULL, FindCpuProfile(cpu_profiler, uid1));
}
@@ -434,17 +445,23 @@ static const v8::CpuProfileNode* FindChild(const v8::CpuProfileNode* node,
const v8::CpuProfileNode* child = node->GetChild(i);
if (nameHandle->Equals(child->GetFunctionName())) return child;
}
- CHECK(false);
return NULL;
}
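+
+// Like FindChild, but CHECKs that the requested child exists.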
+static const v8::CpuProfileNode* GetChild(const v8::CpuProfileNode* node,
+ const char* name) {
+ const v8::CpuProfileNode* result = FindChild(node, name);
+ CHECK(result);
+ return result;
+}
+
+
static void CheckSimpleBranch(const v8::CpuProfileNode* node,
const char* names[], int length) {
for (int i = 0; i < length; i++) {
const char* name = names[i];
- node = FindChild(node, name);
- CHECK(node);
+ node = GetChild(node, name);
int expectedChildrenCount = (i == length - 1) ? 0 : 1;
CHECK_EQ(expectedChildrenCount, node->GetChildrenCount());
}
@@ -535,10 +552,10 @@ TEST(CollectCpuProfile) {
names[2] = v8::String::New("start");
CheckChildrenNames(root, names);
- const v8::CpuProfileNode* startNode = FindChild(root, "start");
+ const v8::CpuProfileNode* startNode = GetChild(root, "start");
CHECK_EQ(1, startNode->GetChildrenCount());
- const v8::CpuProfileNode* fooNode = FindChild(startNode, "foo");
+ const v8::CpuProfileNode* fooNode = GetChild(startNode, "foo");
CHECK_EQ(3, fooNode->GetChildrenCount());
const char* barBranch[] = { "bar", "delay", "loop" };
@@ -612,12 +629,291 @@ TEST(SampleWhenFrameIsNotSetup) {
// check there.
if (startNode && startNode->GetChildrenCount() > 0) {
CHECK_EQ(1, startNode->GetChildrenCount());
- const v8::CpuProfileNode* delayNode = FindChild(startNode, "delay");
+ const v8::CpuProfileNode* delayNode = GetChild(startNode, "delay");
if (delayNode->GetChildrenCount() > 0) {
CHECK_EQ(1, delayNode->GetChildrenCount());
- FindChild(delayNode, "loop");
+ GetChild(delayNode, "loop");
+ }
+ }
+
+ cpu_profiler->DeleteAllCpuProfiles();
+}
+
+
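+// Script that repeatedly reads and writes a property backed by native
+// accessors, so the accessor callbacks show up in the profile.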
+static const char* native_accessor_test_source = "function start(count) {\n"
+" for (var i = 0; i < count; i++) {\n"
+" var o = instance.foo;\n"
+" instance.foo = o + 1;\n"
+" }\n"
+"}\n";
+
+
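+// Callbacks that busy-wait for a configurable minimum duration so the
+// profiler has time to take samples while they are on the stack.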
+class TestApiCallbacks {
+ public:
+ explicit TestApiCallbacks(int min_duration_ms)
+ : min_duration_ms_(min_duration_ms),
+ is_warming_up_(false) {}
+
+ static v8::Handle<v8::Value> Getter(v8::Local<v8::String> name,
+ const v8::AccessorInfo& info) {
+ TestApiCallbacks* data = fromInfo(info);
+ data->Wait();
+ return v8::Int32::New(2013);
+ }
+
+ static void Setter(v8::Local<v8::String> name,
+ v8::Local<v8::Value> value,
+ const v8::AccessorInfo& info) {
+ TestApiCallbacks* data = fromInfo(info);
+ data->Wait();
+ }
+
+ static void Callback(const v8::FunctionCallbackInfo<v8::Value>& info) {
+ TestApiCallbacks* data = fromInfo(info);
+ data->Wait();
+ }
+
+ void set_warming_up(bool value) { is_warming_up_ = value; }
+
+ private:
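+ // Busy-wait (sleeping 1 ms at a time) until at least min_duration_ms_ has
+ // elapsed, unless we are merely warming up the ICs.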
+ void Wait() {
+ if (is_warming_up_) return;
+ double start = i::OS::TimeCurrentMillis();
+ double duration = 0;
+ while (duration < min_duration_ms_) {
+ i::OS::Sleep(1);
+ duration = i::OS::TimeCurrentMillis() - start;
}
}
+ static TestApiCallbacks* fromInfo(const v8::AccessorInfo& info) {
+ void* data = v8::External::Cast(*info.Data())->Value();
+ return reinterpret_cast<TestApiCallbacks*>(data);
+ }
+
+ static TestApiCallbacks* fromInfo(
+ const v8::FunctionCallbackInfo<v8::Value>& info) {
+ void* data = v8::External::Cast(*info.Data())->Value();
+ return reinterpret_cast<TestApiCallbacks*>(data);
+ }
+
+ int min_duration_ms_;
+ bool is_warming_up_;
+};
+
+
+// Test that native accessors are properly reported in the CPU profile.
+// This test checks the case when the long-running accessors are called
+// only once and the optimizer doesn't have a chance to change the
+// invocation code.
+TEST(NativeAccessorUninitializedIC) {
+ LocalContext env;
+ v8::HandleScope scope(env->GetIsolate());
+
+
+ v8::Local<v8::FunctionTemplate> func_template = v8::FunctionTemplate::New();
+ v8::Local<v8::ObjectTemplate> instance_template =
+ func_template->InstanceTemplate();
+
+ TestApiCallbacks accessors(100);
+ v8::Local<v8::External> data = v8::External::New(&accessors);
+ instance_template->SetAccessor(
+ v8::String::New("foo"), &TestApiCallbacks::Getter,
+ &TestApiCallbacks::Setter, data);
+ v8::Local<v8::Function> func = func_template->GetFunction();
+ v8::Local<v8::Object> instance = func->NewInstance();
+ env->Global()->Set(v8::String::New("instance"), instance);
+
+ v8::Script::Compile(v8::String::New(native_accessor_test_source))->Run();
+ v8::Local<v8::Function> function = v8::Local<v8::Function>::Cast(
+ env->Global()->Get(v8::String::New("start")));
+
+ v8::CpuProfiler* cpu_profiler = env->GetIsolate()->GetCpuProfiler();
+ v8::Local<v8::String> profile_name = v8::String::New("my_profile");
+
+ cpu_profiler->StartCpuProfiling(profile_name);
+ int32_t repeat_count = 1;
+ v8::Handle<v8::Value> args[] = { v8::Integer::New(repeat_count) };
+ function->Call(env->Global(), ARRAY_SIZE(args), args);
+ const v8::CpuProfile* profile = cpu_profiler->StopCpuProfiling(profile_name);
+
+ CHECK_NE(NULL, profile);
+ // Dump collected profile to have a better diagnostic in case of failure.
+ reinterpret_cast<i::CpuProfile*>(
+ const_cast<v8::CpuProfile*>(profile))->Print();
+
+ const v8::CpuProfileNode* root = profile->GetTopDownRoot();
+ const v8::CpuProfileNode* startNode = GetChild(root, "start");
+ GetChild(startNode, "get foo");
+ GetChild(startNode, "set foo");
+
+ cpu_profiler->DeleteAllCpuProfiles();
+}
+
+
+// Test that native accessors are properly reported in the CPU profile.
+// This test makes sure that the accessors are called enough times to become
+// hot and to trigger optimizations.
+TEST(NativeAccessorMonomorphicIC) {
+ LocalContext env;
+ v8::HandleScope scope(env->GetIsolate());
+
+
+ v8::Local<v8::FunctionTemplate> func_template = v8::FunctionTemplate::New();
+ v8::Local<v8::ObjectTemplate> instance_template =
+ func_template->InstanceTemplate();
+
+ TestApiCallbacks accessors(1);
+ v8::Local<v8::External> data = v8::External::New(&accessors);
+ instance_template->SetAccessor(
+ v8::String::New("foo"), &TestApiCallbacks::Getter,
+ &TestApiCallbacks::Setter, data);
+ v8::Local<v8::Function> func = func_template->GetFunction();
+ v8::Local<v8::Object> instance = func->NewInstance();
+ env->Global()->Set(v8::String::New("instance"), instance);
+
+ v8::Script::Compile(v8::String::New(native_accessor_test_source))->Run();
+ v8::Local<v8::Function> function = v8::Local<v8::Function>::Cast(
+ env->Global()->Get(v8::String::New("start")));
+
+ {
+ // Make sure accessors ICs are in monomorphic state before starting
+ // profiling.
+ accessors.set_warming_up(true);
+ int32_t warm_up_iterations = 3;
+ v8::Handle<v8::Value> args[] = { v8::Integer::New(warm_up_iterations) };
+ function->Call(env->Global(), ARRAY_SIZE(args), args);
+ accessors.set_warming_up(false);
+ }
+
+ v8::CpuProfiler* cpu_profiler = env->GetIsolate()->GetCpuProfiler();
+ v8::Local<v8::String> profile_name = v8::String::New("my_profile");
+
+ cpu_profiler->StartCpuProfiling(profile_name);
+ int32_t repeat_count = 100;
+ v8::Handle<v8::Value> args[] = { v8::Integer::New(repeat_count) };
+ function->Call(env->Global(), ARRAY_SIZE(args), args);
+ const v8::CpuProfile* profile = cpu_profiler->StopCpuProfiling(profile_name);
+
+ CHECK_NE(NULL, profile);
+ // Dump collected profile to have a better diagnostic in case of failure.
+ reinterpret_cast<i::CpuProfile*>(
+ const_cast<v8::CpuProfile*>(profile))->Print();
+
+ const v8::CpuProfileNode* root = profile->GetTopDownRoot();
+ const v8::CpuProfileNode* startNode = GetChild(root, "start");
+ // TODO(yurys): LoadIC should be changed to report external callback
+ // invocation. See r13768 where LoadCallbackProperty was removed.
+ // GetChild(startNode, "get foo");
+ GetChild(startNode, "set foo");
+
+ cpu_profiler->DeleteAllCpuProfiles();
+}
+
+
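+// Script that repeatedly invokes a native method so the callback shows up
+// in the collected profile.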
+static const char* native_method_test_source = "function start(count) {\n"
+" for (var i = 0; i < count; i++) {\n"
+" instance.fooMethod();\n"
+" }\n"
+"}\n";
+
+
+TEST(NativeMethodUninitializedIC) {
+ LocalContext env;
+ v8::HandleScope scope(env->GetIsolate());
+
+ TestApiCallbacks callbacks(100);
+ v8::Local<v8::External> data = v8::External::New(&callbacks);
+
+ v8::Local<v8::FunctionTemplate> func_template = v8::FunctionTemplate::New();
+ func_template->SetClassName(v8::String::New("Test_InstanceConstructor"));
+ v8::Local<v8::ObjectTemplate> proto_template =
+ func_template->PrototypeTemplate();
+ v8::Local<v8::Signature> signature = v8::Signature::New(func_template);
+ proto_template->Set(v8::String::New("fooMethod"), v8::FunctionTemplate::New(
+ &TestApiCallbacks::Callback, data, signature, 0));
+
+ v8::Local<v8::Function> func = func_template->GetFunction();
+ v8::Local<v8::Object> instance = func->NewInstance();
+ env->Global()->Set(v8::String::New("instance"), instance);
+
+ v8::Script::Compile(v8::String::New(native_method_test_source))->Run();
+ v8::Local<v8::Function> function = v8::Local<v8::Function>::Cast(
+ env->Global()->Get(v8::String::New("start")));
+
+ v8::CpuProfiler* cpu_profiler = env->GetIsolate()->GetCpuProfiler();
+ v8::Local<v8::String> profile_name = v8::String::New("my_profile");
+
+ cpu_profiler->StartCpuProfiling(profile_name);
+ int32_t repeat_count = 1;
+ v8::Handle<v8::Value> args[] = { v8::Integer::New(repeat_count) };
+ function->Call(env->Global(), ARRAY_SIZE(args), args);
+ const v8::CpuProfile* profile = cpu_profiler->StopCpuProfiling(profile_name);
+
+ CHECK_NE(NULL, profile);
+ // Dump collected profile to have a better diagnostic in case of failure.
+ reinterpret_cast<i::CpuProfile*>(
+ const_cast<v8::CpuProfile*>(profile))->Print();
+
+ const v8::CpuProfileNode* root = profile->GetTopDownRoot();
+ const v8::CpuProfileNode* startNode = GetChild(root, "start");
+ GetChild(startNode, "fooMethod");
+
+ cpu_profiler->DeleteAllCpuProfiles();
+}
+
+
+TEST(NativeMethodMonomorphicIC) {
+ LocalContext env;
+ v8::HandleScope scope(env->GetIsolate());
+
+ TestApiCallbacks callbacks(1);
+ v8::Local<v8::External> data = v8::External::New(&callbacks);
+
+ v8::Local<v8::FunctionTemplate> func_template = v8::FunctionTemplate::New();
+ func_template->SetClassName(v8::String::New("Test_InstanceConstructor"));
+ v8::Local<v8::ObjectTemplate> proto_template =
+ func_template->PrototypeTemplate();
+ v8::Local<v8::Signature> signature = v8::Signature::New(func_template);
+ proto_template->Set(v8::String::New("fooMethod"), v8::FunctionTemplate::New(
+ &TestApiCallbacks::Callback, data, signature, 0));
+
+ v8::Local<v8::Function> func = func_template->GetFunction();
+ v8::Local<v8::Object> instance = func->NewInstance();
+ env->Global()->Set(v8::String::New("instance"), instance);
+
+ v8::Script::Compile(v8::String::New(native_method_test_source))->Run();
+ v8::Local<v8::Function> function = v8::Local<v8::Function>::Cast(
+ env->Global()->Get(v8::String::New("start")));
+ {
+ // Make sure method ICs are in monomorphic state before starting
+ // profiling.
+ callbacks.set_warming_up(true);
+ int32_t warm_up_iterations = 3;
+ v8::Handle<v8::Value> args[] = { v8::Integer::New(warm_up_iterations) };
+ function->Call(env->Global(), ARRAY_SIZE(args), args);
+ callbacks.set_warming_up(false);
+ }
+
+ v8::CpuProfiler* cpu_profiler = env->GetIsolate()->GetCpuProfiler();
+ v8::Local<v8::String> profile_name = v8::String::New("my_profile");
+
+ cpu_profiler->StartCpuProfiling(profile_name);
+ int32_t repeat_count = 100;
+ v8::Handle<v8::Value> args[] = { v8::Integer::New(repeat_count) };
+ function->Call(env->Global(), ARRAY_SIZE(args), args);
+ const v8::CpuProfile* profile = cpu_profiler->StopCpuProfiling(profile_name);
+
+ CHECK_NE(NULL, profile);
+ // Dump the collected profile for better diagnostics in case of failure.
+ reinterpret_cast<i::CpuProfile*>(
+ const_cast<v8::CpuProfile*>(profile))->Print();
+
+ const v8::CpuProfileNode* root = profile->GetTopDownRoot();
+ GetChild(root, "start");
+ // TODO(yurys): CallIC should be changed to report external callback
+ // invocation.
+ // GetChild(startNode, "fooMethod");
+
cpu_profiler->DeleteAllCpuProfiles();
}
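For orientation, the profiler tests above all share one shape; the following is a minimal sketch of that pattern against the v8 3.19 public API as exercised in this patch (the function name and profile title are illustrative, not from the patch):

#include "v8.h"

// Profile a global JS function named "start" and inspect the top-down tree.
void ProfileStartFunction(v8::Isolate* isolate, v8::Handle<v8::Context> env) {
  v8::CpuProfiler* profiler = isolate->GetCpuProfiler();
  v8::Local<v8::String> title = v8::String::New("my_profile");
  profiler->StartCpuProfiling(title);
  v8::Local<v8::Function> fn = v8::Local<v8::Function>::Cast(
      env->Global()->Get(v8::String::New("start")));
  v8::Handle<v8::Value> args[] = { v8::Integer::New(100) };
  fn->Call(env->Global(), 1, args);
  const v8::CpuProfile* profile = profiler->StopCpuProfiling(title);
  const v8::CpuProfileNode* root = profile->GetTopDownRoot();
  (void) root;  // The tests assert on root's children at this point.
  profiler->DeleteAllCpuProfiles();
}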
diff --git a/deps/v8/test/cctest/test-debug.cc b/deps/v8/test/cctest/test-debug.cc
index c4df73ebbd..b22092a263 100644
--- a/deps/v8/test/cctest/test-debug.cc
+++ b/deps/v8/test/cctest/test-debug.cc
@@ -29,10 +29,6 @@
#include <stdlib.h>
-// TODO(dcarney): remove
-#define V8_ALLOW_ACCESS_TO_PERSISTENT_ARROW
-#define V8_ALLOW_ACCESS_TO_PERSISTENT_IMPLICIT
-
#include "v8.h"
#include "api.h"
@@ -153,6 +149,7 @@ class DebugLocalContext {
void ExposeDebug() {
v8::internal::Isolate* isolate =
reinterpret_cast<v8::internal::Isolate*>(context_->GetIsolate());
+ v8::internal::Factory* factory = isolate->factory();
v8::internal::Debug* debug = isolate->debug();
// Expose the debug context global object in the global object for testing.
debug->Load();
@@ -162,7 +159,7 @@ class DebugLocalContext {
Handle<JSGlobalProxy> global(Handle<JSGlobalProxy>::cast(
v8::Utils::OpenHandle(*context_->Global())));
Handle<v8::internal::String> debug_string =
- FACTORY->InternalizeOneByteString(STATIC_ASCII_VECTOR("debug"));
+ factory->InternalizeOneByteString(STATIC_ASCII_VECTOR("debug"));
SetProperty(isolate, global, debug_string,
Handle<Object>(debug->debug_context()->global_proxy(), isolate),
DONT_ENUM,
@@ -408,7 +405,7 @@ Handle<FixedArray> GetDebuggedFunctions() {
// Allocate array for the debugged functions
Handle<FixedArray> debugged_functions =
- FACTORY->NewFixedArray(count);
+ Isolate::Current()->factory()->NewFixedArray(count);
// Run through the debug info objects and collect all functions.
count = 0;
@@ -6626,7 +6623,15 @@ TEST(ScriptCollectedEventContext) {
v8::HandleScope scope(isolate);
context.Reset(isolate, v8::Context::New(isolate));
}
- context->Enter();
+
+ // Enter the context. We can't have a handle to the context in the outer
+ // scope, so we have to do it the hard way.
+ {
+ v8::HandleScope scope(isolate);
+ v8::Local<v8::Context> local_context =
+ v8::Local<v8::Context>::New(isolate, context);
+ local_context->Enter();
+ }
// Request the loaded scripts to initialize the debugger script cache.
debug->GetLoadedScripts();
@@ -6639,7 +6644,13 @@ TEST(ScriptCollectedEventContext) {
v8::Script::Compile(v8::String::New("eval('a=1')"))->Run();
v8::Script::Compile(v8::String::New("eval('a=2')"))->Run();
- context->Exit();
+ // Leave the context.
+ {
+ v8::HandleScope scope(isolate);
+ v8::Local<v8::Context> local_context =
+ v8::Local<v8::Context>::New(isolate, context);
+ local_context->Exit();
+ }
context.Dispose(isolate);
// Do garbage collection to collect the script above which is no longer
diff --git a/deps/v8/test/cctest/test-declarative-accessors.cc b/deps/v8/test/cctest/test-declarative-accessors.cc
index b09a29d1e8..a14c7fae20 100644
--- a/deps/v8/test/cctest/test-declarative-accessors.cc
+++ b/deps/v8/test/cctest/test-declarative-accessors.cc
@@ -27,9 +27,6 @@
#include <stdlib.h>
-// TODO(dcarney): remove
-#define V8_ALLOW_ACCESS_TO_PERSISTENT_IMPLICIT
-
#include "v8.h"
#include "cctest.h"
@@ -298,7 +295,6 @@ TEST(HandleDereferenceRead) {
->NewHandleDereference(helper.isolate_);
HandleArray* array = *helper.handle_array_;
v8::Handle<v8::String> expected = v8_str("whatever");
- array->handles_[index] = v8::Persistent<v8::Value>::New(helper.isolate_,
- expected);
+ array->handles_[index].Reset(helper.isolate_, expected);
VerifyRead(descriptor, internal_field, array, expected);
}
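The dominant mechanical change in these test files is the retirement of v8::Persistent<T>::New in favour of the (isolate, handle) constructor and Reset. A hedged sketch of both forms, assuming a live isolate and an open handle scope:

#include "v8.h"

void PersistentForms(v8::Isolate* isolate, v8::Handle<v8::Object> obj) {
  // Construct directly from an isolate and a local handle...
  v8::Persistent<v8::Object> direct(isolate, obj);
  // ...or re-point an existing (possibly empty) persistent handle.
  v8::Persistent<v8::Object> later;
  later.Reset(isolate, obj);
  direct.Dispose(isolate);
  later.Dispose(isolate);
}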
diff --git a/deps/v8/test/cctest/test-decls.cc b/deps/v8/test/cctest/test-decls.cc
index 6be5303cde..de45cbcdb8 100644
--- a/deps/v8/test/cctest/test-decls.cc
+++ b/deps/v8/test/cctest/test-decls.cc
@@ -27,11 +27,6 @@
#include <stdlib.h>
-// TODO(dcarney): remove
-#define V8_ALLOW_ACCESS_TO_PERSISTENT_ARROW
-#define V8_ALLOW_ACCESS_TO_RAW_HANDLE_CONSTRUCTOR
-#define V8_ALLOW_ACCESS_TO_PERSISTENT_IMPLICIT
-
#include "v8.h"
#include "heap.h"
@@ -57,8 +52,11 @@ class DeclarationContext {
virtual ~DeclarationContext() {
if (is_initialized_) {
- context_->Exit();
- context_.Dispose(context_->GetIsolate());
+ Isolate* isolate = Isolate::GetCurrent();
+ HandleScope scope(isolate);
+ Local<Context> context = Local<Context>::New(isolate, context_);
+ context->Exit();
+ context_.Dispose(isolate);
}
}
@@ -127,14 +125,14 @@ void DeclarationContext::InitializeIfNeeded() {
&HandleQuery,
0, 0,
data);
- context_.Reset(isolate,
- Context::New(isolate,
- 0,
- function->InstanceTemplate(),
- Local<Value>()));
- context_->Enter();
+ Local<Context> context = Context::New(isolate,
+ 0,
+ function->InstanceTemplate(),
+ Local<Value>());
+ context_.Reset(isolate, context);
+ context->Enter();
is_initialized_ = true;
- PostInitializeContext(Local<Context>::New(isolate, context_));
+ PostInitializeContext(context);
}
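Since a Persistent can no longer be dereferenced directly, entering and leaving a persistent context now goes through a freshly created Local inside a HandleScope. A condensed sketch of the shape used by DeclarationContext and the debug tests above:

#include "v8.h"

void EnterRunLeave(v8::Isolate* isolate,
                   v8::Persistent<v8::Context>& persistent_context) {
  {
    v8::HandleScope scope(isolate);
    v8::Local<v8::Context> context =
        v8::Local<v8::Context>::New(isolate, persistent_context);
    context->Enter();
    // ... compile and run scripts here ...
    context->Exit();
  }
  persistent_context.Dispose(isolate);
}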
diff --git a/deps/v8/test/cctest/test-dictionary.cc b/deps/v8/test/cctest/test-dictionary.cc
index 32fff60617..27c4fe4690 100644
--- a/deps/v8/test/cctest/test-dictionary.cc
+++ b/deps/v8/test/cctest/test-dictionary.cc
@@ -41,10 +41,12 @@ using namespace v8::internal;
TEST(ObjectHashTable) {
LocalContext context;
+ Isolate* isolate = Isolate::Current();
+ Factory* factory = isolate->factory();
v8::HandleScope scope(context->GetIsolate());
- Handle<ObjectHashTable> table = FACTORY->NewObjectHashTable(23);
- Handle<JSObject> a = FACTORY->NewJSArray(7);
- Handle<JSObject> b = FACTORY->NewJSArray(11);
+ Handle<ObjectHashTable> table = factory->NewObjectHashTable(23);
+ Handle<JSObject> a = factory->NewJSArray(7);
+ Handle<JSObject> b = factory->NewJSArray(11);
table = PutIntoObjectHashTable(table, a, b);
CHECK_EQ(table->NumberOfElements(), 1);
CHECK_EQ(table->Lookup(*a), *b);
@@ -57,12 +59,12 @@ TEST(ObjectHashTable) {
CHECK_EQ(table->Lookup(*b), HEAP->the_hole_value());
// Keys that are overwritten should not change number of elements.
- table = PutIntoObjectHashTable(table, a, FACTORY->NewJSArray(13));
+ table = PutIntoObjectHashTable(table, a, factory->NewJSArray(13));
CHECK_EQ(table->NumberOfElements(), 1);
CHECK_NE(table->Lookup(*a), *b);
// Keys mapped to the hole should be removed permanently.
- table = PutIntoObjectHashTable(table, a, FACTORY->the_hole_value());
+ table = PutIntoObjectHashTable(table, a, factory->the_hole_value());
CHECK_EQ(table->NumberOfElements(), 0);
CHECK_EQ(table->NumberOfDeletedElements(), 1);
CHECK_EQ(table->Lookup(*a), HEAP->the_hole_value());
@@ -70,8 +72,8 @@ TEST(ObjectHashTable) {
// Keys should map back to their respective values and also should get
// an identity hash code generated.
for (int i = 0; i < 100; i++) {
- Handle<JSObject> key = FACTORY->NewJSArray(7);
- Handle<JSObject> value = FACTORY->NewJSArray(11);
+ Handle<JSObject> key = factory->NewJSArray(7);
+ Handle<JSObject> value = factory->NewJSArray(11);
table = PutIntoObjectHashTable(table, key, value);
CHECK_EQ(table->NumberOfElements(), i + 1);
CHECK_NE(table->FindEntry(*key), ObjectHashTable::kNotFound);
@@ -82,7 +84,7 @@ TEST(ObjectHashTable) {
// Keys never added to the map which already have an identity hash
// code should not be found.
for (int i = 0; i < 100; i++) {
- Handle<JSObject> key = FACTORY->NewJSArray(7);
+ Handle<JSObject> key = factory->NewJSArray(7);
CHECK(key->GetIdentityHash(ALLOW_CREATION)->ToObjectChecked()->IsSmi());
CHECK_EQ(table->FindEntry(*key), ObjectHashTable::kNotFound);
CHECK_EQ(table->Lookup(*key), HEAP->the_hole_value());
@@ -92,7 +94,7 @@ TEST(ObjectHashTable) {
// Keys that don't have an identity hash should not be found and also
// should not get an identity hash code generated.
for (int i = 0; i < 100; i++) {
- Handle<JSObject> key = FACTORY->NewJSArray(7);
+ Handle<JSObject> key = factory->NewJSArray(7);
CHECK_EQ(table->Lookup(*key), HEAP->the_hole_value());
CHECK_EQ(key->GetIdentityHash(OMIT_CREATION), HEAP->undefined_value());
}
@@ -102,9 +104,11 @@ TEST(ObjectHashTable) {
#ifdef DEBUG
TEST(ObjectHashSetCausesGC) {
LocalContext context;
+ Isolate* isolate = Isolate::Current();
+ Factory* factory = isolate->factory();
v8::HandleScope scope(context->GetIsolate());
- Handle<ObjectHashSet> table = FACTORY->NewObjectHashSet(1);
- Handle<JSObject> key = FACTORY->NewJSArray(0);
+ Handle<ObjectHashSet> table = factory->NewObjectHashSet(1);
+ Handle<JSObject> key = factory->NewJSArray(0);
v8::Handle<v8::Object> key_obj = v8::Utils::ToLocal(key);
// Force allocation of hash table backing store for hidden properties.
@@ -132,9 +136,11 @@ TEST(ObjectHashSetCausesGC) {
#ifdef DEBUG
TEST(ObjectHashTableCausesGC) {
LocalContext context;
+ Isolate* isolate = Isolate::Current();
+ Factory* factory = isolate->factory();
v8::HandleScope scope(context->GetIsolate());
- Handle<ObjectHashTable> table = FACTORY->NewObjectHashTable(1);
- Handle<JSObject> key = FACTORY->NewJSArray(0);
+ Handle<ObjectHashTable> table = factory->NewObjectHashTable(1);
+ Handle<JSObject> key = factory->NewJSArray(0);
v8::Handle<v8::Object> key_obj = v8::Utils::ToLocal(key);
// Force allocation of hash table backing store for hidden properties.
diff --git a/deps/v8/test/cctest/test-disasm-arm.cc b/deps/v8/test/cctest/test-disasm-arm.cc
index 84f0d8630f..85b472d30a 100644
--- a/deps/v8/test/cctest/test-disasm-arm.cc
+++ b/deps/v8/test/cctest/test-disasm-arm.cc
@@ -28,10 +28,6 @@
#include <stdlib.h>
-// TODO(dcarney): remove
-#define V8_ALLOW_ACCESS_TO_PERSISTENT_IMPLICIT
-#define V8_ALLOW_ACCESS_TO_PERSISTENT_ARROW
-
#include "v8.h"
#include "debug.h"
diff --git a/deps/v8/test/cctest/test-disasm-ia32.cc b/deps/v8/test/cctest/test-disasm-ia32.cc
index f81b173e13..14447b2c45 100644
--- a/deps/v8/test/cctest/test-disasm-ia32.cc
+++ b/deps/v8/test/cctest/test-disasm-ia32.cc
@@ -27,10 +27,6 @@
#include <stdlib.h>
-// TODO(dcarney): remove
-#define V8_ALLOW_ACCESS_TO_PERSISTENT_IMPLICIT
-#define V8_ALLOW_ACCESS_TO_PERSISTENT_ARROW
-
#include "v8.h"
#include "debug.h"
@@ -65,7 +61,7 @@ TEST(DisasmIa320) {
__ sub(eax, Immediate(12345678));
__ xor_(eax, 12345678);
__ and_(eax, 12345678);
- Handle<FixedArray> foo = FACTORY->NewFixedArray(10, TENURED);
+ Handle<FixedArray> foo = isolate->factory()->NewFixedArray(10, TENURED);
__ cmp(eax, foo);
// ---- This one caused a crash
@@ -96,7 +92,7 @@ TEST(DisasmIa320) {
__ cmp(edx, 3);
__ cmp(edx, Operand(esp, 4));
__ cmp(Operand(ebp, ecx, times_4, 0), Immediate(1000));
- Handle<FixedArray> foo2 = FACTORY->NewFixedArray(10, TENURED);
+ Handle<FixedArray> foo2 = isolate->factory()->NewFixedArray(10, TENURED);
__ cmp(ebx, foo2);
__ cmpb(ebx, Operand(ebp, ecx, times_2, 0));
__ cmpb(Operand(ebp, ecx, times_2, 0), ebx);
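The other bulk substitution replaces the process-global FACTORY macro with the per-isolate factory. A sketch against the v8::internal API as used in these tests (the literal sizes and names are illustrative):

#include "v8.h"
#include "factory.h"

namespace i = v8::internal;

int AllocateViaIsolateFactory() {
  i::Isolate* isolate = i::Isolate::Current();
  i::Factory* factory = isolate->factory();
  // Previously spelled FACTORY->NewFixedArray(10, TENURED).
  i::Handle<i::FixedArray> array = factory->NewFixedArray(10, i::TENURED);
  i::Handle<i::String> name = factory->InternalizeUtf8String("theFunction");
  return array->length() + name->length();
}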
diff --git a/deps/v8/test/cctest/test-func-name-inference.cc b/deps/v8/test/cctest/test-func-name-inference.cc
index 5ebc679c12..a0c4b1e728 100644
--- a/deps/v8/test/cctest/test-func-name-inference.cc
+++ b/deps/v8/test/cctest/test-func-name-inference.cc
@@ -26,11 +26,6 @@
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-// TODO(dcarney): remove
-#define V8_ALLOW_ACCESS_TO_PERSISTENT_ARROW
-#define V8_ALLOW_ACCESS_TO_RAW_HANDLE_CONSTRUCTOR
-#define V8_ALLOW_ACCESS_TO_PERSISTENT_IMPLICIT
-
#include "v8.h"
#include "api.h"
@@ -56,6 +51,9 @@ using ::v8::internal::String;
static void CheckFunctionName(v8::Handle<v8::Script> script,
const char* func_pos_src,
const char* ref_inferred_name) {
+ Isolate* isolate = Isolate::Current();
+ Factory* factory = isolate->factory();
+
// Get script source.
Handle<Object> obj = v8::Utils::OpenHandle(*script);
Handle<SharedFunctionInfo> shared_function;
@@ -72,8 +70,8 @@ static void CheckFunctionName(v8::Handle<v8::Script> script,
// Find the position of a given func source substring in the source.
Handle<String> func_pos_str =
- FACTORY->NewStringFromAscii(CStrVector(func_pos_src));
- int func_pos = Runtime::StringMatch(Isolate::Current(),
+ factory->NewStringFromAscii(CStrVector(func_pos_src));
+ int func_pos = Runtime::StringMatch(isolate,
script_src,
func_pos_str,
0);
@@ -81,10 +79,9 @@ static void CheckFunctionName(v8::Handle<v8::Script> script,
#ifdef ENABLE_DEBUGGER_SUPPORT
// Obtain SharedFunctionInfo for the function.
- Isolate::Current()->debug()->PrepareForBreakPoints();
+ isolate->debug()->PrepareForBreakPoints();
Object* shared_func_info_ptr =
- Isolate::Current()->debug()->FindSharedFunctionInfoInScript(i_script,
- func_pos);
+ isolate->debug()->FindSharedFunctionInfoInScript(i_script, func_pos);
CHECK(shared_func_info_ptr != HEAP->undefined_value());
Handle<SharedFunctionInfo> shared_func_info(
SharedFunctionInfo::cast(shared_func_info_ptr));
@@ -263,6 +260,57 @@ TEST(MultipleFuncsInLiteral) {
}
+TEST(AnonymousInAnonymousClosure1) {
+ CcTest::InitializeVM();
+ v8::HandleScope scope(CcTest::isolate());
+
+ v8::Handle<v8::Script> script = Compile(
+ "(function() {\n"
+ " (function() {\n"
+ " var a = 1;\n"
+ " return;\n"
+ " })();\n"
+ " var b = function() {\n"
+ " var c = 1;\n"
+ " return;\n"
+ " };\n"
+ "})();");
+ CheckFunctionName(script, "return", "");
+}
+
+
+TEST(AnonymousInAnonymousClosure2) {
+ CcTest::InitializeVM();
+ v8::HandleScope scope(CcTest::isolate());
+
+ v8::Handle<v8::Script> script = Compile(
+ "(function() {\n"
+ " (function() {\n"
+ " var a = 1;\n"
+ " return;\n"
+ " })();\n"
+ " var c = 1;\n"
+ "})();");
+ CheckFunctionName(script, "return", "");
+}
+
+
+TEST(NamedInAnonymousClosure) {
+ CcTest::InitializeVM();
+ v8::HandleScope scope(CcTest::isolate());
+
+ v8::Handle<v8::Script> script = Compile(
+ "var foo = function() {\n"
+ " (function named() {\n"
+ " var a = 1;\n"
+ " })();\n"
+ " var c = 1;\n"
+ " return;\n"
+ "};");
+ CheckFunctionName(script, "return", "foo");
+}
+
+
// See http://code.google.com/p/v8/issues/detail?id=380
TEST(Issue380) {
CcTest::InitializeVM();
diff --git a/deps/v8/test/cctest/test-hashing.cc b/deps/v8/test/cctest/test-hashing.cc
index 1547613139..4906296ba2 100644
--- a/deps/v8/test/cctest/test-hashing.cc
+++ b/deps/v8/test/cctest/test-hashing.cc
@@ -27,9 +27,6 @@
#include <stdlib.h>
-// TODO(dcarney): remove
-#define V8_ALLOW_ACCESS_TO_PERSISTENT_IMPLICIT
-
#include "v8.h"
#include "factory.h"
diff --git a/deps/v8/test/cctest/test-heap-profiler.cc b/deps/v8/test/cctest/test-heap-profiler.cc
index 595a2069d2..254cd1e490 100644
--- a/deps/v8/test/cctest/test-heap-profiler.cc
+++ b/deps/v8/test/cctest/test-heap-profiler.cc
@@ -29,9 +29,6 @@
#include <ctype.h>
-// TODO(dcarney): remove
-#define V8_ALLOW_ACCESS_TO_PERSISTENT_IMPLICIT
-
#include "v8.h"
#include "cctest.h"
@@ -1137,14 +1134,11 @@ TEST(HeapSnapshotRetainedObjectInfo) {
1, TestRetainedObjectInfo::WrapperInfoCallback);
heap_profiler->SetWrapperClassInfoProvider(
2, TestRetainedObjectInfo::WrapperInfoCallback);
- v8::Persistent<v8::String> p_AAA =
- v8::Persistent<v8::String>::New(isolate, v8_str("AAA"));
+ v8::Persistent<v8::String> p_AAA(isolate, v8_str("AAA"));
p_AAA.SetWrapperClassId(isolate, 1);
- v8::Persistent<v8::String> p_BBB =
- v8::Persistent<v8::String>::New(isolate, v8_str("BBB"));
+ v8::Persistent<v8::String> p_BBB(isolate, v8_str("BBB"));
p_BBB.SetWrapperClassId(isolate, 1);
- v8::Persistent<v8::String> p_CCC =
- v8::Persistent<v8::String>::New(isolate, v8_str("CCC"));
+ v8::Persistent<v8::String> p_CCC(isolate, v8_str("CCC"));
p_CCC.SetWrapperClassId(isolate, 2);
CHECK_EQ(0, TestRetainedObjectInfo::instances.length());
const v8::HeapSnapshot* snapshot =
@@ -1196,8 +1190,7 @@ class GraphWithImplicitRefs {
instance_ = this;
isolate_ = (*env)->GetIsolate();
for (int i = 0; i < kObjectsCount; i++) {
- objects_[i] =
- v8::Persistent<v8::Object>::New(isolate_, v8::Object::New());
+ objects_[i].Reset(isolate_, v8::Object::New());
}
(*env)->Global()->Set(v8_str("root_object"),
v8::Local<v8::Value>::New(isolate_, objects_[0]));
@@ -1213,15 +1206,15 @@ class GraphWithImplicitRefs {
private:
void AddImplicitReferences() {
// 0 -> 1
- isolate_->SetObjectGroupId(v8::Persistent<v8::Object>::Cast(objects_[0]),
+ isolate_->SetObjectGroupId(objects_[0],
v8::UniqueId(1));
isolate_->SetReferenceFromGroup(
- v8::UniqueId(1), v8::Persistent<v8::Object>::Cast(objects_[1]));
+ v8::UniqueId(1), objects_[1]);
// Adding two more references: 1 -> 2, 1 -> 3
- isolate_->SetReference(v8::Persistent<v8::Object>::Cast(objects_[1]),
- v8::Persistent<v8::Object>::Cast(objects_[2]));
- isolate_->SetReference(v8::Persistent<v8::Object>::Cast(objects_[1]),
- v8::Persistent<v8::Object>::Cast(objects_[3]));
+ isolate_->SetReference(objects_[1].As<v8::Object>(),
+ objects_[2]);
+ isolate_->SetReference(objects_[1].As<v8::Object>(),
+ objects_[3]);
}
v8::Persistent<v8::Value> objects_[kObjectsCount];
@@ -1285,6 +1278,19 @@ TEST(DeleteAllHeapSnapshots) {
}
+static const v8::HeapSnapshot* FindHeapSnapshot(v8::HeapProfiler* profiler,
+ unsigned uid) {
+ int length = profiler->GetSnapshotCount();
+ for (int i = 0; i < length; i++) {
+ const v8::HeapSnapshot* snapshot = profiler->GetHeapSnapshot(i);
+ if (snapshot->GetUid() == uid) {
+ return snapshot;
+ }
+ }
+ return NULL;
+}
+
+
TEST(DeleteHeapSnapshot) {
LocalContext env;
v8::HandleScope scope(env->GetIsolate());
@@ -1296,10 +1302,10 @@ TEST(DeleteHeapSnapshot) {
CHECK_NE(NULL, s1);
CHECK_EQ(1, heap_profiler->GetSnapshotCount());
unsigned uid1 = s1->GetUid();
- CHECK_EQ(s1, heap_profiler->FindHeapSnapshot(uid1));
+ CHECK_EQ(s1, FindHeapSnapshot(heap_profiler, uid1));
const_cast<v8::HeapSnapshot*>(s1)->Delete();
CHECK_EQ(0, heap_profiler->GetSnapshotCount());
- CHECK_EQ(NULL, heap_profiler->FindHeapSnapshot(uid1));
+ CHECK_EQ(NULL, FindHeapSnapshot(heap_profiler, uid1));
const v8::HeapSnapshot* s2 =
heap_profiler->TakeHeapSnapshot(v8_str("2"));
@@ -1307,21 +1313,21 @@ TEST(DeleteHeapSnapshot) {
CHECK_EQ(1, heap_profiler->GetSnapshotCount());
unsigned uid2 = s2->GetUid();
CHECK_NE(static_cast<int>(uid1), static_cast<int>(uid2));
- CHECK_EQ(s2, heap_profiler->FindHeapSnapshot(uid2));
+ CHECK_EQ(s2, FindHeapSnapshot(heap_profiler, uid2));
const v8::HeapSnapshot* s3 =
heap_profiler->TakeHeapSnapshot(v8_str("3"));
CHECK_NE(NULL, s3);
CHECK_EQ(2, heap_profiler->GetSnapshotCount());
unsigned uid3 = s3->GetUid();
CHECK_NE(static_cast<int>(uid1), static_cast<int>(uid3));
- CHECK_EQ(s3, heap_profiler->FindHeapSnapshot(uid3));
+ CHECK_EQ(s3, FindHeapSnapshot(heap_profiler, uid3));
const_cast<v8::HeapSnapshot*>(s2)->Delete();
CHECK_EQ(1, heap_profiler->GetSnapshotCount());
- CHECK_EQ(NULL, heap_profiler->FindHeapSnapshot(uid2));
- CHECK_EQ(s3, heap_profiler->FindHeapSnapshot(uid3));
+ CHECK_EQ(NULL, FindHeapSnapshot(heap_profiler, uid2));
+ CHECK_EQ(s3, FindHeapSnapshot(heap_profiler, uid3));
const_cast<v8::HeapSnapshot*>(s3)->Delete();
CHECK_EQ(0, heap_profiler->GetSnapshotCount());
- CHECK_EQ(NULL, heap_profiler->FindHeapSnapshot(uid3));
+ CHECK_EQ(NULL, FindHeapSnapshot(heap_profiler, uid3));
}
@@ -1598,8 +1604,7 @@ TEST(WeakGlobalHandle) {
CHECK(!HasWeakGlobalHandle());
- v8::Persistent<v8::Object> handle =
- v8::Persistent<v8::Object>::New(env->GetIsolate(), v8::Object::New());
+ v8::Persistent<v8::Object> handle(env->GetIsolate(), v8::Object::New());
handle.MakeWeak<v8::Value, void>(env->GetIsolate(),
NULL,
PersistentHandleCallback);
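MakeWeak changes shape as well: the public form now hangs off the persistent handle itself, and the internal GlobalHandles variant drops its trailing NULL argument. A sketch of the public form, with the callback signature assumed from the 3.19-era WeakReferenceCallbacks<Value, void>::Revivable typedef (the typedef itself is not shown in this patch):

#include "v8.h"

static void OnWeak(v8::Isolate* isolate,
                   v8::Persistent<v8::Value>* handle,
                   void* parameter) {
  // Illustrative body; the real tests bump a counter and dispose the handle.
}

void CreateWeakHandle(v8::Isolate* isolate) {
  v8::Persistent<v8::Object> handle(isolate, v8::Object::New());
  handle.MakeWeak<v8::Value, void>(isolate, NULL, OnWeak);
}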
diff --git a/deps/v8/test/cctest/test-heap.cc b/deps/v8/test/cctest/test-heap.cc
index ca173c25a5..68ed83d145 100644
--- a/deps/v8/test/cctest/test-heap.cc
+++ b/deps/v8/test/cctest/test-heap.cc
@@ -27,10 +27,6 @@
#include <stdlib.h>
-// TODO(dcarney): remove
-#define V8_ALLOW_ACCESS_TO_PERSISTENT_ARROW
-#define V8_ALLOW_ACCESS_TO_PERSISTENT_IMPLICIT
-
#include "v8.h"
#include "compilation-cache.h"
@@ -150,6 +146,7 @@ static void CheckFindCodeObject(Isolate* isolate) {
TEST(HeapObjects) {
CcTest::InitializeVM();
Isolate* isolate = Isolate::Current();
+ Factory* factory = isolate->factory();
Heap* heap = isolate->heap();
HandleScope sc(isolate);
@@ -205,7 +202,7 @@ TEST(HeapObjects) {
CHECK(heap->nan_value()->IsNumber());
CHECK(std::isnan(heap->nan_value()->Number()));
- Handle<String> s = FACTORY->NewStringFromAscii(CStrVector("fisk hest "));
+ Handle<String> s = factory->NewStringFromAscii(CStrVector("fisk hest "));
CHECK(s->IsString());
CHECK_EQ(10, s->length());
@@ -343,10 +340,12 @@ TEST(String) {
TEST(LocalHandles) {
CcTest::InitializeVM();
+ Isolate* isolate = Isolate::Current();
+ Factory* factory = isolate->factory();
v8::HandleScope scope(CcTest::isolate());
const char* name = "Kasper the spunky";
- Handle<String> string = FACTORY->NewStringFromAscii(CStrVector(name));
+ Handle<String> string = factory->NewStringFromAscii(CStrVector(name));
CHECK_EQ(StrLength(name), string->length());
}
@@ -427,8 +426,7 @@ TEST(WeakGlobalHandlesScavenge) {
global_handles->MakeWeak(h2.location(),
reinterpret_cast<void*>(1234),
- &TestWeakGlobalHandleCallback,
- NULL);
+ &TestWeakGlobalHandleCallback);
// Scavenge treats weak pointers as normal roots.
heap->PerformScavenge();
@@ -474,8 +472,7 @@ TEST(WeakGlobalHandlesMark) {
global_handles->MakeWeak(h2.location(),
reinterpret_cast<void*>(1234),
- &TestWeakGlobalHandleCallback,
- NULL);
+ &TestWeakGlobalHandleCallback);
CHECK(!GlobalHandles::IsNearDeath(h1.location()));
CHECK(!GlobalHandles::IsNearDeath(h2.location()));
@@ -511,8 +508,7 @@ TEST(DeleteWeakGlobalHandle) {
global_handles->MakeWeak(h.location(),
reinterpret_cast<void*>(1234),
- &TestWeakGlobalHandleCallback,
- NULL);
+ &TestWeakGlobalHandleCallback);
// Scavenge does not recognize weak references.
heap->PerformScavenge();
@@ -616,17 +612,19 @@ TEST(StringTable) {
TEST(FunctionAllocation) {
CcTest::InitializeVM();
+ Isolate* isolate = Isolate::Current();
+ Factory* factory = isolate->factory();
v8::HandleScope sc(CcTest::isolate());
- Handle<String> name = FACTORY->InternalizeUtf8String("theFunction");
+ Handle<String> name = factory->InternalizeUtf8String("theFunction");
Handle<JSFunction> function =
- FACTORY->NewFunction(name, FACTORY->undefined_value());
+ factory->NewFunction(name, factory->undefined_value());
Handle<Map> initial_map =
- FACTORY->NewMap(JS_OBJECT_TYPE, JSObject::kHeaderSize);
+ factory->NewMap(JS_OBJECT_TYPE, JSObject::kHeaderSize);
function->set_initial_map(*initial_map);
- Handle<String> prop_name = FACTORY->InternalizeUtf8String("theSlot");
- Handle<JSObject> obj = FACTORY->NewJSObject(function);
+ Handle<String> prop_name = factory->InternalizeUtf8String("theSlot");
+ Handle<JSObject> obj = factory->NewJSObject(function);
obj->SetProperty(
*prop_name, Smi::FromInt(23), NONE, kNonStrictMode)->ToObjectChecked();
CHECK_EQ(Smi::FromInt(23), obj->GetProperty(*prop_name));
@@ -639,6 +637,8 @@ TEST(FunctionAllocation) {
TEST(ObjectProperties) {
CcTest::InitializeVM();
+ Isolate* isolate = Isolate::Current();
+ Factory* factory = isolate->factory();
v8::HandleScope sc(CcTest::isolate());
String* object_string = String::cast(HEAP->Object_string());
@@ -646,9 +646,9 @@ TEST(ObjectProperties) {
GetProperty(object_string)->ToObjectChecked();
JSFunction* object_function = JSFunction::cast(raw_object);
Handle<JSFunction> constructor(object_function);
- Handle<JSObject> obj = FACTORY->NewJSObject(constructor);
- Handle<String> first = FACTORY->InternalizeUtf8String("first");
- Handle<String> second = FACTORY->InternalizeUtf8String("second");
+ Handle<JSObject> obj = factory->NewJSObject(constructor);
+ Handle<String> first = factory->InternalizeUtf8String("first");
+ Handle<String> second = factory->InternalizeUtf8String("second");
// check for empty
CHECK(!obj->HasLocalProperty(*first));
@@ -694,35 +694,37 @@ TEST(ObjectProperties) {
// check string and internalized string match
const char* string1 = "fisk";
- Handle<String> s1 = FACTORY->NewStringFromAscii(CStrVector(string1));
+ Handle<String> s1 = factory->NewStringFromAscii(CStrVector(string1));
obj->SetProperty(
*s1, Smi::FromInt(1), NONE, kNonStrictMode)->ToObjectChecked();
- Handle<String> s1_string = FACTORY->InternalizeUtf8String(string1);
+ Handle<String> s1_string = factory->InternalizeUtf8String(string1);
CHECK(obj->HasLocalProperty(*s1_string));
// check internalized string and string match
const char* string2 = "fugl";
- Handle<String> s2_string = FACTORY->InternalizeUtf8String(string2);
+ Handle<String> s2_string = factory->InternalizeUtf8String(string2);
obj->SetProperty(
*s2_string, Smi::FromInt(1), NONE, kNonStrictMode)->ToObjectChecked();
- Handle<String> s2 = FACTORY->NewStringFromAscii(CStrVector(string2));
+ Handle<String> s2 = factory->NewStringFromAscii(CStrVector(string2));
CHECK(obj->HasLocalProperty(*s2));
}
TEST(JSObjectMaps) {
CcTest::InitializeVM();
+ Isolate* isolate = Isolate::Current();
+ Factory* factory = isolate->factory();
v8::HandleScope sc(CcTest::isolate());
- Handle<String> name = FACTORY->InternalizeUtf8String("theFunction");
+ Handle<String> name = factory->InternalizeUtf8String("theFunction");
Handle<JSFunction> function =
- FACTORY->NewFunction(name, FACTORY->undefined_value());
+ factory->NewFunction(name, factory->undefined_value());
Handle<Map> initial_map =
- FACTORY->NewMap(JS_OBJECT_TYPE, JSObject::kHeaderSize);
+ factory->NewMap(JS_OBJECT_TYPE, JSObject::kHeaderSize);
function->set_initial_map(*initial_map);
- Handle<String> prop_name = FACTORY->InternalizeUtf8String("theSlot");
- Handle<JSObject> obj = FACTORY->NewJSObject(function);
+ Handle<String> prop_name = factory->InternalizeUtf8String("theSlot");
+ Handle<JSObject> obj = factory->NewJSObject(function);
// Set a property
obj->SetProperty(
@@ -736,16 +738,18 @@ TEST(JSObjectMaps) {
TEST(JSArray) {
CcTest::InitializeVM();
+ Isolate* isolate = Isolate::Current();
+ Factory* factory = isolate->factory();
v8::HandleScope sc(CcTest::isolate());
- Handle<String> name = FACTORY->InternalizeUtf8String("Array");
+ Handle<String> name = factory->InternalizeUtf8String("Array");
Object* raw_object = Isolate::Current()->context()->global_object()->
GetProperty(*name)->ToObjectChecked();
Handle<JSFunction> function = Handle<JSFunction>(
JSFunction::cast(raw_object));
// Allocate the object.
- Handle<JSObject> object = FACTORY->NewJSObject(function);
+ Handle<JSObject> object = factory->NewJSObject(function);
Handle<JSArray> array = Handle<JSArray>::cast(object);
// We just initialized the VM, no heap allocation failure yet.
array->Initialize(0)->ToObjectChecked();
@@ -763,7 +767,7 @@ TEST(JSArray) {
// Set array length with larger than smi value.
Handle<Object> length =
- FACTORY->NewNumberFromUint(static_cast<uint32_t>(Smi::kMaxValue) + 1);
+ factory->NewNumberFromUint(static_cast<uint32_t>(Smi::kMaxValue) + 1);
array->SetElementsLength(*length)->ToObjectChecked();
uint32_t int_length = 0;
@@ -783,6 +787,8 @@ TEST(JSArray) {
TEST(JSObjectCopy) {
CcTest::InitializeVM();
+ Isolate* isolate = Isolate::Current();
+ Factory* factory = isolate->factory();
v8::HandleScope sc(CcTest::isolate());
String* object_string = String::cast(HEAP->Object_string());
@@ -790,9 +796,9 @@ TEST(JSObjectCopy) {
GetProperty(object_string)->ToObjectChecked();
JSFunction* object_function = JSFunction::cast(raw_object);
Handle<JSFunction> constructor(object_function);
- Handle<JSObject> obj = FACTORY->NewJSObject(constructor);
- Handle<String> first = FACTORY->InternalizeUtf8String("first");
- Handle<String> second = FACTORY->InternalizeUtf8String("second");
+ Handle<JSObject> obj = factory->NewJSObject(constructor);
+ Handle<String> first = factory->InternalizeUtf8String("first");
+ Handle<String> second = factory->InternalizeUtf8String("second");
obj->SetProperty(
*first, Smi::FromInt(1), NONE, kNonStrictMode)->ToObjectChecked();
@@ -831,6 +837,8 @@ TEST(JSObjectCopy) {
TEST(StringAllocation) {
CcTest::InitializeVM();
+ Isolate* isolate = Isolate::Current();
+ Factory* factory = isolate->factory();
const unsigned char chars[] = { 0xe5, 0xa4, 0xa7 };
for (int length = 0; length < 100; length++) {
@@ -846,18 +854,18 @@ TEST(StringAllocation) {
non_ascii[3 * i + 2] = chars[2];
}
Handle<String> non_ascii_sym =
- FACTORY->InternalizeUtf8String(
+ factory->InternalizeUtf8String(
Vector<const char>(non_ascii, 3 * length));
CHECK_EQ(length, non_ascii_sym->length());
Handle<String> ascii_sym =
- FACTORY->InternalizeOneByteString(OneByteVector(ascii, length));
+ factory->InternalizeOneByteString(OneByteVector(ascii, length));
CHECK_EQ(length, ascii_sym->length());
Handle<String> non_ascii_str =
- FACTORY->NewStringFromUtf8(Vector<const char>(non_ascii, 3 * length));
+ factory->NewStringFromUtf8(Vector<const char>(non_ascii, 3 * length));
non_ascii_str->Hash();
CHECK_EQ(length, non_ascii_str->length());
Handle<String> ascii_str =
- FACTORY->NewStringFromUtf8(Vector<const char>(ascii, length));
+ factory->NewStringFromUtf8(Vector<const char>(ascii, length));
ascii_str->Hash();
CHECK_EQ(length, ascii_str->length());
DeleteArray(non_ascii);
@@ -883,6 +891,8 @@ static int ObjectsFoundInHeap(Heap* heap, Handle<Object> objs[], int size) {
TEST(Iteration) {
CcTest::InitializeVM();
+ Isolate* isolate = Isolate::Current();
+ Factory* factory = isolate->factory();
v8::HandleScope scope(CcTest::isolate());
// Array of objects to scan the heap for.
@@ -891,16 +901,16 @@ TEST(Iteration) {
int next_objs_index = 0;
// Allocate a JS array to OLD_POINTER_SPACE and NEW_SPACE
- objs[next_objs_index++] = FACTORY->NewJSArray(10);
- objs[next_objs_index++] = FACTORY->NewJSArray(10,
+ objs[next_objs_index++] = factory->NewJSArray(10);
+ objs[next_objs_index++] = factory->NewJSArray(10,
FAST_HOLEY_ELEMENTS,
TENURED);
// Allocate a small string to OLD_DATA_SPACE and NEW_SPACE
objs[next_objs_index++] =
- FACTORY->NewStringFromAscii(CStrVector("abcdefghij"));
+ factory->NewStringFromAscii(CStrVector("abcdefghij"));
objs[next_objs_index++] =
- FACTORY->NewStringFromAscii(CStrVector("abcdefghij"), TENURED);
+ factory->NewStringFromAscii(CStrVector("abcdefghij"), TENURED);
// Allocate a large string (for large object space).
int large_size = Page::kMaxNonCodeHeapObjectSize + 1;
@@ -908,7 +918,7 @@ TEST(Iteration) {
for (int i = 0; i < large_size - 1; ++i) str[i] = 'a';
str[large_size - 1] = '\0';
objs[next_objs_index++] =
- FACTORY->NewStringFromAscii(CStrVector(str), TENURED);
+ factory->NewStringFromAscii(CStrVector(str), TENURED);
delete[] str;
// Add a Map object to look for.
@@ -943,6 +953,8 @@ static int LenFromSize(int size) {
TEST(Regression39128) {
// Test case for crbug.com/39128.
CcTest::InitializeVM();
+ Isolate* isolate = Isolate::Current();
+ Factory* factory = isolate->factory();
// Increase the chance of 'bump-the-pointer' allocation in old space.
HEAP->CollectAllGarbage(Heap::kNoGCFlags);
@@ -959,7 +971,7 @@ TEST(Regression39128) {
CHECK(object_ctor->has_initial_map());
Handle<Map> object_map(object_ctor->initial_map());
// Create a map with single inobject property.
- Handle<Map> my_map = FACTORY->CopyMap(object_map, 1);
+ Handle<Map> my_map = factory->CopyMap(object_map, 1);
int n_properties = my_map->inobject_properties();
CHECK_GT(n_properties, 0);
@@ -1020,6 +1032,8 @@ TEST(TestCodeFlushing) {
if (!FLAG_flush_code) return;
i::FLAG_allow_natives_syntax = true;
CcTest::InitializeVM();
+ Isolate* isolate = Isolate::Current();
+ Factory* factory = isolate->factory();
v8::HandleScope scope(CcTest::isolate());
const char* source = "function foo() {"
" var x = 42;"
@@ -1027,7 +1041,7 @@ TEST(TestCodeFlushing) {
" var z = x + y;"
"};"
"foo()";
- Handle<String> foo_name = FACTORY->InternalizeUtf8String("foo");
+ Handle<String> foo_name = factory->InternalizeUtf8String("foo");
// This compile will add the code to the compilation cache.
{ v8::HandleScope scope(CcTest::isolate());
@@ -1067,6 +1081,8 @@ TEST(TestCodeFlushingIncremental) {
if (!FLAG_flush_code || !FLAG_flush_code_incrementally) return;
i::FLAG_allow_natives_syntax = true;
CcTest::InitializeVM();
+ Isolate* isolate = Isolate::Current();
+ Factory* factory = isolate->factory();
v8::HandleScope scope(CcTest::isolate());
const char* source = "function foo() {"
" var x = 42;"
@@ -1074,7 +1090,7 @@ TEST(TestCodeFlushingIncremental) {
" var z = x + y;"
"};"
"foo()";
- Handle<String> foo_name = FACTORY->InternalizeUtf8String("foo");
+ Handle<String> foo_name = factory->InternalizeUtf8String("foo");
// This compile will add the code to the compilation cache.
{ v8::HandleScope scope(CcTest::isolate());
@@ -1133,6 +1149,8 @@ TEST(TestCodeFlushingIncrementalScavenge) {
if (!FLAG_flush_code || !FLAG_flush_code_incrementally) return;
i::FLAG_allow_natives_syntax = true;
CcTest::InitializeVM();
+ Isolate* isolate = Isolate::Current();
+ Factory* factory = isolate->factory();
v8::HandleScope scope(CcTest::isolate());
const char* source = "var foo = function() {"
" var x = 42;"
@@ -1144,8 +1162,8 @@ TEST(TestCodeFlushingIncrementalScavenge) {
" var x = 23;"
"};"
"bar();";
- Handle<String> foo_name = FACTORY->InternalizeUtf8String("foo");
- Handle<String> bar_name = FACTORY->InternalizeUtf8String("bar");
+ Handle<String> foo_name = factory->InternalizeUtf8String("foo");
+ Handle<String> bar_name = factory->InternalizeUtf8String("bar");
// Perform one initial GC to enable code flushing.
HEAP->CollectAllGarbage(Heap::kAbortIncrementalMarkingMask);
@@ -1200,6 +1218,7 @@ TEST(TestCodeFlushingIncrementalAbort) {
i::FLAG_allow_natives_syntax = true;
CcTest::InitializeVM();
Isolate* isolate = Isolate::Current();
+ Factory* factory = isolate->factory();
Heap* heap = isolate->heap();
v8::HandleScope scope(CcTest::isolate());
const char* source = "function foo() {"
@@ -1208,7 +1227,7 @@ TEST(TestCodeFlushingIncrementalAbort) {
" var z = x + y;"
"};"
"foo()";
- Handle<String> foo_name = FACTORY->InternalizeUtf8String("foo");
+ Handle<String> foo_name = factory->InternalizeUtf8String("foo");
// This compile will add the code to the compilation cache.
{ v8::HandleScope scope(CcTest::isolate());
@@ -1663,21 +1682,23 @@ TEST(LeakNativeContextViaMap) {
i::FLAG_allow_natives_syntax = true;
v8::Isolate* isolate = v8::Isolate::GetCurrent();
v8::HandleScope outer_scope(isolate);
- v8::Persistent<v8::Context> ctx1;
- v8::Persistent<v8::Context> ctx2;
+ v8::Persistent<v8::Context> ctx1p;
+ v8::Persistent<v8::Context> ctx2p;
{
v8::HandleScope scope(isolate);
- ctx1.Reset(isolate, v8::Context::New(isolate));
- ctx2.Reset(isolate, v8::Context::New(isolate));
+ ctx1p.Reset(isolate, v8::Context::New(isolate));
+ ctx2p.Reset(isolate, v8::Context::New(isolate));
+ v8::Local<v8::Context>::New(isolate, ctx1p)->Enter();
}
- ctx1->Enter();
HEAP->CollectAllAvailableGarbage();
CHECK_EQ(4, NumberOfGlobalObjects());
{
- v8::HandleScope inner_scope(v8::Isolate::GetCurrent());
+ v8::HandleScope inner_scope(isolate);
CompileRun("var v = {x: 42}");
+ v8::Local<v8::Context> ctx1 = v8::Local<v8::Context>::New(isolate, ctx1p);
+ v8::Local<v8::Context> ctx2 = v8::Local<v8::Context>::New(isolate, ctx2p);
v8::Local<v8::Value> v = ctx1->Global()->Get(v8_str("v"));
ctx2->Enter();
ctx2->Global()->Set(v8_str("o"), v);
@@ -1689,13 +1710,13 @@ TEST(LeakNativeContextViaMap) {
CHECK_EQ(42, res->Int32Value());
ctx2->Global()->Set(v8_str("o"), v8::Int32::New(0));
ctx2->Exit();
- ctx1->Exit();
- ctx1.Dispose(ctx1->GetIsolate());
+ v8::Local<v8::Context>::New(isolate, ctx1)->Exit();
+ ctx1p.Dispose(isolate);
v8::V8::ContextDisposedNotification();
}
HEAP->CollectAllAvailableGarbage();
CHECK_EQ(2, NumberOfGlobalObjects());
- ctx2.Dispose(ctx2->GetIsolate());
+ ctx2p.Dispose(isolate);
HEAP->CollectAllAvailableGarbage();
CHECK_EQ(0, NumberOfGlobalObjects());
}
@@ -1707,21 +1728,23 @@ TEST(LeakNativeContextViaFunction) {
i::FLAG_allow_natives_syntax = true;
v8::Isolate* isolate = v8::Isolate::GetCurrent();
v8::HandleScope outer_scope(isolate);
- v8::Persistent<v8::Context> ctx1;
- v8::Persistent<v8::Context> ctx2;
+ v8::Persistent<v8::Context> ctx1p;
+ v8::Persistent<v8::Context> ctx2p;
{
v8::HandleScope scope(isolate);
- ctx1.Reset(isolate, v8::Context::New(isolate));
- ctx2.Reset(isolate, v8::Context::New(isolate));
+ ctx1p.Reset(isolate, v8::Context::New(isolate));
+ ctx2p.Reset(isolate, v8::Context::New(isolate));
+ v8::Local<v8::Context>::New(isolate, ctx1p)->Enter();
}
- ctx1->Enter();
HEAP->CollectAllAvailableGarbage();
CHECK_EQ(4, NumberOfGlobalObjects());
{
- v8::HandleScope inner_scope(v8::Isolate::GetCurrent());
+ v8::HandleScope inner_scope(isolate);
CompileRun("var v = function() { return 42; }");
+ v8::Local<v8::Context> ctx1 = v8::Local<v8::Context>::New(isolate, ctx1p);
+ v8::Local<v8::Context> ctx2 = v8::Local<v8::Context>::New(isolate, ctx2p);
v8::Local<v8::Value> v = ctx1->Global()->Get(v8_str("v"));
ctx2->Enter();
ctx2->Global()->Set(v8_str("o"), v);
@@ -1734,12 +1757,12 @@ TEST(LeakNativeContextViaFunction) {
ctx2->Global()->Set(v8_str("o"), v8::Int32::New(0));
ctx2->Exit();
ctx1->Exit();
- ctx1.Dispose(ctx1->GetIsolate());
+ ctx1p.Dispose(ctx1->GetIsolate());
v8::V8::ContextDisposedNotification();
}
HEAP->CollectAllAvailableGarbage();
CHECK_EQ(2, NumberOfGlobalObjects());
- ctx2.Dispose(ctx2->GetIsolate());
+ ctx2p.Dispose(isolate);
HEAP->CollectAllAvailableGarbage();
CHECK_EQ(0, NumberOfGlobalObjects());
}
@@ -1749,21 +1772,23 @@ TEST(LeakNativeContextViaMapKeyed) {
i::FLAG_allow_natives_syntax = true;
v8::Isolate* isolate = v8::Isolate::GetCurrent();
v8::HandleScope outer_scope(isolate);
- v8::Persistent<v8::Context> ctx1;
- v8::Persistent<v8::Context> ctx2;
+ v8::Persistent<v8::Context> ctx1p;
+ v8::Persistent<v8::Context> ctx2p;
{
v8::HandleScope scope(isolate);
- ctx1.Reset(isolate, v8::Context::New(isolate));
- ctx2.Reset(isolate, v8::Context::New(isolate));
+ ctx1p.Reset(isolate, v8::Context::New(isolate));
+ ctx2p.Reset(isolate, v8::Context::New(isolate));
+ v8::Local<v8::Context>::New(isolate, ctx1p)->Enter();
}
- ctx1->Enter();
HEAP->CollectAllAvailableGarbage();
CHECK_EQ(4, NumberOfGlobalObjects());
{
- v8::HandleScope inner_scope(v8::Isolate::GetCurrent());
+ v8::HandleScope inner_scope(isolate);
CompileRun("var v = [42, 43]");
+ v8::Local<v8::Context> ctx1 = v8::Local<v8::Context>::New(isolate, ctx1p);
+ v8::Local<v8::Context> ctx2 = v8::Local<v8::Context>::New(isolate, ctx2p);
v8::Local<v8::Value> v = ctx1->Global()->Get(v8_str("v"));
ctx2->Enter();
ctx2->Global()->Set(v8_str("o"), v);
@@ -1776,12 +1801,12 @@ TEST(LeakNativeContextViaMapKeyed) {
ctx2->Global()->Set(v8_str("o"), v8::Int32::New(0));
ctx2->Exit();
ctx1->Exit();
- ctx1.Dispose(ctx1->GetIsolate());
+ ctx1p.Dispose(ctx1->GetIsolate());
v8::V8::ContextDisposedNotification();
}
HEAP->CollectAllAvailableGarbage();
CHECK_EQ(2, NumberOfGlobalObjects());
- ctx2.Dispose(ctx2->GetIsolate());
+ ctx2p.Dispose(isolate);
HEAP->CollectAllAvailableGarbage();
CHECK_EQ(0, NumberOfGlobalObjects());
}
@@ -1791,21 +1816,23 @@ TEST(LeakNativeContextViaMapProto) {
i::FLAG_allow_natives_syntax = true;
v8::Isolate* isolate = v8::Isolate::GetCurrent();
v8::HandleScope outer_scope(isolate);
- v8::Persistent<v8::Context> ctx1;
- v8::Persistent<v8::Context> ctx2;
+ v8::Persistent<v8::Context> ctx1p;
+ v8::Persistent<v8::Context> ctx2p;
{
v8::HandleScope scope(isolate);
- ctx1.Reset(isolate, v8::Context::New(isolate));
- ctx2.Reset(isolate, v8::Context::New(isolate));
+ ctx1p.Reset(isolate, v8::Context::New(isolate));
+ ctx2p.Reset(isolate, v8::Context::New(isolate));
+ v8::Local<v8::Context>::New(isolate, ctx1p)->Enter();
}
- ctx1->Enter();
HEAP->CollectAllAvailableGarbage();
CHECK_EQ(4, NumberOfGlobalObjects());
{
- v8::HandleScope inner_scope(v8::Isolate::GetCurrent());
+ v8::HandleScope inner_scope(isolate);
CompileRun("var v = { y: 42}");
+ v8::Local<v8::Context> ctx1 = v8::Local<v8::Context>::New(isolate, ctx1p);
+ v8::Local<v8::Context> ctx2 = v8::Local<v8::Context>::New(isolate, ctx2p);
v8::Local<v8::Value> v = ctx1->Global()->Get(v8_str("v"));
ctx2->Enter();
ctx2->Global()->Set(v8_str("o"), v);
@@ -1822,12 +1849,12 @@ TEST(LeakNativeContextViaMapProto) {
ctx2->Global()->Set(v8_str("o"), v8::Int32::New(0));
ctx2->Exit();
ctx1->Exit();
- ctx1.Dispose(ctx1->GetIsolate());
+ ctx1p.Dispose(isolate);
v8::V8::ContextDisposedNotification();
}
HEAP->CollectAllAvailableGarbage();
CHECK_EQ(2, NumberOfGlobalObjects());
- ctx2.Dispose(ctx2->GetIsolate());
+ ctx2p.Dispose(isolate);
HEAP->CollectAllAvailableGarbage();
CHECK_EQ(0, NumberOfGlobalObjects());
}
@@ -1891,6 +1918,8 @@ TEST(InstanceOfStubWriteBarrier) {
TEST(PrototypeTransitionClearing) {
CcTest::InitializeVM();
+ Isolate* isolate = Isolate::Current();
+ Factory* factory = isolate->factory();
v8::HandleScope scope(CcTest::isolate());
CompileRun(
@@ -1930,7 +1959,7 @@ TEST(PrototypeTransitionClearing) {
{
AlwaysAllocateScope always_allocate;
SimulateFullSpace(space);
- prototype = FACTORY->NewJSArray(32 * KB, FAST_HOLEY_ELEMENTS, TENURED);
+ prototype = factory->NewJSArray(32 * KB, FAST_HOLEY_ELEMENTS, TENURED);
}
// Add a prototype on an evacuation candidate and verify that transition
@@ -2078,11 +2107,11 @@ TEST(OptimizedAllocationAlwaysInNewSpace) {
// Test pretenuring of array literals allocated with HAllocate.
TEST(OptimizedPretenuringArrayLiterals) {
i::FLAG_allow_natives_syntax = true;
- i::FLAG_pretenure_literals = true;
CcTest::InitializeVM();
if (!i::V8::UseCrankshaft() || i::FLAG_always_opt) return;
if (i::FLAG_gc_global || i::FLAG_stress_compaction) return;
v8::HandleScope scope(CcTest::isolate());
+ HEAP->SetNewSpaceHighPromotionModeActive(true);
AlwaysAllocateScope always_allocate;
v8::Local<v8::Value> res = CompileRun(
@@ -2104,7 +2133,7 @@ TEST(OptimizedPretenuringArrayLiterals) {
TEST(OptimizedPretenuringSimpleArrayLiterals) {
i::FLAG_allow_natives_syntax = true;
- i::FLAG_pretenure_literals = false;
+ i::FLAG_pretenuring = false;
CcTest::InitializeVM();
if (!i::V8::UseCrankshaft() || i::FLAG_always_opt) return;
if (i::FLAG_gc_global || i::FLAG_stress_compaction) return;
@@ -2154,6 +2183,30 @@ TEST(OptimizedAllocationArrayLiterals) {
}
+TEST(OptimizedPretenuringCallNew) {
+ i::FLAG_allow_natives_syntax = true;
+ i::FLAG_pretenuring_call_new = true;
+ CcTest::InitializeVM();
+ if (!i::V8::UseCrankshaft() || i::FLAG_always_opt) return;
+ if (i::FLAG_gc_global || i::FLAG_stress_compaction) return;
+ v8::HandleScope scope(CcTest::isolate());
+ HEAP->SetNewSpaceHighPromotionModeActive(true);
+
+ AlwaysAllocateScope always_allocate;
+ v8::Local<v8::Value> res = CompileRun(
+ "function g() { this.a = 0; }"
+ "function f() {"
+ " return new g();"
+ "};"
+ "f(); f(); f();"
+ "%OptimizeFunctionOnNextCall(f);"
+ "f();");
+
+ Handle<JSObject> o =
+ v8::Utils::OpenHandle(*v8::Handle<v8::Object>::Cast(res));
+ CHECK(HEAP->InOldPointerSpace(*o));
+}
+
static int CountMapTransitions(Map* map) {
return map->transitions()->number_of_transitions();
}
@@ -2289,6 +2342,8 @@ TEST(ReleaseOverReservedPages) {
i::FLAG_crankshaft = false;
i::FLAG_always_opt = false;
CcTest::InitializeVM();
+ Isolate* isolate = Isolate::Current();
+ Factory* factory = isolate->factory();
v8::HandleScope scope(CcTest::isolate());
static const int number_of_test_pages = 20;
@@ -2298,7 +2353,7 @@ TEST(ReleaseOverReservedPages) {
for (int i = 0; i < number_of_test_pages; i++) {
AlwaysAllocateScope always_allocate;
SimulateFullSpace(old_pointer_space);
- FACTORY->NewFixedArray(1, TENURED);
+ factory->NewFixedArray(1, TENURED);
}
CHECK_EQ(number_of_test_pages + 1, old_pointer_space->CountTotalPages());
@@ -2328,6 +2383,8 @@ TEST(ReleaseOverReservedPages) {
TEST(Regress2237) {
CcTest::InitializeVM();
+ Isolate* isolate = Isolate::Current();
+ Factory* factory = isolate->factory();
v8::HandleScope scope(CcTest::isolate());
Handle<String> slice(HEAP->empty_string());
@@ -2335,7 +2392,7 @@ TEST(Regress2237) {
// Generate a parent that lives in new-space.
v8::HandleScope inner_scope(CcTest::isolate());
const char* c = "This text is long enough to trigger sliced strings.";
- Handle<String> s = FACTORY->NewStringFromAscii(CStrVector(c));
+ Handle<String> s = factory->NewStringFromAscii(CStrVector(c));
CHECK(s->IsSeqOneByteString());
CHECK(HEAP->InNewSpace(*s));
@@ -2343,7 +2400,7 @@ TEST(Regress2237) {
// lives in old-space.
SimulateFullSpace(HEAP->new_space());
AlwaysAllocateScope always_allocate;
- Handle<String> t = FACTORY->NewProperSubString(s, 5, 35);
+ Handle<String> t = factory->NewProperSubString(s, 5, 35);
CHECK(t->IsSlicedString());
CHECK(!HEAP->InNewSpace(*t));
*slice.location() = *t.location();
@@ -2367,7 +2424,7 @@ TEST(PrintSharedFunctionInfo) {
*v8::Handle<v8::Function>::Cast(
v8::Context::GetCurrent()->Global()->Get(v8_str("g"))));
- AssertNoAllocation no_alloc;
+ DisallowHeapAllocation no_allocation;
g->shared()->PrintLn();
}
#endif // OBJECT_PRINT
@@ -2899,6 +2956,8 @@ TEST(Regress169928) {
i::FLAG_allow_natives_syntax = true;
i::FLAG_crankshaft = false;
CcTest::InitializeVM();
+ Isolate* isolate = Isolate::Current();
+ Factory* factory = isolate->factory();
v8::HandleScope scope(CcTest::isolate());
// Some flags turn Scavenge collections into Mark-sweep collections
@@ -2929,7 +2988,7 @@ TEST(Regress169928) {
HEAP->CollectGarbage(NEW_SPACE);
// Allocate the object.
- Handle<FixedArray> array_data = FACTORY->NewFixedArray(2, NOT_TENURED);
+ Handle<FixedArray> array_data = factory->NewFixedArray(2, NOT_TENURED);
array_data->set(0, Smi::FromInt(1));
array_data->set(1, Smi::FromInt(2));
@@ -2937,7 +2996,7 @@ TEST(Regress169928) {
JSArray::kSize + AllocationSiteInfo::kSize +
kPointerSize);
- Handle<JSArray> array = FACTORY->NewJSArrayWithElements(array_data,
+ Handle<JSArray> array = factory->NewJSArrayWithElements(array_data,
FAST_SMI_ELEMENTS,
NOT_TENURED);
@@ -3103,3 +3162,19 @@ TEST(DeferredHandles) {
isolate->handle_scope_implementer()->Iterate(&visitor);
deferred.Detach();
}
+
+
+TEST(IncrementalMarkingStepMakesBigProgressWithLargeObjects) {
+ CcTest::InitializeVM();
+ v8::HandleScope scope(CcTest::isolate());
+ CompileRun("function f(n) {"
+ " var a = new Array(n);"
+ " for (var i = 0; i < n; i += 100) a[i] = i;"
+ "};"
+ "f(10 * 1024 * 1024);");
+ IncrementalMarking* marking = HEAP->incremental_marking();
+ if (marking->IsStopped()) marking->Start();
+ // This big step should be sufficient to mark the whole array.
+ marking->Step(100 * MB, IncrementalMarking::NO_GC_VIA_STACK_GUARD);
+ ASSERT(marking->IsComplete());
+}
diff --git a/deps/v8/test/cctest/test-lockers.cc b/deps/v8/test/cctest/test-lockers.cc
index 5977f095c6..9d24535952 100644
--- a/deps/v8/test/cctest/test-lockers.cc
+++ b/deps/v8/test/cctest/test-lockers.cc
@@ -27,9 +27,6 @@
#include <limits.h>
-// TODO(dcarney): remove
-#define V8_ALLOW_ACCESS_TO_PERSISTENT_IMPLICIT
-
#include "v8.h"
#include "api.h"
@@ -73,7 +70,9 @@ class KangarooThread : public v8::internal::Thread {
v8::Isolate::Scope isolate_scope(isolate_);
CHECK_EQ(isolate_, v8::internal::Isolate::Current());
v8::HandleScope scope(isolate_);
- v8::Context::Scope context_scope(isolate_, context_);
+ v8::Local<v8::Context> context =
+ v8::Local<v8::Context>::New(isolate_, context_);
+ v8::Context::Scope context_scope(context);
Local<Value> v = CompileRun("getValue()");
CHECK(v->IsNumber());
CHECK_EQ(30, static_cast<int>(v->NumberValue()));
@@ -82,7 +81,9 @@ class KangarooThread : public v8::internal::Thread {
v8::Locker locker(isolate_);
v8::Isolate::Scope isolate_scope(isolate_);
v8::HandleScope scope(isolate_);
- v8::Context::Scope context_scope(isolate_, context_);
+ v8::Local<v8::Context> context =
+ v8::Local<v8::Context>::New(isolate_, context_);
+ v8::Context::Scope context_scope(context);
Local<Value> v = CompileRun("getValue()");
CHECK(v->IsNumber());
CHECK_EQ(30, static_cast<int>(v->NumberValue()));
@@ -352,7 +353,9 @@ class LockIsolateAndCalculateFibSharedContextThread : public JoinableThread {
v8::Locker lock(isolate_);
v8::Isolate::Scope isolate_scope(isolate_);
HandleScope handle_scope(isolate_);
- v8::Context::Scope context_scope(isolate_, context_);
+ v8::Local<v8::Context> context =
+ v8::Local<v8::Context>::New(isolate_, context_);
+ v8::Context::Scope context_scope(context);
CalcFibAndCheck();
}
private:
@@ -540,7 +543,9 @@ class LockUnlockLockThread : public JoinableThread {
{
v8::Isolate::Scope isolate_scope(isolate_);
v8::HandleScope handle_scope(isolate_);
- v8::Context::Scope context_scope(isolate_, context_);
+ v8::Local<v8::Context> context =
+ v8::Local<v8::Context>::New(isolate_, context_);
+ v8::Context::Scope context_scope(context);
CalcFibAndCheck();
}
{
@@ -553,7 +558,9 @@ class LockUnlockLockThread : public JoinableThread {
v8::HandleScope handle_scope(isolate_);
CHECK(v8::Locker::IsLocked(isolate_));
CHECK(!v8::Locker::IsLocked(CcTest::default_isolate()));
- v8::Context::Scope context_scope(isolate_, context_);
+ v8::Local<v8::Context> context =
+ v8::Local<v8::Context>::New(isolate_, context_);
+ v8::Context::Scope context_scope(context);
CalcFibAndCheck();
}
}
@@ -597,7 +604,9 @@ class LockUnlockLockDefaultIsolateThread : public JoinableThread {
v8::Locker lock1(CcTest::default_isolate());
{
v8::HandleScope handle_scope(CcTest::default_isolate());
- v8::Context::Scope context_scope(CcTest::default_isolate(), context_);
+ v8::Local<v8::Context> context =
+ v8::Local<v8::Context>::New(CcTest::default_isolate(), context_);
+ v8::Context::Scope context_scope(context);
CalcFibAndCheck();
}
{
@@ -605,7 +614,9 @@ class LockUnlockLockDefaultIsolateThread : public JoinableThread {
{
v8::Locker lock2(CcTest::default_isolate());
v8::HandleScope handle_scope(CcTest::default_isolate());
- v8::Context::Scope context_scope(CcTest::default_isolate(), context_);
+ v8::Local<v8::Context> context =
+ v8::Local<v8::Context>::New(CcTest::default_isolate(), context_);
+ v8::Context::Scope context_scope(context);
CalcFibAndCheck();
}
}
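In the same vein, v8::Context::Scope no longer accepts an (isolate, Persistent) pair, so each locker thread materializes a Local first. A condensed sketch of the pattern repeated throughout test-lockers.cc:

#include "v8.h"

void RunScriptInContext(v8::Isolate* isolate,
                        v8::Persistent<v8::Context>& persistent_context) {
  v8::Locker locker(isolate);
  v8::Isolate::Scope isolate_scope(isolate);
  v8::HandleScope handle_scope(isolate);
  v8::Local<v8::Context> context =
      v8::Local<v8::Context>::New(isolate, persistent_context);
  v8::Context::Scope context_scope(context);
  // ... CompileRun(...) and assertions go here ...
}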
diff --git a/deps/v8/test/cctest/test-log-stack-tracer.cc b/deps/v8/test/cctest/test-log-stack-tracer.cc
index 3c34ede8bf..ca6c7aea47 100644
--- a/deps/v8/test/cctest/test-log-stack-tracer.cc
+++ b/deps/v8/test/cctest/test-log-stack-tracer.cc
@@ -29,10 +29,6 @@
#include <stdlib.h>
-// TODO(dcarney): remove
-#define V8_ALLOW_ACCESS_TO_PERSISTENT_IMPLICIT
-#define V8_ALLOW_ACCESS_TO_PERSISTENT_ARROW
-
#include "v8.h"
#include "api.h"
diff --git a/deps/v8/test/cctest/test-log.cc b/deps/v8/test/cctest/test-log.cc
index 3288fc8a61..d2ef02d6b6 100644
--- a/deps/v8/test/cctest/test-log.cc
+++ b/deps/v8/test/cctest/test-log.cc
@@ -34,11 +34,6 @@
#include <cmath>
#endif // __linux__
-// TODO(dcarney): remove
-#define V8_ALLOW_ACCESS_TO_PERSISTENT_ARROW
-#define V8_ALLOW_ACCESS_TO_RAW_HANDLE_CONSTRUCTOR
-#define V8_ALLOW_ACCESS_TO_PERSISTENT_IMPLICIT
-
#include "v8.h"
#include "log.h"
#include "cpu-profiler.h"
@@ -405,9 +400,9 @@ TEST(LogCallbacks) {
ScopedLoggerInitializer initialize_logger(false);
Logger* logger = initialize_logger.logger();
- v8::Persistent<v8::FunctionTemplate> obj =
- v8::Persistent<v8::FunctionTemplate>::New(v8::Isolate::GetCurrent(),
- v8::FunctionTemplate::New());
+ v8::Local<v8::FunctionTemplate> obj =
+ v8::Local<v8::FunctionTemplate>::New(v8::Isolate::GetCurrent(),
+ v8::FunctionTemplate::New());
obj->SetClassName(v8_str("Obj"));
v8::Handle<v8::ObjectTemplate> proto = obj->PrototypeTemplate();
v8::Local<v8::Signature> signature =
@@ -434,8 +429,6 @@ TEST(LogCallbacks) {
ObjMethod1);
CHECK_NE(NULL, StrNStr(log.start(), ref_data.start(), log.length()));
-
- obj.Dispose(v8::Isolate::GetCurrent());
}
@@ -458,9 +451,9 @@ TEST(LogAccessorCallbacks) {
ScopedLoggerInitializer initialize_logger(false);
Logger* logger = initialize_logger.logger();
- v8::Persistent<v8::FunctionTemplate> obj =
- v8::Persistent<v8::FunctionTemplate>::New(v8::Isolate::GetCurrent(),
- v8::FunctionTemplate::New());
+ v8::Local<v8::FunctionTemplate> obj =
+ v8::Local<v8::FunctionTemplate>::New(v8::Isolate::GetCurrent(),
+ v8::FunctionTemplate::New());
obj->SetClassName(v8_str("Obj"));
v8::Handle<v8::ObjectTemplate> inst = obj->InstanceTemplate();
inst->SetAccessor(v8_str("prop1"), Prop1Getter, Prop1Setter);
@@ -493,8 +486,6 @@ TEST(LogAccessorCallbacks) {
Prop2Getter);
CHECK_NE(NULL,
StrNStr(log.start(), prop2_getter_record.start(), log.length()));
-
- obj.Dispose(v8::Isolate::GetCurrent());
}
diff --git a/deps/v8/test/cctest/test-mark-compact.cc b/deps/v8/test/cctest/test-mark-compact.cc
index dc21ac2e3c..45cb39c07e 100644
--- a/deps/v8/test/cctest/test-mark-compact.cc
+++ b/deps/v8/test/cctest/test-mark-compact.cc
@@ -36,10 +36,6 @@
#endif
-// TODO(dcarney): remove
-#define V8_ALLOW_ACCESS_TO_PERSISTENT_ARROW
-#define V8_ALLOW_ACCESS_TO_PERSISTENT_IMPLICIT
-
#include "v8.h"
#include "global-handles.h"
@@ -239,24 +235,26 @@ TEST(MarkCompactCollector) {
// TODO(1600): compaction of map space is temporary removed from GC.
#if 0
-static Handle<Map> CreateMap() {
- return FACTORY->NewMap(JS_OBJECT_TYPE, JSObject::kHeaderSize);
+static Handle<Map> CreateMap(Isolate* isolate) {
+ return isolate->factory()->NewMap(JS_OBJECT_TYPE, JSObject::kHeaderSize);
}
TEST(MapCompact) {
FLAG_max_map_space_pages = 16;
CcTest::InitializeVM();
+ Isolate* isolate = Isolate::Current();
+ Factory* factory = isolate->factory();
{
v8::HandleScope sc;
// keep allocating maps while pointers are still encodable and thus
// mark compact is permitted.
- Handle<JSObject> root = FACTORY->NewJSObjectFromMap(CreateMap());
+ Handle<JSObject> root = factory->NewJSObjectFromMap(CreateMap());
do {
Handle<Map> map = CreateMap();
map->set_prototype(*root);
- root = FACTORY->NewJSObjectFromMap(map);
+ root = factory->NewJSObjectFromMap(map);
} while (HEAP->map_space()->MapPointersEncodable());
}
// Now, as we don't have any handles to just allocated maps, we should
@@ -327,16 +325,13 @@ TEST(ObjectGroups) {
global_handles->Create(HEAP->AllocateFixedArray(1)->ToObjectChecked());
global_handles->MakeWeak(g1s1.location(),
reinterpret_cast<void*>(1234),
- &WeakPointerCallback,
- NULL);
+ &WeakPointerCallback);
global_handles->MakeWeak(g1s2.location(),
reinterpret_cast<void*>(1234),
- &WeakPointerCallback,
- NULL);
+ &WeakPointerCallback);
global_handles->MakeWeak(g1c1.location(),
reinterpret_cast<void*>(1234),
- &WeakPointerCallback,
- NULL);
+ &WeakPointerCallback);
Handle<Object> g2s1 =
global_handles->Create(HEAP->AllocateFixedArray(1)->ToObjectChecked());
@@ -346,16 +341,13 @@ TEST(ObjectGroups) {
global_handles->Create(HEAP->AllocateFixedArray(1)->ToObjectChecked());
global_handles->MakeWeak(g2s1.location(),
reinterpret_cast<void*>(1234),
- &WeakPointerCallback,
- NULL);
+ &WeakPointerCallback);
global_handles->MakeWeak(g2s2.location(),
reinterpret_cast<void*>(1234),
- &WeakPointerCallback,
- NULL);
+ &WeakPointerCallback);
global_handles->MakeWeak(g2c1.location(),
reinterpret_cast<void*>(1234),
- &WeakPointerCallback,
- NULL);
+ &WeakPointerCallback);
Handle<Object> root = global_handles->Create(*g1s1); // make a root.
@@ -384,8 +376,7 @@ TEST(ObjectGroups) {
// Weaken the root.
global_handles->MakeWeak(root.location(),
reinterpret_cast<void*>(1234),
- &WeakPointerCallback,
- NULL);
+ &WeakPointerCallback);
// But make children strong roots---all the objects (except for children)
// should be collectable now.
global_handles->ClearWeakness(g1c1.location());
@@ -413,12 +404,10 @@ TEST(ObjectGroups) {
// And now make children weak again and collect them.
global_handles->MakeWeak(g1c1.location(),
reinterpret_cast<void*>(1234),
- &WeakPointerCallback,
- NULL);
+ &WeakPointerCallback);
global_handles->MakeWeak(g2c1.location(),
reinterpret_cast<void*>(1234),
- &WeakPointerCallback,
- NULL);
+ &WeakPointerCallback);
HEAP->CollectGarbage(OLD_POINTER_SPACE);
CHECK_EQ(7, NumberOfWeakCalls);
diff --git a/deps/v8/test/cctest/test-object-observe.cc b/deps/v8/test/cctest/test-object-observe.cc
index 9eb6f3c859..0dd02650b0 100644
--- a/deps/v8/test/cctest/test-object-observe.cc
+++ b/deps/v8/test/cctest/test-object-observe.cc
@@ -418,7 +418,8 @@ TEST(ObservationWeakMap) {
"Object.observe(obj, function(){});"
"Object.getNotifier(obj);"
"obj = null;");
- i::Handle<i::JSObject> observation_state = FACTORY->observation_state();
+ i::Handle<i::JSObject> observation_state =
+ i::Isolate::Current()->factory()->observation_state();
i::Handle<i::JSWeakMap> observerInfoMap =
i::Handle<i::JSWeakMap>::cast(
i::GetProperty(observation_state, "observerInfoMap"));
diff --git a/deps/v8/test/cctest/test-parsing.cc b/deps/v8/test/cctest/test-parsing.cc
index 170ec76a14..62a5bcc397 100644
--- a/deps/v8/test/cctest/test-parsing.cc
+++ b/deps/v8/test/cctest/test-parsing.cc
@@ -29,9 +29,6 @@
#include <stdio.h>
#include <string.h>
-// TODO(dcarney): remove
-#define V8_ALLOW_ACCESS_TO_PERSISTENT_IMPLICIT
-
#include "v8.h"
#include "cctest.h"
@@ -339,13 +336,15 @@ TEST(RegressChromium62639) {
TEST(Regress928) {
v8::V8::Initialize();
+ i::Isolate* isolate = i::Isolate::Current();
+ i::Factory* factory = isolate->factory();
// Preparsing didn't consider the catch clause of a try statement
// as with-content, which made it assume that a function inside
// the block could be lazily compiled, and an extra, unexpected,
// entry was added to the data.
int marker;
- i::Isolate::Current()->stack_guard()->SetStackLimit(
+ isolate->stack_guard()->SetStackLimit(
reinterpret_cast<uintptr_t>(&marker) - 128 * 1024);
const char* program =
@@ -354,7 +353,7 @@ TEST(Regress928) {
v8::HandleScope handles(v8::Isolate::GetCurrent());
i::Handle<i::String> source(
- FACTORY->NewStringFromAscii(i::CStrVector(program)));
+ factory->NewStringFromAscii(i::CStrVector(program)));
i::GenericStringUtf16CharacterStream stream(source, 0, source->length());
i::ScriptDataImpl* data = i::PreParserApi::PreParse(&stream);
CHECK(!data->HasError());
@@ -437,17 +436,19 @@ void TestCharacterStream(const char* ascii_source,
unsigned end = 0) {
if (end == 0) end = length;
unsigned sub_length = end - start;
- i::HandleScope test_scope(i::Isolate::Current());
+ i::Isolate* isolate = i::Isolate::Current();
+ i::Factory* factory = isolate->factory();
+ i::HandleScope test_scope(isolate);
i::SmartArrayPointer<i::uc16> uc16_buffer(new i::uc16[length]);
for (unsigned i = 0; i < length; i++) {
uc16_buffer[i] = static_cast<i::uc16>(ascii_source[i]);
}
i::Vector<const char> ascii_vector(ascii_source, static_cast<int>(length));
i::Handle<i::String> ascii_string(
- FACTORY->NewStringFromAscii(ascii_vector));
+ factory->NewStringFromAscii(ascii_vector));
TestExternalResource resource(*uc16_buffer, length);
i::Handle<i::String> uc16_string(
- FACTORY->NewExternalStringFromTwoByte(&resource));
+ factory->NewExternalStringFromTwoByte(&resource));
i::ExternalTwoByteStringUtf16CharacterStream uc16_stream(
i::Handle<i::ExternalTwoByteString>::cast(uc16_string), start, end);
@@ -987,12 +988,15 @@ TEST(ScopePositions) {
{ NULL, NULL, NULL, i::EVAL_SCOPE, i::CLASSIC_MODE }
};
+ i::Isolate* isolate = i::Isolate::Current();
+ i::Factory* factory = isolate->factory();
+
v8::HandleScope handles(v8::Isolate::GetCurrent());
v8::Handle<v8::Context> context = v8::Context::New(v8::Isolate::GetCurrent());
v8::Context::Scope context_scope(context);
int marker;
- i::Isolate::Current()->stack_guard()->SetStackLimit(
+ isolate->stack_guard()->SetStackLimit(
reinterpret_cast<uintptr_t>(&marker) - 128 * 1024);
for (int i = 0; source_data[i].outer_prefix; i++) {
@@ -1012,9 +1016,9 @@ TEST(ScopePositions) {
// Parse program source.
i::Handle<i::String> source(
- FACTORY->NewStringFromUtf8(i::CStrVector(program.start())));
+ factory->NewStringFromUtf8(i::CStrVector(program.start())));
CHECK_EQ(source->length(), kProgramSize);
- i::Handle<i::Script> script = FACTORY->NewScript(source);
+ i::Handle<i::Script> script = factory->NewScript(source);
i::CompilationInfoWithZone info(script);
i::Parser parser(&info);
parser.set_allow_lazy(true);
@@ -1032,7 +1036,7 @@ TEST(ScopePositions) {
CHECK_EQ(scope->inner_scopes()->length(), 1);
i::Scope* inner_scope = scope->inner_scopes()->at(0);
- CHECK_EQ(inner_scope->type(), source_data[i].scope_type);
+ CHECK_EQ(inner_scope->scope_type(), source_data[i].scope_type);
CHECK_EQ(inner_scope->start_position(), kPrefixLen);
// The end position of a token is one position after the last
// character belonging to that token.
@@ -1042,10 +1046,12 @@ TEST(ScopePositions) {
i::Handle<i::String> FormatMessage(i::ScriptDataImpl* data) {
+ i::Isolate* isolate = i::Isolate::Current();
+ i::Factory* factory = isolate->factory();
i::Handle<i::String> format = v8::Utils::OpenHandle(
*v8::String::New(data->BuildMessage()));
i::Vector<const char*> args = data->BuildArgs();
- i::Handle<i::JSArray> args_array = FACTORY->NewJSArray(args.length());
+ i::Handle<i::JSArray> args_array = factory->NewJSArray(args.length());
for (int i = 0; i < args.length(); i++) {
i::JSArray::SetElement(args_array,
i,
@@ -1053,7 +1059,7 @@ i::Handle<i::String> FormatMessage(i::ScriptDataImpl* data) {
NONE,
i::kNonStrictMode);
}
- i::Handle<i::JSObject> builtins(i::Isolate::Current()->js_builtins_object());
+ i::Handle<i::JSObject> builtins(isolate->js_builtins_object());
i::Handle<i::Object> format_fun =
i::GetProperty(builtins, "FormatMessage");
i::Handle<i::Object> arg_handles[] = { format, args_array };
@@ -1072,6 +1078,7 @@ enum ParserFlag {
kAllowHarmonyScoping,
kAllowModules,
kAllowGenerators,
+ kAllowForOf,
kParserFlagCount
};
@@ -1088,15 +1095,19 @@ static bool checkParserFlag(unsigned flags, ParserFlag flag) {
parser.set_allow_harmony_scoping(checkParserFlag(flags, \
kAllowHarmonyScoping)); \
parser.set_allow_modules(checkParserFlag(flags, kAllowModules)); \
- parser.set_allow_generators(checkParserFlag(flags, kAllowGenerators));
+ parser.set_allow_generators(checkParserFlag(flags, kAllowGenerators)); \
+ parser.set_allow_for_of(checkParserFlag(flags, kAllowForOf));
void TestParserSyncWithFlags(i::Handle<i::String> source, unsigned flags) {
- uintptr_t stack_limit = i::Isolate::Current()->stack_guard()->real_climit();
+ i::Isolate* isolate = i::Isolate::Current();
+ i::Factory* factory = isolate->factory();
+
+ uintptr_t stack_limit = isolate->stack_guard()->real_climit();
// Preparse the data.
i::CompleteParserRecorder log;
{
- i::Scanner scanner(i::Isolate::Current()->unicode_cache());
+ i::Scanner scanner(isolate->unicode_cache());
i::GenericStringUtf16CharacterStream stream(source, 0, source->length());
v8::preparser::PreParser preparser(&scanner, &log, stack_limit);
SET_PARSER_FLAGS(preparser, flags);
@@ -1110,7 +1121,7 @@ void TestParserSyncWithFlags(i::Handle<i::String> source, unsigned flags) {
// Parse the data
i::FunctionLiteral* function;
{
- i::Handle<i::Script> script = FACTORY->NewScript(source);
+ i::Handle<i::Script> script = factory->NewScript(source);
i::CompilationInfoWithZone info(script);
i::Parser parser(&info);
SET_PARSER_FLAGS(parser, flags);
@@ -1121,8 +1132,8 @@ void TestParserSyncWithFlags(i::Handle<i::String> source, unsigned flags) {
// Check that preparsing fails iff parsing fails.
if (function == NULL) {
// Extract exception from the parser.
- CHECK(i::Isolate::Current()->has_pending_exception());
- i::MaybeObject* maybe_object = i::Isolate::Current()->pending_exception();
+ CHECK(isolate->has_pending_exception());
+ i::MaybeObject* maybe_object = isolate->pending_exception();
i::JSObject* exception = NULL;
CHECK(maybe_object->To(&exception));
i::Handle<i::JSObject> exception_handle(exception);
@@ -1246,12 +1257,15 @@ TEST(ParserSync) {
// correct timeout for this and re-enable this test again.
if (i::FLAG_stress_compaction) return;
+ i::Isolate* isolate = i::Isolate::Current();
+ i::Factory* factory = isolate->factory();
+
v8::HandleScope handles(v8::Isolate::GetCurrent());
v8::Handle<v8::Context> context = v8::Context::New(v8::Isolate::GetCurrent());
v8::Context::Scope context_scope(context);
int marker;
- i::Isolate::Current()->stack_guard()->SetStackLimit(
+ isolate->stack_guard()->SetStackLimit(
reinterpret_cast<uintptr_t>(&marker) - 128 * 1024);
for (int i = 0; context_data[i][0] != NULL; ++i) {
@@ -1274,7 +1288,7 @@ TEST(ParserSync) {
context_data[i][1]);
CHECK(length == kProgramSize);
i::Handle<i::String> source =
- FACTORY->NewStringFromAscii(i::CStrVector(program.start()));
+ factory->NewStringFromAscii(i::CStrVector(program.start()));
TestParserSync(source);
}
}
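
Nearly every hunk in this file follows the same migration: the implicit FACTORY macro gives way to an Isolate fetched once per test, with its Factory cached alongside. A sketch of the pattern, assuming code already running inside an initialized VM (vec and limit are placeholder names):

  i::Isolate* isolate = i::Isolate::Current();
  i::Factory* factory = isolate->factory();
  // Before: FACTORY->NewStringFromAscii(vec);
  i::Handle<i::String> source = factory->NewStringFromAscii(vec);
  // Other per-isolate services hang off the same cached pointer:
  isolate->stack_guard()->SetStackLimit(limit);
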
diff --git a/deps/v8/test/cctest/test-profile-generator.cc b/deps/v8/test/cctest/test-profile-generator.cc
index 70b34e3d7c..70091d9032 100644
--- a/deps/v8/test/cctest/test-profile-generator.cc
+++ b/deps/v8/test/cctest/test-profile-generator.cc
@@ -27,9 +27,6 @@
//
// Tests of profiles generator and utilities.
-// TODO(dcarney): remove
-#define V8_ALLOW_ACCESS_TO_PERSISTENT_IMPLICIT
-
#include "v8.h"
#include "profile-generator-inl.h"
#include "cctest.h"
@@ -875,7 +872,7 @@ TEST(RecordStackTraceAtStartProfiling) {
v8::HandleScope scope(isolate);
const char* extensions[] = { "v8/profiler" };
v8::ExtensionConfiguration config(1, extensions);
- v8::Local<v8::Context> context = v8::Context::New(isolate);
+ v8::Local<v8::Context> context = v8::Context::New(isolate, &config);
context->Enter();
CpuProfiler* profiler = i::Isolate::Current()->cpu_profiler();
diff --git a/deps/v8/test/cctest/test-random.cc b/deps/v8/test/cctest/test-random.cc
index 0837ab3e36..0a8594c04e 100644
--- a/deps/v8/test/cctest/test-random.cc
+++ b/deps/v8/test/cctest/test-random.cc
@@ -25,10 +25,6 @@
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-// TODO(dcarney): remove
-#define V8_ALLOW_ACCESS_TO_PERSISTENT_IMPLICIT
-#define V8_ALLOW_ACCESS_TO_PERSISTENT_ARROW
-
#include "v8.h"
#include "cctest.h"
@@ -85,8 +81,8 @@ TEST(CrankshaftRandom) {
CompileRun("function f() { return Math.random(); }");
- Object* string = FACTORY->InternalizeOneByteString(STATIC_ASCII_VECTOR("f"))->
- ToObjectChecked();
+ Object* string = Isolate::Current()->factory()->InternalizeOneByteString(
+ STATIC_ASCII_VECTOR("f"))->ToObjectChecked();
MaybeObject* fun_object =
context->global_object()->GetProperty(String::cast(string));
Handle<JSFunction> fun(JSFunction::cast(fun_object->ToObjectChecked()));
diff --git a/deps/v8/test/cctest/test-regexp.cc b/deps/v8/test/cctest/test-regexp.cc
index f311dcc578..ac62b752c0 100644
--- a/deps/v8/test/cctest/test-regexp.cc
+++ b/deps/v8/test/cctest/test-regexp.cc
@@ -28,10 +28,6 @@
#include <stdlib.h>
-// TODO(dcarney): remove
-#define V8_ALLOW_ACCESS_TO_PERSISTENT_IMPLICIT
-#define V8_ALLOW_ACCESS_TO_PERSISTENT_ARROW
-
#include "v8.h"
#include "ast.h"
@@ -788,15 +784,22 @@ TEST(MacroAssemblerNativeSimple) {
ArchRegExpMacroAssembler m(NativeRegExpMacroAssembler::ASCII, 4,
Isolate::Current()->runtime_zone());
- uc16 foo_chars[3] = {'f', 'o', 'o'};
- Vector<const uc16> foo(foo_chars, 3);
-
- Label fail;
- m.CheckCharacters(foo, 0, &fail, true);
+ Label fail, backtrack;
+ m.PushBacktrack(&fail);
+ m.CheckNotAtStart(NULL);
+ m.LoadCurrentCharacter(2, NULL);
+ m.CheckNotCharacter('o', NULL);
+ m.LoadCurrentCharacter(1, NULL, false);
+ m.CheckNotCharacter('o', NULL);
+ m.LoadCurrentCharacter(0, NULL, false);
+ m.CheckNotCharacter('f', NULL);
m.WriteCurrentPositionToRegister(0, 0);
+ m.WriteCurrentPositionToRegister(1, 3);
m.AdvanceCurrentPosition(3);
- m.WriteCurrentPositionToRegister(1, 0);
+ m.PushBacktrack(&backtrack);
m.Succeed();
+ m.Bind(&backtrack);
+ m.Backtrack();
m.Bind(&fail);
m.Fail();
@@ -846,15 +849,22 @@ TEST(MacroAssemblerNativeSimpleUC16) {
ArchRegExpMacroAssembler m(NativeRegExpMacroAssembler::UC16, 4,
Isolate::Current()->runtime_zone());
- uc16 foo_chars[3] = {'f', 'o', 'o'};
- Vector<const uc16> foo(foo_chars, 3);
-
- Label fail;
- m.CheckCharacters(foo, 0, &fail, true);
+ Label fail, backtrack;
+ m.PushBacktrack(&fail);
+ m.CheckNotAtStart(NULL);
+ m.LoadCurrentCharacter(2, NULL);
+ m.CheckNotCharacter('o', NULL);
+ m.LoadCurrentCharacter(1, NULL, false);
+ m.CheckNotCharacter('o', NULL);
+ m.LoadCurrentCharacter(0, NULL, false);
+ m.CheckNotCharacter('f', NULL);
m.WriteCurrentPositionToRegister(0, 0);
+ m.WriteCurrentPositionToRegister(1, 3);
m.AdvanceCurrentPosition(3);
- m.WriteCurrentPositionToRegister(1, 0);
+ m.PushBacktrack(&backtrack);
m.Succeed();
+ m.Bind(&backtrack);
+ m.Backtrack();
m.Bind(&fail);
m.Fail();
@@ -1353,36 +1363,33 @@ TEST(MacroAssembler) {
RegExpMacroAssemblerIrregexp m(Vector<byte>(codes, 1024),
Isolate::Current()->runtime_zone());
// ^f(o)o.
- Label fail, fail2, start;
- uc16 foo_chars[3];
- foo_chars[0] = 'f';
- foo_chars[1] = 'o';
- foo_chars[2] = 'o';
- Vector<const uc16> foo(foo_chars, 3);
+ Label start, fail, backtrack;
+
m.SetRegister(4, 42);
m.PushRegister(4, RegExpMacroAssembler::kNoStackLimitCheck);
m.AdvanceRegister(4, 42);
m.GoTo(&start);
m.Fail();
m.Bind(&start);
- m.PushBacktrack(&fail2);
- m.CheckCharacters(foo, 0, &fail, true);
+ m.PushBacktrack(&fail);
+ m.CheckNotAtStart(NULL);
+ m.LoadCurrentCharacter(0, NULL);
+ m.CheckNotCharacter('f', NULL);
+ m.LoadCurrentCharacter(1, NULL);
+ m.CheckNotCharacter('o', NULL);
+ m.LoadCurrentCharacter(2, NULL);
+ m.CheckNotCharacter('o', NULL);
m.WriteCurrentPositionToRegister(0, 0);
- m.PushCurrentPosition();
+ m.WriteCurrentPositionToRegister(1, 3);
+ m.WriteCurrentPositionToRegister(2, 1);
+ m.WriteCurrentPositionToRegister(3, 2);
m.AdvanceCurrentPosition(3);
- m.WriteCurrentPositionToRegister(1, 0);
- m.PopCurrentPosition();
- m.AdvanceCurrentPosition(1);
- m.WriteCurrentPositionToRegister(2, 0);
- m.AdvanceCurrentPosition(1);
- m.WriteCurrentPositionToRegister(3, 0);
+ m.PushBacktrack(&backtrack);
m.Succeed();
-
- m.Bind(&fail);
+ m.Bind(&backtrack);
+ m.ClearRegisters(2, 3);
m.Backtrack();
- m.Succeed();
-
- m.Bind(&fail2);
+ m.Bind(&fail);
m.PopRegister(0);
m.Fail();
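
CheckCharacters() is gone from the RegExp macro assembler interface, so the three tests above now match "foo" one character at a time. The replacement shape, as used in the new code (a NULL failure label means "backtrack"; the native-assembler tests load offset 2 first so that its bounds check covers the later loads, which pass check_bounds == false):

  // Old: m.CheckCharacters(foo, 0, &fail, true);
  // New: load and test each character at its offset from the current position.
  m.LoadCurrentCharacter(0, NULL);
  m.CheckNotCharacter('f', NULL);
  m.LoadCurrentCharacter(1, NULL);
  m.CheckNotCharacter('o', NULL);
  m.LoadCurrentCharacter(2, NULL);
  m.CheckNotCharacter('o', NULL);
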
diff --git a/deps/v8/test/cctest/test-serialize.cc b/deps/v8/test/cctest/test-serialize.cc
index 0cf80440f6..8973d54178 100644
--- a/deps/v8/test/cctest/test-serialize.cc
+++ b/deps/v8/test/cctest/test-serialize.cc
@@ -29,10 +29,6 @@
#include "sys/stat.h"
-// TODO(dcarney): remove
-#define V8_ALLOW_ACCESS_TO_PERSISTENT_IMPLICIT
-#define V8_ALLOW_ACCESS_TO_PERSISTENT_ARROW
-
#include "v8.h"
#include "debug.h"
@@ -290,14 +286,15 @@ static void Deserialize() {
static void SanityCheck() {
+ Isolate* isolate = Isolate::Current();
v8::HandleScope scope(v8::Isolate::GetCurrent());
#ifdef VERIFY_HEAP
HEAP->Verify();
#endif
- CHECK(Isolate::Current()->global_object()->IsJSObject());
- CHECK(Isolate::Current()->native_context()->IsContext());
+ CHECK(isolate->global_object()->IsJSObject());
+ CHECK(isolate->native_context()->IsContext());
CHECK(HEAP->string_table()->IsStringTable());
- CHECK(!FACTORY->InternalizeOneByteString(
+ CHECK(!isolate->factory()->InternalizeOneByteString(
STATIC_ASCII_VECTOR("Empty"))->IsFailure());
}
@@ -373,16 +370,19 @@ TEST(PartialSerialization) {
Serializer::Enable();
v8::V8::Initialize();
Isolate* isolate = Isolate::Current();
+ v8::Isolate* v8_isolate = reinterpret_cast<v8::Isolate*>(isolate);
Heap* heap = isolate->heap();
v8::Persistent<v8::Context> env;
{
HandleScope scope(isolate);
- env.Reset(v8::Isolate::GetCurrent(),
- v8::Context::New(v8::Isolate::GetCurrent()));
+ env.Reset(v8_isolate, v8::Context::New(v8_isolate));
}
ASSERT(!env.IsEmpty());
- env->Enter();
+ {
+ v8::HandleScope handle_scope(v8_isolate);
+ v8::Local<v8::Context>::New(v8_isolate, env)->Enter();
+ }
// Make sure all builtin scripts are cached.
{ HandleScope scope(isolate);
for (int i = 0; i < Natives::GetBuiltinsCount(); i++) {
@@ -394,7 +394,7 @@ TEST(PartialSerialization) {
Object* raw_foo;
{
- v8::HandleScope handle_scope(env->GetIsolate());
+ v8::HandleScope handle_scope(v8_isolate);
v8::Local<v8::String> foo = v8::String::New("foo");
ASSERT(!foo.IsEmpty());
raw_foo = *(v8::Utils::OpenHandle(*foo));
@@ -404,8 +404,11 @@ TEST(PartialSerialization) {
Vector<char> startup_name = Vector<char>::New(file_name_length + 1);
OS::SNPrintF(startup_name, "%s.startup", FLAG_testing_serialization_file);
- env->Exit();
- env.Dispose(env->GetIsolate());
+ {
+ v8::HandleScope handle_scope(v8_isolate);
+ v8::Local<v8::Context>::New(v8_isolate, env)->Exit();
+ }
+ env.Dispose(v8_isolate);
FileByteSink startup_sink(startup_name.start());
StartupSerializer startup_serializer(&startup_sink);
@@ -512,16 +515,19 @@ TEST(ContextSerialization) {
Serializer::Enable();
v8::V8::Initialize();
Isolate* isolate = Isolate::Current();
+ v8::Isolate* v8_isolate = reinterpret_cast<v8::Isolate*>(isolate);
Heap* heap = isolate->heap();
v8::Persistent<v8::Context> env;
{
HandleScope scope(isolate);
- env.Reset(v8::Isolate::GetCurrent(),
- v8::Context::New(v8::Isolate::GetCurrent()));
+ env.Reset(v8_isolate, v8::Context::New(v8_isolate));
}
ASSERT(!env.IsEmpty());
- env->Enter();
+ {
+ v8::HandleScope handle_scope(v8_isolate);
+ v8::Local<v8::Context>::New(v8_isolate, env)->Enter();
+ }
// Make sure all builtin scripts are cached.
{ HandleScope scope(isolate);
for (int i = 0; i < Natives::GetBuiltinsCount(); i++) {
@@ -536,11 +542,14 @@ TEST(ContextSerialization) {
Vector<char> startup_name = Vector<char>::New(file_name_length + 1);
OS::SNPrintF(startup_name, "%s.startup", FLAG_testing_serialization_file);
- env->Exit();
+ {
+ v8::HandleScope handle_scope(v8_isolate);
+ v8::Local<v8::Context>::New(v8_isolate, env)->Exit();
+ }
Object* raw_context = *(v8::Utils::OpenHandle(*env));
- env.Dispose(env->GetIsolate());
+ env.Dispose(v8_isolate);
FileByteSink startup_sink(startup_name.start());
StartupSerializer startup_serializer(&startup_sink);
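
With the V8_ALLOW_ACCESS_TO_PERSISTENT_* escape hatches deleted at the top of these files, a v8::Persistent can no longer be dereferenced directly, so env->Enter(), env->Exit() and env->GetIsolate() are out. The serializer tests instead materialize a short-lived Local inside its own handle scope; the pattern, sketched with env as the tests' v8::Persistent<v8::Context>:

  v8::Isolate* v8_isolate = reinterpret_cast<v8::Isolate*>(isolate);
  {
    v8::HandleScope handle_scope(v8_isolate);
    v8::Local<v8::Context>::New(v8_isolate, env)->Enter();  // was: env->Enter()
  }
  // ... serialize ...
  {
    v8::HandleScope handle_scope(v8_isolate);
    v8::Local<v8::Context>::New(v8_isolate, env)->Exit();   // was: env->Exit()
  }
  env.Dispose(v8_isolate);  // was: env.Dispose(env->GetIsolate())
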
diff --git a/deps/v8/test/cctest/test-strings.cc b/deps/v8/test/cctest/test-strings.cc
index 77e8e1b967..12d71e9144 100644
--- a/deps/v8/test/cctest/test-strings.cc
+++ b/deps/v8/test/cctest/test-strings.cc
@@ -32,10 +32,6 @@
#include <stdlib.h>
-// TODO(dcarney): remove
-#define V8_ALLOW_ACCESS_TO_PERSISTENT_IMPLICIT
-#define V8_ALLOW_ACCESS_TO_PERSISTENT_ARROW
-
#include "v8.h"
#include "api.h"
@@ -140,7 +136,9 @@ static void InitializeBuildingBlocks(Handle<String>* building_blocks,
RandomNumberGenerator* rng) {
// A list of pointers that we don't have any interest in cleaning up.
// If they are reachable from a root then leak detection won't complain.
- Zone* zone = Isolate::Current()->runtime_zone();
+ Isolate* isolate = Isolate::Current();
+ Factory* factory = isolate->factory();
+ Zone* zone = isolate->runtime_zone();
for (int i = 0; i < bb_length; i++) {
int len = rng->next(16);
int slice_head_chars = 0;
@@ -172,7 +170,7 @@ static void InitializeBuildingBlocks(Handle<String>* building_blocks,
buf[j] = rng->next(0x10000);
}
building_blocks[i] =
- FACTORY->NewStringFromTwoByte(Vector<const uc16>(buf, len));
+ factory->NewStringFromTwoByte(Vector<const uc16>(buf, len));
for (int j = 0; j < len; j++) {
CHECK_EQ(buf[j], building_blocks[i]->Get(j));
}
@@ -184,7 +182,7 @@ static void InitializeBuildingBlocks(Handle<String>* building_blocks,
buf[j] = rng->next(0x80);
}
building_blocks[i] =
- FACTORY->NewStringFromAscii(Vector<const char>(buf, len));
+ factory->NewStringFromAscii(Vector<const char>(buf, len));
for (int j = 0; j < len; j++) {
CHECK_EQ(buf[j], building_blocks[i]->Get(j));
}
@@ -196,7 +194,7 @@ static void InitializeBuildingBlocks(Handle<String>* building_blocks,
buf[j] = rng->next(0x10000);
}
Resource* resource = new(zone) Resource(Vector<const uc16>(buf, len));
- building_blocks[i] = FACTORY->NewExternalStringFromTwoByte(resource);
+ building_blocks[i] = factory->NewExternalStringFromTwoByte(resource);
for (int j = 0; j < len; j++) {
CHECK_EQ(buf[j], building_blocks[i]->Get(j));
}
@@ -209,7 +207,7 @@ static void InitializeBuildingBlocks(Handle<String>* building_blocks,
}
AsciiResource* resource =
new(zone) AsciiResource(Vector<const char>(buf, len));
- building_blocks[i] = FACTORY->NewExternalStringFromAscii(resource);
+ building_blocks[i] = factory->NewExternalStringFromAscii(resource);
for (int j = 0; j < len; j++) {
CHECK_EQ(buf[j], building_blocks[i]->Get(j));
}
@@ -217,7 +215,7 @@ static void InitializeBuildingBlocks(Handle<String>* building_blocks,
}
}
for (int j = slice_depth; j > 0; j--) {
- building_blocks[i] = FACTORY->NewSubString(
+ building_blocks[i] = factory->NewSubString(
building_blocks[i],
slice_head_chars,
building_blocks[i]->length() - slice_tail_chars);
@@ -349,7 +347,7 @@ void AccumulateStats(ConsString* cons_string, ConsStringStats* stats) {
void AccumulateStats(Handle<String> cons_string, ConsStringStats* stats) {
- AssertNoAllocation no_alloc;
+ DisallowHeapAllocation no_allocation;
if (cons_string->IsConsString()) {
return AccumulateStats(ConsString::cast(*cons_string), stats);
}
@@ -404,6 +402,7 @@ void VerifyConsString(Handle<String> root, ConsStringGenerationData* data) {
static Handle<String> ConstructRandomString(ConsStringGenerationData* data,
unsigned max_recursion) {
+ Factory* factory = Isolate::Current()->factory();
// Compute termination characteristics.
bool terminate = false;
bool flat = data->rng_.next(data->empty_leaf_threshold_);
@@ -450,7 +449,7 @@ static Handle<String> ConstructRandomString(ConsStringGenerationData* data,
left = ConstructRandomString(data, max_recursion - 1);
}
// Build the cons string.
- Handle<String> root = FACTORY->NewConsString(left, right);
+ Handle<String> root = factory->NewConsString(left, right);
CHECK(root->IsConsString() && !root->IsFlat());
// Special work needed for flat string.
if (flat) {
@@ -465,11 +464,12 @@ static Handle<String> ConstructRandomString(ConsStringGenerationData* data,
static Handle<String> ConstructLeft(
ConsStringGenerationData* data,
int depth) {
- Handle<String> answer = FACTORY->NewStringFromAscii(CStrVector(""));
+ Factory* factory = Isolate::Current()->factory();
+ Handle<String> answer = factory->NewStringFromAscii(CStrVector(""));
data->stats_.leaves_++;
for (int i = 0; i < depth; i++) {
Handle<String> block = data->block(i);
- Handle<String> next = FACTORY->NewConsString(answer, block);
+ Handle<String> next = factory->NewConsString(answer, block);
if (next->IsConsString()) data->stats_.leaves_++;
data->stats_.chars_ += block->length();
answer = next;
@@ -482,11 +482,12 @@ static Handle<String> ConstructLeft(
static Handle<String> ConstructRight(
ConsStringGenerationData* data,
int depth) {
- Handle<String> answer = FACTORY->NewStringFromAscii(CStrVector(""));
+ Factory* factory = Isolate::Current()->factory();
+ Handle<String> answer = factory->NewStringFromAscii(CStrVector(""));
data->stats_.leaves_++;
for (int i = depth - 1; i >= 0; i--) {
Handle<String> block = data->block(i);
- Handle<String> next = FACTORY->NewConsString(block, answer);
+ Handle<String> next = factory->NewConsString(block, answer);
if (next->IsConsString()) data->stats_.leaves_++;
data->stats_.chars_ += block->length();
answer = next;
@@ -500,6 +501,7 @@ static Handle<String> ConstructBalancedHelper(
ConsStringGenerationData* data,
int from,
int to) {
+ Factory* factory = Isolate::Current()->factory();
CHECK(to > from);
if (to - from == 1) {
data->stats_.chars_ += data->block(from)->length();
@@ -508,7 +510,7 @@ static Handle<String> ConstructBalancedHelper(
if (to - from == 2) {
data->stats_.chars_ += data->block(from)->length();
data->stats_.chars_ += data->block(from+1)->length();
- return FACTORY->NewConsString(data->block(from), data->block(from+1));
+ return factory->NewConsString(data->block(from), data->block(from+1));
}
Handle<String> part1 =
ConstructBalancedHelper(data, from, from + ((to - from) / 2));
@@ -516,7 +518,7 @@ static Handle<String> ConstructBalancedHelper(
ConstructBalancedHelper(data, from + ((to - from) / 2), to);
if (part1->IsConsString()) data->stats_.left_traversals_++;
if (part2->IsConsString()) data->stats_.right_traversals_++;
- return FACTORY->NewConsString(part1, part2);
+ return factory->NewConsString(part1, part2);
}
@@ -674,7 +676,7 @@ void TestStringCharacterStream(BuildString build, int test_cases) {
Handle<String> cons_string = build(i, &data);
ConsStringStats cons_string_stats;
AccumulateStats(cons_string, &cons_string_stats);
- AssertNoAllocation no_alloc;
+ DisallowHeapAllocation no_allocation;
PrintStats(data);
// Full verify of cons string.
cons_string_stats.VerifyEqual(flat_string_stats);
@@ -694,6 +696,7 @@ static const int kCharacterStreamNonRandomCases = 8;
static Handle<String> BuildEdgeCaseConsString(
int test_case, ConsStringGenerationData* data) {
+ Factory* factory = Isolate::Current()->factory();
data->Reset();
switch (test_case) {
case 0:
@@ -711,7 +714,7 @@ static Handle<String> BuildEdgeCaseConsString(
data->stats_.chars_ += data->block(0)->length();
data->stats_.chars_ += data->block(1)->length();
data->stats_.leaves_ += 2;
- return FACTORY->NewConsString(data->block(0), data->block(1));
+ return factory->NewConsString(data->block(0), data->block(1));
case 6:
// Simple flattened tree.
data->stats_.chars_ += data->block(0)->length();
@@ -720,7 +723,7 @@ static Handle<String> BuildEdgeCaseConsString(
data->stats_.empty_leaves_ += 1;
{
Handle<String> string =
- FACTORY->NewConsString(data->block(0), data->block(1));
+ factory->NewConsString(data->block(0), data->block(1));
FlattenString(string);
return string;
}
@@ -734,9 +737,9 @@ static Handle<String> BuildEdgeCaseConsString(
data->stats_.left_traversals_ += 1;
{
Handle<String> left =
- FACTORY->NewConsString(data->block(0), data->block(1));
+ factory->NewConsString(data->block(0), data->block(1));
FlattenString(left);
- return FACTORY->NewConsString(left, data->block(2));
+ return factory->NewConsString(left, data->block(2));
}
case 8:
// Left node and right node flattened.
@@ -750,12 +753,12 @@ static Handle<String> BuildEdgeCaseConsString(
data->stats_.right_traversals_ += 1;
{
Handle<String> left =
- FACTORY->NewConsString(data->block(0), data->block(1));
+ factory->NewConsString(data->block(0), data->block(1));
FlattenString(left);
Handle<String> right =
- FACTORY->NewConsString(data->block(2), data->block(2));
+ factory->NewConsString(data->block(2), data->block(2));
FlattenString(right);
- return FACTORY->NewConsString(left, right);
+ return factory->NewConsString(left, right);
}
}
UNREACHABLE();
@@ -856,6 +859,7 @@ static const int DEEP_ASCII_DEPTH = 100000;
TEST(DeepAscii) {
printf("TestDeepAscii\n");
CcTest::InitializeVM();
+ Factory* factory = Isolate::Current()->factory();
v8::HandleScope scope(CcTest::isolate());
char* foo = NewArray<char>(DEEP_ASCII_DEPTH);
@@ -863,12 +867,12 @@ TEST(DeepAscii) {
foo[i] = "foo "[i % 4];
}
Handle<String> string =
- FACTORY->NewStringFromAscii(Vector<const char>(foo, DEEP_ASCII_DEPTH));
- Handle<String> foo_string = FACTORY->NewStringFromAscii(CStrVector("foo"));
+ factory->NewStringFromAscii(Vector<const char>(foo, DEEP_ASCII_DEPTH));
+ Handle<String> foo_string = factory->NewStringFromAscii(CStrVector("foo"));
for (int i = 0; i < DEEP_ASCII_DEPTH; i += 10) {
- string = FACTORY->NewConsString(string, foo_string);
+ string = factory->NewConsString(string, foo_string);
}
- Handle<String> flat_string = FACTORY->NewConsString(string, foo_string);
+ Handle<String> flat_string = factory->NewConsString(string, foo_string);
FlattenString(flat_string);
for (int i = 0; i < 500; i++) {
@@ -1063,13 +1067,14 @@ TEST(CachedHashOverflow) {
TEST(SliceFromCons) {
FLAG_string_slices = true;
CcTest::InitializeVM();
+ Factory* factory = Isolate::Current()->factory();
v8::HandleScope scope(CcTest::isolate());
Handle<String> string =
- FACTORY->NewStringFromAscii(CStrVector("parentparentparent"));
- Handle<String> parent = FACTORY->NewConsString(string, string);
+ factory->NewStringFromAscii(CStrVector("parentparentparent"));
+ Handle<String> parent = factory->NewConsString(string, string);
CHECK(parent->IsConsString());
CHECK(!parent->IsFlat());
- Handle<String> slice = FACTORY->NewSubString(parent, 1, 25);
+ Handle<String> slice = factory->NewSubString(parent, 1, 25);
// After slicing, the original string becomes a flat cons.
CHECK(parent->IsFlat());
CHECK(slice->IsSlicedString());
@@ -1097,12 +1102,13 @@ class AsciiVectorResource : public v8::String::ExternalAsciiStringResource {
TEST(SliceFromExternal) {
FLAG_string_slices = true;
CcTest::InitializeVM();
+ Factory* factory = Isolate::Current()->factory();
v8::HandleScope scope(CcTest::isolate());
AsciiVectorResource resource(
i::Vector<const char>("abcdefghijklmnopqrstuvwxyz", 26));
- Handle<String> string = FACTORY->NewExternalStringFromAscii(&resource);
+ Handle<String> string = factory->NewExternalStringFromAscii(&resource);
CHECK(string->IsExternalString());
- Handle<String> slice = FACTORY->NewSubString(string, 1, 25);
+ Handle<String> slice = factory->NewSubString(string, 1, 25);
CHECK(slice->IsSlicedString());
CHECK(string->IsExternalString());
CHECK_EQ(SlicedString::cast(*slice)->parent(), *string);
@@ -1116,6 +1122,7 @@ TEST(TrivialSlice) {
// actually creates a new string (it should not).
FLAG_string_slices = true;
CcTest::InitializeVM();
+ Factory* factory = Isolate::Current()->factory();
v8::HandleScope scope(CcTest::isolate());
v8::Local<v8::Value> result;
Handle<String> string;
@@ -1130,7 +1137,7 @@ TEST(TrivialSlice) {
string = v8::Utils::OpenHandle(v8::String::Cast(*result));
CHECK(!string->IsSlicedString());
- string = FACTORY->NewSubString(string, 0, 26);
+ string = factory->NewSubString(string, 0, 26);
CHECK(!string->IsSlicedString());
result = CompileRun(crosscheck);
CHECK(result->IsString());
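
Besides the factory plumbing, this file picks up the AssertNoAllocation to DisallowHeapAllocation rename. The new guard, declared in assert-scope.h, behaves the same way: for its lifetime, any allocation that could trigger a GC is forbidden, so raw object pointers stay valid. A sketch:

  {
    DisallowHeapAllocation no_allocation;  // was: AssertNoAllocation no_alloc;
    // Raw Object* pointers are safe here; nothing can move underneath them.
    AccumulateStats(ConsString::cast(*cons_string), stats);
  }  // allocation (and GC) permitted again
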
diff --git a/deps/v8/test/cctest/test-symbols.cc b/deps/v8/test/cctest/test-symbols.cc
index e1b3ea77ef..6a8323bea4 100644
--- a/deps/v8/test/cctest/test-symbols.cc
+++ b/deps/v8/test/cctest/test-symbols.cc
@@ -5,10 +5,6 @@
// of ConsStrings. These operations may not be very fast, but they
// should be possible without getting errors due to too deep recursion.
-// TODO(dcarney): remove
-#define V8_ALLOW_ACCESS_TO_PERSISTENT_IMPLICIT
-#define V8_ALLOW_ACCESS_TO_PERSISTENT_ARROW
-
#include "v8.h"
#include "cctest.h"
diff --git a/deps/v8/test/cctest/test-thread-termination.cc b/deps/v8/test/cctest/test-thread-termination.cc
index 4008663c3e..524a564029 100644
--- a/deps/v8/test/cctest/test-thread-termination.cc
+++ b/deps/v8/test/cctest/test-thread-termination.cc
@@ -25,10 +25,6 @@
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-// TODO(dcarney): remove
-#define V8_ALLOW_ACCESS_TO_PERSISTENT_IMPLICIT
-#define V8_ALLOW_ACCESS_TO_PERSISTENT_ARROW
-
#include "v8.h"
#include "platform.h"
#include "cctest.h"
diff --git a/deps/v8/test/cctest/test-threads.cc b/deps/v8/test/cctest/test-threads.cc
index edec8bfbe4..3b9c1ad80b 100644
--- a/deps/v8/test/cctest/test-threads.cc
+++ b/deps/v8/test/cctest/test-threads.cc
@@ -25,10 +25,6 @@
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-// TODO(dcarney): remove
-#define V8_ALLOW_ACCESS_TO_PERSISTENT_IMPLICIT
-#define V8_ALLOW_ACCESS_TO_PERSISTENT_ARROW
-
#include "v8.h"
#include "platform.h"
diff --git a/deps/v8/test/cctest/test-types.cc b/deps/v8/test/cctest/test-types.cc
new file mode 100644
index 0000000000..3cdfdad7e4
--- /dev/null
+++ b/deps/v8/test/cctest/test-types.cc
@@ -0,0 +1,533 @@
+// Copyright 2013 the V8 project authors. All rights reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following
+// disclaimer in the documentation and/or other materials provided
+// with the distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived
+// from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+#include "cctest.h"
+#include "types.h"
+
+using namespace v8::internal;
+
+// Testing auxiliaries (breaking the Type abstraction).
+static bool IsBitset(Type* type) { return type->IsSmi(); }
+static bool IsClass(Type* type) { return type->IsMap(); }
+static bool IsConstant(Type* type) { return type->IsBox(); }
+static bool IsUnion(Type* type) { return type->IsFixedArray(); }
+
+static int AsBitset(Type* type) { return Smi::cast(type)->value(); }
+static Map* AsClass(Type* type) { return Map::cast(type); }
+static Object* AsConstant(Type* type) { return Box::cast(type)->value(); }
+static FixedArray* AsUnion(Type* type) { return FixedArray::cast(type); }
+
+class HandlifiedTypes {
+ public:
+ explicit HandlifiedTypes(Isolate* isolate) :
+ None(Type::None(), isolate),
+ Any(Type::Any(), isolate),
+ Oddball(Type::Oddball(), isolate),
+ Boolean(Type::Boolean(), isolate),
+ Null(Type::Null(), isolate),
+ Undefined(Type::Undefined(), isolate),
+ Number(Type::Number(), isolate),
+ Smi(Type::Smi(), isolate),
+ Double(Type::Double(), isolate),
+ Name(Type::Name(), isolate),
+ UniqueName(Type::UniqueName(), isolate),
+ String(Type::String(), isolate),
+ InternalizedString(Type::InternalizedString(), isolate),
+ Symbol(Type::Symbol(), isolate),
+ Receiver(Type::Receiver(), isolate),
+ Object(Type::Object(), isolate),
+ Array(Type::Array(), isolate),
+ Function(Type::Function(), isolate),
+ Proxy(Type::Proxy(), isolate),
+ object_map(isolate->factory()->NewMap(JS_OBJECT_TYPE, 3 * kPointerSize)),
+ array_map(isolate->factory()->NewMap(JS_ARRAY_TYPE, 4 * kPointerSize)),
+ isolate_(isolate) {
+ smi = handle(Smi::FromInt(666), isolate);
+ object1 = isolate->factory()->NewJSObjectFromMap(object_map);
+ object2 = isolate->factory()->NewJSObjectFromMap(object_map);
+ array = isolate->factory()->NewJSArray(20);
+ ObjectClass = handle(Type::Class(object_map), isolate);
+ ArrayClass = handle(Type::Class(array_map), isolate);
+ SmiConstant = handle(Type::Constant(smi, isolate), isolate);
+ ObjectConstant1 = handle(Type::Constant(object1), isolate);
+ ObjectConstant2 = handle(Type::Constant(object2), isolate);
+ ArrayConstant = handle(Type::Constant(array), isolate);
+ }
+
+ Handle<Type> None;
+ Handle<Type> Any;
+ Handle<Type> Oddball;
+ Handle<Type> Boolean;
+ Handle<Type> Null;
+ Handle<Type> Undefined;
+ Handle<Type> Number;
+ Handle<Type> Smi;
+ Handle<Type> Double;
+ Handle<Type> Name;
+ Handle<Type> UniqueName;
+ Handle<Type> String;
+ Handle<Type> InternalizedString;
+ Handle<Type> Symbol;
+ Handle<Type> Receiver;
+ Handle<Type> Object;
+ Handle<Type> Array;
+ Handle<Type> Function;
+ Handle<Type> Proxy;
+
+ Handle<Type> ObjectClass;
+ Handle<Type> ArrayClass;
+
+ Handle<Type> SmiConstant;
+ Handle<Type> ObjectConstant1;
+ Handle<Type> ObjectConstant2;
+ Handle<Type> ArrayConstant;
+
+ Handle<Map> object_map;
+ Handle<Map> array_map;
+
+ Handle<v8::internal::Smi> smi;
+ Handle<JSObject> object1;
+ Handle<JSObject> object2;
+ Handle<JSArray> array;
+
+ Handle<Type> Union(Handle<Type> type1, Handle<Type> type2) {
+ return handle(Type::Union(type1, type2), isolate_);
+ }
+
+ private:
+ Isolate* isolate_;
+};
+
+
+TEST(Bitset) {
+ CcTest::InitializeVM();
+ Isolate* isolate = Isolate::Current();
+ HandleScope scope(isolate);
+ HandlifiedTypes T(isolate);
+
+ CHECK(IsBitset(*T.None));
+ CHECK(IsBitset(*T.Any));
+ CHECK(IsBitset(*T.String));
+ CHECK(IsBitset(*T.Object));
+
+ CHECK(IsBitset(Type::Union(T.String, T.Number)));
+ CHECK(IsBitset(Type::Union(T.String, T.Receiver)));
+ CHECK(IsBitset(Type::Optional(T.Object)));
+
+ CHECK_EQ(0, AsBitset(*T.None));
+ CHECK_EQ(AsBitset(*T.Number) | AsBitset(*T.String),
+ AsBitset(Type::Union(T.String, T.Number)));
+ CHECK_EQ(AsBitset(*T.Receiver),
+ AsBitset(Type::Union(T.Receiver, T.Object)));
+ CHECK_EQ(AsBitset(*T.String) | AsBitset(*T.Undefined),
+ AsBitset(Type::Optional(T.String)));
+}
+
+
+TEST(Class) {
+ CcTest::InitializeVM();
+ Isolate* isolate = Isolate::Current();
+ HandleScope scope(isolate);
+ HandlifiedTypes T(isolate);
+
+ CHECK(IsClass(*T.ObjectClass));
+ CHECK(IsClass(*T.ArrayClass));
+
+ CHECK(*T.object_map == AsClass(*T.ObjectClass));
+ CHECK(*T.array_map == AsClass(*T.ArrayClass));
+}
+
+
+TEST(Constant) {
+ CcTest::InitializeVM();
+ Isolate* isolate = Isolate::Current();
+ HandleScope scope(isolate);
+ HandlifiedTypes T(isolate);
+
+ CHECK(IsConstant(*T.SmiConstant));
+ CHECK(IsConstant(*T.ObjectConstant1));
+ CHECK(IsConstant(*T.ObjectConstant2));
+ CHECK(IsConstant(*T.ArrayConstant));
+
+ CHECK(*T.smi == AsConstant(*T.SmiConstant));
+ CHECK(*T.object1 == AsConstant(*T.ObjectConstant1));
+ CHECK(*T.object2 == AsConstant(*T.ObjectConstant2));
+ CHECK(*T.object1 != AsConstant(*T.ObjectConstant2));
+ CHECK(*T.array == AsConstant(*T.ArrayConstant));
+}
+
+
+static void CheckSub(Handle<Type> type1, Handle<Type> type2) {
+ CHECK(type1->Is(type2));
+ CHECK(!type2->Is(type1));
+ if (IsBitset(*type1) && IsBitset(*type2)) {
+ CHECK_NE(AsBitset(*type1), AsBitset(*type2));
+ }
+}
+
+static void CheckUnordered(Handle<Type> type1, Handle<Type> type2) {
+ CHECK(!type1->Is(type2));
+ CHECK(!type2->Is(type1));
+ if (IsBitset(*type1) && IsBitset(*type2)) {
+ CHECK_NE(AsBitset(*type1), AsBitset(*type2));
+ }
+}
+
+TEST(Is) {
+ CcTest::InitializeVM();
+ Isolate* isolate = Isolate::Current();
+ HandleScope scope(isolate);
+ HandlifiedTypes T(isolate);
+
+ // Reflexivity
+ CHECK(T.None->Is(T.None));
+ CHECK(T.Any->Is(T.Any));
+ CHECK(T.Object->Is(T.Object));
+
+ CHECK(T.ObjectClass->Is(T.ObjectClass));
+ CHECK(T.ObjectConstant1->Is(T.ObjectConstant1));
+
+ // Symmetry and Transitivity
+ CheckSub(T.None, T.Number);
+ CheckSub(T.None, T.Any);
+
+ CheckSub(T.Oddball, T.Any);
+ CheckSub(T.Boolean, T.Oddball);
+ CheckSub(T.Null, T.Oddball);
+ CheckSub(T.Undefined, T.Oddball);
+ CheckUnordered(T.Boolean, T.Null);
+ CheckUnordered(T.Undefined, T.Null);
+ CheckUnordered(T.Boolean, T.Undefined);
+
+ CheckSub(T.Number, T.Any);
+ CheckSub(T.Smi, T.Number);
+ CheckSub(T.Double, T.Number);
+ CheckUnordered(T.Smi, T.Double);
+
+ CheckSub(T.Name, T.Any);
+ CheckSub(T.UniqueName, T.Any);
+ CheckSub(T.UniqueName, T.Name);
+ CheckSub(T.String, T.Name);
+ CheckSub(T.InternalizedString, T.String);
+ CheckSub(T.InternalizedString, T.UniqueName);
+ CheckSub(T.InternalizedString, T.Name);
+ CheckSub(T.Symbol, T.UniqueName);
+ CheckSub(T.Symbol, T.Name);
+ CheckUnordered(T.String, T.UniqueName);
+ CheckUnordered(T.String, T.Symbol);
+ CheckUnordered(T.InternalizedString, T.Symbol);
+
+ CheckSub(T.Receiver, T.Any);
+ CheckSub(T.Object, T.Any);
+ CheckSub(T.Object, T.Receiver);
+ CheckSub(T.Array, T.Object);
+ CheckSub(T.Function, T.Object);
+ CheckSub(T.Proxy, T.Receiver);
+ CheckUnordered(T.Object, T.Proxy);
+ CheckUnordered(T.Array, T.Function);
+
+ // Structured subtyping
+ CheckSub(T.ObjectClass, T.Object);
+ CheckSub(T.ArrayClass, T.Object);
+ CheckUnordered(T.ObjectClass, T.ArrayClass);
+
+ CheckSub(T.SmiConstant, T.Smi);
+ CheckSub(T.SmiConstant, T.Number);
+ CheckSub(T.ObjectConstant1, T.Object);
+ CheckSub(T.ObjectConstant2, T.Object);
+ CheckSub(T.ArrayConstant, T.Object);
+ CheckSub(T.ArrayConstant, T.Array);
+ CheckUnordered(T.ObjectConstant1, T.ObjectConstant2);
+ CheckUnordered(T.ObjectConstant1, T.ArrayConstant);
+
+ CheckUnordered(T.ObjectConstant1, T.ObjectClass);
+ CheckUnordered(T.ObjectConstant2, T.ObjectClass);
+ CheckUnordered(T.ObjectConstant1, T.ArrayClass);
+ CheckUnordered(T.ObjectConstant2, T.ArrayClass);
+ CheckUnordered(T.ArrayConstant, T.ObjectClass);
+}
+
+
+static void CheckOverlap(Handle<Type> type1, Handle<Type> type2) {
+ CHECK(type1->Maybe(type2));
+ CHECK(type2->Maybe(type1));
+ if (IsBitset(*type1) && IsBitset(*type2)) {
+ CHECK_NE(0, AsBitset(*type1) & AsBitset(*type2));
+ }
+}
+
+static void CheckDisjoint(Handle<Type> type1, Handle<Type> type2) {
+ CHECK(!type1->Is(type2));
+ CHECK(!type2->Is(type1));
+ CHECK(!type1->Maybe(type2));
+ CHECK(!type2->Maybe(type1));
+ if (IsBitset(*type1) && IsBitset(*type2)) {
+ CHECK_EQ(0, AsBitset(*type1) & AsBitset(*type2));
+ }
+}
+
+TEST(Maybe) {
+ CcTest::InitializeVM();
+ Isolate* isolate = Isolate::Current();
+ HandleScope scope(isolate);
+ HandlifiedTypes T(isolate);
+
+ CheckOverlap(T.Any, T.Any);
+ CheckOverlap(T.Object, T.Object);
+
+ CheckOverlap(T.Oddball, T.Any);
+ CheckOverlap(T.Boolean, T.Oddball);
+ CheckOverlap(T.Null, T.Oddball);
+ CheckOverlap(T.Undefined, T.Oddball);
+ CheckDisjoint(T.Boolean, T.Null);
+ CheckDisjoint(T.Undefined, T.Null);
+ CheckDisjoint(T.Boolean, T.Undefined);
+
+ CheckOverlap(T.Number, T.Any);
+ CheckOverlap(T.Smi, T.Number);
+ CheckOverlap(T.Double, T.Number);
+ CheckDisjoint(T.Smi, T.Double);
+
+ CheckOverlap(T.Name, T.Any);
+ CheckOverlap(T.UniqueName, T.Any);
+ CheckOverlap(T.UniqueName, T.Name);
+ CheckOverlap(T.String, T.Name);
+ CheckOverlap(T.InternalizedString, T.String);
+ CheckOverlap(T.InternalizedString, T.UniqueName);
+ CheckOverlap(T.InternalizedString, T.Name);
+ CheckOverlap(T.Symbol, T.UniqueName);
+ CheckOverlap(T.Symbol, T.Name);
+ CheckOverlap(T.String, T.UniqueName);
+ CheckDisjoint(T.String, T.Symbol);
+ CheckDisjoint(T.InternalizedString, T.Symbol);
+
+ CheckOverlap(T.Receiver, T.Any);
+ CheckOverlap(T.Object, T.Any);
+ CheckOverlap(T.Object, T.Receiver);
+ CheckOverlap(T.Array, T.Object);
+ CheckOverlap(T.Function, T.Object);
+ CheckOverlap(T.Proxy, T.Receiver);
+ CheckDisjoint(T.Object, T.Proxy);
+ CheckDisjoint(T.Array, T.Function);
+
+ CheckOverlap(T.ObjectClass, T.Object);
+ CheckOverlap(T.ArrayClass, T.Object);
+ CheckOverlap(T.ObjectClass, T.ObjectClass);
+ CheckOverlap(T.ArrayClass, T.ArrayClass);
+ CheckDisjoint(T.ObjectClass, T.ArrayClass);
+
+ CheckOverlap(T.SmiConstant, T.Smi);
+ CheckOverlap(T.SmiConstant, T.Number);
+ CheckDisjoint(T.SmiConstant, T.Double);
+ CheckOverlap(T.ObjectConstant1, T.Object);
+ CheckOverlap(T.ObjectConstant2, T.Object);
+ CheckOverlap(T.ArrayConstant, T.Object);
+ CheckOverlap(T.ArrayConstant, T.Array);
+ CheckOverlap(T.ObjectConstant1, T.ObjectConstant1);
+ CheckDisjoint(T.ObjectConstant1, T.ObjectConstant2);
+ CheckDisjoint(T.ObjectConstant1, T.ArrayConstant);
+
+ CheckDisjoint(T.ObjectConstant1, T.ObjectClass);
+ CheckDisjoint(T.ObjectConstant2, T.ObjectClass);
+ CheckDisjoint(T.ObjectConstant1, T.ArrayClass);
+ CheckDisjoint(T.ObjectConstant2, T.ArrayClass);
+ CheckDisjoint(T.ArrayConstant, T.ObjectClass);
+}
+
+
+static void CheckEqual(Handle<Type> type1, Handle<Type> type2) {
+ CHECK_EQ(IsBitset(*type1), IsBitset(*type2));
+ CHECK_EQ(IsClass(*type1), IsClass(*type2));
+ CHECK_EQ(IsConstant(*type1), IsConstant(*type2));
+ CHECK_EQ(IsUnion(*type1), IsUnion(*type2));
+ if (IsBitset(*type1)) {
+ CHECK_EQ(AsBitset(*type1), AsBitset(*type2));
+ } else if (IsClass(*type1)) {
+ CHECK_EQ(AsClass(*type1), AsClass(*type2));
+ } else if (IsConstant(*type1)) {
+ CHECK_EQ(AsConstant(*type1), AsConstant(*type2));
+ } else if (IsUnion(*type1)) {
+ CHECK_EQ(AsUnion(*type1)->length(), AsUnion(*type2)->length());
+ }
+ CHECK(type1->Is(type2));
+ CHECK(type2->Is(type1));
+}
+
+TEST(Union) {
+ CcTest::InitializeVM();
+ Isolate* isolate = Isolate::Current();
+ HandleScope scope(isolate);
+ HandlifiedTypes T(isolate);
+
+ // Bitset-bitset
+ CHECK(IsBitset(Type::Union(T.Object, T.Number)));
+ CHECK(IsBitset(Type::Union(T.Object, T.Object)));
+ CHECK(IsBitset(Type::Union(T.Any, T.None)));
+
+ CheckEqual(T.Union(T.None, T.Number), T.Number);
+ CheckEqual(T.Union(T.Object, T.Proxy), T.Receiver);
+ CheckEqual(T.Union(T.Number, T.String), T.Union(T.String, T.Number));
+ CheckSub(T.Union(T.Number, T.String), T.Any);
+
+ // Class-class
+ CHECK(IsClass(Type::Union(T.ObjectClass, T.ObjectClass)));
+ CHECK(IsUnion(Type::Union(T.ObjectClass, T.ArrayClass)));
+
+ CheckEqual(T.Union(T.ObjectClass, T.ObjectClass), T.ObjectClass);
+ CheckSub(T.ObjectClass, T.Union(T.ObjectClass, T.ArrayClass));
+ CheckSub(T.ArrayClass, T.Union(T.ObjectClass, T.ArrayClass));
+ CheckSub(T.Union(T.ObjectClass, T.ArrayClass), T.Object);
+ CheckUnordered(T.Union(T.ObjectClass, T.ArrayClass), T.Array);
+ CheckOverlap(T.Union(T.ObjectClass, T.ArrayClass), T.Array);
+ CheckDisjoint(T.Union(T.ObjectClass, T.ArrayClass), T.Number);
+
+ // Constant-constant
+ CHECK(IsConstant(Type::Union(T.ObjectConstant1, T.ObjectConstant1)));
+ CHECK(IsUnion(Type::Union(T.ObjectConstant1, T.ObjectConstant2)));
+
+ CheckEqual(T.Union(T.ObjectConstant1, T.ObjectConstant1), T.ObjectConstant1);
+ CheckSub(T.ObjectConstant1, T.Union(T.ObjectConstant1, T.ObjectConstant2));
+ CheckSub(T.ObjectConstant2, T.Union(T.ObjectConstant1, T.ObjectConstant2));
+ CheckSub(T.Union(T.ObjectConstant1, T.ObjectConstant2), T.Object);
+ CheckUnordered(T.Union(T.ObjectConstant1, T.ObjectConstant2), T.ObjectClass);
+ CheckUnordered(T.Union(T.ObjectConstant1, T.ArrayConstant), T.Array);
+ CheckOverlap(T.Union(T.ObjectConstant1, T.ArrayConstant), T.Array);
+ CheckDisjoint(T.Union(T.ObjectConstant1, T.ArrayConstant), T.Number);
+ CheckDisjoint(T.Union(T.ObjectConstant1, T.ArrayConstant), T.ObjectClass);
+
+ // Bitset-class
+ CHECK(IsBitset(Type::Union(T.ObjectClass, T.Object)));
+ CHECK(IsUnion(Type::Union(T.ObjectClass, T.Number)));
+
+ CheckEqual(T.Union(T.ObjectClass, T.Object), T.Object);
+ CheckSub(T.Union(T.ObjectClass, T.Number), T.Any);
+ CheckSub(T.Union(T.ObjectClass, T.Smi), T.Union(T.Object, T.Number));
+ CheckSub(T.Union(T.ObjectClass, T.Array), T.Object);
+ CheckUnordered(T.Union(T.ObjectClass, T.String), T.Array);
+ CheckOverlap(T.Union(T.ObjectClass, T.String), T.Object);
+ CheckDisjoint(T.Union(T.ObjectClass, T.String), T.Number);
+
+ // Bitset-constant
+ CHECK(IsBitset(Type::Union(T.SmiConstant, T.Number)));
+ CHECK(IsBitset(Type::Union(T.ObjectConstant1, T.Object)));
+ CHECK(IsUnion(Type::Union(T.ObjectConstant2, T.Number)));
+
+ CheckEqual(T.Union(T.SmiConstant, T.Number), T.Number);
+ CheckEqual(T.Union(T.ObjectConstant1, T.Object), T.Object);
+ CheckSub(T.Union(T.ObjectConstant1, T.Number), T.Any);
+ CheckSub(T.Union(T.ObjectConstant1, T.Smi), T.Union(T.Object, T.Number));
+ CheckSub(T.Union(T.ObjectConstant1, T.Array), T.Object);
+ CheckUnordered(T.Union(T.ObjectConstant1, T.String), T.Array);
+ CheckOverlap(T.Union(T.ObjectConstant1, T.String), T.Object);
+ CheckDisjoint(T.Union(T.ObjectConstant1, T.String), T.Number);
+
+ // Class-constant
+ CHECK(IsUnion(Type::Union(T.ObjectConstant1, T.ObjectClass)));
+ CHECK(IsUnion(Type::Union(T.ArrayClass, T.ObjectConstant2)));
+
+ CheckSub(T.Union(T.ObjectConstant1, T.ArrayClass), T.Object);
+ CheckSub(T.ObjectConstant1, T.Union(T.ObjectConstant1, T.ArrayClass));
+ CheckSub(T.ArrayClass, T.Union(T.ObjectConstant1, T.ArrayClass));
+ CheckUnordered(T.ObjectClass, T.Union(T.ObjectConstant1, T.ArrayClass));
+ CheckSub(
+ T.Union(T.ObjectConstant1, T.ArrayClass), T.Union(T.Array, T.Object));
+ CheckUnordered(T.Union(T.ObjectConstant1, T.ArrayClass), T.ArrayConstant);
+ CheckDisjoint(T.Union(T.ObjectConstant1, T.ArrayClass), T.ObjectConstant2);
+ CheckDisjoint(T.Union(T.ObjectConstant1, T.ArrayClass), T.ObjectClass);
+
+ // Bitset-union
+ CHECK(IsBitset(
+ Type::Union(T.Object, T.Union(T.ObjectConstant1, T.ObjectClass))));
+ CHECK(IsUnion(
+ Type::Union(T.Union(T.ArrayClass, T.ObjectConstant2), T.Number)));
+
+ CheckEqual(
+ T.Union(T.Object, T.Union(T.ObjectConstant1, T.ObjectClass)),
+ T.Object);
+ CheckEqual(
+ T.Union(T.Union(T.ArrayClass, T.ObjectConstant1), T.Number),
+ T.Union(T.ObjectConstant1, T.Union(T.Number, T.ArrayClass)));
+ CheckSub(
+ T.Double,
+ T.Union(T.Union(T.ArrayClass, T.ObjectConstant1), T.Number));
+ CheckSub(
+ T.ObjectConstant1,
+ T.Union(T.Union(T.ArrayClass, T.ObjectConstant1), T.Double));
+ CheckSub(
+ T.Union(T.Union(T.ArrayClass, T.ObjectConstant1), T.Double),
+ T.Any);
+ CheckSub(
+ T.Union(T.Union(T.ArrayClass, T.ObjectConstant1), T.Double),
+ T.Union(T.ObjectConstant1, T.Union(T.Number, T.ArrayClass)));
+
+ // Class-union
+ CHECK(IsUnion(
+ Type::Union(T.Union(T.ArrayClass, T.ObjectConstant2), T.ArrayClass)));
+ CHECK(IsUnion(
+ Type::Union(T.Union(T.ArrayClass, T.ObjectConstant2), T.ObjectClass)));
+
+ CheckEqual(
+ T.Union(T.ObjectClass, T.Union(T.ObjectConstant1, T.ObjectClass)),
+ T.Union(T.ObjectClass, T.ObjectConstant1));
+ CheckSub(
+ T.Union(T.ObjectClass, T.Union(T.ObjectConstant1, T.ObjectClass)),
+ T.Object);
+ CheckEqual(
+ T.Union(T.Union(T.ArrayClass, T.ObjectConstant2), T.ArrayClass),
+ T.Union(T.ArrayClass, T.ObjectConstant2));
+
+ // Constant-union
+ CHECK(IsUnion(Type::Union(
+ T.ObjectConstant1, T.Union(T.ObjectConstant1, T.ObjectConstant2))));
+ CHECK(IsUnion(Type::Union(
+ T.Union(T.ArrayConstant, T.ObjectClass), T.ObjectConstant1)));
+ CHECK(IsUnion(Type::Union(
+ T.Union(T.ArrayConstant, T.ObjectConstant2), T.ObjectConstant1)));
+
+ CheckEqual(
+ T.Union(T.ObjectConstant1, T.Union(T.ObjectConstant1, T.ObjectConstant2)),
+ T.Union(T.ObjectConstant2, T.ObjectConstant1));
+ CheckEqual(
+ T.Union(T.Union(T.ArrayConstant, T.ObjectConstant2), T.ObjectConstant1),
+ T.Union(T.ObjectConstant2, T.Union(T.ArrayConstant, T.ObjectConstant1)));
+
+ // Union-union
+ CHECK(IsBitset(
+ Type::Union(T.Union(T.Number, T.ArrayClass), T.Union(T.Smi, T.Array))));
+
+ CheckEqual(
+ T.Union(T.Union(T.ObjectConstant2, T.ObjectConstant1),
+ T.Union(T.ObjectConstant1, T.ObjectConstant2)),
+ T.Union(T.ObjectConstant2, T.ObjectConstant1));
+ CheckEqual(
+ T.Union(T.Union(T.ObjectConstant2, T.ArrayConstant),
+ T.Union(T.ObjectConstant1, T.ArrayConstant)),
+ T.Union(T.Union(T.ObjectConstant1, T.ObjectConstant2), T.ArrayConstant));
+ CheckEqual(
+ T.Union(T.Union(T.Number, T.ArrayClass), T.Union(T.Smi, T.Array)),
+ T.Union(T.Number, T.Array));
+}
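
The new test file probes the internal type lattice through its representation: bitset types are stored as Smis, classes as Maps, constants as Boxes, and unions as FixedArrays, which is exactly what the IsBitset/AsBitset family of helpers at the top decodes. Extending the suite follows the same recipe; a hypothetical extra case (all names come from the file above):

  TEST(UnionIsCommutative) {
    CcTest::InitializeVM();
    Isolate* isolate = Isolate::Current();
    HandleScope scope(isolate);
    HandlifiedTypes T(isolate);
    // Bitset unions are bitwise-or, hence commutative up to Is()-equivalence.
    CheckEqual(T.Union(T.Smi, T.String), T.Union(T.String, T.Smi));
  }
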
diff --git a/deps/v8/test/cctest/test-weakmaps.cc b/deps/v8/test/cctest/test-weakmaps.cc
index 499286c46f..9044f17e4e 100644
--- a/deps/v8/test/cctest/test-weakmaps.cc
+++ b/deps/v8/test/cctest/test-weakmaps.cc
@@ -114,8 +114,7 @@ TEST(Weakness) {
HandleScope scope(isolate);
global_handles->MakeWeak(key.location(),
reinterpret_cast<void*>(1234),
- &WeakPointerCallback,
- NULL);
+ &WeakPointerCallback);
}
CHECK(global_handles->IsWeak(key.location()));
diff --git a/deps/v8/test/cctest/test-weaktypedarrays.cc b/deps/v8/test/cctest/test-weaktypedarrays.cc
new file mode 100644
index 0000000000..aef610d496
--- /dev/null
+++ b/deps/v8/test/cctest/test-weaktypedarrays.cc
@@ -0,0 +1,380 @@
+// Copyright 2013 the V8 project authors. All rights reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following
+// disclaimer in the documentation and/or other materials provided
+// with the distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived
+// from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+#include <stdlib.h>
+
+#include "v8.h"
+#include "api.h"
+#include "heap.h"
+#include "objects.h"
+
+#include "cctest.h"
+
+using namespace v8::internal;
+
+static Isolate* GetIsolateFrom(LocalContext* context) {
+ return reinterpret_cast<Isolate*>((*context)->GetIsolate());
+}
+
+
+static int CountArrayBuffersInWeakList(Heap* heap) {
+ int count = 0;
+ for (Object* o = heap->array_buffers_list();
+ o != Smi::FromInt(0);
+ o = JSArrayBuffer::cast(o)->weak_next()) {
+ count++;
+ }
+ return count;
+}
+
+
+static bool HasArrayBufferInWeakList(Heap* heap, JSArrayBuffer* ab) {
+ for (Object* o = heap->array_buffers_list();
+ o != Smi::FromInt(0);
+ o = JSArrayBuffer::cast(o)->weak_next()) {
+ if (ab == o) return true;
+ }
+ return false;
+}
+
+
+static int CountTypedArrays(JSArrayBuffer* array_buffer) {
+ int count = 0;
+ for (Object* o = array_buffer->weak_first_array();
+ o != Smi::FromInt(0);
+ o = JSTypedArray::cast(o)->weak_next()) {
+ count++;
+ }
+
+ return count;
+}
+
+static bool HasTypedArrayInWeakList(JSArrayBuffer* array_buffer,
+ JSTypedArray* ta) {
+ for (Object* o = array_buffer->weak_first_array();
+ o != Smi::FromInt(0);
+ o = JSTypedArray::cast(o)->weak_next()) {
+ if (ta == o) return true;
+ }
+ return false;
+}
+
+
+TEST(WeakArrayBuffersFromApi) {
+ v8::V8::Initialize();
+ LocalContext context;
+ Isolate* isolate = GetIsolateFrom(&context);
+
+ CHECK_EQ(0, CountArrayBuffersInWeakList(isolate->heap()));
+ {
+ v8::HandleScope s1(context->GetIsolate());
+ v8::Handle<v8::ArrayBuffer> ab1 = v8::ArrayBuffer::New(256);
+ {
+ v8::HandleScope s2(context->GetIsolate());
+ v8::Handle<v8::ArrayBuffer> ab2 = v8::ArrayBuffer::New(128);
+
+ Handle<JSArrayBuffer> iab1 = v8::Utils::OpenHandle(*ab1);
+ Handle<JSArrayBuffer> iab2 = v8::Utils::OpenHandle(*ab2);
+ CHECK_EQ(2, CountArrayBuffersInWeakList(isolate->heap()));
+ CHECK(HasArrayBufferInWeakList(isolate->heap(), *iab1));
+ CHECK(HasArrayBufferInWeakList(isolate->heap(), *iab2));
+ }
+ isolate->heap()->CollectAllGarbage(Heap::kNoGCFlags);
+ isolate->heap()->CollectAllGarbage(Heap::kNoGCFlags);
+ CHECK_EQ(1, CountArrayBuffersInWeakList(isolate->heap()));
+ {
+ HandleScope scope2(isolate);
+ Handle<JSArrayBuffer> iab1 = v8::Utils::OpenHandle(*ab1);
+
+ CHECK(HasArrayBufferInWeakList(isolate->heap(), *iab1));
+ }
+ }
+
+ isolate->heap()->CollectAllGarbage(Heap::kNoGCFlags);
+ isolate->heap()->CollectAllGarbage(Heap::kNoGCFlags);
+ CHECK_EQ(0, CountArrayBuffersInWeakList(isolate->heap()));
+}
+
+
+TEST(WeakArrayBuffersFromScript) {
+ v8::V8::Initialize();
+ LocalContext context;
+ Isolate* isolate = GetIsolateFrom(&context);
+
+ for (int i = 1; i <= 3; i++) {
+ // Create 3 array buffers, make the i-th of them garbage, and
+ // validate the state of the array buffer weak list.
+ CHECK_EQ(0, CountArrayBuffersInWeakList(isolate->heap()));
+ {
+ v8::HandleScope scope(context->GetIsolate());
+
+ {
+ v8::HandleScope s1(context->GetIsolate());
+ CompileRun("var ab1 = new ArrayBuffer(256);"
+ "var ab2 = new ArrayBuffer(256);"
+ "var ab3 = new ArrayBuffer(256);");
+ v8::Handle<v8::ArrayBuffer> ab1(
+ v8::ArrayBuffer::Cast(*CompileRun("ab1")));
+ v8::Handle<v8::ArrayBuffer> ab2(
+ v8::ArrayBuffer::Cast(*CompileRun("ab2")));
+ v8::Handle<v8::ArrayBuffer> ab3(
+ v8::ArrayBuffer::Cast(*CompileRun("ab3")));
+
+ CHECK_EQ(3, CountArrayBuffersInWeakList(isolate->heap()));
+ CHECK(HasArrayBufferInWeakList(isolate->heap(),
+ *v8::Utils::OpenHandle(*ab1)));
+ CHECK(HasArrayBufferInWeakList(isolate->heap(),
+ *v8::Utils::OpenHandle(*ab2)));
+ CHECK(HasArrayBufferInWeakList(isolate->heap(),
+ *v8::Utils::OpenHandle(*ab3)));
+ }
+
+ i::ScopedVector<char> source(1024);
+ i::OS::SNPrintF(source, "ab%d = null;", i);
+ CompileRun(source.start());
+ isolate->heap()->CollectAllGarbage(Heap::kNoGCFlags);
+ isolate->heap()->CollectAllGarbage(Heap::kNoGCFlags);
+
+ CHECK_EQ(2, CountArrayBuffersInWeakList(isolate->heap()));
+
+ {
+ v8::HandleScope s2(context->GetIsolate());
+ for (int j = 1; j <= 3; j++) {
+ if (j == i) continue;
+ i::OS::SNPrintF(source, "ab%d", j);
+ v8::Handle<v8::ArrayBuffer> ab(
+ v8::ArrayBuffer::Cast(*CompileRun(source.start())));
+ CHECK(HasArrayBufferInWeakList(isolate->heap(),
+ *v8::Utils::OpenHandle(*ab)));
+ }
+ }
+
+ CompileRun("ab1 = null; ab2 = null; ab3 = null;");
+ }
+
+ isolate->heap()->CollectAllGarbage(Heap::kNoGCFlags);
+ isolate->heap()->CollectAllGarbage(Heap::kNoGCFlags);
+ CHECK_EQ(0, CountArrayBuffersInWeakList(isolate->heap()));
+ }
+}
+
+template <typename TypedArray>
+void TestTypedArrayFromApi() {
+ v8::V8::Initialize();
+ LocalContext context;
+ Isolate* isolate = GetIsolateFrom(&context);
+
+ v8::HandleScope s1(context->GetIsolate());
+ v8::Handle<v8::ArrayBuffer> ab = v8::ArrayBuffer::New(2048);
+ Handle<JSArrayBuffer> iab = v8::Utils::OpenHandle(*ab);
+ {
+ v8::HandleScope s2(context->GetIsolate());
+ v8::Handle<TypedArray> ta1 = TypedArray::New(ab, 0, 256);
+ {
+ v8::HandleScope s3(context->GetIsolate());
+ v8::Handle<TypedArray> ta2 = TypedArray::New(ab, 0, 128);
+
+ Handle<JSTypedArray> ita1 = v8::Utils::OpenHandle(*ta1);
+ Handle<JSTypedArray> ita2 = v8::Utils::OpenHandle(*ta2);
+ CHECK_EQ(2, CountTypedArrays(*iab));
+ CHECK(HasTypedArrayInWeakList(*iab, *ita1));
+ CHECK(HasTypedArrayInWeakList(*iab, *ita2));
+ }
+ isolate->heap()->CollectAllGarbage(Heap::kNoGCFlags);
+ isolate->heap()->CollectAllGarbage(Heap::kNoGCFlags);
+ CHECK_EQ(1, CountTypedArrays(*iab));
+ Handle<JSTypedArray> ita1 = v8::Utils::OpenHandle(*ta1);
+ CHECK(HasTypedArrayInWeakList(*iab, *ita1));
+ }
+ isolate->heap()->CollectAllGarbage(Heap::kNoGCFlags);
+ isolate->heap()->CollectAllGarbage(Heap::kNoGCFlags);
+
+ CHECK_EQ(0, CountTypedArrays(*iab));
+}
+
+
+TEST(Uint8ArrayFromApi) {
+ TestTypedArrayFromApi<v8::Uint8Array>();
+}
+
+
+TEST(Int8ArrayFromApi) {
+ TestTypedArrayFromApi<v8::Int8Array>();
+}
+
+
+TEST(Uint16ArrayFromApi) {
+ TestTypedArrayFromApi<v8::Uint16Array>();
+}
+
+
+TEST(Int16ArrayFromApi) {
+ TestTypedArrayFromApi<v8::Int16Array>();
+}
+
+
+TEST(Uint32ArrayFromApi) {
+ TestTypedArrayFromApi<v8::Uint32Array>();
+}
+
+
+TEST(Int32ArrayFromApi) {
+ TestTypedArrayFromApi<v8::Int32Array>();
+}
+
+
+TEST(Float32ArrayFromApi) {
+ TestTypedArrayFromApi<v8::Float32Array>();
+}
+
+
+TEST(Float64ArrayFromApi) {
+ TestTypedArrayFromApi<v8::Float64Array>();
+}
+
+
+TEST(Uint8ClampedArrayFromApi) {
+ TestTypedArrayFromApi<v8::Uint8ClampedArray>();
+}
+
+template <typename TypedArray>
+static void TestTypedArrayFromScript(const char* constructor) {
+ v8::V8::Initialize();
+ LocalContext context;
+ Isolate* isolate = GetIsolateFrom(&context);
+ v8::HandleScope scope(context->GetIsolate());
+ CompileRun("var ab = new ArrayBuffer(2048);");
+ for (int i = 1; i <= 3; i++) {
+ // Create three typed arrays, make the i-th of them garbage,
+ // and validate the state of the typed array weak list.
+ v8::HandleScope s0(context->GetIsolate());
+ i::ScopedVector<char> source(2048);
+
+ CHECK_EQ(1, CountArrayBuffersInWeakList(isolate->heap()));
+
+ {
+ v8::HandleScope s1(context->GetIsolate());
+ i::OS::SNPrintF(source,
+ "var ta1 = new %s(ab);"
+ "var ta2 = new %s(ab);"
+ "var ta3 = new %s(ab)",
+ constructor, constructor, constructor);
+
+ CompileRun(source.start());
+ v8::Handle<v8::ArrayBuffer> ab(v8::ArrayBuffer::Cast(*CompileRun("ab")));
+ v8::Handle<TypedArray> ta1(TypedArray::Cast(*CompileRun("ta1")));
+ v8::Handle<TypedArray> ta2(TypedArray::Cast(*CompileRun("ta2")));
+ v8::Handle<TypedArray> ta3(TypedArray::Cast(*CompileRun("ta3")));
+ CHECK_EQ(1, CountArrayBuffersInWeakList(isolate->heap()));
+ Handle<JSArrayBuffer> iab = v8::Utils::OpenHandle(*ab);
+ CHECK_EQ(3, CountTypedArrays(*iab));
+ CHECK(HasTypedArrayInWeakList(*iab, *v8::Utils::OpenHandle(*ta1)));
+ CHECK(HasTypedArrayInWeakList(*iab, *v8::Utils::OpenHandle(*ta2)));
+ CHECK(HasTypedArrayInWeakList(*iab, *v8::Utils::OpenHandle(*ta3)));
+ }
+
+ i::OS::SNPrintF(source, "ta%d = null;", i);
+ CompileRun(source.start());
+ isolate->heap()->CollectAllGarbage(Heap::kNoGCFlags);
+ isolate->heap()->CollectAllGarbage(Heap::kNoGCFlags);
+
+ CHECK_EQ(1, CountArrayBuffersInWeakList(isolate->heap()));
+
+ {
+ v8::HandleScope s2(context->GetIsolate());
+ v8::Handle<v8::ArrayBuffer> ab(v8::ArrayBuffer::Cast(*CompileRun("ab")));
+ Handle<JSArrayBuffer> iab = v8::Utils::OpenHandle(*ab);
+ CHECK_EQ(2, CountTypedArrays(*iab));
+ for (int j = 1; j <= 3; j++) {
+ if (j == i) continue;
+ i::OS::SNPrintF(source, "ta%d", j);
+ v8::Handle<TypedArray> ta(
+ TypedArray::Cast(*CompileRun(source.start())));
+ CHECK(HasTypedArrayInWeakList(*iab, *v8::Utils::OpenHandle(*ta)));
+ }
+ }
+
+ CompileRun("ta1 = null; ta2 = null; ta3 = null;");
+ isolate->heap()->CollectAllGarbage(Heap::kNoGCFlags);
+ isolate->heap()->CollectAllGarbage(Heap::kNoGCFlags);
+
+ CHECK_EQ(1, CountArrayBuffersInWeakList(isolate->heap()));
+
+ {
+ v8::HandleScope s3(context->GetIsolate());
+ v8::Handle<v8::ArrayBuffer> ab(v8::ArrayBuffer::Cast(*CompileRun("ab")));
+ Handle<JSArrayBuffer> iab = v8::Utils::OpenHandle(*ab);
+ CHECK_EQ(0, CountTypedArrays(*iab));
+ }
+ }
+}
+
+
+TEST(Uint8ArrayFromScript) {
+ TestTypedArrayFromScript<v8::Uint8Array>("Uint8Array");
+}
+
+
+TEST(Int8ArrayFromScript) {
+ TestTypedArrayFromScript<v8::Int8Array>("Int8Array");
+}
+
+
+TEST(Uint16ArrayFromScript) {
+ TestTypedArrayFromScript<v8::Uint16Array>("Uint16Array");
+}
+
+
+TEST(Int16ArrayFromScript) {
+ TestTypedArrayFromScript<v8::Int16Array>("Int16Array");
+}
+
+
+TEST(Uint32ArrayFromScript) {
+ TestTypedArrayFromScript<v8::Uint32Array>("Uint32Array");
+}
+
+
+TEST(Int32ArrayFromScript) {
+ TestTypedArrayFromScript<v8::Int32Array>("Int32Array");
+}
+
+
+TEST(Float32ArrayFromScript) {
+ TestTypedArrayFromScript<v8::Float32Array>("Float32Array");
+}
+
+
+TEST(Float64ArrayFromScript) {
+ TestTypedArrayFromScript<v8::Float64Array>("Float64Array");
+}
+
+
+TEST(Uint8ClampedArrayFromScript) {
+ TestTypedArrayFromScript<v8::Uint8ClampedArray>("Uint8ClampedArray");
+}
+
diff --git a/deps/v8/test/mjsunit/allocation-site-info.js b/deps/v8/test/mjsunit/allocation-site-info.js
index d718993214..f533d61738 100644
--- a/deps/v8/test/mjsunit/allocation-site-info.js
+++ b/deps/v8/test/mjsunit/allocation-site-info.js
@@ -37,7 +37,7 @@
// support_smi_only_arrays = %HasFastSmiElements(new Array(1,2,3,4,5,6,7,8));
support_smi_only_arrays = true;
-optimize_constructed_arrays = false;
+optimize_constructed_arrays = true;
if (support_smi_only_arrays) {
print("Tests include smi-only arrays.");
@@ -281,6 +281,23 @@ if (support_smi_only_arrays) {
obj = newarraycase_list_smiobj(2);
assertKind(elements_kind.fast, obj);
+ function newarraycase_onearg(len, value) {
+ var a = new Array(len);
+ a[0] = value;
+ return a;
+ }
+
+ obj = newarraycase_onearg(5, 3.5);
+ assertKind(elements_kind.fast_double, obj);
+ obj = newarraycase_onearg(10, 5);
+ assertKind(elements_kind.fast_double, obj);
+ obj = newarraycase_onearg(0, 5);
+ assertKind(elements_kind.fast_double, obj);
+ // Now pass a length that forces the dictionary path.
+ obj = newarraycase_onearg(100000, 5);
+ assertKind(elements_kind.dictionary, obj);
+ assertTrue(obj.length == 100000);
+
// Verify that cross context calls work
var realmA = Realm.current();
var realmB = Realm.create();
diff --git a/deps/v8/test/mjsunit/debug-compile-event.js b/deps/v8/test/mjsunit/debug-compile-event.js
index 94dddfa104..89a71ddb59 100644
--- a/deps/v8/test/mjsunit/debug-compile-event.js
+++ b/deps/v8/test/mjsunit/debug-compile-event.js
@@ -80,7 +80,7 @@ function listener(event, exec_state, event_data, data) {
var msg = eval('(' + json + ')');
assertTrue('context' in msg.body.script);
- // Check that we pick script name from //@ sourceURL, iff present
+ // Check that we pick script name from //# sourceURL, iff present
assertEquals(current_source.indexOf('sourceURL') >= 0 ?
'myscript.js' : undefined,
event_data.script().name());
@@ -103,7 +103,7 @@ compileSource('eval("eval(\'(function(){return a;})\')")');
source_count += 2; // Using eval causes additional compilation event.
compileSource('JSON.parse(\'{"a":1,"b":2}\')');
// Using JSON.parse does not cause additional compilation events.
-compileSource('x=1; //@ sourceURL=myscript.js');
+compileSource('x=1; //# sourceURL=myscript.js');
// Make sure that the debug event listener was invoked.
assertFalse(exception, "exception in listener")
diff --git a/deps/v8/test/mjsunit/debug-evaluate-locals-optimized-double.js b/deps/v8/test/mjsunit/debug-evaluate-locals-optimized-double.js
index 8d91b973ce..6696ec5ecd 100644
--- a/deps/v8/test/mjsunit/debug-evaluate-locals-optimized-double.js
+++ b/deps/v8/test/mjsunit/debug-evaluate-locals-optimized-double.js
@@ -185,6 +185,7 @@ function h(i, x0, y0) {
a0 = a0 + a0 / 100;
b0 = b0 + b0 / 100;
debugger; // Breakpoint.
+ return a0 + b0;
};
function g3(i, x1, y1) {
@@ -193,7 +194,7 @@ function g3(i, x1, y1) {
a1 = a1 + a1 / 100;
b1 = b1 + b1 / 100;
h(i - 1, a1, b1);
- return a1+b1;
+ return a1 + b1;
};
function g2(i) {
@@ -202,6 +203,7 @@ function g2(i) {
a2 = a2 + a2 / 100;
b2 = b2 + b2 / 100;
g3(i - 1, a2, b2);
+ return a2 + b2;
};
function g1(i, x3, y3, z3) {
@@ -210,6 +212,7 @@ function g1(i, x3, y3, z3) {
a3 = a3 + a3 / 100;
b3 = b3 + b3 / 100;
new g2(i - 1, a3, b3);
+ return a3 + b3;
};
function f(i, x4, y4) {
@@ -218,6 +221,7 @@ function f(i, x4, y4) {
a4 = a4 + a4 / 100;
b4 = b4 + b4 / 100;
g1(i - 1, a4, b4);
+ return a4 + b4;
};
// Test calling f normally and as a constructor.
diff --git a/deps/v8/test/mjsunit/debug-evaluate-locals-optimized.js b/deps/v8/test/mjsunit/debug-evaluate-locals-optimized.js
index f66291288e..d424001b89 100644
--- a/deps/v8/test/mjsunit/debug-evaluate-locals-optimized.js
+++ b/deps/v8/test/mjsunit/debug-evaluate-locals-optimized.js
@@ -174,30 +174,35 @@ function h(i, x0, y0) {
var a0 = expected[i].locals.a0;
var b0 = expected[i].locals.b0;
debugger; // Breakpoint.
+ return a0 + b0;
}
function g3(i, x1, y1) {
var a1 = expected[i].locals.a1;
var b1 = expected[i].locals.b1;
h(i - 1, a1, b1);
+ return a1 + b1;
}
function g2(i) {
var a2 = expected[i].locals.a2;
var b2 = expected[i].locals.b2;
g3(i - 1, a2, b2);
+ return a2 + b2;
}
function g1(i, x3, y3, z3) {
var a3 = expected[i].locals.a3;
var b3 = expected[i].locals.b3;
new g2(i - 1, a3, b3);
+ return a3 + b3;
}
function f(i, x4, y4) {
var a4 = expected[i].locals.a4;
var b4 = expected[i].locals.b4;
g1(i - 1, a4, b4);
+ return a4 + b4;
}
// Test calling f normally and as a constructor.
diff --git a/deps/v8/test/mjsunit/debug-set-script-source.js b/deps/v8/test/mjsunit/debug-set-script-source.js
index 34ae8488a4..10ab43cd63 100644
--- a/deps/v8/test/mjsunit/debug-set-script-source.js
+++ b/deps/v8/test/mjsunit/debug-set-script-source.js
@@ -36,10 +36,10 @@ var exception = null;
function listener(event, exec_state, event_data, data) {
if (event == Debug.DebugEvent.BeforeCompile) {
event_data.script().setSource(event_data.script().source() +
- " //@ sourceURL=proper_location_" + (++script_number));
+ " //# sourceURL=proper_location_" + (++script_number));
} else if (event == Debug.DebugEvent.AfterCompile) {
try {
- event_data.script().setSource("a=1 //@ sourceURL=wrong_location");
+ event_data.script().setSource("a=1 //# sourceURL=wrong_location");
} catch(e) {
exception = e;
}
diff --git a/deps/v8/test/mjsunit/debug-setbreakpoint.js b/deps/v8/test/mjsunit/debug-setbreakpoint.js
index 90dfcd136b..8531c4e935 100644
--- a/deps/v8/test/mjsunit/debug-setbreakpoint.js
+++ b/deps/v8/test/mjsunit/debug-setbreakpoint.js
@@ -146,7 +146,7 @@ function g() {
};
eval('function h(){}');
-eval('function sourceUrlFunc() { a = 2; }\n//@ sourceURL=sourceUrlScript');
+eval('function sourceUrlFunc() { a = 2; }\n//# sourceURL=sourceUrlScript');
o = {a:function(){},b:function(){}}
diff --git a/deps/v8/test/mjsunit/fuzz-natives-part1.js b/deps/v8/test/mjsunit/fuzz-natives-part1.js
index 8b290d582e..d5e1aeea5f 100644
--- a/deps/v8/test/mjsunit/fuzz-natives-part1.js
+++ b/deps/v8/test/mjsunit/fuzz-natives-part1.js
@@ -201,6 +201,10 @@ var knownProblems = {
"_GetCachedArrayIndex": true,
"_OneByteSeqStringSetChar": true,
"_TwoByteSeqStringSetChar": true,
+
+ // Only applicable to generators.
+ "_GeneratorNext": true,
+ "_GeneratorThrow": true,
};
var currentlyUncallable = {
diff --git a/deps/v8/test/mjsunit/fuzz-natives-part2.js b/deps/v8/test/mjsunit/fuzz-natives-part2.js
index 50ca5c2c79..699a341783 100644
--- a/deps/v8/test/mjsunit/fuzz-natives-part2.js
+++ b/deps/v8/test/mjsunit/fuzz-natives-part2.js
@@ -162,6 +162,8 @@ var knownProblems = {
"ResolvePossiblyDirectEval": true,
"Log": true,
"DeclareGlobals": true,
+ "ArrayConstructor": true,
+ "InternalArrayConstructor": true,
"PromoteScheduledException": true,
"DeleteHandleScopeExtensions": true,
@@ -200,6 +202,10 @@ var knownProblems = {
"_GetCachedArrayIndex": true,
"_OneByteSeqStringSetChar": true,
"_TwoByteSeqStringSetChar": true,
+
+ // Only applicable to generators.
+ "_GeneratorNext": true,
+ "_GeneratorThrow": true,
};
var currentlyUncallable = {
diff --git a/deps/v8/test/mjsunit/fuzz-natives-part3.js b/deps/v8/test/mjsunit/fuzz-natives-part3.js
index 05d32e9ae2..973963597f 100644
--- a/deps/v8/test/mjsunit/fuzz-natives-part3.js
+++ b/deps/v8/test/mjsunit/fuzz-natives-part3.js
@@ -200,6 +200,10 @@ var knownProblems = {
"_GetCachedArrayIndex": true,
"_OneByteSeqStringSetChar": true,
"_TwoByteSeqStringSetChar": true,
+
+ // Only applicable to generators.
+ "_GeneratorNext": true,
+ "_GeneratorThrow": true,
};
var currentlyUncallable = {
diff --git a/deps/v8/test/mjsunit/fuzz-natives-part4.js b/deps/v8/test/mjsunit/fuzz-natives-part4.js
index 542dcf3eb0..595af7d629 100644
--- a/deps/v8/test/mjsunit/fuzz-natives-part4.js
+++ b/deps/v8/test/mjsunit/fuzz-natives-part4.js
@@ -202,8 +202,8 @@ var knownProblems = {
"_TwoByteSeqStringSetChar": true,
// Only applicable to generators.
- "_GeneratorSend": true,
- "_GeneratorThrow": true
+ "_GeneratorNext": true,
+ "_GeneratorThrow": true,
};
var currentlyUncallable = {
diff --git a/deps/v8/test/mjsunit/harmony/generators-iteration.js b/deps/v8/test/mjsunit/harmony/generators-iteration.js
index e717f1b4a3..01facd085c 100644
--- a/deps/v8/test/mjsunit/harmony/generators-iteration.js
+++ b/deps/v8/test/mjsunit/harmony/generators-iteration.js
@@ -64,9 +64,9 @@ function TestGenerator(g, expected_values_for_next,
for (var i = 0; i < expected_values_for_send.length; i++) {
assertIteratorResult(expected_values_for_send[i],
i == expected_values_for_send.length - 1,
- iter.send(send_val));
+ iter.next(send_val));
}
- assertThrows(function() { iter.send(send_val); }, Error);
+ assertThrows(function() { iter.next(send_val); }, Error);
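+ // (The separate generator send() method is gone in this V8 version;
+ // next(value) both resumes the generator and delivers the value.)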
}
function testThrow(thunk) {
for (var i = 0; i < expected_values_for_next.length; i++) {
@@ -572,7 +572,7 @@ function TestRecursion() {
return iter.next();
}
function TestSendRecursion() {
- function* g() { yield iter.send(42); }
+ function* g() { yield iter.next(42); }
var iter = g();
return iter.next();
}
diff --git a/deps/v8/test/mjsunit/harmony/generators-objects.js b/deps/v8/test/mjsunit/harmony/generators-objects.js
index b717c303c8..bb29bed008 100644
--- a/deps/v8/test/mjsunit/harmony/generators-objects.js
+++ b/deps/v8/test/mjsunit/harmony/generators-objects.js
@@ -79,7 +79,7 @@ function TestGeneratorObjectMethods() {
function TestNonGenerator(non_generator) {
assertThrows(function() { iter.next.call(non_generator); }, TypeError);
- assertThrows(function() { iter.send.call(non_generator, 1); }, TypeError);
+ assertThrows(function() { iter.next.call(non_generator, 1); }, TypeError);
assertThrows(function() { iter.throw.call(non_generator, 1); }, TypeError);
assertThrows(function() { iter.close.call(non_generator); }, TypeError);
}
diff --git a/deps/v8/test/mjsunit/harmony/generators-runtime.js b/deps/v8/test/mjsunit/harmony/generators-runtime.js
index b4e8f950e1..7667deb7f6 100644
--- a/deps/v8/test/mjsunit/harmony/generators-runtime.js
+++ b/deps/v8/test/mjsunit/harmony/generators-runtime.js
@@ -84,7 +84,7 @@ function TestGeneratorObjectPrototype() {
assertSame(GeneratorObjectPrototype,
Object.getPrototypeOf((function*(){yield 1}).prototype));
- var expected_property_names = ["next", "send", "throw", "constructor"];
+ var expected_property_names = ["next", "throw", "constructor"];
var found_property_names =
Object.getOwnPropertyNames(GeneratorObjectPrototype);
diff --git a/deps/v8/test/mjsunit/harmony/iteration-semantics.js b/deps/v8/test/mjsunit/harmony/iteration-semantics.js
new file mode 100644
index 0000000000..96b6d1452c
--- /dev/null
+++ b/deps/v8/test/mjsunit/harmony/iteration-semantics.js
@@ -0,0 +1,327 @@
+// Copyright 2013 the V8 project authors. All rights reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following
+// disclaimer in the documentation and/or other materials provided
+// with the distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived
+// from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+// Flags: --harmony --harmony-generators
+
+// Test for-of semantics.
+
+"use strict";
+
+
+// First, some helpers.
+
+function* values() {
+ for (var i = 0; i < arguments.length; i++) {
+ yield arguments[i];
+ }
+}
+
+function integers_until(max) {
+ function next() {
+ var ret = { value: this.n, done: this.n == max };
+ this.n++;
+ return ret;
+ }
+ return { next: next, n: 0 };
+}
+
+function results(results) {
+ var i = 0;
+ function next() {
+ return results[i++];
+ }
+ return { next: next };
+}
+
+function* integers_from(n) {
+ while (1) yield n++;
+}
+
+// A destructive append.
+function append(x, tail) {
+ tail[tail.length] = x;
+ return tail;
+}
+
+function sum(x, tail) {
+ return x + tail;
+}
+
+function fold(cons, seed, iter) {
+ for (var x of iter) {
+ seed = cons(x, seed);
+ }
+ return seed;
+}
+
+function* take(iter, n) {
+ if (n == 0) return;
+ for (let x of iter) {
+ yield x;
+ if (--n == 0) break;
+ }
+}
+
+function nth(iter, n) {
+ for (let x of iter) {
+ if (n-- == 0) return x;
+ }
+ throw "unreachable";
+}
+
+function* skip_every(iter, n) {
+ var i = 0;
+ for (let x of iter) {
+ if (++i % n == 0) continue;
+ yield x;
+ }
+}
+
+function* iter_map(iter, f) {
+ for (var x of iter) {
+ yield f(x);
+ }
+}
+
+function nested_fold(cons, seed, iter) {
+ for (let x of iter) {
+ for (let y of x) {
+ seed = cons(y, seed);
+ }
+ }
+ return seed;
+}
+
+function* unreachable(iter) {
+ for (let x of iter) {
+ throw "not reached";
+ }
+}
+
+function one_time_getter(o, prop, val) {
+ function set_never() { throw "unreachable"; }
+ var gotten = false;
+ function get_once() {
+ if (gotten) throw "got twice";
+ gotten = true;
+ return val;
+ }
+ Object.defineProperty(o, prop, {get: get_once, set: set_never});
+ return o;
+}
+
+function never_getter(o, prop) {
+ function never() { throw "unreachable"; }
+ Object.defineProperty(o, prop, {get: never, set: never});
+ return o;
+}
+
+function remove_next_after(iter, n) {
+ function next() {
+ if (n-- == 0) delete this.next;
+ return iter.next();
+ }
+ return { next: next };
+}
+
+function poison_next_after(iter, n) {
+ function next() {
+ return iter.next();
+ }
+ function next_getter() {
+ if (n-- < 0)
+ throw "poisoned";
+ return next;
+ }
+ var o = {};
+ Object.defineProperty(o, 'next', { get: next_getter });
+ return o;
+}
+
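+// For reference, a rough sketch of the protocol these tests probe (not
+// the exact internal lowering): "for (var x of iter) body" behaves
+// roughly like
+//
+//   while (true) {
+//     var result = iter.next();  // "next" is looked up on every iteration.
+//     if (result.done) break;    // "done" is coerced to boolean.
+//     x = result.value;
+//     body
+//   }
+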
+// Now, the tests.
+
+// Non-generator iterators.
+assertEquals(45, fold(sum, 0, integers_until(10)));
+// Generator iterators.
+assertEquals([1, 2, 3], fold(append, [], values(1, 2, 3)));
+// Break.
+assertEquals(45, fold(sum, 0, take(integers_from(0), 10)));
+// Continue.
+assertEquals(90, fold(sum, 0, take(skip_every(integers_from(0), 2), 10)));
+// Return.
+assertEquals(10, nth(integers_from(0), 10));
+// Nested for-of.
+assertEquals([0, 0, 1, 0, 1, 2, 0, 1, 2, 3],
+ nested_fold(append,
+ [],
+ iter_map(integers_until(5), integers_until)));
+// Result objects with sparse fields.
+assertEquals([undefined, 1, 2, 3],
+ fold(append, [],
+ results([{ done: false },
+ { value: 1, done: false },
+ // A missing "done" is the same as undefined, which
+ // is false.
+ { value: 2 },
+ // Not done.
+ { value: 3, done: 0 },
+ // Done.
+ { value: 4, done: 42 }])));
+// Results that are not objects.
+assertEquals([undefined, undefined, undefined],
+ fold(append, [],
+ results([10, "foo", /qux/, { value: 37, done: true }])));
+// Getters (shudder).
+assertEquals([1, 2],
+ fold(append, [],
+ results([one_time_getter({ value: 1 }, 'done', false),
+ one_time_getter({ done: false }, 'value', 2),
+ { value: 37, done: true },
+ never_getter(never_getter({}, 'done'), 'value')])));
+
+// Null and undefined do not cause an error.
+assertEquals(0, fold(sum, 0, unreachable(null)));
+assertEquals(0, fold(sum, 0, unreachable(undefined)));
+
+// Other non-iterators do cause an error.
+assertThrows('fold(sum, 0, unreachable({}))', TypeError);
+assertThrows('fold(sum, 0, unreachable("foo"))', TypeError);
+assertThrows('fold(sum, 0, unreachable(37))', TypeError);
+
+// "next" is looked up each time.
+assertThrows('fold(sum, 0, remove_next_after(integers_until(10), 5))',
+ TypeError);
+// It is not called at any other time.
+assertEquals(45,
+ fold(sum, 0, remove_next_after(integers_until(10), 10)));
+// It is not looked up too many times.
+assertEquals(45,
+ fold(sum, 0, poison_next_after(integers_until(10), 10)));
+
+function labelled_continue(iter) {
+ var n = 0;
+outer:
+ while (true) {
+ n++;
+ for (var x of iter) continue outer;
+ break;
+ }
+ return n;
+}
+assertEquals(11, labelled_continue(integers_until(10)));
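+// (integers_until(10) yields ten values; the eleventh pass finds the
+// iterator exhausted and falls through to the break.)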
+
+function labelled_break(iter) {
+ var n = 0;
+outer:
+ while (true) {
+ n++;
+ for (var x of iter) break outer;
+ }
+ return n;
+}
+assertEquals(1, labelled_break(integers_until(10)));
+
+// Test continue/break in catch.
+function catch_control(iter, k) {
+ for (var x of iter) {
+ try {
+ return k(x);
+ } catch (e) {
+ if (e == "continue") continue;
+ else if (e == "break") break;
+ else throw e;
+ }
+ }
+ return false;
+}
+assertEquals(false,
+ catch_control(integers_until(10),
+ function() { throw "break" }));
+assertEquals(false,
+ catch_control(integers_until(10),
+ function() { throw "continue" }));
+assertEquals(5,
+ catch_control(integers_until(10),
+ function(x) {
+ if (x == 5) return x;
+ throw "continue";
+ }));
+
+// Test continue/break in try.
+function try_control(iter, k) {
+ for (var x of iter) {
+ try {
+ var e = k(x);
+ if (e == "continue") continue;
+ else if (e == "break") break;
+ return e;
+ } catch (e) {
+ throw e;
+ }
+ }
+ return false;
+}
+assertEquals(false,
+ try_control(integers_until(10),
+ function() { return "break" }));
+assertEquals(false,
+ try_control(integers_until(10),
+ function() { return "continue" }));
+assertEquals(5,
+ try_control(integers_until(10),
+ function(x) { return (x == 5) ? x : "continue" }));
+
+// Proxy results, with getters.
+function transparent_proxy(x) {
+ return Proxy.create({
+ get: function(receiver, name) { return x[name]; }
+ });
+}
+assertEquals([1, 2],
+ fold(append, [],
+ results([one_time_getter({ value: 1 }, 'done', false),
+ one_time_getter({ done: false }, 'value', 2),
+ { value: 37, done: true },
+ never_getter(never_getter({}, 'done'), 'value')]
+ .map(transparent_proxy))));
+
+// Proxy iterators.
+function poison_proxy_after(x, n) {
+ return Proxy.create({
+ get: function(receiver, name) {
+ if (name == 'next' && n-- < 0) throw "unreachable";
+ return x[name];
+ },
+ // Needed for integers_until(10)'s this.n++.
+ set: function(receiver, name, val) {
+ return x[name] = val;
+ }
+ });
+}
+assertEquals(45, fold(sum, 0, poison_proxy_after(integers_until(10), 10)));
diff --git a/deps/v8/test/mjsunit/harmony/iteration-syntax.js b/deps/v8/test/mjsunit/harmony/iteration-syntax.js
new file mode 100644
index 0000000000..21149c04bc
--- /dev/null
+++ b/deps/v8/test/mjsunit/harmony/iteration-syntax.js
@@ -0,0 +1,65 @@
+// Copyright 2013 the V8 project authors. All rights reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following
+// disclaimer in the documentation and/or other materials provided
+// with the distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived
+// from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+// Flags: --harmony-iteration --harmony-scoping
+
+// Test for-of syntax.
+
+"use strict";
+
+function f() { for (x of y) { } }
+function f() { for (var x of y) { } }
+function f() { for (let x of y) { } }
+
+assertThrows("function f() { for (x of) { } }", SyntaxError);
+assertThrows("function f() { for (x of y z) { } }", SyntaxError);
+assertThrows("function f() { for (x of y;) { } }", SyntaxError);
+
+assertThrows("function f() { for (var x of) { } }", SyntaxError);
+assertThrows("function f() { for (var x of y z) { } }", SyntaxError);
+assertThrows("function f() { for (var x of y;) { } }", SyntaxError);
+
+assertThrows("function f() { for (let x of) { } }", SyntaxError);
+assertThrows("function f() { for (let x of y z) { } }", SyntaxError);
+assertThrows("function f() { for (let x of y;) { } }", SyntaxError);
+
+assertThrows("function f() { for (of y) { } }", SyntaxError);
+assertThrows("function f() { for (of of) { } }", SyntaxError);
+assertThrows("function f() { for (var of y) { } }", SyntaxError);
+assertThrows("function f() { for (var of of) { } }", SyntaxError);
+assertThrows("function f() { for (let of y) { } }", SyntaxError);
+assertThrows("function f() { for (let of of) { } }", SyntaxError);
+
+// Alack, this appears to be valid: "of" is not a reserved word, so it
+// can also be used as a plain binding name.
+function f() { for (of of y) { } }
+function f() { for (let of of y) { } }
+function f() { for (var of of y) { } }
+
+// This too, of course.
+function f() { for (of in y) { } }
+function f() { for (var of in y) { } }
+function f() { for (let of in y) { } }
diff --git a/deps/v8/test/mjsunit/harmony/object-observe.js b/deps/v8/test/mjsunit/harmony/object-observe.js
index 372ffdbdb7..0434ccdcb6 100644
--- a/deps/v8/test/mjsunit/harmony/object-observe.js
+++ b/deps/v8/test/mjsunit/harmony/object-observe.js
@@ -957,15 +957,15 @@ var arr2 = ['alpha', 'beta'];
var arr3 = ['hello'];
arr3[2] = 'goodbye';
arr3.length = 6;
-var slow_arr = new Array(1000000000);
-slow_arr[500000000] = 'hello';
Object.defineProperty(arr, '0', {configurable: false});
Object.defineProperty(arr, '2', {get: function(){}});
Object.defineProperty(arr2, '0', {get: function(){}, configurable: false});
Object.observe(arr, observer.callback);
+Array.observe(arr, observer2.callback);
Object.observe(arr2, observer.callback);
+Array.observe(arr2, observer2.callback);
Object.observe(arr3, observer.callback);
-Object.observe(slow_arr, observer.callback);
+Array.observe(arr3, observer2.callback);
arr.length = 2;
arr.length = 0;
arr.length = 10;
@@ -978,8 +978,8 @@ arr3.length = 0;
arr3.length++;
arr3.length /= 2;
Object.defineProperty(arr3, 'length', {value: 5});
-Object.defineProperty(arr3, 'length', {value: 10, writable: false});
-slow_arr.length = 100;
+arr3[4] = 5;
+Object.defineProperty(arr3, 'length', {value: 1, writable: false});
Object.deliverChangeRecords(observer.callback);
observer.assertCallbackRecords([
{ object: arr, name: '3', type: 'deleted', oldValue: 'd' },
@@ -991,7 +991,7 @@ observer.assertCallbackRecords([
{ object: arr, name: 'length', type: 'reconfigured' },
{ object: arr2, name: '1', type: 'deleted', oldValue: 'beta' },
{ object: arr2, name: 'length', type: 'updated', oldValue: 2 },
- { object: arr2, name: 'length', type: 'reconfigured', oldValue: 1 },
+ { object: arr2, name: 'length', type: 'reconfigured' },
{ object: arr3, name: '2', type: 'deleted', oldValue: 'goodbye' },
{ object: arr3, name: '0', type: 'deleted', oldValue: 'hello' },
{ object: arr3, name: 'length', type: 'updated', oldValue: 6 },
@@ -999,10 +999,60 @@ observer.assertCallbackRecords([
{ object: arr3, name: 'length', type: 'updated', oldValue: 1 },
{ object: arr3, name: 'length', type: 'updated', oldValue: 2 },
{ object: arr3, name: 'length', type: 'updated', oldValue: 1 },
- { object: arr3, name: 'length', type: 'reconfigured', oldValue: 5 },
+ { object: arr3, name: '4', type: 'new' },
+ { object: arr3, name: '4', type: 'deleted', oldValue: 5 },
+ // TODO(rafaelw): It breaks spec compliance to get two records here.
+ // When the TODO in v8natives.js::DefineArrayProperty is addressed
+ // which prevents DefineProperty from over-writing the magic length
+ // property, these will collapse into a single record.
+ { object: arr3, name: 'length', type: 'updated', oldValue: 5 },
+ { object: arr3, name: 'length', type: 'reconfigured' }
+]);
+Object.deliverChangeRecords(observer2.callback);
+observer2.assertCallbackRecords([
+ { object: arr, type: 'splice', index: 2, removed: [, 'd'], addedCount: 0 },
+ { object: arr, type: 'splice', index: 1, removed: ['b'], addedCount: 0 },
+ { object: arr, type: 'splice', index: 1, removed: [], addedCount: 9 },
+ { object: arr2, type: 'splice', index: 1, removed: ['beta'], addedCount: 0 },
+ { object: arr3, type: 'splice', index: 0, removed: ['hello',, 'goodbye',,,,], addedCount: 0 },
+ { object: arr3, type: 'splice', index: 0, removed: [], addedCount: 1 },
+ { object: arr3, type: 'splice', index: 1, removed: [], addedCount: 1 },
+ { object: arr3, type: 'splice', index: 1, removed: [,], addedCount: 0 },
+ { object: arr3, type: 'splice', index: 1, removed: [], addedCount: 4 },
+ { object: arr3, name: '4', type: 'new' },
+ { object: arr3, type: 'splice', index: 1, removed: [,,,5], addedCount: 0 }
+]);
+
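+// For reference (summarizing the assertions in this file, not a complete
+// schema): Object.observe delivers property records shaped like
+//   { object, name, type, oldValue? }
+// while Array.observe additionally delivers splice records shaped like
+//   { object, type: 'splice', index, removed, addedCount }.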
+
+// Updating length on large (slow) array
+reset();
+var slow_arr = new Array(1000000000);
+slow_arr[500000000] = 'hello';
+Object.observe(slow_arr, observer.callback);
+var spliceRecords;
+function slowSpliceCallback(records) {
+ spliceRecords = records;
+}
+Array.observe(slow_arr, slowSpliceCallback);
+slow_arr.length = 100;
+Object.deliverChangeRecords(observer.callback);
+observer.assertCallbackRecords([
{ object: slow_arr, name: '500000000', type: 'deleted', oldValue: 'hello' },
{ object: slow_arr, name: 'length', type: 'updated', oldValue: 1000000000 },
]);
+Object.deliverChangeRecords(slowSpliceCallback);
+assertEquals(spliceRecords.length, 1);
+// Assert this splice record field by field because the removed array is huge.
+var splice = spliceRecords[0];
+assertSame(splice.object, slow_arr);
+assertEquals(splice.type, 'splice');
+assertEquals(splice.index, 100);
+assertEquals(splice.addedCount, 0);
+var array_keys = %GetArrayKeys(splice.removed, splice.removed.length);
+assertEquals(array_keys.length, 1);
+assertEquals(array_keys[0], 499999900);
+assertEquals(splice.removed[499999900], 'hello');
+assertEquals(splice.removed.length, 999999900);
// Assignments in loops (checking different IC states).
@@ -1037,10 +1087,12 @@ observer.assertCallbackRecords([
]);
-// Adding elements past the end of an array should notify on length
+// Adding elements past the end of an array should notify on length for
+// Object.observe and emit "splices" for Array.observe.
reset();
var arr = [1, 2, 3];
Object.observe(arr, observer.callback);
+Array.observe(arr, observer2.callback);
arr[3] = 10;
arr[100] = 20;
Object.defineProperty(arr, '200', {value: 7});
@@ -1058,6 +1110,14 @@ observer.assertCallbackRecords([
{ object: arr, name: 'length', type: 'updated', oldValue: 201 },
{ object: arr, name: '50', type: 'new' },
]);
+Object.deliverChangeRecords(observer2.callback);
+observer2.assertCallbackRecords([
+ { object: arr, type: 'splice', index: 3, removed: [], addedCount: 1 },
+ { object: arr, type: 'splice', index: 4, removed: [], addedCount: 97 },
+ { object: arr, type: 'splice', index: 101, removed: [], addedCount: 100 },
+ { object: arr, type: 'splice', index: 201, removed: [], addedCount: 200 },
+ { object: arr, type: 'new', name: '50' },
+]);
// Tests for array methods, first on arrays and then on plain objects
@@ -1142,6 +1202,22 @@ observer.assertCallbackRecords([
{ object: array, name: '2', type: 'updated', oldValue: 3 },
]);
+// Sort
+reset();
+var array = [3, 2, 1];
+Object.observe(array, observer.callback);
+array.sort();
+assertEquals(1, array[0]);
+assertEquals(2, array[1]);
+assertEquals(3, array[2]);
+Object.deliverChangeRecords(observer.callback);
+observer.assertCallbackRecords([
+ { object: array, name: '1', type: 'updated', oldValue: 2 },
+ { object: array, name: '0', type: 'updated', oldValue: 3 },
+ { object: array, name: '2', type: 'updated', oldValue: 1 },
+ { object: array, name: '1', type: 'updated', oldValue: 3 },
+ { object: array, name: '0', type: 'updated', oldValue: 2 },
+]);
//
// === PLAIN OBJECTS ===
@@ -1159,11 +1235,13 @@ observer.assertCallbackRecords([
]);
// Pop
-reset()
-var array = {0: 1, 1: 2, length: 2};
+reset();
+var array = [1, 2];
Object.observe(array, observer.callback);
-Array.prototype.pop.call(array);
-Array.prototype.pop.call(array);
+Array.observe(array, observer2.callback);
+array.pop();
+array.pop();
+array.pop();
Object.deliverChangeRecords(observer.callback);
observer.assertCallbackRecords([
{ object: array, name: '1', type: 'deleted', oldValue: 2 },
@@ -1171,13 +1249,20 @@ observer.assertCallbackRecords([
{ object: array, name: '0', type: 'deleted', oldValue: 1 },
{ object: array, name: 'length', type: 'updated', oldValue: 1 },
]);
+Object.deliverChangeRecords(observer2.callback);
+observer2.assertCallbackRecords([
+ { object: array, type: 'splice', index: 1, removed: [2], addedCount: 0 },
+ { object: array, type: 'splice', index: 0, removed: [1], addedCount: 0 }
+]);
// Shift
-reset()
-var array = {0: 1, 1: 2, length: 2};
+reset();
+var array = [1, 2];
Object.observe(array, observer.callback);
-Array.prototype.shift.call(array);
-Array.prototype.shift.call(array);
+Array.observe(array, observer2.callback);
+array.shift();
+array.shift();
+array.shift();
Object.deliverChangeRecords(observer.callback);
observer.assertCallbackRecords([
{ object: array, name: '0', type: 'updated', oldValue: 1 },
@@ -1186,32 +1271,71 @@ observer.assertCallbackRecords([
{ object: array, name: '0', type: 'deleted', oldValue: 2 },
{ object: array, name: 'length', type: 'updated', oldValue: 1 },
]);
+Object.deliverChangeRecords(observer2.callback);
+observer2.assertCallbackRecords([
+ { object: array, type: 'splice', index: 0, removed: [1], addedCount: 0 },
+ { object: array, type: 'splice', index: 0, removed: [2], addedCount: 0 }
+]);
// Unshift
-reset()
-var array = {0: 1, 1: 2, length: 2};
+reset();
+var array = [1, 2];
Object.observe(array, observer.callback);
-Array.prototype.unshift.call(array, 3, 4);
+Array.observe(array, observer2.callback);
+array.unshift(3, 4);
+array.unshift(5);
Object.deliverChangeRecords(observer.callback);
observer.assertCallbackRecords([
{ object: array, name: '3', type: 'new' },
+ { object: array, name: 'length', type: 'updated', oldValue: 2 },
{ object: array, name: '2', type: 'new' },
{ object: array, name: '0', type: 'updated', oldValue: 1 },
{ object: array, name: '1', type: 'updated', oldValue: 2 },
- { object: array, name: 'length', type: 'updated', oldValue: 2 },
+ { object: array, name: '4', type: 'new' },
+ { object: array, name: 'length', type: 'updated', oldValue: 4 },
+ { object: array, name: '3', type: 'updated', oldValue: 2 },
+ { object: array, name: '2', type: 'updated', oldValue: 1 },
+ { object: array, name: '1', type: 'updated', oldValue: 4 },
+ { object: array, name: '0', type: 'updated', oldValue: 3 },
+]);
+Object.deliverChangeRecords(observer2.callback);
+observer2.assertCallbackRecords([
+ { object: array, type: 'splice', index: 0, removed: [], addedCount: 2 },
+ { object: array, type: 'splice', index: 0, removed: [], addedCount: 1 }
]);
// Splice
-reset()
-var array = {0: 1, 1: 2, 2: 3, length: 3};
+reset();
+var array = [1, 2, 3];
Object.observe(array, observer.callback);
-Array.prototype.splice.call(array, 1, 1, 4, 5);
+Array.observe(array, observer2.callback);
+array.splice(1, 0, 4, 5); // 1 4 5 2 3
+array.splice(0, 2); // 5 2 3
+array.splice(1, 2, 6, 7); // 5 6 7
+array.splice(2, 0);
Object.deliverChangeRecords(observer.callback);
observer.assertCallbackRecords([
+ { object: array, name: '4', type: 'new' },
+ { object: array, name: 'length', type: 'updated', oldValue: 3 },
{ object: array, name: '3', type: 'new' },
{ object: array, name: '1', type: 'updated', oldValue: 2 },
{ object: array, name: '2', type: 'updated', oldValue: 3 },
- { object: array, name: 'length', type: 'updated', oldValue: 3 },
+
+ { object: array, name: '0', type: 'updated', oldValue: 1 },
+ { object: array, name: '1', type: 'updated', oldValue: 4 },
+ { object: array, name: '2', type: 'updated', oldValue: 5 },
+ { object: array, name: '4', type: 'deleted', oldValue: 3 },
+ { object: array, name: '3', type: 'deleted', oldValue: 2 },
+ { object: array, name: 'length', type: 'updated', oldValue: 5 },
+
+ { object: array, name: '1', type: 'updated', oldValue: 2 },
+ { object: array, name: '2', type: 'updated', oldValue: 3 },
+]);
+Object.deliverChangeRecords(observer2.callback);
+observer2.assertCallbackRecords([
+ { object: array, type: 'splice', index: 1, removed: [], addedCount: 2 },
+ { object: array, type: 'splice', index: 0, removed: [1, 4], addedCount: 0 },
+ { object: array, type: 'splice', index: 1, removed: [2, 3], addedCount: 2 },
]);
// Exercise StoreIC_ArrayLength
diff --git a/deps/v8/test/mjsunit/mjsunit.status b/deps/v8/test/mjsunit/mjsunit.status
index 585d503a0f..8d6274bf2a 100644
--- a/deps/v8/test/mjsunit/mjsunit.status
+++ b/deps/v8/test/mjsunit/mjsunit.status
@@ -47,6 +47,10 @@ regress/regress-crbug-160010: SKIP
stack-traces-gc: PASS || FAIL
##############################################################################
+# TODO(wingo): Re-enable when GC bug from r15060 is solved.
+harmony/generators-iteration: SKIP
+
+##############################################################################
# Too slow in debug mode with --stress-opt mode.
compiler/regress-stacktrace-methods: PASS, SKIP if $mode == debug
compiler/regress-funcaller: PASS, SKIP if $mode == debug
diff --git a/deps/v8/test/mjsunit/object-define-property.js b/deps/v8/test/mjsunit/object-define-property.js
index 835d0e0a55..cbb2d211f4 100644
--- a/deps/v8/test/mjsunit/object-define-property.js
+++ b/deps/v8/test/mjsunit/object-define-property.js
@@ -1195,3 +1195,12 @@ Assign(new C);
%OptimizeFunctionOnNextCall(Assign);
Object.defineProperty(C.prototype, "blubb", {get: function() { return -42; }});
Assign(new C);
+
+// Test that changes to the prototype of a simple constructor are not ignored,
+// even after creating initial instances.
+function C() {
+ this.x = 23;
+}
+assertEquals(23, new C().x);
+C.prototype.__defineSetter__('x', function(value) { this.y = 23; });
+assertEquals(void 0, new C().x);
diff --git a/deps/v8/test/mjsunit/object-freeze.js b/deps/v8/test/mjsunit/object-freeze.js
index c3a9278bbd..a0717a171c 100644
--- a/deps/v8/test/mjsunit/object-freeze.js
+++ b/deps/v8/test/mjsunit/object-freeze.js
@@ -28,6 +28,7 @@
// Tests the Object.freeze and Object.isFrozen methods - ES 15.2.3.9 and
// ES 15.2.3.12
+// Flags: --allow-natives-syntax
// Test that we throw an error if an object is not passed as argument.
var non_objects = new Array(undefined, null, 1, -1, 0, 42.43);
@@ -191,3 +192,125 @@ assertFalse(Object.isFrozen(obj5));
// Make sure that Object.freeze returns the frozen object.
var obj6 = {}
assertTrue(obj6 === Object.freeze(obj6))
+
+// Test that the enumerable attribute is unperturbed by freezing.
+obj = { x: 42, y: 'foo' };
+Object.defineProperty(obj, 'y', {enumerable: false});
+Object.freeze(obj);
+assertTrue(Object.isFrozen(obj));
+desc = Object.getOwnPropertyDescriptor(obj, 'x');
+assertTrue(desc.enumerable);
+desc = Object.getOwnPropertyDescriptor(obj, 'y');
+assertFalse(desc.enumerable);
+
+// Fast properties should remain fast
+obj = { x: 42, y: 'foo' };
+assertTrue(%HasFastProperties(obj));
+Object.freeze(obj);
+assertTrue(Object.isFrozen(obj));
+assertTrue(%HasFastProperties(obj));
+
+// Frozen objects should share maps where possible
+obj = { prop1: 1, prop2: 2 };
+obj2 = { prop1: 3, prop2: 4 };
+assertTrue(%HaveSameMap(obj, obj2));
+Object.freeze(obj);
+Object.freeze(obj2);
+assertTrue(Object.isFrozen(obj));
+assertTrue(Object.isFrozen(obj2));
+assertTrue(%HaveSameMap(obj, obj2));
+
+// Frozen objects should share maps even when they have elements
+obj = { prop1: 1, prop2: 2, 75: 'foo' };
+obj2 = { prop1: 3, prop2: 4, 150: 'bar' };
+assertTrue(%HaveSameMap(obj, obj2));
+Object.freeze(obj);
+Object.freeze(obj2);
+assertTrue(Object.isFrozen(obj));
+assertTrue(Object.isFrozen(obj2));
+assertTrue(%HaveSameMap(obj, obj2));
+
+// Setting elements after freezing should not be allowed
+obj = { prop: 'thing' };
+Object.freeze(obj);
+assertTrue(Object.isFrozen(obj));
+obj[0] = 'hello';
+assertFalse(obj.hasOwnProperty(0));
+
+// Freezing an object in dictionary mode should work
+// Also testing that getter/setter properties work after freezing
+obj = { };
+for (var i = 0; i < 100; ++i) {
+ obj['x' + i] = i;
+}
+var accessorDidRun = false;
+Object.defineProperty(obj, 'accessor', {
+ get: function() { return 42 },
+ set: function() { accessorDidRun = true },
+ configurable: true,
+ enumerable: true
+});
+
+assertFalse(%HasFastProperties(obj));
+Object.freeze(obj);
+assertFalse(%HasFastProperties(obj));
+assertTrue(Object.isFrozen(obj));
+assertFalse(Object.isExtensible(obj));
+for (var i = 0; i < 100; ++i) {
+ desc = Object.getOwnPropertyDescriptor(obj, 'x' + i);
+ assertFalse(desc.writable);
+ assertFalse(desc.configurable);
+}
+assertEquals(42, obj.accessor);
+assertFalse(accessorDidRun);
+obj.accessor = 'ignored value';
+assertTrue(accessorDidRun);
+
+// Freezing arguments should work
+var func = function(arg) {
+ Object.freeze(arguments);
+ assertTrue(Object.isFrozen(arguments));
+};
+func('hello', 'world');
+func('goodbye', 'world');
+
+// Freezing sparse arrays
+var sparseArr = [0, 1];
+sparseArr[10000] = 10000;
+Object.freeze(sparseArr);
+assertTrue(Object.isFrozen(sparseArr));
+
+// Accessors on a fast object should behave properly after freezing
+obj = {};
+Object.defineProperty(obj, 'accessor', {
+ get: function() { return 42 },
+ set: function() { accessorDidRun = true },
+ configurable: true,
+ enumerable: true
+});
+assertTrue(%HasFastProperties(obj));
+Object.freeze(obj);
+assertTrue(Object.isFrozen(obj));
+assertTrue(%HasFastProperties(obj));
+assertEquals(42, obj.accessor);
+accessorDidRun = false;
+obj.accessor = 'ignored value';
+assertTrue(accessorDidRun);
+
+// Test for regression in mixed accessor/data property objects.
+// The strict function is one such object.
+assertTrue(Object.isFrozen(Object.freeze(function(){"use strict";})));
+
+// Also test a simpler case
+obj = {};
+Object.defineProperty(obj, 'accessor', {
+ get: function() { return 42 },
+ set: function() { accessorDidRun = true },
+ configurable: true,
+ enumerable: true
+});
+obj.data = 'foo';
+assertTrue(%HasFastProperties(obj));
+Object.freeze(obj);
+assertTrue(%HasFastProperties(obj));
+assertTrue(Object.isFrozen(obj));
diff --git a/deps/v8/test/mjsunit/regress/regress-1853.js b/deps/v8/test/mjsunit/regress/regress-1853.js
index f80badecb6..cfafe82fa3 100644
--- a/deps/v8/test/mjsunit/regress/regress-1853.js
+++ b/deps/v8/test/mjsunit/regress/regress-1853.js
@@ -102,13 +102,13 @@ eval('function test1() { \n' +
' assertFalse(test_break_1); \n' +
' assertTrue(test_break_1); \n' +
'} \n' +
- '//@ sourceURL=testScriptOne');
+ '//# sourceURL=testScriptOne');
eval('function test2() { \n' +
' assertFalse(test_break_2); \n' +
' assertTrue(test_break_2); \n' +
'} \n' +
- '//@ sourceURL=testScriptTwo');
+ '//# sourceURL=testScriptTwo');
test1();
test2();
diff --git a/deps/v8/test/mjsunit/bugs/bug-618.js b/deps/v8/test/mjsunit/regress/regress-2132.js
index 0513f87f16..d8987a554a 100644
--- a/deps/v8/test/mjsunit/bugs/bug-618.js
+++ b/deps/v8/test/mjsunit/regress/regress-2132.js
@@ -1,4 +1,4 @@
-// Copyright 2010 the V8 project authors. All rights reserved.
+// Copyright 2013 the V8 project authors. All rights reserved.
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
@@ -25,21 +25,24 @@
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-// When this bug is corrected move to object-define-property and add
-// additional tests for configurable in the same manner as existing tests
-// there.
+// Flags: --allow-natives-syntax
-function C() {
- this.x = 23;
+function mul(x, y) {
+ return (x * y) | 0;
}
-// If a setter is added to the prototype chain of a simple constructor setting
-// one of the properties assigned in the constructor then this setter is
-// ignored when constructing new objects from the constructor.
+mul(0, 0);
+mul(0, 0);
+%OptimizeFunctionOnNextCall(mul);
+assertEquals(0, mul(0, -1));
+assertTrue(%GetOptimizationStatus(mul) != 2);
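+// (0 * -1 is -0, so the optimized code takes the minus-zero path; the |0
+// truncation must still yield 0, and the %GetOptimizationStatus check
+// asserts mul stays optimized, since status 2 means "not optimized".)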
-// This only happens if the setter is added _after_ an instance has been
-// created.
+function div(x, y) {
+ return (x / y) | 0;
+}
-assertEquals(23, new C().x);
-C.prototype.__defineSetter__('x', function(value) { this.y = 23; });
-assertEquals(void 0, new C().x);
+div(4, 2);
+div(4, 2);
+%OptimizeFunctionOnNextCall(div);
+assertEquals(1, div(5, 3));
+assertTrue(%GetOptimizationStatus(div) != 2);
diff --git a/deps/v8/src/builtins-decls.h b/deps/v8/test/mjsunit/regress/regress-237617.js
index beb5dd1e80..dabf828ae8 100644
--- a/deps/v8/src/builtins-decls.h
+++ b/deps/v8/test/mjsunit/regress/regress-237617.js
@@ -25,16 +25,19 @@
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-#ifndef V8_BUILTINS_DECLS_H_
-#define V8_BUILTINS_DECLS_H_
+"use strict"
-#include "arguments.h"
+function f() {
+ throw new Error("test stack");
+}
-namespace v8 {
-namespace internal {
+var error_stack = "";
+try {
+ f.call(null);
+} catch (e) {
+ error_stack = e.stack;
+}
-DECLARE_RUNTIME_FUNCTION(MaybeObject*, ArrayConstructor_StubFailure);
+assertTrue(error_stack.indexOf("test stack") > 0);
+assertTrue(error_stack.indexOf("illegal") < 0);
-} } // namespace v8::internal
-
-#endif // V8_BUILTINS_DECLS_H_
diff --git a/deps/v8/test/mjsunit/regress/regress-2690.js b/deps/v8/test/mjsunit/regress/regress-2690.js
new file mode 100644
index 0000000000..5fe7dc42dc
--- /dev/null
+++ b/deps/v8/test/mjsunit/regress/regress-2690.js
@@ -0,0 +1,29 @@
+// Copyright 2013 the V8 project authors. All rights reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following
+// disclaimer in the documentation and/or other materials provided
+// with the distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived
+// from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+assertTrue(/\1[a]/.test("\1a"));
+
diff --git a/deps/v8/test/mjsunit/regress/regress-2717.js b/deps/v8/test/mjsunit/regress/regress-2717.js
new file mode 100644
index 0000000000..4f8f7915b1
--- /dev/null
+++ b/deps/v8/test/mjsunit/regress/regress-2717.js
@@ -0,0 +1,51 @@
+// Copyright 2013 the V8 project authors. All rights reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following
+// disclaimer in the documentation and/or other materials provided
+// with the distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived
+// from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+// Test re-initializing existing field which is already being tracked as
+// having double representation.
+(function() {
+ function test1(a) {
+ return { x: 1.5, x: a };
+ };
+
+ assertEquals({}, test1({}).x);
+})();
+
+// Test initializing new field which follows an existing transition to a
+// map that tracks it as having double representation.
+(function() {
+ function test1(a) {
+ return { y: a };
+ };
+
+ function test2(a) {
+ return { y: a };
+ };
+
+ assertEquals(1.5, test1(1.5).y);
+ assertEquals({}, test2({}).y);
+})();
diff --git a/deps/v8/test/mjsunit/bugs/618.js b/deps/v8/test/mjsunit/regress/regress-618.js
index ddc0c19c88..ddc0c19c88 100644
--- a/deps/v8/test/mjsunit/bugs/618.js
+++ b/deps/v8/test/mjsunit/regress/regress-618.js
diff --git a/deps/v8/test/mjsunit/regress/regress-convert-hole.js b/deps/v8/test/mjsunit/regress/regress-convert-hole.js
new file mode 100644
index 0000000000..1e9c3f3138
--- /dev/null
+++ b/deps/v8/test/mjsunit/regress/regress-convert-hole.js
@@ -0,0 +1,109 @@
+// Copyright 2013 the V8 project authors. All rights reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following
+// disclaimer in the documentation and/or other materials provided
+// with the distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived
+// from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+// Flags: --allow-natives-syntax
+
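+// These tests load the hole from a holey double array (o[i] with i == 2)
+// and check that optimized code materializes it as undefined, rather than
+// as a garbage double, when it is stored to an array, passed as an
+// argument, or used as an element index (summarizing the assertions below).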
+function f_store(test, test2, a, i) {
+ var o = [0.5,1,,3];
+ var d;
+ if (test) {
+ d = 1.5;
+ } else {
+ d = o[i];
+ }
+ if (test2) {
+ d += 1;
+ }
+ a[i] = d;
+ return d;
+}
+
+var a1 = [0, 0, 0, {}];
+f_store(true, false, a1, 0);
+f_store(true, true, a1, 0);
+f_store(false, false, a1, 1);
+f_store(false, true, a1, 1);
+%OptimizeFunctionOnNextCall(f_store);
+f_store(false, false, a1, 2);
+assertEquals(undefined, a1[2]);
+
+function test_arg(expected) {
+ return function(v) {
+ assertEquals(expected, v);
+ }
+}
+
+function f_call(f, test, test2, i) {
+ var o = [0.5,1,,3];
+ var d;
+ if (test) {
+ d = 1.5;
+ } else {
+ d = o[i];
+ }
+ if (test2) {
+ d += 1;
+ }
+ f(d);
+ return d;
+}
+
+f_call(test_arg(1.5), true, false, 0);
+f_call(test_arg(2.5), true, true, 0);
+f_call(test_arg(1), false, false, 1);
+f_call(test_arg(2), false, true, 1);
+%OptimizeFunctionOnNextCall(f_call);
+f_call(test_arg(undefined), false, false, 2);
+
+
+function f_external(test, test2, test3, a, i) {
+ var o = [0.5,1,,3];
+ var d;
+ if (test) {
+ d = 1.5;
+ } else {
+ d = o[i];
+ }
+ if (test2) {
+ d += 1;
+ }
+ if (test3) {
+ d = d|0;
+ }
+ a[d] = 1;
+ assertEquals(1, a[d]);
+ return d;
+}
+
+var a2 = new Int32Array(10);
+f_external(true, false, true, a2, 0);
+f_external(true, true, true, a2, 0);
+f_external(false, false, true, a2, 1);
+f_external(false, true, true, a2, 1);
+%OptimizeFunctionOnNextCall(f_external);
+f_external(false, false, false, a2, 2);
+assertEquals(1, a2[undefined]);
diff --git a/deps/v8/test/mjsunit/regress/regress-copy-hole-to-field.js b/deps/v8/test/mjsunit/regress/regress-copy-hole-to-field.js
new file mode 100644
index 0000000000..fa3db92928
--- /dev/null
+++ b/deps/v8/test/mjsunit/regress/regress-copy-hole-to-field.js
@@ -0,0 +1,57 @@
+// Copyright 2013 the V8 project authors. All rights reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following
+// disclaimer in the documentation and/or other materials provided
+// with the distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived
+// from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+// Flags: --allow-natives-syntax
+
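+// The hole in a holey double array is a sentinel NaN bit pattern;
+// copying it verbatim into a tracked double (or smi) field would leak the
+// sentinel, so the loads below must materialize undefined instead.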
+// Copy a hole from HOLEY_DOUBLE to double field.
+var a = [1.5,,1.7];
+var o = {a:1.8};
+
+function f1(o,a,i) {
+ o.a = a[i];
+}
+
+f1(o,a,0);
+f1(o,a,0);
+assertEquals(1.5, o.a);
+%OptimizeFunctionOnNextCall(f1);
+f1(o,a,1);
+assertEquals(undefined, o.a);
+
+// Copy a hole from HOLEY_SMI to smi field.
+var a = [1,,3];
+var o = {ab:5};
+
+function f2(o,a,i) {
+ o.ab = a[i];
+}
+
+f2(o,a,0);
+f2(o,a,0);
+%OptimizeFunctionOnNextCall(f2);
+f2(o,a,1);
+assertEquals(undefined, o.ab);
diff --git a/deps/v8/test/mjsunit/regress/regress-crbug-240032.js b/deps/v8/test/mjsunit/regress/regress-crbug-240032.js
new file mode 100644
index 0000000000..7ce95d34bd
--- /dev/null
+++ b/deps/v8/test/mjsunit/regress/regress-crbug-240032.js
@@ -0,0 +1,48 @@
+// Copyright 2013 the V8 project authors. All rights reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following
+// disclaimer in the documentation and/or other materials provided
+// with the distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived
+// from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+// Flags: --allow-natives-syntax
+
+// Generate closures that live in new-space.
+function mk() {
+ return function() {};
+}
+assertInstanceof(mk(), Function);
+assertInstanceof(mk(), Function);
+
+// Set up a constant function using the above closures.
+var o = {};
+o.func = mk();
+
+// Optimize object comparison with new-space RHS.
+function cmp(o, f) {
+ return f === o.func;
+}
+assertTrue(cmp(o, o.func));
+assertTrue(cmp(o, o.func));
+%OptimizeFunctionOnNextCall(cmp);
+assertTrue(cmp(o, o.func));
diff --git a/deps/v8/test/mjsunit/regress/regress-crbug-242924.js b/deps/v8/test/mjsunit/regress/regress-crbug-242924.js
new file mode 100644
index 0000000000..68ad7c6fd4
--- /dev/null
+++ b/deps/v8/test/mjsunit/regress/regress-crbug-242924.js
@@ -0,0 +1,48 @@
+// Copyright 2013 the V8 project authors. All rights reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following
+// disclaimer in the documentation and/or other materials provided
+// with the distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived
+// from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+// Flags: --allow-natives-syntax --expose-gc --gc-global
+
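+// gc() comes from --expose-gc, and --gc-global forces full collections,
+// presumably so the collector walks literal boilerplates containing holes.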
+function f() {
+ return [,{}];
+}
+
+assertEquals([,{}], f());
+assertEquals([,{}], f());
+%OptimizeFunctionOnNextCall(f);
+assertEquals([,{}], f());
+gc();
+
+function g() {
+ return [[,1.5],{}];
+}
+
+assertEquals([[,1.5],{}], g());
+assertEquals([[,1.5],{}], g());
+%OptimizeFunctionOnNextCall(g);
+assertEquals([[,1.5],{}], g());
+gc();
diff --git a/deps/v8/test/mjsunit/regress/regress-crbug-243868.js b/deps/v8/test/mjsunit/regress/regress-crbug-243868.js
new file mode 100644
index 0000000000..106d9cc78b
--- /dev/null
+++ b/deps/v8/test/mjsunit/regress/regress-crbug-243868.js
@@ -0,0 +1,46 @@
+// Copyright 2013 the V8 project authors. All rights reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following
+// disclaimer in the documentation and/or other materials provided
+// with the distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived
+// from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+// Flags: --allow-natives-syntax
+
+var non_const_true = true;
+
+function f(o) {
+ return (non_const_true && (o.val == null || false));
+}
+
+// Create an object with a constant function in another realm.
+var realm = Realm.create();
+var realmObject = Realm.eval(realm, "function g() {}; var o = { val:g }; o;");
+
+// Make the CompareNil IC in the function monomorphic.
+assertFalse(f(realmObject));
+assertFalse(f(realmObject));
+
+// Optimize the function containing the CompareNil IC.
+%OptimizeFunctionOnNextCall(f);
+assertFalse(f(realmObject));
diff --git a/deps/v8/test/mjsunit/regress/regress-crbug-244461.js b/deps/v8/test/mjsunit/regress/regress-crbug-244461.js
new file mode 100644
index 0000000000..9c7c2b6c43
--- /dev/null
+++ b/deps/v8/test/mjsunit/regress/regress-crbug-244461.js
@@ -0,0 +1,41 @@
+// Copyright 2012 the V8 project authors. All rights reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following
+// disclaimer in the documentation and/or other materials provided
+// with the distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived
+// from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+// Flags: --allow-natives-syntax --smi-only-arrays
+// Flags: --track-allocation-sites
+
+function foo(arg) {
+ var a = arg();
+ return a;
+}
+
+
+foo(Array);
+foo(Array);
+%OptimizeFunctionOnNextCall(foo);
+// Compilation of foo would crash without the bugfix for issue 244461.
+foo(Array);
diff --git a/deps/v8/test/mjsunit/regress/regress-crbug-245424.js b/deps/v8/test/mjsunit/regress/regress-crbug-245424.js
new file mode 100644
index 0000000000..005c8baba9
--- /dev/null
+++ b/deps/v8/test/mjsunit/regress/regress-crbug-245424.js
@@ -0,0 +1,41 @@
+// Copyright 2013 the V8 project authors. All rights reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following
+// disclaimer in the documentation and/or other materials provided
+// with the distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived
+// from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+// Flags: --allow-natives-syntax
+
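+// The duplicate 'foo' key below is intentional: in sloppy mode the last
+// definition wins, and the optimized literal must agree.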
+function boom() {
+ var a = {
+ foo: "bar",
+ foo: "baz"
+ };
+ return a;
+}
+
+assertEquals("baz", boom().foo);
+assertEquals("baz", boom().foo);
+%OptimizeFunctionOnNextCall(boom);
+assertEquals("baz", boom().foo);
diff --git a/deps/v8/test/mjsunit/regress/regress-crbug-248025.js b/deps/v8/test/mjsunit/regress/regress-crbug-248025.js
new file mode 100644
index 0000000000..c598859566
--- /dev/null
+++ b/deps/v8/test/mjsunit/regress/regress-crbug-248025.js
@@ -0,0 +1,40 @@
+// Copyright 2013 the V8 project authors. All rights reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following
+// disclaimer in the documentation and/or other materials provided
+// with the distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived
+// from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+// Flags: --harmony-iteration
+
+// Filler long enough to trigger lazy parsing.
+var filler = "//" + new Array(1024).join('x');
+
+// Test that the pre-parser does not crash when the expected contextual
+// keyword as part of a 'for' statement is not an identifier.
+try {
+ eval(filler + "\nfunction f() { for (x : y) { } }");
+ throw "not reached";
+} catch (e) {
+ if (!(e instanceof SyntaxError)) throw e;
+}
diff --git a/deps/v8/test/mjsunit/regress/regress-int32-truncation.js b/deps/v8/test/mjsunit/regress/regress-int32-truncation.js
new file mode 100644
index 0000000000..dec4ac1195
--- /dev/null
+++ b/deps/v8/test/mjsunit/regress/regress-int32-truncation.js
@@ -0,0 +1,61 @@
+// Copyright 2013 the V8 project authors. All rights reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following
+// disclaimer in the documentation and/or other materials provided
+// with the distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived
+// from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+// Flags: --allow-natives-syntax
+
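+// The unused 'a >> 3' introduces an int32-truncating use of 'a'. The
+// regression, presumably, was that this use caused 'a' itself to be kept
+// as int32, corrupting the full-width result checked below.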
+function f(i, b) {
+ var a = 0;
+ if (b) {
+ var c = 1 << i;
+ a = c + c;
+ }
+ var x = a >> 3;
+ return a;
+}
+
+f(1, false);
+f(1, true);
+%OptimizeFunctionOnNextCall(f);
+assertEquals((1 << 30) * 2, f(30, true));
+
+
+var global = 1;
+
+function f2(b) {
+ var a = 0;
+ if (b) {
+ a = global;
+ }
+ var x = a >> 3;
+ return a;
+}
+
+f2(false);
+f2(true);
+%OptimizeFunctionOnNextCall(f2);
+global = 2.5;
+assertEquals(global, f2(true));
diff --git a/deps/v8/test/mjsunit/regress/regress-seqstrsetchar-ex3.js b/deps/v8/test/mjsunit/regress/regress-seqstrsetchar-ex3.js
new file mode 100644
index 0000000000..e2592a1615
--- /dev/null
+++ b/deps/v8/test/mjsunit/regress/regress-seqstrsetchar-ex3.js
@@ -0,0 +1,45 @@
+// Copyright 2013 the V8 project authors. All rights reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following
+// disclaimer in the documentation and/or other materials provided
+// with the distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived
+// from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+// Flags: --allow-natives-syntax
+
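+// %NewString(10, true) allocates an uninitialized one-byte sequential
+// string and %_OneByteSeqStringSetChar writes a char code in place (these
+// are natives-syntax intrinsics, not public API). Each slot is written
+// twice; the second write (66 == 'B') must win in optimized code too.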
+function test() {
+ var string = %NewString(10, true);
+ for (var i = 0; i < 10; i++) {
+ %_OneByteSeqStringSetChar(string, i, 65);
+ %_OneByteSeqStringSetChar(string, i, 66);
+ }
+ for (var i = 0; i < 10; i++) {
+ assertEquals("B", string[i]);
+ }
+}
+
+test();
+test();
+%OptimizeFunctionOnNextCall(test);
+test();
+
diff --git a/deps/v8/test/mjsunit/smi-representation.js b/deps/v8/test/mjsunit/smi-representation.js
new file mode 100644
index 0000000000..882b5b91f0
--- /dev/null
+++ b/deps/v8/test/mjsunit/smi-representation.js
@@ -0,0 +1,68 @@
+// Copyright 2013 the V8 project authors. All rights reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following
+// disclaimer in the documentation and/or other materials provided
+// with the distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived
+// from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+// Flags: --track-fields --track-double-fields --allow-natives-syntax
+
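+// With --track-fields the map records the representation of 'smi'; the
+// asserts below check that optimized code produces objects with the same map.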
+function smi_field() {
+ return {"smi":0};
+}
+
+function check_smi_repr(o, d1, d2) {
+ var s = o.smi;
+ var d = d1 - d2;
+ s = s + d;
+ o.smi = s;
+ return o;
+}
+
+var test = smi_field();
+check_smi_repr(smi_field(), 5, 3);
+check_smi_repr(smi_field(), 6, 2);
+%OptimizeFunctionOnNextCall(check_smi_repr);
+var val = check_smi_repr(smi_field(), 8, 1);
+assertTrue(%HaveSameMap(val, test));
+
+function tagged_smi_field() {
+ var o = {"tag":false};
+ o.tag = 10;
+ return o;
+}
+
+function check_smi_repr_from_tagged(o, o2) {
+ var t = o2.tag;
+ o.smi = t;
+ return o;
+}
+
+check_smi_repr_from_tagged(smi_field(), tagged_smi_field());
+check_smi_repr_from_tagged(smi_field(), tagged_smi_field());
+%OptimizeFunctionOnNextCall(check_smi_repr_from_tagged);
+var val = check_smi_repr_from_tagged(smi_field(), tagged_smi_field());
+assertTrue(%HaveSameMap(val, test));
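+// 0x80000000 does not fit in a smi on 32-bit targets, so the last store
+// must generalize the field representation; there is no assert on the
+// result, the call just must not misbehave.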
+var overflow = tagged_smi_field();
+overflow.tag = 0x80000000;
+var val = check_smi_repr_from_tagged(smi_field(), overflow);
diff --git a/deps/v8/test/mjsunit/stack-traces.js b/deps/v8/test/mjsunit/stack-traces.js
index b5d58fa075..4a37ee6fa4 100644
--- a/deps/v8/test/mjsunit/stack-traces.js
+++ b/deps/v8/test/mjsunit/stack-traces.js
@@ -64,13 +64,13 @@ function testNestedEval() {
}
function testEvalWithSourceURL() {
- eval("function Doo() { FAIL; }; Doo();\n//@ sourceURL=res://name");
+ eval("function Doo() { FAIL; }; Doo();\n//# sourceURL=res://name");
}
function testNestedEvalWithSourceURL() {
var x = "FAIL";
var innerEval = 'function Inner() { eval(x); }\n//@ sourceURL=res://inner-eval';
- eval("function Outer() { eval(innerEval); Inner(); }; Outer();\n//@ sourceURL=res://outer-eval");
+ eval("function Outer() { eval(innerEval); Inner(); }; Outer();\n//# sourceURL=res://outer-eval");
}
function testValue() {
diff --git a/deps/v8/test/mjsunit/string-fromcharcode.js b/deps/v8/test/mjsunit/string-fromcharcode.js
index 631c04349f..ad3f7a96fb 100644
--- a/deps/v8/test/mjsunit/string-fromcharcode.js
+++ b/deps/v8/test/mjsunit/string-fromcharcode.js
@@ -103,6 +103,10 @@ for (var i = 0; i < 10; i++) {
test(i);
}
+assertEquals("AAAA", String.fromCharCode(65, 65, 65, 65));
+assertEquals("AAAA", String.fromCharCode(65, 65, 65, 65));
+%OptimizeFunctionOnNextCall(String.fromCharCode);
+assertEquals("AAAA", String.fromCharCode(65, 65, 65, 65));
// Test the custom IC works correctly when the map changes.
for (var i = 0; i < 10; i++) {
diff --git a/deps/v8/test/mjsunit/track-fields.js b/deps/v8/test/mjsunit/track-fields.js
index ced006c4fb..8b0ec29623 100644
--- a/deps/v8/test/mjsunit/track-fields.js
+++ b/deps/v8/test/mjsunit/track-fields.js
@@ -325,3 +325,83 @@ df3.first_double = 1.7;
df3.second_function = some_function2;
df1.first_double = 10;
read_first_double(df1);
+
+// Test boilerplates with computed values.
+function none_boilerplate(a) {
+ return {"a_none":a};
+}
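+// 'none' here presumably refers to the initial, untracked field
+// representation: the function is optimized before any call, so the
+// boilerplate field generalizes only through the stores below.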
+%OptimizeFunctionOnNextCall(none_boilerplate);
+var none_double1 = none_boilerplate(1.7);
+var none_double2 = none_boilerplate(1.9);
+assertTrue(%HaveSameMap(none_double1, none_double2));
+assertEquals(1.7, none_double1.a_none);
+assertEquals(1.9, none_double2.a_none);
+none_double2.a_none = 3.5;
+var none_double1 = none_boilerplate(1.7);
+var none_double2 = none_boilerplate(3.5);
+
+function none_to_smi(a) {
+ return {"a_smi":a};
+}
+
+var none_smi1 = none_to_smi(1);
+var none_smi2 = none_to_smi(2);
+%OptimizeFunctionOnNextCall(none_to_smi);
+var none_smi3 = none_to_smi(3);
+assertTrue(%HaveSameMap(none_smi1, none_smi2));
+assertTrue(%HaveSameMap(none_smi1, none_smi3));
+assertEquals(1, none_smi1.a_smi);
+assertEquals(2, none_smi2.a_smi);
+assertEquals(3, none_smi3.a_smi);
+
+function none_to_double(a) {
+ return {"a_double":a};
+}
+
+var none_to_double1 = none_to_double(1.5);
+var none_to_double2 = none_to_double(2.8);
+%OptimizeFunctionOnNextCall(none_to_double);
+var none_to_double3 = none_to_double(3.7);
+assertTrue(%HaveSameMap(none_to_double1, none_to_double2));
+assertTrue(%HaveSameMap(none_to_double1, none_to_double3));
+assertEquals(1.5, none_to_double1.a_double);
+assertEquals(2.8, none_to_double2.a_double);
+assertEquals(3.7, none_to_double3.a_double);
+
+function none_to_object(a) {
+ return {"an_object":a};
+}
+
+var none_to_object1 = none_to_object(true);
+var none_to_object2 = none_to_object(false);
+%OptimizeFunctionOnNextCall(none_to_object);
+var none_to_object3 = none_to_object(3.7);
+assertTrue(%HaveSameMap(none_to_object1, none_to_object2));
+assertTrue(%HaveSameMap(none_to_object1, none_to_object3));
+assertEquals(true, none_to_object1.an_object);
+assertEquals(false, none_to_object2.an_object);
+assertEquals(3.7, none_to_object3.an_object);
+
+function double_to_object(a) {
+ var o = {"d_to_h":1.8};
+ o.d_to_h = a;
+ return o;
+}
+
+var dh1 = double_to_object(true);
+var dh2 = double_to_object(false);
+assertTrue(%HaveSameMap(dh1, dh2));
+assertEquals(true, dh1.d_to_h);
+assertEquals(false, dh2.d_to_h);
+
+function smi_to_object(a) {
+ var o = {"s_to_t":18};
+ o.s_to_t = a;
+ return o;
+}
+
+var st1 = smi_to_object(true);
+var st2 = smi_to_object(false);
+assertTrue(%HaveSameMap(st1, st2));
+assertEquals(true, st1.s_to_t);
+assertEquals(false, st2.s_to_t);
diff --git a/deps/v8/tools/blink_tests/TestExpectations b/deps/v8/tools/blink_tests/TestExpectations
new file mode 100644
index 0000000000..49528a931e
--- /dev/null
+++ b/deps/v8/tools/blink_tests/TestExpectations
@@ -0,0 +1,23 @@
+# Created by test_result_analyzer.py
+# Date: 2013-06-07 15:09:29.286522
+# Waterfall: v8_blink
+# Revision type: webkit
+# Fail ratio: 0.2
+[ Linux Release x86 ] fast/js/JSON-stringify.html [ Skip ]
+[ Linux Release x86 ] fast/text/atsui-multiple-renderers.html [ Skip ]
+[ Linux Release x86 ] fast/text/international/complex-joining-using-gpos.html [ Skip ]
+[ Linux Release x86 ] fast/text/international/danda-space.html [ Skip ]
+[ Linux Release x86 ] fast/text/international/thai-baht-space.html [ Skip ]
+[ Linux Release x86 ] fast/text/international/thai-line-breaks.html [ Skip ]
+[ Linux Release x86 ] inspector/profiler/memory-instrumentation-external-array.html [ Skip ]
+[ Linux Release x86_64 ] fast/text/atsui-multiple-renderers.html [ Skip ]
+[ Linux Release x86_64 ] fast/text/international/complex-joining-using-gpos.html [ Skip ]
+[ Linux Release x86_64 ] fast/text/international/danda-space.html [ Skip ]
+[ Linux Release x86_64 ] fast/text/international/thai-baht-space.html [ Skip ]
+[ Linux Release x86_64 ] fast/text/international/thai-line-breaks.html [ Skip ]
+[ Linux Release x86_64 ] inspector/profiler/memory-instrumentation-external-array.html [ Skip ]
+[ Linux Debug ] fast/text/atsui-multiple-renderers.html [ Skip ]
+[ Linux Debug ] fast/text/international/complex-joining-using-gpos.html [ Skip ]
+[ Linux Debug ] fast/text/international/danda-space.html [ Skip ]
+[ Linux Debug ] fast/text/international/thai-baht-space.html [ Skip ]
+[ Linux Debug ] fast/text/international/thai-line-breaks.html [ Skip ]
diff --git a/deps/v8/tools/gyp/v8.gyp b/deps/v8/tools/gyp/v8.gyp
index f31fc4a9cb..ed37e72261 100644
--- a/deps/v8/tools/gyp/v8.gyp
+++ b/deps/v8/tools/gyp/v8.gyp
@@ -212,6 +212,7 @@
'../../src/arguments.h',
'../../src/assembler.cc',
'../../src/assembler.h',
+ '../../src/assert-scope.h',
'../../src/ast.cc',
'../../src/ast.h',
'../../src/atomicops.h',
@@ -321,10 +322,14 @@
'../../src/heap-snapshot-generator.h',
'../../src/heap.cc',
'../../src/heap.h',
+ '../../src/hydrogen-environment-liveness.cc',
+ '../../src/hydrogen-environment-liveness.h',
'../../src/hydrogen-instructions.cc',
'../../src/hydrogen-instructions.h',
'../../src/hydrogen.cc',
'../../src/hydrogen.h',
+ '../../src/hydrogen-gvn.cc',
+ '../../src/hydrogen-gvn.h',
'../../src/ic-inl.h',
'../../src/ic.cc',
'../../src/ic.h',
@@ -451,6 +456,10 @@
'../../src/transitions.h',
'../../src/type-info.cc',
'../../src/type-info.h',
+ '../../src/types.cc',
+ '../../src/types.h',
+ '../../src/typing.cc',
+ '../../src/typing.h',
'../../src/unbound-queue-inl.h',
'../../src/unbound-queue.h',
'../../src/unicode-inl.h',